import itertools
from functools import partial
from multiprocessing.pool import Pool
import numpy as np
import pandas as pd
from cellphonedb.src.core.core_logger import core_logger
from cellphonedb.src.core.models.complex import complex_helper
def get_significant_means(real_mean_analysis: pd.DataFrame,
result_percent: pd.DataFrame,
min_significant_mean: float) -> pd.DataFrame:
"""
    If the result_percent value is > min_significant_mean, the value is set to NaN; otherwise the real mean is kept.
EXAMPLE:
INPUT:
real mean
cluster1 cluster2 cluster
ensembl1 0.1 1.0 2.0
ensembl2 2.0 0.1 0.2
ensembl3 0.3 0.0 0.5
result percent
cluster1 cluster2 cluster
ensembl1 0.0 1.0 1.0
ensembl2 0.04 0.03 0.62
ensembl3 0.3 0.55 0.02
min_significant_mean = 0.05
RESULT:
cluster1 cluster2 cluster
ensembl1 0.1 NaN NaN
ensembl2 2.0 0.1 NaN
ensembl3 NaN NaN 0.5
"""
significant_means = real_mean_analysis.copy()
for index, mean_analysis in real_mean_analysis.iterrows():
for cluster_interaction in list(result_percent.columns):
if result_percent.at[index, cluster_interaction] > min_significant_mean:
                significant_means.at[index, cluster_interaction] = np.nan
return significant_means
def shuffle_meta(meta: pd.DataFrame) -> pd.DataFrame:
"""
    Randomly permutes the meta values, generating a new shuffled meta table.
"""
meta_copy = meta.copy()
    meta_copy['cell_type'] = np.random.permutation(meta_copy['cell_type'].values)
return meta_copy
def build_clusters(meta: pd.DataFrame, counts: pd.DataFrame, complex_composition: pd.DataFrame) -> dict:
"""
    Builds a cluster structure and calculates the mean values.
"""
cluster_names = meta['cell_type'].drop_duplicates().tolist()
    clusters = {'names': cluster_names,
                'counts': {},
                'means': pd.DataFrame(columns=cluster_names, index=counts.index, dtype='float32')}
# Simple genes cluster counts
for cluster_name in cluster_names:
cells = meta[meta['cell_type'] == cluster_name].index
cluster_count = counts.loc[:, cells]
clusters['counts'][cluster_name] = cluster_count
clusters['means'][cluster_name] = cluster_count.apply(lambda count: count.mean(), axis=1)
# Complex genes cluster counts
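    # For each complex, the cluster mean is the minimum of its component protein
    # means (the rate-limiting subunit), and the complex's counts row is copied
    # from that limiting component so the later percent check uses the same cells.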
if not complex_composition.empty:
complex_multidata_ids = complex_composition['complex_multidata_id'].drop_duplicates().to_list()
complex_means = pd.DataFrame(columns=cluster_names, index=complex_multidata_ids, dtype='float32')
for cluster_name in cluster_names:
for complex_multidata_id in complex_multidata_ids:
complex_components = complex_composition[
complex_composition['complex_multidata_id'] == complex_multidata_id]
complex_components['mean'] = complex_components['protein_multidata_id'].apply(
lambda protein: clusters['means'].at[protein, cluster_name])
min_component_mean_id = complex_components['mean'].idxmin()
complex_means.at[complex_multidata_id, cluster_name] = complex_components.at[
min_component_mean_id, 'mean']
min_component = complex_components.loc[min_component_mean_id]
clusters['counts'][cluster_name].loc[min_component['complex_multidata_id']] = \
clusters['counts'][cluster_name].loc[min_component['protein_multidata_id']]
        clusters['means'] = pd.concat([clusters['means'], complex_means])
return clusters
def filter_counts_by_interactions(counts: pd.DataFrame,
interactions: pd.DataFrame) -> pd.DataFrame:
"""
    Removes counts that are not defined in the interaction components.
"""
    multidata_genes_ids = pd.concat([interactions['multidata_1_id'],
                                     interactions['multidata_2_id']]).drop_duplicates().tolist()
counts_filtered = counts.filter(multidata_genes_ids, axis=0)
return counts_filtered
def filter_empty_cluster_counts(counts: pd.DataFrame) -> pd.DataFrame:
"""
    Removes counts where all values are zero.
"""
if counts.empty:
return counts
filtered_counts = counts[counts.apply(lambda row: row.sum() > 0, axis=1)]
return filtered_counts
def mean_pvalue_result_build(real_mean_analysis: pd.DataFrame, result_percent: pd.DataFrame,
interactions_data_result: pd.DataFrame) -> pd.DataFrame:
"""
Merges the pvalues and means in one table
"""
mean_pvalue_result = pd.DataFrame(index=real_mean_analysis.index)
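    # Each output cell joins the real mean and its p-value into one string,
    # e.g. "0.25 | 0.01" with the hard-coded ' | ' separator below.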
for interaction_cluster in real_mean_analysis.columns.values:
mean_pvalue_result[interaction_cluster] = real_mean_analysis[interaction_cluster].astype(str).str.cat(
result_percent[interaction_cluster].astype(str), sep=' | ')
mean_pvalue_result = pd.concat([interactions_data_result, mean_pvalue_result], axis=1, join='inner', sort=False)
return mean_pvalue_result
def get_cluster_combinations(cluster_names: list) -> list:
"""
    Calculates and sorts all ordered cluster combinations, including self-pairs.
    e.g.
INPUT
cluster_names = ['cluster1', 'cluster2', 'cluster3']
RESULT
[('cluster1','cluster1'),('cluster1','cluster2'),('cluster1','cluster3'),
('cluster2','cluster1'),('cluster2','cluster2'),('cluster2','cluster3'),
('cluster3','cluster1'),('cluster3','cluster2'),('cluster3','cluster3')]
"""
return sorted(itertools.product(cluster_names, repeat=2))
def build_result_matrix(interactions: pd.DataFrame, cluster_interactions: list, separator: str) -> pd.DataFrame:
"""
    Builds an empty result matrix to be filled later.
"""
columns = []
for cluster_interaction in cluster_interactions:
columns.append('{}{}{}'.format(cluster_interaction[0], separator, cluster_interaction[1]))
result = pd.DataFrame(index=interactions.index, columns=columns, dtype=float)
return result
def mean_analysis(interactions: pd.DataFrame,
clusters: dict,
cluster_interactions: list,
base_result: pd.DataFrame,
separator: str) -> pd.DataFrame:
"""
    Calculates the mean for each interaction and each cluster pair;
    sets 0 if either of the two components is 0.
EXAMPLE:
cluster_means
cluster1 cluster2 cluster3
ensembl1 0.0 0.2 0.3
ensembl2 0.4 0.5 0.6
ensembl3 0.7 0.0 0.9
interactions:
ensembl1,ensembl2
ensembl2,ensembl3
RESULT:
cluster1_cluster1 cluster1_cluster2 ... cluster3_cluster2 cluster3_cluster3
ensembl1_ensembl2 mean(0.0,0.4)* mean(0.0,0.5)* mean(0.3,0.5) mean(0.3,0.6)
ensembl2_ensembl3 mean(0.4,0.7) mean(0.4,0.0)* mean(0.6,0.0)* mean(0.6,0.9)
    Results marked with * are 0 because one of the two components is 0.
"""
result = base_result.copy()
for interaction_index, interaction in interactions.iterrows():
for cluster_interaction in cluster_interactions:
cluster_interaction_string = '{}{}{}'.format(cluster_interaction[0], separator, cluster_interaction[1])
interaction_mean = cluster_interaction_mean(cluster_interaction, interaction, clusters['means'])
result.at[interaction_index, cluster_interaction_string] = interaction_mean
return result
def percent_analysis(clusters: dict,
threshold: float,
interactions: pd.DataFrame,
cluster_interactions: list,
base_result: pd.DataFrame,
separator: str) -> pd.DataFrame:
"""
    Calculates the percent values for each cluster pair and each gene interaction.
    If either percent flag is non-zero, the result is 0; otherwise it is 1.
EXAMPLE:
INPUT:
threshold = 0.1
cluster1 = cell1,cell2
cluster2 = cell3
cell1 cell2 cell3
ensembl1 0.0 0.6 0.3
ensembl2 0.1 0.05 0.06
ensembl3 0.0 0.0 0.9
interactions:
ensembl1,ensembl2
ensembl1,ensembl3
(after percents calculation)
cluster1 cluster2
ensembl1 0 0
ensembl2 1 1
ensembl3 1 0
RESULT:
cluster1_cluster1 cluster1_cluster2 cluster2_cluster1 cluster2_cluster2
ensembl1_ensembl2 (0,1)-> 0 (0,1)-> 0 (0,1)->0 (0,1)->0
ensembl1_ensembl3 (0,1)-> 0 (0,0)-> 1 (0,1)->0 (0,0)->1
"""
result = base_result.copy()
percents = pd.DataFrame(columns=clusters['names'], index=clusters['means'].index)
# percents calculation
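    # Note: counts_percent() returns 1 when a gene is detected in fewer than
    # `threshold` of the cluster's cells (i.e. not reliably expressed) and 0
    # otherwise, so a 0 in `percents` means "expressed".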
for cluster_name in clusters['names']:
counts = clusters['counts'][cluster_name]
percents[cluster_name] = counts.apply(lambda count: counts_percent(count, threshold), axis=1)
for interaction_index, interaction in interactions.iterrows():
for cluster_interaction in cluster_interactions:
cluster_interaction_string = '{}{}{}'.format(cluster_interaction[0], separator, cluster_interaction[1])
interaction_percent = cluster_interaction_percent(cluster_interaction, interaction, percents)
result.at[interaction_index, cluster_interaction_string] = interaction_percent
return result
def shuffled_analysis(iterations: int,
meta: pd.DataFrame,
counts: pd.DataFrame,
interactions: pd.DataFrame,
cluster_interactions: list,
complex_composition: pd.DataFrame,
base_result: pd.DataFrame,
threads: int,
separator: str) -> list:
"""
    Shuffles the meta data for each iteration, calculates the means, and collects the results in a list.
    Runs in multiple worker processes to speed it up.
"""
with Pool(processes=threads) as pool:
statistical_analysis_thread = partial(_statistical_analysis,
base_result,
cluster_interactions,
counts,
interactions,
meta,
complex_composition,
separator)
results = pool.map(statistical_analysis_thread, range(iterations))
return results
def _statistical_analysis(base_result,
cluster_interactions,
counts,
interactions,
meta,
complex_composition: pd.DataFrame,
separator,
iteration_number) -> pd.DataFrame:
"""
    Shuffles the meta dataset and calculates the means.
"""
shuffled_meta = shuffle_meta(meta)
shuffled_clusters = build_clusters(shuffled_meta,
counts,
complex_composition)
result_mean_analysis = mean_analysis(interactions,
shuffled_clusters,
cluster_interactions,
base_result,
separator)
return result_mean_analysis
def build_percent_result(real_mean_analysis: pd.DataFrame, real_percents_analysis: pd.DataFrame,
statistical_mean_analysis: list, interactions: pd.DataFrame, cluster_interactions: list,
base_result: pd.DataFrame, separator: str) -> pd.DataFrame:
"""
Calculates the pvalues after statistical analysis.
    If real_percent or real_mean is zero, result_percent is 1.
    If not:
    Counts how many shuffled means are bigger than the real mean and divides that count
    by the total number of iterations.
EXAMPLE:
INPUT:
real_mean_analysis:
cluster1_cluster1 cluster1_cluster2 ...
interaction1 0.5 0.4
interaction2 0.0 0.2
real_percents_analysis:
cluster1_cluster1 cluster1_cluster2 ...
interaction1 1 0
interaction2 0 1
statistical means:
[
cluster1_cluster1 cluster1_cluster2 ...
interaction1 0.6 0.1
interaction2 0.0 0.2
,
cluster1_cluster1 cluster1_cluster2 ...
interaction1 0.5 0.4
interaction2 0.0 0.6
]
iterations = 2
RESULT:
cluster1_cluster1 cluster1_cluster2 ...
interaction1 1 1
interaction2 1 0.5
"""
core_logger.info('Building Pvalues result')
percent_result = base_result.copy()
for interaction_index, interaction in interactions.iterrows():
for cluster_interaction in cluster_interactions:
cluster_interaction_string = '{}{}{}'.format(cluster_interaction[0], separator, cluster_interaction[1])
real_mean = real_mean_analysis.at[interaction_index, cluster_interaction_string]
            real_percent = real_percents_analysis.at[interaction_index, cluster_interaction_string]
if int(real_percent) == 0 or real_mean == 0:
result_percent = 1.0
else:
shuffled_bigger = 0
for statistical_mean in statistical_mean_analysis:
mean = statistical_mean.at[interaction_index, cluster_interaction_string]
if mean > real_mean:
shuffled_bigger += 1
result_percent = shuffled_bigger / len(statistical_mean_analysis)
percent_result.at[interaction_index, cluster_interaction_string] = result_percent
return percent_result
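# A sketch of how the functions above typically fit together (the real caller lives
# elsewhere in CellPhoneDB; the iteration count, threshold, thread count and separator
# below are illustrative values only):
#
#   clusters = build_clusters(meta, counts, complex_composition)
#   cluster_interactions = get_cluster_combinations(clusters['names'])
#   base_result = build_result_matrix(interactions, cluster_interactions, separator='|')
#   real_means = mean_analysis(interactions, clusters, cluster_interactions, base_result, '|')
#   real_percents = percent_analysis(clusters, 0.1, interactions, cluster_interactions, base_result, '|')
#   shuffled_means = shuffled_analysis(1000, meta, counts, interactions, cluster_interactions,
#                                      complex_composition, base_result, 4, '|')
#   pvalues = build_percent_result(real_means, real_percents, shuffled_means, interactions,
#                                  cluster_interactions, base_result, '|')
#   rank, significant_means = build_significant_means(real_means, pvalues, min_significant_mean=0.05)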
def interacting_pair_build(interactions: pd.DataFrame) -> pd.Series:
"""
    Returns the interaction result formatted with prefixes.
"""
def get_interactor_name(interaction: pd.Series, suffix: str) -> str:
if interaction['is_complex{}'.format(suffix)]:
return interaction['name{}'.format(suffix)]
return interaction['gene_name{}'.format(suffix)]
interacting_pair = interactions.apply(
lambda interaction: '{}_{}'.format(get_interactor_name(interaction, '_1'),
get_interactor_name(interaction, '_2')), axis=1)
interacting_pair.rename('interacting_pair', inplace=True)
return interacting_pair
def build_significant_means(real_mean_analysis: pd.DataFrame,
result_percent: pd.DataFrame,
min_significant_mean: float) -> (pd.Series, pd.DataFrame):
"""
    Calculates the significant means and adds a rank (number of non-empty entries divided by the total number of entries).
"""
significant_means = get_significant_means(real_mean_analysis, result_percent, min_significant_mean)
significant_mean_rank = significant_means.count(axis=1) # type: pd.Series
number_of_clusters = len(significant_means.columns)
significant_mean_rank = significant_mean_rank.apply(lambda rank: rank / number_of_clusters)
significant_mean_rank = significant_mean_rank.round(3)
significant_mean_rank.name = 'rank'
return significant_mean_rank, significant_means
def cluster_interaction_percent(cluster_interaction: tuple,
interaction: pd.Series,
clusters_percents: pd.DataFrame,
) -> int:
"""
    If either percent flag is non-zero, the result is 0; otherwise it is 1.
"""
percent_cluster_receptors = clusters_percents[cluster_interaction[0]]
percent_cluster_ligands = clusters_percents[cluster_interaction[1]]
receptor = interaction['multidata_1_id']
percent_receptor = percent_cluster_receptors.loc[receptor]
ligand = interaction['multidata_2_id']
percent_ligand = percent_cluster_ligands.loc[ligand]
if percent_receptor or percent_ligand:
interaction_percent = 0
else:
interaction_percent = 1
return interaction_percent
def counts_percent(counts: pd.Series,
threshold: float) -> int:
"""
    Calculates the fraction of positive values (number of positives divided by the total).
    If this fraction is < threshold, returns 1; otherwise returns 0.
EXAMPLE:
INPUT:
counts = [0.1, 0.2, 0.3, 0.0]
threshold = 0.1
RESULT:
    # 3/4 -> 0.75, which is not less than 0.1
result = 0
"""
total = len(counts)
positive = len(counts[counts > 0])
if positive / total < threshold:
return 1
else:
return 0
def cluster_interaction_mean(cluster_interaction: tuple,
interaction: pd.Series,
clusters_means: pd.DataFrame) -> float:
"""
Calculates the mean value for two clusters.
    Sets 0 if either of the two is 0.
"""
means_cluster_receptors = clusters_means[cluster_interaction[0]]
means_cluster_ligands = clusters_means[cluster_interaction[1]]
receptor = interaction['multidata_1_id']
mean_receptor = means_cluster_receptors[receptor]
ligand = interaction['multidata_2_id']
mean_ligand = means_cluster_ligands[ligand]
if mean_receptor == 0 or mean_ligand == 0:
interaction_mean = 0
else:
interaction_mean = (mean_receptor + mean_ligand) / 2
return interaction_mean
def filter_interactions_by_counts(interactions: pd.DataFrame,
counts: pd.DataFrame,
complex_composition: pd.DataFrame) -> pd.DataFrame:
multidatas = list(counts.index)
if not complex_composition.empty:
# Source repository: intel/acrn-workload-consolidation
import json
import base64
import os
import time
import paho.mqtt.client as mqtt
class MqttClient():
"""
This is a Mosquitto Client class that will create an interface to connect to mosquitto
by creating mqtt clients.
    It provides methods for connecting, disconnecting, publishing, subscribing, unsubscribing and
    also callbacks related to many different events like on_connect, on_message, on_publish, on_subscribe,
    on_unsubscribe, on_disconnect.
"""
def __init__(self, name='user', clientid=None, clean_session=True, userdata=None, pub_only=False, sub_only=False, host='localhost', port=1883, keepalive=60, bind_address=''):
"""
        Create a new instance of the MosquittoClient class, passing in the client
        information, host, port, keepalive parameters.
        :param name: name of the client trying to connect to mosquitto
:type name: string
:param clientid: unique client id for a client-broker connection
:type clientid: string
        :param clean_session: whether to keep a persistent connection or not
:type clean_session: bool
:param userdata: user defined data of any type that is passed as the userdata parameter to callbacks.
It may be updated at a later point with the user_data_set() function.
:type userdata: user defined data (can be int, string, or any object)
:param host: the hostname or IP address of the remote broker
:type host: string
:param port: the network port of the server host to connect to. Defaults to 1883.
Note that the default port for MQTT over SSL/TLS is 8883 so if you are using tls_set() the port may need providing manually
:type port: int
:param keepalive: maximum period in seconds allowed between communications with the broker.
If no other messages are being exchanged, this controls the rate at which the client will send ping messages to the broker
:type keepalive: int
:param bind_address: the IP address of a local network interface to bind this client to, assuming multiple interfaces exist
:type bind_address: string
"""
# pi('__init__')
self._name = name
self._clientid = clientid or self._genid()
self._clean_session = clean_session
self._userdata = userdata
self._host = host
self._port = port
self._keepalive = keepalive
self._bind_address = bind_address
self._pub_only = pub_only
self._sub_only = sub_only
self._connected = False
self._connecting = False
self._closing = False
self._closed = False
self._connection = None
self._client = None
# pr('__init__')
def _genid(self):
"""
Method that generates unique clientids by calling base64.urlsafe_b64encode(os.urandom(32)).replace('=', 'e').
:return: Returns a unique urlsafe id
:rtype: string
"""
# pi('_genid')
return str(base64.urlsafe_b64encode(os.urandom(32)).replace(b'=', b'e'))
def start(self):
"""
        Method to start the mosquitto client by initiating a connection to the mosquitto broker
        using the connect method and starting the network loop.
"""
# pi('start')
print('[MosquittoClient] starting the mosquitto connection')
self.setup_connection()
self.setup_callbacks()
# self._connection is the return code of the connection, success, failure, error. Success = 0
self._connection = self.connect()
# print '[MosquittoClient] self._connection : ', self._connection
if self._connection == 0:
# Start paho-mqtt mosquitto Event/IO Loop
            print('[MosquittoClient] Starting Loop for client : %s ' % self)
self._client.loop_start()
else:
self._connecting = False
print('[MosquittoClient] Connection for client : %s with broker Not Established ' % self)
# pr('start')
def setup_connection(self):
"""
        Method to set up extra options like username, password, will_set, tls_set etc.
before starting the connection.
"""
# pi('setup_connection')
self._client = self.create_client()
# pr('setup_connection')
def create_client(self):
"""
Method to create the paho-mqtt Client object which will be used to connect
to mosquitto.
:return: Returns a mosquitto mqtt client object
:rtype: paho.mqtt.client.Client
"""
# pi('create_client')
return mqtt.Client(client_id=self._clientid, clean_session=self._clean_session, userdata=self._userdata)
def setup_callbacks(self):
"""
Method to setup all callbacks related to the connection, like on_connect,
on_disconnect, on_publish, on_subscribe, on_unsubcribe etc.
"""
# pi('setup_callbacks')
self._client.on_connect = self.on_connect
self._client.on_disconnect = self.on_disconnect
if self._pub_only:
self._client.on_publish = self.on_publish
elif self._sub_only:
self._client.on_subscribe = self.on_subscribe
            self._client.on_unsubscribe = self.on_unsubscribe
else:
self._client.on_publish = self.on_publish
self._client.on_subscribe = self.on_subscribe
            self._client.on_unsubscribe = self.on_unsubscribe
# pr('setup_callbacks')
def connect(self):
"""
        This method connects to Mosquitto and returns the
        connection return code.
When the connection is established, the on_connect callback
will be invoked by paho-mqtt.
:return: Returns a mosquitto mqtt connection return code, success, failure, error, etc
:rtype: int
"""
# pi('connect')
if self._connecting:
print('[MosquittoClient] Already connecting to Mosquitto')
return
self._connecting = True
if self._connected:
print('[MosquittoClient] Already connected to Mosquitto')
else:
print('[MosquittoClient] Connecting to Mosquitto on {}:{}, Object: {} '.format(self._host, self._port, self))
# pr('connect')
return self._client.connect(host=self._host, port=self._port, keepalive=self._keepalive, bind_address=self._bind_address)
def on_connect(self, client, userdata, flags, rc):
"""
This is a Callback method and is called when the broker responds to our
connection request.
:param client: the client instance for this callback
:param userdata: the private user data as set in Client() or userdata_set()
:param flags: response flags sent by the broker
:type flags: dict
:param rc: the connection result
:type rc: int
flags is a dict that contains response flags from the broker:
flags['session present'] - this flag is useful for clients that are using clean session
set to 0 only. If a client with clean session=0, that reconnects to a broker that it has
previously connected to, this flag indicates whether the broker still has the session
information for the client. If 1, the session still exists.
The value of rc indicates success or not:
0: Connection successful 1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier 3: Connection refused - server unavailable
4: Connection refused - bad username or password 5: Connection refused - not authorised
6-255: Currently unused.
"""
# pi('on_connect')
if self._connection == 0:
self._connected = True
print('[MosquittoClient] Connection for client : %s with broker established, Return Code : %s ' % (client, str(rc)))
else:
self._connecting = False
print('[MosquittoClient] Connection for client : %s with broker Not Established, Return Code : %s ' % (client, str(rc)))
# pr('on_connect')
def disconnect(self):
"""
        Method to disconnect the mqtt connection with the mosquitto broker.
on_disconnect callback is called as a result of this method call.
"""
# pi('disconnect')
if self._closing:
print('[MosquittoClient] Connection for client : %s already disconnecting..' % self)
else:
self._closing = True
if self._closed:
print('[MosquittoClient] Connection for client : %s already disconnected ' % self)
else:
self._client.disconnect()
# pr('disconnect')
def on_disconnect(self, client, userdata, rc):
"""
This is a Callback method and is called when the client disconnects from
the broker.
"""
# pi('on_disconnect')
print('[MosquittoClient] Connection for client : %s with broker cleanly disconnected with return code : %s ' % (client, str(rc)))
self._connecting = False
self._connected = False
self._closing = True
self._closed = True
self._client.loop_stop()
# pr('on_disconnect')
def subscribe(self, topic=None, on_message=None):
"""
        This method sets up the mqtt client to start subscribing to a topic (or a list of
        (topic, qos) tuples).
        The on_subscribe method is called as a callback when the broker returns the SUBACK
        frame, whether subscribing was successful or not.
        :param topic: topic to subscribe to.
        :type topic: string
"""
# pi('subscribe')
print('[MosquittoClient] client : %s started Subscribing ' % self)
self._client.subscribe(topic)
self._client.on_message = on_message
# pr('subscribe')
def on_subscribe(self, client, userdata, mid, granted_qos):
"""
This is a Callback method and is called when the broker responds to a subscribe request.
The mid variable matches the mid variable returned from the corresponding subscribe() call.
The granted_qos variable is a list of integers that give the QoS level the broker has granted
for each of the different subscription requests.
:param client: the client which subscribed which triggered this callback
:param userdata: the userdata associated with the client during its creation
:param mid: the message id value returned by the broker
:type mid: int
:param granted_qos: list of integers that give the QoS level the broker has granted
for each of the different subscription requests
:type granted_qos: list
"""
# pi('on_subscribe')
        print('[MosquittoClient] client : %s subscribed to topic successfully with message id : %s '
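# A minimal usage sketch for MqttClient (the class is truncated above; the host,
# topic and callback below are illustrative values, not part of the original file):
#
#   client = MqttClient(name='reader', host='localhost', port=1883, sub_only=True)
#   client.start()
#   client.subscribe(topic='workload/metrics',
#                    on_message=lambda cl, userdata, msg: print(msg.payload))
#   ...
#   client.disconnect()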
'Maxis'},
'6015881':{'en': 'Packet One'},
'6017':{'en': 'Maxis'},
'6016':{'en': 'DiGi'},
'6019':{'en': 'Celcom'},
'88018':{'en': 'Robi'},
'88019':{'en': 'Banglalink'},
'62562994':{'en': 'Esia'},
'62562991':{'en': 'Esia'},
'556399911':{'en': 'Vivo'},
'62562993':{'en': 'Esia'},
'62562992':{'en': 'Esia'},
'558899623':{'en': 'TIM'},
'559498146':{'en': 'TIM'},
'559498147':{'en': 'TIM'},
'559498144':{'en': 'TIM'},
'559498145':{'en': 'TIM'},
'62361602':{'en': 'Esia'},
'559498142':{'en': 'TIM'},
'62361600':{'en': 'Esia'},
'62361601':{'en': 'Esia'},
'62361606':{'en': 'Esia'},
'62361604':{'en': 'Esia'},
'62361605':{'en': 'Esia'},
'917896':{'en': 'Airtel'},
'917897':{'en': 'Airtel'},
'917894':{'en': 'Airtel'},
'917895':{'en': 'Airtel'},
'917892':{'en': 'Reliance Jio'},
'917893':{'en': 'Airtel'},
'917890':{'en': 'Idea'},
'917891':{'en': 'Idea'},
'559498141':{'en': 'TIM'},
'917898':{'en': 'Airtel'},
'917899':{'en': 'Idea'},
'557199908':{'en': 'Vivo'},
'557199909':{'en': 'Vivo'},
'557199904':{'en': 'Vivo'},
'557199905':{'en': 'Vivo'},
'557199906':{'en': 'Vivo'},
'557199907':{'en': 'Vivo'},
'557199901':{'en': 'Vivo'},
'557199902':{'en': 'Vivo'},
'557199903':{'en': 'Vivo'},
'9176568':{'en': 'CellOne'},
'558399948':{'en': 'TIM'},
'558399941':{'en': 'TIM'},
'558399940':{'en': 'TIM'},
'558399943':{'en': 'TIM'},
'558399942':{'en': 'TIM'},
'558399945':{'en': 'TIM'},
'558399944':{'en': 'TIM'},
'558399947':{'en': 'TIM'},
'558399946':{'en': 'TIM'},
'6272199':{'en': 'Esia'},
'6272190':{'en': 'Esia'},
'6272193':{'en': 'Esia'},
'8536652':{'en': 'CTM'},
'659716':{'en': 'SingTel'},
'659714':{'en': 'SingTel'},
'659715':{'en': 'SingTel'},
'659712':{'en': 'SingTel'},
'659713':{'en': 'SingTel'},
'659710':{'en': 'SingTel'},
'659711':{'en': 'SingTel'},
'67649':{'en': 'U-Call'},
'67646':{'en': 'U-Call'},
'558299444':{'en': 'Claro BR'},
'62341681':{'en': 'Esia'},
'62341682':{'en': 'Esia'},
'55779814':{'en': 'Claro BR'},
'556398133':{'en': 'TIM'},
'556398132':{'en': 'TIM'},
'55779810':{'en': 'Claro BR'},
'55779811':{'en': 'Claro BR'},
'55779812':{'en': 'Claro BR'},
'55779813':{'en': 'Claro BR'},
'918369':{'en': 'Reliance Jio'},
'556199826':{'en': 'Vivo'},
'917780':{'en': 'Airtel'},
'918360':{'en': 'Reliance Jio'},
'556199824':{'en': 'Vivo'},
'5588985':{'en': 'Oi'},
'9172839':{'en': 'Idea'},
'9172838':{'en': 'Idea'},
'556199825':{'en': 'Vivo'},
'9175919':{'en': 'Vodafone'},
'9174784':{'en': 'Vodafone'},
'9172831':{'en': 'Vodafone'},
'556199822':{'en': 'Vivo'},
'601881':{'en': 'YTL'},
'601880':{'en': 'YTL'},
'601887':{'en': 'U Mobile'},
'9172834':{'en': 'Vodafone'},
'9172837':{'en': 'Vodafone'},
'556199823':{'en': 'Vivo'},
'5588988':{'en': 'Oi'},
'9174786':{'en': 'Vodafone'},
'9173610':{'en': 'Vodafone'},
'5588989':{'en': 'Oi'},
'9174781':{'en': 'Airtel'},
'556298126':{'en': 'TIM'},
'9173618':{'en': 'Vodafone'},
'559699177':{'en': 'Vivo'},
'9174783':{'en': 'Airtel'},
'918219':{'en': 'Reliance Jio'},
'9174782':{'en': 'Airtel'},
'918218':{'en': 'Reliance Jio'},
'9175910':{'en': 'Idea'},
'559699179':{'en': 'Vivo'},
'9174789':{'en': 'Vodafone'},
'9174788':{'en': 'Vodafone'},
'852550':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'917429':{'en': 'Reliance'},
'62295995':{'en': 'Esia'},
'9174920':{'en': 'Idea'},
'917906':{'en': 'Reliance Jio'},
'556598436':{'en': 'Brasil Telecom GSM'},
'556598437':{'en': 'Brasil Telecom GSM'},
'556598434':{'en': 'Brasil Telecom GSM'},
'556598435':{'en': 'Brasil Telecom GSM'},
'556598432':{'en': 'Brasil Telecom GSM'},
'556598433':{'en': 'Brasil Telecom GSM'},
'556598431':{'en': 'Brasil Telecom GSM'},
'556598438':{'en': 'Brasil Telecom GSM'},
'556598439':{'en': 'Brasil Telecom GSM'},
'9177750':{'en': 'Idea'},
'9177759':{'en': 'Idea'},
'9177758':{'en': 'Idea'},
'557999603':{'en': 'Vivo'},
'557999602':{'en': 'Vivo'},
'557999601':{'en': 'Vivo'},
'557999600':{'en': 'Vivo'},
'557999607':{'en': 'Vivo'},
'557999606':{'en': 'Vivo'},
'557999605':{'en': 'Vivo'},
'557999604':{'en': 'Vivo'},
'917908':{'en': 'Reliance Jio'},
'917353':{'en': 'Idea'},
'917909':{'en': 'Idea'},
'917600':{'en': 'Airtel'},
'6229891':{'en': 'Esia'},
'917607':{'en': 'Airtel'},
'62322924':{'en': 'Esia'},
'62322922':{'en': 'Esia'},
'62322923':{'en': 'Esia'},
'62322920':{'en': 'Esia'},
'62322921':{'en': 'Esia'},
'918296':{'en': 'Tata Docomo'},
'852681':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'63932':{'en': 'Sun'},
'63933':{'en': 'Sun'},
'63930':{'en': 'Smart'},
'917727':{'en': 'Hexacom'},
'63936':{'en': 'Globe'},
'63937':{'en': 'Globe'},
'63935':{'en': 'Globe'},
'63938':{'en': 'Smart'},
'63939':{'en': 'Smart'},
'9181010':{'en': 'Reliance'},
'569929':{'en': 'Entel'},
'622993':{'en': 'Esia'},
'559599959':{'en': 'Oi'},
'569922':{'en': 'Movistar'},
'569921':{'en': 'Entel'},
'569920':{'en': 'Claro'},
'569927':{'en': 'Movistar'},
'569926':{'en': 'Movistar'},
'569925':{'en': 'Movistar'},
'569924':{'en': 'Movistar'},
'8536575':{'en': '3'},
'8536574':{'en': '3'},
'8536577':{'en': '3'},
'622991':{'en': 'Esia'},
'8536571':{'en': 'China Telecom'},
'8536570':{'en': 'China Telecom'},
'8536573':{'en': 'China Telecom'},
'8536572':{'en': 'China Telecom'},
'622996':{'en': 'Esia'},
'8536579':{'en': '3'},
'8536578':{'en': '3'},
'556798402':{'en': 'Brasil Telecom GSM'},
'622994':{'en': 'Esia'},
'556299959':{'en': 'Vivo'},
'556299958':{'en': 'Vivo'},
'556299957':{'en': 'Vivo'},
'556299956':{'en': 'Vivo'},
'556299955':{'en': 'Vivo'},
'556299954':{'en': 'Vivo'},
'556299953':{'en': 'Vivo'},
'556299952':{'en': 'Vivo'},
'556299951':{'en': 'Vivo'},
'556798404':{'en': 'Brasil Telecom GSM'},
'558599675':{'en': 'TIM'},
'558599674':{'en': 'TIM'},
'558599677':{'en': 'TIM'},
'558599676':{'en': 'TIM'},
'558599671':{'en': 'TIM'},
'556798159':{'en': 'TIM'},
'558599673':{'en': 'TIM'},
'558599672':{'en': 'TIM'},
'556798406':{'en': 'Brasil Telecom GSM'},
'556798157':{'en': 'TIM'},
'556798156':{'en': 'TIM'},
'918186':{'en': 'Idea'},
'6254190':{'en': 'Esia'},
'6254191':{'en': 'Esia'},
'6254192':{'en': 'Esia'},
'918398':{'en': 'Vodafone'},
'9178883':{'en': 'Reliance Jio'},
'9174473':{'en': 'Idea'},
'9174472':{'en': 'Idea'},
'5599988':{'en': 'Oi'},
'5599989':{'en': 'Oi'},
'9174477':{'en': 'Idea'},
'9174476':{'en': 'Idea'},
'9174475':{'en': 'Idea'},
'9174474':{'en': 'Idea'},
'9174478':{'en': 'Idea'},
'5599986':{'en': 'Oi'},
'5599987':{'en': 'Oi'},
'62911999':{'en': 'Esia'},
'5599985':{'en': 'Oi'},
'658388':{'en': 'M1'},
'658389':{'en': 'StarHub'},
'658382':{'en': 'M1'},
'658383':{'en': 'M1'},
'658380':{'en': 'StarHub'},
'658381':{'en': 'SingTel'},
'658386':{'en': 'SingTel'},
'658387':{'en': 'SingTel'},
'658384':{'en': 'StarHub'},
'658385':{'en': 'SingTel'},
'9181270':{'en': 'Airtel'},
'918391':{'en': 'Reliance'},
'9180052':{'en': 'CellOne'},
'9180053':{'en': 'CellOne'},
'556498129':{'en': 'TIM'},
'556498128':{'en': 'TIM'},
'9180056':{'en': 'Reliance Jio'},
'9180057':{'en': 'Reliance Jio'},
'9180054':{'en': 'CellOne'},
'9180055':{'en': 'Reliance Jio'},
'556498123':{'en': 'TIM'},
'556498122':{'en': 'TIM'},
'556498121':{'en': 'TIM'},
'9180059':{'en': 'Reliance Jio'},
'556498127':{'en': 'TIM'},
'556498126':{'en': 'TIM'},
'556498125':{'en': 'TIM'},
'556498124':{'en': 'TIM'},
'559999146':{'en': 'Vivo'},
'559999147':{'en': 'Vivo'},
'559999144':{'en': 'Vivo'},
'559999145':{'en': 'Vivo'},
'559999142':{'en': 'Vivo'},
'559999143':{'en': 'Vivo'},
'658652':{'en': 'SingTel'},
'559999141':{'en': 'Vivo'},
'918397':{'en': 'Vodafone'},
'658658':{'en': 'StarHub'},
'658659':{'en': 'StarHub'},
'559999148':{'en': 'Vivo'},
'559999149':{'en': 'Vivo'},
'8536650':{'en': 'CTM'},
'62335999':{'en': 'Esia'},
'557199249':{'en': 'TIM'},
'557199248':{'en': 'TIM'},
'557199243':{'en': 'TIM'},
'557199242':{'en': 'TIM'},
'557199241':{'en': 'TIM'},
'557199247':{'en': 'TIM'},
'557199246':{'en': 'TIM'},
'557199245':{'en': 'TIM'},
'557199244':{'en': 'TIM'},
'917319':{'en': 'Airtel'},
'917318':{'en': 'Airtel'},
'86188':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'86189':{'en': 'China Telecom', 'zh': u('\u4e2d\u56fd\u7535\u4fe1'), 'zh_Hant': u('\u4e2d\u570b\u96fb\u4fe1')},
'86186':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'86187':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'86184':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'86185':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'86182':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'86183':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'86180':{'en': 'China Telecom', 'zh': u('\u4e2d\u56fd\u7535\u4fe1'), 'zh_Hant': u('\u4e2d\u570b\u96fb\u4fe1')},
'86181':{'en': 'China Telecom', 'zh': u('\u4e2d\u56fd\u7535\u4fe1'), 'zh_Hant': u('\u4e2d\u570b\u96fb\u4fe1')},
'559698117':{'en': 'TIM'},
'559698116':{'en': 'TIM'},
'559698115':{'en': 'TIM'},
'559698114':{'en': 'TIM'},
'559698113':{'en': 'TIM'},
'559698112':{'en': 'TIM'},
'559698111':{'en': 'TIM'},
'62335997':{'en': 'Esia'},
'559698119':{'en': 'TIM'},
'559698118':{'en': 'TIM'},
'569787':{'en': 'Entel'},
'569786':{'en': 'Claro'},
'569785':{'en': 'Claro'},
'569784':{'en': 'Claro'},
'569783':{'en': 'Claro'},
'569782':{'en': 'Claro'},
'569781':{'en': 'Claro'},
'569780':{'en': 'Entel'},
'9173005':{'en': 'Hexacom'},
'6223194':{'en': 'Esia'},
'569789':{'en': 'Entel'},
'569788':{'en': 'Entel'},
'558999972':{'en': 'TIM'},
'558999973':{'en': 'TIM'},
'6223192':{'en': 'Esia'},
'6223193':{'en': 'Esia'},
'556499658':{'en': 'Vivo'},
'9173004':{'en': 'Airtel'},
'556499652':{'en': 'Vivo'},
'556499653':{'en': 'Vivo'},
'6223191':{'en': 'Esia'},
'556499654':{'en': 'Vivo'},
'556499655':{'en': 'Vivo'},
'8536852':{'en': '3'},
'9183810':{'en': 'Idea'},
'918189':{'en': 'Tata Docomo'},
'8493':{'en': 'MobiFone'},
'8492':{'en': 'Vietnamobile'},
'8491':{'en': 'Vinaphone'},
'8490':{'en': 'MobiFone'},
'8497':{'en': 'Viettel'},
'8496':{'en': 'Viettel'},
'8494':{'en': 'Vinaphone'},
'8498':{'en': 'Viettel'},
'8536854':{'en': '3'},
'918188':{'en': 'Tata Docomo'},
'559299907':{'en': 'Oi'},
'559299906':{'en': 'Oi'},
'559299905':{'en': 'Oi'},
'559299904':{'en': 'Oi'},
'559299903':{'en': 'Oi'},
'559299902':{'en': 'Oi'},
'559299901':{'en': 'Oi'},
'559898461':{'en': 'Claro BR'},
'559898460':{'en': 'Claro BR'},
'559299909':{'en': 'Oi'},
'559299908':{'en': 'Oi'},
'555399962':{'en': 'Vivo'},
'555399963':{'en': 'Vivo'},
'559599161':{'en': 'Vivo'},
'559599162':{'en': 'Vivo'},
'559599163':{'en': 'Vivo'},
'559599164':{'en': 'Vivo'},
'559599165':{'en': 'Vivo'},
'559599166':{'en': 'Vivo'},
'559599167':{'en': 'Vivo'},
'559599168':{'en': 'Vivo'},
'9173260':{'en': 'Airtel'},
'556499611':{'en': 'Vivo'},
'55749812':{'en': 'Claro BR'},
'9174279':{'en': 'Vodafone'},
'556599931':{'en': 'Vivo'},
'556599932':{'en': 'Vivo'},
'556599933':{'en': 'Vivo'},
'556599934':{'en': 'Vivo'},
'556599935':{'en': 'Vivo'},
'556599936':{'en': 'Vivo'},
'556599937':{'en': 'Vivo'},
'556599938':{'en': 'Vivo'},
'556599939':{'en': 'Vivo'},
'8536380':{'en': '3'},
'8536381':{'en': '3'},
'8536386':{'en': 'China Telecom'},
'8536387':{'en': 'China Telecom'},
'8536384':{'en': '3'},
'8536385':{'en': '3'},
'9179928':{'en': 'Idea'},
'9179929':{'en': 'Idea'},
'9179922':{'en': 'Reliance Jio'},
'9179923':{'en': 'Reliance Jio'},
'9179920':{'en': 'Airtel'},
'9179921':{'en': 'Airtel'},
'9179926':{'en': 'Idea'},
'9179927':{'en': 'Idea'},
'9179924':{'en': 'Reliance Jio'},
'9179925':{'en': 'Idea'},
'556499618':{'en': 'Vivo'},
'9177568':{'en': 'Airtel'},
'59069036':{'en': 'Digicel'},
'555599955':{'en': 'Vivo'},
'555599954':{'en': 'Vivo'},
'555599957':{'en': 'Vivo'},
'555599956':{'en': 'Vivo'},
'555599951':{'en': 'Vivo'},
'555599953':{'en': 'Vivo'},
'555599952':{'en': 'Vivo'},
'555599959':{'en': 'Vivo'},
'555599958':{'en': 'Vivo'},
'658409':{'en': 'SingTel'},
'658408':{'en': 'SingTel'},
'9177849':{'en': 'Dishnet'},
'559598412':{'en': 'Claro BR'},
'559598411':{'en': 'Claro BR'},
'559598410':{'en': 'Claro BR'},
'658407':{'en': 'SingTel'},
'9175588':{'en': 'Idea'},
'9175589':{'en': 'Idea'},
'658406':{'en': 'SingTel'},
'9175580':{'en': 'Idea'},
'9175581':{'en': 'Airtel'},
'558599444':{'en': 'Claro BR'},
'9175583':{'en': 'Airtel'},
'9175584':{'en': 'Airtel'},
'9175585':{'en': 'Airtel'},
'9175586':{'en': 'Airtel'},
'9175587':{'en': 'Airtel'},
'556298149':{'en': 'TIM'},
'556298148':{'en': 'TIM'},
'556298143':{'en': 'TIM'},
'556298142':{'en': 'TIM'},
'556298141':{'en': 'TIM'},
'556298147':{'en': 'TIM'},
'556298146':{'en': 'TIM'},
'556298145':{'en': 'TIM'},
'556298144':{'en': 'TIM'},
'658469':{'en': 'M1'},
'658468':{'en': 'StarHub'},
'658463':{'en': 'M1'},
'658462':{'en': 'M1'},
'658461':{'en': 'M1'},
'658460':{'en': 'M1'},
'658467':{'en': 'M1'},
'658466':{'en': 'M1'},
'658465':{'en': 'M1'},
'658464':{'en': 'M1'},
'569923':{'en': 'Movistar'},
'5939920':{'en': 'Claro'},
'5939921':{'en': 'Claro'},
'62896':{'en': '3'},
'62897':{'en': '3'},
'62895':{'en': 'Hutchison'},
'559998152':{'en': 'TIM'},
'559998153':{'en': 'TIM'},
'559998151':{'en': 'TIM'},
'62898':{'en': '3'},
'62899':{'en': '3'},
'5939929':{'en': 'Movistar'},
'9174780':{'en': 'Airtel'},
'675775':{'en': 'Telikom'},
'8536816':{'en': 'SmarTone'},
'559699181':{'en': 'Vivo'},
'555398132':{'en': 'TIM'},
'555398133':{'en': 'TIM'},
'555398131':{'en': 'TIM'},
'555398136':{'en': 'TIM'},
'555398137':{'en': 'TIM'},
'555398134':{'en': 'TIM'},
'555398135':{'en': 'TIM'},
'555398138':{'en': 'TIM'},
'555398139':{'en': 'TIM'},
'9174860':{'en': 'Airtel'},
'5699381':{'en': 'Movistar'},
'5699380':{'en': 'Movistar'},
'5699383':{'en': 'Entel'},
'5699382':{'en': 'Entel'},
'5699385':{'en': 'Claro'},
'5699384':{'en': 'Scharfstein'},
'5699387':{'en': 'Claro'},
'5699386':{'en': 'Claro'},
'5699389':{'en': 'Movistar'},
'5699388':{'en': 'Claro'},
'614444':{'en': 'Telstra'},
'55919920':{'en': 'Vivo'},
'55919921':{'en': 'Vivo'},
'55919922':{'en': 'Vivo'},
'55919923':{'en': 'Vivo'},
'55919924':{'en': 'Vivo'},
'55919925':{'en': 'Vivo'},
'55919926':{'en': 'Vivo'},
'55919927':{'en': 'Vivo'},
'918343':{'en': 'Idea'},
'918342':{'en': 'Vodafone'},
'559699981':{'en': 'Oi'},
'918347':{'en': 'Idea'},
'918346':{'en': 'Idea'},
'556398119':{'en': 'TIM'},
'556398118':{'en': 'TIM'},
'556398117':{'en': 'TIM'},
'556398116':{'en': 'TIM'},
'556398115':{'en': 'TIM'},
'556398114':{'en': 'TIM'},
'556398113':{'en': 'TIM'},
'556398112':{'en': 'TIM'},
'556398111':{'en': 'TIM'},
'659868':{'en': 'SingTel'},
'659869':{'en': 'SingTel'},
'9173670':{'en': 'Airtel'},
'9176300':{'en': 'Hexacom'},
'9173678':{'en': 'Airtel'},
'9173679':{'en': 'Airtel'},
'61497':{'en': 'Telstra'},
'9173229':{'en': 'Idea'},
'9173228':{'en': 'Idea'},
'60158860':{'en': 'Izzinet'},
'61491':{'en': 'Telstra'},
'61490':{'en': 'Telstra'},
'9177400':{'en': 'Reliance'},
'9175805':{'en': 'Vodafone'},
'9175804':{'en': 'Vodafone'},
'9175807':{'en': 'Vodafone'},
'9172850':{'en': 'Idea'},
'9175801':{'en': 'Vodafone'},
'9175800':{'en': 'Idea'},
'9175803':{'en': 'Vodafone'},
'9175802':{'en': 'Vodafone'},
'55899811':{'en': 'Vivo'},
'55899810':{'en': 'Vivo'},
'9175809':{'en': 'Vodafone'},
'9175808':{'en': 'Vodafone'},
'556598418':{'en': 'Brasil Telecom GSM'},
'556598419':{'en': 'Brasil Telecom GSM'},
'556598414':{'en': 'Brasil Telecom GSM'},
'556598415':{'en': 'Brasil Telecom GSM'},
'556598416':{'en': 'Brasil Telecom GSM'},
'556598417':{'en': 'Brasil Telecom GSM'},
'9176578':{'en': 'Tata Docomo'},
'556598411':{'en': 'Brasil Telecom GSM'},
'556598412':{'en': 'Brasil Telecom GSM'},
'556598413':{'en': 'Brasil Telecom GSM'},
'8536614':{'en': 'SmarTone'},
'852629':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852628':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852625':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852624':{'en': 'CM Mobile', 'zh': u('\u6da6\u8fc5\u901a\u4fe1'), 'zh_Hant': u('\u6f64\u8fc5\u901a\u4fe1')},
'852627':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'9177408':{'en': 'Idea'},
'852621':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852620':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852623':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852622':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'9177778':{'en': 'Tata Docomo'},
'9177770':{'en': 'Airtel'},
'8536615':{'en': 'SmarTone'},
'9174919':{'en': 'Idea'},
'918107':{'en': 'Airtel'},
'852672':{'en': 'China
# Source repository: hodossy/pandas-extras
"""
Contains functions to help transform columns data containing complex types,
like lists or dictionaries.
"""
from functools import reduce
from itertools import zip_longest
import numpy as np
import pandas as pd
def extract_dictionary(dataframe, column, key_list=None, prefix=None, separator='.'):
"""
Extract values of keys in ``key_list`` into separate columns.
.. code-block:: python
>>> df = DataFrame({
... 'trial_num': [1, 2, 1, 2],
... 'subject': [1, 1, 2, 2],
... 'samples': [
... {'A': 1, 'B': 2, 'C': None},
... {'A': 3, 'B': 4, 'C': 5},
... {'A': 6, 'B': 7, 'C': None},
... None,
... ]
...})
        >>> df.pipe(extract_dictionary, 'samples', key_list=('A', 'B'))
trial_num subject samples.A samples.B
0 1 1 1 2
1 2 1 3 4
2 1 2 6 7
3 2 2 NaN NaN
.. warning::
``column`` will be dropped from the DataFrame.
:param dataframe: The DataFrame object to work on.
:type dataframe: :class:`DataFrame <pandas.DataFrame>`
:param str column: The name of the column which should be extracted.
:param list key_list: Collection of keys that should be extracted. The new column names
will be created from the key names.
:param str prefix: Prefix for new column names. By default, ``column`` will be applied
as prefix.
:param str separator: The separator between the prefix and the key name for new column
names.
:returns: The extracted DataFrame
:rtype: :class:`DataFrame <pandas.DataFrame>`
"""
if key_list is None:
try:
key_list = next(val for val in dataframe[column] if isinstance(val, dict)).keys()
except StopIteration:
key_list = []
for key in key_list:
new_column = '{}{}{}'.format(prefix, separator, key) if prefix else prefix
dataframe = extract_dict_key(
dataframe, column, key, new_column=new_column, separator=separator
)
return dataframe.drop(column, axis=1)
def extract_dict_key(dataframe, column, key, new_column=None, separator='.'):
"""
Extract values of ``key`` into ``new_column``. If key is missing, ``None`` is added to
the column.
.. code-block:: python
>>> df = DataFrame({
... 'trial_num': [1, 2, 1, 2],
... 'subject': [1, 1, 2, 2],
... 'samples': [
... {'A': 1, 'B': 2, 'C': None},
... {'A': 3, 'B': 4, 'C': 5},
... {'A': 6, 'B': 7, 'C': None},
... None,
... ]
...})
        >>> df.pipe(extract_dict_key, 'samples', key='A')
trial_num subject samples.A samples
0 1 1 1 {'A': 1, 'B': 2, 'C': None}
1 2 1 3 {'A': 3, 'B': 4, 'C': 5}
2 1 2 6 {'A': 6, 'B': 7, 'C': None}
3 2 2 NaN NaN
:param dataframe: The DataFrame object to work on.
:type dataframe: :class:`DataFrame <pandas.DataFrame>`
:param str column: The name of the column which should be extracted.
:param str key: Key that should be extracted.
:param str new_column: Name of the new column. By default, ``column`` will be applied as
prefix to ``key``.
:param str separator: The separator between ``column`` and ``key`` if ``new_column`` is
not specified.
:returns: The extracted DataFrame
:rtype: :class:`DataFrame <pandas.DataFrame>`
"""
new_column = new_column or '{}{}{}'.format(column, separator, key) if new_column != "" else key
dataframe.loc[:, new_column] = dataframe[column].apply(
lambda x: x.get(key) if isinstance(x, dict) else x
).rename(new_column)
return dataframe
def expand_list(dataframe, column, new_column=None):
"""
Expands lists to new rows.
.. code-block:: python
>>> df = DataFrame({
... 'trial_num': [1, 2, 3, 1, 2, 3],
... 'subject': [1, 1, 1, 2, 2, 2],
... 'samples': [
... [1, 2, 3, 4],
... [1, 2, 3],
... [1, 2],
... [1],
... [],
... None,
... ]
... })
>>> df.pipe(expand_list, 'samples', new_column='sample_id').head(7)
trial_num subject sample_id
0 1 1 1
0 1 1 2
0 1 1 3
0 1 1 4
1 2 1 1
1 2 1 2
1 2 1 3
.. warning::
Between calls of ``expand_list`` and/or ``expand_lists``, the dataframe index
duplications must be removed, otherwise plenty of duplications will occur.
.. warning::
Calling ``expand_list`` on multiple columns might cause data duplications,
that shall be handled.
:param dataframe: The DataFrame object to work on.
:type dataframe: :class:`DataFrame <pandas.DataFrame>`
:param column: The name of the column which should be extracted.
:type column: :class: str
:param new_column: Name of the new columns. If not defined, columns will not be renamed.
:type new_column: :class: str
:returns: The expanded DataFrame
:rtype: :class:`DataFrame <pandas.DataFrame>`
"""
new_column = new_column or column
values, indices = [], []
for index, value in dataframe[column].items():
if value and not isinstance(value, float):
values.extend(value)
indices.extend([index, ] * len(value))
if indices and isinstance(indices[0], tuple):
indices = pd.MultiIndex.from_tuples(indices, names=dataframe.index.names)
else:
indices = pd.Series(indices, name=dataframe.index.name)
return pd.DataFrame({new_column: values}, index=indices).\
merge(dataframe.drop(column, axis=1), left_index=True, right_index=True, how='outer')
def expand_lists(dataframe, columns, new_columns=None):
"""
Expands multiple lists to new rows. Pairs elements of lists respective to their index.
Pads with ``None`` to the longest list.
.. code-block:: python
>>> df = DataFrame({
... 'trial_num': [1, 2, 3, 1, 2, 3],
... 'subject': [1, 1, 1, 2, 2, 2],
... 'samples': [
... [1, 2, 3, 4],
... [1, 2, 3],
... [1, 2],
... [1],
... [],
... None,
... ],
... 'samples2': [
... [1, 2],
... [1, 2, 3],
... [1, 2],
... [1],
... [],
... None,
... ]
... })
        >>> df.pipe(
        ...     expand_lists, ['samples', 'samples2'], new_columns=['sample_id', 'sample_id2']
        ... ).head(7)
trial_num subject sample_id sample_id2
0 1 1 1 1
0 1 1 2 2
0 1 1 3 Nan
0 1 1 4 Nan
1 2 1 1 1
1 2 1 2 2
1 2 1 3 3
.. warning::
Between calls of ``expand_list`` and/or ``expand_lists``, the dataframe index
duplications must be removed, otherwise plenty of duplications will occur.
.. warning::
Calling ``expand_lists`` on multiple columns might cause data duplications,
that shall be handled.
:param dataframe: The DataFrame object to work on.
:type dataframe: :class:`DataFrame <pandas.DataFrame>`
:param columns: The name of the columns which should be extracted.
:type columns: :class: list or :class: tuple of :class: str
:param new_columns: Name of the new columns. If not defined, columns will not be renamed.
:type new_columns: :class: list or :class: tuple of :class: str
:returns: The expanded DataFrame
:rtype: :class:`DataFrame <pandas.DataFrame>`
"""
new_columns = new_columns or columns
if not len(columns) == len(new_columns):
raise ValueError('new_columns must contain the same amount of items as columns')
if len(columns) == 1:
return expand_list(dataframe, *columns, *new_columns)
if not len(columns) > 1:
raise ValueError('columns argument must contain at least two items.')
values, indices = [], []
for index, row in dataframe[columns].iterrows():
if not row.empty and all(row.notna()):
values.extend(zip_longest(*row))
indices.extend([index, ] * max(map(len, row)))
if indices and isinstance(indices[0], tuple):
indices = pd.MultiIndex.from_tuples(indices, names=dataframe.index.names)
else:
indices = pd.Series(indices, name=dataframe.index.name)
return pd.DataFrame(values, columns=new_columns, index=indices).fillna(np.nan).\
merge(dataframe.drop(columns, axis=1), left_index=True, right_index=True, how='outer')
def merge_columns(dataframe, col_header_list, new_column_name, keep=None, aggr=None):
"""
Add a new column or modify an existing one in *dataframe* called *new_column_name* by
iterating over the rows and select the proper notnull element from the values of
*col_header_list* columns in the given row if *keep* is filled OR call the *aggr*
function with the values of *col_header_list*. Only one of (*keep*, *aggr*) can be filled.
:param dataframe: the pandas.DataFrame object to modify
:param col_header_list: list of the names of the headers to merge
:param str new_column_name: the name of the new column, if it already exists the operation
will overwrite it
:param str keep: Specify whether the first or the last proper value is needed.
values: *first* and *last* as string.
:param aggr: Callable function which will get the values of *col_header_list* as parameter.
The return value of this function will be the value in *new_column_name*
:returns: The merged DataFrame
:rtype: :class:`DataFrame <pandas.DataFrame>`
"""
if keep and aggr:
raise ValueError(
'Parameter keep and aggr can not be handled at the same time. Use only one.'
)
old_columns = [x for x in col_header_list if x in list(dataframe)]
if not old_columns:
raise ValueError(
f'None of the following columns were found: {", ".join(col_header_list)}'
)
if keep:
if keep not in ('first', 'last'):
raise ValueError('Improper value for parameter keep. Possible values: first, last.')
first_valid = lambda x, y: y if pd.isnull(x) else x
if keep.startswith('f'):
aggr = lambda x: reduce(first_valid, x.tolist())
else:
aggr = lambda x: reduce(first_valid, x.tolist()[::-1])
if not callable(aggr):
        raise ValueError('Improper value
# Source repository: smaeland/ML-2HDM
# pylint: disable=C0303, C0103
## Class for a 2HDM model, contains parameters and computes xsec / BR
import os
import subprocess
import argparse
import h5py
import numpy as np
from glob import glob
from lhatool import LHA, Block, Entry
from dispatcher import Dispatcher
class Model(object):
""" All properties of a model and methods to run Pythia, 2HDMC, SusHi """
def __init__(self, param_dict, gen_settings_dict,
lumi, massrange,
backgrounds,
outdir,
title,
ignore_higgsbounds=False):
"""
paramdict: Model parameters in a dict, i.e {'mH': 450, ...}
gen_settings_dict: Settings for sample generation
lumi: luminosity in fb-1
"""
super(Model, self).__init__()
# Sanity check
expected_params = set(['mh', 'mH', 'mA', 'mC',
'sin_ba', 'm12_2', 'tanb',
'lambda_6', 'lambda_7'])
diff = set(param_dict.keys()).symmetric_difference(expected_params)
if diff:
print 'Problem with parameter(s):', diff
exit(-1)
self.params = param_dict
self.title = title
self.lumi = lumi
self.massrange = massrange
self.ignore_higgsbounds = ignore_higgsbounds
self.gen_settings = gen_settings_dict
# Samples: Signal and backgrounds
if backgrounds is None:
backgrounds = []
self.backgrounds = backgrounds
self.samples = ['H', 'A'] + backgrounds
# Properties
self.xsec = {'A': None, 'H': None}
self.xsec_err = {'A': None, 'H': None}
self.br_tau_pipi = 0.2552*0.2552
self.br_tautau = {
'A': None,
'H': None,
'Z': 3.337e-2,
'ttbar': 11.38e-2*11.38e-2,
'VV': 11.38e-2*11.38e-2, # Assuming WW dominates
}
self.efficiency = {'A': None, 'H': None}
self.nexpected = {'A': None, 'H': None}
self.targets = {
'H': 0,
'A': 1,
'Z': 2,
'ttbar': 2,
'VV': 2,
}
# Goto work dir
self.originalpath = os.getcwd()
if not os.path.exists(outdir):
os.mkdir(outdir)
os.chdir(outdir)
self.outdir = os.getcwd()
print 'self.outdir:', self.outdir
# Create train/test/validate directories
for d in ['train', 'test', 'validation']:
if not os.path.exists(d):
os.mkdir(d)
# I/O
self.lhafile = None
self.sushi_input = {'A': None, 'H': None}
self.sushi_output = {'A': None, 'H': None}
self.trainfiles = {'A': None, 'H': None}
# TODO: improve cmnd file locations
self.cmndfiles = {
'A': None,
'H': None,
'Z': self.originalpath + '/../../pythia/processes/Z0_tautau_hadhad.cmnd',
'VV': self.originalpath + '/../../pythia/processes/diboson_tautau_hadhad.cmnd',
'ttbar': self.originalpath + '/../../pythia/processes/ttbar_tautau_hadhad.cmnd'}
# Try to retrieve existing files
if os.path.exists(self.title + '.lha'):
self.lhafile = os.path.abspath(self.title + '.lha')
print 'Located LHA file:', self.lhafile
for key in self.samples:
if key in ['H', 'A']:
# SusHi inputs
if os.path.exists('%s_%s_sushi.in' % (self.title, key)):
self.sushi_input[key] = os.path.abspath('%s_%s_sushi.in' % (self.title, key))
print 'Located SusHi input file:', self.sushi_input[key]
# SusHi outputs
if os.path.exists('%s_%s_sushi.out' % (self.title, key)):
self.sushi_output[key] = os.path.abspath('%s_%s_sushi.out' % (self.title, key))
print 'Located SusHi output file:', self.sushi_output[key]
# Pythia cmnd files
if os.path.exists('%s_%s.cmnd' % (self.title, key)):
self.cmndfiles[key] = os.path.abspath('%s_%s.cmnd' % (self.title, key))
print 'Located Pythia command file:', self.cmndfiles[key]
# Train sets for this model
if os.path.exists('train/%s_%s_merged.h5' % (self.title, key)):
self.trainfiles[key] = os.path.abspath('train/%s_%s_merged.h5' % (self.title, key))
print 'Located train set:', self.trainfiles[key]
os.chdir(self.originalpath)
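    # A hedged construction sketch (all parameter values below are illustrative only,
    # not taken from the original repository):
    #
    #   params = {'mh': 125., 'mH': 450., 'mA': 450., 'mC': 450., 'sin_ba': 1.0,
    #             'm12_2': 15000., 'tanb': 1.0, 'lambda_6': 0., 'lambda_7': 0.}
    #   model = Model(params, gen_settings_dict={}, lumi=150., massrange=(300., 600.),
    #                 backgrounds=['Z', 'ttbar', 'VV'], outdir='out', title='mA450_tb1')
    #   model.compute_decay_table()
    #   model.prepare_sushi_input()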
def compute_decay_table(self):
"""
Run 2HDMC to calculate BRs and widths
Also runs HiggsBounds/Signals to check points.
"""
outfile = '%s/%s.lha' % (self.outdir, self.title)
cmnd = [os.environ['TWOHDMCCODEPATH'] + '/calculate_point',
self.params['mh'],
self.params['mH'],
self.params['mA'],
self.params['mC'],
self.params['sin_ba'],
self.params['lambda_6'],
self.params['lambda_7'],
self.params['m12_2'],
self.params['tanb'],
outfile,
int(self.ignore_higgsbounds)
]
cmnd = [str(c) for c in cmnd]
print 'Running command:', cmnd
self.lhafile = outfile
return subprocess.check_call(cmnd)
def prepare_sushi_input(self):
"""
Read LHA file from 2HDMC, and create a .in file for SusHi
higgstype:
11: h
12: H
21: A
"""
for higgsname, higgstype in {'H': 12, 'A': 21}.iteritems():
# Parse LHA file
lha = LHA(self.lhafile)
# Add SusHi-specific blocks
sushi = Block('SUSHI', comment='SusHi specific')
sushi.add(Entry([1, 2], comment='Select 2HDM'))
sushi.add(Entry([2, higgstype], comment='h / H / A'))
sushi.add(Entry([3, 0], comment='p-p collisions'))
sushi.add(Entry([4, 13000], comment='E_cm'))
sushi.add(Entry([5, 2], comment='ggH at NNLO'))
sushi.add(Entry([6, 2], comment='bbH at NNLO'))
sushi.add(Entry([7, 2], comment='SM EW content'))
sushi.add(Entry([19, 1], comment='Verbosity'))
sushi.add(Entry([20, 0], comment='All processes'))
lha.add_block(sushi)
# 2HDM block
thdm = Block('2HDM', '2HDM parameters')
thdm.add(Entry([1], comment='Type I'))
lha.add_block(thdm)
# Kinematic distribution parameters
distrib = Block('DISTRIB', comment='Kinematic requirements')
distrib.add(Entry([1, 0], comment='Sigma total'))
distrib.add(Entry([2, 0], comment='Disable pT cut'))
#distrib.add(Entry([21, GENER_SETTINGS['higgs_pt_min']], comment='Min higgs pT'))
distrib.add(Entry([3, 0], comment='Disable eta cut'))
#distrib.add(Entry([32, GENER_SETTINGS['higgs_eta_max']], comment='Max eta'))
distrib.add(Entry([4, 1], comment='Use eta, not y'))
lha.add_block(distrib)
# PDF selection
pdfspec = Block('PDFSPEC')
pdfspec.add(Entry([1, 'MMHT2014lo68cl.LHgrid'], comment='Name of pdf (lo)'))
pdfspec.add(Entry([2, 'MMHT2014nlo68cl.LHgrid'], comment='Name of pdf (nlo)'))
pdfspec.add(Entry([3, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (nnlo)'))
pdfspec.add(Entry([4, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (n3lo)'))
pdfspec.add(Entry([10, 0], comment='Set number'))
lha.add_block(pdfspec)
# Add charm mass
lha.get_block('SMINPUTS').add(Entry([8, 1.275], comment='m_c'))
# Write output
suffix = '_%s_sushi.in' % higgsname
outname = self.lhafile.replace('.lha', suffix)
self.sushi_input[higgsname] = outname
lha.write(outname)
return 0
def get_cross_section(self, sample):
"""
For higgses, compute the ggF cross section with SusHi
For backgrounds, take value from Pythia
Return value in pb
"""
# Path to SusHi
sushi_binary = os.environ['SUSHIPATH']
if not os.path.exists(sushi_binary):
print 'No known SusHi binary file'
exit(-1)
if sample in ['H', 'A']:
self.sushi_output[sample] = self.sushi_input[sample].replace('.in', '.out')
# Convert to relative path to shorten the file name, since SusHi
# can't deal with inputs >60 chars
relpath_in = os.path.relpath(self.sushi_input[sample], os.getcwd())
relpath_out = os.path.relpath(self.sushi_output[sample], os.getcwd())
# Run SusHi
ret = subprocess.check_call([sushi_binary,
relpath_in,
relpath_out])
#self.sushi_input[sample],
#self.sushi_output[sample]])
if ret: return ret
# Parse result
lha = LHA(self.sushi_output[sample])
self.xsec[sample] = float(lha.get_block('SUSHIggh').get_entry_by_key(1))
# Compare to Pythia
with h5py.File(self.trainfiles[sample]) as hf:
xsec = float(hf.get('data').attrs['cross_section'])
xsec = xsec * 10e9 # convert from mb to pb
print 'SAMPLE:', sample, ':\tSusHi = %.4e, \t Pythia = %.4e' % (self.xsec[sample], xsec)
elif sample == 'Z':
self.xsec[sample] = 2.7910 # from FEWZ at LO
elif sample in self.backgrounds and sample != 'Z':
# Open train set
with h5py.File(self.trainfiles[sample]) as hf:
xsec = float(hf.get('data').attrs['cross_section'])
self.xsec[sample] = xsec * 10e9 # convert from mb to pb
#print 'Cross section for %s = %.3e pb' % (sample, self.xsec[sample])
return 0
def create_pythia_cmnd_files(self):
"""
Create a command file for A/H -> tautau
Needs higgs masses and decay widths
"""
for higgsname, higgspid in {'H': 35, 'A': 36}.iteritems():
# Get mass and width from 2HDMC LHA file
lha = LHA(self.lhafile)
mass = lha.get_block('MASS').get_entry_by_key(higgspid)
width = lha.get_decay(higgspid).width
outname = self.lhafile.replace('.lha', '_%s.cmnd' % higgsname)
self.cmndfiles[higgsname] = outname
# Write command file
with open(outname, 'w') as outfile:
outfile.write('Beams:eCM = 13000.\n')
outfile.write('Higgs:useBSM = on\n')
if higgspid == 36:
#outfile.write('HiggsBSM:allA3 = on\n') # All production modes
outfile.write('HiggsBSM:ffbar2A3 = on\n') # quark fusion
outfile.write('HiggsBSM:gg2A3 = on\n') # gluon fusion
elif higgspid == 35:
#outfile.write('HiggsBSM:allH2 = on\n') # All production modes
outfile.write('HiggsBSM:ffbar2H2 = on\n') # quark fusion
outfile.write('HiggsBSM:gg2H2 = on\n') # gluon fusion
outfile.write('{}:all = A0 A0 1 0 0 {} {} 50.0 0.0\n'.format(higgspid, mass, width))
outfile.write('{}:onMode = off\n'.format(higgspid))
outfile.write('{}:onIfMatch = 15 -15\n'.format(higgspid))
outfile.write('15:onMode = off\n')
outfile.write('15:onIfMatch = 16 111 211\n')
outfile.write('\n')
outfile.write('Next:numberShowEvent = 0\n')
return 0
def compute_expected_event_numbers(self, sample):
# Get generation efficiency from train sets
assert self.trainfiles[sample] is not None
with h5py.File(self.trainfiles[sample]) as hfile:
data = hfile.get('data')
self.efficiency[sample] = float(data.attrs['efficiency'])
if sample == 'Z':
self.efficiency[sample] = float(data.attrs['events_accepted'])/float(data.attrs['events_passed_mass_cuts'])
# Signal
if sample in ['H', 'A']:
# Branching ratios
lha = LHA(self.lhafile)
self.br_tautau['H'] = float(lha.get_decay(35).get_branching_ratio(15, -15))
self.br_tautau['A'] = float(lha.get_decay(36).get_branching_ratio(15, -15))
# Number of expected events
pb_to_fb = 1e3  # 1 pb = 1e3 fb
self.nexpected[sample] = (self.lumi * self.xsec[sample] * pb_to_fb *
self.br_tautau[sample] * self.br_tau_pipi *
self.efficiency[sample])
self.nexpected[sample] = int(round(self.nexpected[sample]))
res = [sample, self.lumi, self.xsec[sample] * pb_to_fb,
self.br_tautau[sample], self.efficiency[sample],
self.nexpected[sample]]
#print '\nExpected event numbers:'
st = ['', 'Lumi (fb-1)', 'xsec (fb)', 'BR', 'efficiency', 'N']
print '{:4} {:>15} {:>15} {:>15} {:>15} {:>15}'.format(*st)
print '{:4} {:15.1f} {:15.4e} {:15.4e} {:15.4e} {:15d}'.format(*res)
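# Worked example for the expected-event formula above (illustrative numbers only,
# not taken from any sample): with lumi = 150 fb^-1, xsec = 1.2 pb = 1.2e3 fb,
# BR(A -> tautau) = 0.08, br_tau_pipi = 0.0658 and efficiency = 0.30,
#   N = 150 * 1.2e3 * 0.08 * 0.0658 * 0.30 ~ 284 expected events.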
def merge_datasets(self, name):
print 'Merging %s datasets' % name
assert name in ['train', 'test', 'validation']
def merge(filelist, outname, remove_originals=False):
if len(filelist) < 1:
print 'Error: No files to merge'
return
atts = {}
open_files = []
X = np.array([])
for fin in filelist:
# Skip old merge file
if fin == outname:
continue
hf = h5py.File(fin, 'r')
open_files.append(hf)
data = hf.get('data')
if not len(data):
print 'In merge_datasets(): File', fin, 'is | |
elevation,
5 - noise diode state, and
6 - channel [if argument chan=None; see get_boresight_data()]
* 'xpwr_metadata' is a 2D-array with a row for each configuration and columns::
0 - 'xpwr_cfg_id'
1 - UNIX time,
2 - rss_cfg_id,
3 - source_id,
4 - axis, and
5 - chan
"""
def __init__(self, parent, year, doy):
"""
"""
if parent:
mylogger = logging.getLogger(parent.logger.name+".Session")
else:
mylogger = logging.getLogger(logger.name+".Session")
mylogger.debug("__init__: logger is %s", mylogger.name)
if parent:
self.db = parent
else:
self.db = DSS28db() # default is GAVRT
datestr = "%4d/%03d" % (year, doy)
#DR.Session.__init__(self, parent=parent, year=year, doy=doy,
# project="SolarPatrol")
DR.Session.__init__(self, parent=parent, date=datestr, dss=28,
project="SolarPatrol")
self.logger = mylogger
self.logger.info("Getting maps and boresights; this may take a while.")
self.logger.debug("__init__: subclasses: %s", Session.__subclasses__())
self.logger.debug("__init__: has attribute 'maps'? %s", hasattr(self, "maps"))
if hasattr(self, "maps"):
# instantiating map plotters also gets the maps
pass
else:
self.get_maps()
self.get_boresights()
self.get_session_dir()
def get_session_dir(self):
"""
"""
self.logger.debug("get_session_dir: entered")
obs_dir = local_dirs.projects_dir+"SolarPatrol/Observations/dss28/"
self.session_dir = obs_dir + "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def summary(self, save=False):
if not self.list_maps(save=save):
print("no usable maps found")
if not self.make_bs_dir(save=save):
print("no usable boresights found")
# ------------------------------ maps ---------------------------------------
def get_map_IDs(self):
"""
"""
map_cfg_ids = self.db.get(
"select raster_cfg_id from raster_cfg where year = " +
str(self.year) + " and doy = " + str(self.doy) +
";")
self.logger.debug("get_maps: map IDs: %s", map_cfg_ids)
return map_cfg_ids
def get_maps(self, map_IDs=[]):
"""
Returns maps from the raster configuration IDs for the specified date
"""
if map_IDs == []:
map_cfg_ids = self.get_map_IDs()
else:
map_cfg_ids = NP.array(map_IDs)
if map_cfg_ids.any():
self.maps = {}
for map_id in map_cfg_ids[:,0]:
self.logger.debug("get_maps: getting %d", map_id)
self.maps[map_id] = Map(self, map_id)
self.logger.info("%4d/%03d found %d maps", self.year, self.doy,
len(list(self.maps.keys())))
else:
self.logger.info("No maps found for %4d/%03d", self.year, self.doy)
def get_boresights(self):
"""
Returns boresights from the xpwr configurations
"""
try:
xpwr_cfg_ids = self.db.get("select xpwr_cfg_id from xpwr_cfg where year = "
+str(self.year)+" and doy = "+str(self.doy)+";")[:,0]
except IndexError:
# 'too many indices for array' means no data were returned
xpwr_cfg_ids = []
xpwr_cfg_ids.sort()
self.boresights = {}
for xpwr_cfg_id in xpwr_cfg_ids:
try:
self.boresights[xpwr_cfg_id] = BoresightScan(self, xpwr_cfg_id)
except Exception:
self.logger.debug("get_boresights: skipping %s; could not build BoresightScan", xpwr_cfg_id)
self.logger.info("%4d/%03d found %d boresights", self.year, self.doy,
len(list(self.boresights.keys())))
def list_maps(self, save=False):
"""
"""
if save:
fileobj = open(self.session_dir+"maps.txt", "w")
else:
fileobj = sys.stdout
print("----------------- Session Maps for %4d/%03d -------------------" %\
(self.year, self.doy), file=fileobj)
print(" ID start-stop ch freq. pol. b.w. IFmode attn. source",
file=fileobj)
print("--- ---------- -- ------ ----- ----- ------ ----- -------------",
file=fileobj)
mapkeys = list(self.maps.keys())
mapkeys.sort()
if mapkeys == []:
print("no valid maps with tlog data found", file=fileobj)
return False
for mapno in mapkeys:
try:
channels = self.maps[mapno].channels
for chno in channels:
print(" %3d %4s-%4s %2d %6.0f %4s %4.2f %4s %4.1d %16s" % (
mapno,
time.strftime("%H%M", time.gmtime(self.maps[mapno].start)),
time.strftime("%H%M", time.gmtime(self.maps[mapno].end)),
chno,
self.maps[mapno].channel[chno]["freq"],
self.maps[mapno].channel[chno]['pol'][0],
self.maps[mapno].channel[chno]["bw"],
self.maps[mapno].channel[chno]["ifmode"][0],
self.maps[mapno].channel[chno]["atten"],
self.maps[mapno].source), file=fileobj)
except AttributeError:
print("map", mapno, "has no channels")
return True
def save_map_data(self, mapkeys=None):
"""
create a dict with the map data from the designated maps and pickle it to disk
This speeds up later retrieval of the images
@param mapkeys : numbers of the maps (default: all)
@type mapkeys : list of int
"""
if mapkeys:
self.logger.info("show_images:")
else:
mapkeys = list(self.maps.keys())
mapkeys.sort()
for key in mapkeys:
try:
list(self.maps[key].map_data.keys())
self.logger.debug("save_map_data: mapdata[%d] exists", key)
except AttributeError:
self.maps[key].maps_from_tlogs()
self.logger.debug("save_map_data: loaded mapdata[%d]", key)
if 'dec_offset' in self.maps[key].map_data:
self.logger.debug("save_map_data: mapdata[%d] is centered", key)
else:
self.maps[key].get_offsets()
self.logger.debug("save_map_data: mapdata[%d] has been centered", key)
if 'grid_x' in self.maps[key].map_data:
self.logger.debug("save_map_data: mapdata[%d] is regridded", key)
else:
self.maps[key].regrid()
self.logger.debug("save_map_data: mapdata[%d] has been regridded", key)
export = {}
for key in mapkeys:
export[key] = self.maps[key].map_data
filename = "maps-%4d-%03d.pkl" % (self.year, self.doy)
exportfile = open(filename, "wb")  # pickle requires a binary-mode file
pickle.dump(export, exportfile)
exportfile.close()
return export
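# Usage sketch (assumes a Session instance `session` for a date that has maps):
#   export = session.save_map_data()   # writes maps-YYYY-DDD.pkl in the working dir
#   with open("maps-%4d-%03d.pkl" % (session.year, session.doy), "rb") as fin:
#       map_data = pickle.load(fin)    # dict of map_data dicts keyed by map number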
# --------------------------- method for boresights -------------------------
def get_good_boresights(self):
"""
Retrieves data from 'tlog' table for boresights with a given channel
Returns a numpy array with columns containing::
0 - UNIX time
1 - counts
2 - integration time
3 - azimuth
4 - elevation
5 - noise diode state
6 - chan (if chan=None)
"""
keys = list(self.boresights.keys())
keys.sort()
self.good_boresights = {}
for key in keys:
self.good_boresights[key] = []
try:
channels = list(self.boresights[key].channels)
except AttributeError:
self.logger.warning("get_good_boresights: %d has no channels", key)
else:
if bool(channels):
for ch in channels:
if hasattr(self.boresights[key], "start"):
start = self.boresights[key].start
end = self.boresights[key].end
else:
continue
self.good_boresights[key].append(ch)
if self.good_boresights[key] == []:
self.good_boresights.pop(key)
return self.good_boresights
def make_bs_dir(self, good_only=False, save=False):
"""
Notes
=====
Each good boresight consists of two scans
"""
if save:
fileobj = open(self.session_dir+"xscans.txt", "w")
else:
fileobj = sys.stdout
if good_only:
bs_keys = list(self.get_good_boresights().keys())
else:
# these are the keys for all boresights, good or bad
bs_keys = list(self.boresights.keys())
bs_keys.sort()
num_scans = len(bs_keys)
if num_scans == 0:
# no data
print(" Boresight Summary for %4d/%03d" % (self.year, self.doy), file=fileobj)
print("\nNo valid boresights with tlog data found", file=fileobj)
return False
print(" Boresight Summary for %4d/%03d" % (self.year, self.doy), file=fileobj)
print(" ID date ch axis freq. pol IF bw source Top diode az el",
file=fileobj)
print("------ ------------- -- ---- ------ ---- ---- ---------------- ------ ------ ----- ----",
file=fileobj)
for bs in bs_keys:
source = self.boresights[bs].source
try:
bs_channels = self.boresights[bs].channels
except AttributeError:
print("%6d has no channels" % bs, file=fileobj)
try:
top = self.boresights[bs].bs_data['tsys'][0]
except AttributeError:
print("%6d has no data" % bs, file=fileobj)
else:
bs_channels.sort()
if bool(bs_channels.any()):
for ch in bs_channels:
UNIXtime = self.boresights[bs].epoch
axis = self.boresights[bs].axis
az = self.boresights[bs].bs_data['az'][0]
el = self.boresights[bs].bs_data['el'][0]
print("%6d %13s %2s %4s %6.0f %4s %4.0f %16s %6.2f %6s %5.1f %4.1f" % (
bs,
time.strftime("%Y/%j %H%M", time.gmtime(UNIXtime)),
ch, axis,
self.boresights[bs].freq,
self.boresights[bs].pol,
self.boresights[bs].IFbw,
source, top,
self.boresights[bs].diode, az, el), file=fileobj)
else:
print("%6d has no channels" % bs, file=fileobj)
return True
class DSS28db(mysql.BaseDB):
"""
subclass for the DSS-28 EAC database
provides methods for handling tables
Attributes::
logger - logging.Logger object
receiver - receivers which provide data
sessions - dict of sessions obtained with 'get_session'
"""
def __init__(self, host=_host, user=_user, pw=_pw,
name='dss28_eac', port=3306):
"""
create an instance BaseDB subclass for the DSS-28 EAC database
The defaults for BaseDB are for the DSS-28 EAC database
"""
mylogger = logging.getLogger(logger.name+".DSS28db")
mysql.BaseDB.__init__(self, host=host, user=user, pw=pw, name=name, port=port)
self.logger = mylogger
self.sessions = {}
def insertRecord(self, table, rec):
"""
not allowed for subclass
"""
self.logger.warning("insertRecord: not allowed for %s", self.name)
def updateValues(self, vald, table):
"""
not allowed for subclass
"""
self.logger.warning("updateValues: not allowed for %s", self.name)
def extract_boresight_data(self, year, doy):
"""
Get the metadata for the boresights on the designated day.
The boresights are extracted from table 'xscan'. Missing 'el' data are
obtained from table 'xpwr'. The source, scan axis and channel are obtained
from table 'xpwr_cfg'. The receiver data are obtained from table 'rss_cfg'.
Returns a dictionary like this::
{'utc': list of datetime.timedelta,
'epoch': list of float,
'az': list of float,
'el': list of float,
'chan': list of int,
'tsrc': list of float,
'axis': list of str,
'source': list of str,
'xpwr_cfg_id: list of int',
'xscan_id': list of int,
'source_id': list of int,
'rx': list of dict}
An 'rx' dict looks like this::
{ 2: {'if_bw': float,
'if_mode': str,
'pol': str,
'sky_freq': float,
'utc': datetime.timedelta},
4: { ... },
....
16: { ... }}
@param year : year of observation
@type year : int
@param doy : day of year
@type doy : int
@return: dict
"""
# Get the boresight data from xscan
columns = "utc, epoch, tsrc, az, el, xscan_id, xpwr_cfg_id"
boresight_data = self.get_rows_by_date("xscan", columns, year, doy)
# Get the missing elevation data from xpwr
times = boresight_data['utc']
power_data = self.get_rows_by_time('xpwr',['utc','el','tsys'],
year,doy,times)
# Fix the missing elevation data
boresight_data['el'] = power_data['el']
# Get the source information from gavrt_sources.source
columns = "source_id, axis, chan"
for column in columns.split(','):
boresight_data[column.strip()] = []
for cfg_id in boresight_data['xpwr_cfg_id']:
response = self.get_as_dict("select "
+ columns
+ " from xpwr_cfg where xpwr_cfg_id="+str(cfg_id)+";")
for key in list(response.keys()):
boresight_data[key].append(response[key][0])
boresight_data['source'] = []
for source_id in boresight_data['source_id']:
response = self.get_as_dict("select name from gavrt_sources.source where source_id="
+str(source_id)+";")
boresight_data['source'].append(response['name'][0])
# Get the receiver information from rss_cfg
columns | |
# src/sage/quivers/algebra.py
"""
Path Algebras
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2012 <NAME> <<EMAIL>>
# 2013 <NAME> <<EMAIL>>
# 2014 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details; the full text
# is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import six
from sage.misc.cachefunc import cached_method
from sage.combinat.free_module import CombinatorialFreeModule, CombinatorialFreeModuleElement
from .algebra_elements import PathAlgebraElement
class PathAlgebra(CombinatorialFreeModule):
r"""
Create the path algebra of a :class:`quiver <DiGraph>` over a given field.
Given a quiver `Q` and a field `k`, the path algebra `kQ` is defined as
follows. As a vector space it has basis the set of all paths in `Q`.
Multiplication is defined on this basis and extended bilinearly. If `p`
is a path with terminal vertex `t` and `q` is a path with initial vertex
`i` then the product `p*q` is defined to be the composition of the
paths `p` and `q` if `t = i` and `0` otherwise.
INPUT:
- ``k`` -- field (or commutative ring), the base field of the path algebra
- ``P`` -- the path semigroup of a quiver `Q`
- ``order`` -- optional string, one of "negdegrevlex" (default),
"degrevlex", "negdeglex" or "deglex", defining the monomial order to be
used.
OUTPUT:
- the path algebra `kP` with the given monomial order
NOTE:
Monomial orders that are not degree orders are not supported.
EXAMPLES::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b']}}).path_semigroup()
sage: A = P.algebra(GF(7))
sage: A
Path algebra of Multi-digraph on 3 vertices over Finite Field of size 7
sage: A.variable_names()
('e_1', 'e_2', 'e_3', 'a', 'b')
Note that path algebras are uniquely defined by their quiver, field and
monomial order::
sage: A is P.algebra(GF(7))
True
sage: A is P.algebra(GF(7), order="degrevlex")
False
sage: A is P.algebra(RR)
False
sage: A is DiGraph({1:{2:['a']}}).path_semigroup().algebra(GF(7))
False
The path algebra of an acyclic quiver has a finite basis::
sage: A.dimension()
6
sage: list(A.basis())
[e_1, e_2, e_3, a, b, a*b]
The path algebra can create elements from paths or from elements of the
base ring::
sage: A(5)
5*e_1 + 5*e_2 + 5*e_3
sage: S = A.semigroup()
sage: S
Partial semigroup formed by the directed paths of Multi-digraph on 3 vertices
sage: p = S([(1, 2, 'a')])
sage: r = S([(2, 3, 'b')])
sage: e2 = S([(2, 2)])
sage: x = A(p) + A(e2)
sage: x
a + e_2
sage: y = A(p) + A(r)
sage: y
a + b
Path algebras are graded algebras. The grading is given by assigning
to each basis element the length of the path corresponding to that
basis element::
sage: x.is_homogeneous()
False
sage: x.degree()
Traceback (most recent call last):
...
ValueError: Element is not homogeneous.
sage: y.is_homogeneous()
True
sage: y.degree()
1
sage: A[1]
Free module spanned by [a, b] over Finite Field of size 7
sage: A[2]
Free module spanned by [a*b] over Finite Field of size 7
TESTS::
sage: TestSuite(A).run()
"""
Element = PathAlgebraElement
###########################################################################
# #
# PRIVATE FUNCTIONS #
# These functions are not meant to be seen by the end user. #
# #
###########################################################################
def __init__(self, k, P, order = "negdegrevlex"):
"""
Creates a :class:`PathAlgebra` object. Type ``PathAlgebra?`` for
more information.
INPUT:
- ``k`` -- a commutative ring
- ``P`` -- the partial semigroup formed by the paths of a quiver
TESTS::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b', 'c']}, 4:{}}).path_semigroup()
sage: P.algebra(GF(5))
Path algebra of Multi-digraph on 4 vertices over Finite Field of size 5
"""
# The following hidden methods are relevant:
#
# - _base
# The base ring of the path algebra.
# - _basis_keys
# Finite enumerated set containing the QuiverPaths that form the
# basis.
# - _quiver
# The quiver of the path algebra
# - _semigroup
# Shortcut for _quiver.semigroup()
from sage.categories.graded_algebras_with_basis import GradedAlgebrasWithBasis
self._quiver = P.quiver()
self._semigroup = P
self._ordstr = order
super(PathAlgebra, self).__init__(k, self._semigroup,
prefix='',
#element_class=self.Element,
category=GradedAlgebrasWithBasis(k),
bracket=False)
self._assign_names(self._semigroup.variable_names())
def order_string(self):
"""
Return the string that defines the monomial order of this algebra.
EXAMPLES::
sage: P1 = DiGraph({1:{1:['x','y','z']}}).path_semigroup().algebra(GF(25,'t'))
sage: P2 = DiGraph({1:{1:['x','y','z']}}).path_semigroup().algebra(GF(25,'t'), order="degrevlex")
sage: P3 = DiGraph({1:{1:['x','y','z']}}).path_semigroup().algebra(GF(25,'t'), order="negdeglex")
sage: P4 = DiGraph({1:{1:['x','y','z']}}).path_semigroup().algebra(GF(25,'t'), order="deglex")
sage: P1.order_string()
'negdegrevlex'
sage: P2.order_string()
'degrevlex'
sage: P3.order_string()
'negdeglex'
sage: P4.order_string()
'deglex'
"""
return self._ordstr
@cached_method
def gens(self):
"""
Return the generators of this algebra (idempotents and arrows).
EXAMPLES::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b', 'c']}, 4:{}}).path_semigroup()
sage: A = P.algebra(GF(5))
sage: A.variable_names()
('e_1', 'e_2', 'e_3', 'e_4', 'a', 'b', 'c')
sage: A.gens()
(e_1, e_2, e_3, e_4, a, b, c)
"""
return tuple(self.gen(i) for i in range(self.ngens()))
@cached_method
def arrows(self):
"""
Return the arrows of this algebra (corresponding to edges of the
underlying quiver).
EXAMPLES::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b', 'c']}, 4:{}}).path_semigroup()
sage: A = P.algebra(GF(5))
sage: A.arrows()
(a, b, c)
"""
return tuple(self._from_dict( {index: self.base_ring().one()},
remove_zeros=False )
for index in self._semigroup.arrows())
@cached_method
def idempotents(self):
"""
Return the idempotents of this algebra (corresponding to vertices
of the underlying quiver).
EXAMPLES::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b', 'c']}, 4:{}}).path_semigroup()
sage: A = P.algebra(GF(5))
sage: A.idempotents()
(e_1, e_2, e_3, e_4)
"""
return tuple(self._from_dict( {index: self.base_ring().one()},
remove_zeros=False )
for index in self._semigroup.idempotents())
@cached_method
def gen(self, i):
"""
Return the `i`-th generator of this algebra.
This is an idempotent (corresponding to a trivial path at a
vertex) if `i < n` (where `n` is the number of vertices of the
quiver), and a single-edge path otherwise.
EXAMPLES::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b', 'c']}, 4:{}}).path_semigroup()
sage: A = P.algebra(GF(5))
sage: A.gens()
(e_1, e_2, e_3, e_4, a, b, c)
sage: A.gen(2)
e_3
sage: A.gen(5)
b
"""
return self._from_dict( {self._semigroup.gen(i): self.base_ring().one()},
remove_zeros = False )
def ngens(self):
"""
Number of generators of this algebra.
EXAMPLES::
sage: P = DiGraph({1:{2:['a']}, 2:{3:['b', 'c']}, 4:{}}).path_semigroup()
sage: A = P.algebra(GF(5))
sage: A.ngens()
7
"""
return self._semigroup.ngens()
def _element_constructor_(self, x):
"""
Attempt to construct an element of ``self`` from ``x``.
TESTS::
sage: A = DiGraph({2:{3:['b']}}).path_semigroup().algebra(ZZ)
sage: B = DiGraph({0:{1:['a']}, 1:{2:['c']}, 2:{3:['b']}}).path_semigroup().algebra(GF(5))
sage: x = A('b') + 1 # indirect doctest
sage: x
e_2 + b + e_3
sage: B(x) # indirect doctest
e_2 + b + e_3
sage: A(1) # indirect doctest
e_2 + e_3
sage: B(2) # indirect doctest
2*e_0 + 2*e_1 + 2*e_2 + 2*e_3
sage: B([(0,1,'a'),(1,2,'c')]) # indirect doctest
a*c
"""
from sage.quivers.paths import QuiverPath
# If it's an element of another path algebra, do a linear combination
# of the basis
if isinstance(x, PathAlgebraElement) and isinstance(x.parent(), PathAlgebra):
result = {}
coeffs = x.monomial_coefficients()
for key in coeffs:
result[self._semigroup(key)] = coeffs[key]
return self.element_class(self, result)
# If it's a QuiverPath return the associated basis element
if isinstance(x, QuiverPath):
return self.element_class(self, {x: self.base_ring().one()})
# If it's a scalar, return a multiple of one:
if x in self.base_ring():
return self.one()*x
# If it's a tuple or a list, try and create a QuiverPath from it and
# then return the associated basis element
if isinstance(x, (tuple, list, six.string_types)):
return self.element_class(self, {self._semigroup(x): self.base_ring().one()})
if isinstance(x, dict):
return self.element_class(self, x)
# Otherwise let CombinatorialFreeModule try
return super(PathAlgebra, self)._element_constructor_(x)
def _coerce_map_from_(self, other):
"""
Return ``True`` if there is a coercion from ``other`` to ``self``.
The algebras that coerce into a path algebra are rings `k` or path
algebras `kQ` such that `k` has a coercion into the base ring of
``self`` and `Q` is a subquiver of the quiver of ``self``.
In particular, the path semigroup of a subquiver coerces into the
algebra.
TESTS::
sage: P1 = DiGraph({1:{2:['a']}}).path_semigroup()
sage: P2 = DiGraph({1:{2:['a','b']}}).path_semigroup()
sage: A1 = P1.algebra(GF(3))
sage: A2 = P2.algebra(GF(3))
sage: A1.coerce_map_from(A2) # indirect doctest
sage: A2.coerce_map_from(A1) # indirect doctest
Conversion map:
From: Path algebra of Multi-digraph on 2 vertices over Finite Field of size 3
To: Path algebra of Multi-digraph on 2 vertices over Finite Field of size 3
sage: A1.coerce_map_from(ZZ) # indirect doctest
Composite map:
From: Integer Ring
To: Path algebra of | |
# Repository: jonathancross/specter-desktop
import base64
import datetime
import hashlib
import json
import logging
import secrets
from decimal import Decimal
from os import access
from typing import List
from urllib import request
from urllib.parse import urlparse
import pytz
import requests
from flask import current_app as app
from flask_babel import lazy_gettext as _
logger = logging.getLogger(__name__)
# TODO: Update with prod values
code_verifier = "<KEY>"
class SwanApiException(Exception):
pass
class SwanApiRefreshTokenException(SwanApiException):
pass
class SwanClient:
def __init__(
self, hostname, access_token: str, access_token_expires, refresh_token
):
self.hostname = hostname
self.access_token: str = str(
access_token
) # The ServiceSwan storage might have one in its storage
self.access_token_expires = (
access_token_expires # a timestamp when the access token will expire
)
self.refresh_token = refresh_token
self.api_url = app.config.get("SWAN_API_URL")
self.client_id = app.config.get("SWAN_CLIENT_ID")
self.client_secret = app.config.get("SWAN_CLIENT_SECRET")
def is_access_token_valid(self):
return (
self.access_token_expires > datetime.datetime.now(tz=pytz.utc).timestamp()
)
def calc_callback_url(self):
return f"http://{ self.hostname }{app.config['APP_URL_PREFIX']}{app.config['EXT_URL_PREFIX']}/swan/oauth2/callback"
def get_oauth2_start_url(self, callback_hostname):
"""
Build the OAuth2/PKCE authorization URL that starts the Swan API integration.
The callback handler later exchanges the returned code for our initial
access_token and refresh_token.
"""
# Let's start the PKCE-flow
global code_verifier
self.hostname = callback_hostname
if code_verifier is None:
code_verifier = secrets.token_urlsafe(43)
# see specification: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2
# and example impl: https://github.com/RomeoDespres/pkce/blob/master/pkce/__init__.py#L94-L96
hashed = hashlib.sha256(code_verifier.encode("ascii")).digest()
encoded = base64.urlsafe_b64encode(hashed)
code_challenge = encoded.decode("ascii")[:-1]
flow_url = f"{self.api_url}/oidc/auth?"
query_params = [
f"client_id={self.client_id}",
f"redirect_uri={self.calc_callback_url()}",
"response_type=code",
"response_mode=query",
f"code_challenge={code_challenge}",
"code_challenge_method=S256",
"state=kjkmdskdmsmmsmdslmdlsm",
"scope=offline_access v1 write:vendor_wallet read:vendor_wallet write:automatic_withdrawal read:automatic_withdrawal",
"prompt=consent",
]
flow_url += "&".join(query_params)
return flow_url
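# PKCE sketch (RFC 7636), mirroring the derivation above; `verifier` is a throwaway
# example value, and stripping the trailing '=' matches the [:-1] used above:
#   verifier = secrets.token_urlsafe(43)
#   challenge = base64.urlsafe_b64encode(
#       hashlib.sha256(verifier.encode("ascii")).digest()
#   ).decode("ascii").rstrip("=")
# The challenge goes into the /oidc/auth URL; the verifier is sent later when the
# authorization code is exchanged for tokens in _get_access_token().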
def _get_access_token(
self, code: str = None, code_verifier: str = None, request_uri=None
) -> str:
"""
If code and code_verifier are specified, this is our initial request for an
access_token and, more importantly, the refresh_token.
If code is None, use the refresh_token to get a new short-lived access_token.
If we don't have the refresh_token, raise SwanApiRefreshTokenException.
"""
# Must explicitly set User-Agent; Swan firewall blocks all requests with "python".
auth_header = {"User-Agent": "Specter Desktop"}
if code:
# Requesting initial refresh_token and access_token
payload = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"code_verifier": code_verifier,
"grant_type": "authorization_code",
"state": "kjkmdskdmsmmsmdslmdlsm",
"code": code,
"redirect_uri": request_uri,
}
else:
if self.is_access_token_valid():
return self.access_token
# Use the refresh_token to get a new access_token
if not self.refresh_token:
raise SwanApiRefreshTokenException(
"access_token is expired but we don't have a refresh_token"
)
payload = {
"grant_type": "refresh_token",
# "redirect_uri": # Necessary? Probably yes but more difficult to reconstruct it but the refresh-token is not yet used anyway
"refresh_token": self.refresh_token,
"scope": "offline_access v1 write:vendor_wallet read:vendor_wallet write:automatic_withdrawal read:automatic_withdrawal",
}
auth_hash = base64.b64encode(
f"{self.client_id}:{self.client_secret}".encode()
).decode()
auth_header["Authorization"] = f"Basic {auth_hash}"
response = requests.post(
f"{self.api_url}/oidc/token",
data=payload,
headers=auth_header,
)
resp = json.loads(response.text)
"""
{
"access_token": "***************",
"expires_in": 3600,
"refresh_token": "***************",
"scope": "offline_access v1 write:vendor_wallet read:vendor_wallet write:automatic_withdrawal read:automatic_withdrawal",
"token_type": "Bearer"
}
"""
if resp.get("access_token"):
self.access_token = resp.get("access_token")
self.access_token_expires = (
datetime.datetime.now(tz=pytz.utc)
+ datetime.timedelta(seconds=resp["expires_in"])
).timestamp()
self.refresh_token = resp.get("refresh_token")
return self.access_token
else:
logger.warning(response)
raise SwanApiException(response.text)
def handle_oauth2_auth_callback(self, request):
code = request.args.get("code")
rp = urlparse(request.url)
request_uri = f"{rp.scheme}://{rp.netloc}{rp.path}"
self._get_access_token(
code=code, code_verifier=code_verifier, request_uri=request_uri
)
def authenticated_request(
self, endpoint: str, method: str = "GET", json_payload: dict = {}
) -> dict:
logger.debug(f"{method} endpoint: {endpoint}")
access_token = self._get_access_token()
# Must explicitly set User-Agent; Swan firewall blocks all requests with "python".
auth_header = {
"User-Agent": "Specter Desktop",
"Authorization": f"Bearer {access_token}",
}
try:
if method == "GET":
response = requests.get(self.api_url + endpoint, headers=auth_header)
elif method in ["POST", "PATCH", "PUT", "DELETE"]:
response = requests.request(
method=method,
url=self.api_url + endpoint,
headers=auth_header,
json=json_payload,
)
if response.status_code != 200:
raise SwanApiException(f"{response.status_code}: {response.text}")
return response.json()
except Exception as e:
# TODO: tighten up expected Exceptions
logger.exception(e)
logger.error(
f"endpoint: {self.api_url}{endpoint} | method: {method} | payload: {json.dumps(json_payload, indent=4)}"
)
logger.error(f"{response.status_code}: {response.text}")
raise e
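# Usage sketch (the endpoint shown is the same one get_wallets() uses below):
#   client = SwanClient(hostname, access_token, access_token_expires, refresh_token)
#   wallets = client.authenticated_request("/apps/v20210824/wallets", method="GET")
# A non-200 response raises SwanApiException; an expired bearer token is refreshed
# transparently via _get_access_token().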
def get_autowithdrawal_addresses(self, swan_wallet_id: str) -> dict:
"""
{
"entity": "wallet",
"item": {
"id": "c47e1e83-90a0-45da-ae25-6a0d324b9f29",
"isConfirmed": false,
"displayName": "Specter autowithdrawal to SeedSigner demo",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "seedsigner_demo"
},
"btcAddresses": []
}
}
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets/{swan_wallet_id}?full=true",
method="GET",
)
return resp
def update_autowithdrawal_addresses(
self,
swan_wallet_id: str,
specter_wallet_name: str,
specter_wallet_alias: str,
addresses: List[str],
) -> dict:
"""
* If SWAN_WALLET_ID is known, any existing unused addresses are cleared.
* If there is no known SWAN_WALLET_ID, we `POST` to create an initial Swan wallet and store the resulting SWAN_WALLET_ID.
* Sends the list of new addresses for SWAN_WALLET_ID.
"""
# normalize the structure into what Swan will accept,
# e.g.: [{"address": "bcrt1q8k8a73crvjs06jhdj7xee8mace3mhlxj4pdvna"}, {"address": "bcrt ...
addresses = [address["address"] for address in addresses]
if swan_wallet_id:
# We already have a Swan walletId. DELETE the existing unused addresses...
self.delete_autowithdrawal_addresses(swan_wallet_id)
# ...and then append the new ones.
endpoint = f"/apps/v20210824/wallets/{swan_wallet_id}/addresses"
method = "PATCH"
else:
# We don't yet have a Swan walletId. POST to create one.
endpoint = "/apps/v20210824/wallets"
method = "POST"
resp = self.authenticated_request(
endpoint=endpoint,
method=method,
json_payload={
"btcAddresses": [{"address": addr} for addr in addresses],
"displayName": str(
_('Specter Desktop "{}"').format(specter_wallet_name)
), # Can't pass a LazyString into json
"metadata": {
"specter_wallet_alias": specter_wallet_alias,
},
},
)
"""
Response should include wallet ("item") details:
{
"entity": "wallet",
"item": {
"id": "c47e1e83-90a0-45da-ae25-6a0d324b9f29",
"isConfirmed": false,
"displayName": "Specter autowithdrawal to SeedSigner demo",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "seedsigner_demo"
}
}
}
"""
if "item" in resp and "id" in resp["item"]:
if resp["item"]["id"] != swan_wallet_id:
swan_wallet_id = resp["item"]["id"]
return swan_wallet_id
else:
raise SwanApiException(
f"No 'id' returned for the new/updated wallet: {json.dumps(resp, indent=4)}"
)
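# Usage sketch (wallet names are illustrative): `addresses` is expected in the
# Specter format, i.e. a list of dicts with an "address" key:
#   addresses = [{"address": "bcrt1q..."}, {"address": "bcrt1q..."}]
#   swan_wallet_id = client.update_autowithdrawal_addresses(
#       swan_wallet_id, "My wallet", "my_wallet", addresses)
# The method extracts the bare address strings and re-wraps them as
# [{"address": ...}] entries in the btcAddresses payload.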
def delete_autowithdrawal_addresses(self, swan_wallet_id: str):
"""
Deletes all unused autowithdrawal addresses from the specified SWAN_WALLET_ID
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets/{swan_wallet_id}/addresses",
method="DELETE",
)
return resp
def get_autowithdrawal_info(self) -> dict:
"""
See the note in set_autowithdrawal. This returns all autowithdrawal objects from
the Swan side.
"""
resp = self.authenticated_request(
endpoint="/apps/v20210824/automatic-withdrawal",
method="GET",
)
return resp
def set_autowithdrawal(self, swan_wallet_id, btc_threshold: Decimal) -> dict:
"""
0 == Weekly; other float values = BTC threshold
The Swan api generates a new autowithdrawal id each time but there is no support to
update an existing autowithdrawal, other than activating or deactivating it.
New autowithdrawals are initialized as `isActive: false` and require the user to
complete a Swan-side email verification step.
We save the resulting autowithdrawal_id even though it isn't clear at the moment if
it's desirable to ever call the `deactivate/` or `activate/` endpoints.
"""
endpoint = "/apps/v20210824/automatic-withdrawal"
method = "POST"
resp = self.authenticated_request(
endpoint=endpoint,
method=method,
json_payload={
"walletId": swan_wallet_id,
"minBtcThreshold": btc_threshold,
},
)
"""
{
"entity": "automaticWithdrawal",
"item": {
"id": "******************",
"minBtcThreshold": "0.01",
"isActive": false,
"isCanceled": false,
"createdAt": "2022-01-07T02:14:56.070Z",
"walletId": "******************",
"walletAddressId": null
}
}
"""
if "item" in resp and "id" in resp["item"]:
return resp
else:
raise SwanApiException(
f"No 'id' returned for the new/updated autowithdrawal: {json.dumps(resp, indent=4)}"
)
def activate_autowithdrawal(self, autowithdrawal_id) -> dict:
"""
Activates the autowithdrawal specified in SwanService.AUTOWITHDRAWAL_ID.
If the automatic withdrawal was just created, this will generate a 400 error:
"Cannot activate an automatic withdrawal before withdrawal address is confirmed".
The user must first confirm the first withdrawal addr via Swan-side email flow.
After they confirm, the autowithdrawal should then return `isActive: true`.
NOT CURRENTLY USED; remove if we don't ever enable disable/activate flows.
"""
endpoint = f"/apps/v20210824/automatic-withdrawal/{autowithdrawal_id}/activate"
method = "POST"
resp = self.authenticated_request(
endpoint=endpoint,
method=method,
)
"""
{
"entity": "automaticWithdrawal",
"item": {
"id": "******************",
"minBtcThreshold": "0.01",
"isActive": true,
"isCanceled": false,
"createdAt": "2022-01-07T02:14:56.070Z",
"walletId": "******************",
"walletAddressId": null
}
}
"""
if "item" in resp and "id" in resp["item"]:
autowithdrawal_id = resp["item"]["id"]
return autowithdrawal_id
raise SwanApiException(
f"No 'id' returned for the new/updated autowithdrawal: {json.dumps(resp, indent=4)}"
)
def get_wallet_details(self, swan_wallet_id: str) -> dict:
"""
{
"entity": "wallet",
"item": {
"id": "********************",
"walletAddressId": ""********************",
"btcAddress": ""********************",
"isConfirmed": true,
"displayName": "Specter Desktop \"DCA Cold Storage\"",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "dca_cold_storage"
}
}
}
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets/{swan_wallet_id}",
method="GET",
)
return resp
def get_wallets(self) -> dict:
"""
Return all Swan wallet entries. Should only be one per Specter-Swan user combo (but can be more due
to testing/debugging, calling `/wallets` POST more than once, etc.)
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets",
method="GET",
)
"""
{
"entity": "wallet",
"list": [
{
"id": "**********",
"walletAddressId": "**********",
"btcAddress": "bc1q**********",
"isConfirmed": false,
"displayName": "Specter Desktop \"SeedSigner demo\"",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "seedsigner_demo"
}
},
{
"id": "**********",
"walletAddressId": "**********",
"btcAddress": "bc1q**********",
"isConfirmed": false,
"displayName": "Specter Desktop \"DCA Corn\"",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
| |
## dea_plotting.py
'''
Description: This file contains a set of python functions for plotting
Digital Earth Australia data.
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Australia data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, file one on
Github: https://github.com/GeoscienceAustralia/dea-notebooks/issues/new
Functions included:
rgb
display_map
map_shapefile
xr_animation
plot_wo
Last modified: February 2021
'''
# Import required packages
import math
import branca
import folium
import calendar
import ipywidgets
import numpy as np
import geopandas as gpd
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib import colors as mcolours
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from datetime import datetime
from pyproj import Proj, transform
from IPython.display import display
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1 import make_axes_locatable
from ipyleaflet import Map, Marker, Popup, GeoJSON, basemaps, Choropleth
from skimage import exposure
from odc.ui import image_aspect
from matplotlib.animation import FuncAnimation
import pandas as pd
from pathlib import Path
from shapely.geometry import box
from skimage.exposure import rescale_intensity
from tqdm.auto import tqdm
import warnings
def rgb(ds,
bands=['nbart_red', 'nbart_green', 'nbart_blue'],
index=None,
index_dim='time',
robust=True,
percentile_stretch=None,
col_wrap=4,
size=6,
aspect=None,
savefig_path=None,
savefig_kwargs={},
**kwargs):
"""
Takes an xarray dataset and plots RGB images using three imagery
bands (e.g ['nbart_red', 'nbart_green', 'nbart_blue']). The `index`
parameter allows easily selecting individual or multiple images for
RGB plotting. Images can be saved to file by specifying an output
path using `savefig_path`.
This function was designed to work as an easier-to-use wrapper
around xarray's `.plot.imshow()` functionality.
Last modified: September 2020
Parameters
----------
ds : xarray Dataset
A two-dimensional or multi-dimensional array to plot as an RGB
image. If the array has more than two dimensions (e.g. multiple
observations along a 'time' dimension), either use `index` to
select one (`index=0`) or multiple observations
(`index=[0, 1]`), or create a custom faceted plot using e.g.
`col="time"`.
bands : list of strings, optional
A list of three strings giving the band names to plot. Defaults
to '['nbart_red', 'nbart_green', 'nbart_blue']'.
index : integer or list of integers, optional
`index` can be used to select one (`index=0`) or multiple
observations (`index=[0, 1]`) from the input dataset for
plotting. If multiple images are requested these will be plotted
as a faceted plot.
index_dim : string, optional
The dimension along which observations should be plotted if
multiple observations are requested using `index`. Defaults to
`time`.
robust : bool, optional
Produces an enhanced image where the colormap range is computed
with 2nd and 98th percentiles instead of the extreme values.
Defaults to True.
percentile_stretch : tuple of floats
A tuple of two floats (between 0.00 and 1.00) that can be used
to clip the colormap range to manually specified percentiles to
get more control over the brightness and contrast of the image.
The default is None; '(0.02, 0.98)' is equivalent to
`robust=True`. If this parameter is used, `robust` will have no
effect.
col_wrap : integer, optional
The number of columns allowed in faceted plots. Defaults to 4.
size : integer, optional
The height (in inches) of each plot. Defaults to 6.
aspect : integer, optional
Aspect ratio of each facet in the plot, so that aspect * size
gives width of each facet in inches. Defaults to None, which
will calculate the aspect based on the x and y dimensions of
the input data.
savefig_path : string, optional
Path to export image file for the RGB plot. Defaults to None,
which does not export an image file.
savefig_kwargs : dict, optional
A dict of keyword arguments to pass to
`matplotlib.pyplot.savefig` when exporting an image file. For
all available options, see:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
**kwargs : optional
Additional keyword arguments to pass to `xarray.plot.imshow()`.
For example, the function can be used to plot into an existing
matplotlib axes object by passing an `ax` keyword argument.
For more options, see:
http://xarray.pydata.org/en/stable/generated/xarray.plot.imshow.html
Returns
-------
An RGB plot of one or multiple observations, and optionally an image
file written to file.
"""
# Get names of x and y dims
# TODO: remove geobox and try/except once datacube 1.8 is default
try:
y_dim, x_dim = ds.geobox.dimensions
except AttributeError:
from datacube.utils import spatial_dims
y_dim, x_dim = spatial_dims(ds)
# If ax is supplied via kwargs, ignore aspect and size
if 'ax' in kwargs:
# Create empty aspect size kwarg that will be passed to imshow
aspect_size_kwarg = {}
else:
# Compute image aspect
if not aspect:
aspect = image_aspect(ds)
# Populate aspect size kwarg with aspect and size data
aspect_size_kwarg = {'aspect': aspect, 'size': size}
# If no value is supplied for `index` (the default), plot using default
# values and arguments passed via `**kwargs`
if index is None:
# Select bands and convert to DataArray
da = ds[bands].to_array().compute()
# If percentile_stretch == True, clip plotting to percentile vmin, vmax
if percentile_stretch:
vmin, vmax = da.quantile(percentile_stretch).values
kwargs.update({'vmin': vmin, 'vmax': vmax})
# If there are more than two dimensions and the index dimension == 1,
# squeeze this dimension out to remove it
if ((len(ds.dims) > 2) and
('col' not in kwargs) and
(len(da[index_dim]) == 1)):
da = da.squeeze(dim=index_dim)
# If there are more than two dimensions and the index dimension
# is longer than 1, raise an exception telling the user to use 'col'/`index`
elif ((len(ds.dims) > 2) and
('col' not in kwargs) and
(len(da[index_dim]) > 1)):
raise Exception(
f'The input dataset `ds` has more than two dimensions: '
f'{list(ds.dims.keys())}. Please select a single observation '
'using e.g. `index=0`, or enable faceted plotting by adding '
'the arguments e.g. `col="time", col_wrap=4` to the function '
'call'
)
img = da.plot.imshow(x=x_dim,
y=y_dim,
robust=robust,
col_wrap=col_wrap,
**aspect_size_kwarg,
**kwargs)
# If values provided for `index`, extract corresponding observations and
# plot as either single image or facet plot
else:
# If a float is supplied instead of an integer index, raise exception
if isinstance(index, float):
raise Exception(
f'Please supply `index` as either an integer or a list of '
'integers'
)
# If col argument is supplied as well as `index`, raise exception
if 'col' in kwargs:
raise Exception(
f'Cannot supply both `index` and `col`; please remove one and '
'try again'
)
# Convert index to generic type list so that number of indices supplied
# can be computed
index = index if isinstance(index, list) else [index]
# Select bands and observations and convert to DataArray
da = ds[bands].isel(**{index_dim: index}).to_array().compute()
# If percentile_stretch == True, clip plotting to percentile vmin, vmax
if percentile_stretch:
vmin, vmax = da.quantile(percentile_stretch).values
kwargs.update({'vmin': vmin, 'vmax': vmax})
# If multiple index values are supplied, plot as a faceted plot
if len(index) > 1:
img = da.plot.imshow(x=x_dim,
y=y_dim,
robust=robust,
col=index_dim,
col_wrap=col_wrap,
**aspect_size_kwarg,
**kwargs)
# If only one index is supplied, squeeze out index_dim and plot as a
# single panel
else:
img = da.squeeze(dim=index_dim).plot.imshow(robust=robust,
**aspect_size_kwarg,
**kwargs)
# If an export path is provided, save image to file. Individual and
# faceted plots have a different API (figure vs fig) so we get around this
# using a try statement:
if savefig_path:
print(f'Exporting image to {savefig_path}')
try:
img.fig.savefig(savefig_path, **savefig_kwargs)
except:
img.figure.savefig(savefig_path, **savefig_kwargs)
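# Usage sketch (assumes `ds` is an xarray Dataset loaded from Datacube with the
# default nbart_* bands):
#   rgb(ds, index=0)                                   # single observation
#   rgb(ds, index=[0, 1, 2], col_wrap=3)               # faceted plot
#   rgb(ds, index=0, percentile_stretch=(0.02, 0.98))  # manual contrast stretch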
def display_map(x, y, crs='EPSG:4326', margin=-0.5, zoom_bias=0):
"""
Given a set of x and y coordinates, this function generates an
interactive map with a bounded rectangle overlayed on Google Maps
imagery.
Last modified: September 2019
Modified from function written by <NAME> available here:
https://github.com/ceos-seo/data_cube_utilities/tree/master/data_cube_utilities
Parameters
---------- | |
)
if index_species is None or spec in index_species:
forward_strand_start = c.forward_strand_start
forward_strand_end = c.forward_strand_end
try:
forward_strand_start = int( forward_strand_start )
forward_strand_end = int( forward_strand_end )
except ValueError:
continue # start and end are not integers, can't add component to index, goto next component
# this likely only occurs when parse_e_rows is True?
# could a species exist as only e rows? should the
if forward_strand_end > forward_strand_start:
# require positive length; i.e. certain lines have start = end = 0 and cannot be indexed
indexes.add( c.src, forward_strand_start, forward_strand_end, pos, max=c.src_size )
except Exception, e:
# most likely a bad MAF
log.debug( 'Building MAF index on %s failed: %s' % ( filename, e ) )
return ( None, [], {}, 0 )
return ( indexes, species, species_chromosomes, blocks )
# builds and returns ( index, index_filename ) for specified maf_file
def build_maf_index( maf_file, species=None ):
indexes, found_species, species_chromosomes, blocks = build_maf_index_species_chromosomes( maf_file, species )
if indexes is not None:
fd, index_filename = tempfile.mkstemp()
out = os.fdopen( fd, 'w' )
indexes.write( out )
out.close()
return ( bx.align.maf.Indexed( maf_file, index_filename=index_filename, keep_open=True, parse_e_rows=False ), index_filename )
return ( None, None )
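# Usage sketch (file name and region are illustrative):
#   index, index_filename = build_maf_index( "alignments.maf", species=[ "hg19", "mm9" ] )
#   if index is not None:
#       for block, idx, offset in index.get_as_iterator_with_index_and_offset( "hg19.chr1", 10000, 20000 ):
#           print block
# `index_filename` is a temporary file and should be removed when no longer needed.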
def component_overlaps_region( c, region ):
if c is None:
return False
start, end = c.get_forward_strand_start(), c.get_forward_strand_end()
if region.start >= end or region.end <= start:
return False
return True
def chop_block_by_region( block, src, region, species=None, mincols=0 ):
# This chopping method was designed to maintain consistency with how start/end padding gaps have been handled in Galaxy thus far:
# the behavior seen when forcing blocks to be '+' relative to the src sequence (ref) and using block.slice_by_component( ref, slice_start, slice_end ).
# Whether or not this is the 'correct' behavior is questionable, but it at least maintains consistency.
# Comments welcome.
slice_start = block.text_size # max for the min()
slice_end = 0 # min for the max()
old_score = block.score # save old score for later use
# We no longer assume only one occurrence of src per block, so we need to check them all
for c in iter_components_by_src( block, src ):
if component_overlaps_region( c, region ):
if c.text is not None:
rev_strand = False
if c.strand == "-":
# We want our coord_to_col coordinates to be returned from positive stranded component
rev_strand = True
c = c.reverse_complement()
start = max( region.start, c.start )
end = min( region.end, c.end )
start = c.coord_to_col( start )
end = c.coord_to_col( end )
if rev_strand:
# need to orient slice coordinates to the original block direction
slice_len = end - start
end = len( c.text ) - start
start = end - slice_len
slice_start = min( start, slice_start )
slice_end = max( end, slice_end )
if slice_start < slice_end:
block = block.slice( slice_start, slice_end )
if block.text_size > mincols:
# restore old score, may not be accurate, but it is better than 0 for everything?
block.score = old_score
if species is not None:
block = block.limit_to_species( species )
block.remove_all_gap_columns()
return block
return None
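# Usage sketch (src and coordinates are illustrative; the region is built the same
# way as in fill_region_alignment below):
#   region = bx.intervals.Interval( 10000, 20000 )
#   region.chrom = "chr1"
#   region.strand = "+"
#   chopped = chop_block_by_region( block, "hg19.chr1", region )
# Returns None when the chopped block is empty or not longer than `mincols`.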
def orient_block_by_region( block, src, region, force_strand=None ):
# loop through components matching src,
# make sure each of these components overlaps the region,
# cache the strand for each of the overlapping components;
# if force_strand / region.strand is not in the strand cache, reverse complement.
# We could have 2 sequences with the same src overlapping the region on different
# strands; in that case no reverse-complementing is done.
strands = [ c.strand for c in iter_components_by_src( block, src ) if component_overlaps_region( c, region ) ]
if strands and ( force_strand is None and region.strand not in strands ) or ( force_strand is not None and force_strand not in strands ):
block = block.reverse_complement()
return block
def get_oriented_chopped_blocks_for_region( index, src, region, species=None, mincols=0, force_strand=None ):
for block, idx, offset in get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols, force_strand ):
yield block
def get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species=None, mincols=0, force_strand=None ):
for block, idx, offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
yield orient_block_by_region( block, src, region, force_strand ), idx, offset
# split a block with multiple occurrences of src into one block per src
def iter_blocks_split_by_src( block, src ):
for src_c in iter_components_by_src( block, src ):
new_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) )
new_block.text_size = block.text_size
for c in block.components:
if c == src_c or c.src != src:
new_block.add_component( deepcopy( c ) ) # components hold a reference to their alignment; don't lose the reference to the original alignment block in the original components
yield new_block
# split a block into multiple blocks covering all combinations in which each species appears only once per block
def iter_blocks_split_by_species( block, species=None ):
def __split_components_by_species( components_by_species, new_block ):
if components_by_species:
# more species with components to add to this block
components_by_species = deepcopy( components_by_species )
spec_comps = components_by_species.pop( 0 )
for c in spec_comps:
newer_block = deepcopy( new_block )
newer_block.add_component( deepcopy( c ) )
for value in __split_components_by_species( components_by_species, newer_block ):
yield value
else:
# no more components to add, yield this block
yield new_block
# divide components by species
spec_dict = {}
if not species:
species = []
for c in block.components:
spec, chrom = src_split( c.src )
if spec not in spec_dict:
spec_dict[ spec ] = []
species.append( spec )
spec_dict[ spec ].append( c )
else:
for spec in species:
spec_dict[ spec ] = []
for c in iter_components_by_src_start( block, spec ):
spec_dict[ spec ].append( c )
empty_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) ) # should we copy attributes?
empty_block.text_size = block.text_size
# call recursive function to split into each combo of spec/blocks
for value in __split_components_by_species( spec_dict.values(), empty_block ):
sort_block_components_by_block( value, block ) # restore original component order
yield value
# generator yielding only chopped and valid blocks for a specified region
def get_chopped_blocks_for_region( index, src, region, species=None, mincols=0 ):
for block, idx, offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
yield block
def get_chopped_blocks_with_index_offset_for_region( index, src, region, species=None, mincols=0 ):
for block, idx, offset in index.get_as_iterator_with_index_and_offset( src, region.start, region.end ):
block = chop_block_by_region( block, src, region, species, mincols )
if block is not None:
yield block, idx, offset
# returns a filled region alignment for specified regions
def get_region_alignment( index, primary_species, chrom, start, end, strand='+', species=None, mincols=0, overwrite_with_gaps=True, temp_file_handler=None ):
if species is not None:
alignment = RegionAlignment( end - start, species, temp_file_handler=temp_file_handler )
else:
alignment = RegionAlignment( end - start, primary_species, temp_file_handler=temp_file_handler )
return fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand, species, mincols, overwrite_with_gaps )
# reduces a block to only the positions existing in the src provided
def reduce_block_by_primary_genome( block, species, chromosome, region_start ):
# returns ( start_offset, {species: text} )
# where each text is reduced to only the positions existing in the primary genome
src = "%s.%s" % ( species, chromosome )
ref = block.get_component_by_src( src )
start_offset = ref.start - region_start
species_texts = {}
for c in block.components:
species_texts[ c.src.split( '.' )[0] ] = list( c.text )
# remove locations which are gaps in the primary species, starting from the downstream end
for i in range( len( species_texts[ species ] ) - 1, -1, -1 ):
if species_texts[ species ][i] == '-':
for text in species_texts.values():
text.pop( i )
for spec, text in species_texts.items():
species_texts[spec] = ''.join( text )
return ( start_offset, species_texts )
# fills a region alignment
def fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand='+', species=None, mincols=0, overwrite_with_gaps=True ):
region = bx.intervals.Interval( start, end )
region.chrom = chrom
region.strand = strand
primary_src = "%s.%s" % ( primary_species, chrom )
# Order blocks overlaping this position by score, lowest first
blocks = []
for block, idx, offset in index.get_as_iterator_with_index_and_offset( primary_src, start, end ):
score = float( block.score )
for i in range( 0, len( blocks ) ):
if score < blocks[i][0]:
blocks.insert( i, ( score, idx, offset ) )
break
else:
blocks.append( ( score, idx, offset ) )
# gap_chars_tuple = tuple( GAP_CHARS )
gap_chars_str = ''.join( GAP_CHARS )
# Loop through ordered blocks and layer by increasing score
for block_dict in blocks:
for block in iter_blocks_split_by_species( block_dict[1].get_at_offset( block_dict[2] | |
in uvf.history
uvf2 = uvf.copy()
uvf2.flag_array = np.ones_like(uvf2.flag_array)
uvf.flag_array[0] = True
uvf2.flag_array[0] = False
uvf2.flag_array[1] = False
uvf3 = uvf | uvf2
assert pyuvdata_version_str in uvf3.history
def test_or_error():
uvf = UVFlag(test_f_file)
uvf2 = uvf.copy()
uvf.to_flag()
with pytest.raises(ValueError) as cm:
uvf.__or__(uvf2)
assert str(cm.value).startswith('UVFlag object must be in "flag" mode')
def test_or_add_history():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.history = "Different history"
uvf3 = uvf | uvf2
assert uvf.history in uvf3.history
assert uvf2.history in uvf3.history
assert "Flags OR'd with:" in uvf3.history
def test_ior():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.flag_array = np.ones_like(uvf2.flag_array)
uvf.flag_array[0] = True
uvf2.flag_array[0] = False
uvf2.flag_array[1] = False
uvf |= uvf2
assert np.all(uvf.flag_array[0])
assert not np.any(uvf.flag_array[1])
assert np.all(uvf.flag_array[2:])
def test_to_flag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
assert 'Converted to mode "flag"' in uvf.history
def test_to_flag_add_version_str():
uvf = UVFlag(test_f_file)
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_flag()
assert pyuvdata_version_str in uvf.history
def test_to_flag_threshold():
uvf = UVFlag(test_f_file)
uvf.metric_array = np.zeros_like(uvf.metric_array)
uvf.metric_array[0, 0, 4, 0] = 2.0
uvf.to_flag(threshold=1.0)
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
assert uvf.flag_array[0, 0, 4, 0]
assert np.sum(uvf.flag_array) == 1.0
assert 'Converted to mode "flag"' in uvf.history
def test_flag_to_flag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.to_flag()
assert uvf == uvf2
def test_to_flag_unknown_mode():
uvf = UVFlag(test_f_file)
uvf.mode = "foo"
with pytest.raises(ValueError) as cm:
uvf.to_flag()
assert str(cm.value).startswith("Unknown UVFlag mode: foo")
def test_to_metric_baseline():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.flag_array[:, :, 10] = True
uvf.flag_array[1, :, :] = True
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
uvf.to_metric(convert_wgts=True)
assert hasattr(uvf, "metric_array")
assert hasattr(uvf, "flag_array")
assert uvf.flag_array is None
assert uvf.mode == "metric"
assert 'Converted to mode "metric"' in uvf.history
assert np.isclose(uvf.weights_array[1], 0.0).all()
assert np.isclose(uvf.weights_array[:, :, 10], 0.0).all()
def test_to_metric_add_version_str():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.flag_array[:, :, 10] = True
uvf.flag_array[1, :, :] = True
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_metric(convert_wgts=True)
assert pyuvdata_version_str in uvf.history
def test_to_metric_waterfall():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[:, 10] = True
uvf.flag_array[1, :, :] = True
uvf.to_metric(convert_wgts=True)
assert np.isclose(uvf.weights_array[1], 0.0).all()
assert np.isclose(uvf.weights_array[:, 10], 0.0).all()
def test_to_metric_antenna():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc, mode="flag")
uvf.flag_array[10, :, :, 1, :] = True
uvf.flag_array[15, :, 3, :, :] = True
uvf.to_metric(convert_wgts=True)
assert np.isclose(uvf.weights_array[10, :, :, 1, :], 0.0).all()
assert np.isclose(uvf.weights_array[15, :, 3, :, :], 0.0).all()
def test_metric_to_metric():
uvf = UVFlag(test_f_file)
uvf2 = uvf.copy()
uvf.to_metric()
assert uvf == uvf2
def test_to_metric_unknown_mode():
uvf = UVFlag(test_f_file)
uvf.mode = "foo"
with pytest.raises(ValueError) as cm:
uvf.to_metric()
assert str(cm.value).startswith("Unknown UVFlag mode: foo")
def test_antpair2ind():
uvf = UVFlag(test_f_file)
ind = uvf.antpair2ind(uvf.ant_1_array[0], uvf.ant_2_array[0])
assert np.all(uvf.ant_1_array[ind] == uvf.ant_1_array[0])
assert np.all(uvf.ant_2_array[ind] == uvf.ant_2_array[0])
def test_antpair2ind_nonbaseline():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.antpair2ind(0, 3)
assert str(cm.value).startswith(
"UVFlag object of type "
+ uvf.type
+ " does not contain antenna "
+ "pairs to index."
)
def test_baseline_to_antnums():
uvf = UVFlag(test_f_file)
a1, a2 = uvf.baseline_to_antnums(uvf.baseline_array[0])
assert a1 == uvf.ant_1_array[0]
assert a2 == uvf.ant_2_array[0]
def test_get_baseline_nums():
uvf = UVFlag(test_f_file)
bls = uvf.get_baseline_nums()
assert np.array_equal(bls, np.unique(uvf.baseline_array))
def test_get_antpairs():
uvf = UVFlag(test_f_file)
antpairs = uvf.get_antpairs()
for a1, a2 in antpairs:
ind = np.where((uvf.ant_1_array == a1) & (uvf.ant_2_array == a2))[0]
assert len(ind) > 0
for a1, a2 in zip(uvf.ant_1_array, uvf.ant_2_array):
assert (a1, a2) in antpairs
def test_missing_nants_telescope(tmp_path):
testfile = str(tmp_path / "test_missing_Nants.h5")
shutil.copyfile(test_f_file, testfile)
with h5py.File(testfile, "r+") as f:
del f["/Header/Nants_telescope"]
with uvtest.check_warnings(
UserWarning, match="Nants_telescope not available in file",
):
uvf = UVFlag(testfile)
uvf2 = UVFlag(test_f_file)
uvf2.Nants_telescope = 2047
assert uvf == uvf2
os.remove(testfile)
def test_combine_metrics_inplace():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.metric_array *= 2
uvf3 = uvf.copy()
uvf3.metric_array *= 3
uvf.combine_metrics([uvf2, uvf3])
factor = np.sqrt((1 + 4 + 9) / 3.0) / 2.0
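# The expected factor assumes combine_metrics quadratic-means (RMS) the three inputs [m, 2m, 3m]:
# RMS = |m| * sqrt((1 + 4 + 9) / 3); dividing by 2 expresses that relative to uvf2's metric (2m).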
assert np.allclose(uvf.metric_array, np.abs(uvf2.metric_array) * factor)
def test_combine_metrics_not_inplace():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.metric_array *= 2
uvf3 = uvf.copy()
uvf3.metric_array *= 3
uvf4 = uvf.combine_metrics([uvf2, uvf3], inplace=False)
factor = np.sqrt((1 + 4 + 9) / 3.0)
assert np.allclose(uvf4.metric_array, np.abs(uvf.metric_array) * factor)
def test_combine_metrics_not_uvflag():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
with pytest.raises(ValueError) as cm:
uvf.combine_metrics("bubblegum")
assert str(cm.value).startswith('"others" must be UVFlag or list of UVFlag objects')
def test_combine_metrics_not_metric():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.to_flag()
with pytest.raises(ValueError) as cm:
uvf.combine_metrics(uvf2)
assert str(cm.value).startswith('UVFlag object and "others" must be in "metric"')
def test_combine_metrics_wrong_shape():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.combine_metrics(uvf2)
assert str(cm.value).startswith("UVFlag metric array shapes do not match.")
def test_combine_metrics_add_version_str():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.metric_array *= 2
uvf3 = uvf.copy()
uvf3.metric_array *= 3
uvf4 = uvf.combine_metrics([uvf2, uvf3], inplace=False)
assert pyuvdata_version_str in uvf4.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_super(uvdata_obj):
class TestClass(UVFlag):
def __init__(
self,
indata,
mode="metric",
copy_flags=False,
waterfall=False,
history="",
label="",
test_property="prop",
):
super(TestClass, self).__init__(
indata,
mode=mode,
copy_flags=copy_flags,
waterfall=waterfall,
history=history,
label=label,
)
self.test_property = test_property
uv = uvdata_obj
tc = TestClass(uv, test_property="test_property")
# UVFlag.__init__ is tested, so just see if it has a metric array
assert hasattr(tc, "metric_array")
# Check that it has the property
assert tc.test_property == "test_property"
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_flags2waterfall(uvdata_obj):
uv = uvdata_obj
np.random.seed(0)
uv.flag_array = np.random.randint(0, 2, size=uv.flag_array.shape, dtype=bool)
wf = flags2waterfall(uv)
assert np.allclose(np.mean(wf), np.mean(uv.flag_array))
assert wf.shape == (uv.Ntimes, uv.Nfreqs)
wf = flags2waterfall(uv, keep_pol=True)
assert wf.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
# Test external flag_array
uv.flag_array = np.zeros_like(uv.flag_array)
f = np.random.randint(0, 2, size=uv.flag_array.shape, dtype=bool)
wf = flags2waterfall(uv, flag_array=f)
assert np.allclose(np.mean(wf), np.mean(f))
assert wf.shape == (uv.Ntimes, uv.Nfreqs)
# UVCal version
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvc.flag_array = np.random.randint(0, 2, size=uvc.flag_array.shape, dtype=bool)
wf = flags2waterfall(uvc)
assert np.allclose(np.mean(wf), np.mean(uvc.flag_array))
assert wf.shape == (uvc.Ntimes, uvc.Nfreqs)
wf = flags2waterfall(uvc, keep_pol=True)
assert wf.shape == (uvc.Ntimes, uvc.Nfreqs, uvc.Njones)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_flags2waterfall_errors(uvdata_obj):
# First argument must be UVData or UVCal object
with pytest.raises(ValueError) as cm:
flags2waterfall(5)
assert str(cm.value).startswith(
"flags2waterfall() requires a UVData or " + "UVCal object"
)
uv = uvdata_obj
# Flag array must have same shape as uv.flag_array
with pytest.raises(ValueError) as cm:
flags2waterfall(uv, np.array([4, 5]))
assert str(cm.value).startswith("Flag array must align with UVData or UVCal")
def test_and_rows_cols():
d = np.zeros((10, 20), np.bool_)
d[1, :] = True
d[:, 2] = True
d[5, 10:20] = True
d[5:8, 5] = True
o = and_rows_cols(d)
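# Expectation: the fully flagged row (1) and column (2) stay fully flagged, while the partially
# flagged row 5 and column 5 must not be expanded into full rows/columns by and_rows_cols.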
assert o[1, :].all()
assert o[:, 2].all()
assert not o[5, :].all()
assert not o[:, 5].all()
def test_select_waterfall_errors(uvf_from_waterfall):
uvf = uvf_from_waterfall
with pytest.raises(ValueError) as cm:
uvf.select(antenna_nums=[0, 1, 2])
assert str(cm.value).startswith("Cannot select on antenna_nums with waterfall")
with pytest.raises(ValueError) as cm:
uvf.select(bls=[(0, 1), (0, 2)])
assert str(cm.value).startswith("Cannot select on bls with waterfall")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize("dimension", list(range(1, 4)))
def test_select_blt_inds(input_uvf, uvf_mode, dimension):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
if uvf.type == "baseline":
n_select = uvf.Nblts
else:
n_select = uvf.Ntimes
blt_inds = np.random.choice(n_select, size=n_select // 2, replace=False)
new_nblts = n_select // 2
if dimension == 1:
blt_inds = np.atleast_1d(blt_inds)
elif dimension == 2:
blt_inds = np.atleast_2d(blt_inds)
elif dimension == 3:
blt_inds = np.atleast_3d(blt_inds)
uvf1 = uvf.select(blt_inds=blt_inds, inplace=False)
# test the data was extracted correctly for each case
for param_name, new_param in zip(uvf._data_params, uvf1.data_like_parameters):
old_param = getattr(uvf, param_name)
if uvf.type == "baseline":
assert np.allclose(old_param[blt_inds.squeeze()], new_param)
if uvf.type == "antenna":
assert np.allclose(old_param[:, :, :, blt_inds.squeeze()], new_param)
if uvf.type == "waterfall":
assert np.allclose(old_param[blt_inds.squeeze()], new_param)
if uvf.type == "baseline":
assert uvf1.Nblts == new_nblts
else:
assert uvf1.Ntimes == new_nblts
# verify that histories are different
assert not uvutils._check_histories(uvf.history, uvf1.history)
if uvf.type == "baseline":
addition_str = "baseline-times"
else:
addition_str = "times"
assert uvutils._check_histories(
uvf.history + f" Downselected to specific {addition_str} using pyuvdata.",
uvf1.history,
)
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize(
"select_kwargs,err_msg",
[
({"blt_inds": []}, "No baseline-times were found"),
({"blt_inds": [int(1e9)]}, "blt_inds contains indices that are too large"),
({"blt_inds": [-1]}, "blt_inds contains indices that are negative"),
],
)
def test_select_blt_inds_errors(input_uvf, uvf_mode, select_kwargs, err_msg):
uvf = input_uvf
# coding=utf-8
from .core import *
from collections import deque
from typing import Set, Tuple
import math
class GreedyShortestDeadlineFirstScheduler(BaseScheduler):
def __init__(self, sim: Sim,
incremental: bool = True, oracle=True,
admission_control_threshold_low: float = 0.8,
admission_control_threshold_high: float = 1.1
):
super().__init__(sim=sim,
incremental=incremental,
oracle=oracle,
activate_acp=not oracle,
admission_control_threshold_low=admission_control_threshold_low,
admission_control_threshold_high=admission_control_threshold_high)
self.admission_control_threshold_low = admission_control_threshold_low
self.admission_control_threshold_high = admission_control_threshold_high
assert admission_control_threshold_low < admission_control_threshold_high
if not self.oracle:
self.rate_estimator: Dict[int, RateEstimator] = {}
def is_always_able_to_build_full_path(self):
return self.oracle and not self.incremental
def apply_scheduling_logic_for_packet(self, packet: Packet):
sff_props: SFF.Props = self.sim.props.sff
# get the packet from the scheduler's queue
if self.requires_queues_per_class():
popped_packet = self.mySFF.packet_queue_per_class[Flow.get_packet_class_of_packet(packet)].pop()
else:
popped_packet = self.mySFF.packet_queue.pop()
assert popped_packet == packet
packet.timeQueueScheduling += packet.get_delta_of_time_mark()
self.mark_time_scheduling_starts()
# this packet starts from this SFF, so search from this point
p_at_sff: SFF = self.mySFF
if self.sim.DEBUG:
print(". heuristic scheduler @ {0}".format(p_at_sff))
scheduled_path = []
incremental_path = False
# for each remaining sf type of the requested chain of this packet
while 0 < len(packet.toBeVisited) and not incremental_path:
next_sf_type = packet.toBeVisited.pop(0)
# we need to get a SFI for this corresponding sf type
# try to get this SFI from the SFF where this packet is currently
# (p_at_sff)
sff_to_check = sff_props.allSFFs if self.oracle else [p_at_sff.id]
sfi_to_check = []
# get a list of all possible SFIs
# calculate for each of these SFIs the cost value
best_latency = -1
for sffIDToAsk in sff_to_check:
sff_to_ask = sff_props.allSFFs[sffIDToAsk]
delay_of_sff_connection = 0 if p_at_sff == sff_to_ask else SFF.get_multi_hop_latency_for(self.sim,
p_at_sff.id,
sff_to_ask.id)
if best_latency != -1 and best_latency <= delay_of_sff_connection:
# we skip this sff if the latency to this sff is bigger than the best option we found so far
continue
if next_sf_type in sff_to_ask.SFIsPerType:
for sfi in sff_to_ask.SFIsPerType[next_sf_type]:
delay = sfi.get_expected_waiting_time() + sfi.get_expected_processing_time()
if p_at_sff != sff_to_ask:
delay += SFF.get_delay_of_connection(p_at_sff, sff_to_ask)
if best_latency == -1:
best_latency = delay
best_latency = min(best_latency, delay)
sfi_to_check.append((delay, sff_to_ask, sfi))
if len(sfi_to_check) == 0:
if not self.oracle:
raise NameError("something is going wrong: I don't have any SFI which could serve this packet, "
                "but the ACP should have handled this case!?")
packet.realTimeScheduling += self.get_time_delta_of_scheduling()
try:
    raise SchedulingFailure("failed to find a path for {0}".format(str(packet.flow.sfTypeChain)))
finally:
    packet.reject()
# find the sfi with the lowest delay till packet is processed
best_sfi = sfi_to_check[0]
for sfi_tuple in sfi_to_check:
if sfi_tuple[0] <= best_sfi[0]:
# update the best sfi, but for equal values we prefer to stay at our sff
if sfi_tuple[0] != best_sfi[0] or sfi_tuple[1] == self.mySFF:
best_sfi = sfi_tuple
sff_to_ask = best_sfi[1]
sfi = best_sfi[2]
if self.sim.DEBUG:
print(". found a SFI at SFF {0}".format(sff_to_ask))
# do we need to go to another SFF, or do we stay at the
# current SFF?
if p_at_sff != sff_to_ask:
# we need to go to a different SFF; before that,
# the packet has to return to its current SFF, so that
# if the packet is at an SFI, it first goes back
# to the SFF of that SFI and then on to the next SFF.
# Hence, append p_at_sff if the previous path element was
# an SFI
if (len(scheduled_path) >
0 and scheduled_path[-1][0] == SFI.__name__):
scheduled_path.append((SFF.__name__, p_at_sff))
path_to_other_sff = SFF.get_multi_hop_path_for(self.sim, p_at_sff.id, sff_to_ask.id)
if self.sim.DEBUG:
print(". path to this guy contains {0} intermediate SFFs".format(len(path_to_other_sff)))
for next_sff in path_to_other_sff:
scheduled_path.append((SFF.__name__, sff_props.allSFFs[next_sff]))
p_at_sff = sff_to_ask
scheduled_path.append((SFI.__name__, sfi))
# if we are in incremental scheduling mode, we set the incremental_path flag,
# so that we stop scheduling from here on
if self.incremental:
# incremental_path, so we found a different SFF,
# hence we stop scheduling here
incremental_path = True
# and then go back to the SFF for scheduling
scheduled_path.append((SFF.__name__, sff_to_ask))
if not incremental_path:
# finally, add the egress SFF
if p_at_sff.id != packet.flow.desiredEgressSSFid:
# go back to the sff
scheduled_path.append((SFF.__name__, p_at_sff))
path_to_dest = SFF.get_multi_hop_path_for(self.sim, p_at_sff.id, packet.flow.desiredEgressSSFid)
for sff_id in path_to_dest:
scheduled_path.append((SFF.__name__, sff_props.allSFFs[sff_id]))
else:
scheduled_path.append((SFF.__name__, sff_props.allSFFs[packet.flow.desiredEgressSSFid]))
if self.sim.DEBUG:
Packet.debug_print_path(scheduled_path)
packet.fullPath += scheduled_path
self.scheduling_attempts += 1
if packet.id == self.sim.PACKET_ID_TO_DEBUG:
print("** debug packet visited the scheduler, current status:")
Packet.debug_packet(packet)
packet.realTimeScheduling += self.get_time_delta_of_scheduling()
self.mySFF.handle_packet_from_scheduler(packet)
class LoadUnawareRoundRobinScheduler(BaseScheduler):
@Sim.register_reset_global_fields
class Props:
def __init__(self):
self.sfi_round_robin_marker = dict()
def __init__(self, sim: Sim, incremental: bool = True, oracle=True):
super().__init__(sim=sim,
incremental=incremental,
oracle=oracle,
activate_acp=False)
def applies_round_robin(self):
return True
def is_always_able_to_build_full_path(self):
return not self.incremental
def apply_scheduling_logic_for_packet(self, packet: Packet):
sfi_props: SFI.Props = self.sim.props.sfi
sff_props: SFF.Props = self.sim.props.sff
# get the packet from the scheduler's queue
if self.requires_queues_per_class():
popped_packet = self.mySFF.packet_queue_per_class[Flow.get_packet_class_of_packet(packet)].pop()
else:
popped_packet = self.mySFF.packet_queue.pop()
assert popped_packet == packet
packet.timeQueueScheduling += packet.get_delta_of_time_mark()
self.mark_time_scheduling_starts()
# this packet starts from this SFF, so search from this point
p_at_sff_id = self.mySFF.id
scheduled_path = []
# for each remaining sf type of the requested chain of this packet
while 0 < len(packet.toBeVisited):
next_sf_type = packet.toBeVisited.pop(0)
# get the sfi which has to serve this packet
if self.static_sfi_rates_per_sf_cum_weights is None:
self.update_cum_weights_of_sfi_rates(include_own_sff=True)
assert isinstance(next_sf_type, int)
if (next_sf_type not in self.static_sfi_rates_per_sf_sorted_sfi
or next_sf_type not in self.static_sfi_rates_per_sf_cum_weights
or len(self.static_sfi_rates_per_sf_sorted_sfi[next_sf_type]) == 0
or len(self.static_sfi_rates_per_sf_cum_weights[next_sf_type]) == 0):
packet.realTimeScheduling += self.get_time_delta_of_scheduling()
packet.reject()
if self.sim.DEBUG:
print(f"I'm not aware of any SFI of the required type:{next_sf_type}, so I have to reject the packet")
raise SchedulingFailure(f"failed to find a path for {packet}")
elif (len(self.static_sfi_rates_per_sf_sorted_sfi[next_sf_type]) !=
len(self.static_sfi_rates_per_sf_cum_weights[next_sf_type])):
raise NameError(f'data structures broken')
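# Load-unaware selection: draw an SFI at random with probability proportional to its static
# service rate (via the precomputed cumulative weights), ignoring current queue lengths.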
target_sfi_id = self.sim.random.choices(self.static_sfi_rates_per_sf_sorted_sfi[next_sf_type],
cum_weights=self.static_sfi_rates_per_sf_cum_weights[next_sf_type],
k=1)[0]
target_sff_id = sfi_props.all_sfi[target_sfi_id].sffId
# is this a different sff of where we are currently?
if p_at_sff_id != target_sff_id:
# we need to go to a different SFF; before that,
# the packet has to return to its current SFF, so that
# if the packet is at an SFI, it first goes back
# to the SFF of that SFI and then on to the next SFF
# (we need to check this, because this scheduler supports scheduling the whole path at once).
# Hence, append p_at_sff if the previous path element was
# an SFI
if (len(scheduled_path) >
0 and scheduled_path[-1][0] == SFI.__name__):
scheduled_path.append((SFF.__name__, sff_props.allSFFs[p_at_sff_id]))
path_to_other_sff = SFF.get_multi_hop_path_for(self.sim, p_at_sff_id, target_sff_id)
for p in path_to_other_sff:
scheduled_path.append((SFF.__name__, sff_props.allSFFs[p]))
p_at_sff_id = target_sff_id
scheduled_path.append((SFI.__name__, sfi_props.all_sfi[target_sfi_id]))
if self.incremental:
scheduled_path.append((SFF.__name__, sff_props.allSFFs[target_sff_id]))
break
if self.sim.DEBUG:
Packet.debug_print_path(scheduled_path)
packet.fullPath += scheduled_path
self.scheduling_attempts += 1
if packet.id == self.sim.PACKET_ID_TO_DEBUG:
print("** debug packet visited the scheduler, current status:")
Packet.debug_packet(packet)
packet.realTimeScheduling += self.get_time_delta_of_scheduling()
self.mySFF.handle_packet_from_scheduler(packet)
class MppScheduler(BaseScheduler):
@Sim.register_reset_global_fields
class Props:
def __init__(self):
self.allow_up_to_x_packets_underway_per_server: int = None
self.do_sanity_checks: bool = False
self.map_server_to_classes: dict = None
self.r_matrix = None
self.batch_scheduling = None
self.blocked_sfi: Set[SFI] = set()
self.packet_underway_counter_per_server: Dict[Server, int] = None
def __init__(self,
sim: Sim,
incremental: bool = True, oracle=True,
block_sfi_while_packet_on_wire: bool = False,
consider_alpha_by_using_timeouts: bool = True,
allow_up_to_x_packets_underway_per_server: int = 1,
admission_control_threshold_low: float = 0.1,
admission_control_threshold_high: float = 1.3,
batch_scheduling: int = 1):
super().__init__(sim=sim,
incremental=incremental,
oracle=oracle,
activate_acp=not oracle,
admission_control_threshold_high=admission_control_threshold_high,
admission_control_threshold_low=admission_control_threshold_low)
self.block_sfi_while_packet_on_wire = block_sfi_while_packet_on_wire
self.consider_alpha_by_using_timeouts = consider_alpha_by_using_timeouts
self.free_server_count = -1 # important that we init with -1
self.sim.props.mpp_scheduler.allow_up_to_x_packets_underway_per_server = allow_up_to_x_packets_underway_per_server
self.assert_on_reject = False
self.accessible_sf = None
if self.sim.props.mpp_scheduler.batch_scheduling is None:
self.sim.props.mpp_scheduler.batch_scheduling = batch_scheduling
else:
assert self.sim.props.mpp_scheduler.batch_scheduling == batch_scheduling
if allow_up_to_x_packets_underway_per_server < batch_scheduling:
raise NameError(f"Invalid configuration: batch_scheduling ({batch_scheduling}) > "
f"allow_underway_per_server ({allow_up_to_x_packets_underway_per_server})")
if allow_up_to_x_packets_underway_per_server < 1:
raise NameError("Invalid configuration, at least 1 packets needs to be underway")
if not self.incremental:
raise NameError("%s Scheduler does support incremental scheduling")
def supports_cpu_policy(self, cpu_policy: ServerCpuPolicy):
if cpu_policy == ServerCpuPolicy.one_at_a_time:
return True
return False
def is_always_able_to_build_full_path(self):
return False
def requires_queues_per_class(self):
return True
def get_load_of_sfis_of_sf(self, sf: int):
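# Offered-load estimate rho = lambda / mu: the estimated arrival rate for this SF divided by
# the service rate configured for it at the local SFF (presumably the aggregate over its SFIs).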
return self.get_arrival_rate_estimate(sf) / self.mySFF.service_rate_per_sf[sf]
def cache_map_server_to_classes(self):
print(".. cache map server->classes")
sfi_props: SFI.Props = self.sim.props.sfi
flow_props: Flow.Props = self.sim.props.flow
mpp_sched_props: MppScheduler.Props = self.sim.props.mpp_scheduler
assert (mpp_sched_props.map_server_to_classes is None)
mpp_sched_props.map_server_to_classes = dict()
if self.sim.DEBUG:
print("MppScheduler creates map server->classes")
mpp_sched_props.packet_underway_counter_per_server = {s: 0 for s in self.sim.props.server.all_servers}
for sfi_id in sfi_props.all_sfi:
sfi_of_class = sfi_props.all_sfi[sfi_id]
server = sfi_of_class.server
| |
"""This module contains all functions required to perform the visibility computations.
These computations are based on a paper by <NAME>, <NAME> and <NAME>:
Rapid Satellite-to-Site Visibility Determination Based on Self-Adaptive Interpolation Technique.
https://arxiv.org/abs/1611.02402
"""
# pylint: disable=too-many-locals
from __future__ import division
import numpy as np
import mpmath as mp
from .cubic_equation_solver import solve
from .coord_conversion import lla_to_ecef
from .interpolator import Interpolator
from ..tuples import TimeInterval
from ..errors import VisibilityFinderError
class VisibilityFinder(object):
"""An adaptive visibility finder used to determine the visibility interval of a point on earth
from a satellite.
"""
def __init__(self, satellite_id, site, interval):
"""Args:
satellite_id (integer): Satellite ID in the database
site (tuple:float): The site location as a lat/lon tuple
interval (tuple:float): The search window as a start_time, end_time tuple
"""
self.satellite_id = satellite_id
self.site_ecef = lla_to_ecef(site[0], site[1], 0)
self.interval = interval
self.sat_irp = Interpolator(satellite_id)
def profile_determine_visibility(self, brute_force=False):
"""Profile's the algorithm.
Args:
brute_force (boolean): if true runs the brute-force method instead.
Returns:
The return value of the algorithm
"""
import cProfile
import pstats
import sys
profile = cProfile.Profile()
profile.enable()
if brute_force is False:
retval = self.determine_visibility()
else:
retval = self.determine_visibility_brute_force()
profile.disable()
stats = pstats.Stats(profile, stream=sys.stdout)
stats.strip_dirs().sort_stats('tottime').print_stats(50)
return retval
def visibility(self, posix_time):
"""Calculate the visibility function of the satellite and the site at a given time.
Args:
posix_time (float): The time to evaluate the visibility function at
Returns:
The value of the visibility function evaluated at the provided time.
Note:
This function assumes the FOV of the sensors on the satellite are 180 degrees
"""
# Since most helper functions don't play well with mpmath floats we have to perform a lossy
# conversion.
posix_time = float(posix_time)
site_pos = np.array(self.site_ecef) * mp.mpf(1.0)
site_normal_pos = site_pos / mp.norm(site_pos)
sat_pos = self.sat_irp.interpolate(posix_time)[0]
sat_site = np.subtract(sat_pos, site_pos)
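# The visibility metric is the cosine of the angle between the local vertical (site normal) and
# the site-to-satellite vector; it is positive whenever the satellite is above the local horizon.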
return mp.mpf(mp.fdot(sat_site, site_normal_pos) / mp.norm(sat_site))
def visibility_first_derivative(self, posix_time):
"""Calculate the derivative of the visibility function of the satellite and the site at a
given time.
Args:
posix_time (float): The UNIX time to evaluate the derivative visibility function at.
Returns:
The value of the first derivative of the visibility function evaluated at the provided time.
"""
# Since most helper functions don't play well with mpmath floats we have to perform a lossy
# conversion.
posix_time = float(posix_time)
sat_pos_vel = np.array(self.sat_irp.interpolate(posix_time)) * mp.mpf(1.0)
site_pos = np.array(self.site_ecef) * mp.mpf(1.0)
pos_diff = np.subtract(sat_pos_vel[0], site_pos)
vel_diff = sat_pos_vel[1]
site_normal_pos = site_pos / mp.norm(site_pos)
site_normal_vel = [0, 0, 0]
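# Quotient-rule derivative of (r . n_hat) / |r| with r = sat - site; the site normal is treated
# as constant in the ECEF frame, so its velocity contribution is zero.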
first_term = mp.mpf(((1.0 / mp.norm(pos_diff)) *
(mp.fdot(vel_diff, site_normal_pos) +
mp.fdot(pos_diff, site_normal_vel))))
second_term = mp.mpf(((1.0 / mp.power((mp.norm(pos_diff)), 3)) *
mp.fdot(pos_diff, vel_diff) * mp.fdot(pos_diff, site_normal_pos)))
return first_term - second_term
# pylint: disable=invalid-name
def visibility_fourth_derivative_max(self, sub_interval):
"""Calculate the maximum of the fourth derivative of the visibility function of the
satellite through a given sub interval.
Args:
    sub_interval (tuple): A tuple containing the timestamps that mark the boundaries of
        the sub-interval under consideration.
Returns:
    The maximum absolute value of the fourth derivative of the visibility function over the
    given sub-interval.
Note:
This function uses the approximation defined in the Rapid Satellite-to-Site Visibility
paper.
"""
start_time, end_time = sub_interval
interval_length = end_time - start_time
mid_time = start_time + (interval_length / 2)
# In order to approximate the fourth order derivative, we need to evaluate both the
# visibility function and its first derivative at 3 points:
# 1- The interval start
# 2- The interval midpoint
# 3- The interval end
visibility_start = mp.mpf(self.visibility(start_time))
visibility_mid = mp.mpf(self.visibility(mid_time))
visibility_end = mp.mpf(self.visibility(end_time))
visibility_d_start = mp.mpf(self.visibility_first_derivative(start_time))
visibility_d_mid = mp.mpf(self.visibility_first_derivative(mid_time))
visibility_d_end = mp.mpf(self.visibility_first_derivative(end_time))
# Calculating the a5 and a4 constants used in the approximation
a5 = mp.mpf((((24.0 / (interval_length ** 5.0)) * (visibility_start - visibility_end)) +
((4.0 / (interval_length ** 4.0)) *
(visibility_d_start + (4.0 * visibility_d_mid) + visibility_d_end))))
# Since a4's computation is complex, it was split into several parts
a4_first_term = mp.mpf(((4.0 / (interval_length ** 4.0)) *
(visibility_start + (4.0 * visibility_mid) + visibility_end)))
a4_second_term = mp.mpf(((4.0 / (interval_length ** 4.0)) *
((visibility_d_start * ((2.0 * start_time) + (3.0 * end_time))) +
((10.0 * visibility_d_mid) * (start_time + end_time)) +
(visibility_d_end * ((3.0 * start_time) + (2.0 * end_time))))))
a4_third_term = mp.mpf(((24.0 / (interval_length ** 5.0)) *
((visibility_start * ((2.0 * start_time) + (3.0 * end_time))) -
(visibility_end * ((3.0 * start_time) + (2.0 * end_time))))))
a4 = a4_first_term - a4_second_term - a4_third_term
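# The fourth derivative of the quintic model is linear in t (120 * a5 * t + 24 * a4), so its
# maximum magnitude over the sub-interval is attained at one of the two endpoints.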
return max(abs((120 * a5 * start_time) + (24 * a4)), abs((120 * a5 * end_time) + (24 * a4)))
# pylint: enable=invalid-name
def bound_time_step_error(self, time_interval, error):
"""Corrects the time step for the current sub interval to mach the desired error rate.
Args:
time_interval (tuple): The two UNIX timestamps that bound the desired sub-interval
error (float): The desired approximate error in results. This error is the max deviation
presented as the difference between the approximated and real value of
the visibility function
Returns:
The new time step to use in order to match the approximate error.
"""
# First we compute the maximum of the fourth derivative as per eq 8 in the referenced
# paper
visibility_4_prime_max = self.visibility_fourth_derivative_max(time_interval)
# Then we use the error and eq 9 to calculate the new time_step.
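# This is consistent with the classical cubic Hermite interpolation error bound
# |f - H| <= (M4 / 384) * h^4 solved for h: h = (384 * error / M4)^(1/4), written here as
# (16 * error / (M4 / 24))^(1/4) because 384 = 16 * 24.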
return mp.power((16.0 * mp.mpf(error)) / (visibility_4_prime_max / 24), 0.25)
def find_approx_coeffs(self, time_interval):
"""Calculates the coefficients of the Hermite approximation to the visibility function for a
given interval.
Args:
time_interval (tuple): The two UNIX timestamps that bound the desired interval
Returns:
An array containing the coefficients for the Hermite approximation of the
visibility function
Note:
This function assumes the FOV of the sensors on the satellite are 180 degrees
"""
start_time, end_time = time_interval
time_step = mp.mpf(end_time - start_time)
visibility_start = mp.mpf(self.visibility(start_time))
visibility_end = mp.mpf(self.visibility(end_time))
visibility_first_start = mp.mpf(self.visibility_first_derivative(start_time))
visibility_first_end = mp.mpf(self.visibility_first_derivative(end_time))
const = (((-2 * (start_time ** 3) * visibility_start) / (time_step ** 3)) +
((2 * (start_time ** 3) * visibility_end) / (time_step ** 3)) +
((-1 * (start_time ** 2) * end_time * visibility_first_end) / (time_step ** 2)) +
((-1 * 3 * (start_time ** 2) * visibility_start) / (time_step ** 2)) +
((3 * (start_time ** 2) * visibility_end) / (time_step ** 2)) +
((-1 * start_time * (end_time ** 2) * visibility_first_start) / (time_step ** 2)) +
visibility_start)
t_coeffs = (((6 * (start_time ** 2) * visibility_start) / (time_step ** 3)) +
((-1 * 6 * (start_time ** 2) * visibility_end) / (time_step ** 3)) +
(((start_time ** 2) * visibility_first_end) / (time_step ** 2)) +
((2 * start_time * end_time * visibility_first_start) / (time_step ** 2)) +
((2 * start_time * end_time * visibility_first_end) / (time_step ** 2)) +
((6 * start_time * visibility_start) / (time_step ** 2)) +
((-1 * 6 * start_time * visibility_end) / (time_step ** 2)) +
(((end_time ** 2) * visibility_first_start) / (time_step ** 2)))
t_2_coeffs = (((-1 * 6 * start_time * visibility_start) / (time_step ** 3)) +
((6 * start_time * visibility_end) / (time_step ** 3)) +
((-1 * start_time * visibility_first_start) / (time_step ** 2)) +
((-1 * 2 * start_time * visibility_first_end) / (time_step ** 2)) +
((-1 * 2 * end_time * visibility_first_start) / (time_step ** 2)) +
((-1 * end_time * visibility_first_end) / (time_step ** 2)) +
((-1 * 3 * visibility_start) / (time_step ** 2)) +
((3 * visibility_end) / (time_step ** 2)))
t_3_coeffs = (((2 * visibility_start) / (time_step ** 3)) +
((-1 * 2 * visibility_end) / (time_step ** 3)) +
((visibility_first_start) / (time_step ** 2)) +
((visibility_first_end) / (time_step ** 2)))
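# These are the monomial coefficients [t^3, t^2, t, 1] of the cubic Hermite interpolant matching
# the visibility value and its first derivative at both endpoints; its real roots approximate the
# times at which the visibility function changes sign (rise/set).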
return [t_3_coeffs, t_2_coeffs, t_coeffs, const]
def find_visibility(self, time_interval):
"""Given a sub interval, this function uses the adaptive Hermite interpolation method to
calculate the roots of the visibility function and hence the visibility period.
Args:
time_interval (tuple): The subinterval over which the visibility period is to be
calculated.
"""
roots = solve(*self.find_approx_coeffs(time_interval))
return roots
def determine_visibility(self, error=0.001, tolerance_ratio=0.1, max_iter=100):
"""Using the self adapting interpolation algorithm described in the cited paper, this
function returns the sub-intervals for which the satellite is visible from the site.
"""
# tests/p4transfer/test_move.py
from __future__ import annotations
import logging
import pytest
import p4transfer
@pytest.mark.parametrize("filetype", ["text", "binary"])
def test_move(source, target, default_transfer_config, filetype):
"""Test for replicating a basic move."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
renamed_file = source.local_path("inside/new/new_file")
source.p4("add", "-t", filetype, original_file)
source.p4("submit", "-d", "original_file added")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("submit", "-d", "original/original_file -> new/new_file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 2
assert len(target.p4("changes")) == 2
change = target.p4("describe", "1")[0]
assert len(change["depotFile"]) == 1
assert change["depotFile"][0] == "//depot/import/original/original_file"
change = target.p4("describe", "2")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/add"
assert change["action"][1] == "move/delete"
def test_move_then_move_back(source, target, default_transfer_config):
"""Test for replicating a move then a move back to the original file."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
renamed_file = source.local_path("inside/new/new_file")
source.p4("add", original_file)
source.p4("submit", "-d", "original_file added")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("submit", "-d", "original/original_file -> new/new_file")
source.p4("edit", renamed_file)
source.p4("move", renamed_file, original_file)
source.p4("submit", "-d", "new/new_file -> original/original_file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
change = target.p4("describe", "3")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/delete"
assert change["action"][1] == "move/add"
def test_move_inside_to_outside(source, target, default_transfer_config):
"""Test for replicating a move from a mapped location to an unmapped location."""
inside_file = source.local_path("inside/inside_file")
inside_file.write_bytes(b"Some content")
outside_file = source.local_path("outside/outside_file")
source.p4("add", inside_file)
source.p4("submit", "-d", "inside_file added")
source.p4("edit", inside_file)
source.p4("move", inside_file, outside_file)
source.p4("submit", "-d", "inside/inside_file -> outside/outside_file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 2
assert len(target.p4("changes")) == 2
change = target.p4("describe", "2")[0]
assert len(change["depotFile"]) == 1
assert change["depotFile"][0] == "//depot/import/inside_file"
assert change["action"][0] == "delete"
def test_move_outside_to_inside(source, target, default_transfer_config):
"""Test for replicating a move from an unmapped location to a mapped location."""
outside_file = source.local_path("outside/outside_file")
outside_file.write_bytes(b"Some content")
inside_file = source.local_path("inside/inside_file")
source.p4("add", outside_file)
source.p4("submit", "-d", "outside_file added")
source.p4("edit", outside_file)
source.p4("move", outside_file, inside_file)
source.p4("submit", "-d", "outside/outside_file -> inside/inside_file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 2
assert len(target.p4("changes")) == 1
change = target.p4("describe", "1")[0]
assert len(change["depotFile"]) == 1
assert change["depotFile"][0] == "//depot/import/inside_file"
assert change["action"][0] == "add"
@pytest.fixture
def special_move_transfer_config(default_transfer_config):
# Adjust the view settings for test_move_outside_to_inside_then_edit.
transfer_config = default_transfer_config.copy()
transfer_config[
"views"
] = """
//depot/inside/main/Dir/... //target/inside/main/Dir/...
"""
return transfer_config
def test_move_outside_to_inside_then_edit(source, target, special_move_transfer_config):
"""Test of a move from an unmapped location to a mapped location then edit."""
depot = target.fetch("depot", "target")
target.save("depot", depot)
# Temporarily create different source client.
source_client = source.fetch("client", source.client_name)
source_client._view = [
f"//depot/inside/main/Dir/... //{source.client_name}/main/Dir/...",
f"//depot/outside/... //{source.client_name}/outside/...",
]
source.save("client", source_client)
original_file1 = source.local_path("outside/original_file1")
original_file2 = source.local_path("outside/original_file2")
renamed_file1 = source.local_path("main/Dir/new_file1")
renamed_file2 = source.local_path("main/Dir/new_file2")
original_file1.write_bytes(b"Some content")
original_file2.write_bytes(b"Some content")
source.p4("add", original_file1, original_file2)
source.p4("submit", "-d", "adding original files")
source.p4("edit", original_file1, original_file2)
source.p4("move", original_file1, renamed_file1)
source.p4("move", original_file2, renamed_file2)
source.p4("submit", "-d", "renaming files")
source_client = source.fetch("client", source.client_name)
source_client._view = [
f"//depot/inside/main/Dir/... //{source.client_name}/main/Dir/..."
]
source.save("client", source_client)
source.p4("edit", renamed_file1)
source.p4("edit", renamed_file2)
source.p4("submit", "-d", "editing file")
source.p4("delete", renamed_file1)
source.p4("delete", renamed_file2)
source.p4("submit", "-d", "deleting file")
p4transfer.test_transfer(special_move_transfer_config)
assert target.counter == 4
assert len(target.p4("changes")) == 3
change = target.p4("describe", "1")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//target/inside/main/Dir/new_file1"
assert change["depotFile"][1] == "//target/inside/main/Dir/new_file2"
assert change["action"][0] == "add"
assert change["action"][1] == "add"
change = target.p4("describe", "2")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//target/inside/main/Dir/new_file1"
assert change["depotFile"][1] == "//target/inside/main/Dir/new_file2"
assert change["action"][0] == "edit"
assert change["action"][1] == "edit"
def test_move_when_deleted_at_head(source, target, default_transfer_config):
"""Test for replicating a move of a prior revision of a file that is deleted at head."""
file1 = source.local_path("inside/file1")
file2 = source.local_path("inside/file2")
file1.write_bytes(b"Some content")
source.p4("add", file1)
source.p4("submit", "-d", "file1 added")
source.p4("delete", file1)
source.p4("submit", "-d", "file1 deleted")
source.p4("sync", f"{file1}#1")
source.p4("edit", file1)
source.p4("move", file1, file2)
source.p4("sync")
source.p4("submit", "-d", "file1#1 -> file2")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
change = target.p4("describe", "3")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/file1"
assert change["depotFile"][1] == "//depot/import/file2"
assert change["action"][0] == "move/delete"
assert change["action"][1] == "move/add"
def test_move_with_edit_when_deleted_at_head(source, target, default_transfer_config):
"""Test for replicating a move of an edited prior revision of a file that is deleted at head."""
file1 = source.local_path("inside/file1")
file2 = source.local_path("inside/file2")
file1.write_bytes(b"Some content")
source.p4("add", file1)
source.p4("submit", "-d", "file1 added")
source.p4("delete", file1)
source.p4("submit", "-d", "file1 deleted")
source.p4("sync", f"{file1}#1")
source.p4("edit", file1)
source.p4("move", file1, file2)
source.p4("sync")
file2.write_bytes(b"Some content\nA change")
source.p4("submit", "-d", "file1#1 -> file2 with edit")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
change = target.p4("describe", "3")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/file1"
assert change["depotFile"][1] == "//depot/import/file2"
assert change["action"][0] == "move/delete"
assert change["action"][1] == "move/add"
def test_move_then_copy(source, target, default_transfer_config):
"""Test for replicating a move and subsequent copy of the moved file."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
renamed_file = source.local_path("inside/new/new_file")
branched_file = source.local_path("inside/branch/new_file")
source.p4("add", original_file)
source.p4("submit", "-d", "original_file added")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("submit", "-d", "original/original_file -> new/new_file")
source.p4("integrate", "-Di", renamed_file, branched_file)
source.p4("submit", "-d", "new/new_file branched into branch/new_file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
change = target.p4("describe", "2")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/add"
assert change["action"][1] == "move/delete"
change = target.p4("describe", "3")[0]
assert len(change["depotFile"]) == 1
assert change["depotFile"][0] == "//depot/import/branch/new_file"
assert change["action"][0] == "branch"
@pytest.mark.parametrize("filetype", ["text", "binary"])
def test_move_and_integrate(source, target, default_transfer_config, filetype):
"""Test for move with a merge - requires add -d."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
renamed_file = source.local_path("inside/new/new_file")
other_file = source.local_path("inside/branch/new_file")
other_file.write_bytes(b"Some content\nnew")
source.p4("add", "-t", filetype, original_file, other_file)
source.p4("submit", "-d", "adding original and other file")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("integ", "-f", other_file, renamed_file)
source.p4("resolve", "-am", renamed_file)
source.p4("submit", "-d", "renaming file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 2
assert len(target.p4("changes")) == 2
change = target.p4("describe", "2")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/add"
assert change["action"][1] == "move/delete"
def test_move_copy_combo(source, target, default_transfer_config):
"""Test for move where the add also has a copy."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
copied_file = source.local_path("inside/original/copied_file")
copied_file.write_bytes(b"Other content")
renamed_file = source.local_path("inside/new/new_file")
source.p4("add", original_file, copied_file)
source.p4("submit", "-d", "original_file and copied_file added")
source.p4("edit", original_file)
original_file.write_bytes(b"Some content\nA change")
source.p4("submit", "-d", "original_file edited")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("integ", copied_file, renamed_file)
source.p4("resolve", "-at", renamed_file)
source.p4("submit", "-d", "new_file moved from original_file and copied over")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
change = target.p4("describe", "3")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/add"
assert change["action"][1] == "move/delete"
filelog = target.filelog("//depot/import/new/new_file")
assert len(filelog[0].revisions[0].integrations) == 2
assert filelog[0].revisions[0].integrations[0].how == "copy from"
assert filelog[0].revisions[0].integrations[1].how == "moved from"
def test_move_copy_ignore_combo(source, target, default_transfer_config):
"""Test for move where the add also has a copy and an ignore."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
file2 = source.local_path("inside/new/file2")
file2.write_bytes(b"Other content")
file3 = source.local_path("inside/new/file3")
file3.write_bytes(b"Some other content")
renamed_file = source.local_path("inside/new/new_file")
source.p4("add", original_file, file2, file3)
source.p4("submit", "-d", "add original files")
source.p4("edit", original_file)
original_file.write_bytes(b"Some content\nA change")
source.p4("submit", "-d", "original_file edited")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("integ", file3, renamed_file)
source.p4("resolve", "-ay", renamed_file)
source.p4("integ", file2, renamed_file)
source.p4("resolve", "-at", renamed_file)
source.p4("submit", "-d", "rename/copy/ignore file")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
change = target.p4("describe", "3")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/add"
assert change["action"][1] == "move/delete"
filelog = target.filelog("//depot/import/new/new_file")
assert len(filelog[0].revisions[0].integrations) == 3
assert filelog[0].revisions[0].integrations[0].how == "copy from"
assert filelog[0].revisions[0].integrations[1].how == "ignored"
assert filelog[0].revisions[0].integrations[2].how == "moved from"
def test_move_copy_combo_from_outside(source, target, default_transfer_config):
"""Test for move where the add also has a copy from an unmapped file."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
# Note _outside sorts before inside - important to provoke a problem.
copied_file = source.local_path("_outside/original/copied_file")
copied_file.write_bytes(b"Other content")
renamed_file = source.local_path("inside/new/new_file")
source.p4("add", "-t", "binary", original_file, copied_file)
source.p4("submit", "-d", "original_file and copied_file added")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("integ", "-f", copied_file, renamed_file)
source.p4("resolve", "-at")
source.p4("submit", "-d", "new_file moved from original_file and copied over")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 2
assert len(target.p4("changes")) == 2
change = target.p4("describe", "2")[0]
assert len(change["depotFile"]) == 2
assert change["depotFile"][0] == "//depot/import/new/new_file"
assert change["depotFile"][1] == "//depot/import/original/original_file"
assert change["action"][0] == "move/add"
assert change["action"][1] == "move/delete"
filelog = target.filelog("//depot/import/new/new_file")
assert len(filelog[0].revisions[0].integrations) == 1
assert filelog[0].revisions[0].integrations[0].how == "moved from"
@pytest.mark.parametrize("filetype", ["text", "binary"])
def test_move_with_obliterated_delete(
source, target, default_transfer_config, filetype
):
"""Test for move where move/delete file has been obliterated."""
original_file = source.local_path("inside/original/original_file")
original_file.write_bytes(b"Some content")
renamed_file = source.local_path("inside/new/new_file")
source.p4("add", "-t", filetype, original_file)
source.p4("submit", "-d", "original_file added")
source.p4("edit", original_file)
source.p4("move", original_file, renamed_file)
source.p4("submit", "-d", "original/original_file -> new/new_file")
source.p4("obliterate", "-y", original_file)
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 2
assert len(target.p4("changes")) == 1
change = target.p4("describe", "1")[0]
assert len(change["depotFile"]) == 1
| |
# This increments the row entry and returns the value AFTER incrementing
latest_row = append_row.commit()
max_segment_id = column.deserialize(latest_row[column.family_id][column.key][0][0])
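# The counter was just incremented by `step`, so this call owns the `step` consecutive segment IDs
# ending at max_segment_id; return them as a contiguous range.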
min_segment_id = max_segment_id + 1 - step
segment_id_range = np.arange(min_segment_id, max_segment_id + 1,
dtype=basetypes.SEGMENT_ID)
return segment_id_range
def get_unique_segment_id(self, chunk_id: np.uint64) -> np.uint64:
""" Return unique Segment ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:return: np.uint64
"""
return self.get_unique_segment_id_range(chunk_id=chunk_id, step=1)[0]
def get_unique_node_id_range(self, chunk_id: np.uint64, step: int = 1
) -> np.ndarray:
""" Return unique Node ID range for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.uint64
"""
segment_ids = self.get_unique_segment_id_range(chunk_id=chunk_id,
step=step)
node_ids = np.array([self.get_node_id(segment_id, chunk_id)
for segment_id in segment_ids], dtype=np.uint64)
return node_ids
def get_unique_node_id(self, chunk_id: np.uint64) -> np.uint64:
""" Return unique Node ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:return: np.uint64
"""
return self.get_unique_node_id_range(chunk_id=chunk_id, step=1)[0]
def get_max_seg_id(self, chunk_id: np.uint64) -> np.uint64:
""" Gets maximal seg id in a chunk based on the atomic counter
This is an approximation. It is not guaranteed that all ids smaller or
equal to this id exists. However, it is guaranteed that no larger id
exist at the time this function is executed.
:return: uint64
"""
# Incrementer row keys start with an "i"
row_key = serializers.serialize_key("i%s" % serializers.pad_node_id(chunk_id))
row = self.read_byte_row(row_key, columns=column_keys.Concurrency.CounterID)
# Read incrementer value (default to 0) and interpret is as Segment ID
return basetypes.SEGMENT_ID.type(row[0].value if row else 0)
def get_max_node_id(self, chunk_id: np.uint64) -> np.uint64:
""" Gets maximal node id in a chunk based on the atomic counter
This is an approximation. It is not guaranteed that all ids smaller or
equal to this id exists. However, it is guaranteed that no larger id
exist at the time this function is executed.
:return: uint64
"""
max_seg_id = self.get_max_seg_id(chunk_id)
return self.get_node_id(segment_id=max_seg_id, chunk_id=chunk_id)
def get_unique_operation_id(self) -> np.uint64:
""" Finds a unique operation id
atomic counter
Operations essentially live in layer 0. Even if segmentation ids might
live in layer 0 one day, they would not collide with the operation ids
because we write information belonging to operations in a separate
family id.
:return: str
"""
column = column_keys.Concurrency.CounterID
append_row = self.table.row(row_keys.OperationID, append=True)
append_row.increment_cell_value(column.family_id, column.key, 1)
# This increments the row entry and returns the value AFTER incrementing
latest_row = append_row.commit()
operation_id_b = latest_row[column.family_id][column.key][0][0]
operation_id = column.deserialize(operation_id_b)
return np.uint64(operation_id)
def get_max_operation_id(self) -> np.int64:
""" Gets maximal operation id based on the atomic counter
This is an approximation. It is not guaranteed that all ids smaller or
equal to this id exists. However, it is guaranteed that no larger id
exist at the time this function is executed.
:return: int64
"""
column = column_keys.Concurrency.CounterID
row = self.read_byte_row(row_keys.OperationID, columns=column)
return row[0].value if row else column.basetype(0)
def get_cross_chunk_edges_layer(self, cross_edges):
""" Computes the layer in which a cross chunk edge becomes relevant.
I.e. if a cross chunk edge links two nodes in layer 4 this function
returns 3.
:param cross_edges: n x 2 array
edges between atomic (level 1) node ids
:return: array of length n
"""
if len(cross_edges) == 0:
return np.array([], dtype=np.int)
cross_chunk_edge_layers = np.ones(len(cross_edges), dtype=np.int)
cross_edge_coordinates = []
for cross_edge in cross_edges:
cross_edge_coordinates.append(
[self.get_chunk_coordinates(cross_edge[0]),
self.get_chunk_coordinates(cross_edge[1])])
cross_edge_coordinates = np.array(cross_edge_coordinates, dtype=np.int)
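# Repeatedly coarsen the chunk coordinates by the fan-out: an edge's layer keeps increasing as
# long as its two endpoints still fall into different chunks at the current resolution.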
for layer in range(2, self.n_layers):
edge_diff = np.sum(np.abs(cross_edge_coordinates[:, 0] -
cross_edge_coordinates[:, 1]), axis=1)
cross_chunk_edge_layers[edge_diff > 0] += 1
cross_edge_coordinates = cross_edge_coordinates // self.fan_out
return cross_chunk_edge_layers
def get_cross_chunk_edge_dict(self, cross_edges):
""" Generates a cross chunk edge dict for a list of cross chunk edges
:param cross_edges: n x 2 array
:return: dict
"""
cce_layers = self.get_cross_chunk_edges_layer(cross_edges)
u_cce_layers = np.unique(cce_layers)
cross_edge_dict = {}
for l in range(2, self.n_layers):
cross_edge_dict[l] = column_keys.Connectivity.CrossChunkEdge.deserialize(b'')
val_dict = {}
for cc_layer in u_cce_layers:
layer_cross_edges = cross_edges[cce_layers == cc_layer]
if len(layer_cross_edges) > 0:
val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \
layer_cross_edges
cross_edge_dict[cc_layer] = layer_cross_edges
return cross_edge_dict
def read_byte_rows(
self,
start_key: Optional[bytes] = None,
end_key: Optional[bytes] = None,
end_key_inclusive: bool = False,
row_keys: Optional[Iterable[bytes]] = None,
columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
end_time_inclusive: bool = False) -> Dict[bytes, Union[
Dict[column_keys._Column, List[bigtable.row_data.Cell]],
List[bigtable.row_data.Cell]
]]:
"""Main function for reading a row range or non-contiguous row sets from Bigtable using
`bytes` keys.
Keyword Arguments:
start_key {Optional[bytes]} -- The first row to be read, ignored if `row_keys` is set.
If None, no lower boundary is used. (default: {None})
end_key {Optional[bytes]} -- The end of the row range, ignored if `row_keys` is set.
If None, no upper boundary is used. (default: {None})
end_key_inclusive {bool} -- Whether or not `end_key` itself should be included in the
request, ignored if `row_keys` is set or `end_key` is None. (default: {False})
row_keys {Optional[Iterable[bytes]]} -- An `Iterable` containing possibly
non-contiguous row keys. Takes precedence over `start_key` and `end_key`.
(default: {None})
columns {Optional[Union[Iterable[column_keys._Column], column_keys._Column]]} --
Optional filtering by columns to speed up the query. If `columns` is a single
column (not iterable), the column key will be omitted from the result.
(default: {None})
start_time {Optional[datetime.datetime]} -- Ignore cells with timestamp before
`start_time`. If None, no lower bound. (default: {None})
end_time {Optional[datetime.datetime]} -- Ignore cells with timestamp after `end_time`.
If None, no upper bound. (default: {None})
end_time_inclusive {bool} -- Whether or not `end_time` itself should be included in the
request, ignored if `end_time` is None. (default: {False})
Returns:
Dict[bytes, Union[Dict[column_keys._Column, List[bigtable.row_data.Cell]],
List[bigtable.row_data.Cell]]] --
Returns a dictionary of `byte` rows as keys. Their value will be a mapping of
columns to a List of cells (one cell per timestamp). Each cell has a `value`
property, which returns the deserialized field, and a `timestamp` property, which
returns the timestamp as `datetime.datetime` object.
If only a single `column_keys._Column` was requested, the List of cells will be
attached to the row dictionary directly (skipping the column dictionary).
"""
# Create filters: Column and Time
filter_ = get_time_range_and_column_filter(
columns=columns,
start_time=start_time,
end_time=end_time,
end_inclusive=end_time_inclusive)
# Create filters: Rows
row_set = RowSet()
if row_keys is not None:
for row_key in row_keys:
row_set.add_row_key(row_key)
elif start_key is not None and end_key is not None:
row_set.add_row_range_from_keys(
start_key=start_key,
start_inclusive=True,
end_key=end_key,
end_inclusive=end_key_inclusive)
else:
raise cg_exceptions.PreconditionError("Need to either provide a valid set of rows, or"
" both, a start row and an end row.")
# Bigtable read with retries
rows = self._execute_read(row_set=row_set, row_filter=filter_)
# Deserialize cells
for row_key, column_dict in rows.items():
for column, cell_entries in column_dict.items():
for cell_entry in cell_entries:
cell_entry.value = column.deserialize(cell_entry.value)
# If no column array was requested, reattach single column's values directly to the row
if isinstance(columns, column_keys._Column):
rows[row_key] = cell_entries
return rows
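    # Illustrative usage sketch (assumptions: `cg` is an instance of this class and
    # `column_keys.Hierarchy.Child` is one of the available column definitions; both are
    # placeholders here). When a single column is requested, the cell list is attached to
    # the row directly, without the per-column dictionary:
    #
    #     rows = cg.read_byte_rows(row_keys=[b"row_a", b"row_b"],
    #                              columns=column_keys.Hierarchy.Child)
    #     for row_key, cells in rows.items():
    #         for cell in cells:
    #             print(row_key, cell.timestamp, cell.value)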
def read_byte_row(
self,
row_key: bytes,
columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
end_time_inclusive: bool = False) -> \
Union[Dict[column_keys._Column, List[bigtable.row_data.Cell]],
List[bigtable.row_data.Cell]]:
"""Convenience function for reading a single row from Bigtable using its `bytes` keys.
Arguments:
row_key {bytes} -- The row to be read.
Keyword Arguments:
columns {Optional[Union[Iterable[column_keys._Column], column_keys._Column]]} --
Optional filtering by columns to speed up the query. If `columns` is a single
column (not iterable), the column key will be omitted from the result.
(default: {None})
start_time {Optional[datetime.datetime]} -- Ignore cells with timestamp before
`start_time`. If None, no lower bound. (default: {None})
end_time {Optional[datetime.datetime]} -- Ignore cells with timestamp after `end_time`.
If None, no upper bound. (default: {None})
end_time_inclusive {bool} -- Whether or not `end_time` itself should be included in the
request, ignored if `end_time` is None. (default: {False})
Returns:
Union[Dict[column_keys._Column, List[bigtable.row_data.Cell]],
List[bigtable.row_data.Cell]] --
Returns a mapping of columns to a List of cells (one cell per timestamp). Each cell
has a `value` property, which returns the deserialized field, and a `timestamp`
property, which returns the timestamp as `datetime.datetime` object.
If only a single `column_keys._Column` was requested, the List of cells is returned
directly.
"""
row = self.read_byte_rows(row_keys=[row_key], columns=columns, start_time=start_time,
end_time=end_time, end_time_inclusive=end_time_inclusive)
if isinstance(columns, column_keys._Column):
return row.get(row_key, [])
else:
return row.get(row_key, {})
def read_node_id_rows(
self,
start_id: Optional[np.uint64] = None,
end_id: Optional[np.uint64] = None,
end_id_inclusive: bool = False,
node_ids: Optional[Iterable[np.uint64]] = None,
columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
end_time_inclusive: bool = False) -> Dict[np.uint64, Union[
Dict[column_keys._Column, List[bigtable.row_data.Cell]],
List[bigtable.row_data.Cell]
]]:
"""Convenience | |
"""Classes for efficient reading of multipage tif files.
PIL/pillow will parse a tif file by making a large number of very small reads. While this is in general perfectly fine
when reading from the local filesystem, in the case where a single read is turned into an http GET request (for
instance, reading from AWS S3), this can get pretty inefficient.
These classes, primarily accessed through TiffParser, attempt to optimize access to a single page of a multipage tif
by batching read requests into a smaller number of larger reads. The resulting page data can then be repacked into a
single-page tif file format using the packSinglePage() function. This data can then be handed off to PIL, etc
to be (again...) parsed, decompressed, turned into arrays and so on.
"""
from collections import namedtuple
import ctypes
import os
import struct
import operator
import sys
class TiffFormatError(ValueError):
"""Exception thrown when the file being read does not appear to conform to the TIF spec.
"""
pass
class TiffParser(object):
"""Encapsulates file access in parsing a tiff file.
Two main uses:
1. Populates TiffData object while first reading through pages of a multipage tif. Primary methods here are
parseFileHeader() and parseNextImageFileDirectory().
2. Generates TiffIFDData object corresponding to the desired page of a multipage tif, via getOffsetDataForIFD().
"""
INIT_IFD_SIZE = 6 + 12 * 24 # 2b num entries, N*12b entries, 4b offset to next IFD
def __init__(self, fp, debug=True):
"""
Parameters
----------
fp: file or file-like object, open for reading
The parser will interpret this handle as pointing to a TIFF file.
debug: boolean, default true
If true, the parser will keep track of the number of seeks and total bytes read in the attributes
self.nseeks and self.bytes_read.
"""
self._fp = fp
self._debug = debug
self.maxIfdSize = TiffParser.INIT_IFD_SIZE
self._order = None
if self._debug:
self.nseeks = 0
self.bytesRead = 0
self.nreads = 0
def __seek(self, pos):
cp = self._fp.tell()
if cp != pos:
self._fp.seek(pos, os.SEEK_SET)
if self._debug:
self.nseeks += 1
def __read(self, size=-1):
curBuf = self._fp.read(size)
if self._debug:
self.nreads += 1
if size >= 0 and len(curBuf) < size:
# print "incomplete read: requested %d bytes, got %d; retrying" % (size, len(curbuf))
size -= len(curBuf)
buf = ' ' # init loop
while size > 0 and len(buf) > 0:
# keep reading while we're still getting data (no EOF) and still have data left to get
buf = self._fp.read(size)
# if len(buf) < size:
# if len(buf) > 0:
# print "incomplete read: requested %d bytes, got %d; retrying" % (size, len(curbuf))
# else:
# print "incomplete read: requested %d bytes, got 0 (EOF)" % size
if self._debug:
self.nreads += 1
curBuf += buf # costly concatenation here...
size -= len(buf)
if self._debug:
self.bytesRead += len(curBuf)
return curBuf
@property
def order(self):
"""Byte order used to interpret wrapped tif file, either '<' (little-endian) or '>' (big-endian)
"""
return self._order
def parseFileHeader(self, destinationTiff=None):
"""
Reads the initial 8-byte file header from the wrapped file pointer.
Parameters:
-----------
destinationTiff: TiffData object, or None
If destinationTiff is not None, then the parsed file header will be attached to the passed destinationTiff
object as its fileHeader attribute, in addition to being returned from the method call.
Returns:
--------
TiffFileHeader object
"""
self.__seek(0)
headerBuf = self.__read(8)
fileHeader = TiffFileHeader.fromBytes(headerBuf)
self._order = fileHeader.byteOrder
if destinationTiff:
destinationTiff.fileHeader = fileHeader
return fileHeader
def parseNextImageFileDirectory(self, destinationTiff=None, ifdOffset=None):
"""
Reads the next Image File Directory in the wrapped file.
The offset of the next IFD within the file is determined either from the passed destinationTiff, or is passed
explicitly in ifdOffset. One or the other must be passed.
Parameters:
-----------
destinationTiff: TiffData object with a valid fileHeader attribute, or None
If passed, the offset of the next IFD will be found either from the previous IFDs stored within
destinationTiff if any, or from destinationTiff.fileHeader if not. The parsed IFD will be added to
the destinationTiff.ifds sequence.
ifdOffset: positive integer offset within the wrapped file, or None
If destinationTiff is None and ifdOffset is passed, then ifdOffset will be used as the file offset
at which to look for the next IFD.
Returns:
--------
TiffImageFileDirectory object
"""
if (not destinationTiff) and (ifdOffset is None):
raise ValueError("Either destinationTiff or ifdOffset must be specified")
if destinationTiff:
offset = destinationTiff.ifds[-1].ifdOffset if destinationTiff.ifds else \
destinationTiff.fileHeader.ifdOffset
if not offset:
return None
else:
offset = ifdOffset
# read out our current best guess at the IFD size for this file in bytes:
self.__seek(offset)
ifdBuf = self.__read(self.maxIfdSize)
# check whether we actually got enough:
reqdBufSize = TiffImageFileDirectory.parseIFDBufferSize(ifdBuf, self.order)
if reqdBufSize > self.maxIfdSize:
self.maxIfdSize = reqdBufSize
        if reqdBufSize > len(ifdBuf):
            # re-read the full directory from its start; without seeking back we would
            # continue from the current file position and miss the bytes already consumed
            self.__seek(offset)
            ifdBuf = self.__read(reqdBufSize)
if len(ifdBuf) < reqdBufSize:
raise IOError("Unable to read all %d bytes of tiff image file directory; got only %d bytes" %
(reqdBufSize, len(ifdBuf)))
ifd = TiffImageFileDirectory.fromBytes(ifdBuf, self.order)
if destinationTiff:
destinationTiff.ifds.append(ifd)
return ifd
def getOffsetDataForIFD(self, ifd, maxBuf=10**6, maxGap=1024):
"""Loads TIF tag offset and image data for the page described in the passed IFD.
This method will typically be called from packSinglePage() rather than being used directly by clients.
Parameters:
-----------
ifd: TiffImageFileDirectory
maxBuf: positive integer, default 10^6 (1MB)
Requests a largest size to use for file reads. Multiple contiguous image strips (or other data) of less
than maxBuf in size will be read in a single read() call. If a single strip is larger than maxBuf, then
it will still be read, in a single read call requesting exactly the strip size.
maxGap: positive integer, default 1024 (1KB)
Specifies the largest gap in meaningful data to tolerate within a single read() call. If two items of offset
data for a single IFD are separated by more than maxGap of data not within the IFD, then they will be read
in multiple read() calls. If they are separated by maxGap or less, then a single read() will be used and
the irrelevant data in between simply ignored.
Returns:
--------
TiffIFDData
"""
returnData = TiffIFDData()
returnData.ifd = ifd
startLengths = ifd.getOffsetStartsAndLengths()
bufStartLens = calcReadsForOffsets(startLengths, maxBuf, maxGap)
buffers = []
for bs, bl in bufStartLens:
self.__seek(bs)
buf = self.__read(bl)
buffers.append(TiffBuffer(bs, buf))
for entry in ifd.entries:
if entry.isOffset:
offset, valLength = entry.getOffsetStartAndLength()
found = False
for tiffBuff in buffers:
if tiffBuff.contains(offset, valLength):
found = True
fmt = self.order + entry.getOffsetDataFormat()
vals = tiffBuff.unpackFrom(fmt, offset)
returnData.entriesAndOffsetData.append(
TiffIFDEntryAndOffsetData(*(entry, vals)))
break
if not found:
raise ValueError("Offset data at start: %d length: %d not found in available buffers" %
(offset, valLength))
else:
returnData.entriesAndOffsetData.append(
TiffIFDEntryAndOffsetData(*(entry, None)))
del buffers
imageOffsets = None
imageBytesizes = None
for ifdEntryAndData in returnData.entriesAndOffsetData:
if ifdEntryAndData.entry.isImageDataOffsetEntry():
if imageOffsets:
raise TiffFormatError("Found duplicate image data offset entries in single IFD")
imageOffsets = ifdEntryAndData.getData()
elif ifdEntryAndData.entry.isImageDataByteCountEntry():
if imageBytesizes:
raise TiffFormatError("Found duplicate image data byte size entries in single IFD")
imageBytesizes = ifdEntryAndData.getData()
if (not imageOffsets) or (not imageBytesizes):
raise TiffFormatError("Missing image offset or byte size data in IFD")
if len(imageOffsets) != len(imageBytesizes):
raise TiffFormatError("Unequal numbers of image data offset and byte size entries in IFD " +
"(offsets: %d, byte sizes: %d" % (len(imageOffsets), len(imageBytesizes)))
startLengths = zip(imageOffsets, imageBytesizes)
del imageOffsets, imageBytesizes
bufStartLens = calcReadsForOffsets(startLengths, maxBuf, maxGap)
buffers = []
for bs, bl in bufStartLens:
self.__seek(bs)
buf = self.__read(bl)
buffers.append(TiffBuffer(bs, buf))
# validate that all data was read successfully and set up views
dataViews = []
for st, l in startLengths:
found = False
for buf in buffers:
if buf.contains(st, l):
# print "Buffer at orig offset %d, length %d, contains strip starting at %d, length %d" % \
# (buf.orig_offset, len(buf.buffer), st, l)
dataViews.append(buf.bufferFrom(st, l))
found = True
break
if not found:
raise TiffFormatError("Could not find buffer with data at offset: %d, size: %d" % (st, l))
returnData.imagedataBuffers = dataViews
return returnData
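# Illustrative end-to-end sketch (assumptions: `fp` is an open binary file handle to a
# multipage TIFF and `TiffData()` can be constructed with no arguments; both live outside
# the excerpt shown here). The parser reads the header, walks the IFD chain, and the
# selected page is then repacked by packSinglePage() below:
#
#     parser = TiffParser(fp, debug=True)
#     tiff = TiffData()
#     parser.parseFileHeader(destinationTiff=tiff)
#     while parser.parseNextImageFileDirectory(destinationTiff=tiff):
#         pass
#     page_buf = packSinglePage(parser, tiffData=tiff, pageIdx=0)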
def packSinglePage(parser, tiffData=None, pageIdx=0):
"""Creates a string buffer with valid tif file data from a single page of a multipage tif.
    The resulting string buffer can be written to disk as a valid single-page TIFF file.
    """
import os
import glob
import click
import pandas
import nibabel as nib
from .batch_manager import BatchManager, Job
from .config_json_parser import ClpipeConfigParser
import json
from pkg_resources import resource_stream, resource_filename
import clpipe.postprocutils
import numpy
import logging
import gc
import psutil
import sys
from .error_handler import exception_handler
#import nipy.modalities.fmri.hrf
import re
@click.command()
@click.argument('subjects', nargs=-1, required=False, default=None)
@click.option('-config_file', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None, help = 'Use a given configuration file. If left blank, uses the default config file, requiring definition of BIDS, working and output directories.')
@click.option('-target_dir', type=click.Path(exists=True, dir_okay=True, file_okay=False), help='Which fmriprep directory to process. If a configuration file is provided with a BIDS directory, this argument is not necessary. Note, must point to the ``fmriprep`` directory, not its parent directory.')
@click.option('-target_suffix', help= 'Which file suffix to use. If a configuration file is provided with a target suffix, this argument is not necessary. Defaults to "preproc_bold.nii.gz"')
@click.option('-output_dir', type=click.Path(dir_okay=True, file_okay=False), help = 'Where to put the postprocessed data. If a configuration file is provided with a output directory, this argument is not necessary.')
@click.option('-output_suffix', help = 'What suffix to append to the postprocessed files. If a configuration file is provided with a output suffix, this argument is not necessary.')
@click.option('-task', help = 'Which task to postprocess. If left blank, defaults to all tasks.')
@click.option('-TR', help = 'The TR of the scans. If a config file is not provided, this option is required. If a config file is provided, this information is found from the sidecar jsons.')
@click.option('-processing_stream', help = 'Optional processing stream selector.')
@click.option('-log_dir', type=click.Path(dir_okay=True, file_okay=False), help = 'Where to put HPC output files. If not specified, defaults to <outputDir>/batchOutput.')
@click.option('-beta_series', is_flag = True, default = False, help = "Flag to activate beta-series correlation correlation. ADVANCED METHOD, refer to the documentation.")
@click.option('-submit', is_flag = True, default=False, help = 'Flag to submit commands to the HPC.')
@click.option('-batch/-single', default=True, help = 'Submit to batch, or run in current session. Mainly used internally.')
@click.option('-debug', is_flag = True, default=False, help = 'Print detailed processing information and traceback for errors.')
def fmri_postprocess(config_file=None, subjects=None, target_dir=None, target_suffix=None, output_dir=None,
output_suffix=None, log_dir=None,
submit=False, batch=True, task=None, tr=None, processing_stream = None, debug = False, beta_series = False):
"""This command runs an fMRIprep'ed dataset through additional processing, as defined in the configuration file. To run specific subjects, specify their IDs. If no IDs are specified, all subjects are ran."""
if not debug:
sys.excepthook = exception_handler
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.DEBUG)
if config_file is None and tr is None:
raise ValueError('No config file and no specified TR. Please include one.')
config = ClpipeConfigParser()
config.config_updater(config_file)
config.setup_postproc(target_dir, target_suffix, output_dir, output_suffix, beta_series,
log_dir)
config.validate_config()
if beta_series:
raise ValueError("At this time, the beta series functionality is no longer working due to incompatibilities between packages.")
output_type = 'BetaSeriesOptions'
else:
output_type = 'PostProcessingOptions'
if config_file is None:
config_file = resource_filename(__name__, "data/defaultConfig.json")
alt_proc_toggle = False
if processing_stream is not None:
processing_stream_config = config.config['ProcessingStreams']
processing_stream_config = [i for i in processing_stream_config if i['ProcessingStream'] == processing_stream]
if len(processing_stream_config) == 0:
raise KeyError('The processing stream you specified was not found.')
alt_proc_toggle = True
if alt_proc_toggle:
if beta_series:
config.update_processing_stream(processing_stream, processing_stream_config[0]['BetaSeriesOptions']['OutputDirectory'],
processing_stream_config[0]['BetaSeriesOptions']['OutputSuffix'],
processing_stream_config[0]['BetaSeriesOptions']['LogDirectory'])
config.config['BetaSeriesOptions'].update(processing_stream_config[0]['BetaSeriesOptions'])
else:
config.config['PostProcessingOptions'].update(processing_stream_config[0]['PostProcessingOptions'])
config.update_processing_stream(processing_stream, processing_stream_config[0]['PostProcessingOptions']['OutputDirectory'],
processing_stream_config[0]['PostProcessingOptions']['OutputSuffix'],
processing_stream_config[0]['PostProcessingOptions']['LogDirectory'])
if not subjects:
subjectstring = "ALL"
sublist = [o.replace('sub-', '') for o in os.listdir(config.config[output_type]['TargetDirectory'])
if os.path.isdir(os.path.join(config.config[output_type]['TargetDirectory'], o)) and 'sub-' in o]
else:
subjectstring = " , ".join(subjects)
sublist = subjects
submission_string = '''fmri_postprocess -config_file={config} -target_dir={targetDir} -target_suffix={targetSuffix} ''' \
'''-output_dir={outputDir} -output_suffix={outputSuffix} {procstream} -log_dir={logOutputDir} {taskString} {trString} {beta_series} -single {sub}'''
task_string = ""
tr_string = ""
beta_series_string = ""
if task is not None:
task_string = '-task='+task
if tr is not None:
tr_string = '-tr='+tr
if beta_series:
beta_series_string = '-beta_series'
if processing_stream is not None:
procstream = "-processing_stream=" + processing_stream
else:
procstream = ""
if batch:
config_string = config.config_json_dump(config.config[output_type]['OutputDirectory'], os.path.basename(config_file))
batch_manager = BatchManager(config.config['BatchConfig'], config.config[output_type]['LogDirectory'])
batch_manager.update_mem_usage(config.config['PostProcessingOptions']['PostProcessingMemoryUsage'])
batch_manager.update_time(config.config['PostProcessingOptions']['PostProcessingTimeUsage'])
batch_manager.update_nthreads(config.config['PostProcessingOptions']['NThreads'])
batch_manager.update_email(config.config["EmailAddress"])
for sub in sublist:
sub_string_temp = submission_string.format(
config=config_string,
targetDir=config.config[output_type]['TargetDirectory'],
targetSuffix=config.config[output_type]['TargetSuffix'],
outputDir=config.config[output_type]['OutputDirectory'],
outputSuffix=config.config[output_type]['OutputSuffix'],
procstream = procstream,
taskString = task_string,
trString = tr_string,
logOutputDir=config.config[output_type]['LogDirectory'],
beta_series = beta_series_string,
sub=sub
)
if debug:
sub_string_temp = sub_string_temp + " -debug"
batch_manager.addjob(Job("PostProcessing" + sub, sub_string_temp))
if submit:
batch_manager.createsubmissionhead()
batch_manager.compilejobstrings()
batch_manager.submit_jobs()
else:
batch_manager.createsubmissionhead()
batch_manager.compilejobstrings()
click.echo(batch_manager.print_jobs())
else:
for sub in subjects:
logging.debug(beta_series)
logging.info('Running Subject ' + sub)
_fmri_postprocess_subject(config, sub, task, tr, beta_series)
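# Illustrative invocation sketch (the config name and subject IDs below are placeholders,
# not part of the original module): the click command above is normally driven from the
# shell, e.g.
#
#     fmri_postprocess -config_file=clpipe_config.json -submit -debug 101 102
#
# which postprocesses subjects "101" and "102" and submits one batch job per subject
# using the BatchConfig defined in the configuration file.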
def _fmri_postprocess_subject(config, subject, task, tr=None, beta_series = False):
if beta_series:
output_type = 'BetaSeriesOptions'
else:
output_type = 'PostProcessingOptions'
search_string = os.path.abspath(
os.path.join(config.config[output_type]['TargetDirectory'], "sub-" + subject, "**",
"*" + config.config[output_type]['TargetSuffix']))
subject_files = glob.glob(search_string, recursive=True)
if config.config['PostProcessingOptions']["DropCSV"] is not "":
drop_tps = pandas.read_csv(config.config['PostProcessingOptions']["DropCSV"])
logging.info('Finding Image Files')
for image in subject_files:
if task is None or 'task-' + task in image:
logging.info('Processing ' + image)
try:
tps_drop = None
temp = None
if config.config['PostProcessingOptions']["DropCSV"] is not "":
temp = drop_tps[drop_tps['file_name'].str.match(os.path.basename(image))]['TR_round']
if len(temp) is 1:
tps_drop = int(temp)
logging.info('Found drop TP info, will remove last ' + str(tps_drop) + ' time points')
else:
tps_drop = None
_fmri_postprocess_image(config, image, task, tr, beta_series, tps_drop)
except Exception as err:
logging.exception(err)
def _fmri_postprocess_image(config, file, task = None, tr=None, beta_series = False, drop_tps = None):
confound_regressors = _find_confounds(config, file)
output_file_path = _build_output_directory_structure(config, file, beta_series)
if os.path.exists(output_file_path):
logging.info("Output File Exists! Skipping.")
return 0
logging.info('Looking for: ' + confound_regressors)
if not os.path.exists(confound_regressors):
logging.warning('Could not find a confound file for ' + file + ". Moving onto next scan")
return
else:
logging.info('Found confound regressors')
confounds, fdts = _regression_prep(config, confound_regressors)
if drop_tps is not None:
confounds = confounds.iloc[:(confounds.shape[0]-(drop_tps))]
logging.info('Removing last ' + str(drop_tps) + ' time points')
fdts = fdts.iloc[:(fdts.shape[0]-(drop_tps))]
if tr is None:
image_json_path = _find_json(config, file)
with open(os.path.abspath(image_json_path), "r") as json_path:
image_json = json.load(json_path)
tr = float(image_json['RepetitionTime'])
logging.info('TR found: ' + str(tr))
image = nib.load(file)
data = image.get_fdata()
data = data.astype(numpy.float32)
orgImageShape = data.shape
coordMap = image.affine
data = data.reshape((numpy.prod(numpy.shape(data)[:-1]), data.shape[-1]))
data = numpy.transpose(data)
if drop_tps is not None:
data = data[0:(data.shape[0]-(drop_tps)), :]
orgImageShape = list(orgImageShape)
orgImageShape[3] = data.shape[0]
orgImageShape = tuple(orgImageShape)
row_means = data.mean(axis=0)
data = (data - data.mean(axis=0))
if not beta_series:
regress_toggle = config.config['PostProcessingOptions']['Regress']
scrub_toggle = False
if config.config['PostProcessingOptions']['Scrubbing']:
logging.debug('Scrubbing Toggle Activated')
scrub_toggle = True
scrub_ahead = int(config.config['PostProcessingOptions']['ScrubAhead'])
scrub_behind = int(config.config['PostProcessingOptions']['ScrubBehind'])
scrub_contig = int(config.config['PostProcessingOptions']['ScrubContig'])
fd_thres = float(config.config['PostProcessingOptions']['ScrubFDThreshold'])
orig_fdts = fdts
if config.config['PostProcessingOptions']['RespNotchFilter']:
fdts = _notch_filter_fd(config, confound_regressors, tr, drop_tps)
scrubTargets = clpipe.postprocutils.utils.scrub_setup(fdts, fd_thres, scrub_behind, scrub_ahead, scrub_contig)
hp = float(config.config['PostProcessingOptions']['FilteringHighPass'])
lp = float(config.config['PostProcessingOptions']['FilteringLowPass'])
filter_toggle = False
if hp > 0 or lp > 0:
logging.info('Filtering Toggle Activated')
filter_toggle = True
order = int(config.config['PostProcessingOptions']['FilteringOrder'])
filt = clpipe.postprocutils.utils.calc_filter(hp, lp, tr, order)
confounds = clpipe.postprocutils.utils.apply_filter(filt, confounds)
if scrub_toggle and filter_toggle:
logging.info('Using Spectral Interpolation')
ofreq = int(config.config['PostProcessingOptions']['OversamplingFreq'])
hfreq = float(config.config['PostProcessingOptions']['PercentFreqSample'])
logging.debug('Memory Usage Before Spectral Interpolation:' +str(psutil.virtual_memory().total >> 30) +' GB')
data = clpipe.postprocutils.spec_interpolate.spec_inter(data, tr, ofreq, scrubTargets, hfreq, binSize=config.config['PostProcessingOptions']["SpectralInterpolationBinSize"])
gc.collect()
logging.debug('Memory Usage After Spectral Interpolation GC:' +str(psutil.virtual_memory().total >> 30) +' GB')
if filter_toggle:
logging.info('Filtering Data Now')
data = clpipe.postprocutils.utils.apply_filter(filt, data)
if regress_toggle:
logging.info('Regressing Data Now')
logging.debug(str(confounds.shape))
logging.debug(str(data.shape))
data = clpipe.postprocutils.utils.regress(confounds, data)
if scrub_toggle:
logging.info('Scrubbing data Now')
data = clpipe.postprocutils.utils.scrub_image(data, scrubTargets)
data = (data + row_means)
data = numpy.transpose(data)
data = data.reshape(orgImageShape)
data32 = numpy.float32(data)
out_image = nib.Nifti1Image(data32, coordMap)
output_file_path = _build_output_directory_structure(config, file)
logging.info('Saving post processed data to ' + output_file_path)
nib.save(out_image, output_file_path)
if scrub_toggle:
file_name = os.path.basename(file)
sans_ext = os.path.splitext(os.path.splitext(file_name)[0])[0]
toOut = numpy.column_stack([numpy.arange(1, len(scrubTargets) + 1, 1), numpy.asarray(scrubTargets), fdts, orig_fdts])
logging.info('Saving Scrub Targets to ' + os.path.join(os.path.dirname(output_file_path),
sans_ext + "_scrubTargets.csv"))
numpy.savetxt(os.path.join(os.path.dirname(output_file_path), sans_ext + "_scrubTargets.csv"), toOut,
delimiter=",")
else:
beta_series_options = config.config['BetaSeriesOptions']['TaskSpecificOptions']
avail_tasks = [x['Task'] for x in beta_series_options]
logging.debug(avail_tasks)
img_task = _find_image_task(file)
logging.debug(img_task)
if img_task not in avail_tasks:
logging.info('Did not find beta series specification for the task ' +img_task+ ' for image ' +file )
return
else:
beta_series_options = beta_series_options[avail_tasks.index(img_task)]
hp = float(config.config['BetaSeriesOptions']['FilteringHighPass'])
lp = float(config.config['BetaSeriesOptions']['FilteringLowPass'])
events_file = _find_events(config, file)
logging.debug(events_file)
if os.path.exists(events_file):
confounds, fdts = _regression_prep(config, confound_regressors, beta_series)
ntp = len(confounds)
if tr is None:
image_json_path = _find_json(config, file)
with open(os.path.abspath(image_json_path), "r") as json_path:
image_json = json.load(json_path)
tr = float(image_json['RepetitionTime'])
filter_toggle = False
filt = None
if hp > 0 or lp > 0:
logging.info('Filtering Toggle Activated')
filter_toggle = True
order = int(config.config['BetaSeriesOptions']['FilteringOrder'])
filt = clpipe.postprocutils.utils.calc_filter(hp, lp, tr, order)
confounds = clpipe.postprocutils.utils.apply_filter(filt, confounds)
filt_ev_array, valid_events = _ev_mat_prep(events_file, filt, tr, ntp, beta_series_options)
image = nib.load(file)
data = image.get_fdata()
data = data.astype(numpy.float32)
orgImageShape = data.shape
coordMap = image.affine
data = data.reshape((numpy.prod(numpy.shape(data)[:-1]), data.shape[-1]))
data = numpy.transpose(data)
            data = (data - data.mean(axis=0))  # demean, mirroring the non-beta-series branch above
size_range = (256, 512)
        # Pick one random offset in the range [0, size_range[1] - size_range[0]]
oneside_length = sampling.pick_random_permutation(1, size_range[1] - size_range[0] + 1)[0]
# Add random number to size_range[0]
oneside_length = size_range[0] + oneside_length
y, x = x_or_probability.shape[:2]
# calculate the size to keep the ratio of the picture
# find smaller side of picture and then calculate the scale
if y <= x:
scale = float(oneside_length) / y
sizes = (oneside_length, int(scale * x))
else:
scale = float(oneside_length) / x
sizes = (int(scale * y), oneside_length)
info = {'resized_size': sizes, 'original_size': (y, x), 'actual_interpolation': interpolation, 'scale': scale}
return (preprocess.resize_image(x_or_probability, sizes, interpolation=interpolation, mode=mode), info)
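    # Worked example of the aspect-ratio preserving resize above: for a 300 x 600 input
    # (y = 300 is the smaller side) and a sampled oneside_length of 400, scale = 400 / 300
    # and sizes = (400, 800), so the shorter edge always lands in [256, 512] while the
    # original aspect ratio is preserved.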
@execute_based_on_probability
def crop_picture_randomly(self, x_or_probability, sizes=(224, 224)):
"""crop picture out randomly with the size
Edited date:
160422
Test:
160711
Example:
::
pseudo_picture_asymmetry = np.array(np.random.uniform(0, 255, (100, 50, 3)), dtype=np.int)
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
da.crop_picture_randomly(pseudo_picture_asymmetry, sizes=(10, 10))
>>> print(da.x.shape)
(10, 10, 3)
>>> print(da.info)
{'0': {'execute': True,
'keypoints': ((11, 21), (19, 29)),
'original_size': (100, 50),
'sizes': (10, 10),
'whoami': 'crop_picture_randomly'},
'pc': 1}
Args:
x_or_probability Optional([int, float, str, numpy.ndarray]): If int or float, this argument is considered as the probability and self.x is used for convert_to_image_format. If str or numpy.ndarray, set this argument as self.x and execute crop_picture_randomly with self.x.
sizes (tuple): crop size, (y, x)
__no_record (bool): the value of __no_record changes the value to be returned.
Returns:
Optional([tuple, class]): If __no_record is False, return self, otherwise return tuple(shaped x, info)
"""
y, x = x_or_probability.shape[0:2]
keypoints = DataAugmentationPicture.get_keypoints_randomly_for_cropping((y, x), sizes)
info = {'keypoints': keypoints, 'original_size': (y, x)}
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), info)
@execute_based_on_probability
def rgb_shift(self, x_or_probability, mean=0.0, variance=0.1):
"""Execute rgb_shift
Edited date:
160501
Test:
160711
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
da.register_eigen(data)
rgb_shifted_picture = da.rgb_shift(picture)
Args:
x_or_probability Optional([int, float, str, numpy.ndarray]): If int or float, this argument is considered as the probability and self.x is used for convert_to_image_format. If str or numpy.ndarray, set this argument as self.x and execute rgb_shift with self.x.
mean (float): mean for the gaussian distribution
variance (float): variance for the gaussian distribution
__no_record (bool): the value of __no_record changes the value to be returned.
Returns:
Optional([tuple, class]): If __no_record is False, return self, otherwise return tuple(shaped x, info)
"""
y, x, channel = x_or_probability.shape
shifted_pic = np.zeros((y, x, channel), dtype=x_or_probability.dtype)
element_y = six.moves.range(y)
element_x = six.moves.range(x)
for i, ii in itertools.product(element_y, element_x):
# rgb shift
shifted_pic[i][ii] = x_or_probability[i][ii] + self._one_rgb_shift(mean=mean, variance=variance)
return (shifted_pic, {})
@execute_based_on_probability
def crop_center(self, x_or_probability, sizes=(384, 384)):
"""Crop the center of the picture
Edited date:
160515
Test:
160711
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
picture = np.random.normal(0, 0.1, (100, 100, 3))
da.crop_center(picture, sizes=(10, 10))
>>> print(da.x.shape)
(10, 10, 3)
Args:
x_or_probability Optional([int, float, str, numpy.ndarray]): If int or float, this argument is considered as the probability and self.x is used for convert_to_image_format. If str or numpy.ndarray, set this argument as self.x and execute crop_center with self.x.
sizes (tuple): crop size, (y, x)
__no_record (bool): the value of __no_record changes the value to be returned.
Returns:
Optional([tuple, class]): If __no_record is False, return self, otherwise return tuple(shaped x, info)
"""
y, x, channel = x_or_probability.shape
center_y = int(y / 2)
center_x = int(x / 2)
frame_y, frame_x = sizes
up = -int((frame_y + 1) / 2)
down = int(frame_y / 2)
left = -int((frame_x + 1) / 2)
right = int(frame_x / 2)
start_y = max(center_y + up, 0)
end_y = min(center_y + down, y)
start_x = max(center_x + left, 0)
end_x = min(center_x + right, x)
keypoints = ((start_y, end_y), (start_x, end_x))
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), {'keypoints': keypoints})
@execute_based_on_probability
def crop_upper_left(self, x_or_probability, sizes=(384, 384)):
y, x, channel = x_or_probability.shape
frame_y, frame_x = sizes
start_y = 0
end_y = min(frame_y, y)
start_x = 0
end_x = min(frame_x, x)
keypoints = ((start_y, end_y), (start_x, end_x))
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), {'keypoints': keypoints})
@execute_based_on_probability
def crop_top_left(self, x_or_probability, sizes=(384, 384)):
y, x, channel = x_or_probability.shape
frame_y, frame_x = sizes
start_y = 0
end_y = min(frame_y, y)
start_x = 0
end_x = min(frame_x, x)
keypoints = ((start_y, end_y), (start_x, end_x))
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), {'keypoints': keypoints})
@execute_based_on_probability
def crop_bottom_left(self, x_or_probability, sizes=(384, 384)):
y, x, channel = x_or_probability.shape
frame_y, frame_x = sizes
start_y = max(y - frame_y, 0)
end_y = y
start_x = 0
end_x = min(frame_x, x)
keypoints = ((start_y, end_y), (start_x, end_x))
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), {'keypoints': keypoints})
@execute_based_on_probability
def crop_top_right(self, x_or_probability, sizes=(384, 384)):
y, x, channel = x_or_probability.shape
frame_y, frame_x = sizes
start_y = 0
end_y = min(frame_y, y)
start_x = max(x - frame_x, 0)
end_x = x
keypoints = ((start_y, end_y), (start_x, end_x))
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), {'keypoints': keypoints})
@execute_based_on_probability
def crop_bottom_right(self, x_or_probability, sizes=(384, 384)):
y, x, channel = x_or_probability.shape
frame_y, frame_x = sizes
start_y = max(y - frame_y, 0)
end_y = y
start_x = max(x - frame_x, 0)
end_x = x
keypoints = ((start_y, end_y), (start_x, end_x))
return (DataAugmentationPicture.crop_picture(x_or_probability, keypoints), {'keypoints': keypoints})
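    # Illustrative sketch (assumptions: `da` is a DataAugmentationPicture instance and
    # `picture` is a sufficiently large H x W x 3 numpy array): the five crop_* methods
    # above together give the classic five-crop evaluation pattern, mirroring the
    # docstring examples elsewhere in this class:
    #
    #     for crop in (da.crop_top_left, da.crop_top_right,
    #                  da.crop_bottom_left, da.crop_bottom_right, da.crop_center):
    #         crop(picture, sizes=(224, 224))
    #         print(da.x.shape)   # -> (224, 224, 3)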
@execute_based_on_probability
def subtract_local_mean(self, x_or_probability):
"""Subtract local mean
Edited date:
160515
Test:
160711
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
path = 'lenna.jpg'
da.load_picture(path).subtract_local_mean()
>>> print(da.x)
-2.1359828785497543e-14
>>> print(da.info)
{'0': {'dtype': None,
'execute': True,
'path': 'lenna.jpg',
'whoami': 'load_picture'},
'1': {'execute': True,
'mean': 95.497653021442488,
'whoami': 'subtract_local_mean'},
'pc': 2}
Args:
x_or_probability Optional([int, float, str, numpy.ndarray]): If int or float, this argument is considered as the probability and self.x is used for convert_to_image_format. If str or numpy.ndarray, set this argument as self.x and execute crop_center with self.x.
__no_record (bool): the value of __no_record changes the value to be returned.
Returns:
Optional([tuple, class]): If __no_record is False, return self, otherwise return tuple(shaped x, info)
"""
mean = preprocess.calculate_local_average(x_or_probability)
return (x_or_probability - mean, {'mean': mean})
@execute_based_on_probability
def normalize_picture(self, x_or_probability, value=0., each_rgb=False, dtype=np.float32):
"""Normalize the picture
Edited date:
160515
Test:
160711
Note:
| The equation for normalization: (x - mean) / sqrt(variance + value)
            | value 0 is the typical case and the default, but setting value to 10 for picture normalization is a good choice to suppress noise.
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
path = 'lenna.jpg'
da.load_picture(path).normalize_picture()
>>> print(np.mean(da.x))
1.21000026113065e-16
>>> print(np.var(da.x))
1.0
Args:
x_or_probability Optional([int, float, str, numpy.ndarray]): If int or float, this argument is considered as the probability and self.x is used for convert_to_image_format. If str or numpy.ndarray, set this argument as self.x and execute crop_center with self.x.
value (float): for an RGB picture, value 10 is a good start point. Check at Note.
__no_record (bool): the value of __no_record changes the value to be returned.
Returns:
Optional([tuple, class]): If __no_record is False, return self, otherwise return tuple(shaped x, info)
"""
x_or_probability = x_or_probability.astype(dtype)
if each_rgb:
var = np.var(x_or_probability, axis=(0, 1))
std = np.sqrt(var + value)
mean = np.mean(x_or_probability, axis=(0, 1))
for i in six.moves.range(x_or_probability.shape[2]):
x_or_probability[:, :, i] = (x_or_probability[:, :, i] - mean[i]) / std[i]
return (x_or_probability, {'mean': mean, 'var': var, 'std': std})
else:
var = np.var(x_or_probability)
std = np.sqrt(var + value)
mean = preprocess.calculate_local_average(x_or_probability)
return ((x_or_probability - mean) / std, {'mean': mean, 'var': var, 'std': std})
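    # Minimal numeric sketch of the normalization above (assuming calculate_local_average
    # returns the plain mean): with value=0 the result has zero mean and unit variance,
    # while a positive `value` damps the scaling for low-variance, noisy pictures:
    #
    #     x = np.random.uniform(0, 255, (32, 32, 3))
    #     z = (x - np.mean(x)) / np.sqrt(np.var(x) + 0.0)
    #     # np.mean(z) ~ 0.0, np.var(z) ~ 1.0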
@execute_based_on_probability
def shift_global_hsv_randomly(self, x_or_probability, hsv='h', low=(-31.992, -0.10546, -0.24140), high=(31.992, 0.10546, 0.24140), ceiling=True):
"""Shift HSV globally
Edited date:
160515
Test:
160712
Note:
| default low and high parameters are from the paper: Scalable Bayesian Optimization Using Deep Neural Networks
| url: http://arxiv.org/abs/1502.05700
| For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255].
            | Different software packages use different scales, so if you compare values with OpenCV, you need to normalize them to the ranges mentioned above.
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
da.load_picture(path).shift_global_hsv_randomly()
>>> print(da.x.shape)
(855, 400, 3)
Args:
x_or_probability Optional([int, float, str, numpy.ndarray]): If int or float, this argument is considered as the probability and self.x is used for convert_to_image_format. If str or numpy.ndarray, set this argument as self.x and execute crop_center with self.x.
hsv Optional(['h', 's', 'v']): 'h' is hue, 's' is saturation and 'v' is value
low (tuple): lower bound of random value of HSV
high (tuple): higher bound of random value of HSV
# ******************************************************************************
# pysimm.cassandra module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
from StringIO import StringIO
import subprocess
import os
import re
import numpy as np
import random
import logging
import types
from collections import Iterable, OrderedDict
from pysimm import system
from string import ascii_uppercase
DATA_PATH = os.path.relpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../dat/csndra_data'))
KCALMOL_2_K = 503.22271716452
CASSANDRA_EXEC = os.environ.get('CASSANDRA_EXEC')
# Create a logger instance and send its output to the console by default
logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S',
format='%(asctime)s [%(levelname)s]: %(message)s')
def check_cs_exec():
global CASSANDRA_EXEC
if CASSANDRA_EXEC is None:
print('Please specify the OS environment variable ''CASSANDRA_EXEC'' that points to '
'CASSANDRA compiled binary file, which is by default cassandra_{compiler-name}[_openMP].exe ')
return False
# else:
# try:
# stdout, stderr = Popen('CASSANDRA_EXEC', stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
# return True
# except OSError:
# print('Seems the environment variable ''CASSANDRA_EXEC'' is not configured properely. '
# 'Please check the OS environment variable ''CASSANDRA_EXEC'' it should point '
# 'to CASSANDRA compiled binary file ( cassandra_{compiler-name}[_openMP].exe ) ')
# return False
check_cs_exec()
class GCMC(object):
"""pysimm.cassandra.GCMC
Object containing the settings for the Grand-Canonical Monte-Carlo simulations provided by the CASSANDRA software.
    The object also includes the simulation results
Attributes:
props: dictionary containing all simulation settings to be written to the CASSANDRA .inp file
input: text stream to be written to the CASSANDRA .inp file
mc_sst: `~pysimm.cassandra.McSystem` object containing all molecules to be inserted by CASSANDRA
fxd_sst: :class:`~pysimm.system.System` object that describes the optional fixed molecular system for MC simulations (default: None)
tot_sst: the :class:`~pysimm.system.System` object containing results of CASSANDRA simulations
logger: :class:`~logging.Logger` object for multi-level verbose program execution
"""
def __init__(self, mc_sst=None, fxd_sst=None, **kwargs):
global DATA_PATH
# Initializing CASSANDRA input stream, empty at the beginning
self.input = ''
self.logger = logging.getLogger('GCMC')
# Initializing dictionary that contains records that directly will be sent to the .inp file
self.props = OrderedDict()
# Reading default properties of the GCMC simulations
def_dat = Cassandra(fxd_sst).read_input(os.path.join(DATA_PATH, '_gcmc_default.inp'))
# Static (unchangeable) properties
self.props['Sim_Type'] = InpSpec('Sim_Type', 'GCMC', 'GCMC')
tmp = kwargs.get('out_folder') # Folder for the results and intermediate files
if tmp:
self.out_folder = tmp
if os.path.isabs(tmp):
self.out_folder = os.path.relpath(tmp)
else:
self.out_folder = os.getcwd()
if not os.path.exists(self.out_folder):
os.makedirs(self.out_folder, mode=0755)
prefix = kwargs.get('Run_Name') or def_dat['Run_Name']
self.props['Run_Name'] = InpSpec('Run_Name', os.path.join(self.out_folder, prefix), '')
# Defining the path where to write all intermediate files () and results
self.props_file = os.path.join(self.out_folder, kwargs.get('props_file') or 'gcmc_input_file.inp')
# Molecule configuration files describing all species of the system.
# They are **absolutely** needed to start calculation
mol_files = OrderedDict()
sst_count = 0
# Setting the simulation box generating system
self.fxd_sst = fxd_sst
self.fxd_sst.center('box', [0, 0, 0], True) # the center of the box around the system should be at origin
self.fxd_sst.name = 'matrix'
self.fixed_syst_mcf_file = None
if self.fxd_sst.particles.count > 0:
            # Check a few things about the system so that CASSANDRA does not raise an exception
self.fxd_sst.zero_charge() # 1) the sum of the charges should be 0
self.fixed_syst_mcf_file = os.path.join(self.out_folder, 'fixed_syst.mcf')
mol_files['file1'] = [self.fixed_syst_mcf_file, 1]
sst_count = 1
self.tot_sst = self.fxd_sst
if not self.tot_sst.ff_class:
self.tot_sst.ff_class = '1'
else:
                if self.tot_sst.ff_class != '1':
print('CASSANDRA supports only 1-st class force fields')
exit(1)
# self.tot_sst.add(fxd_sst, change_dim=False)
# self.tot_sst.dim = fxd_sst.dim
self.mc_sst = mc_sst
if mc_sst:
mc_sst.file_store = self.out_folder
mol_files = mc_sst.update_props(mol_files)
if kwargs.get('Molecule_Files'):
mol_files = OrderedDict(sorted(kwargs.get('Molecule_Files').items()))
        # Raise an error and stop execution if no MCF information is provided in either way
if (mc_sst is None) and (not kwargs.get('Molecule_Files')):
self.logger.error('The molecular configuration files of gas molecules for simulation are not set. '
'Nothing to simulate. Exiting...')
exit(1)
n_spec = len(mol_files)
self.props['Nbr_Species'] = InpSpec('Nbr_Species', n_spec, n_spec)
self.props['Molecule_Files'] = InpSpec('Molecule_Files', mol_files, None, **{'new_line': True})
self.props['Chemical_Potential_Info'] = InpSpec('Chemical_Potential_Info', mc_sst.chem_pot,
def_dat['Chemical_Potential_Info'] * (n_spec - sst_count))
self.props['Seed_Info'] = InpSpec('Seed_Info', kwargs.get('Seed_Info'),
[random.randint(int(1e+7), int(1e+8 - 1)),
random.randint(int(1e+7), int(1e+8 - 1))])
# Simple (one-value) dynamic properties
self.props['Temperature_Info'] = InpSpec('Temperature_Info',
kwargs.get('Temperature_Info'), def_dat['Temperature_Info'])
self.props['Average_Info'] = InpSpec('Average_Info', kwargs.get('Average_Info'), def_dat['Average_Info'])
self.props['Pair_Energy'] = InpSpec('Pair_Energy', kwargs.get('Pair_Energy'), def_dat['Pair_Energy'])
self.props['Rcutoff_Low'] = InpSpec('Rcutoff_Low', kwargs.get('Rcutoff_Low'), def_dat['Rcutoff_Low'])
self.props['Mixing_Rule'] = InpSpec('Mixing_Rule', kwargs.get('Mixing_Rule'), def_dat['Mixing_Rule'])
# Multiple-value one/many line dynamic properties
self.props['Run_Type'] = InpSpec('Run_Type', kwargs.get('Run_Type'), def_dat['Run_Type'])
self.props['Charge_Style'] = InpSpec('Charge_Style', kwargs.get('Charge_Style'), def_dat['Charge_Style'])
self.props['VDW_Style'] = InpSpec('VDW_Style', kwargs.get('VDW_Style'), def_dat['VDW_Style'])
self.props['Simulation_Length_Info'] = InpSpec('Simulation_Length_Info', kwargs.get('Simulation_Length_Info'),
def_dat['Simulation_Length_Info'],
**{'write_headers': True, 'new_line': True})
self.props['CBMC_Info'] = InpSpec('CBMC_Info', kwargs.get('CBMC_Info'), def_dat['CBMC_Info'],
**{'write_headers': True, 'new_line': True})
self.props['Box_Info'] = InpSpec('Box_Info', kwargs.get('Box_Info'), def_dat['Box_Info'], **{'new_line': True})
self.props['Property_Info 1'] = InpSpec('Property_Info 1', kwargs.get('Property_Info'), None, **{'new_line': True})
# Order of the next three items is IMPORTANT! Check the CASSANDRA spec file for further info
limits = [0.3] * n_spec
if self.fxd_sst.particles.count:
limits[0] = 0
self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'),
OrderedDict([('tot_prob', 0.25),
('limit_vals', limits)]),
**{'new_line': True, 'indicator': 'start'})
tps = ['cbmc'] * n_spec
if self.fxd_sst.particles.count:
tps[0] = 'none'
self.props['Prob_Insertion'] = InpProbSpec('Prob_Insertion', kwargs.get('Prob_Insertion'),
OrderedDict([('tot_prob', 0.25), ('types', tps)]),
**{'new_line': True})
max_ang = [180] * n_spec
if self.fxd_sst.particles.count:
max_ang[0] = 0
self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'),
OrderedDict([('tot_prob', 0.25), ('limit_vals', max_ang)]),
**{'new_line': True})
self.props['Prob_Deletion'] = InpProbSpec('Prob_Deletion',
kwargs.get('Prob_Deletion'), 0.25, **{'indicator': 'end'})
# Synchronzing "start type" .inp record
self.fxd_sst_xyz = ''
pops_list = [0] * n_spec
start_type = 'make_config'
if self.fxd_sst.particles.count:
pops_list[0] = 1
self.fxd_sst_xyz = os.path.join(self.out_folder, 'fixed_syst.xyz')
start_type = 'read_config'
start_conf_dict = OrderedDict([('start_type', start_type), ('species', pops_list),
('file_name', self.fxd_sst_xyz)])
self.props['Start_Type'] = InpSpec('Start_Type', kwargs.get('Start_Type'), start_conf_dict)
        # Synchronizing Fragment files:
frag_files = OrderedDict()
if mc_sst:
mc_sst.temperature = self.props['Temperature_Info'].value
frag_files = mc_sst.update_frag_record(frag_files)
if kwargs.get('Fragment_Files'):
frag_files = OrderedDict(sorted(kwargs.get('Fragment_Files').items()))
if (mc_sst is None) and (not kwargs.get('Fragment_Files')):
self.logger.error('Cannot set the fragment files of gas molecules for simulation')
exit(1)
self.props['Fragment_Files'] = InpSpec('Fragment_Files', frag_files, None, **{'new_line': True})
def write(self):
"""pysimm.cassandra.GCMC
Iterates through the `~GCMC.props` dictionary creating the text for correct CASSANDRA input
"""
for key in self.props.keys():
if self.props[key].value is not None:
self.input += '{:}\n'.format(self.props[key].to_string())
self.input += '\nEND'
# Initializing output stream
self.logger.info('Writing CASSANDRA .inp file to "{:}"...'.format(self.props_file))
out_stream = open(self.props_file, 'w')
out_stream.write('{:}'.format(self.input))
out_stream.close()
        self.logger.info('File: "{:}" was created successfully'.format(self.props_file))
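    # Illustrative usage sketch (assumptions: `fixed` is a pysimm System used as the rigid
    # matrix and `gas` is a McSystem with chemical potentials, both prepared elsewhere; the
    # keyword names mirror the InpSpec records created in __init__):
    #
    #     gcmc = GCMC(mc_sst=gas, fxd_sst=fixed, out_folder='gcmc_run',
    #                 **{'Run_Name': 'co2_ads', 'Temperature_Info': 300})
    #     gcmc.write()   # renders gcmc_run/gcmc_input_file.inp from gcmc.props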
def __write_chk__(self, out_file):
"""pysimm.cassandra.__write_chk__
        Creates the CASSANDRA checkpoint file based on the information from the `~GCMC.tot_sst` field
"""
# Initializing output stream
if out_file == 'string':
out_stream = StringIO()
else:
out_stream = open(out_file, 'w+')
blk_separ = ' {:*^75}\n'
# Writing Translation/rotation/... info
out_stream.write(blk_separ.format('Translation,rotation, dihedral, angle distortion'))
tmplate = '{t[0]$$}{t[1]$$}{t[2]$$}{t[3]$$}{t[4]$$}\n'
molecules = self.props['Molecule_Files'].value
for m, i in zip(molecules, range(len(molecules))):
out_stream.write(tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0]))
out_stream.write(tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0]))
# TODO: There are some nonzeros in example .chk file for index 2; check where they come from
            out_stream.write('{t[0]:>23.14E}{t[1]:>23.14E}{t[2]:>23.14E}\n'.format(t=[0, 0, 0]))
out_stream.write('{0:>12d}{0:>12d}\n'.format(0, 0))
        # Small section with the total # of MC trials -- it is 0 at the beginning
out_stream.write(blk_separ.format('# of MC steps'))
out_stream.write('{:>12d}\n'.format(0))
# Writing Box-info information
out_stream.write(blk_separ.format('Box info'))
tmp = self.props['Box_Info'].value['box_size']
x, y, z = 0, 0, 0
bx_type = None
if isinstance(tmp, types.ListType):
if len(tmp) > 3:
x, y, z = tmp[0], tmp[1], tmp[2]
elif isinstance(tmp, int) or isinstance(tmp, float):
x, y, z = tmp, tmp, tmp
else:
exit(0)
# First 0 here correspond to the # of trials
out_stream.write('{0:>12d}\n{1:<18.10f}\n{2:}\n'.format(0, x * y * z, self.props['Box_Info'].value['box_type']))
tmpl = '{t[0]&&}{t[1]&&}{t[2]&&}\n'
tmp = np.diag([x, y, z])
for lines in tmp:
out_stream.write((tmpl.replace('&&', ':^22.14f')).format(t=lines))
tmp = np.diag([1 / x, 1 / y, 1 / z])
for lines in tmp:
out_stream.write((tmpl.replace('&&', ':^22.8f')).format(t=lines))
out_stream.write('{:>18.12f}\n'.format(0))
# Creating seeds
out_stream.write(blk_separ.format('SEEDS'))
out_stream.write('{t[0]:>12d}{t[1]:>12d}{t[2]:>12d}\n{t[3]:>12d}{t[4]:>12d}\n'.format(
            t=np.random.random_integers(int(1e+7), int(1e+8 - 1), 5)))
* j, 0), (step_col * j, self.size()[1]))))
j += 1
return lineFS
def logicalAND(self, img, grayscale=True):
if not self.size() == img.size():
print("Both images must have same sizes")
return None
try:
import cv2
except ImportError:
print("This function is available for OpenCV >= 2.3")
if grayscale:
retval = cv2.bitwise_and(self.gray_narray,
img.gray_narray)
else:
retval = cv2.bitwise_and(self.cvnarray, img.cvnarray)
return Image(retval, cv2image=True)
def logicalNAND(self, img, grayscale=True):
if not self.size() == img.size():
print("Both images must have same sizes")
return None
try:
import cv2
except ImportError:
print("This function is available for OpenCV >= 2.3")
if grayscale:
retval = cv2.bitwise_and(self.gray_narray,
img.gray_narray)
else:
retval = cv2.bitwise_and(self.cvnarray, img.cvnarray)
retval = cv2.bitwise_not(retval)
return Image(retval, cv2image=True)
def logicalOR(self, img, grayscale=True):
if not self.size() == img.size():
print("Both images must have same sizes")
return None
try:
import cv2
except ImportError:
print("This function is available for OpenCV >= 2.3")
if grayscale:
retval = cv2.bitwise_or(self.gray_narray,
img.gray_narray)
else:
retval = cv2.bitwise_or(self.cvnarray, img.cvnarray)
return Image(retval, cv2image=True)
def logicalXOR(self, img, grayscale=True):
if not self.size() == img.size():
print("Both images must have same sizes")
return None
try:
import cv2
except ImportError:
print("This function is available for OpenCV >= 2.3")
if grayscale:
retval = cv2.bitwise_xor(self.gray_narray,
img.gray_narray)
else:
retval = cv2.bitwise_xor(self.cvnarray, img.cvnarray)
return Image(retval, cv2image=True)
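    # Illustrative sketch (assumption: `img_a` and `img_b` are same-sized Image instances):
    # the four logical* helpers wrap cv2 bitwise operations, so binary masks can be
    # combined directly:
    #
    #     overlap   = img_a.logicalAND(img_b)    # pixels set in both
    #     either    = img_a.logicalOR(img_b)     # pixels set in either
    #     exclusive = img_a.logicalXOR(img_b)    # pixels set in exactly one
    #     not_both  = img_a.logicalNAND(img_b)   # complement of the AND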
def matchSIFTKeyPoints(self, template, quality=200):
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.4.3 required")
return None
if not hasattr(cv2, "FeatureDetector_create"):
warnings.warn("OpenCV >= 2.4.3 required")
return None
if template is None:
return None
detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")
img = self.cvnarray
template_img = template.cvnarray
skp = detector.detect(img)
skp, sd = descriptor.compute(img, skp)
tkp = detector.detect(template_img)
tkp, td = descriptor.compute(template_img, tkp)
idx, dist = self._get_FLANN_matches(sd, td)
dist = dist[:, 0] / 2500.0
dist = dist.reshape(-1, ).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
sfs = []
for i, dis in itertools.izip(idx, dist):
if dis < quality:
sfs.append(KeyPoint(template, skp[i], sd, "SIFT"))
else:
break # since sorted
idx, dist = self._get_FLANN_matches(td, sd)
dist = dist[:, 0] / 2500.0
dist = dist.reshape(-1, ).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
tfs = []
for i, dis in itertools.izip(idx, dist):
if dis < quality:
tfs.append(KeyPoint(template, tkp[i], td, "SIFT"))
else:
break
return sfs, tfs
def drawSIFTKeyPointMatch(self, template, distance=200, num=-1, width=1):
if template is None:
return
        resultImg = template.side_by_side(self, scale=False)
hdif = (self.height - template.height) / 2
sfs, tfs = self.matchSIFTKeyPoints(template, distance)
maxlen = min(len(sfs), len(tfs))
if num < 0 or num > maxlen:
num = maxlen
for i in range(num):
skp = sfs[i]
tkp = tfs[i]
pt_a = (int(tkp.y), int(tkp.x) + hdif)
pt_b = (int(skp.y) + template.width, int(skp.x))
            resultImg.drawLine(pt_a, pt_b, color=Color.random(),
thickness=width)
return resultImg
def stega_encode(self, message):
try:
import stepic
except ImportError:
logger.warning("stepic library required")
return None
warnings.simplefilter("ignore")
pilImg = PILImage.frombuffer("RGB", self.size(), self.toString())
stepic.encode_inplace(pilImg, message)
ret = Image(pilImg)
return ret.flip_vertical()
def stega_decode(self):
try:
import stepic
except ImportError:
logger.warning("stepic library required")
return None
warnings.simplefilter("ignore")
pilImg = PILImage.frombuffer("RGB", self.size(), self.toString())
result = stepic.decode(pilImg)
return result
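    # Illustrative round trip (assumptions: the stepic package is installed and `img` is an
    # Image instance large enough to hold the payload):
    #
    #     secret = img.stega_encode("hello")
    #     message = secret.stega_decode()   # expected to recover "hello"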
def find_features(self, method="szeliski", threshold=1000):
try:
import cv2
except ImportError:
logger.warning("OpenCV >= 2.3.0 required")
return None
img = self.gray_narray
blur = cv2.GaussianBlur(img, (3, 3), 0)
Ix = cv2.Sobel(blur, cv2.CV_32F, 1, 0)
Iy = cv2.Sobel(blur, cv2.CV_32F, 0, 1)
Ix_Ix = np.multiply(Ix, Ix)
Iy_Iy = np.multiply(Iy, Iy)
Ix_Iy = np.multiply(Ix, Iy)
Ix_Ix_blur = cv2.GaussianBlur(Ix_Ix, (5, 5), 0)
Iy_Iy_blur = cv2.GaussianBlur(Iy_Iy, (5, 5), 0)
Ix_Iy_blur = cv2.GaussianBlur(Ix_Iy, (5, 5), 0)
harris_thresh = threshold * 5000
alpha = 0.06
detA = Ix_Ix_blur * Iy_Iy_blur - Ix_Iy_blur ** 2
traceA = Ix_Ix_blur + Iy_Iy_blur
feature_list = []
if method == "szeliski":
harmonic_mean = detA / traceA
for j, i in np.argwhere(harmonic_mean > threshold):
feature_list.append(
Feature(self, i, j, ((i, j), (i, j), (i, j), (i, j))))
elif method == "harris":
harris_function = detA - (alpha * traceA * traceA)
for j, i in np.argwhere(harris_function > harris_thresh):
feature_list.append(
Feature(self, i, j, ((i, j), (i, j), (i, j), (i, j))))
else:
logger.warning("Invalid method.")
return None
return feature_list
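    # Illustrative sketch (assumption: `img` is an Image instance): the detector builds the
    # structure tensor from blurred Sobel gradients and thresholds either the harmonic mean
    # of its products (method="szeliski") or the Harris response
    # det(A) - alpha * trace(A)^2 (method="harris"):
    #
    #     corners = img.find_features(method="harris", threshold=1000)
    #     if corners:
    #         print(len(corners), "corner-like features found")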
def watershed(self, mask=None, erode=2, dilate=2, useMyMask=False):
try:
import cv2
except ImportError:
logger.warning("OpenCV >= 2.3.0 required")
return None
output = self.zeros(3)
if mask is None:
mask = self.binarize().invert()
newmask = None
if (not useMyMask):
newmask = Image((self.width, self.height))
            newmask = newmask.floodFill((0, 0), color=Color.WATERSHED_BG)
newmask = (newmask - mask.dilate(dilate) + mask.erode(erode))
else:
newmask = mask
m = np.int32(newmask.gray_narray)
cv2.watershed(self.cvnarray, m)
m = cv2.convertScaleAbs(m)
        ret, thresh = cv2.threshold(m, 0, 255, cv2.THRESH_OTSU)
ret = Image(thresh, cv2image=True)
return ret
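    # Hedged usage sketch, kept as a comment. By default the marker mask is
    # derived from self.binarize().invert(); pass your own mask with
    # useMyMask=True to skip that step. The file name below is hypothetical.
    #
    #     img = Image("coins.png")
    #     segmented = img.watershed(erode=2, dilate=2)
    #     blobs = img.findBlobsFromWatershed(minsize=50)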
def findBlobsFromWatershed(self, mask=None, erode=2, dilate=2,
useMyMask=False, invert=False, minsize=20,
maxsize=None):
newmask = self.watershed(mask, erode, dilate, useMyMask)
        if (invert):
            newmask = newmask.invert()
return self.find_blobs_from_mask(newmask, minsize=minsize, maxsize=maxsize)
def maxValue(self, locations=False):
if (locations):
val = np.max(self.gray_narray)
x, y = np.where(self.gray_narray == val)
locs = zip(x.tolist(), y.tolist())
return int(val), locs
else:
val = np.max(self.gray_narray)
return int(val)
def minValue(self, locations=False):
if (locations):
val = np.min(self.gray_narray)
x, y = np.where(self.gray_narray == val)
locs = zip(x.tolist(), y.tolist())
return int(val), locs
else:
val = np.min(self.gray_narray)
return int(val)
def findKeypointClusters(self, num_of_clusters=5, order='dsc',
flavor='surf'):
if flavor.lower() == 'corner':
keypoints = self.find_corners() # fallback to corners
else:
keypoints = self.find_keypoints(
flavor=flavor.upper()) # find the keypoints
        if keypoints is None or len(keypoints) == 0:
return None
xypoints = np.array([(f.x, f.y) for f in keypoints])
xycentroids, xylabels = scv2.kmeans2(xypoints,
num_of_clusters) # find the clusters of keypoints
xycounts = np.array([])
for i in range(
num_of_clusters): # count the frequency of occurences for sorting
xycounts = np.append(xycounts, len(np.where(xylabels == i)[-1]))
merged = np.msort(np.hstack(
(np.vstack(xycounts), xycentroids))) # sort based on occurence
clusters = [c[1:] for c in
merged] # strip out just the values ascending
if order.lower() == 'dsc':
clusters = clusters[::-1] # reverse if descending
fs = FeatureSet()
for x, y in clusters: # map the values to a features set
f = Corner(self, x, y)
fs.append(f)
return fs
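    # The clustering step above uses a SciPy-style kmeans2 call, which returns
    # one centroid per cluster and a label per input point. A minimal
    # standalone sketch of that call on synthetic (x, y) keypoint coordinates:
    #
    #     import numpy as np
    #     from scipy.cluster.vq import kmeans2
    #     pts = np.random.rand(100, 2) * 640        # fake keypoint coordinates
    #     centroids, labels = kmeans2(pts, 5)
    #     counts = [int(np.sum(labels == i)) for i in range(5)]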
def getFREAKDescriptor(self, flavor="SURF"):
try:
import cv2
except ImportError:
warnings.warn("OpenCV version >= 2.4.2 requierd")
return None
if cv2.__version__.startswith('$Rev:'):
warnings.warn("OpenCV version >= 2.4.2 requierd")
return None
if int(cv2.__version__.replace('.', '0')) < 20402:
warnings.warn("OpenCV version >= 2.4.2 requierd")
return None
flavors = ["SIFT", "SURF", "BRISK", "ORB", "STAR", "MSER", "FAST",
"Dense"]
if flavor not in flavors:
warnings.warn("Unkown Keypoints detector. Returning None.")
return None
detector = cv2.FeatureDetector_create(flavor)
extractor = cv2.DescriptorExtractor_create("FREAK")
self._keypoints = detector.detect(self.gray_narray)
self._keypoints, self._kp_descriptors = extractor.compute(
self.gray_narray,
self._keypoints)
fs = FeatureSet()
for i in range(len(self._keypoints)):
fs.append(
KeyPoint(self, self._keypoints[i], self._kp_descriptors[i],
flavor))
return fs, self._kp_descriptors
def get_gray_histogram_counts(self, bins=255, limit=-1):
hist = self.histogram(bins)
vals = [(e, h) for h, e in enumerate(hist)]
vals.sort()
vals.reverse()
if limit == -1:
limit = bins
return vals[:limit]
def gray_peaks(self, bins=255, delta=0, lookahead=15):
y_axis, x_axis = np.histogram(self.gray_narray, bins=range(bins + 2))
x_axis = x_axis[0:bins + 1]
maxtab = []
mintab = []
length = len(y_axis)
if x_axis is None:
x_axis = range(length)
# perform some checks
if length != len(x_axis):
raise ValueError("Input vectors y_axis and x_axis must have same length")
if lookahead < 1:
raise ValueError("Lookahead must be above '1' in value")
if not (np.isscalar(delta) and delta >= 0):
raise ValueError("delta must be a positive number")
# needs to be a numpy array
y_axis = np.asarray(y_axis)
# maxima and minima candidates are temporarily stored in
# mx and mn respectively
mn, mx = np.Inf, -np.Inf
# Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(
zip(x_axis[:-lookahead], y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx - delta and mx != np.Inf:
# Maxima peak candidate found
# look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index + lookahead].max() < mx:
maxtab.append((mxpos, mx))
# set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
if y > mn + delta and mn != -np.Inf:
# Minima peak candidate found
# look ahead in signal to ensure that this is a peak and | |
'''
Created on Dec 10, 2014
@author: Dxmahata
'''
__author__ = "<NAME>"
import requests
import sys
import json
API_REQUEST_COUNT = 0
#Base Url for calling Seen API
BASE_URL = "http://api.seen.co/v0.1"
#Setting up the endpoints
ENDPOINTS = {}
#setting up events endpoint
ENDPOINTS["events"] = {}
#setting up endpoint for searching events from the Seen.co event database
ENDPOINTS["events"]["search"] = "/events/search"
#setting up endpoint for requesting events of type "popular"
ENDPOINTS["events"]["popular"] = "/events/popular"
#setting up endpoint for requesting events of type "recent"
ENDPOINTS["events"]["recent"] = "/events/recent"
#setting up endpoint for requesting events of type "happening"
ENDPOINTS["events"]["happening"] = "/events/happening"
#setting up endpoint for requesting events of type "mine"
ENDPOINTS["events"]["mine"] = "/events/mine"
#setting up create endpoint
ENDPOINTS["create"] = "/create"
#setting up event endpoint
ENDPOINTS["event"] = "/event"
API_KEY = ""
def setApiKey():
"""It loads the API key from api_key.txt"""
global API_KEY
try:
fp = open("api_key.txt")
API_KEY = fp.readline()
if API_KEY == "":
print("The api_key.txt file appears to be blank")
print("If you do not have an API Key from Seen.co, please get it from: http://www.developer.seen.co")
sys.exit(0)
fp.close()
except IOError:
print('API Key not found! Please create and fill up api_key.txt file in the same directory which contains the SeenAPI module')
print('If you do not have an API Key from Seen.co, please get it from: http://www.developer.seen.co')
sys.exit(0)
except Exception as e:
print(e)
def getEvents(flavor,query="",limit=10):
"""
flavor can be any one of the following:
1. search
2. popular
3. recent
4. happening
5. mine
1. search:
/events/search/:keyword
Search events by keyword(s) or hashtag (no need to add #).
Parameter Description
keywords required Space-separated list of keywords to search for.
limit optional Maximum number of results; default = 10, max = 20.
Request example:
http://api.seen.co/v0.1/events/search/electric%20zoo%20festival%20ezoo5?api_key=YOUR_API_KEY
2. All other types: popular, recent, happening, mine (request_type)
/events/:request_type
Returns a list of events based on the requested type from the list below
Type Description
popular A set of popular events on Seen, based on our algorithm
recent Events from the last two weeks
happening Events ongoing at the time of the call
upcoming Events that have not yet started
mine Events created by the developer associated with the provided key
Parameter Description
type required One type from the list above
limit optional Maximum number of results; default = 10, max = 20.
Request example:
http://api.seen.co/v0.1/events/popular?api_key=YOUR_API_KEY
"""
global API_REQUEST_COUNT
setApiKey()
#Make sure this request supports this flavor
if flavor not in ENDPOINTS['events']:
return { 'status':'ERROR', 'statusInfo':'events for the ' + flavor + ' is not available. Please enter one of these 1.search, 2.popular, 3.recent, 4.happening, 5.mine' }
if flavor == "search" and query == "":
print "Please provide a query string in the getEvents(flavor,query="",options={}) parameter"
sys.exit(0)
payload = {}
payload["api_key"] = API_KEY
payload["limit"] = limit
getUrl = BASE_URL+ENDPOINTS["events"][flavor]+"/"+str(query)
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT +=1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
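# Hedged usage sketch, kept as a comment so importing this module has no side
# effects; it assumes a valid api_key.txt next to this module and network
# access to api.seen.co.
#
#     popular = getEvents("popular", limit=5)
#     ezoo = getEvents("search", query="electric zoo festival")
#     if popular.get("status") != "ERROR":
#         print(popular)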
def createEvent(keywords,options={}):
"""
POST /create/:keywords
This endpoint lets you enter new events in our system. Once an event has been successfully created, you will need to wait a few minutes in order for our engine to crawl all tweets and images and analyze them. Please consider that it could take a few minutes (depending on the number of events currently tracked) for the data to be updated.
Every developer can create a maximum of 5 events an hour, and every event can have a maximum duration of two days. You can create events for past time periods but the results will vary - anything past 7 days will have incomplete tweets and limited Instagram photos.
Parameter Description
keywords required Spaces-separated list of hashtags that define the event. These will be treated as hashtags, no need to add a # prefix. Please consider that our engine will consider all tweets/instagrams containing either one of the specified keywords ("OR semantics"). We will drop any event that is tracking non-specific hashtags, like #fun, #happiness, #love or other joyful or volume-heavy tags.
start_date optional Start date (UTC) of the event in the '%FT%T' format (yyyy-mm-ddThh:mm:ss)
end_date optional End date (UTC) of the event in the '%FT%T' format (yyyy-mm-ddThh:mm:ss)
tz optional The UTC offset (timzone) for the event location, e.g. "+2.0". Defaults to "-5.0".
location optional Free-text string, the venue/location of the event take place, e.g. New York, NY.
POST Request example:
POST http://api.seen.co/v0.1/create/hackny?api_key=YOUR_API_KEY&start_time=2013-09-27T13:00:00&end_time=2013-09-28T15:00:00&title=Hack%20NY&location=New%20York,%20NY&tz=-5.0
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
for option in options:
payload[option] = options[option]
postUrl = BASE_URL+ENDPOINTS["create"]+"/"+str(keywords)
try:
r = requests.post(postUrl,params=payload)
        print(r.url)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
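# Hedged usage sketch, kept as a comment. keywords is a space-separated list of
# hashtags; everything in options is forwarded verbatim as query parameters.
# The values below are illustrative only and follow the parameter names
# documented above.
#
#     response = createEvent("hackny", options={
#         "start_date": "2014-12-10T13:00:00",
#         "end_date": "2014-12-11T15:00:00",
#         "tz": "-5.0",
#         "location": "New York, NY",
#     })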
def getEventDescription(eventLocator):
"""
/event/:locator
Retrieve an event's descriptive data and statistics by its locator. If successful, the response will contain an Event object.
Request example:
http://api.seen.co/v0.1/event/hackny-hackathon-nyu-courant-institute-new-york-ny-2012-3441?api_key=YOUR_API_KEY
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def getEventHighlights(eventLocator):
"""
/event/:locator/highlights
Retrieve an array of highlights for the event. Request example:
http://api.seen.co/v0.1/event/hackny-hackathon-nyu-courant-institute-new-york-ny-2012-3441/highlights?api_key=YOUR_API_KEY
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/highlights"
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def getEventHighlightObject(eventLocator,objectId):
"""
/event/:locator/highlight/:id
Returns a specific highlight object by its id.
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/highlight/"+str(objectId)
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def getEventRecentHighlights(eventLocator):
"""
/event/:locator/highlights/recent
Returns the most recent highlight objects for this event.
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/highlights/recent"
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def getEventHistogram(eventLocator):
"""
/event/:locator/histogram
Returns an array of timelights, each of them contains statistics about a particular time bucket, different timelights' time ranges don't overlap. This is the typical schema for a timelight:
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/histogram"
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def searchEventByTime(eventLocator,startTime,endTime):
"""
/event/:locator/search/:search_type
    You can use this endpoint for retrieving an event's items according to their publishing time. It returns an array of items. Note that the response is capped at 1000 items.
    Search type    Description
    /time/:start/:end    Specify start and end times in the "%FT%T%z" format (yyyy-mm-ddThh:mm:ss+hh:mm).
Query example:
http://api.seen.co/v0.1/event/electric-run-new-york-brooklyn-ny-2013-9865/search/time/2013-09-27T23:00:00-04:00/2013-09-28T01:00:00-04:00/?api_key=YOUR_API_KEY
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/search/time/"+str(startTime)+"/"+str(endTime)
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
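# Hedged sketch, kept as a comment, of the timestamp format the endpoint above
# expects; the event locator is taken from the docstring example.
#
#     start = "2013-09-27T23:00:00-04:00"
#     end = "2013-09-28T01:00:00-04:00"
#     items = searchEventByTime("electric-run-new-york-brooklyn-ny-2013-9865",
#                               start, end)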
def searchEventByTerm(eventLocator,terms):
"""
/event/:locator/search/:search_type
    You can use this endpoint for retrieving an event's items according to their contained terms. It returns an array of items. Note that the response is capped at 1000 items.
Search type Description
/terms/:terms :terms is a comma-separated list of terms.
Query example:
http://api.seen.co/v0.1/event/electric-run-new-york-brooklyn-ny-2013-9865/search/terms/brooklyn?api_key=YOUR_API_KEY
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/search/terms/"+str(terms)
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def getEventTopUsers(eventLocator):
"""
/event/:locator/top_users
Retrieves the top users for this event. Response example:
"""
global API_REQUEST_COUNT
setApiKey()
payload = {}
payload["api_key"] = API_KEY
getUrl = BASE_URL+ENDPOINTS["event"]+"/"+str(eventLocator)+"/top_users/"
try:
r = requests.get(getUrl,params=payload)
API_REQUEST_COUNT += 1
return json.loads(r.text)
except Exception as e:
print("Error for URL: ", r.url)
print(e)
return { 'status':'ERROR', 'statusInfo':r.status_code }
def getEventTopItems(eventLocator):
"""/event/:locator/top_items
Returns an array containing this event's items. with the | |
import pandas as pd
import numpy as np
import h5py
import os
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from pandas_plink import read_plink1_bin
from ..utils import helper_functions
from . import encoding_functions as enc
def prepare_data_files(data_dir: str, genotype_matrix_name: str, phenotype_matrix_name: str, phenotype: str,
datasplit: str, n_outerfolds: int, n_innerfolds: int, test_set_size_percentage: int,
val_set_size_percentage: int, models, user_encoding: str, maf_percentage: int):
"""
Prepare all data files for a common format: genotype matrix, phenotype matrix and index file.
First check if genotype file is .h5 file (standard format of this framework):
- YES: First check if all required information is present in the file, raise Exception if not. Then check if index file exists:
- NO: Load genotype and create all required index files
- YES: Append all required data splits and maf-filters to index file
- NO: Load genotype and create all required files
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:param datasplit: datasplit to use. Options are: nested-cv, cv-test, train-val-test
:param n_outerfolds: number of outerfolds relevant for nested-cv
:param n_innerfolds: number of folds relevant for nested-cv and cv-test
:param test_set_size_percentage: size of the test set relevant for cv-test and train-val-test
:param val_set_size_percentage: size of the validation set relevant for train-val-test
:param models: models to consider
:param user_encoding: encoding specified by the user
:param maf_percentage: threshold for MAF filter as percentage value
"""
print('Check if all data files have the required format')
if os.path.isfile(data_dir + '/' + genotype_matrix_name.split('.')[0] + '.h5') and \
(genotype_matrix_name.split('.')[-1] != 'h5'):
print("Found same file name with ending .h5")
print("Assuming that the raw file was already prepared using our pipepline. Will continue with the .h5 file.")
genotype_matrix_name = genotype_matrix_name.split('.')[0] + '.h5'
suffix = genotype_matrix_name.split('.')[-1]
if suffix in ('h5', 'hdf5', 'h5py'):
# Genotype matrix has standard file format -> check information in the file
check_genotype_h5_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
encodings=enc.get_encoding(models=models, user_encoding=user_encoding))
print('Genotype file available in required format, check index file now.')
# Check / create index files
if check_index_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype):
print('Index file ' + genotype_matrix_name.split('.')[0] + '-'
+ phenotype_matrix_name.split('.')[0] + '-' + phenotype + '.h5' + ' already exists.'
' Will append required filters and data splits now.')
append_index_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
maf_percentage=maf_percentage,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage)
print('Done checking data files. All required datasets are available.')
else:
print('Index file ' + genotype_matrix_name.split('.')[0] + '-' + phenotype_matrix_name.split('.')[0]
+ '-' + phenotype + '.h5' + ' does not fulfill requirements. '
'Will load genotype and phenotype matrix and create new index file.')
save_all_data_files(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
models=models, user_encoding=user_encoding, maf_percentage=maf_percentage,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage)
print('Done checking data files. All required datasets are available.')
else:
print('Genotype file not in required format. Will load genotype matrix and save as .h5 file. Will also create '
'required index file.')
save_all_data_files(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
models=models, user_encoding=user_encoding, maf_percentage=maf_percentage,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage)
print('Done checking data files. All required datasets are available.')
def check_genotype_h5_file(data_dir: str, genotype_matrix_name: str, encodings: list):
"""
Check .h5 genotype file. Should contain:
- sample_ids: vector with sample names of genotype matrix,
- snp_ids: vector with SNP identifiers of genotype matrix,
- X_{enc}: (samples x SNPs)-genotype matrix in enc encoding, where enc might refer to:
- '012': additive (number of minor alleles)
- 'raw': raw (alleles)
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the phenotype matrix including datatype ending
:param encodings: list of needed encodings
"""
with h5py.File(data_dir + '/' + genotype_matrix_name, "r") as f:
keys = list(f.keys())
if {'sample_ids', 'snp_ids'}.issubset(keys):
# check if required encoding is available or can be created
for elem in encodings:
if f'X_{elem}' not in f and f'X_{enc.get_base_encoding(encoding=elem)}' not in f:
raise Exception('Genotype in ' + elem + ' encoding missing. Can not create required encoding. '
'See documentation for help')
else:
            raise Exception('sample_ids and/or snp_ids are missing in ' + genotype_matrix_name)
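# A minimal sketch of an .h5 genotype file that satisfies the checks above; it
# only needs 'sample_ids', 'snp_ids' and at least one 'X_{encoding}' dataset.
# The file name and toy values are illustrative only.
#
#     import h5py
#     import numpy as np
#     with h5py.File('toy_genotype.h5', 'w') as f:
#         f.create_dataset('sample_ids', data=np.array([b'S1', b'S2', b'S3']))
#         f.create_dataset('snp_ids', data=np.array([b'snp1', b'snp2']))
#         f.create_dataset('X_012', data=np.array([[0, 1], [2, 0], [1, 1]]))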
def check_index_file(data_dir: str, genotype_matrix_name: str, phenotype_matrix_name: str, phenotype: str) -> bool:
"""
Check if index file is available and if the datasets 'y', 'matched_sample_ids', 'X_index', 'y_index' and
'ma_frequency' exist.
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:return: bool reflecting check result
"""
index_file = data_dir + '/' + genotype_matrix_name.split('.')[0] + '-' + phenotype_matrix_name.split('.')[0] \
+ '-' + phenotype + '.h5'
if os.path.isfile(index_file):
matched_datasets = ['y', 'matched_sample_ids', 'X_index', 'y_index', 'non_informative_filter', 'ma_frequency']
with h5py.File(index_file, 'a') as f:
if 'matched_data' in f and all(z in f['matched_data'] for z in matched_datasets):
return True
else:
return False
else:
return False
def save_all_data_files(data_dir: str, genotype_matrix_name: str, phenotype_matrix_name: str, phenotype: str,
models, user_encoding: str, maf_percentage: int,
datasplit: str, n_outerfolds: int, n_innerfolds: int,
test_set_size_percentage: int, val_set_size_percentage: int):
"""
Prepare and save all required data files:
- genotype matrix in unified format as .h5 file with,
- phenotype matrix in unified format as .csv file,
- file containing maf filter and data split indices as .h5
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param phenotype_matrix_name: name of the phenotype matrix including datatype ending
:param phenotype: name of the phenotype to predict
:param models: models to consider
:param user_encoding: encoding specified by the user
:param maf_percentage: threshold for MAF filter as percentage value
:param datasplit: datasplit to use. Options are: nested-cv, cv-test, train-val-test
:param n_outerfolds: number of outerfolds relevant for nested-cv
:param n_innerfolds: number of folds relevant for nested-cv and cv-test
:param test_set_size_percentage: size of the test set relevant for cv-test and train-val-test
:param val_set_size_percentage: size of the validation set relevant for train-val-test
"""
print('Load genotype file ' + data_dir + '/' + genotype_matrix_name)
X, X_ids = check_transform_format_genotype_matrix(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
models=models, user_encoding=user_encoding)
print('Have genotype matrix. Load phenotype ' + phenotype + ' from ' + data_dir + '/' + phenotype_matrix_name)
y = check_and_load_phenotype_matrix(data_dir=data_dir,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype)
print('Have phenotype vector. Start matching genotype and phenotype.')
X, y, sample_ids, X_index, y_index = genotype_phenotype_matching(X=X, X_ids=X_ids, y=y)
print('Done matching genotype and phenotype. Create index file now.')
create_index_file(data_dir=data_dir, genotype_matrix_name=genotype_matrix_name,
phenotype_matrix_name=phenotype_matrix_name, phenotype=phenotype,
datasplit=datasplit, n_outerfolds=n_outerfolds, n_innerfolds=n_innerfolds,
test_set_size_percentage=test_set_size_percentage,
val_set_size_percentage=val_set_size_percentage,
maf_percentage=maf_percentage, X=X, y=y, sample_ids=sample_ids, X_index=X_index, y_index=y_index
)
def check_transform_format_genotype_matrix(data_dir: str, genotype_matrix_name: str, models, user_encoding: str) \
-> (np.array, np.array):
"""
Check the format of the specified genotype matrix.
Unified genotype matrix will be saved in subdirectory data and named NAME_OF_GENOTYPE_MATRIX.h5
Unified format of the .h5 file of the genotype matrix required for the further processes:
- mandatory:
- sample_ids: vector with sample names of genotype matrix,
        - snp_ids: vector with SNP identifiers of genotype matrix,
- X_{enc}: (samples x SNPs)-genotype matrix in enc encoding, where enc might refer to:
- '012': additive (number of minor alleles)
- 'raw': raw (alleles)
- optional: genotype in additional encodings
Accepts .h5, .hdf5, .h5py, .csv, PLINK binary and PLINK files. .h5, .hdf5, .h5py files must satisfy the unified
format. If the genotype matrix contains constant SNPs, those will be removed and a new file will be saved.
Will open .csv, PLINK and binary PLINK files and generate required .h5 format.
:param data_dir: data directory where the phenotype and genotype matrix are stored
:param genotype_matrix_name: name of the genotype matrix including datatype ending
:param models: models to consider
:param user_encoding: encoding specified by the user
:return: genotype matrix (raw encoded if present, 012 encoded otherwise) and sample ids
"""
suffix = genotype_matrix_name.split('.')[-1]
encoding = enc.get_encoding(models=models, user_encoding=user_encoding)
if suffix in ('h5', 'hdf5', 'h5py'):
with h5py.File(data_dir + '/' + genotype_matrix_name, "r") as f:
sample_ids = f['sample_ids'][:].astype(str)
if 'X_raw' in f:
X = f['X_raw'][:]
elif 'X_012' in f:
X = f['X_012'][:]
else:
if suffix == 'csv':
sample_ids, snp_ids, X = check_genotype_csv_file(data_dir=data_dir,
genotype_matrix_name=genotype_matrix_name,
encodings=encoding)
elif suffix in ('bed', 'bim', 'fam'):
sample_ids, snp_ids, | |
<filename>backup.py<gh_stars>1-10
#!/usr/bin/env python
# (c) Copyright 2017 <NAME>
#
# Licensed under the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
A micro backup manager, designed to be lightly configurable, simple and
unobtrusive. Useful for maintaining lightweight backups.
Maintained at https://github.com/jonsim/tiny-backup
"""
import argparse # ArgumentParser
import ConfigParser # SafeConfigParser
import os # makedirs
import os.path # exists, isfile, isdir, expanduser
import subprocess # check_output
import shutil # rmtree
import sys # stdout
import tempfile # mkdtemp
__version__ = '1.0.0'
DEST_KEY = 'dest'
SRC_KEY = 'src'
ARCHIVE_KEY = 'archive'
COMPRESS_KEY = 'compress'
ENCRYPT_KEY = 'encrypt'
DEFAULTS = {
ARCHIVE_KEY: 'no',
COMPRESS_KEY: 'no',
ENCRYPT_KEY: 'no',
}
_TEMPDIR = None
def make_tempdir():
"""
Retrieves a temporary directory, creating it if necessary.
Returns:
string path to a temporary directory.
"""
global _TEMPDIR
if not _TEMPDIR:
_TEMPDIR = tempfile.mkdtemp()
return _TEMPDIR
def archive_path(dest, src, excludes=None, verbose=False):
"""
Packs a file or directory into a .tar archive.
Args:
dest: string path for the destination file for the archive. Must
end with '.tar'.
src: string path for the source file or directory for the
archive.
excludes: list of strings of paths to exclude from the archive. May be
None or an empty list to include all files from source. Defaults to
None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'tar' command fails for any reason.
"""
assert dest and dest.endswith('.tar') and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and os.path.exists(src)
cmd = ['tar']
cmd.append('--create')
if verbose:
print '\narchive_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
if excludes:
for exclude in excludes:
cmd.append('--exclude=%s' % (exclude))
cmd.append('--file')
cmd.append(dest)
cmd.append('--directory')
cmd.append(os.path.dirname(src))
cmd.append(os.path.basename(src))
sys.stdout.write(subprocess.check_output(cmd))
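# Hedged usage sketch, kept as a comment; paths are made up. The call below is
# roughly equivalent to running:
#   tar --create --file /tmp/photos.tar --directory /home/me photos
#
#     archive_path('/tmp/photos.tar', '/home/me/photos',
#                  excludes=['*.tmp'], verbose=True)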
def unarchive_path(dest, src, verbose=False):
"""
Extracts a .tar archive into a directory.
Args:
dest: string path for the destination *directory* into which the
archive contents will be extracted. NB: This is the directory to
extract into, not the final path for the contents of the archive.
src: string path for the source archive file. Must end with
'.tar'.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'tar' command fails for any reason.
"""
assert dest and os.path.isdir(dest)
assert src and src.endswith('.tar') and os.path.isfile(src)
cmd = ['tar']
cmd.append('--extract')
if verbose:
print '\nunarchive_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
cmd.append('--file')
cmd.append(src)
cmd.append('--directory')
cmd.append(dest)
sys.stdout.write(subprocess.check_output(cmd))
def compress_path(dest, src, verbose=False):
"""
Compresses a file into an xz-compressed file.
Args:
dest: string path for the destination file. Must end with '.xz'.
src: string path for the source file to compress.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'xz' command fails for any reason.
"""
assert dest and dest.endswith('.xz') and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and os.path.isfile(src)
cmd = ['xz']
if verbose:
print '\ncompress_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
cmd.append('--keep')
cmd.append('--stdout')
cmd.append('--compress')
cmd.append(src)
    # 'with' guarantees the output handle is closed even if xz fails, and
    # avoids referencing an unbound name when open() itself raises.
    with open(dest, 'w') as dest_file:
        subprocess.check_call(cmd, stdout=dest_file)
def uncompress_path(dest, src, verbose=False):
"""
    Uncompresses an xz-compressed file into its original format.
Args:
dest: string path for the destination uncompressed file.
src: string path for the source compressed file. Must end with
'.xz'.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'xz' command fails for any reason.
"""
assert dest and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and src.endswith('.xz') and os.path.isfile(src)
cmd = ['xz']
if verbose:
print '\nuncompress_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
cmd.append('--keep')
cmd.append('--stdout')
cmd.append('--decompress')
cmd.append(src)
    # 'with' guarantees the output handle is closed even if xz fails, and
    # avoids referencing an unbound name when open() itself raises.
    with open(dest, 'w') as dest_file:
        subprocess.check_call(cmd, stdout=dest_file)
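# Hedged round-trip sketch for the two xz helpers above, kept as a comment;
# paths are made up. compress_path shells out to `xz --compress --stdout` and
# uncompress_path to `xz --decompress --stdout`.
#
#     compress_path('/tmp/photos.tar.xz', '/tmp/photos.tar')
#     uncompress_path('/tmp/photos_restored.tar', '/tmp/photos.tar.xz')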
def encrypt_path(dest, src, homedir=None, verbose=False):
"""
Encrypts a file into a gpg-encrypted file.
Args:
dest: string path for the destination file. Must end with '.gpg'.
src: string path for the source file to encrypt.
homedir: string path for the location of the GPG home directory to
use. May be None to use the default location for the machine's GPG
implementation (typically ~/gnupg). Defaults to None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'gpg' command fails for any reason.
"""
assert dest and dest.endswith('.gpg') and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and os.path.isfile(src)
cmd = ['gpg']
if verbose:
print '\nencrypt_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
if homedir:
cmd.append('--homedir')
cmd.append(homedir)
cmd.append('--default-recipient-self')
cmd.append('--output')
cmd.append(dest)
cmd.append('--encrypt')
cmd.append(src)
sys.stdout.write(subprocess.check_output(cmd))
def unencrypt_path(dest, src, homedir=None, verbose=False):
"""
Decrypts a gpg-encrypted file into its original format.
Args:
dest: string path for the destination decrypted file.
src: string path for the source file to decrypt. Must end with
'.gpg'.
homedir: string path for the location of the GPG home directory to
use. May be None to use the default location for the machine's GPG
implementation (typically ~/gnupg). Defaults to None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'gpg' command fails for any reason.
"""
    assert dest and not os.path.isdir(dest) and \
os.path.isdir(os.path.dirname(dest))
assert src and src.endswith('.gpg') and os.path.isfile(src)
cmd = ['gpg']
if verbose:
print '\nunencrypt_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
if homedir:
cmd.append('--homedir')
cmd.append(homedir)
cmd.append('--default-recipient-self')
cmd.append('--output')
cmd.append(dest)
cmd.append('--decrypt')
cmd.append(src)
sys.stdout.write(subprocess.check_output(cmd))
def copy_path(dest, src, excludes=None, verbose=False):
"""
Copies a path to another location.
Args:
dest: string path for the destination copied file or directory.
src: string path for the source file or directory to copy.
excludes: list of strings of paths to exclude from the copy. May be
None or an empty list to include all files from source. Defaults to
None.
verbose: boolean, True to output verbose status to stdout. Defaults
to False.
Raises:
CalledProcessError: if the 'rsync' command fails for any reason.
"""
assert dest and os.path.isdir(os.path.dirname(dest))
assert src and os.path.exists(src)
cmd = ['rsync']
if verbose:
print '\ncopy_path(%s, %s)' % (dest, src)
cmd.append('--verbose')
else:
cmd.append('--quiet')
cmd.append('--archive') # Preserve metadata (-a)
cmd.append('--delete') # Delete extra files
cmd.append('--compress') # Compress xfer data (-z)
cmd.append('--protect-args') # Preserve whitespace (-s)
if excludes:
for exclude in excludes:
cmd.append('--filter=exclude_%s' % (exclude))
cmd.append(src)
cmd.append(dest)
sys.stdout.write(subprocess.check_output(cmd))
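# Hedged usage sketch, kept as a comment; paths are made up. The call below is
# roughly equivalent to running:
#   rsync --archive --delete --compress --protect-args /home/me/photos /mnt/backup/
#
#     copy_path('/mnt/backup/', '/home/me/photos', excludes=['.cache'])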
def resolve_relative_path(path, config_path):
"""
Resolves relative paths into absolute paths relative to the config file.
Args:
path: string (potentially) relative path to resolve.
config_path: string path to the config file to resolve relative to.
Returns:
string absolute path (unaltered if 'path' was already absolute).
"""
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(config_path), path)
def get_out_filename(dirname, src, extension):
"""
Forms a filename from a dir-name, file-name and file-extension.
Args:
dirname: string path to directory to use.
src: string path to file whose basename to use.
extension: string file extension (without preceding '.') to use.
Returns:
string path formed from the given components.
"""
return os.path.join(dirname, '%s.%s' % (os.path.basename(src), extension))
def process_section(config, section, config_path, verbose=False, gpg_home=None):
"""
Process a config file section and perform the actions it describes.
Args:
config: ConfigParser to read the section from.
section: string section name to read from the ConfigParser.
config_path: string path to the read config file.
verbose: boolean, True to output verbose status to stdout.
Defaults to False.
gpg_home: string path for the location of the GPG home directory
to use. May be None to use the default location for the machine's
GPG implementation (typically ~/gnupg). Defaults to None.
| |
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2019
import datetime
from tempfile import gettempdir
import streamsx.spl.op
import streamsx.spl.types
from streamsx.topology.schema import CommonSchema, StreamSchema
from streamsx.spl.types import rstring
import streamsx.spl.toolkit
from streamsx.toolkits import download_toolkit
_TOOLKIT_NAME = 'com.ibm.streamsx.inet'
HttpResponseSchema = StreamSchema('tuple<rstring status, int32 statusCode, rstring contentEncoding, rstring contentType, list<rstring> responseHeader, rstring responseData>')
"""Structured schema containing HTTP GET/PUT/POST/DELETE response values.
``'tuple<rstring status, int32 statusCode, rstring contentEncoding, rstring contentType, list<rstring> responseHeader, rstring responseData>'``
"""
def download_toolkit(url=None, target_dir=None):
r"""Downloads the latest Inet toolkit from GitHub.
Example for updating the Inet toolkit for your topology with the latest toolkit from GitHub::
import streamsx.inet as inet
# download Inet toolkit from GitHub
inet_toolkit_location = inet.download_toolkit()
# add the toolkit to topology
streamsx.spl.toolkit.add_toolkit(topology, inet_toolkit_location)
Example for updating the topology with a specific version of the Inet toolkit using a URL::
import streamsx.inet as inet
url310 = 'https://github.com/IBMStreams/streamsx.inet/releases/download/v3.1.0/streamsx.inet.toolkit-3.1.0-el6-amd64-70c49d5-20190320-1318.tgz'
inet_toolkit_location = inet.download_toolkit(url=url310)
streamsx.spl.toolkit.add_toolkit(topology, inet_toolkit_location)
Args:
url(str): Link to toolkit archive (\*.tgz) to be downloaded. Use this parameter to
download a specific version of the toolkit.
target_dir(str): the directory where the toolkit is unpacked to. If a relative path is given,
the path is appended to the system temporary directory, for example to /tmp on Unix/Linux systems.
If target_dir is ``None`` a location relative to the system temporary directory is chosen.
Returns:
str: the location of the downloaded Inet toolkit
.. note:: This function requires an outgoing Internet connection
.. versionadded:: 1.2
"""
_toolkit_location = streamsx.toolkits.download_toolkit (toolkit_name=_TOOLKIT_NAME, url=url, target_dir=target_dir)
return _toolkit_location
def _add_toolkit_dependency(topo, version):
# IMPORTANT: Dependency of this python wrapper to a specific toolkit version
# This is important when toolkit is not set with streamsx.spl.toolkit.add_toolkit (selecting toolkit from remote build service)
streamsx.spl.toolkit.add_toolkit_dependency(topo, _TOOLKIT_NAME, version)
def request_delete(stream, url=None, url_attribute=None, extra_header_attribute=None, ssl_accept_all_certificates=False, name=None):
"""Issues HTTP DELETE requests. For each input tuple a DELETE request is issued and the response is on the returned stream. You can specifiy the URL either dynamic (part of input stream) or static (as parameter).
Example with URL as part of the input stream of type ``CommonSchema.String``. The parameters ``url`` and ``url_attribute`` can be omitted in this case::
import streamsx.inet as inet
s = topo.source(['http://httpbin.org/delete']).as_string()
result_http_del = inet.request_delete(s)
result_http_del.print()
Args:
stream(streamsx.topology.topology.Stream): Stream of tuples containing the HTTP request url. Supports ``streamsx.topology.schema.StreamSchema`` (schema for a structured stream) or ``CommonSchema.String`` as input.
url(str): String containing the URL to send HTTP requests to.
url_attribute(str): Attribute name of the input stream containing the URL to send HTTP requests to. Use this as alternative to the 'url' parameter.
        extra_header_attribute(str): Attribute name of the input stream containing one extra header to send with the request; the attribute must contain a string of the form Header-Name: value. If the attribute value is an empty string, no additional header is sent.
ssl_accept_all_certificates(bool): Accept all SSL certificates, even those that are self-signed. Setting this option will allow potentially insecure connections. Default is false.
name(str): Sink name in the Streams context, defaults to a generated name.
Returns:
:py:class:`topology_ref:streamsx.topology.topology.Stream`: Output Stream with schema :py:const:`~streamsx.inet.HttpResponseSchema`.
"""
if url_attribute is None and url is None:
if stream.oport.schema == CommonSchema.String:
url_attribute = 'string'
else:
raise ValueError("Either url_attribute or url parameter must be set.")
_op = _HTTPRequest(stream, schema=HttpResponseSchema, name=name)
_op.params['fixedMethod'] = _op.expression('DELETE')
if url_attribute is not None:
_op.params['url'] = _op.attribute(stream, url_attribute)
else:
_op.params['fixedUrl'] = url
# set output schema attribute names
_op.params['outputBody'] = 'responseData'
_op.params['outputStatus'] = 'status'
_op.params['outputStatusCode'] = 'statusCode'
_op.params['outputContentEncoding'] = 'contentEncoding'
_op.params['outputContentType'] = 'contentType'
_op.params['outputHeader'] = 'responseHeader'
_op.params['sslAcceptAllCertificates'] = ssl_accept_all_certificates
if extra_header_attribute is not None:
_op.params['extraHeaderAttribute'] = _op.attribute(stream, extra_header_attribute)
_add_toolkit_dependency(stream.topology, '[3.0.0,4.0.0)') # extraHeaderAttribute parameter has been introduced in v3.0
return _op.outputs[0]
def request_get(stream, url=None, url_attribute=None, extra_header_attribute=None, ssl_accept_all_certificates=False, name=None):
"""Issues HTTP GET requests. For each input tuple a GET request is issued and the response is on the returned stream. You can specifiy the URL either dynamic (part of input stream) or static (as parameter).
Example with URL as part of the input stream of type ``CommonSchema.String``. The parameters ``url`` and ``url_attribute`` can be omitted in this case::
import streamsx.inet as inet
s = topo.source(['http://httpbin.org/get']).as_string()
result_http_get = inet.request_get(s)
result_http_get.print()
Args:
stream(streamsx.topology.topology.Stream): Stream of tuples containing the HTTP request url. Supports ``streamsx.topology.schema.StreamSchema`` (schema for a structured stream) or ``CommonSchema.String`` as input.
url(str): String containing the URL to send HTTP requests to.
url_attribute(str): Attribute name of the input stream containing the URL to send HTTP requests to. Use this as alternative to the 'url' parameter.
        extra_header_attribute(str): Attribute name of the input stream containing one extra header to send with the request; the attribute must contain a string of the form Header-Name: value. If the attribute value is an empty string, no additional header is sent.
ssl_accept_all_certificates(bool): Accept all SSL certificates, even those that are self-signed. Setting this option will allow potentially insecure connections. Default is false.
name(str): Sink name in the Streams context, defaults to a generated name.
Returns:
:py:class:`topology_ref:streamsx.topology.topology.Stream`: Output Stream with schema :py:const:`~streamsx.inet.HttpResponseSchema`.
"""
if url_attribute is None and url is None:
if stream.oport.schema == CommonSchema.String:
url_attribute = 'string'
else:
raise ValueError("Either url_attribute or url parameter must be set.")
_op = _HTTPRequest(stream, schema=HttpResponseSchema, name=name)
_op.params['fixedMethod'] = _op.expression('GET')
if url_attribute is not None:
_op.params['url'] = _op.attribute(stream, url_attribute)
else:
_op.params['fixedUrl'] = url
# set output schema attribute names
_op.params['outputBody'] = 'responseData'
_op.params['outputStatus'] = 'status'
_op.params['outputStatusCode'] = 'statusCode'
_op.params['outputContentEncoding'] = 'contentEncoding'
_op.params['outputContentType'] = 'contentType'
_op.params['outputHeader'] = 'responseHeader'
_op.params['sslAcceptAllCertificates'] = ssl_accept_all_certificates
if extra_header_attribute is not None:
_op.params['extraHeaderAttribute'] = _op.attribute(stream, extra_header_attribute)
_add_toolkit_dependency(stream.topology, '[3.0.0,4.0.0)') # extraHeaderAttribute parameter has been introduced in v3.0
return _op.outputs[0]
def request_post(stream, url=None, url_attribute=None, body_attribute=None, content_type=None, content_type_attribute=None, extra_header_attribute=None, ssl_accept_all_certificates=False, name=None):
"""Issues HTTP POST requests. For each input tuple a POST request is issued and the response is on the returned stream. You can specifiy the URL either dynamic (part of input stream) or static (as parameter).
Example with URL as part of the input stream of type ``CommonSchema.String``. The parameters ``url`` and ``url_attribute`` can be omitted in this case::
import streamsx.inet as inet
s = topo.source(['http://httpbin.org/post']).as_string()
result_http_post = inet.request_post(s)
result_http_post.print()
Example with URL as part of the input stream and content type as parameter::
import streamsx.inet as inet
s = topo.source(['http://httpbin.org/post']).as_string()
result_http_post = inet.request_post(s, content_type='application/x-www-form-urlencoded')
result_http_post.print()
Args:
stream(streamsx.topology.topology.Stream): Stream of tuples containing the HTTP request url. Supports ``streamsx.topology.schema.StreamSchema`` (schema for a structured stream) or ``CommonSchema.String`` as input.
url(str): String containing the URL to send HTTP requests to.
url_attribute(str): Attribute name of the input stream containing the URL to send HTTP requests to. Use this as alternative to the 'url' parameter.
body_attribute(str): Request body attribute for POST method that accepts an entity.
content_type(str): MIME content type of entity for POST requests. If not specified the default 'application/json' is used.
content_type_attribute(str): Attribute name of the input stream containing the MIME content type. Use this as alternative to the 'content_type' parameter.
        extra_header_attribute(str): Attribute name of the input stream containing one extra header to send with the request; the attribute must contain a string of the form Header-Name: value. If the attribute value is an empty string, no additional header is sent.
ssl_accept_all_certificates(bool): Accept all SSL certificates, even those that are self-signed. Setting this option will allow potentially insecure connections. Default is false.
name(str): Sink name in the Streams context, defaults to a generated name.
Returns:
:py:class:`topology_ref:streamsx.topology.topology.Stream`: Output Stream with schema :py:const:`~streamsx.inet.HttpResponseSchema`.
"""
if url_attribute is None and url is None:
if stream.oport.schema == CommonSchema.String:
url_attribute = 'string'
else:
raise ValueError("Either url_attribute or url parameter must be set.")
_op = _HTTPRequest(stream, schema=HttpResponseSchema, name=name)
_op.params['fixedMethod'] = _op.expression('POST')
if url_attribute is not None:
_op.params['url'] = _op.attribute(stream, url_attribute)
else:
_op.params['fixedUrl'] = url
if body_attribute is not None:
_op.params['requestBodyAttribute'] = _op.attribute(stream, body_attribute)
if content_type_attribute is not None:
_op.params['contentType'] = _op.attribute(stream, content_type_attribute)
else:
if content_type is not None:
_op.params['fixedContentType'] = content_type
# set output schema attribute names
_op.params['outputBody'] = 'responseData'
_op.params['outputStatus'] = 'status'
_op.params['outputStatusCode'] = 'statusCode'
_op.params['outputContentEncoding'] = 'contentEncoding'
_op.params['outputContentType'] = 'contentType'
_op.params['outputHeader'] = 'responseHeader'
_op.params['sslAcceptAllCertificates'] = ssl_accept_all_certificates
if extra_header_attribute is not None:
_op.params['extraHeaderAttribute'] = _op.attribute(stream, extra_header_attribute)
_add_toolkit_dependency(stream.topology, '[3.0.0,4.0.0)') # extraHeaderAttribute parameter has been introduced | |
mod_attr_val_list:
setattr(mod, attr, val)
# end teardown_mocks
@contextlib.contextmanager
def patch(target_obj, target_method_name, patched):
orig_method = getattr(target_obj, target_method_name)
def patched_wrapper(*args, **kwargs):
return patched(orig_method, *args, **kwargs)
setattr(target_obj, target_method_name, patched_wrapper)
try:
yield
finally:
setattr(target_obj, target_method_name, orig_method)
#end patch
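# Hedged usage sketch: the patched callable receives the original method as its
# first argument, so it can delegate or short-circuit. FakeThing is a
# hypothetical class used only for illustration.
#
#     class FakeThing(object):
#         def greet(self):
#             return 'hello'
#
#     def loud_greet(orig_method, *args, **kwargs):
#         return orig_method(*args, **kwargs).upper()
#
#     with patch(FakeThing, 'greet', loud_greet):
#         assert FakeThing().greet() == 'HELLO'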
@contextlib.contextmanager
def patch_imports(imports):
# save original, patch and restore
orig_modules = {}
mocked_modules = []
try:
for import_str, fake in imports:
cur_module = None
for mod_part in import_str.split('.'):
if not cur_module:
cur_module = mod_part
else:
cur_module += "." + mod_part
if cur_module in sys.modules:
orig_modules[cur_module] = sys.modules[cur_module]
else:
mocked_modules.append(cur_module)
sys.modules[cur_module] = fake
yield
finally:
for mod_name, mod in list(orig_modules.items()):
sys.modules[mod_name] = mod
for mod_name in mocked_modules:
del sys.modules[mod_name]
#end patch_import
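# Hedged usage sketch: patch_imports installs the fake objects into sys.modules
# for the duration of the with-block, so imports inside it resolve to the
# fakes. The module name below is hypothetical.
#
#     import types
#     fake_mod = types.ModuleType('some_missing_dependency')
#     fake_mod.FLAG = True
#     with patch_imports([('some_missing_dependency', fake_mod)]):
#         import some_missing_dependency
#         assert some_missing_dependency.FLAG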
def fake_wrapper(self, func, *args, **kwargs):
def wrapper(*wargs, **wkwargs):
return func(*wargs, **wkwargs)
return wrapper
cov_handle = None
class TestCase(testtools.TestCase, fixtures.TestWithFixtures):
_HTTP_HEADERS = {
'Content-type': 'application/json; charset="UTF-8"',
}
_config_knobs = [
('DEFAULTS', '', ''),
]
mocks = [
(novaclient.client, 'Client', FakeNovaClient.initialize),
(kazoo.client.KazooClient, '__new__',FakeKazooClient),
(kazoo.recipe.counter.Counter, '__init__',fake_zk_counter_init),
(kazoo.recipe.counter.Counter, '_change',fake_zk_counter_change),
(kazoo.recipe.counter.Counter, 'value',fake_zk_counter_value),
(kazoo.recipe.counter.Counter, '_ensure_node',
fake_zk_counter_ensure_node),
(kazoo.handlers.gevent.SequentialGeventHandler, '__init__',stub),
(kombu.Connection, '__new__',FakeKombu.Connection),
(kombu.Exchange, '__new__',FakeKombu.Exchange),
(kombu.Queue, '__new__',FakeKombu.Queue),
(kombu.Consumer, '__new__',FakeKombu.Consumer),
(kombu.Producer, '__new__',FakeKombu.Producer),
(VncApiConfigLog, '__new__',FakeApiConfigLog),
]
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__)
self._assert_till_max_tries = 600
super(TestCase, self).__init__(*args, **kwargs)
self.addOnException(self._add_detailed_traceback)
def _add_detailed_traceback(self, exc_info):
vnc_cgitb.enable(format='text')
from six import StringIO
tmp_file = StringIO()
cgitb_hook(format="text", file=tmp_file, info=exc_info)
tb_str = tmp_file.getvalue()
tmp_file.close()
self.addDetail('detailed-traceback', content.text_content(tb_str))
def _add_detail(self, detail_str):
frame = inspect.stack()[1]
self.addDetail('%s:%s ' %(frame[1],frame[2]), content.text_content(detail_str))
def _add_request_detail(self, op, url, headers=None, query_params=None,
body=None):
request_str = ' URL: ' + pformat(url) + \
' OPER: ' + pformat(op) + \
' Headers: ' + pformat(headers) + \
' Query Params: ' + pformat(query_params) + \
' Body: ' + pformat(body)
self._add_detail('Requesting: ' + request_str)
def _http_get(self, uri, query_params=None):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('GET', url, headers=self._HTTP_HEADERS,
query_params=query_params)
response = self._api_server_session.get(url, headers=self._HTTP_HEADERS,
params=query_params)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_get
def _http_post(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('POST', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.post(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_post
def _http_delete(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('DELETE', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.delete(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_delete
def _http_put(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('PUT', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.put(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_put
def _create_test_objects(self, count=1, proj_obj=None):
ret_objs = []
for i in range(count):
obj_name = self.id() + '-vn-' + str(i)
obj = VirtualNetwork(obj_name, parent_obj=proj_obj)
self._add_detail('creating-object ' + obj_name)
self._vnc_lib.virtual_network_create(obj)
ret_objs.append(obj)
return ret_objs
def _create_test_object(self):
return self._create_test_objects()[0]
def _delete_test_object(self, obj):
self._vnc_lib.virtual_network_delete(id=obj.uuid)
def get_cf(self, keyspace_name, cf_name):
ks_name = '%s_%s' %(self._cluster_id, keyspace_name)
return CassandraCFs.get_cf(ks_name, cf_name)
# end get_cf
def vnc_db_has_ident(self, obj=None, id=None, type_fq_name=None):
if obj:
_type = obj.get_type()
_fq_name = obj.get_fq_name()
if id:
_type = self._vnc_lib.id_to_fq_name_type(id)
_fq_name = self._vnc_lib.id_to_fq_name(id)
if type_fq_name:
_type = type_fq_name[0]
_fq_name = type_fq_name[1]
try:
vnc_obj = self._vnc_lib._object_read(_type, _fq_name)
except NoIdError:
return None
return vnc_obj
def vnc_db_ident_has_prop(self, obj, prop_name, prop_value):
vnc_obj = self.vnc_db_has_ident(obj=obj)
if vnc_obj is None:
return False
return getattr(vnc_obj, prop_name) == prop_value
def vnc_db_ident_has_ref(self, obj, ref_name, ref_fq_name):
vnc_obj = self.vnc_db_has_ident(obj=obj)
if vnc_obj is None:
return False
refs = getattr(vnc_obj, ref_name, [])
for ref in refs:
if ref['to'] == ref_fq_name:
return True
return False
def vnc_db_doesnt_have_ident(self, obj=None, id=None, type_fq_name=None):
return not self.vnc_db_has_ident(obj=obj, id=id,
type_fq_name=type_fq_name)
def vnc_db_ident_doesnt_have_ref(self, obj, ref_name, ref_fq_name=None):
return not self.vnc_db_ident_has_ref(obj, ref_name, ref_fq_name)
def assertTill(self, expr_or_cb, *cb_args, **cb_kwargs):
tries = 0
while True:
if callable(expr_or_cb):
ret = expr_or_cb(*cb_args, **cb_kwargs)
else:
ret = eval(expr_or_cb)
if ret:
break
tries = tries + 1
if tries >= self._assert_till_max_tries:
raise Exception('Max retries')
self._logger.warn('Retrying at ' + str(inspect.stack()[1]))
gevent.sleep(0.1)
@classmethod
def setUpClass(cls, extra_mocks=None, extra_config_knobs=None,
db='cassandra', in_place_upgrade_path=None):
cls._cluster_id = cls.__name__
cls._in_place_upgrade_path = in_place_upgrade_path
super(TestCase, cls).setUpClass()
cfgm_common.zkclient.LOG_DIR = './'
gevent.pywsgi.WSGIServer.handler_class = FakeWSGIHandler
cls.orig_mocked_values = setup_mocks(cls.mocks + (extra_mocks or []))
# For performance reasons, don't log cassandra requests
VncCassandraClient._handle_exceptions = fake_wrapper
# Load DB if JSON DBs dump file is provided
cls._load_db_contents()
cls._server_info = create_api_server_instance(
cls._cluster_id, cls._config_knobs + (extra_config_knobs or []),
db=db)
try:
cls._api_server_ip = cls._server_info['ip']
cls._api_server_port = cls._server_info['service_port']
cls._api_admin_port = cls._server_info['admin_port']
cls._api_svr_greenlet = cls._server_info['greenlet']
cls._api_svr_app = cls._server_info['app']
cls._vnc_lib = cls._server_info['api_conn']
cls._api_server_session = cls._server_info['api_session']
cls._api_server = cls._server_info['api_server']
except Exception as e:
cls.tearDownClass()
raise
# end setUpClass
@classmethod
def tearDownClass(cls):
# Dump DBs into a JSON file if a path was provided
cls._dump_db_contents()
destroy_api_server_instance(cls._server_info)
teardown_mocks(cls.orig_mocked_values)
def setUp(self, extra_mocks=None, extra_config_knobs=None):
self._logger.info("Running %s" %(self.id()))
super(TestCase, self).setUp()
# end setUp
def tearDown(self):
self._logger.info("Finished %s" %(self.id()))
self.wait_till_api_server_idle()
super(TestCase, self).tearDown()
# end tearDown
def wait_till_api_server_idle(self):
# wait for in-flight messages to be processed
if hasattr(self._api_server._db_conn, '_msgbus'):
while self._api_server._db_conn._msgbus.num_pending_messages() > 0:
gevent.sleep(0.001)
vhost_url = self._api_server._db_conn._msgbus._urls
while not FakeKombu.is_empty(vhost_url, 'vnc_config'):
gevent.sleep(0.001)
# wait_till_api_server_idle
def create_virtual_network(self, vn_name, vn_subnet='10.0.0.0/24'):
vn_obj = VirtualNetwork(name=vn_name)
ipam_fq_name = [
'default-domain', 'default-project', 'default-network-ipam']
ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
subnets = [vn_subnet] if isinstance(vn_subnet, basestring) else vn_subnet
subnet_infos = []
for subnet in subnets:
cidr = IPNetwork(subnet)
subnet_infos.append(
IpamSubnetType(
subnet=SubnetType(
str(cidr.network),
int(cidr.prefixlen),
),
default_gateway=str(IPAddress(cidr.last - 1)),
subnet_uuid=str(uuid.uuid4()),
)
)
subnet_data = VnSubnetsType(subnet_infos)
vn_obj.add_network_ipam(ipam_obj, subnet_data)
self._vnc_lib.virtual_network_create(vn_obj)
vn_obj.clear_pending_updates()
return vn_obj
# end create_virtual_network
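# Usage sketch (illustrative only): vn_subnet accepts a single CIDR string or
# a list of CIDRs; one IpamSubnetType is built per entry.
#
#     vn1 = self.create_virtual_network('vn1', '10.0.0.0/24')
#     vn2 = self.create_virtual_network('vn2', ['10.1.0.0/24', '10.2.0.0/24'])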
def _create_service(self, vn_list, si_name, auto_policy,
create_right_port=True, **kwargs):
sa_set = None
if kwargs.get('service_virtualization_type') == 'physical-device':
pr = PhysicalRouter(si_name, physical_router_role='pnf')
self._vnc_lib.physical_router_create(pr)
sa_set = ServiceApplianceSet('sa_set-'+si_name)
self._vnc_lib.service_appliance_set_create(sa_set)
sa = ServiceAppliance('sa-'+si_name, parent_obj=sa_set)
left_value = "default-global-system-config:5c3-qfx5:et-0/0/50"
right_value = "default-global-system-config:5c3-qfx5:et-0/0/51"
sa.set_service_appliance_properties(KeyValuePairs([KeyValuePair(key='left-attachment-point',
value= left_value),
KeyValuePair(key='right-attachment-point',
value= right_value)]))
for if_type, _ in vn_list:
attr = ServiceApplianceInterfaceType(interface_type=if_type)
pi = PhysicalInterface('pi-'+si_name+if_type, parent_obj=pr)
self._vnc_lib.physical_interface_create(pi)
sa.add_physical_interface(pi, attr)
self._vnc_lib.service_appliance_create(sa)
sti = [ServiceTemplateInterfaceType(k) for k, _ in vn_list]
st_prop = ServiceTemplateType(
flavor='medium',
image_name='junk',
ordered_interfaces=True,
interface_type=sti, **kwargs)
service_template = ServiceTemplate(
name=si_name + 'template',
service_template_properties=st_prop)
if sa_set:
service_template.add_service_appliance_set(sa_set)
self._vnc_lib.service_template_create(service_template)
scale_out = ServiceScaleOutType()
if kwargs.get('service_mode') in ['in-network', 'in-network-nat']:
if_list = [ServiceInstanceInterfaceType(virtual_network=vn)
for _, vn in vn_list]
si_props = ServiceInstanceType(auto_policy=auto_policy,
interface_list=if_list,
scale_out=scale_out)
else:
if_list = [ServiceInstanceInterfaceType(),
ServiceInstanceInterfaceType()]
si_props = ServiceInstanceType(interface_list=if_list,
scale_out=scale_out)
service_instance = ServiceInstance(
name=si_name, service_instance_properties=si_props)
service_instance.add_service_template(service_template)
self._vnc_lib.service_instance_create(service_instance)
if kwargs.get('version') == 2:
proj = Project()
pt = PortTuple('pt-'+si_name, parent_obj=service_instance)
self._vnc_lib.port_tuple_create(pt)
for if_type, vn_name in vn_list:
if if_type == 'right' and not create_right_port:
continue
port = VirtualMachineInterface(si_name+if_type, parent_obj=proj)
vmi_props = VirtualMachineInterfacePropertiesType(
service_interface_type=if_type)
vn_obj = self._vnc_lib.virtual_network_read(fq_name_str=vn_name)
port.set_virtual_machine_interface_properties(vmi_props)
port.add_virtual_network(vn_obj)
port.add_port_tuple(pt)
self._vnc_lib.virtual_machine_interface_create(port)
# Give the API a chance to create the iip for the vmi of the pt
# before the si is processed and the schema allocates an iip
# address to the service chain
gevent.sleep(0.1)
return service_instance.get_fq_name_str()
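# Illustrative call, mirroring how create_network_policy() drives this helper
# (the vn fq-name strings below are hypothetical):
#
#     si_fq_name = self._create_service(
#         [('left', 'default-domain:default-project:vn1'),
#          ('right', 'default-domain:default-project:vn2')],
#         'si-1', auto_policy=False, version=2, service_mode='in-network')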
def create_network_policy(self, vn1, vn2, service_list=None, mirror_service=None,
auto_policy=False, create_right_port = True, **kwargs):
vn1_name = vn1 if isinstance(vn1, basestring) else vn1.get_fq_name_str()
vn2_name = vn2 if isinstance(vn2, basestring) else vn2.get_fq_name_str()
addr1 = AddressType(virtual_network=vn1_name, subnet=kwargs.get('subnet_1'))
addr2 = AddressType(virtual_network=vn2_name, subnet=kwargs.get('subnet_2'))
port = PortType(-1, 0)
service_name_list = []
si_list = service_list or []
if service_list:
for service in si_list:
service_name_list.append(self._create_service(
[('left', vn1_name), ('right', vn2_name)], service,
auto_policy, create_right_port, **kwargs))
if mirror_service:
mirror_si = self._create_service(
[('left', vn1_name), ('right', vn2_name)], mirror_service, False,
service_mode='transparent', service_type='analyzer')
action_list = ActionListType()
if mirror_service:
mirror = MirrorActionType(analyzer_name=mirror_si)
action_list.mirror_to=mirror
if service_name_list:
action_list.apply_service=service_name_list
else:
action_list.simple_action='pass'
prule = PolicyRuleType(direction="<>", protocol="any",
src_addresses=[addr1], dst_addresses=[addr2],
src_ports=[port], dst_ports=[port],
action_list=action_list)
pentry = PolicyEntriesType([prule])
np = NetworkPolicy(str(uuid.uuid4()), network_policy_entries=pentry)
if auto_policy:
return np
self._vnc_lib.network_policy_create(np)
return np
# end create_network_policy
def create_logical_router(self, name, nb_of_attached_networks=1, **kwargs):
lr = LogicalRouter(name, **kwargs)
vns = []
vmis = []
iips = []
for idx in range(nb_of_attached_networks):
# Virtual Network
vn = self.create_virtual_network('%s-network%d' % (name, idx),
'10.%d.0.0/24' % idx)
vns.append(vn)
# Virtual Machine Interface
vmi_name = '%s-network%d-vmi' % (name, idx)
vmi = VirtualMachineInterface(
vmi_name, parent_type='project',
fq_name=['default-domain', 'default-project', vmi_name])
vmi.set_virtual_machine_interface_device_owner(
'network:router_interface')
vmi.add_virtual_network(vn)
self._vnc_lib.virtual_machine_interface_create(vmi)
lr.add_virtual_machine_interface(vmi)
vmis.append(vmi)
# Instance IP
gw_ip = vn.get_network_ipam_refs()[0]['attr'].ipam_subnets[0].\
default_gateway
subnet_uuid = vn.get_network_ipam_refs()[0]['attr'].\
ipam_subnets[0].subnet_uuid
iip = InstanceIp(name='%s-network%d-iip' % (name, idx))
iip.set_subnet_uuid(subnet_uuid)
iip.set_virtual_machine_interface(vmi)
iip.set_virtual_network(vn)
iip.set_instance_ip_family('v4')
iip.set_instance_ip_address(gw_ip)
self._vnc_lib.instance_ip_create(iip)
iips.append(iip)
self._vnc_lib.logical_router_create(lr)
return lr, vns, vmis, iips
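# Usage sketch (illustrative only): the helper returns the logical router and
# the per-network objects it attached, e.g.
#
#     lr, vns, vmis, iips = self.create_logical_router('lr-test', 2)
#
# which creates networks 10.0.0.0/24 and 10.1.0.0/24 behind 'lr-test'.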
def _security_group_rule_build(self, rule_info, sg_fq_name_str):
protocol = rule_info['protocol']
port_min = rule_info['port_min'] or 0
port_max = rule_info['port_max'] or 65535
direction = rule_info['direction'] or 'ingress'
ip_prefix = rule_info['ip_prefix']
ether_type = rule_info['ether_type']
if ip_prefix:
cidr = ip_prefix.split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
endpt = [AddressType(subnet=SubnetType(pfx, pfx_len))]
else:
endpt = [AddressType(security_group=sg_fq_name_str)]
local = None
remote = None
if direction == 'ingress':
dir = '>'
local = endpt
remote = [AddressType(security_group='local')]
else:
dir = '>'
remote = endpt
local = [AddressType(security_group='local')]
if not
log2(L1) > time:
continue
L12 = L1 ** 2 // 2 ** l1
L12 = max(L12, 1)
tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
if tmp_mem > mem:
continue
#################################################################################
#######choose start value for l2 such that result list size is close to L12#######
#################################################################################
try:
f = lambda x: log2(int(L12)) + int(2) * log2(binom_sp(x, int(w2))) - int(2) * x
l2_val = int(fsolve(f, 0)[0])
except:
continue
if f(l2_val) < 0 or f(l2_val) > 1:
continue
################################################################################
l2_min = w2
l2_max = (n - k - l1 - (w - 2 * p - 2 * w2)) // 2
l2_range = [l2_val - i_val_inc[4] // 2, l2_val + i_val_inc[4] // 2]
for l2 in range(max(l2_min, l2_range[0]), min(l2_max, l2_range[1])):
Tp = max(
log2(binom(n, w)) - log2(binom(n - k - l1 - 2 * l2, w - 2 * p - 2 * w2)) - 2 * log2(
binom(k1, p)) - 2 * log2(binom(l2, w2)) - solutions, 0)
Tg = _gaussian_elimination_complexity(n, k, r)
T_tree = 2 * _list_merge_complexity(L1, l1, hmap) + _mitm_nn_complexity(L12, 2 * l2, 2 * w2,
hmap)
T_rep = int(ceil(2 ** max(l1 - log2(reps), 0)))
tmp = Tp + log2(Tg + T_rep * T_tree)
tmp += __memory_access_cost(tmp_mem, memory_access)
time = min(tmp, time)
if tmp == time:
memory = tmp_mem
params = [p, p1, w2, l2, l1]
for i in range(len(i_val)):
if params[i] >= i_val[i] - i_val_inc[i] / 2:
i_val[i] += i_val_inc[i]
stop = False
if stop:
break
break
par = {"l1": params[4], "p": params[0], "p1": params[1], "depth": 2, "l2": params[3], "w2": params[2]}
res = {"time": time, "memory": memory, "parameters": par}
return res
def bjmm_depth_2_disjoint_weight_complexity(n, k, w, mem=inf, hmap=1, p_range=[0, 25], memory_access=0):
"""
Complexity estimate of May-Ozerov algorithm in depth 2 using Indyk-Motwani for NN search
[MMT11] <NAME>., <NAME>., <NAME>.: Decoding random linear codes in 2^(0.054n). In: International Conference
on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)
[BJMM12] <NAME>., <NAME>., <NAME>., <NAME>.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0
improves information set decoding. In: Annual international conference on the theory and applications of
cryptographic techniques. pp. 520–536. Springer (2012)
[EssBel21] <NAME>. and <NAME>.: Syndrome Decoding Estimator. In: IACR Cryptol. ePrint Arch. 2021 (2021), 1243
expected weight distribution::
+---------------------------+-------------+------------+----------+----------+----------+----------+
|<-+ n - k - 2 l1 - 2 l2 +->|<-+ k / 2 +->|<-+ k / 2 ->|<-+ l1 +->|<-+ l1 +->|<-+ l2 +->|<-+ l2 +->|
| w - 2 p - 2 w1 - 2 w2 | p | p | w1 | w1 | w2 | w2 |
+---------------------------+-------------+------------+----------+----------+----------+----------+
INPUT:
- ``n`` -- length of the code
- ``k`` -- dimension of the code
- ``w`` -- Hamming weight of error vector
- ``mem`` -- upper bound on the available memory (as log2), default unlimited
- ``hmap`` -- indicates if hashmap is being used (default: true)
- ``p_range`` -- interval in which the parameter p is searched (default: [0, 25], helps speeding up computation)
- ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
EXAMPLES::
>>> from .estimator import bjmm_depth_2_disjoint_weight_complexity
>>> bjmm_depth_2_disjoint_weight_complexity(n=100,k=50,w=10) # doctest: +SKIP
"""
solutions = max(0, log2(binom(n, w)) - (n - k))
time = inf
memory = 0
r = _optimize_m4ri(n, k)
i_val = [p_range[1], 20, 10, 10, 5]
i_val_inc = [10, 10, 10, 10, 10, 10, 10]
params = [-1 for _ in range(7)]
while True:
stop = True
for p in range(max(p_range[0], params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
for p1 in range(max(params[1] - i_val_inc[1] // 2, (p + 1) // 2), min(w, i_val[1])):
s = max(params[2] - i_val_inc[2] // 2, 0)
for w1 in range(s - (s % 2), min(w // 2 - p, i_val[2]), 2):
for w11 in range(max(params[3] - i_val_inc[3] // 2, (w1 + 1) // 2), min(w, i_val[3])):
for w2 in range(max(params[4] - i_val_inc[4] // 2, 0), min(w // 2 - p - w1, i_val[4])):
##################################################################################
######choose start value for l1 such that representations cancel out exactly######
##################################################################################
try:
f = lambda x: 2 * log2((binom(p, p // 2) * binom(k // 2 - p, p1 - p // 2)) * (
binom_sp(x, w1 // 2) * binom_sp(x - w1, w11 - w1 // 2)) + 1) - 2 * x
l1_val = int(
fsolve(f, 2 * log2((binom(p, p // 2) * binom(k // 2 - p, p1 - p // 2))))[0])
except:
continue
if f(l1_val) < 0 or f(l1_val) > 10:
continue
#################################################################################
for l1 in range(max(l1_val - i_val_inc[5], w1, w11), l1_val + i_val_inc[5]):
k1 = k // 2
reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2 * (
binom(w1, w1 // 2) * binom(l1 - w1, w11 - w1 // 2)) ** 2
reps = max(reps, 1)
L1 = binom(k1, p1)
if log2(L1) > time:
continue
L12 = L1 ** 2 * binom(l1, w11) ** 2 // 2 ** (2 * l1)
L12 = max(L12, 1)
tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
if tmp_mem > mem:
continue
#################################################################################
#######choose start value for l2 such that result list size is equal to L12#######
#################################################################################
try:
f = lambda x: log2(L12) + 2 * log2(binom_sp(x, w2) + 1) - 2 * x
l2_val = int(fsolve(f, 50)[0])
except:
continue
if f(l2_val) < 0 or f(l2_val) > 10:
continue
################################################################################
l2_max = (n - k - 2 * l1 - (w - 2 * p - 2 * w1 - 2 * w2)) // 2
l2_min = w2
l2_range = [l2_val - i_val_inc[6] // 2, l2_val + i_val_inc[6] // 2]
for l2 in range(max(l2_min, l2_range[0]), min(l2_max, l2_range[1])):
Tp = max(
log2(binom(n, w)) - log2(
binom(n - k - 2 * l1 - 2 * l2, w - 2 * p - 2 * w1 - 2 * w2)) - 2 * log2(
binom(k1, p)) - 2 * log2(binom(l1, w1)) - 2 * log2(
binom(l2, w2)) - solutions, 0)
Tg = _gaussian_elimination_complexity(n, k, r)
T_tree = 2 * _mitm_nn_complexity(L1, 2 * l1, 2 * w11, hmap) + _mitm_nn_complexity(
L12, 2 * l2, 2 * w2, hmap)
T_rep = int(ceil(2 ** max(2 * l1 - log2(reps), 0)))
tmp = Tp + log2(Tg + T_rep * T_tree)
tmp += __memory_access_cost(tmp_mem, memory_access)
time = min(tmp, time)
if tmp == time:
memory = tmp_mem
params = [p, p1, w1, w11, w2, l2, l1 + l2]
for i in range(len(i_val)):
if params[i] >= i_val[i] - i_val_inc[i] / 2:
i_val[i] += i_val_inc[i]
stop = False
if stop:
break
break
par = {"l": params[6], "p": params[0], "p1": params[1], "w1": params[2], "w11": params[3], "l2": params[5],
"w2": params[4], "depth": 2}
res = {"time": time, "memory": memory, "parameters": par}
return res
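# Usage sketch (illustrative only): every estimator in this family returns a
# dict with "time" and "memory" as log2 values plus the optimal "parameters",
# so candidates can be compared directly. The (n, k, w) below are arbitrary
# example inputs, not benchmark values.
#
#     res = bjmm_depth_2_disjoint_weight_complexity(n=1284, k=1028, w=24)
#     print(res["time"], res["memory"], res["parameters"])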
def both_may_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
"""
Complexity estimate of Both-May algorithm in depth 2 using Indyk-Motwani and MitM for NN search
[BotMay18] <NAME>., <NAME>.: Decoding linear codes with high error rate and its impact for LPN security. In:
International Conference on Post-Quantum Cryptography. pp. 25--46. Springer (2018)
expected weight distribution::
+-------------------+---------+-------------------+-------------------+
| <--+ n - k - l+-->|<-+ l +->|<----+ k / 2 +---->|<----+ k / 2 +---->|
| w - w2 - 2p | w2 | p | p |
+-------------------+---------+-------------------+-------------------+
INPUT:
- ``n`` -- length of the code
- ``k`` -- dimension of the code
- ``w`` -- Hamming weight of error vector
- ``mem`` -- upper bound on the available memory (as log2), default unlimited
- ``hmap`` -- indicates if hashmap is being used (default: true)
- ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_bd_subnet
short_description: Manage Subnets (fv:Subnet)
description:
- Manage Subnets on Cisco ACI fabrics.
notes:
- The C(gateway) parameter is the root key used to access the Subnet (not name), so the C(gateway)
is required when the state is C(absent) or C(present).
- The C(tenant) and C(bd) used must exist before using this module in your playbook.
The M(aci_tenant) and M(aci_bd) modules can be used for these.
- More information about the internal APIC class B(fv:Subnet) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- <NAME> (@jmcgill298)
version_added: '2.4'
options:
bd:
description:
- The name of the Bridge Domain.
aliases: [ bd_name ]
description:
description:
- The description for the Subnet.
aliases: [ descr ]
enable_vip:
description:
- Determines if the Subnet should be treated as a VIP; used when the BD is extended to multiple sites.
- The APIC defaults new Subnets to C(no).
type: bool
default: 'no'
gateway:
description:
- The IPv4 or IPv6 gateway address for the Subnet.
aliases: [ gateway_ip ]
mask:
description:
- The subnet mask for the Subnet.
- This is the number associated with CIDR notation.
choices: [ Any 0 to 32 for IPv4 Addresses, 0-128 for IPv6 Addresses ]
aliases: [ subnet_mask ]
nd_prefix_policy:
description:
- The IPv6 Neighbor Discovery Prefix Policy to associate with the Subnet.
preferred:
description:
- Determines if the Subnet is preferred over all available Subnets. Only one Subnet per Address Family (IPv4/IPv6)
can be preferred in the Bridge Domain.
- The APIC defaults new Subnets to C(no).
type: bool
default: 'no'
route_profile:
description:
- The Route Profile to associate with the Subnet.
route_profile_l3_out:
description:
- The L3 Out that contains the associated Route Profile.
scope:
description:
- Determines the scope of the Subnet.
- The C(private) option only allows communication with hosts in the same VRF.
- The C(public) option allows the Subnet to be advertised outside of the ACI Fabric, and allows communication with
hosts in other VRFs.
- The C(shared) option limits communication to hosts in either the same VRF or the shared VRF.
- The value is a list of options, C(private) and C(public) are mutually exclusive, but both can be used with C(shared).
- The APIC defaults new Subnets to C(private).
choices:
- private
- public
- shared
- [ private, shared ]
- [ public, shared ]
default: private
subnet_control:
description:
- Determines the Subnet's Control State.
- The C(querier_ip) option is used to treat the gateway_ip as an IGMP querier source IP.
- The C(nd_ra) option is used to treat the gateway_ip address as a Neighbor Discovery Router Advertisement Prefix.
- The C(no_gw) option is used to remove default gateway functionality from the gateway address.
- The APIC defaults new Subnets to C(nd_ra).
choices: [ nd_ra, no_gw, querier_ip, unspecified ]
default: nd_ra
subnet_name:
description:
- The name of the Subnet.
aliases: [ name ]
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: create a tenant
aci_tenant:
host: apic
username: admin
password: <PASSWORD>
tenant: production
- name: create a bridge domain
aci_bd:
host: apic
username: admin
password: <PASSWORD>
tenant: production
bd: database
- name: create a subnet
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
- name: create a subnet with options
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
tenant: production
bd: database
subnet_name: sql
gateway: 10.1.2.1
mask: 23
description: SQL Servers
scope: public
route_profile_l3_out: corp
route_profile: corp_route_profile
- name: update a subnet's scope to private and shared
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
scope: [private, shared]
- name: get all subnets
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
state: query
- name: get all subnets of a specific gateway in a specified tenant
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
state: query
tenant: production
gateway: 10.1.1.1
mask: 24
- name: get specific subnet
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
state: query
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
- name: delete a subnet
aci_bd_subnet:
host: apic
username: admin
password: <PASSWORD>
state: absent
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
SUBNET_CONTROL_MAPPING = dict(nd_ra='nd', no_gw='no-default-gateway', querier_ip='querier', unspecified='')
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule, SEQUENCETYPE
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
bd=dict(type='str', aliases=['bd_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
enable_vip=dict(type='bool'),
gateway=dict(type='str', aliases=['gateway_ip']), # Not required for querying all objects
mask=dict(type='int', aliases=['subnet_mask']), # Not required for querying all objects
subnet_name=dict(type='str', aliases=['name']),
nd_prefix_policy=dict(type='str'),
preferred=dict(type='bool'),
route_profile=dict(type='str'),
route_profile_l3_out=dict(type='str'),
scope=dict(type='list', choices=['private', 'public', 'shared']),
subnet_control=dict(type='str', choices=['nd_ra', 'no_gw', 'querier_ip', 'unspecified']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_together=[['gateway', 'mask']],
required_if=[
['state', 'present', ['bd', 'gateway', 'mask', 'tenant']],
['state', 'absent', ['bd', 'gateway', 'mask', 'tenant']],
],
)
aci = ACIModule(module)
description = module.params['description']
enable_vip = aci.boolean(module.params['enable_vip'])
tenant = module.params['tenant']
bd = module.params['bd']
gateway = module.params['gateway']
mask = module.params['mask']
if mask is not None and mask not in range(0, 129):
# TODO: split checks between IPv4 and IPv6 Addresses
module.fail_json(msg='Valid Subnet Masks are 0 to 32 for IPv4 Addresses and 0 to 128 for IPv6 addresses')
if gateway is not None:
gateway = '{0}/{1}'.format(gateway, str(mask))
subnet_name = module.params['subnet_name']
nd_prefix_policy = module.params['nd_prefix_policy']
preferred = aci.boolean(module.params['preferred'])
route_profile = module.params['route_profile']
route_profile_l3_out = module.params['route_profile_l3_out']
scope = module.params['scope']
if isinstance(scope, SEQUENCETYPE):
if 'private' in scope and 'public' in scope:
module.fail_json(msg="Parameter 'scope' cannot be both 'private' and 'public', got: %s" % scope)
else:
scope = ','.join(sorted(scope))
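# Illustrative note: sorting makes the pushed value deterministic, e.g. a task
# value of [shared, private] is sent to the APIC as the string 'private,shared'.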
state = module.params['state']
subnet_control = module.params['subnet_control']
if subnet_control:
subnet_control = SUBNET_CONTROL_MAPPING[subnet_control]
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='fvBD',
aci_rn='BD-{0}'.format(bd),
filter_target='eq(fvBD.name, "{0}")'.format(bd),
module_object=bd,
),
subclass_2=dict(
aci_class='fvSubnet',
aci_rn='subnet-[{0}]'.format(gateway),
filter_target='eq(fvSubnet.ip, "{0}")'.format(gateway),
module_object=gateway,
),
child_classes=['fvRsBDSubnetToProfile', 'fvRsNdPfxPol'],
)
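# Illustrative note: with tenant=production, bd=database, gateway=10.1.1.1 and
# mask=24 (as in the EXAMPLES above), the rn chain resolves to a DN of the form
# uni/tn-production/BD-database/subnet-[10.1.1.1/24]; the full request URL is
# assembled by ACIModule.construct_url.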
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvSubnet',
class_config=dict(
ctrl=subnet_control,
descr=description,
ip=gateway,
name=subnet_name,
preferred=preferred,
scope=scope,
virtual=enable_vip,
),
child_configs=[
{'fvRsBDSubnetToProfile': {'attributes': {'tnL3extOutName': route_profile_l3_out, 'tnRtctrlProfileName': | |
"pl85s5iyhxltk.cf",
"pl85s5iyhxltk.ga",
"pl85s5iyhxltk.gq",
"pl85s5iyhxltk.ml",
"pl85s5iyhxltk.tk",
"placemail.online",
"planet-travel.club",
"planeta-samsung.ru",
"plantsvszombies.ru",
"playcard-semi.com",
"playforfun.ru",
"playsbox.ru",
"playsims.ru",
"plexolan.de",
"plez.org",
"plgbgus.ga",
"plgbgus.ml",
"ploae.com",
"plrdn.com",
"plushington.ru",
"plusmail.cf",
"plutocow.com",
"plutofox.com",
"plutoillumination.ru",
"pmlep.de",
"pneumo360.ru",
"pnevmo360.ru",
"pnvp7zmuexbqvv.cf",
"pnvp7zmuexbqvv.ga",
"pnvp7zmuexbqvv.gq",
"pnvp7zmuexbqvv.ml",
"pnvp7zmuexbqvv.tk",
"po.bot.nu",
"poalmail.ga",
"pobedit-ves.ru",
"pocupki.ru",
"podboremavto.ru",
"poddon-13.ru",
"podlipskiy.ru",
"podsvetkakuhni.ru",
"podveska-nn.ru",
"poehali-otdihat.ru",
"poioijnkjb.cf",
"poioijnkjb.ml",
"pokeett.site",
"pokemail.net",
"pokeymoms.org",
"pokupai-mili.ru",
"pokupai-vsegda.ru",
"pokupaska24.ru",
"polacy-dungannon.tk",
"polarkingxx.ml",
"polet-pallet.ru",
"polimi.ml",
"polishxwyb.com",
"politikerclub.de",
"poliusraas.tk",
"polka-shkaff.ru",
"polkadot.tk",
"polkaidot.ml",
"polplitka.ru",
"polres-aeknabara.cf",
"poly-swarm.com",
"polyfaust.com",
"polymnestore.co",
"polyswarms.com",
"pomsiese.ru",
"poney.xyz",
"ponp.be",
"poofy.org",
"pookmail.com",
"poopiebutt.club",
"popconn.party",
"popesodomy.com",
"popmail.io",
"popso.cf",
"popso.ga",
"popso.gq",
"popso.ml",
"popso.tk",
"popsok.cf",
"popsok.ga",
"popsok.gq",
"popsok.ml",
"popsok.tk",
"porco.cf",
"porco.ga",
"porco.gq",
"porco.ml",
"porororebus.top",
"porsh.net",
"porta.loyalherceghalom.ml",
"portalweb.icu",
"posdz.com",
"post.melkfl.es",
"post0.profimedia.net",
"posta.store",
"postacin.com",
"postbenua.ru",
"postcardsfromukraine.crowdpress.it",
"postemail.net",
"postfach2go.de",
"posthectomie.info",
"posurl.ga",
"posvabotma.x24hr.com",
"potencialexone.ru",
"potencialexstore.ru",
"potencid.ru",
"potencis.ru",
"potencyalex.ru",
"potenzialex149.ru",
"powerbank-russia.ru",
"powerlink.com.np",
"powerml.racing",
"poxudeykina.ru",
"poy.kr",
"pp98.cf",
"pp98.ga",
"pp98.gq",
"pp98.ml",
"pp98.tk",
"ppabldwzsrdfr.cf",
"ppabldwzsrdfr.ga",
"ppabldwzsrdfr.gq",
"ppabldwzsrdfr.ml",
"ppabldwzsrdfr.tk",
"ppc-e.com",
"ppc-trafic.ru",
"ppetw.com",
"ppgu8mqxrmjebc.ga",
"ppgu8mqxrmjebc.gq",
"ppgu8mqxrmjebc.ml",
"ppgu8mqxrmjebc.tk",
"ppmoazqnoip2s.cf",
"ppmoazqnoip2s.ga",
"ppmoazqnoip2s.gq",
"ppmoazqnoip2s.ml",
"ppymail.win",
"pq6fbq3r0bapdaq.cf",
"pq6fbq3r0bapdaq.ga",
"pq6fbq3r0bapdaq.gq",
"pq6fbq3r0bapdaq.ml",
"pq6fbq3r0bapdaq.tk",
"pqoia.com",
"pqoss.com",
"pqtoxevetjoh6tk.cf",
"pqtoxevetjoh6tk.ga",
"pqtoxevetjoh6tk.gq",
"pqtoxevetjoh6tk.ml",
"pqtoxevetjoh6tk.tk",
"pr1ngsil4nmu.ga",
"pr4y.web.id",
"prado-mail.ru",
"pradvice.ru",
"prava-ab.ru",
"pravorobotov.ru",
"prayersa3.com",
"prazdnik-37.ru",
"predatorrat.cf",
"predatorrat.ga",
"predatorrat.gq",
"predatorrat.ml",
"predatorrat.tk",
"prefixcom.com",
"prekuldown47mmi.ml",
"premium-mail.fr",
"premiumail.ml",
"premiumperson.website",
"preparee.top",
"preseven.com",
"press-citrus.ru",
"presslemon.ru",
"prestore.co",
"prethlah907huir.cf",
"pricebit.co",
"priceblog.co",
"priceio.co",
"pricenew.co",
"pricenow.co",
"priceonline.co",
"pricepage.co",
"priceworld.co",
"pride.nafko.cf",
"prigovormodoi.ru",
"prihshop.ru",
"primres.ru",
"prin.be",
"prince-khan.tk",
"pringlang.cf",
"pringlang.ga",
"pringlang.gq",
"pringlang.ml",
"printf.cf",
"printf.ga",
"printf.ml",
"priorityxn5.com",
"prismlasers.tk",
"priv.beastemail.com",
"privacy.net",
"privatemailinator.nl",
"privaterelay.appleid.com",
"privy-mail.com",
"privyinternet.com",
"privyinternet.net",
"privymail.de",
"privyonline.com",
"privyonline.net",
"prmail.top",
"pro.cloudns.asia",
"prob.icu",
"proc.icu",
"procrackers.com",
"proeasyweb.com",
"profast.top",
"profilific.com",
"profit-mats.ru",
"profriend.ru",
"programmymets.ru",
"prohost24.ru",
"project-xhabbo.com",
"projectcl.com",
"prol.icu",
"prolagu.pro",
"promail.site",
"promails.xyz",
"promplast-okna.ru",
"promptly700.com",
"proprice.co",
"proprietativalcea.ro",
"propscore.com",
"protection-0ffice365.com",
"protein-krasnodar.ru",
"protein-razves.ru",
"protein-saratov.ru",
"protempmail.com",
"protestore.co",
"proto2mail.com",
"providier.com",
"provodow-net.ru",
"prow.cf",
"prow.ga",
"prow.gq",
"prow.ml",
"proxsei.com",
"proxymail.eu",
"proz.icu",
"prtnx.com",
"prtxw.com",
"prtz.eu",
"prwmqbfoxdnlh8p4z.cf",
"prwmqbfoxdnlh8p4z.ga",
"prwmqbfoxdnlh8p4z.gq",
"prwmqbfoxdnlh8p4z.ml",
"prwmqbfoxdnlh8p4z.tk",
"prxnzb4zpztlv.cf",
"prxnzb4zpztlv.ga",
"prxnzb4zpztlv.gq",
"prxnzb4zpztlv.ml",
"prxnzb4zpztlv.tk",
"pryamieruki.ru",
"ps160.mpm-motors.cf",
"psacake.me",
"psettinge5.com",
"pseudoname.io",
"psirens.icu",
"psles.com",
"psoxs.com",
"psychologist-prof.ru",
"ptc.vuforia.us",
"ptcks1ribhvupd3ixg.cf",
"ptcks1ribhvupd3ixg.ga",
"ptcks1ribhvupd3ixg.gq",
"ptcks1ribhvupd3ixg.ml",
"ptcks1ribhvupd3ixg.tk",
"ptgtar7lslnpomx.ga",
"ptgtar7lslnpomx.ml",
"ptgtar7lslnpomx.tk",
"ptpigeaz0uorsrygsz.cf",
"ptpigeaz0uorsrygsz.ga",
"ptpigeaz0uorsrygsz.gq",
"ptpigeaz0uorsrygsz.ml",
"ptpigeaz0uorsrygsz.tk",
"ptzprint.ru",
"pubgeresnrpxsab.cf",
"pubgeresnrpxsab.ga",
"pubgeresnrpxsab.gq",
"pubgeresnrpxsab.ml",
"pubgeresnrpxsab.tk",
"pubgm.website",
"publi.innovatio.es",
"pubmail886.com",
"pubs.ga",
"pubwarez.com",
"puchmlt0mt.ga",
"puchmlt0mt.gq",
"puchmlt0mt.tk",
"pudra-style.ru",
"puds5k7lca9zq.cf",
"puds5k7lca9zq.ga",
"puds5k7lca9zq.gq",
"puds5k7lca9zq.ml",
"puds5k7lca9zq.tk",
"puglieisi.com",
"puh4iigs4w.cf",
"puh4iigs4w.ga",
"puh4iigs4w.gq",
"puh4iigs4w.ml",
"puh4iigs4w.tk",
"puji.pro",
"pukimay.cf",
"pukimay.ga",
"pukimay.gq",
"pukimay.ml",
"pukimay.tk",
"pullcombine.com",
"pullmail.info",
"pulpmail.us",
"pulsarfinans.ru",
"pulse-original.ru",
"pulwarm.net",
"pumamaning.cf",
"pumamaning.ml",
"pumapumayes.cf",
"pumapumayes.ml",
"puncakyuk.com",
"punggur.tk",
"pungkiparamitasari.com",
"punkass.com",
"purelogistics.org",
"purplemail.ga",
"purplemail.gq",
"purplemail.ml",
"purplemail.tk",
"pushmojo.com",
"put2.net",
"putfs6fbkicck.cf",
"putfs6fbkicck.ga",
"putfs6fbkicck.gq",
"putfs6fbkicck.ml",
"putfs6fbkicck.tk",
"puttana.cf",
"puttana.ga",
"puttana.gq",
"puttana.ml",
"puttana.tk",
"puttanamaiala.tk",
"putthisinyourspamdatabase.com",
"puyenkgel50ccb.ml",
"pvckomplekt.ru",
"pw-mail.cf",
"pw-mail.ga",
"pw-mail.gq",
"pw-mail.ml",
"pw-mail.tk",
"pw.epac.to",
"pw.loyalherceghalom.ml",
"pw.r4.dns-cloud.net",
"pwjsdgofya4rwc.cf",
"pwjsdgofya4rwc.ga",
"pwjsdgofya4rwc.gq",
"pwjsdgofya4rwc.ml",
"pwjsdgofya4rwc.tk",
"pwp.lv",
"pwrby.com",
"pwt9azutcao7mi6.ga",
"pwt9azutcao7mi6.ml",
"pwt9azutcao7mi6.tk",
"px0dqqkyiii9g4fwb.cf",
"px0dqqkyiii9g4fwb.ga",
"px0dqqkyiii9g4fwb.gq",
"px0dqqkyiii9g4fwb.ml",
"px0dqqkyiii9g4fwb.tk",
"pxddcpf59hkr6mwb.cf",
"pxddcpf59hkr6mwb.ga",
"pxddcpf59hkr6mwb.gq",
"pxddcpf59hkr6mwb.ml",
"pxddcpf59hkr6mwb.tk",
"pyiauje42dysm.cf",
"pyiauje42dysm.ga",
"pyiauje42dysm.gq",
"pyiauje42dysm.ml",
"pyiauje42dysm.tk",
"pyrokiwi.xyz",
"q-urotrin.ru",
"q.new-mgmt.ga",
"q.xtc.yt",
"q0bcg1druy.ga",
"q0bcg1druy.ml",
"q0bcg1druy.tk",
"q2gfiqsi4szzf54xe.cf",
"q2gfiqsi4szzf54xe.ga",
"q2gfiqsi4szzf54xe.gq",
"q2gfiqsi4szzf54xe.ml",
"q2gfiqsi4szzf54xe.tk",
"q2lofok6s06n6fqm.cf",
"q2lofok6s06n6fqm.ga",
"q2lofok6s06n6fqm.gq",
"q2lofok6s06n6fqm.ml",
"q2lofok6s06n6fqm.tk",
"q4heo7ooauboanqh3xm.cf",
"q4heo7ooauboanqh3xm.ga",
"q4heo7ooauboanqh3xm.gq",
"q4heo7ooauboanqh3xm.ml",
"q4heo7ooauboanqh3xm.tk",
"q5prxncteag.cf",
"q5prxncteag.ga",
"q5prxncteag.gq",
"q5prxncteag.ml",
"q5prxncteag.tk",
"q5vm7pi9.com",
"q6suiq1aob.cf",
"q6suiq1aob.ga",
"q6suiq1aob.gq",
"q6suiq1aob.ml",
"q6suiq1aob.tk",
"q74.ru",
"q7t43q92.com",
"q7t43q92.com.com",
"q8cbwendy.com",
"q8ec97sr791.cf",
"q8ec97sr791.ga",
"q8ec97sr791.gq",
"q8ec97sr791.ml",
"q8ec97sr791.tk",
"q8fqrwlxehnu.cf",
"q8fqrwlxehnu.ga",
"q8fqrwlxehnu.gq",
"q8fqrwlxehnu.ml",
"q8fqrwlxehnu.tk",
"q8i4v1dvlsg.ga",
"q8i4v1dvlsg.ml",
"q8i4v1dvlsg.tk",
"qa.team",
"qacquirep.com",
"qaetaldkgl64ygdds.gq",
"qafatwallet.com",
"qasd2qgznggjrl.cf",
"qasd2qgznggjrl.ga",
"qasd2qgznggjrl.ml",
"qasd2qgznggjrl.tk",
"qasti.com",
"qazulbaauct.cf",
"qazulbaauct.ga",
"qazulbaauct.gq",
"qazulbaauct.ml",
"qazulbaauct.tk",
"qb23c60behoymdve6xf.cf",
"qb23c60behoymdve6xf.ga",
"qb23c60behoymdve6xf.gq",
"qb23c60behoymdve6xf.ml",
"qb23c60behoymdve6xf.tk",
"qbaydx2cpv8.cf",
"qbaydx2cpv8.ga",
"qbaydx2cpv8.gq",
"qbaydx2cpv8.ml",
"qbaydx2cpv8.tk",
"qbfree.us",
"qbi.kr",
"qbikgcncshkyspoo.cf",
"qbikgcncshkyspoo.ga",
"qbikgcncshkyspoo.gq",
"qbikgcncshkyspoo.ml",
"qbikgcncshkyspoo.tk",
"qbmail.bid",
"qbqbtf4trnycocdg4c.cf",
"qbqbtf4trnycocdg4c.ga",
"qbqbtf4trnycocdg4c.gq",
"qbqbtf4trnycocdg4c.ml",
"qbuog1cbktcy.cf",
"qbuog1cbktcy.ga",
"qbuog1cbktcy.gq",
"qbuog1cbktcy.ml",
"qbuog1cbktcy.tk",
"qc0lipw1ux.cf",
"qc0lipw1ux.ga",
"qc0lipw1ux.ml",
"qc0lipw1ux.tk",
"qcmail.qc.to",
"qdrwriterx.com",
"qe41hqboe4qixqlfe.gq",
"qe41hqboe4qixqlfe.ml",
"qe41hqboe4qixqlfe.tk",
"qedwardr.com",
"qeispacesq.com",
"qeotxmwotu.cf",
"qeotxmwotu.ga",
"qeotxmwotu.gq",
"qeotxmwotu.ml",
"qeotxmwotu.tk",
"qf1tqu1x124p4tlxkq.cf",
"qf1tqu1x124p4tlxkq.ga",
"qf1tqu1x124p4tlxkq.gq",
"qf1tqu1x124p4tlxkq.ml",
"qf1tqu1x124p4tlxkq.tk",
"qfhh3mmirhvhhdi3b.cf",
"qfhh3mmirhvhhdi3b.ga",
"qfhh3mmirhvhhdi3b.gq",
"qfhh3mmirhvhhdi3b.ml",
"qfhh3mmirhvhhdi3b.tk",
"qfrsxco1mkgl.ga",
"qfrsxco1mkgl.gq",
"qfrsxco1mkgl.ml",
"qg8zn7nj8prrt4z3.cf",
"qg8zn7nj8prrt4z3.ga",
"qg8zn7nj8prrt4z3.gq",
"qg8zn7nj8prrt4z3.ml",
"qg8zn7nj8prrt4z3.tk",
"qgfkslkd1ztf.cf",
"qgfkslkd1ztf.ga",
"qgfkslkd1ztf.gq",
"qgfkslkd1ztf.ml",
"qhesnwv.com",
"qhrgzdqthrqocrge922.cf",
"qhrgzdqthrqocrge922.ga",
"qhrgzdqthrqocrge922.gq",
"qhrgzdqthrqocrge922.ml",
"qhrgzdqthrqocrge922.tk",
"qhstreetr.com",
"qiaua.com",
"qiq.us",
"qirzgl53rik0t0hheo.cf",
"qirzgl53rik0t0hheo.ga",
"qirzgl53rik0t0hheo.gq",
"qirzgl53rik0t0hheo.ml",
"qirzgl53rik0t0hheo.tk",
"qisdo.com",
"qisoa.com",
"qj97r73md7v5.com",
"qjnnbimvvmsk1s.cf",
"qjnnbimvvmsk1s.ga",
"qjnnbimvvmsk1s.gq",
"qjnnbimvvmsk1s.ml",
"qjnnbimvvmsk1s.tk",
"qkbzptliqpdgeg.cf",
"qkbzptliqpdgeg.ga",
"qkbzptliqpdgeg.gq",
"qkbzptliqpdgeg.ml",
"qkbzptliqpdgeg.tk",
"qkw4ck7cs1hktfba.cf",
"qkw4ck7cs1hktfba.ga",
"qkw4ck7cs1hktfba.gq",
"qkw4ck7cs1hktfba.ml",
"qkw4ck7cs1hktfba.tk",
"qlhnu526.com",
"qluiwa5wuctfmsjpju.cf",
"qluiwa5wuctfmsjpju.ga",
"qluiwa5wuctfmsjpju.gq",
"qluiwa5wuctfmsjpju.ml",
"qmperehpsthiu9j91c.ga",
"qmperehpsthiu9j91c.ml",
"qmperehpsthiu9j91c.tk",
"qmwparouoeq0sc.cf",
"qmwparouoeq0sc.ga",
"qmwparouoeq0sc.gq",
"qmwparouoeq0sc.ml",
"qmwparouoeq0sc.tk",
"qn5egoikcwoxfif2g.cf",
"qn5egoikcwoxfif2g.ga",
"qn5egoikcwoxfif2g.gq",
"qn5egoikcwoxfif2g.ml",
"qn5egoikcwoxfif2g.tk",
"qnb.io",
"qnkznwsrwu3.cf",
"qnkznwsrwu3.ga",
"qnkznwsrwu3.gq",
"qnkznwsrwu3.ml",
"qnkznwsrwu3.tk",
"qnuqgrfujukl2e8kh3o.cf",
"qnuqgrfujukl2e8kh3o.ga",
"qnuqgrfujukl2e8kh3o.gq",
"qnuqgrfujukl2e8kh3o.ml",
"qnuqgrfujukl2e8kh3o.tk",
"qnzkugh2dhiq.cf",
"qnzkugh2dhiq.ga",
"qnzkugh2dhiq.gq",
"qnzkugh2dhiq.ml",
"qnzkugh2dhiq.tk",
"qocya.com",
"qoo-10.id",
"qopmail.com",
"qorikan.com",
"qpalong.com",
"qpi8iqrh8wtfpee3p.ga",
"qpi8iqrh8wtfpee3p.ml",
"qpi8iqrh8wtfpee3p.tk",
"qpptplypblyp052.cf",
"qpulsa.com",
"qq568.top",
"qqipgthtrlm.cf",
"qqipgthtrlm.ga",
"qqipgthtrlm.gq",
"qqipgthtrlm.ml",
"qqipgthtrlm.tk",
"qqqwwwil.men",
"qqzymail.win",
"qs.dp76.com",
"qs2k.com",
"qtfxtbxudvfvx04.cf",
"qtfxtbxudvfvx04.ga",
"qtfxtbxudvfvx04.gq",
"qtfxtbxudvfvx04.ml",
"qtfxtbxudvfvx04.tk",
"qtlhkpfx3bgdxan.cf",
"qtlhkpfx3bgdxan.ga",
"qtlhkpfx3bgdxan.gq",
"qtlhkpfx3bgdxan.ml",
"qtlhkpfx3bgdxan.tk",
"qtpxsvwifkc.cf",
"qtpxsvwifkc.ga",
"qtpxsvwifkc.gq",
"qtpxsvwifkc.ml",
"qtpxsvwifkc.tk",
"qtum-ico.com",
"quadparts.ru",
"quaestore.co",
"qubecorp.tk",
"queeejkdfg7790.cf",
"queeejkdfg7790.ga",
"queeejkdfg7790.gq",
"queeejkdfg7790.ml",
"queeejkdfg7790.tk",
"querydirect.com",
"questore.co",
"queuem.com",
"quichebedext.freetcp.com",
"quick-mail.club",
"quick-mail.info",
"quick-mail.online",
"quickinbox.com",
"quickmail.best",
"quickmail.nl",
"quickmail.rocks",
"quickmakeupbag-shop.ru",
"quid4pro.com",
"quintania.top",
"ququb.com",
"quuradminb.com",
"qvy.me",
"qwarmingu.com",
"qwekssxt6624.cf",
"qwekssxt6624.ga",
"qwekssxt6624.gq",
"qwekssxt6624.ml",
"qwekssxt6624.tk",
"qwerasd1.ru",
"qwerqwerty.ga",
"qwerqwerty.ml",
"qwerqwerty.tk",
"qwertymail.cf",
"qwertymail.ga",
"qwertymail.gq",
"qwertymail.ml",
"qwertymail.tk",
"qwertyuiop.tk",
"qwfox.com",
"qwqrwsf.date",
"qwsa.ga",
"qwtof1c6gewti.cf",
"qwtof1c6gewti.ga",
"qwtof1c6gewti.gq",
"qwtof1c6gewti.ml",
"qwtof1c6gewti.tk",
"qxlvqptiudxbp5.cf",
"qxlvqptiudxbp5.ga",
"qxlvqptiudxbp5.gq",
"qxlvqptiudxbp5.ml",
"qxlvqptiudxbp5.tk",
"qxpaperk.com",
"qzdynxhzj71khns.cf",
"qzdynxhzj71khns.gq",
"qzdynxhzj71khns.ml",
"qzdynxhzj71khns.tk",
"qzvbxqe5dx.cf",
"qzvbxqe5dx.ga",
"qzvbxqe5dx.gq",
"qzvbxqe5dx.ml",
"qzvbxqe5dx.tk",
"r-fasket.ru",
"r-mail.cf",
"r-mail.ga",
"r-mail.gq",
"r-mail.ml",
"r.yasser.ru",
"r0ywhqmv359i9cawktw.cf",
"r0ywhqmv359i9cawktw.ga",
"r0ywhqmv359i9cawktw.gq",
"r0ywhqmv359i9cawktw.ml",
"r0ywhqmv359i9cawktw.tk",
"r115pwhzofguwog.cf",
"r115pwhzofguwog.ga",
"r115pwhzofguwog.gq",
"r115pwhzofguwog.ml",
"r115pwhzofguwog.tk",
"r1qaihnn9wb.cf",
"r1qaihnn9wb.ga",
"r1qaihnn9wb.gq",
"r1qaihnn9wb.ml",
"r1qaihnn9wb.tk",
"r2cakes.com",
"r2vw8nlia9goqce.cf",
"r2vw8nlia9goqce.ga",
"r2vw8nlia9goqce.gq",
"r2vw8nlia9goqce.ml",
"r2vw8nlia9goqce.tk",
"r2vxkpb2nrw.cf",
"r2vxkpb2nrw.ga",
"r2vxkpb2nrw.gq",
"r2vxkpb2nrw.ml",
"r2vxkpb2nrw.tk",
"r3-r4.tk",
"r31s4fo.com",
"r3hyegd84yhf.cf",
"r3hyegd84yhf.ga",
"r3hyegd84yhf.gq",
"r3hyegd84yhf.ml",
"r3hyegd84yhf.tk",
"r4.dns-cloud.net",
"r4gmw5fk5udod2q.cf",
"r4gmw5fk5udod2q.ga",
"r4gmw5fk5udod2q.gq",
"r4gmw5fk5udod2q.ml",
"r4gmw5fk5udod2q.tk",
"r4ntwsd0fe58xtdp.cf",
"r4ntwsd0fe58xtdp.ga",
"r4ntwsd0fe58xtdp.gq",
"r4ntwsd0fe58xtdp.ml",
"r4ntwsd0fe58xtdp.tk",
"r4unxengsekp.cf",
"r4unxengsekp.ga",
"r4unxengsekp.gq",
"r4unxengsekp.ml",
"r4unxengsekp.tk",
"r56r564b.cf",
"r56r564b.ga",
"r56r564b.gq",
"r56r564b.ml",
"r56r564b.tk",
"r6cnjv0uxgdc05lehvs.cf",
"r6cnjv0uxgdc05lehvs.ga",
"r6cnjv0uxgdc05lehvs.gq",
"r6cnjv0uxgdc05lehvs.ml",
"r6cnjv0uxgdc05lehvs.tk",
"r8lirhrgxggthhh.cf",
"r8lirhrgxggthhh.ga",
"r8lirhrgxggthhh.ml",
"r8lirhrgxggthhh.tk",
"r8r4p0cb.com",
"r9-nalarum.ru",
"r9jebqouashturp.cf",
"r9jebqouashturp.ga",
"r9jebqouashturp.gq",
"r9jebqouashturp.ml",
"r9jebqouashturp.tk",
"r9ycfn3nou.cf",
"r9ycfn3nou.ga",
"r9ycfn3nou.gq",
"r9ycfn3nou.ml",
"r9ycfn3nou.tk",
"ra-st.ru",
"rabiot.reisen",
"rabota24-v-internet.ru",
"rabuberkah.cf",
"radiator-stout.ru",
"radiodale.com",
"radiodirectory.ru",
"rael.cc",
"raetp9.com",
"raf-store.com",
"raffles.gg",
"rafmail.cf",
"rafmix.site",
"ragel.me",
"ragzwtna4ozrbf.cf",
"ragzwtna4ozrbf.ga",
"ragzwtna4ozrbf.gq",
"ragzwtna4ozrbf.ml",
"ragzwtna4ozrbf.tk",
"raiasu.cf",
"raiasu.ga",
"raiasu.gq",
"raiasu.ml",
"raiasu.tk",
"raikas77.eu",
"railway-shop.ru",
"raimu.cf",
"raimucok.cf",
"raimucok.ga",
"raimucok.gq",
"raimucok.ml",
"raimuwedos.cf",
"raimuwedos.ga",
"raimuwedos.gq",
"raimuwedos.ml",
"rainbocorns.ru",
"rainbow-vanilla.ru",
"rainbowly.ml",
"rainwaterstudios.org",
"raiplay.cf",
"raiplay.ga",
"raiplay.gq",
"raiplay.ml",
"raiplay.tk",
"raiway.cf",
"raiway.ga",
"raiway.gq",
"raiway.ml",
"raiway.tk",
"rajarajut.co",
"rajasoal.online",
"rajemail.tk",
"rajeshcon.cf",
"rajetempmail.com",
"raketenmann.de",
"ralree.com",
"rambakcor44bwd.ga",
"ramireshop.ru",
"rampas.ml",
"rampasboya.ml",
"randomcsorg.ru",
"rao-network.com",
"rao.kr",
"rape.lol",
"rapenakyodilakoni.cf",
"rapt.be",
"raskhin54swert.ml",
"rasprodazha365.ru",
"ratta.cf",
"ratta.ga",
"ratta.gq",
"ratta.ml",
"ratta.tk",
"rattlearray.com",
"rattlecore.com",
"rauxa.seny.cat",
"rav-4.cf",
"rav-4.ga",
"rav-4.gq",
"rav-4.ml",
"rav-4.tk",
"rav-apsl.ru",
"rav4.tk",
"ravenom.ru",
"ravyn.xyz",
"rawhidefc.org",
"rawrr.ga",
"razemail.com",
"razinrocks.me",
"rblx.site",
"rbpc6x9gprl.cf",
"rbpc6x9gprl.ga",
"rbpc6x9gprl.gq",
"rbpc6x9gprl.ml",
"rbpc6x9gprl.tk",
"rcasd.com",
"rcinvn408nrpwax3iyu.cf",
"rcinvn408nrpwax3iyu.ga",
"rcinvn408nrpwax3iyu.gq",
"rcinvn408nrpwax3iyu.ml",
"rcinvn408nrpwax3iyu.tk",
"rcpt.at",
"rdahb3lrpjquq.cf",
"rdahb3lrpjquq.ga",
"rdahb3lrpjquq.gq",
"rdahb3lrpjquq.ml",
"rdahb3lrpjquq.tk",
"rdklcrv.xyz",
"rdyn171d60tswq0hs8.cf",
"rdyn171d60tswq0hs8.ga",
"rdyn171d60tswq0hs8.gq",
"rdyn171d60tswq0hs8.ml",
"rdyn171d60tswq0hs8.tk",
"re-gister.com",
"reactive-school.ru",
"readyforyou.cf",
"readyforyou.ga",
"readyforyou.gq",
"readyforyou.ml",
"reality-concept.club",
"really.istrash.com",
"reallymymail.com",
"realtor-chelny.ru",
"rebotec24.ru",
"receiveee.com",
"recept-edy.ru",
"recepty-mira.ru",
"recepty-prigotovleniya.ru",
"recode.me",
"recognised.win",
"reconmail.com",
"recruitaware.com",
"recursor.net",
"recyclemail.dk",
"red-mail.info",
"red-mail.top",
"redcarpet-agency.ru",
"redcityrose.ru",
"reddcoin2.com",
"reddduslim.ru",
"reddithub.com",
"reddoors.ru",
"redfeathercrow.com",
"redigesso.ru",
"redmail.tech",
"redpeanut.com",
"redpen.trade",
"redteddy.ru",
"reduslim-dealer.ru",
"reduslim-dly-vas.ru",
"reduslim-originals.ru",
"reduslimf.ru",
"reduslimius.ru",
"redusslimium.ru",
"referalu.ru",
"refinance-credit.ru",
"reftoken.net",
"refurhost.com",
"reg19.ml",
"regalsz.com",
"regbypass.com",
"regbypass.comsafe-mail.net",
"region42-tur.ru",
"registraciya-bank.ru",
"regpp7arln7bhpwq1.cf",
"regpp7arln7bhpwq1.ga",
"regpp7arln7bhpwq1.gq",
"regpp7arln7bhpwq1.ml",
"regpp7arln7bhpwq1.tk",
"regspaces.tk",
"rehau39.ru",
"reignict.com",
"rejectmail.com",
"rejo.technology",
"reksareksy78oy.ml",
"reksatal-effective.ru",
"rekt.ml",
"rekthosting.ml",
"relaxology.ru",
"relay-bossku3.com",
"relay-bossku4.com",
"reloadpoint.ru",
"remail.cf",
"remail.ga",
"remarkable.rocks",
"remehan.ga",
"remehan.ml",
"remont-iq.ru",
"remontholoda36.ru",
"remontvuu.ru",
"remonty-v-sochi.ru",
"remote.li",
"renault-forums.ru",
"renault-sa.cf",
"renault-sa.ga",
"renault-sa.gq",
"renault-sa.ml",
"renault-sa.tk",
"renaulttrucks.cf",
"renaulttrucks.ga",
"renaulttrucks.gq",
"renaulttrucks.ml",
"renaulttrucks.tk",
"rendymail.com",
"rengginangred95btw.cf",
"renovation-building.ru",
"renumax-dealer.ru",
"repetitoronlayn.ru",
"repolusi.com",
"reportes.ml",
"reptilegenetics.com",
"rerajut.com",
"res.craigslist.org",
"resantamiass.ru",
"resellermurah.me",
"resepku.site",
"reservelp.de",
"resgedvgfed.tk",
"resistore.co",
"resla-rasxaer.ru",
"rest-top.ru",
"restartmotora.ru",
"ret35363ddfk.cf",
"ret35363ddfk.ga",
"ret35363ddfk.gq",
"ret35363ddfk.ml",
"ret35363ddfk.tk",
"rethmail.ga",
"retsept18.ru",
"return0.ga",
"return0.gq",
"return0.ml",
"returnhealth.ru",
"rev-zone.net",
"revolvingdoorhoax.org",
"rewas-app-lex.com",
"rex-app-lexc.com",
"rexagod-freeaccount.cf",
"rexagod-freeaccount.ga",
"rexagod-freeaccount.gq",
"rexagod-freeaccount.ml",
"rexagod-freeaccount.tk",
"rexagod.cf",
"rexagod.ga",
"rexagod.gq",
"rexagod.ml",
"rexagod.tk",
"rfavy2lxsllh5.cf",
"rfavy2lxsllh5.ga",
"rfavy2lxsllh5.gq",
"rfavy2lxsllh5.ml",
"rfgym.ru",
"rfirewallj.com",
"rfreedomj.com",
"rfzaym.ru",
"rgb9000.net",
"rgtvtnxvci8dnwy8dfe.cf",
"rgtvtnxvci8dnwy8dfe.ga",
"rgtvtnxvci8dnwy8dfe.gq",
"rgtvtnxvci8dnwy8dfe.ml",
"rgtvtnxvci8dnwy8dfe.tk",
"rgwfagbc9ufthnkmvu.cf",
"rgwfagbc9ufthnkmvu.ml",
"rgwfagbc9ufthnkmvu.tk",
"rh3qqqmfamt3ccdgfa.cf",
"rh3qqqmfamt3ccdgfa.ga",
"rh3qqqmfamt3ccdgfa.gq",
"rh3qqqmfamt3ccdgfa.ml",
"rh3qqqmfamt3ccdgfa.tk",
"rheank.com",
"rhombushorizons.com",
"rhpzrwl4znync9f4f.cf",
"rhpzrwl4znync9f4f.ga",
"rhpzrwl4znync9f4f.gq",
"rhpzrwl4znync9f4f.ml",
"rhpzrwl4znync9f4f.tk",
"rhyta.com",
"riaucyberart.ga",
"riba-4ok.ru",
"rich-money.pw",
"richdi.ru",
"richfinances.pw",
"richfunds.pw",
"richinssuresh.ga",
"richmoney.pw",
"richonedai.pw",
"richsmart.pw",
"rickifoodpatrocina.tk",
"ricret.com",
"riddermark.de",
"rif-ramarew.ru",
"rifkian.cf",
"rifkian.ga",
"rifkian.gq",
"rifkian.ml",
"rifkian.tk",
"rika-santila.ru",
"rikka-weaka.ru",
"rim7lth8moct0o8edoe.cf",
"rim7lth8moct0o8edoe.ga",
"rim7lth8moct0o8edoe.gq",
"rim7lth8moct0o8edoe.ml",
"rim7lth8moct0o8edoe.tk",
"ringobot.ru",
"risencraft.ru",
"risingsuntouch.com",
"riski.cf",
"risu.be",
"riujnivuvbxe94zsp4.ga",
"riujnivuvbxe94zsp4.ml",
"riujnivuvbxe94zsp4.tk",
"rj-11.cf",
"rj-11.ga",
"rj-11.gq",
"rj-11.ml",
"rj-11.tk",
"rj11.cf",
"rj11.ga",
"rj11.gq",
"rj11.ml",
"rj11.tk",
"rjxewz2hqmdshqtrs6n.cf",
"rjxewz2hqmdshqtrs6n.ga",
"rjxewz2hqmdshqtrs6n.gq",
"rjxewz2hqmdshqtrs6n.ml",
"rjxewz2hqmdshqtrs6n.tk",
"rk4vgbhzidd0sf7hth.cf",
"rk4vgbhzidd0sf7hth.ga",
"rk4vgbhzidd0sf7hth.gq",
"rk4vgbhzidd0sf7hth.ml",
"rk4vgbhzidd0sf7hth.tk",
"rklips.com",
"rko.kr",
"rkofgttrb0.cf",
"rkofgttrb0.ga",
"rkofgttrb0.gq",
"rkofgttrb0.ml",
"rkofgttrb0.tk",
"rkomo.com",
"rmail.cf",
"rmcp.cf",
"rmcp.ga",
"rmcp.gq",
"rmcp.ml",
"rmcp.tk",
"rmqkr.net",
"rnc69szk1i0u.cf",
"rnc69szk1i0u.ga",
"rnc69szk1i0u.gq",
"rnc69szk1i0u.ml",
"rnc69szk1i0u.tk",
"rnjc8wc2uxixjylcfl.cf",
"rnjc8wc2uxixjylcfl.ga",
"rnjc8wc2uxixjylcfl.gq",
"rnjc8wc2uxixjylcfl.ml",
"rnjc8wc2uxixjylcfl.tk",
"rnzcomesth.com",
"roastscreen.com",
"robbolahta.ru",
"robinzonshop.ru",
"robo3.club",
"robo3.co",
"robo3.me",
"robo3.site",
"robot-mail.com",
"robot2.club",
"robot2.me",
"robotbobot.ru",
"robox.agency",
"rocketmail.cf",
"rocketmail.ga",
"rocketmail.gq",
"rockkes.us",
"rockmail.top",
"rockmailapp.com",
"rockmailgroup.com",
"rockyoujit.icu",
"rodeslava.ru",
"roewe.cf",
"roewe.ga",
"roewe.gq",
"roewe.ml",
"rogerin.space",
"rohingga.xyz",
"rollindo.agency",
"rolling-discs.ru",
"rollsroyce-plc.cf",
"rollsroyce-plc.ga",
"rollsroyce-plc.gq",
"rollsroyce-plc.ml",
"rollsroyce-plc.tk",
"rolndedip.cf",
"rolndedip.ga",
"rolndedip.gq",
"rolndedip.ml",
"rolndedip.tk",
"ronnierage.net",
"rootfest.net",
"roots31.ru",
"rosebear21.ru",
"rosebearmylove.ru",
"roseltorg-info.ru",
"roseofwind.ru",
"rosmillo.com",
"rotaniliam.com",
"rotate.pw",
"roundclap.fun",
"rout66.ru",
"rover100.cf",
"rover100.ga",
"rover100.gq",
"rover100.ml",
"rover100.tk",
"rover400.cf",
"rover400.ga",
"rover400.gq",
"rover400.ml",
"rover400.tk",
"rover75.cf",
"rover75.ga",
"rover75.gq",
"rover75.ml",
"rover75.tk",
"row.kr",
"rowe-solutions.com",
"rowmoja6a6d9z4ou.cf",
"rowmoja6a6d9z4ou.ga",
"rowmoja6a6d9z4ou.gq",
"rowmoja6a6d9z4ou.ml",
"rowmoja6a6d9z4ou.tk",
"royal-soft.net",
"royal.net",
"royalgifts.info",
"royalhost.info",
"royalmail.top",
"royalmarket.club",
"royalmarket.online",
"royalpresents.ru",
"roys.ml",
"roza-tmn.ru",
"rozavk.ru",
"rp-attract.ru",
"rpaowpro3l5ha.tk",
"rpgitxp6tkhtasxho.cf",
"rpgitxp6tkhtasxho.ga",
"rpgitxp6tkhtasxho.gq",
"rpgitxp6tkhtasxho.ml",
"rpgitxp6tkhtasxho.tk",
"rpgkeronbeta.ru",
"rpl-id.com",
"rplid.com",
"rppkn.com",
"rq1.in",
"rq1h27n291puvzd.cf",
"rq1h27n291puvzd.ga",
"rq1h27n291puvzd.gq",
"rq1h27n291puvzd.ml",
"rq1h27n291puvzd.tk",
"rq6668f.com",
"rr-ghost.cf",
"rr-ghost.ga",
"rr-ghost.gq",
"rr-ghost.ml",
"rr-ghost.tk",
"rr-group.cf",
"rr-group.ga",
"rr-group.gq",
"rr-group.ml",
"rr-group.tk",
"rrasianp.com",
"rrqkd9t5fhvo5bgh.cf",
"rrqkd9t5fhvo5bgh.ga",
"rrqkd9t5fhvo5bgh.gq",
"rrqkd9t5fhvo5bgh.ml",
"rrqkd9t5fhvo5bgh.tk",
"rrwbltw.xyz",
"rs311e8.com",
"rsbysdmxi9.cf",
"rsbysdmxi9.ga",
"rsbysdmxi9.gq",
"rsbysdmxi9.ml",
"rsbysdmxi9.tk",
"rsfdgtv4664.cf",
"rsfdgtv4664.ga",
"rsfdgtv4664.gq",
"rsfdgtv4664.ml",
"rsfdgtv4664.tk",
"rsnfoopuc0fs.cf",
"rsnfoopuc0fs.ga",
"rsnfoopuc0fs.gq",
"rsnfoopuc0fs.ml",
"rsnfoopuc0fs.tk",
"rssfwu9zteqfpwrodq.ga",
"rssfwu9zteqfpwrodq.gq",
"rssfwu9zteqfpwrodq.ml",
"rssfwu9zteqfpwrodq.tk",
"rsvhr.com",
"rtrtr.com",
"rts6ypzvt8.ga",
"rts6ypzvt8.gq",
"rts6ypzvt8.ml",
"rts6ypzvt8.tk",
"rtskiya.xyz",
"rtyreszxl.ru",
"ru196595463.ru",
"ruafdulw9otmsknf.cf",
"ruafdulw9otmsknf.ga",
"ruafdulw9otmsknf.ml",
"ruafdulw9otmsknf.tk",
"rubitcoini.ru",
"ruby-keys.ru",
"ruditnugnab.xyz",
"rudymail.ml",
"ruffrey.com",
"ruhshe5uet547.tk",
"ruki-master.ru",
"rundablage.com",
"runled-official.ru",
"rupayamail.com",
"ruru.be",
"rus-oonies.ru",
"rusecoflot.ru",
| |
# _core/test_base.py
"""Tests for rom_operator_inference._core._base.py."""
import os
import h5py
import pytest
import numpy as np
from scipy import linalg as la
import rom_operator_inference as opinf
from . import MODEL_FORMS, _get_data, _get_operators, _trainedmodel
class TestBaseROM:
"""Test _core._base._BaseROM."""
class Dummy(opinf._core._base._BaseROM):
"""Copy of _BaseROM without the abstract class instantiation error."""
def __init__(self, modelform):
self.modelform = modelform
def test_init(self):
"""Test _core._base._BaseROM.__init__()."""
with pytest.raises(TypeError) as ex:
opinf._core._base._BaseROM()
assert ex.value.args[0] == \
"__init__() missing 1 required positional argument: 'modelform'"
with pytest.raises(TypeError) as ex:
opinf._core._base._BaseROM("cAH", False)
assert ex.value.args[0] == \
"__init__() takes 2 positional arguments but 3 were given"
with pytest.raises(RuntimeError) as ex:
opinf._core._base._BaseROM("cAH")
assert ex.value.args[0] == \
"abstract class instantiation (use _ContinuousROM or _DiscreteROM)"
def test_modelform_properties(self, n=10, r=3, m=5):
"""Test the properties related to _core._base_._BaseROM.modelform."""
c_, A_, H_, G_, B_ = _get_operators(r, m)
# Try with invalid modelform.
with pytest.raises(ValueError) as ex:
self.Dummy("bad_form")
assert ex.value.args[0] == \
"invalid modelform key 'b'; " \
f"options are {', '.join(opinf._core._base._BaseROM._MODEL_KEYS)}"
# Check initial attributes exist.
rom = self.Dummy("cAB")
assert hasattr(rom, "modelform")
assert hasattr(rom, "Vr")
assert hasattr(rom, "n")
assert hasattr(rom, "m")
assert hasattr(rom, "r")
assert hasattr(rom, "has_constant")
assert hasattr(rom, "has_linear")
assert hasattr(rom, "has_quadratic")
assert hasattr(rom, "has_cubic")
assert hasattr(rom, "has_inputs")
assert hasattr(rom, "c_")
assert hasattr(rom, "A_")
assert hasattr(rom, "H_")
assert hasattr(rom, "G_")
assert hasattr(rom, "B_")
assert rom.Vr is None
assert rom.n is None
assert rom.m is None
assert rom.r is None
assert rom.c_ is None
assert rom.A_ is None
assert rom.H_ is None
assert rom.G_ is None
assert rom.B_ is None
rom = self.Dummy("cAG")
assert rom.modelform == "cAG"
assert rom.m == 0
assert rom.has_constant is True
assert rom.has_linear is True
assert rom.has_quadratic is False
assert rom.has_cubic is True
assert rom.has_inputs is False
assert rom.c_ is None
assert rom.A_ is None
assert rom.H_ is None
assert rom.G_ is None
assert rom.B_ is None
rom = self.Dummy("BHc")
assert rom.modelform == "cHB"
assert rom.has_constant is True
assert rom.has_linear is False
assert rom.has_quadratic is True
assert rom.has_cubic is False
assert rom.has_inputs is True
assert rom.c_ is None
assert rom.A_ is None
assert rom.H_ is None
assert rom.G_ is None
assert rom.B_ is None
def test_dimension_properties(self, n=20, m=3, r=7):
"""Test the properties _core._base._BaseROM.(n|r|Vr)."""
rom = self.Dummy("cH")
assert rom.n is None
assert rom.m == 0
assert rom.r is None
assert rom.Vr is None
# Case 1: Vr != None
Vr = np.random.random((n,r))
rom.Vr = Vr
assert rom.n == n
assert rom.m == 0
assert rom.r == r
assert rom.Vr is Vr
# Try setting n with Vr already set.
with pytest.raises(AttributeError) as ex:
rom.n = n+1
assert ex.value.args[0] == "can't set attribute (n = Vr.shape[0])"
# Try setting m with no inputs.
with pytest.raises(AttributeError) as ex:
rom.m = 1
assert ex.value.args[0] == "can't set attribute ('B' not in modelform)"
# Try setting r with Vr already set.
with pytest.raises(AttributeError) as ex:
rom.r = r+1
assert ex.value.args[0] == "can't set attribute (r = Vr.shape[1])"
# Case 2: Vr = None
del rom.Vr
assert rom.Vr is None
assert rom.n is None
rom = self.Dummy("AB")
assert rom.m is None
rom.r = r
rom.m = m
rom.B_ = np.random.random((r,m))
# Try setting r with an operator already set.
with pytest.raises(AttributeError) as ex:
rom.r = r+1
assert ex.value.args[0] == "can't set attribute (call fit() to reset)"
# Try setting m with B_ already set.
with pytest.raises(AttributeError) as ex:
rom.m = m+1
assert ex.value.args[0] == "can't set attribute (m = B_.shape[1])"
def test_operator_properties(self, m=4, r=7):
"""Test the properties _core._base._BaseROM.(c_|A_|H_|G_|B_)."""
c, A, H, G, B = operators = _get_operators(r, m)
rom = self.Dummy(self.Dummy._MODEL_KEYS)
rom.r = r
rom.m = m
for key, op in zip("cAHGB", operators):
name = key+'_'
assert hasattr(rom, name)
assert getattr(rom, name) is None
setattr(rom, name, op)
assert getattr(rom, name) is op
rom.H_ = np.random.random((r,r**2))
rom.G_ = np.random.random((r,r**3))
def test_check_operator_matches_modelform(self):
"""Test _core._base._BaseROM._check_operator_matches_modelform()."""
# Try key in modelform but operator None.
rom = self.Dummy(self.Dummy._MODEL_KEYS)
for key in rom._MODEL_KEYS:
with pytest.raises(TypeError) as ex:
rom._check_operator_matches_modelform(None, key)
assert ex.value.args[0] == \
f"'{key}' in modelform requires {key}_ != None"
# Try key not in modelform but operator not None.
rom = self.Dummy("")
for key in rom._MODEL_KEYS:
with pytest.raises(TypeError) as ex:
rom._check_operator_matches_modelform(10, key)
assert ex.value.args[0] == \
f"'{key}' not in modelform requires {key}_ = None"
def test_check_rom_operator_shape(self, m=4, r=7):
"""Test _core._base._BaseROM._check_rom_operator_shape()."""
c, A, H, G, B = operators = _get_operators(r, m)
# Try correct match but dimension 'r' is missing.
rom = self.Dummy("A")
with pytest.raises(AttributeError) as ex:
rom._check_rom_operator_shape(A, 'A')
assert ex.value.args[0] == "no reduced dimension 'r' (call fit())"
# Try correct match but dimension 'm' is missing.
rom = self.Dummy("B")
rom.r = 10
with pytest.raises(AttributeError) as ex:
rom._check_rom_operator_shape(B, 'B')
assert ex.value.args[0] == "no input dimension 'm' (call fit())"
# Try with dimensions set, but improper shapes.
rom = self.Dummy(self.Dummy._MODEL_KEYS)
rom.r, rom.m = r, m
with pytest.raises(ValueError) as ex:
rom._check_rom_operator_shape(c[:-1], 'c')
assert ex.value.args[0] == \
f"c_.shape = {c[:-1].shape}, must be (r,) with r = {r}"
with pytest.raises(ValueError) as ex:
rom._check_rom_operator_shape(A[:-1,1:], 'A')
assert ex.value.args[0] == \
f"A_.shape = {A[:-1,1:].shape}, must be (r,r) with r = {r}"
with pytest.raises(ValueError) as ex:
rom._check_rom_operator_shape(H[:-1,:-1], 'H')
assert ex.value.args[0] == \
f"H_.shape = {H[:-1,:-1].shape}, must be (r,r(r+1)/2) with r = {r}"
with pytest.raises(ValueError) as ex:
rom._check_rom_operator_shape(G[1:], 'G')
assert ex.value.args[0] == \
f"G_.shape = {G[1:].shape}, must be (r,r(r+1)(r+2)/6) with r = {r}"
with pytest.raises(ValueError) as ex:
rom._check_rom_operator_shape(B[1:-1], 'B')
assert ex.value.args[0] == \
f"B_.shape = {B[1:-1].shape}, must be (r,m) with r = {r}, m = {m}"
# Correct usage.
for key, op in zip("cAHGB", operators):
rom._check_rom_operator_shape(op, key)
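        # For reference (not part of the original assertions): with r = 7 and
        # m = 4 the compact operator shapes accepted above are
        #   c_: (7,)   A_: (7, 7)   H_: (7, 28)   G_: (7, 84)   B_: (7, 4)
        # since r(r+1)/2 = 28 and r(r+1)(r+2)/6 = 84.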
def test_check_inputargs(self):
"""Test _BaseROM._check_inputargs()."""
# Try with has_inputs = True but without inputs.
rom = self.Dummy("cB")
with pytest.raises(ValueError) as ex:
rom._check_inputargs(None, 'U')
assert ex.value.args[0] == \
"argument 'U' required since 'B' in modelform"
# Try with has_inputs = False but with inputs.
rom = self.Dummy("cA")
with pytest.raises(ValueError) as ex:
rom._check_inputargs(1, 'u')
assert ex.value.args[0] == \
"argument 'u' invalid since 'B' in modelform"
def test_is_trained(self, m=4, r=7):
"""Test _core._base._BaseROM._check_is_trained()."""
operators = _get_operators(r, m)
rom = self.Dummy(self.Dummy._MODEL_KEYS)
# Try without dimensions / operators set.
with pytest.raises(AttributeError) as ex:
rom._check_is_trained()
assert ex.value.args[0] == "model not trained (call fit())"
# Successful check.
rom.r, rom.m = r, m
rom.c_, rom.A_, rom.H_, rom.G_, rom.B_ = operators
rom._check_is_trained()
def test_set_operators(self, n=60, m=10, r=12):
"""Test _core._base._BaseROM.set_operators()."""
Vr = np.random.random((n, r))
c, A, H, G, B = _get_operators(r, m)
# Test correct usage.
rom = self.Dummy("cAH").set_operators(Vr=Vr, c_=c, A_=A, H_=H)
assert isinstance(rom, self.Dummy)
assert rom.modelform == "cAH"
assert rom.n == n
assert rom.r == r
assert rom.m == 0
assert rom.Vr is Vr
assert rom.c_ is c
assert rom.A_ is A
assert rom.H_ is H
assert rom.B_ is None
assert rom.G_ is None
rom = self.Dummy("GB").set_operators(None, G_=G, B_=B)
assert isinstance(rom, self.Dummy)
assert rom.modelform == "GB"
assert rom.n is None
assert rom.r == r
assert rom.m == m
assert rom.Vr is None
assert rom.c_ is None
assert rom.A_ is None
assert rom.H_ is None
assert rom.G_ is G
assert rom.B_ is B
def test_project(self, n=60, k=50, r=10):
"""Test _core._base._BaseROM.project()."""
X, Xdot, _ = _get_data(n, k, 2)
rom = self.Dummy("c")
rom.Vr = la.svd(X)[0][:,:r]
with pytest.raises(ValueError) as ex:
rom.project(X[:-1,:], 'X')
assert ex.value.args[0] == "X not aligned with Vr, dimension 0"
for S, label in [(X, 'X'), (Xdot, 'Xdot')]:
S_ = rom.project(S, label)
assert S_.shape == (r,k)
S_ = rom.project(rom.Vr.T @ S, label)
assert S_.shape == (r,k)
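        # As exercised above, project() accepts either full-order (n, k) or
        # already-reduced (r, k) snapshots; with the defaults n=60, k=50, r=10
        # both calls return an array of shape (10, 50).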
def test_fit(self):
"""Test _core._base._BaseROM.fit()."""
rom = self.Dummy("A")
with pytest.raises(NotImplementedError) as ex:
rom.fit()
assert ex.value.args[0] == "fit() implemented by child classes"
with pytest.raises(NotImplementedError) as ex:
rom.fit(1, 2, 3, 4, 5, 6, 7, a=8)
assert ex.value.args[0] == "fit() implemented by child classes"
def test_predict(self):
"""Test _core._base._BaseROM.fit()."""
rom = self.Dummy("A")
with pytest.raises(NotImplementedError) as ex:
rom.predict()
assert ex.value.args[0] == "predict() implemented by child classes"
with pytest.raises(NotImplementedError) as ex:
rom.predict(1, 2, 3, 4, 5, 6, 7, a=8)
assert ex.value.args[0] == "predict() implemented by child classes"
class TestDiscreteROM:
"""Test _core._base._DiscreteROM."""
def test_f_(self, r=5, m=2):
"""Test _core._base.DiscreteROM.f_()."""
c_, A_, H_, G_, B_ = _get_operators(r, m)
rom = opinf._core._base._DiscreteROM("cA")
rom.r = r
rom.c_, rom.A_ = c_, A_
x_ = np.random.random(r)
        y_ = c_ + A_ @ x_
:type Width: int
        :param Height: Height of each small image in the image sprite. Value range: [128, 4096], in px.
        :type Height: int
        :param ResolutionAdaptive: Resolution adaptation, valid values:
<li>open: enabled. In this case, Width represents the long side of the video and Height the short side;</li>
<li>close: disabled. In this case, Width represents the width of the video and Height its height.</li>
Default value: open.
        :type ResolutionAdaptive: str
        :param SampleType: Sampling type, valid values:
<li>Percent: by percentage.</li>
<li>Time: by time interval.</li>
        :type SampleType: str
        :param SampleInterval: Sampling interval.
<li>When SampleType is Percent, the sampling interval is a percentage.</li>
<li>When SampleType is Time, the sampling interval is in seconds.</li>
        :type SampleInterval: int
        :param RowCount: Number of image rows in the image sprite.
        :type RowCount: int
        :param ColumnCount: Number of image columns in the image sprite.
        :type ColumnCount: int
        :param FillType: Fill type, i.e. how the video is processed when its aspect ratio differs from the configured width and height. Valid values:
<li>stretch: stretch every frame to fill the whole frame, which may "squash" or "stretch" the transcoded video;</li>
<li>black: keep the aspect ratio unchanged and fill the remaining edges with black.</li>
Default value: black.
        :type FillType: str
        :param Comment: Template description, up to 256 characters.
        :type Comment: str
        :param SubAppId: VOD [subapplication](/document/product/266/14574) ID. Fill in the subapplication ID to access resources in a subapplication; otherwise leave this field empty.
        :type SubAppId: int
"""
self.Definition = None
self.Name = None
self.Width = None
self.Height = None
self.ResolutionAdaptive = None
self.SampleType = None
self.SampleInterval = None
self.RowCount = None
self.ColumnCount = None
self.FillType = None
self.Comment = None
self.SubAppId = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
self.Name = params.get("Name")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.ResolutionAdaptive = params.get("ResolutionAdaptive")
self.SampleType = params.get("SampleType")
self.SampleInterval = params.get("SampleInterval")
self.RowCount = params.get("RowCount")
self.ColumnCount = params.get("ColumnCount")
self.FillType = params.get("FillType")
self.Comment = params.get("Comment")
self.SubAppId = params.get("SubAppId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyImageSpriteTemplateResponse(AbstractModel):
"""ModifyImageSpriteTemplate返回参数结构体
"""
def __init__(self):
"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyMediaInfoRequest(AbstractModel):
"""ModifyMediaInfo请求参数结构体
"""
def __init__(self):
"""
        :param FileId: Unique identifier of the media file.
        :type FileId: str
        :param Name: Media file name, up to 64 characters.
        :type Name: str
        :param Description: Media file description, up to 128 characters.
        :type Description: str
        :param ClassId: Category ID of the media file.
        :type ClassId: int
        :param ExpireTime: Expiration time of the media file, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I). "9999-12-31T23:59:59Z" means the file never expires. Once expired, the media file and its related resources (transcoding results, image sprites, etc.) are permanently deleted.
        :type ExpireTime: str
        :param CoverData: [Base64](https://tools.ietf.org/html/rfc4648)-encoded string of the video cover image file (e.g. jpeg, png). Only gif, jpeg and png formats are supported.
        :type CoverData: str
        :param AddKeyFrameDescs: Set of video timestamp tags to add. If a tag already exists at a given time offset, it is overwritten. A single media file can have at most 100 timestamp tags. Within one request, the time offsets in AddKeyFrameDescs must all differ from those in DeleteKeyFrameDescs.
        :type AddKeyFrameDescs: list of MediaKeyFrameDescItem
        :param DeleteKeyFrameDescs: Time offsets, in seconds, of the video timestamp tags to delete. Within one request, the time offsets in AddKeyFrameDescs must all differ from those in DeleteKeyFrameDescs.
        :type DeleteKeyFrameDescs: list of float
        :param ClearKeyFrameDescs: The value 1 clears all video timestamp tags; other values are meaningless.
Within one request, ClearKeyFrameDescs and AddKeyFrameDescs cannot both be present.
        :type ClearKeyFrameDescs: int
        :param AddTags: Set of tags to add. A single media file can have at most 16 tags, each up to 16 characters. Within one request, the values in AddTags must all differ from those in DeleteTags.
        :type AddTags: list of str
        :param DeleteTags: Set of tags to delete. Within one request, the values in AddTags must all differ from those in DeleteTags.
        :type DeleteTags: list of str
        :param ClearTags: The value 1 clears all tags of the media file; other values are meaningless.
Within one request, ClearTags and AddTags cannot both be present.
        :type ClearTags: int
        :param AddSubtitles: Set of subtitles to add. A single media file can have at most 16 subtitles. Within one request, the subtitle IDs specified in AddSubtitles must all differ from those in DeleteSubtitleIds.
        :type AddSubtitles: list of MediaSubtitleInput
        :param DeleteSubtitleIds: Unique identifiers of the subtitles to delete. Within one request, the subtitle IDs specified in AddSubtitles must all differ from those in DeleteSubtitleIds.
        :type DeleteSubtitleIds: list of str
        :param ClearSubtitles: The value 1 clears all subtitles of the media file; other values are meaningless.
Within one request, ClearSubtitles and AddSubtitles cannot both be present.
        :type ClearSubtitles: int
        :param SubAppId: VOD [subapplication](/document/product/266/14574) ID. Fill in the subapplication ID to access resources in a subapplication; otherwise leave this field empty.
        :type SubAppId: int
"""
self.FileId = None
self.Name = None
self.Description = None
self.ClassId = None
self.ExpireTime = None
self.CoverData = None
self.AddKeyFrameDescs = None
self.DeleteKeyFrameDescs = None
self.ClearKeyFrameDescs = None
self.AddTags = None
self.DeleteTags = None
self.ClearTags = None
self.AddSubtitles = None
self.DeleteSubtitleIds = None
self.ClearSubtitles = None
self.SubAppId = None
def _deserialize(self, params):
self.FileId = params.get("FileId")
self.Name = params.get("Name")
self.Description = params.get("Description")
self.ClassId = params.get("ClassId")
self.ExpireTime = params.get("ExpireTime")
self.CoverData = params.get("CoverData")
if params.get("AddKeyFrameDescs") is not None:
self.AddKeyFrameDescs = []
for item in params.get("AddKeyFrameDescs"):
obj = MediaKeyFrameDescItem()
obj._deserialize(item)
self.AddKeyFrameDescs.append(obj)
self.DeleteKeyFrameDescs = params.get("DeleteKeyFrameDescs")
self.ClearKeyFrameDescs = params.get("ClearKeyFrameDescs")
self.AddTags = params.get("AddTags")
self.DeleteTags = params.get("DeleteTags")
self.ClearTags = params.get("ClearTags")
if params.get("AddSubtitles") is not None:
self.AddSubtitles = []
for item in params.get("AddSubtitles"):
obj = MediaSubtitleInput()
obj._deserialize(item)
self.AddSubtitles.append(obj)
self.DeleteSubtitleIds = params.get("DeleteSubtitleIds")
self.ClearSubtitles = params.get("ClearSubtitles")
self.SubAppId = params.get("SubAppId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyMediaInfoResponse(AbstractModel):
"""ModifyMediaInfo返回参数结构体
"""
def __init__(self):
"""
        :param CoverUrl: URL of the new video cover.
* Note: this return value is valid only when the request carries CoverData. *
        :type CoverUrl: str
        :param AddedSubtitleSet: Information about the added subtitles.
        :type AddedSubtitleSet: list of MediaSubtitleItem
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.CoverUrl = None
self.AddedSubtitleSet = None
self.RequestId = None
def _deserialize(self, params):
self.CoverUrl = params.get("CoverUrl")
if params.get("AddedSubtitleSet") is not None:
self.AddedSubtitleSet = []
for item in params.get("AddedSubtitleSet"):
obj = MediaSubtitleItem()
obj._deserialize(item)
self.AddedSubtitleSet.append(obj)
self.RequestId = params.get("RequestId")
class ModifyPersonSampleRequest(AbstractModel):
"""ModifyPersonSample请求参数结构体
"""
def __init__(self):
"""
        :param PersonId: Sample ID.
        :type PersonId: str
        :param Name: Name, up to 128 characters.
        :type Name: str
        :param Description: Description, up to 1,024 characters.
        :type Description: str
        :param Usages: Sample usage scenarios, valid values:
1. Recognition: used for content recognition, equivalent to Recognition.Face.
2. Review: used for inappropriate content recognition, equivalent to Review.Face.
3. All: used for both of the above, equivalent to 1+2.
        :type Usages: list of str
        :param FaceOperationInfo: Face operation information.
        :type FaceOperationInfo: :class:`tencentcloud.vod.v20180717.models.AiSampleFaceOperation`
        :param TagOperationInfo: Tag operation information.
        :type TagOperationInfo: :class:`tencentcloud.vod.v20180717.models.AiSampleTagOperation`
        :param SubAppId: VOD [subapplication](/document/product/266/14574) ID. Fill in the subapplication ID to access resources in a subapplication; otherwise leave this field empty.
        :type SubAppId: int
"""
self.PersonId = None
self.Name = None
self.Description = None
self.Usages = None
self.FaceOperationInfo = None
self.TagOperationInfo = None
self.SubAppId = None
def _deserialize(self, params):
self.PersonId = params.get("PersonId")
self.Name = params.get("Name")
self.Description = params.get("Description")
self.Usages = params.get("Usages")
if params.get("FaceOperationInfo") is not None:
self.FaceOperationInfo = AiSampleFaceOperation()
self.FaceOperationInfo._deserialize(params.get("FaceOperationInfo"))
if params.get("TagOperationInfo") is not None:
self.TagOperationInfo = AiSampleTagOperation()
self.TagOperationInfo._deserialize(params.get("TagOperationInfo"))
self.SubAppId = params.get("SubAppId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyPersonSampleResponse(AbstractModel):
"""ModifyPersonSample返回参数结构体
"""
def __init__(self):
"""
        :param Person: Sample information.
        :type Person: :class:`tencentcloud.vod.v20180717.models.AiSamplePerson`
        :param FailFaceInfoSet: Information about the faces that failed to be processed.
Note: this field may return null, indicating that no valid value was obtained.
        :type FailFaceInfoSet: list of AiSampleFailFaceInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
"""
self.Person = None
self.FailFaceInfoSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Person") is not None:
self.Person = AiSamplePerson()
self.Person._deserialize(params.get("Person"))
if params.get("FailFaceInfoSet") is not None:
self.FailFaceInfoSet = []
for item in params.get("FailFaceInfoSet"):
obj = AiSampleFailFaceInfo()
obj._deserialize(item)
self.FailFaceInfoSet.append(obj)
self.RequestId = params.get("RequestId")
class ModifySampleSnapshotTemplateRequest(AbstractModel):
"""ModifySampleSnapshotTemplate请求参数结构体
"""
def __init__(self):
"""
        :param Definition: Unique identifier of the sampled screenshot template.
        :type Definition: int
        :param Name: Name of the sampled screenshot template, up to 64 characters.
        :type Name: str
        :param Width: Maximum value of the screenshot width (or long side). Value range: 0 and [128, 4096], in px.
<li>If both Width and Height are 0, the output resolution is the same as the source;</li>
<li>If Width is 0 and Height is non-zero, Width is scaled proportionally;</li>
<li>If Width is non-zero and Height is 0, Height is scaled proportionally;</li>
<li>If both Width and Height are non-zero, the resolution is as specified by the user.</li>
Default value: 0.
        :type Width: int
        :param Height: Maximum value of the screenshot height (or short side). Value range: 0 and [128, 4096], in px.
<li>If both Width and Height are 0, the output resolution is the same as the source;</li>
<li>If Width is 0 and Height is non-zero, Width is scaled proportionally;</li>
<li>If Width is non-zero and Height is 0, Height is scaled proportionally;</li>
<li>If both Width and Height are non-zero, the resolution is as specified by the user.</li>
Default value: 0.
        :type Height: int
        :param ResolutionAdaptive: Resolution adaptation, valid values:
<li>open: enabled. In this case, Width represents the long side of the video and Height the short side;</li>
<li>close: disabled. In this case, Width represents the width of the video and Height its height.</li>
Default value: open.
        :type ResolutionAdaptive: str
        :param SampleType: Sampled screenshot type, valid values:
<li>Percent: by percentage.</li>
<li>Time: by time interval.</li>
        :type SampleType: str
        :param SampleInterval: Sampling interval.
<li>When SampleType is Percent, the sampling interval is a percentage.</li>
<li>When SampleType is Time, the sampling interval is in seconds.</li>
        :type SampleInterval: int
        :param Format: Image format, valid values: jpg, png.
        :type Format: str
        :param Comment: Template description, up to 256 characters.
        :type Comment: str
        :param SubAppId: VOD [subapplication](/document/product/266/14574) ID. Fill in the subapplication ID to access resources in a subapplication; otherwise leave this field empty.
        :type SubAppId: int
        :param FillType: Fill type, i.e. how the video is processed when its aspect ratio differs from the configured width and height. Valid values:
<li>stretch: stretch every frame to fill the whole frame, which may "squash" or "stretch" the transcoded video;</li>
<li>black: keep the aspect ratio unchanged and fill the remaining edges with black;</li>
<li>white: keep the aspect ratio unchanged and fill the remaining edges with white;</li>
<li>gauss: keep the aspect ratio unchanged and fill the remaining edges with Gaussian blur.</li>
Default value: black.
        :type FillType: str
"""
self.Definition = None
self.Name = None
self.Width = None
self.Height = None
self.ResolutionAdaptive = None
self.SampleType = None
self.SampleInterval = None
self.Format = None
self.Comment = None
self.SubAppId = None
self.FillType = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
self.Name = params.get("Name")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.ResolutionAdaptive = params.get("ResolutionAdaptive")
self.SampleType = params.get("SampleType")
self.SampleInterval = params.get("SampleInterval")
self.Format = params.get("Format")
self.Comment = params.get("Comment")
self.SubAppId = params.get("SubAppId")
self.FillType = params.get("FillType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifySampleSnapshotTemplateResponse(AbstractModel):
"""ModifySampleSnapshotTemplate返回参数结构体
"""
def __init__(self):
"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySnapshotByTimeOffsetTemplateRequest(AbstractModel):
"""ModifySnapshotByTimeOffsetTemplate请求参数结构体
"""
def __init__(self):
"""
        :param Definition: Unique identifier of the time point screenshot template.
        :type Definition: int
        :param Name: Name of the time point screenshot template, up to 64 characters.
        :type Name: str
        :param Width: Maximum value of the screenshot width (or long side). Value range: 0 and [128, 4096], in px.
<li>If both Width and Height are 0, the output resolution is the same as the source;</li>
<li>If Width is 0 and Height is non-zero, Width is scaled proportionally;</li>
<li>If Width is non-zero and Height is 0, Height is scaled proportionally;</li>
<li>If both Width and Height are non-zero, the resolution is as specified by the user.</li>
Default value: 0.
        :type Width: int
        :param Height: Maximum value of the screenshot height (or short side). Value range: 0 and [128, 4096], in px.
<li>If both Width and Height are 0, the output resolution is the same as the source;</li>
<li>If Width is 0 and Height is non-zero, Width is scaled proportionally;</li>
<li>If Width is non-zero and Height is 0, Height is scaled proportionally;</li>
<li>If both Width and Height are non-zero, the resolution is as specified by the user.</li>
Default value: 0.
        :type Height: int
        :param ResolutionAdaptive: Resolution adaptation, valid values:
<li>open: enabled. In this case, Width represents the long side of the video and Height the short side;</li>
<li>close: disabled. In this case, Width represents the width of the video and Height its height.</li>
Default value: open.
        :type ResolutionAdaptive: str
        :param Format: Image format, valid values: jpg, png.
        :type Format: str
        :param Comment: Template description, up to 256 characters.
        :type Comment: str
        :param SubAppId: VOD [subapplication](/document/product/266/14574) ID. Fill in the subapplication ID to access resources in a subapplication; otherwise leave this field empty.
        :type SubAppId: int
        :param FillType: Fill type, i.e. how the video is processed when its aspect ratio differs from the configured width and height. Valid values:
<li>stretch: stretch every frame to fill the whole frame, which may "squash" or "stretch" the transcoded video;</li>
<li>black: keep the aspect ratio unchanged and fill the remaining edges with black;</li>
<li>white: keep the aspect ratio unchanged and fill the remaining edges with white;</li>
<li>gauss: keep the aspect ratio unchanged and fill the remaining edges with Gaussian blur.</li>
Default value: black.
        :type FillType: str
"""
self.Definition = None
self.Name = None
self.Width = None
self.Height = None
self.ResolutionAdaptive = None
self.Format = None
self.Comment = None
self.SubAppId = None
self.FillType = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
self.Name = params.get("Name")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.ResolutionAdaptive = params.get("ResolutionAdaptive")
self.Format = params.get("Format")
self.Comment = params.get("Comment")
self.SubAppId = params.get("SubAppId")
self.FillType = params.get("FillType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifySnapshotByTimeOffsetTemplateResponse(AbstractModel):
"""ModifySnapshotByTimeOffsetTemplate返回参数结构体
"""
def __init__(self):
"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type | |
import http.client
import json
import math
import re
import time
from dateutil.parser import parse as parse_date
import requests
import lz4framed
from sentry_sdk import configure_scope, capture_exception
from flask import current_app, request, make_response
from flask.json import jsonify
from werkzeug.exceptions import HTTPException, BadRequest
from sqlalchemy.dialects import postgresql
from sqlalchemy import func, or_, and_, false, text, select, sql, column
from sqlalchemy import String, cast, Float
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.exc import OperationalError
from psycopg2.extensions import QueryCanceledError
from urllib.parse import urljoin, urlencode
from measurements import __version__
from measurements.config import REPORT_INDEX_OFFSET, REQID_HDR, request_id
from measurements.models import Report, Input, Measurement, Autoclaved, Fastpath
MSM_ID_PREFIX = "temp-id"
FASTPATH_MSM_ID_PREFIX = "temp-fid-"
RE_MSM_ID = re.compile(r"^{}-(\d+)$".format(MSM_ID_PREFIX))
FASTPATH_SERVER = "fastpath.ooni.nu"
FASTPATH_PORT = 8000
class QueryTimeoutError(HTTPException):
code = 504
    description = (
        "The database query timed out. "
        "Try changing the query parameters."
    )
def get_version():
return jsonify({"version": __version__})
def list_files(
probe_asn=None,
probe_cc=None,
test_name=None,
since=None,
until=None,
since_index=None,
order_by="index",
order="desc",
offset=0,
limit=100,
):
if probe_asn is not None:
if probe_asn.startswith("AS"):
probe_asn = probe_asn[2:]
probe_asn = int(probe_asn)
try:
if since is not None:
since = parse_date(since)
except ValueError:
raise BadRequest("Invalid since")
try:
if until is not None:
until = parse_date(until)
except ValueError:
raise BadRequest("Invalid until")
if since_index is not None:
since_index = int(since_index)
report_no = max(0, since_index - REPORT_INDEX_OFFSET)
if order_by in ("index", "idx"):
order_by = "report_no"
q = current_app.db_session.query(
Report.textname,
Report.test_start_time,
Report.probe_cc,
Report.probe_asn,
Report.report_no,
Report.test_name,
)
# XXX maybe all of this can go into some sort of function.
if probe_cc:
q = q.filter(Report.probe_cc == probe_cc)
if probe_asn:
q = q.filter(Report.probe_asn == probe_asn)
if test_name:
q = q.filter(Report.test_name == test_name)
if since:
q = q.filter(Report.test_start_time > since)
if until:
q = q.filter(Report.test_start_time <= until)
if since_index:
q = q.filter(Report.report_no > report_no)
count = q.count()
pages = math.ceil(count / limit)
current_page = math.ceil(offset / limit) + 1
q = q.order_by(text("{} {}".format(order_by, order)))
q = q.limit(limit).offset(offset)
next_args = request.args.to_dict()
next_args["offset"] = "%s" % (offset + limit)
next_args["limit"] = "%s" % limit
next_url = urljoin(
current_app.config["BASE_URL"], "/api/v1/files?%s" % urlencode(next_args)
)
if current_page >= pages:
next_url = None
metadata = {
"offset": offset,
"limit": limit,
"count": count,
"pages": pages,
"current_page": current_page,
"next_url": next_url,
}
results = []
for row in q:
download_url = urljoin(
current_app.config["BASE_URL"], "/files/download/%s" % row.textname
)
results.append(
{
"download_url": download_url,
"probe_cc": row.probe_cc,
"probe_asn": "AS{}".format(row.probe_asn),
"test_name": row.test_name,
"index": int(row.report_no) + REPORT_INDEX_OFFSET,
"test_start_time": row.test_start_time,
}
)
return jsonify({"metadata": metadata, "results": results})
def get_one_fastpath_measurement(measurement_id, download):
"""Get one measurement from the fastpath table by measurement_id,
fetching the file from the fastpath host
"""
log = current_app.logger
tid = measurement_id[len(FASTPATH_MSM_ID_PREFIX) :]
path = "/measurements/{}.json.lz4".format(tid)
log.info(
"Incoming fastpath query %r. Fetching %s:%d%s",
measurement_id,
FASTPATH_SERVER,
FASTPATH_PORT,
path,
)
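    # Example of the mapping above (made-up id): measurement_id
    # "temp-fid-00abc123" strips the "temp-fid-" prefix, so the fetch becomes
    # GET http://fastpath.ooni.nu:8000/measurements/00abc123.json.lz4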
conn = http.client.HTTPConnection(FASTPATH_SERVER, FASTPATH_PORT)
log.debug("Fetching %s:%d %r", FASTPATH_SERVER, FASTPATH_PORT, path)
conn.request("GET", path)
r = conn.getresponse()
log.debug("Response status: %d", r.status)
try:
assert r.status == 200
blob = r.read()
conn.close()
log.debug("Decompressing LZ4 data")
blob = lz4framed.decompress(blob)
response = make_response(blob)
response.headers.set("Content-Type", "application/json")
log.debug("Sending JSON response")
return response
except Exception:
raise BadRequest("No measurement found")
def get_measurement(measurement_id, download=None):
"""Get one measurement by measurement_id,
fetching the file from S3 or the fastpath host as needed
Returns only the measurement without extra data from the database
"""
if measurement_id.startswith(FASTPATH_MSM_ID_PREFIX):
return get_one_fastpath_measurement(measurement_id, download)
# XXX this query is slow due to filtering by report_id and input
    # It also occasionally returns multiple rows and serves only the first one
# TODO: add timing metric
# TODO: switch to OOID to speed up the query
# https://github.com/ooni/pipeline/issues/48
m = RE_MSM_ID.match(measurement_id)
if not m:
raise BadRequest("Invalid measurement_id")
msm_no = int(m.group(1))
q = (
current_app.db_session.query(
Measurement.report_no.label("report_no"),
Measurement.frame_off.label("frame_off"),
Measurement.frame_size.label("frame_size"),
Measurement.intra_off.label("intra_off"),
Measurement.intra_size.label("intra_size"),
Report.textname.label("textname"),
Report.report_no.label("r_report_no"),
Report.autoclaved_no.label("r_autoclaved_no"),
Autoclaved.filename.label("a_filename"),
Autoclaved.autoclaved_no.label("a_autoclaved_no"),
)
.filter(Measurement.msm_no == msm_no)
.join(Report, Report.report_no == Measurement.report_no)
.join(Autoclaved, Autoclaved.autoclaved_no == Report.autoclaved_no)
)
try:
msmt = q.one()
except MultipleResultsFound:
current_app.logger.warning(
"Duplicate rows for measurement_id: %s" % measurement_id
)
msmt = q.first()
except NoResultFound:
# XXX we should actually return a 404 here
raise BadRequest("No measurement found")
# Usual size of LZ4 frames is 256kb of decompressed text.
# Largest size of LZ4 frame was ~55Mb compressed and ~56Mb decompressed. :-/
range_header = "bytes={}-{}".format(
msmt.frame_off, msmt.frame_off + msmt.frame_size - 1
)
r = requests.get(
urljoin(current_app.config["AUTOCLAVED_BASE_URL"], msmt.a_filename),
headers={"Range": range_header, REQID_HDR: request_id()},
)
r.raise_for_status()
blob = r.content
if len(blob) != msmt.frame_size:
raise RuntimeError("Failed to fetch LZ4 frame", len(blob), msmt.frame_size)
blob = lz4framed.decompress(blob)[msmt.intra_off : msmt.intra_off + msmt.intra_size]
if len(blob) != msmt.intra_size or blob[:1] != b"{" or blob[-1:] != b"}":
raise RuntimeError(
"Failed to decompress LZ4 frame to measurement.json",
len(blob),
msmt.intra_size,
blob[:1],
blob[-1:],
)
# There is no replacement of `measurement_id` with `msm_no` or anything
# else to keep sanity. Maybe it'll happen as part of orchestration update.
# Also, blob is not decoded intentionally to save CPU
filename = "ooni-msmt-{}-{}".format(measurement_id, msmt.textname.replace("/", "-"))
response = make_response(blob)
response.headers.set("Content-Type", "application/json")
if download is not None:
response.headers.set("Content-Disposition", "attachment", filename=filename)
return response
def input_cte(input_, domain, test_name):
"""Given a domain or an input_, build a WHERE filter
"""
if input_ and domain:
raise BadRequest("Must pick either domain or input")
if not input_ and not domain:
return None
where_or = []
if input_:
where_or.append(text("input.input LIKE :i").bindparams(i="%{}%".format(input_)))
else:
domain_filter = "{}%".format(domain)
where_or.append(
text("input.input LIKE :domain_filter").bindparams(
domain_filter=domain_filter
)
)
if test_name in [None, "web_connectivity", "http_requests"]:
where_or.append(
text("input.input LIKE :http_filter").bindparams(
http_filter="http://{}".format(domain_filter)
)
)
where_or.append(
text("input.input LIKE :https_filter").bindparams(
https_filter="https://{}".format(domain_filter)
)
)
url_q = (
select([column("input").label("input"), column("input_no").label("input_no")])
.where(or_(*where_or))
.select_from(sql.table("input"))
)
return url_q.cte("input_cte")
def log_query(log, q):
import sqlparse # debdeps: python3-sqlparse
sql = str(
q.statement.compile(
dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}
)
)
sql = sqlparse.format(sql, reindent=True, keyword_case="upper")
log.info("\n--- query ---\n\n%s\n\n-------------", sql)
def _merge_two_results(a, b):
"""Merge 2 measurements. Collect useful fields from traditional pipeline
and fastpath
"""
if a["scores"] and b["scores"]:
# both a and b are fastpath: ignore b
return a
if a["scores"]:
# merge in useful fields from traditional (b) into a
for f in ("anomaly", "confirmed"):
a[f] = b[f]
return a
if b["scores"]:
# merge in useful fields from fastpath (b) into a
for f in ("scores", "measurement_url", "measurement_id"):
a[f] = b[f]
return a
# both traditional, ignore b
return a
def _merge_results(tmpresults):
"""Merge list_measurements() outputs from traditional pipeline and fastpath
"""
resultsmap = {}
for r in tmpresults:
k = (r["report_id"], r["input"])
if k not in resultsmap:
resultsmap[k] = r
else:
resultsmap[k] = _merge_two_results(resultsmap[k], r)
return tuple(resultsmap.values())
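# Rough illustration of the merge above: two rows sharing (report_id, input),
# one from the fastpath (non-empty "scores") and one from the traditional
# pipeline, collapse into a single row that keeps the fastpath
# scores/measurement_url/measurement_id together with the traditional
# anomaly/confirmed flags, assuming the traditional row carries a falsy
# "scores" field.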
def list_measurements(
report_id=None,
probe_asn=None,
probe_cc=None,
test_name=None,
since=None,
until=None,
since_index=None,
order_by="test_start_time",
order="desc",
offset=0,
limit=100,
failure=None,
anomaly=None,
confirmed=None,
):
"""Search for measurements using only the database. Provide pagination.
"""
# FIXME: list_measurements and get_measurement will be simplified and
# made faster by https://github.com/ooni/pipeline/issues/48
log = current_app.logger
## Prepare query parameters
input_ = request.args.get("input")
domain = request.args.get("domain")
if probe_asn is not None:
if probe_asn.startswith("AS"):
probe_asn = probe_asn[2:]
probe_asn = int(probe_asn)
# When the user specifies a list that includes all the possible values for
# boolean arguments, that is logically the same of applying no filtering at
# all.
# TODO: treat it as an error?
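    # For example: failure=["true", "false"] -> failure = None (no filtering),
    # while failure=["true"] -> failure = True (only failed measurements).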
if failure is not None:
if set(failure) == set(["true", "false"]):
failure = None
else:
failure = set(failure) == set(["true"])
if anomaly is not None:
if set(anomaly) == set(["true", "false"]):
anomaly = None
else:
anomaly = set(anomaly) == set(["true"])
if confirmed is not None:
if set(confirmed) == set(["true", "false"]):
confirmed = None
else:
confirmed = set(confirmed) == set(["true"])
try:
if since is not None:
since = parse_date(since)
except ValueError:
raise BadRequest("Invalid since")
try:
if until is not None:
until = parse_date(until)
except ValueError:
raise BadRequest("Invalid until")
if order.lower() not in ("asc", "desc"):
raise BadRequest("Invalid order")
## Create SQL query
c_anomaly = func.coalesce(Measurement.anomaly, false()).label("anomaly")
c_confirmed = func.coalesce(Measurement.confirmed, false()).label("confirmed")
c_msm_failure = func.coalesce(Measurement.msm_failure, false()).label("msm_failure")
cols = [
Measurement.input_no.label("m_input_no"),
Measurement.measurement_start_time.label("measurement_start_time"),
Report.test_start_time.label("test_start_time"),
func.concat(MSM_ID_PREFIX, "-", Measurement.msm_no).label("measurement_id"),
Measurement.report_no.label("m_report_no"),
c_anomaly,
c_confirmed,
c_msm_failure,
func.coalesce("{}").label("scores"),
Measurement.exc.label("exc"),
Measurement.residual_no.label("residual_no"),
Report.report_id.label("report_id"),
Report.probe_cc.label("probe_cc"),
Report.probe_asn.label("probe_asn"),
Report.test_name.label("test_name"),
Report.report_no.label("report_no"),
]
cte = input_cte(input_=input_, domain=domain, test_name=test_name)
if cte is not None:
cols.append(cte)
else:
cols.append(func.coalesce(Input.input, None).label("input"))
q = current_app.db_session.query(*cols)
if cte is not None:
q = q.join(cte, sql.text("input_cte.input_no = measurement.input_no"))
else:
q = q.outerjoin(Input, Measurement.input_no == Input.input_no)
    q = q.join(Report, Report.report_no == Measurement.report_no)
if report_id:
q = q.filter(Report.report_id == report_id)
if probe_cc:
q = q.filter(Report.probe_cc == probe_cc)
if probe_asn is not None:
q = q.filter(Report.probe_asn == probe_asn)
if test_name is not None:
q = q.filter(Report.test_name == test_name)
if since is | |
import datetime
import hashlib
import io
import json
import logging
import os
import socket
import getpass
from base64 import b64encode
try:
from urlparse import urlunparse
except ImportError:
from urllib.parse import urlunparse
from smb.SMBConnection import SMBConnection
from smb.base import OperationFailure
from smb.smb_constants import ATTR_DIRECTORY, ATTR_NORMAL
from nmb.NetBIOS import NetBIOS
from dtoolcore.storagebroker import BaseStorageBroker, DiskStorageBroker
from dtoolcore.filehasher import FileHasher, md5sum_hexdigest, md5sum_digest
from dtoolcore.storagebroker import StorageBrokerOSError
from dtoolcore.utils import (
generate_identifier,
get_config_value,
generous_parse_uri,
timestamp,
DEFAULT_CACHE_PATH,
)
from dtool_smb import __version__
logger = logging.getLogger(__name__)
_STRUCTURE_PARAMETERS = {
"data_directory": ["data"],
"dataset_readme_relpath": ["README.yml"],
"dtool_directory": ["_dtool"],
"admin_metadata_relpath": ["_dtool", "dtool"],
"structure_metadata_relpath": ["_dtool", "structure.json"],
"dtool_readme_relpath": ["_dtool", "README.txt"],
"manifest_relpath": ["_dtool", "manifest.json"],
"overlays_directory": ["_dtool", "overlays"],
"annotations_directory": ["_dtool", "annotations"],
"tags_directory": ["_dtool", "tags"],
"metadata_fragments_directory": ["_dtool", "tmp_fragments"],
"storage_broker_version": __version__,
}
_DTOOL_README_TXT = """README
======
This is a Dtool dataset stored in an SMB share.
Content provided during the dataset creation process
----------------------------------------------------
Directory named $UUID, where UUID is the unique identifier for the
dataset.
Dataset descriptive metadata: README.yml
Dataset items. The keys for these blobs are item identifiers. An item
identifier is the sha1sum hexdigest of the relative path used to represent the
file on traditional file system disk.
Administrative metadata describing the dataset is encoded as metadata on the
container.
Automatically generated blobs
-----------------------------
This file: README.txt
Structural metadata describing the dataset: structure.json
Structural metadata describing the data items: manifest.json
Per item descriptive metadata prefixed by: overlays/
Dataset key/value pairs metadata prefixed by: annotations/
Dataset tags metadata prefixed by: tags/
"""
class SMBStorageBrokerValidationWarning(Warning):
pass
class SMBStorageBroker(BaseStorageBroker):
#: Attribute used to define the type of storage broker.
key = "smb"
#: Attribute used by :class:`dtoolcore.ProtoDataSet` to write the hash
#: function name to the manifest.
hasher = FileHasher(md5sum_hexdigest)
# Attribute used to define the structure of the dataset.
_structure_parameters = _STRUCTURE_PARAMETERS
# Attribute used to document the structure of the dataset.
_dtool_readme_txt = _DTOOL_README_TXT
# Encoding
_encoding = 'utf-8'
def __init__(self, uri, config_path=None):
parse_result = generous_parse_uri(uri)
self.config_name = parse_result.netloc
uuid = parse_result.path[1:]
self.uuid = uuid
# Connect to SMB server.
self.conn, self.service_name, self.path = \
SMBStorageBroker._connect(uri, config_path)
# Define some other more abspaths.
self._data_path = self._generate_path("data_directory")
self._overlays_path = self._generate_path("overlays_directory")
self._annotations_path = self._generate_path(
"annotations_directory"
)
self._tags_path = self._generate_path(
"tags_directory"
)
self._metadata_fragments_path = self._generate_path(
"metadata_fragments_directory"
)
# Define some essential directories to be created.
self._essential_subdirectories = [
self._generate_path("dtool_directory"),
self._data_path,
self._overlays_path,
self._annotations_path,
self._tags_path,
]
# Cache for file hashes computed on upload
self._hash_cache = {}
def _count_calls(func):
def wrapper(*args, **kwargs):
wrapper.num_calls += 1
return func(*args, **kwargs)
wrapper.num_calls = 0
return wrapper
@classmethod
@_count_calls
def _connect(cls, uri, config_path):
parse_result = generous_parse_uri(uri)
config_name = parse_result.netloc
username = get_config_value(
"DTOOL_SMB_USERNAME_{}".format(config_name),
config_path=config_path
)
server_name = get_config_value(
"DTOOL_SMB_SERVER_NAME_{}".format(config_name),
config_path=config_path
)
server_port = get_config_value(
"DTOOL_SMB_SERVER_PORT_{}".format(config_name),
config_path=config_path
)
domain = get_config_value(
"DTOOL_SMB_DOMAIN_{}".format(config_name),
config_path=config_path
)
service_name = get_config_value(
"DTOOL_SMB_SERVICE_NAME_{}".format(config_name),
config_path=config_path
)
path = get_config_value(
"DTOOL_SMB_PATH_{}".format(config_name),
config_path=config_path
)
if not username:
raise RuntimeError("No username specified for service '{name}', "
"please set DTOOL_SMB_USERNAME_{name}."
.format(name=config_name))
if not server_name:
raise RuntimeError("No server name specified for service '{name}', "
"please set DTOOL_SMB_SERVER_NAME_{name}."
.format(name=config_name))
if not server_port:
raise RuntimeError("No server port specified for service '{name}', "
"please set DTOOL_SMB_SERVER_PORT_{name}."
.format(name=config_name))
if not domain:
raise RuntimeError("No domain specified for service '{name}', "
"please set DTOOL_SMB_DOMAIN_{name}."
.format(name=config_name))
if not service_name:
raise RuntimeError("No service name specified for service '{name}', "
"please set DTOOL_SMB_SERVICE_NAME_{name}. "
"(The service name is the name of the 'share'.)"
.format(name=config_name))
if not path:
raise RuntimeError("No path specified for service '{name}', "
"please set DTOOL_SMB_PATH_{name}."
.format(name=config_name))
# server_port might be string, i.e. if specified via env vars
if not isinstance(server_port, int):
server_port = int(server_port)
server_ip = socket.gethostbyname(server_name)
host_name = socket.gethostname()
password = get_config_value(
"DTOOL_SMB_PASSWORD_{}".format(config_name),
config_path=config_path
)
if password is None:
if cls._connect.num_calls == 1:
                password = getpass.getpass()
cls.password = password
else:
password = cls.password
conn = SMBConnection(username, password, host_name, server_name,
domain=domain, use_ntlm_v2=True, is_direct_tcp=True)
logger.info( ( "Connecting from '{host:s}' to "
"'smb://{user:s}@{ip:s}({server:s}):{port:d}', "
"DOMAIN '{domain:s}'").format(user=username,
ip=server_ip, server=server_name,
port=server_port, host=host_name,
domain=domain) )
# for testing, see types of arguments
logger.debug( ( "Types HOST '{host:s}', USER '{user:s}', IP '{ip:s}', "
"SERVER '{server:s}', PORT '{port:s}', DOMAIN '{domain:s}'").format(
user=type(username).__name__,
ip=type(server_ip).__name__,
server=type(server_name).__name__,
port=type(server_port).__name__,
host=type(host_name).__name__,
domain=type(domain).__name__))
conn.connect(server_ip, port=server_port)
return conn, service_name, path
# Generic helper functions.
def _generate_path(self, structure_dict_key):
logger.debug("_generate_path, structure_dict_key='{}'"
.format(structure_dict_key))
logger.debug("_generate_path, self.path='{}', self.uuid='{}', {}"
.format(self.path, self.uuid,
self._structure_parameters[structure_dict_key]))
return os.path.join(self.path, self.uuid,
*self._structure_parameters[structure_dict_key])
def _fpath_from_handle(self, handle):
return os.path.join(self._data_path, handle)
def _handle_to_fragment_prefixpath(self, handle):
stem = generate_identifier(handle)
logger.debug("_handle_to_fragment_prefixpath, handle='{}', stem='{}'"
.format(handle, stem))
return os.path.join(self._metadata_fragments_path, stem)
def _path_exists(self, path):
try:
self.conn.getAttributes(self.service_name, path)
except OperationFailure:
return False
return True
def _create_directory(self, path):
paths = []
while not self._path_exists(path):
paths += [path]
path = os.path.dirname(path)
while len(paths) > 0:
path = paths.pop()
logger.debug("_create_directory, path = '{}'".format(path))
self.conn.createDirectory(self.service_name, path)
# Class methods to override.
@classmethod
def generate_uri(cls, name, uuid, base_uri):
scheme, netloc, path, _, _, _ = generous_parse_uri(base_uri)
assert scheme == 'smb'
# Force path (third component of tuple) to be the dataset UUID
uri = urlunparse((scheme, netloc, uuid, _, _, _))
return uri
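    # For example (assuming the base URI has no params/query/fragment):
    #   generate_uri(None, "1a2b3c4d", "smb://my-share") -> "smb://my-share/1a2b3c4d"
    # Note that the dataset name is not encoded in the URI.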
@classmethod
def list_dataset_uris(cls, base_uri, config_path):
"""Return list containing URIs with base URI."""
conn, service_name, path = \
SMBStorageBroker._connect(base_uri, config_path)
files = conn.listPath(service_name, path)
uri_list = []
for f in files:
if f.filename != '.' and f.filename != '..':
if f.file_attributes & ATTR_DIRECTORY:
uuid = f.filename
uri = cls.generate_uri(None, uuid, base_uri)
uri_list.append(uri)
return uri_list
# Methods to override.
def get_admin_metadata_key(self):
"Return the path to the admin metadata file."""
return self._generate_path("admin_metadata_relpath")
def get_readme_key(self):
"Return the path to the readme file."""
return self._generate_path("dataset_readme_relpath")
def get_manifest_key(self):
"Return the path to the readme file."""
return self._generate_path("manifest_relpath")
def get_structure_key(self):
"Return the path to the structure parameter file."""
return self._generate_path("structure_metadata_relpath")
def get_dtool_readme_key(self):
"Return the path to the dtool readme file."""
return self._generate_path("dtool_readme_relpath")
def get_overlay_key(self, overlay_name):
"Return the path to the overlay file."""
return os.path.join(self._overlays_path, overlay_name + '.json')
def get_annotation_key(self, annotation_name):
"Return the path to the annotation file."""
return os.path.join(
self._annotations_path,
annotation_name + '.json'
)
def get_tag_key(self, tag):
"Return the path to the tag file."""
return os.path.join(
self._tags_path,
tag
)
def get_text(self, key):
"""Return the text associated with the key."""
logger.debug("get_text, key='{}'".format(key))
f = io.BytesIO()
self.conn.retrieveFile(self.service_name, key, f)
return f.getvalue().decode(self._encoding)
def put_text(self, key, text):
"""Put the text into the storage associated with the key."""
logger.debug("put_text, key='{}', text='{}'".format(key, text))
parent_directory = os.path.dirname(key)
self._create_directory(parent_directory)
f = io.BytesIO(text.encode(self._encoding))
self.conn.storeFile(self.service_name, key, f)
def delete_key(self, key):
"""Delete the file/object associated with the key."""
self.conn.deleteFile(self.service_name, key)
def get_size_in_bytes(self, handle):
"""Return the size in bytes."""
fpath = self._fpath_from_handle(handle)
return self.conn.getAttributes(self.service_name, fpath).file_size
def get_utc_timestamp(self, handle):
"""Return the UTC timestamp."""
fpath = self._fpath_from_handle(handle)
datetime_obj = datetime.datetime.utcfromtimestamp(
self.conn.getAttributes(self.service_name, fpath).last_write_time
)
return timestamp(datetime_obj)
def get_hash(self, handle):
"""Return the hash."""
logger.debug("get_hash, handle='{}'".format(handle))
logger.debug("get_hash, hash_cache={}".format(self._hash_cache))
fpath = self._fpath_from_handle(handle)
logger.debug("get_hash, fpath='{}'".format(fpath))
try:
return self._hash_cache[fpath]
except KeyError:
logger.debug("get_hash, fpath not found in cache")
f = io.BytesIO()
self.conn.retrieveFile(self.service_name, fpath, f)
hasher = hashlib.md5()
hasher.update(f.getvalue())
h = hasher.hexdigest()
self._hash_cache[fpath] = h
return h
def has_admin_metadata(self):
"""Return True if the administrative metadata exists.
This is the definition of being a "dataset".
"""
return self._path_exists(self.get_admin_metadata_key())
def _list_names(self, path):
names = []
for shf in self.conn.listPath(self.service_name, path):
if shf.file_attributes & ATTR_NORMAL:
name, ext = os.path.splitext(shf.filename)
names.append(name)
return names
def list_overlay_names(self):
"""Return list of overlay names."""
return self._list_names(self._overlays_path)
def list_annotation_names(self):
"""Return list of annotation names."""
        return self._list_names(self._annotations_path)
def list_tags(self):
"""Return list of tags."""
return self._list_names(self._tags_path)
def get_item_path(self, identifier):
"""Return absolute path at which item content can be accessed.
:param identifier: item identifier
:returns: absolute path from which the item content can be accessed
"""
manifest = self.get_manifest()
        item = manifest["items"][identifier]
        relpath = item["relpath"]
item_path = os.path.join(self._data_path, relpath)
return item_path
def _create_structure(self):
"""Create necessary structure to hold a dataset."""
uuid_path = os.path.join(self.path, self.uuid)
# Ensure that the specified path does not exist and create it.
if self._path_exists(uuid_path):
raise StorageBrokerOSError(
"Path '{}' already exists on share '{}'.".format(uuid_path,
self.service_name))
logger.debug(
"_create_structure, creating directory '{}' on share '{}'." \
.format(os.path.join(self.path, self.uuid), self.service_name))
self._create_directory(uuid_path)
# Create more essential subdirectories.
for abspath in self._essential_subdirectories:
logger.debug(
"_create_structure, creating directory '{}' on share '{}'." \
.format(abspath, self.service_name))
self._create_directory(abspath)
def put_item(self, fpath, relpath):
"""Put item with content from fpath at relpath in dataset.
Missing directories in relpath are created on the fly.
:param fpath: path to the item on disk
:param relpath: relative path name given to the item in the dataset as
a handle, i.e. a Unix-like relpath
:returns: the handle given to the item
"""
logger.debug("put_item, | |
run(self):
"""
Run resize-revert instance
"""
DLOG.verbose("Resize-Revert-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_resize_revert_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
def handle_event(self, event, event_data=None):
"""
Handle instance action proceed notifications
"""
handled = False
if INSTANCE_EVENT.NFVI_HOST_CHANGED == event:
if self._from_host_name != self._instance.host_name:
DLOG.debug("Resize-Revert-Instance for %s has moved from "
"host %s to host %s." % (self._instance.name,
self._from_host_name,
self._instance.host_name))
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
handled = True
elif INSTANCE_EVENT.RESIZE_REVERT_COMPLETED == event:
DLOG.debug("Resize-Revert-Instance for %s completed"
% self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
handled = True
return handled
class EvacuateTaskWork(state_machine.StateTaskWork):
"""
Evacuate Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(EvacuateTaskWork, self).__init__(
'evacuate-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=120)
self._instance_reference = weakref.ref(instance)
self._evacuate_inprogress = False
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for evacuate instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Evacuate-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Evacuate-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
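    # Sketch of how the NFVI layer is expected to drive the primed coroutine
    # above (illustrative only; the real plumbing lives in nfv_vim.nfvi):
    #
    #   cb = self._callback()                       # already primed by @coroutine
    #   # ...the asynchronous evacuate finishes, then:
    #   cb.send({'completed': True, 'reason': ''})  # resumes at "response = (yield)"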
def _do_evacuate(self):
"""
Perform the evacuate
"""
self._evacuate_inprogress = True
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
DLOG.debug("Evacuate-Instance for %s." % self._instance.name)
nfvi.nfvi_evacuate_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
def run(self):
"""
Run evacuate instance
"""
from nfv_vim import tables
host_table = tables.tables_get_host_table()
host = host_table.get(self._instance.host_name, None)
if host is not None:
if not (host.is_offline() or host.is_power_off()):
# We must wait for the compute host to go offline or power off
# before attempting to evacuate the instance. It is not safe to
# evacuate an instance that may still be running.
DLOG.debug("Evacuate-Instance for %s, but host %s is not "
"offline or power-off." %
(self._instance.name, host.name))
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
self._do_evacuate()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
def handle_event(self, event, event_data=None):
"""
Handle instance action proceed notifications
"""
from nfv_vim import tables
handled = False
if not self._evacuate_inprogress:
if INSTANCE_EVENT.NFVI_HOST_OFFLINE == event:
self._do_evacuate()
handled = True
elif INSTANCE_EVENT.AUDIT == event:
host_table = tables.tables_get_host_table()
host = host_table.get(self._instance.host_name, None)
if host is not None:
if host.is_offline():
self._do_evacuate()
handled = True
return handled
class StartTaskWork(state_machine.StateTaskWork):
"""
Start Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(StartTaskWork, self).__init__(
'start-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for start instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Start-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Start-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
def run(self):
"""
Run start instance
"""
DLOG.verbose("Start-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_start_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
class StopTaskWork(state_machine.StateTaskWork):
"""
Stop Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(StopTaskWork, self).__init__(
'stop-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for stop instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Stop-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Stop-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
def run(self):
"""
Run stop instance
"""
DLOG.verbose("Stop-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_stop_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
class PauseTaskWork(state_machine.StateTaskWork):
"""
Pause Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(PauseTaskWork, self).__init__(
'pause-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for pause instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Pause-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Pause-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
def run(self):
"""
Run pause instance
"""
DLOG.verbose("Pause-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_pause_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
class UnpauseTaskWork(state_machine.StateTaskWork):
"""
Unpause Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(UnpauseTaskWork, self).__init__(
'unpause-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for unpause instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Unpause-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Unpause-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
def run(self):
"""
Run unpause instance
"""
DLOG.verbose("Unpause-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_unpause_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
class SuspendTaskWork(state_machine.StateTaskWork):
"""
Suspend Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(SuspendTaskWork, self).__init__(
'suspend-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for suspend instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Suspend-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Suspend-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
def run(self):
"""
Run suspend instance
"""
DLOG.verbose("Suspend-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_suspend_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
class ResumeTaskWork(state_machine.StateTaskWork):
"""
Resume Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(ResumeTaskWork, self).__init__(
'resume-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for resume instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Resume-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Resume-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.FAILED,
response['reason'])
def run(self):
"""
Run resume instance
"""
DLOG.verbose("Resume-Instance for %s." % self._instance.name)
context = None
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
context = action_data.context
nfvi.nfvi_resume_instance(self._instance.uuid, self._callback(),
context=context)
if self._instance.action_fsm is not None:
action_data = self._instance.action_fsm_data
if action_data is not None:
action_data.set_action_initiated()
return state_machine.STATE_TASK_WORK_RESULT.WAIT, empty_reason
class RebootTaskWork(state_machine.StateTaskWork):
"""
Reboot Task Work
"""
def __init__(self, task, instance, force_pass=False):
super(RebootTaskWork, self).__init__(
'reboot-instance_%s' % instance.name, task,
force_pass=force_pass, timeout_in_secs=60)
self._instance_reference = weakref.ref(instance)
@property
def _instance(self):
"""
Returns the instance
"""
instance = self._instance_reference()
return instance
@coroutine
def _callback(self):
"""
Callback for reboot instance
"""
response = (yield)
if self.task is not None:
DLOG.debug("Reboot-Instance callback for %s, response=%s."
% (self._instance.name, response))
if response['completed']:
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
if self.force_pass:
DLOG.info("Reboot-Instance callback for %s, failed, "
"force-passing." % self._instance.name)
self.task.task_work_complete(
state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
empty_reason)
else:
                    self.task.task_work_complete(
                        state_machine.STATE_TASK_WORK_RESULT.FAILED,
                        response['reason'])
'classmate',
'jakarta.el-api',
'jakarta.el',
'jakarta.json',
'jsonp-jaxrs',
'hibernate-validator',
'hibernate-validator-cdi',
'pax-web-jetty-bundle',
'pax-web-extender-war',
'jmh-core',
'jmh-generator-annprocess',
'kryo',
'commons-logging',
'weld-se-core',
'weld-servlet',
'jakarta.validation-api',
'pax-exam',
'pax-exam-junit4',
'pax-exam-container-forked',
'pax-exam-container-native',
'pax-exam-junit-extender-impl',
'pax-exam-link-mvn',
'jetty',
'jetty-util',
'servlet-api-2.5',
'junit',
'testng',
'hamcrest-library',
'jmockit',
'mockito-all',
'xmlunit',
'org.apache.felix.framework',
'org.apache.felix.eventadmin',
'org.apache.felix.framework.security',
'jakarta.json.bind-api',
'yasson',
'opentracing-api',
'opentracing-util',
'jakarta.ws.rs-api',
'junit',
'oauth1-signature',
'oauth1-signature',
'junit',
'junit',
'maven-plugin-api',
'maven-core',
'maven-artifact',
'maven-dependency-tree',
'maven-plugin-api',
'maven-core',
'maven-artifact',
'maven-plugin-annotations',
'github-api',
'maven-plugin-testing-harness',
'commons-io',
'maven-compat',
'junit',
'javassist',
'metrics-core',
'junit',
'org.apache.felix.framework',
'org.eclipse.osgi',
'log4j-api',
'log4j-api',
'log4j-core',
'jboss-jms-api_1.1_spec',
'log4j-core',
'velocity',
'commons-io',
'jackson-dataformat-xml',
'log4j-api-java9',
'org.apache.felix.framework',
'org.osgi.core',
'junit',
'org.eclipse.osgi',
'maven-core',
'commons-lang3',
'jackson-core',
'jackson-databind',
'junit',
'maven-core',
'log4j-api',
'log4j-core',
'javax.servlet-api',
'tomcat-catalina',
'jetty-util',
'log4j-core',
'junit',
'spring-test',
'mockito-core',
'log4j-api',
'log4j-core',
'log4j-1.2-api',
'log4j-jcl',
'log4j-flume-ng',
'log4j-taglib',
'log4j-jmx-gui',
'log4j-slf4j-impl',
'log4j-slf4j18-impl',
'log4j-to-slf4j',
'log4j-appserver',
'log4j-web',
'log4j-couchdb',
'log4j-mongodb2',
'log4j-mongodb3',
'log4j-cassandra',
'log4j-jpa',
'log4j-iostreams',
'log4j-jul',
'log4j-liquibase',
'log4j-docker',
'log4j-spring-cloud-config-client',
'log4j-core',
'cassandra-driver-core',
'junit',
'mockito-core',
'log4j-api',
'log4j-core',
'cassandra-all',
'log4j-slf4j-impl',
'log4j-api',
'log4j-core-java9',
'org.osgi.core',
'disruptor',
'disruptor',
'jctools-core',
'jackson-core',
'jackson-databind',
'jackson-dataformat-yaml',
'jackson-dataformat-xml',
'woodstox-core',
'jansi',
'javax.mail',
'jboss-jms-api_1.1_spec',
'kafka-clients',
'jeromq',
'commons-compress',
'commons-csv',
'log4j-api',
'xz',
'jmdns',
'log4j',
'slf4j-api',
'slf4j-ext',
'junit',
'hamcrest-all',
'mockito-core',
'hsqldb',
'h2',
'spring-test',
'activemq-broker',
'commons-logging',
'logback-core',
'logback-classic',
'org.eclipse.osgi',
'org.apache.felix.framework',
'plexus-utils',
'maven-core',
'json-unit',
'xmlunit-core',
'xmlunit-matchers',
'commons-io',
'commons-codec',
'commons-lang3',
'bsh',
'groovy-jsr223',
'groovy-dateutil',
'wiremock',
'java-allocation-instrumenter',
'HdrHistogram',
'log4j-api',
'log4j-api',
'log4j-core',
'log4j-core',
'disruptor',
'disruptor',
'jctools-core',
'jackson-core',
'jackson-databind',
'jackson-dataformat-yaml',
'jackson-dataformat-xml',
'woodstox-core',
'log4j',
'slf4j-api',
'slf4j-ext',
'junit',
'hamcrest-all',
'spring-test',
'commons-logging',
'logback-core',
'logback-classic',
'jboss-jms-api_1.1_spec',
'activemq-broker',
'log4j-api',
'junit',
'maven-core',
'log4j-core',
'lightcouch',
'junit',
'mockito-core',
'log4j-api',
'log4j-core',
'log4j-slf4j-impl',
'log4j-api',
'log4j-api',
'log4j-api',
'log4j-core',
'log4j-core',
'log4j-core',
'log4j-core',
'log4j-iostreams',
'log4j-iostreams',
'log4j-iostreams',
'log4j-jcl',
'log4j-jcl',
'log4j-jcl',
'log4j-jul',
'log4j-jul',
'log4j-jul',
'log4j-flume-ng',
'log4j-flume-ng',
'log4j-flume-ng',
'log4j-1.2-api',
'log4j-1.2-api',
'log4j-1.2-api',
'log4j-slf4j-impl',
'log4j-slf4j-impl',
'log4j-slf4j-impl',
'log4j-slf4j18-impl',
'log4j-slf4j18-impl',
'log4j-slf4j18-impl',
'log4j-to-slf4j',
'log4j-to-slf4j',
'log4j-to-slf4j',
'log4j-jmx-gui',
'log4j-jmx-gui',
'log4j-jmx-gui',
'log4j-taglib',
'log4j-taglib',
'log4j-taglib',
'log4j-web',
'log4j-web',
'log4j-web',
'log4j-couchdb',
'log4j-couchdb',
'log4j-couchdb',
'log4j-mongodb2',
'log4j-mongodb2',
'log4j-mongodb2',
'log4j-mongodb3',
'log4j-mongodb3',
'log4j-mongodb3',
'log4j-cassandra',
'log4j-cassandra',
'log4j-cassandra',
'log4j-jpa',
'log4j-jpa',
'log4j-jpa',
'log4j-jdbc-dbcp2',
'log4j-jdbc-dbcp2',
'log4j-jdbc-dbcp2',
'log4j-liquibase',
'log4j-liquibase',
'log4j-liquibase',
'log4j-appserver',
'log4j-appserver',
'log4j-appserver',
'log4j-docker',
'log4j-docker',
'log4j-docker',
'log4j-spring-cloud-config-client',
'log4j-spring-cloud-config-client',
'log4j-spring-cloud-config-client',
'log4j-api',
'log4j-core',
'jackson-annotations',
'jackson-core',
'jackson-databind',
'junit',
'log4j-api',
'log4j-core',
'je',
'log4j-slf4j-impl',
'log4j-1.2-api',
'log4j-jcl',
'log4j-core',
'junit',
'flume-ng-sdk',
'jackson-core-asl',
'jackson-mapper-asl',
'flume-ng-core',
'flume-ng-embedded-agent',
'flume-file-channel',
'hadoop-core',
'log4j-api',
'log4j-core',
'log4j-core',
'junit',
'hamcrest-all',
'mockito-core',
'h2',
'junit',
'hamcrest-all',
'commons-logging',
'log4j-api',
'log4j-core',
'log4j-core',
'log4j-core',
'commons-dbcp2',
'junit',
'log4j-api',
'log4j-core',
'h2',
'log4j-api',
'log4j-core',
'log4j-core',
'junit',
'jconsole',
'jconsole',
'jconsole',
'log4j-core',
'javax.persistence',
'jackson-core',
'jackson-databind',
'junit',
'log4j-api',
'log4j-core',
'org.eclipse.persistence.jpa',
'hsqldb',
'h2',
'log4j-api',
'log4j-core',
'log4j-core',
'hamcrest-all',
'junit',
'log4j-api',
'log4j-core',
'kubernetes-client',
'junit',
'log4j-api',
'liquibase-core',
'log4j-api',
'commons-lang3',
'log4j-core',
'log4j-core',
'junit',
'log4j-core',
'mongo-java-driver',
'junit',
'mockito-core',
'log4j-api',
'log4j-core',
'log4j-slf4j-impl',
'de.flapdoodle.embed.mongo',
'log4j-core',
'mongodb-driver',
'bson',
'junit',
'mockito-core',
'log4j-api',
'log4j-core',
'log4j-slf4j-impl',
'de.flapdoodle.embed.mongo',
'org.apache.felix.framework',
'org.osgi.core',
'junit',
'org.eclipse.osgi',
'maven-core',
'commons-lang3',
'jmh-core',
'jmh-generator-annprocess',
'log4j-api',
'log4j-core',
'log4j-jpa',
'slf4j-api',
'slf4j-ext',
'logback-core',
'logback-classic',
'log4j',
'disruptor',
'disruptor',
'jctools-core',
'hsqldb',
'h2',
'persistence-api',
'javax.persistence',
'eclipselink',
'org.eclipse.persistence.jpa',
'jackson-core',
'jackson-databind',
'log4j-api',
'log4j-core',
'junit',
'log4j-api',
'log4j-core',
'spring-beans',
'spring-core',
'spring-webmvc',
'spring-ws-core',
'servlet-api',
'junit',
'log4j-samples-flume-common',
'log4j-api',
'log4j-core',
'log4j-flume-ng',
'flume-ng-node',
'hadoop-core',
'flume-file-channel',
'spring-beans',
'spring-core',
'spring-webmvc',
'spring-ws-core',
'servlet-api',
'junit',
'log4j-samples-flume-common',
'log4j-api',
'log4j-core',
'log4j-flume-ng',
'spring-beans',
'spring-core',
'spring-webmvc',
'spring-ws-core',
'servlet-api',
'junit',
'log4j-api',
'log4j-core',
'junit',
'log4j-flume-ng',
'log4j-samples-flume-common',
'spring-beans',
'spring-core',
'spring-webmvc',
'spring-ws-core',
'servlet-api',
'junit',
'slf4j-api',
'slf4j-ext',
'log4j-api',
'log4j-core',
'log4j-api',
'commons-lang3',
'commons-csv',
'log4j-core',
'log4j-to-slf4j',
'junit',
'slf4j-api',
'slf4j-ext',
'log4j-api',
'log4j-core',
'log4j-api',
'commons-lang3',
'commons-csv',
'log4j-core',
'log4j-to-slf4j',
'junit',
'junit',
'spring-cloud-config-client',
'spring-cloud-bus',
'spring-boot',
'spring-context',
'spring-context-support',
'hamcrest-all',
'commons-logging',
'log4j-api',
'log4j-core',
'log4j-core',
'log4j-jcl',
'spring-boot-starter-web',
'spring-boot-starter-actuator',
'jackson-datatype-jsr310',
'spring-cloud-starter-bus-amqp',
'spring-boot-starter-test',
'spring-test',
'spring-boot-starter-tomcat',
'log4j-spring-cloud-config-client',
'spring-boot-starter',
'log4j-docker',
'log4j-kubernetes',
'disruptor',
'log4j-flume-ng',
'flume-ng-sdk',
'flume-ng-embedded-agent',
'kafka-clients',
'javax.servlet-api',
'log4j-api',
'spring-cloud-dependencies',
'spring-cloud-starter-config',
'spring-cloud-config-server',
'spring-cloud-config-monitor',
'spring-boot-starter-actuator',
'spring-cloud-starter-bus-amqp',
'spring-boot-starter-web',
'spring-boot-starter-security',
'log4j-api',
'log4j-core',
'log4j-slf4j-impl',
'spring-boot-starter-test',
'spring-beans',
'spring-core',
'spring-webmvc',
'spring-ws-core',
'servlet-api',
'junit',
'spring-framework-bom',
'spring-boot',
'spring-cloud-dependencies',
'junit',
'log4j-api',
'log4j-web',
'javax.servlet-api',
'jsp-api',
'log4j-core',
'log4j-core',
'junit',
'spring-test',
'slf4j-api',
'log4j-api',
'logback-core',
'logback-core',
'logback-classic',
'junit',
'hamcrest-all',
'log4j-api',
'log4j-core',
'javax.servlet-api',
'log4j-core',
'junit',
'spring-test',
'mockito-core',
'slf4j-api',
'slf4j-ext',
'logback-core',
'logback-core',
'org.eclipse.osgi',
'org.apache.felix.framework',
'maven-core',
'commons-codec',
'commons-lang3',
'logback-classic',
'logback-classic',
'log4j-api-java9',
'log4j-api',
'log4j-api',
'log4j-core-java9',
'log4j-core',
'log4j-core',
'log4j-slf4j-impl',
'log4j-slf4j-impl',
'log4j-slf4j18-impl',
'log4j-jcl',
'commons-logging',
'log4j-1.2-api',
'log4j-flume-ng',
'log4j-iostreams',
'log4j-jul',
'log4j-taglib',
'log4j-web',
'je',
'org.osgi.core',
'jansi',
'flume-ng-sdk',
'flume-ng-core',
'flume-ng-embedded-agent',
'flume-ng-node',
'flume-file-channel',
'hadoop-core',
'jackson-core-asl',
'jackson-mapper-asl',
'jackson-core',
'jackson-databind',
'jackson-annotations',
'jackson-dataformat-yaml',
'jackson-dataformat-xml',
'jackson-module-jaxb-annotations',
'javax.mail',
'jboss-jms-api_1.1_spec',
'activemq-broker',
'kafka-clients',
'jeromq',
'servlet-api',
'disruptor',
'disruptor',
'jctools-core',
'junit',
'hamcrest-all',
'plexus-utils',
'mockito-core',
'spring-aop',
'spring-beans',
'spring-context',
'spring-core',
'spring-expression',
'spring-oxm',
'spring-test',
'spring-web',
'spring-webmvc',
'kubernetes-client',
'hsqldb',
'h2',
'org.eclipse.persistence.jpa',
'javax.persistence',
'mongo-java-driver',
'mongodb-driver',
'bson',
'lightcouch',
'cassandra-driver-core',
'liquibase-core',
'json-unit',
'xmlunit-core',
'xmlunit-matchers',
'commons-io',
'wiremock',
'commons-compress',
'xz',
'commons-csv',
'java-allocation-instrumenter',
'HdrHistogram',
'bsh',
'groovy-jsr223',
'groovy-dateutil',
'de.flapdoodle.embed.mongo',
'wagon-ssh',
'yjp-controller-api-redist',
'revapi-java',
'ognl',
'javassist',
'slf4j-api',
'slf4j-log4j12',
'log4j',
'log4j-core',
'commons-logging',
'cglib',
'junit-jupiter-engine',
'hsqldb',
'derby',
'mockito-core',
'mockito-junit-jupiter',
'velocity-engine-core',
'postgresql',
'mysql-connector-java',
'assertj-core',
'catch-exception',
'junit-jupiter',
'postgresql',
'mysql',
'checkstyle',
'protobuf-java',
'jboss-marshalling',
'slf4j-api',
'commons-logging',
'log4j',
'mockito-core',
'protobuf-java',
'protobuf-javanano',
'jboss-marshalling',
'jzlib',
'compress-lzf',
'lz4',
'lzma-java',
'mockito-core',
'jboss-marshalling-serial',
'jboss-marshalling-river',
'commons-compress',
'apacheds-protocol-dns',
'jzlib',
'mockito-core',
'jzlib',
'gson',
'mockito-core',
'mockito-core',
'aalto-xml',
'svm',
'jctools-core',
'slf4j-api',
'commons-logging',
'log4j',
'log4j-api',
'log4j-core',
'blockhound',
'mockito-core',
'groovy-all',
'ant-optional',
'protobuf-java',
'${tcnative.artifactId}',
'${conscrypt.artifactId}',
'npn-api',
'jzlib',
'javassist',
'metrics-core',
'logback-classic',
'bcpkix-jdk15on',
'bcprov-jdk15on',
'javax.activation',
'${tcnative.artifactId}',
'bcpkix-jdk15on',
'npn-api',
'alpn-api',
'${conscrypt.artifactId}',
'mockito-core',
'AmazonCorrettoCryptoProvider',
'mockito-core',
'junit',
'jmh-core',
'jmh-generator-annprocess',
'Agrona',
'${tcnative.artifactId}',
'javax.activation',
'javassist',
'jboss-marshalling',
'npn-api',
'alpn-api',
'protobuf-java',
'protobuf-javanano',
'${tcnative.artifactId}',
'${conscrypt.artifactId}',
'bcpkix-jdk15on',
'bcprov-jdk15on',
'aalto-xml',
'jzlib',
'compress-lzf',
'lz4',
'lzma-java',
'jctools-core',
'rxtx',
'barchart-udt-bundle',
'servlet-api',
'slf4j-api',
'commons-logging',
'log4j-api',
'log4j',
'metrics-core',
'junit',
'hamcrest-library',
'mockito-core',
'logback-classic',
'jboss-marshalling-serial',
'jboss-marshalling-river',
'caliper',
'commons-compress',
'gson',
'xz',
'apacheds-protocol-dns',
'log4j-core',
'blockhound',
'javassist',
'junit',
'hamcrest-library',
'logback-classic',
'checkstyle',
'maven-scm-api',
'maven-scm-provider-gitexe',
'ant',
'ant-launcher',
'ant-contrib',
'ant-contrib',
'mockito-core',
'apacheds-protocol-dns',
'commons-lang',
'${tcnative.artifactId}',
'xz',
'junit',
'hamcrest-library',
'mockito-core',
'logback-classic',
'jython-standalone',
'org.apache.felix.configadmin',
'org.apache.felix.framework',
'pax-exam-junit4',
'pax-exam-container-native',
'pax-exam-link-assembly',
'junit',
'${tcnative.artifactId}',
'${tcnative.artifactId}',
'${tcnative.artifactId}',
'mockito-core',
'blockhound',
'enforcer-rules',
'${tcnative.artifactId}',
'${tcnative.artifactId}',
'junit',
'rxtx',
'barchart-udt-bundle',
'metrics-core',
'caliper',
'snakeyaml',
'commons-codec',
'commons-lang3',
'slf4j-api',
'logback-classic',
'logback-core',
'commons-validator',
'logback-classic',
'logback-core',
'fastjson',
'javassist',
'slf4j-api',
'slf4j-api',
'commons-lang3',
'log4j-core',
'log4j-slf4j-impl',
'commons-lang3',
'commons-validator',
'logback-classic',
'javassist',
'openmessaging-api',
'guava',
'slf4j-api',
'log4j',
'log4j-core',
'logback-core',
'logback-classic',
'slf4j-api',
'logback-classic',
'logback-classic',
'logback-core',
'slf4j-api',
'openmessaging-api',
'extra-enforcer-rules',
'junit',
'assertj-core',
'mockito-core',
'powermock-module-junit4',
'powermock-api-mockito2',
'slf4j-api',
'logback-classic',
'logback-core',
'commons-cli',
'netty-all',
'fastjson',
'javassist',
'jna',
'commons-lang3',
'guava',
'openmessaging-api',
'log4j',
'snakeyaml',
'commons-codec',
'log4j-core',
'log4j-slf4j-impl',
'commons-validator',
'fastjson',
'netty-all',
'netty-tcnative-boringssl-static',
'commons-cli',
'dledger',
'jna',
'logback-classic',
'logback-core',
'log4j',
'truth',
'fastjson',
'logback-classic',
'logback-core',
'commons-lang3',
'snakeyaml',
'commons-beanutils',
'commons-configuration2',
'slf4j-api',
'slf4j-simple',
'jcl-over-slf4j',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'hsqldb',
'bcprov-jdk15on',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'slf4j-log4j12',
'log4j',
'guice-servlet',
'jcl-over-slf4j',
'htmlunit',
'apache-jsp',
'apache-jstl',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'slf4j-log4j12',
'log4j',
'guice-servlet',
'jcl-over-slf4j',
'htmlunit',
'apache-jsp',
'apache-jstl',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'slf4j-log4j12',
'log4j',
'jcl-over-slf4j',
'htmlunit',
'jetty-server',
'jetty-webapp',
'apache-jsp',
'apache-jstl',
'junit-servers-jetty',
'junit',
'slf4j-api',
'slf4j-log4j12',
'log4j',
'groovy',
'${project.artifactId}',
'junit',
'hamcrest-core',
'easymock',
'groovy',
'powermock-module-junit4',
'powermock-api-easymock',
'samples-spring-client',
'junit',
'commons-cli',
'javax.annotation-api',
'commons-codec',
'aspectjrt',
'aspectjweaver',
'commons-configuration2',
'encoder',
'logback-classic',
'slf4j-api',
'slf4j-simple',
'slf4j-log4j12',
'jcl-over-slf4j',
'commons-beanutils',
'hsqldb',
'jsp-api',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'log4j',
'groovy',
'ehcache-core',
'hazelcast',
'htmlunit',
'hibernate-core',
'geronimo-jta_1.1_spec',
'spring-context',
'spring-web',
'spring-jdbc',
'spring-orm',
'spring-webmvc',
'spring-test',
'spring-boot-starter',
'spring-boot-autoconfigure',
'spring-boot-configuration-processor',
'spring-boot-test',
'guice',
'guice-multibindings',
'guice-servlet',
'quartz',
'junit-servers-jetty',
'aspectjtools',
'commons-lang3',
'log4j',
'jcl-over-slf4j',
'slf4j-log4j12',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'slf4j-log4j12',
'log4j',
'guice-servlet',
'jcl-over-slf4j',
'htmlunit',
'apache-jsp',
'apache-jstl',
'jcl-over-slf4j',
'logback-classic',
'rest-assured',
'jersey-container-grizzly2-servlet',
'resteasy-jaxrs',
'resteasy-servlet-initializer',
'resteasy-jackson2-provider',
'cxf-rt-rs-http-sci',
'cxf-rt-frontend-jaxws',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'htmlunit',
'apache-jsp',
'apache-jstl',
'spring-context',
'javax.annotation-api',
'slf4j-log4j12',
'jcl-over-slf4j',
'log4j',
'spring-boot-autoconfigure',
'spring-boot-configuration-processor',
'javax.annotation-api',
'logback-classic',
'spring-boot-starter-thymeleaf',
'javax.servlet-api',
'spring-boot-autoconfigure',
'spring-boot-configuration-processor',
'commons-logging',
'log4j',
'slf4j-log4j12',
'jcl-over-slf4j',
'javax.servlet-api',
'hibernate-core',
'geronimo-jta_1.1_spec',
'hsqldb',
'ehcache-core',
'hibernate-ehcache',
'spring-context',
'spring-orm',
'spring-jdbc',
'spring-webmvc',
'htmlunit',
'apache-jsp',
'taglibs-standard-spec',
'taglibs-standard-impl',
'apache-jstl',
'javax.annotation-api',
'ehcache-core',
'javax.servlet-api',
'slf4j-log4j12',
'jcl-over-slf4j',
'log4j',
'spring-context',
'spring-jdbc',
'spring-webmvc',
'hsqldb',
'taglibs-standard-spec',
'taglibs-standard-impl',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'slf4j-log4j12',
'log4j',
'htmlunit',
'jcl-over-slf4j',
'apache-jsp',
'apache-jstl',
'aspectjrt',
'aspectjweaver',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'aspectjtools',
'cas-client-core',
'commons-codec',
'opensaml',
'xmlsec',
'ehcache-core',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'framework',
'guice',
'guice-multibindings',
'javax.annotation-api',
'guice-servlet',
'commons-beanutils',
'javax.servlet-api',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'hazelcast',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'javax.ws.rs-api',
'jcl-over-slf4j',
'quartz',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'javax.servlet-api',
'spring-context',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'spring-test',
'spring-web',
'mockito-core',
'spring-boot-autoconfigure',
'spring-boot-configuration-processor',
'spring-boot-test',
'spring-test',
'mockito-core',
'commons-logging',
'javax.servlet-api',
'spring-boot-autoconfigure',
'spring-boot-configuration-processor',
'spring-boot-starter',
'spring-boot-starter-web',
'spring-boot-test',
'spring-test',
'mockito-core',
'commons-cli',
'slf4j-simple',
'jsp-api',
'taglibs-standard-spec',
'taglibs-standard-impl',
'javax.servlet-api',
'encoder',
'jcl-over-slf4j',
'slf4j-log4j12',
'log4j',
'org.apache.felix.main',
'junit',
'ant-junit',
'org.osgi.core',
'org.osgi.enterprise',
'junit',
'log4j',
'cal10n-api',
'cal10n-api',
'javassist',
'log4j',
'spring-data-commons',
'spring-orm',
'spring-context',
'spring-aop',
'spring-tx',
'spring-beans',
'spring-instrument',
'spring-core',
'aspectjrt',
'aspectjweaver',
'spring-aspects',
'hsqldb',
'joda-time',
'threetenbp',
'org.eclipse.persistence.jpa',
'hibernate-core',
'hibernate-jpamodelgen',
'querydsl-apt',
'querydsl-jpa',
'geronimo-jcdi_2.0_spec',
'javax.interceptor-api',
'cdi-api',
'javax.annotation-api',
'openwebbeans-se',
'spring-instrument',
'aspectjrt',
'aspectjtools',
'elasticsearch',
'guava',
'guava',
'spring-beans',
'spring-core',
'spring-context',
'spring-jms',
'xbean-spring',
'activemq-client',
'kafka-clients',
'HdrHistogram',
'jetty-server',
'jetty-servlet',
'commons-cli',
'guava',
'commons-io',
'json-simple',
'snakeyaml',
'flux-core',
'mqtt-client',
'activemq-broker',
'activemq-mqtt',
'activemq-kahadb-store',
'guava',
'kafka-clients',
'jctools-core',
'commons-compress',
'guava',
'commons-cli',
'httpclient',
'commons-lang',
'commons-logging',
'guava',
'HdrHistogram',
'testng',
'mockito-core',
'java-hamcrest',
'fest-assert-core',
'jmock',
'multilang-javascript',
'multilang-ruby',
'multilang-python',
'commons-collections',
'guava',
'hadoop-client',
'hbase-annotations',
'hbase-client',
'hbase-server',
'hive-hcatalog-streaming',
'hive-webhcat-java-client',
'jaxb-api',
'hadoop-hdfs',
'hadoop-client',
'hadoop-common',
'guava',
'cassandra-driver-core',
'cassandra-driver-mapping',
'cassandra-all',
'slf4j-api',
'guava',
'commons-lang3',
'mockito-core',
'java-hamcrest',
'commons-io',
'rest',
'httpasyncclient',
'httpclient',
'jackson-core',
'jackson-databind',
'guava-testlib',
'mockito-core',
'mockito-junit-jupiter',
'java-hamcrest',
'elasticsearch',
'guava',
'azure-eventhubs',
'curator-framework',
'zookeeper',
'hbase-annotations',
'hbase-client',
'caffeine',
'jackson-core',
'jackson-databind',
'mockito-core',
'hadoop-client',
'snakeyaml',
'hadoop-hdfs',
'hadoop-common',
'hadoop-auth',
'mockito-core',
'java-hamcrest',
'hadoop-minicluster',
'kafka-avro-serializer',
'guava',
'hadoop-hdfs',
'hadoop-common',
'hadoop-minicluster',
'guava',
'junit-jupiter-params',
'hadoop-annotations',
'hive-hcatalog-streaming',
'hive-hcatalog-core',
'hive-cli',
'calcite-core',
'json-simple',
'hadoop-client',
'mockito-core',
'java-hamcrest',
'libthrift',
'guava',
'commons-lang3',
'guava',
'HikariCP',
'hsqldb',
'commons-lang',
'geronimo-jms_1.1_spec',
'activemq-all',
'kafka-clients',
'jackson-databind',
'guava',
'mockito-core',
'mockito-junit-jupiter',
'java-hamcrest',
'junit-jupiter-params',
'log4j-over-slf4j',
'kafka_2.11',
'kafka-clients',
'kafka_2.11',
'curator-test',
'commons-lang',
'kafka-clients',
'jackson-databind',
'curator-framework',
corresponds to the ``instance_id_token`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
task_id (:class:`str`):
Required. Unique identifier of the
task this applies to.
This corresponds to the ``task_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
task_type (:class:`google.cloud.osconfig.agentendpoint_v1beta.types.TaskType`):
Required. The type of task to report progress on.
                Progress must include the appropriate message based on
                this enum as specified below:
                APPLY_PATCHES = ApplyPatchesTaskProgress
                EXEC_STEP = Progress not supported for this type.
This corresponds to the ``task_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.osconfig.agentendpoint_v1beta.types.ReportTaskProgressResponse:
The response message after the agent
reported the current task progress.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance_id_token, task_id, task_type])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = agentendpoint.ReportTaskProgressRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if instance_id_token is not None:
request.instance_id_token = instance_id_token
if task_id is not None:
request.task_id = task_id
if task_type is not None:
request.task_type = task_type
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.report_task_progress,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
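
    # Illustrative usage sketch -- not part of the generated client surface. The
    # client class name and its construction are assumptions for context; what the
    # sketch shows is grounded in the signature above: the flattened field keyword
    # arguments are mutually exclusive with passing a prebuilt ``request`` object.
    #
    #   client = AgentEndpointServiceAsyncClient()   # assumed class name
    #   response = await client.report_task_progress(
    #       instance_id_token=identity_token,        # placeholder token value
    #       task_id="some-task-id",
    #       task_type=tasks.TaskType.APPLY_PATCHES,
    #   )
    #
    #   # Equivalent call passing a single request object and no field kwargs:
    #   response = await client.report_task_progress(
    #       request=agentendpoint.ReportTaskProgressRequest(
    #           instance_id_token=identity_token,
    #           task_id="some-task-id",
    #           task_type=tasks.TaskType.APPLY_PATCHES,
    #       )
    #   )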
async def report_task_complete(self,
request: agentendpoint.ReportTaskCompleteRequest = None,
*,
instance_id_token: str = None,
task_id: str = None,
task_type: tasks.TaskType = None,
error_message: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> agentendpoint.ReportTaskCompleteResponse:
r"""Signals that the task execution is complete and
optionally returns the next task.
Args:
request (:class:`google.cloud.osconfig.agentendpoint_v1beta.types.ReportTaskCompleteRequest`):
The request object. A request message for signaling the
completion of a task execution.
instance_id_token (:class:`str`):
Required. This is the Compute Engine
instance identity token described in
https://cloud.google.com/compute/docs/instances/verifying-
instance-identity where the audience is
'osconfig.googleapis.com' and the format
is 'full'.
This corresponds to the ``instance_id_token`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
task_id (:class:`str`):
Required. Unique identifier of the
task this applies to.
This corresponds to the ``task_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
task_type (:class:`google.cloud.osconfig.agentendpoint_v1beta.types.TaskType`):
Required. The type of task to report completed.
                The output must include the appropriate message based on
                the following enum values:
                APPLY_PATCHES = ApplyPatchesTaskOutput
                EXEC_STEP = ExecStepTaskOutput
This corresponds to the ``task_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
error_message (:class:`str`):
Descriptive error message if the task
execution ended in error.
This corresponds to the ``error_message`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.osconfig.agentendpoint_v1beta.types.ReportTaskCompleteResponse:
The response message after the agent
signaled the current task complete.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance_id_token, task_id, task_type, error_message])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = agentendpoint.ReportTaskCompleteRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if instance_id_token is not None:
request.instance_id_token = instance_id_token
if task_id is not None:
request.task_id = task_id
if task_type is not None:
request.task_type = task_type
if error_message is not None:
request.error_message = error_message
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.report_task_complete,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def lookup_effective_guest_policy(self,
request: guest_policies.LookupEffectiveGuestPolicyRequest = None,
*,
instance_id_token: str = None,
os_short_name: str = None,
os_version: str = None,
os_architecture: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> guest_policies.EffectiveGuestPolicy:
r"""Lookup the effective guest policy that applies to a
VM instance. This lookup merges all policies that are
assigned to the instance ancestry.
Args:
request (:class:`google.cloud.osconfig.agentendpoint_v1beta.types.LookupEffectiveGuestPolicyRequest`):
The request object. A request message for getting
effective policy assigned to the instance.
instance_id_token (:class:`str`):
Required. This is the GCE instance
identity token described in
https://cloud.google.com/compute/docs/instances/verifying-
instance-identity where the audience is
'osconfig.googleapis.com' and the format
is 'full'.
This corresponds to the ``instance_id_token`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
os_short_name (:class:`str`):
Short name of the OS running on the
instance. The OS Config agent only
                provides this field for targeting if OS
Inventory is enabled for that instance.
This corresponds to the ``os_short_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
os_version (:class:`str`):
Version of the OS running on the
instance. The OS Config agent only
                provides this field for targeting if OS
Inventory is enabled for that VM
instance.
This corresponds to the ``os_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
os_architecture (:class:`str`):
Architecture of OS running on the
instance. The OS Config agent only
                provides this field for targeting if OS
Inventory is enabled for that instance.
This corresponds to the ``os_architecture`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.osconfig.agentendpoint_v1beta.types.EffectiveGuestPolicy:
The effective guest policy assigned
to the instance.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance_id_token, os_short_name, os_version, os_architecture])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = guest_policies.LookupEffectiveGuestPolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if instance_id_token is not None:
request.instance_id_token = instance_id_token
if os_short_name is not None:
request.os_short_name = os_short_name
if os_version is not None:
request.os_version = os_version
if os_architecture is not None:
request.os_architecture = os_architecture
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.lookup_effective_guest_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def register_agent(self,
request: agentendpoint.RegisterAgentRequest = None,
*,
instance_id_token: str = None,
agent_version: str = None,
supported_capabilities: Sequence[str] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> agentendpoint.RegisterAgentResponse:
r"""Registers the agent running on the VM.
Args:
request (:class:`google.cloud.osconfig.agentendpoint_v1beta.types.RegisterAgentRequest`):
The request object. The request message for registering
the agent.
instance_id_token (:class:`str`):
Required. This is the Compute Engine
instance identity token described in
https://cloud.google.com/compute/docs/instances/verifying-
instance-identity where the audience is
'osconfig.googleapis.com' and the format
is 'full'.
This corresponds to the ``instance_id_token`` field
on the ``request`` instance; if ``request`` is provided, this
                should not be set.
implemented for indefinite forms.
- ``algorithm`` -- String. The algorithm to use: Valid options are:
* ``'default'`` -- Let Sage pick an algorithm (default).
* ``'pari'`` -- use PARI
* ``'sage'`` -- use Sage
.. SEEALSO::
:meth:`is_reduced`
EXAMPLES::
sage: a = BinaryQF([33, 11, 5])
sage: a.is_reduced()
False
sage: b = a.reduced_form(); b
5*x^2 - x*y + 27*y^2
sage: b.is_reduced()
True
sage: a = BinaryQF([15, 0, 15])
sage: a.is_reduced()
True
sage: b = a.reduced_form(); b
15*x^2 + 15*y^2
sage: b.is_reduced()
True
Examples of reducing indefinite forms::
sage: f = BinaryQF(1, 0, -3)
sage: f.is_reduced()
False
sage: g = f.reduced_form(); g
x^2 + 2*x*y - 2*y^2
sage: g.is_reduced()
True
sage: q = BinaryQF(1, 0, -1)
sage: q.reduced_form()
x^2 + 2*x*y
sage: BinaryQF(1, 9, 4).reduced_form(transformation=True)
(
[ 0 -1]
4*x^2 + 7*x*y - y^2, [ 1 2]
)
sage: BinaryQF(3, 7, -2).reduced_form(transformation=True)
(
[1 0]
3*x^2 + 7*x*y - 2*y^2, [0 1]
)
sage: BinaryQF(-6, 6, -1).reduced_form(transformation=True)
(
[ 0 -1]
-x^2 + 2*x*y + 2*y^2, [ 1 -4]
)
"""
if self.is_reduced():
if transformation:
return self, Matrix(ZZ, 2, 2, [1, 0, 0, 1])
else:
return self
if algorithm == "default":
if self.is_reducible() or (self.discriminant() > 0 and transformation):
algorithm = 'sage'
elif not transformation:
algorithm = 'pari'
else:
raise NotImplementedError('reduction of definite binary '
'quadratic forms with transformation=True is not '
'implemented')
if algorithm == 'sage':
if self.discriminant() <= 0:
raise NotImplementedError('reduction of definite binary '
'quadratic forms is not implemented in Sage')
return self._reduce_indef(transformation)
elif algorithm == 'pari':
if transformation:
raise NotImplementedError('transformation=True is not '
'supported using PARI')
elif self.is_reducible():
raise NotImplementedError('reducible forms are not '
'supported using PARI')
return BinaryQF(self.__pari__().qfbred())
else:
raise ValueError('unknown implementation for binary quadratic form '
'reduction: %s' % algorithm)
# Buchmann/Vollmer cycle algorithm
def _RhoTau(self):
"""
Apply Rho and Tau operators to this form, returning a new form `Q`.
EXAMPLES::
sage: f = BinaryQF(1, 8, -3)
sage: f._RhoTau()
3*x^2 + 4*x*y - 5*y^2
"""
d = self.discriminant().sqrt(prec=53)
a = self._a
b = self._b
c = self._c
cabs = c.abs()
sign = c.sign()
if cabs >= d:
s = sign * ((cabs+b) / (2*cabs)).floor()
else:
s = sign * ((d+b) / (2*cabs)).floor()
Q = BinaryQF(-c, -b + 2*s*c, -(a - b*s + c*s*s))
return Q
def _Rho(self):
"""
Apply the Rho operator to this form, returning a new form `Q`.
EXAMPLES::
sage: f = BinaryQF(1, 8, -3)
sage: f._Rho()
-3*x^2 + 4*x*y + 5*y^2
"""
d = self.discriminant().sqrt(prec=53)
a = self._a
b = self._b
c = self._c
cabs = c.abs()
sign = c.sign()
if cabs >= d:
s = sign * ((cabs+b) / (2*cabs)).floor()
else:
s = sign * ((d+b) / (2*cabs)).floor()
Q = BinaryQF(c, -b + 2*s*c, a - b*s + c*s*s)
return Q
def _Tau(self):
"""
Apply the Tau operator to this form, returning a new form `Q`.
EXAMPLES::
sage: f = BinaryQF(1, 8, -3)
sage: f._Tau()
-x^2 + 8*x*y + 3*y^2
"""
a = self._a
b = self._b
c = self._c
Q = BinaryQF(-a, b, -c)
return Q
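
    # Relationship between the helper operators above, read directly from the
    # code: _Tau() flips the signs of the outer coefficients, (a, b, c) -> (-a, b, -c),
    # and _RhoTau() equals _Tau() applied to the output of _Rho() for the same
    # shift ``s`` (outer coefficients negated, middle coefficient unchanged).
    # This is consistent with the doctests above: for BinaryQF(1, 8, -3),
    # _Rho() gives -3*x^2 + 4*x*y + 5*y^2 while _RhoTau() gives 3*x^2 + 4*x*y - 5*y^2.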
def cycle(self, proper=False):
"""
Return the cycle of reduced forms to which ``self`` belongs.
This is based on Algorithm 6.1 of [BUVO2007]_.
INPUT:
- ``self`` -- reduced, indefinite form of non-square discriminant
- ``proper`` -- boolean (default: ``False``); if ``True``, return the
proper cycle
The proper cycle of a form `f` consists of all reduced forms that are
properly equivalent to `f`. This is useful when testing for proper
equivalence (or equivalence) between indefinite forms.
The cycle of `f` is a technical tool that is used when computing the proper
cycle. Our definition of the cycle is slightly different from the one
in [BUVO2007]_. In our definition, the cycle consists of all reduced
forms `g`, such that the `a`-coefficient of `g` has the same sign as the
`a`-coefficient of `f`, and `g` can be obtained from `f` by performing a
change of variables, and then multiplying by the determinant of the
change-of-variables matrix. It is important to note that `g` might not be
equivalent to `f` (because of multiplying by the determinant). However,
        either `g` or `-g` must be equivalent to `f`. Also note that the cycle
does contain `f`. (Under the definition in [BUVO2007]_, the cycle might
not contain `f`, because all forms in the cycle are required to have
positive `a`-coefficient, even if the `a`-coefficient of `f` is negative.)
EXAMPLES::
sage: Q = BinaryQF(14, 17, -2)
sage: Q.cycle()
[14*x^2 + 17*x*y - 2*y^2,
2*x^2 + 19*x*y - 5*y^2,
5*x^2 + 11*x*y - 14*y^2]
sage: Q.cycle(proper=True)
[14*x^2 + 17*x*y - 2*y^2,
-2*x^2 + 19*x*y + 5*y^2,
5*x^2 + 11*x*y - 14*y^2,
-14*x^2 + 17*x*y + 2*y^2,
2*x^2 + 19*x*y - 5*y^2,
-5*x^2 + 11*x*y + 14*y^2]
sage: Q = BinaryQF(1, 8, -3)
sage: Q.cycle()
[x^2 + 8*x*y - 3*y^2,
3*x^2 + 4*x*y - 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
2*x^2 + 6*x*y - 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
3*x^2 + 8*x*y - y^2]
sage: Q.cycle(proper=True)
[x^2 + 8*x*y - 3*y^2,
-3*x^2 + 4*x*y + 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
-2*x^2 + 6*x*y + 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
-3*x^2 + 8*x*y + y^2]
sage: Q = BinaryQF(1, 7, -6)
sage: Q.cycle()
[x^2 + 7*x*y - 6*y^2,
6*x^2 + 5*x*y - 2*y^2,
2*x^2 + 7*x*y - 3*y^2,
3*x^2 + 5*x*y - 4*y^2,
4*x^2 + 3*x*y - 4*y^2,
4*x^2 + 5*x*y - 3*y^2,
3*x^2 + 7*x*y - 2*y^2,
2*x^2 + 5*x*y - 6*y^2,
6*x^2 + 7*x*y - y^2]
TESTS:
Check an example in :trac:`28989`::
sage: Q = BinaryQF(1, 1, -1)
sage: Q.cycle(proper=True)
[x^2 + x*y - y^2, -x^2 + x*y + y^2]
This is Example 6.10.6 of [BUVO2007]_::
sage: Q = BinaryQF(1, 7, -6)
sage: Q.cycle()
[x^2 + 7*x*y - 6*y^2,
6*x^2 + 5*x*y - 2*y^2,
2*x^2 + 7*x*y - 3*y^2,
3*x^2 + 5*x*y - 4*y^2,
4*x^2 + 3*x*y - 4*y^2,
4*x^2 + 5*x*y - 3*y^2,
3*x^2 + 7*x*y - 2*y^2,
2*x^2 + 5*x*y - 6*y^2,
6*x^2 + 7*x*y - y^2]
sage: Q.cycle(proper=True)
[x^2 + 7*x*y - 6*y^2,
-6*x^2 + 5*x*y + 2*y^2,
2*x^2 + 7*x*y - 3*y^2,
-3*x^2 + 5*x*y + 4*y^2,
4*x^2 + 3*x*y - 4*y^2,
-4*x^2 + 5*x*y + 3*y^2,
3*x^2 + 7*x*y - 2*y^2,
-2*x^2 + 5*x*y + 6*y^2,
6*x^2 + 7*x*y - y^2,
-x^2 + 7*x*y + 6*y^2,
6*x^2 + 5*x*y - 2*y^2,
-2*x^2 + 7*x*y + 3*y^2,
3*x^2 + 5*x*y - 4*y^2,
-4*x^2 + 3*x*y + 4*y^2,
4*x^2 + 5*x*y - 3*y^2,
-3*x^2 + 7*x*y + 2*y^2,
2*x^2 + 5*x*y - 6*y^2,
-6*x^2 + 7*x*y + y^2]
This is Example 6.10.7 of [BUVO2007]_::
sage: Q = BinaryQF(1, 8, -3)
sage: Q.cycle()
[x^2 + 8*x*y - 3*y^2,
3*x^2 + 4*x*y - 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
2*x^2 + 6*x*y - 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
3*x^2 + 8*x*y - y^2]
sage: Q.cycle(proper=True)
[x^2 + 8*x*y - 3*y^2,
-3*x^2 + 4*x*y + 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
-2*x^2 + 6*x*y + 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
-3*x^2 + 8*x*y + y^2]
sage: Q.cycle(proper=True) # should be the same as the previous one
[x^2 + 8*x*y - 3*y^2,
-3*x^2 + 4*x*y + 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
-2*x^2 + 6*x*y + 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
-3*x^2 + 8*x*y + y^2]
Try an example where a is negative::
sage: Q = BinaryQF(-1, 8, 3)
sage: Q.cycle(proper=True)
[-x^2 + 8*x*y + 3*y^2,
3*x^2 + 4*x*y - 5*y^2,
-5*x^2 + 6*x*y + 2*y^2,
2*x^2 + 6*x*y - 5*y^2,
-5*x^2 + 4*x*y + 3*y^2,
3*x^2 + 8*x*y - y^2]
"""
if not (self.is_indef() and self.is_reduced()):
raise ValueError("%s must be indefinite and reduced" % self)
if self.discriminant().is_square():
# Buchmann/Vollmer assume the discriminant to be non-square
raise NotImplementedError('computation of cycles is only '
'implemented for non-square discriminants')
if proper:
# Prop 6.10.5 in Buchmann Vollmer
            C = list(self.cycle(proper=False))  # make a copy
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test neurom.fst_get features'''
import os
import math
import numpy as np
from numpy.testing import assert_allclose
from nose import tools as nt
import neurom as nm
from neurom.core.types import NeuriteType
from neurom.core.population import Population
from neurom import (core, load_neurons, iter_neurites, iter_sections,
load_neuron, fst)
from neurom.fst import get as fst_get
from neurom.fst import NEURITEFEATURES
from neurom.fst import _neuritefunc as nf
from neurom.core.types import tree_type_checker as _is_type
from neurom.exceptions import NeuroMError
_PWD = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_PWD, '../../../test_data')
NRN_FILES = [os.path.join(DATA_PATH, 'h5/v1', f)
for f in ('Neuron.h5', 'Neuron_2_branch.h5', 'bio_neuron-001.h5')]
NRNS = load_neurons(NRN_FILES)
NRN = NRNS[0]
POP = Population(NRNS)
NEURITES = (NeuriteType.axon,
NeuriteType.apical_dendrite,
NeuriteType.basal_dendrite,
NeuriteType.all)
def assert_items_equal(a, b):
nt.eq_(sorted(a), sorted(b))
def assert_features_for_neurite(feat, neurons, expected, exact=True):
for neurite_type, expected_values in expected.items():
if neurite_type is None:
res_pop = fst_get(feat, neurons)
res = fst_get(feat, neurons[0])
else:
res_pop = fst_get(feat, neurons, neurite_type=neurite_type)
res = fst_get(feat, neurons[0], neurite_type=neurite_type)
if exact:
assert_items_equal(res_pop, expected_values)
else:
assert_allclose(res_pop, expected_values)
        # test for single neuron
if isinstance(res, np.ndarray):
# some features, ex: total_length return arrays w/ one element when
# called on a single neuron
nt.eq_(len(res), 1)
res = res[0]
if exact:
nt.eq_(res, expected_values[0])
else:
assert_allclose(res, expected_values[0])
def _stats(seq):
return np.min(seq), np.max(seq), np.sum(seq), np.mean(seq)
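
# The helper returns the four summary statistics (min, max, sum, mean) that the
# feature tests below compare against reference values; for example,
# _stats([1, 2, 3]) evaluates to (1, 3, 6, 2.0).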
def test_number_of_sections():
feat = 'number_of_sections'
expected = {None: [84, 42, 202],
NeuriteType.all: [84, 42, 202],
NeuriteType.axon: [21, 21, 179],
NeuriteType.apical_dendrite: [21, 0, 0],
NeuriteType.basal_dendrite: [42, 21, 23],
}
assert_features_for_neurite(feat, POP, expected)
def test_section_tortuosity_pop():
feat = 'section_tortuosity'
assert_allclose(_stats(fst_get(feat, POP)),
(1.0,
4.6571118550276704,
440.40884450374455,
1.3427098917797089))
assert_allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.all)),
(1.0,
4.6571118550276704,
440.40884450374455,
1.3427098917797089))
assert_allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.apical_dendrite)),
(1.0702760052031615,
1.5732825321954913,
26.919574286670883,
1.2818844898414707))
assert_allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.basal_dendrite)),
(1.042614577410971,
1.6742599832295344,
106.5960839640893,
1.239489348419643))
def test_section_tortuosity_nrn():
feat = 'section_tortuosity'
nt.ok_(np.allclose(_stats(fst_get(feat, NRN)),
(1.0702760052031612,
1.5732825321954911,
106.42449427885093,
1.2669582652244158)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.all)),
(1.0702760052031612,
1.5732825321954911,
106.42449427885093,
1.2669582652244158)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.apical_dendrite)),
(1.0702760052031612,
1.5732825321954911,
26.919574286670883,
1.2818844898414707)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.basal_dendrite)),
(1.0788578286296124,
1.5504287518256337,
51.540901640170489,
1.227164324765964)))
def test_number_of_segments():
feat = 'number_of_segments'
expected = {None: [840, 419, 5179],
NeuriteType.all: [840, 419, 5179],
NeuriteType.axon: [210, 209, 4508],
NeuriteType.apical_dendrite: [210, 0, 0],
NeuriteType.basal_dendrite: [420, 210, 671],
}
assert_features_for_neurite(feat, POP, expected)
def test_number_of_neurites_pop():
feat = 'number_of_neurites'
expected = {None: [4, 2, 4],
NeuriteType.all: [4, 2, 4],
NeuriteType.axon: [1, 1, 1],
NeuriteType.apical_dendrite: [1, 0, 0],
NeuriteType.basal_dendrite: [2, 1, 3],
}
assert_features_for_neurite(feat, POP, expected)
def test_number_of_bifurcations_pop():
feat = 'number_of_bifurcations'
expected = {None: [40, 20, 97],
NeuriteType.all: [40, 20, 97],
NeuriteType.axon: [10, 10, 87],
NeuriteType.apical_dendrite: [10, 0, 0],
NeuriteType.basal_dendrite: [20, 10, 10],
}
assert_features_for_neurite(feat, POP, expected)
def test_number_of_forking_points_pop():
feat = 'number_of_forking_points'
expected = {None: [40, 20, 98],
NeuriteType.all: [40, 20, 98],
NeuriteType.axon: [10, 10, 88],
NeuriteType.apical_dendrite: [10, 0, 0],
NeuriteType.basal_dendrite: [20, 10, 10],
}
assert_features_for_neurite(feat, POP, expected)
def test_number_of_terminations_pop():
feat = 'number_of_terminations'
expected = {None: [44, 22, 103],
NeuriteType.all: [44, 22, 103],
NeuriteType.axon: [11, 11, 90],
NeuriteType.apical_dendrite: [11, 0, 0],
NeuriteType.basal_dendrite: [22, 11, 13],
}
assert_features_for_neurite(feat, POP, expected)
def test_total_length_pop():
feat = 'total_length'
expected = {None: [840.68522362011538, 418.83424432013902, 13250.825773939932],
NeuriteType.all: [840.68522362011538, 418.83424432013902, 13250.825773939932],
NeuriteType.axon: [207.8797736031714, 207.81088341560977, 11767.156115224638],
NeuriteType.apical_dendrite: [214.37302709169489, 0, 0],
NeuriteType.basal_dendrite: [418.43242292524889, 211.02336090452931, 1483.6696587152967],
}
assert_features_for_neurite(feat, POP, expected, exact=False)
def test_segment_radii_pop():
feat = 'segment_radii'
nt.ok_(np.allclose(_stats(fst_get(feat, POP)),
(0.079999998211860657,
1.2150000333786011,
1301.9191725363567,
0.20222416473071708)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.all)),
(0.079999998211860657,
1.2150000333786011,
1301.9191725363567,
0.20222416473071708)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.apical_dendrite)),
(0.13142434507608414,
1.0343990325927734,
123.41135908663273,
0.58767313850777492)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.basal_dendrite)),
(0.079999998211860657,
1.2150000333786011,
547.43900821779164,
0.42078324997524336)))
def test_segment_radii_nrn():
feat = 'segment_radii'
nt.ok_(np.allclose(_stats(fst_get(feat, NRN)),
(0.12087134271860123,
1.0343990325927734,
507.01994501426816,
0.60359517263603357)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.all)),
(0.12087134271860123,
1.0343990325927734,
507.01994501426816,
0.60359517263603357)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.apical_dendrite)),
(0.13142434507608414,
1.0343990325927734,
123.41135908663273,
0.58767313850777492)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.basal_dendrite)),
(0.14712842553853989,
1.0215770602226257,
256.71241207793355,
0.61122002875698467)))
def test_segment_meander_angles_pop():
feat = 'segment_meander_angles'
nt.ok_(np.allclose(_stats(fst_get(feat, POP)),
(0.0, 3.1415926535897931, 14637.977670710961, 2.3957410263029395)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.all)),
(0.0, 3.1415926535897931, 14637.977670710961, 2.3957410263029395)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.apical_dendrite)),
(0.326101999292573, 3.0939261437163492, 461.98168732359414, 2.4443475519766884)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.basal_dendrite)),
(0.0, 3.1415926535897931, 2926.2411975307768, 2.4084289691611334)))
def test_segment_meander_angles_nrn():
feat = 'segment_meander_angles'
nt.ok_(np.allclose(_stats(fst_get(feat, NRN)),
(0.326101999292573, 3.129961675751181, 1842.351779156608, 2.4369732528526562)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.all)),
(0.326101999292573, 3.129961675751181, 1842.351779156608, 2.4369732528526562)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.apical_dendrite)),
(0.326101999292573, 3.0939261437163492, 461.98168732359414, 2.4443475519766884)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.basal_dendrite)),
(0.47318725279312024, 3.129961675751181, 926.33847274926438, 2.4506308802890593)))
def test_neurite_volumes_nrn():
feat = 'neurite_volumes'
nt.ok_(np.allclose(_stats(fst_get(feat, NRN)),
(271.94122143951864, 281.24754646913954, 1104.9077698137021, 276.22694245342552)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.all)),
(271.94122143951864, 281.24754646913954, 1104.9077698137021, 276.22694245342552)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.axon)),
(276.73860261723024, 276.73860261723024, 276.73860261723024, 276.73860261723024)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.basal_dendrite)),
(274.98039928781355, 281.24754646913954, 556.22794575695309, 278.11397287847655)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.apical_dendrite)),
(271.94122143951864, 271.94122143951864, 271.94122143951864, 271.94122143951864)))
def test_neurite_volumes_pop():
feat = 'neurite_volumes'
nt.ok_(np.allclose(_stats(fst_get(feat, POP)),
(28.356406629821159, 281.24754646913954, 2249.4613918388391, 224.9461391838839)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.all)),
(28.356406629821159, 281.24754646913954, 2249.4613918388391, 224.9461391838839)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.axon)),
(276.58135508666612, 277.5357232437392, 830.85568094763551, 276.95189364921185)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.basal_dendrite)),
(28.356406629821159, 281.24754646913954, 1146.6644894516851, 191.1107482419475)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.apical_dendrite)),
(271.94122143951864, 271.94122143951864, 271.94122143951864, 271.94122143951864)))
def test_neurite_density_nrn():
feat = 'neurite_volume_density'
nt.ok_(np.allclose(_stats(fst_get(feat, NRN)),
(0.24068543213643726, 0.52464681266899216, 1.4657913638494682, 0.36644784096236704)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.all)),
(0.24068543213643726, 0.52464681266899216, 1.4657913638494682, 0.36644784096236704)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.axon)),
(0.26289304906104355, 0.26289304906104355, 0.26289304906104355, 0.26289304906104355)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.basal_dendrite)),
(0.24068543213643726, 0.52464681266899216, 0.76533224480542938, 0.38266612240271469)))
nt.ok_(np.allclose(_stats(fst_get(feat, NRN, neurite_type=NeuriteType.apical_dendrite)),
(0.43756606998299519, 0.43756606998299519, 0.43756606998299519, 0.43756606998299519)))
def test_neurite_density_pop():
feat = 'neurite_volume_density'
nt.ok_(np.allclose(_stats(fst_get(feat, POP)),
(6.1847539631150784e-06, 0.52464681266899216, 1.9767794901940539, 0.19767794901940539)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.all)),
(6.1847539631150784e-06, 0.52464681266899216, 1.9767794901940539, 0.19767794901940539)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.axon)),
(6.1847539631150784e-06, 0.26465213325053372, 0.5275513670655404, 0.17585045568851346)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.basal_dendrite)),
(0.00034968816544949771, 0.52464681266899216, 1.0116620531455183, 0.16861034219091972)))
nt.ok_(np.allclose(_stats(fst_get(feat, POP, neurite_type=NeuriteType.apical_dendrite)),
(0.43756606998299519, 0.43756606998299519, 0.43756606998299519, 0.43756606998299519)))
def test_segment_meander_angles_single_section():
class Mock(object):
pass
feat = 'segment_meander_angles'
sec = core.Section(np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[2, 1, 0],
[2, 2, 0]]))
nrt = core.Neurite(sec)
nrn = Mock()
nrn.neurites = [nrt]
nrn.soma = None
pop = core.Population([nrn])
ref = [math.pi / 2, math.pi / 2, math.pi / 2]
nt.eq_(ref, fst_get(feat, nrt).tolist())
nt.eq_(ref, fst_get(feat, nrn).tolist())
nt.eq_(ref, fst_get(feat, pop).tolist())
def test_neurite_features_accept_single_tree():
features = NEURITEFEATURES.keys()
for f in features:
ret = fst_get(f, NRN.neurites[0])
nt.ok_(ret.dtype.kind in ('i', 'f'))
nt.ok_(len(ret) or len(ret) == 0) # make sure that len() resolves
def test_register_neurite_feature_nrns():
def npts(neurite):
return len(neurite.points)
def vol(neurite):
return neurite.volume
fst.register_neurite_feature('foo', npts)
n_points_ref = [len(n.points) for n in iter_neurites(NRNS)]
n_points = fst.get('foo', NRNS)
assert_items_equal(n_points, n_points_ref)
# test neurite type filtering
n_points_ref = [len(n.points) for n in iter_neurites(NRNS, filt=_is_type(NeuriteType.axon))]
n_points = fst.get('foo', NRNS, neurite_type=NeuriteType.axon)
assert_items_equal(n_points, n_points_ref)
fst.register_neurite_feature('bar', vol)
n_volume_ref = [n.volume for n in iter_neurites(NRNS)]
n_volume = fst.get('bar', NRNS)
assert_items_equal(n_volume, n_volume_ref)
# test neurite type filtering
n_volume_ref = [n.volume for n in iter_neurites(NRNS, filt=_is_type(NeuriteType.axon))]
n_volume = fst.get('bar', NRNS, neurite_type=NeuriteType.axon)
assert_items_equal(n_volume, n_volume_ref)
def test_register_neurite_feature_pop():
def npts(neurite):
return len(neurite.points)
def vol(neurite):
return neurite.volume
fst.register_neurite_feature('foo', npts)
n_points_ref = [len(n.points) for n in iter_neurites(POP)]
n_points = fst.get('foo', POP)
assert_items_equal(n_points, n_points_ref)
# test neurite type filtering
n_points_ref = [len(n.points) for n in iter_neurites(POP,
filt=_is_type(NeuriteType.basal_dendrite))]
n_points = fst.get('foo', POP, neurite_type=NeuriteType.basal_dendrite)
assert_items_equal(n_points, n_points_ref)
fst.register_neurite_feature('bar', vol)
n_volume_ref = [n.volume for n in iter_neurites(POP)]
n_volume = fst.get('bar', POP)
assert_items_equal(n_volume, n_volume_ref)
# test neurite type filtering
n_volume_ref = [n.volume for n in iter_neurites(POP, filt=_is_type(NeuriteType.basal_dendrite))]
n_volume = fst.get('bar', POP, neurite_type=NeuriteType.basal_dendrite)
assert_items_equal(n_volume, n_volume_ref)
@nt.raises(NeuroMError)
def test_register_existing_feature_raises():
fst.register_neurite_feature('total_length', lambda n: None)
_PWD = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_PWD, '../../../test_data')
SWC_PATH = os.path.join(DATA_PATH, 'swc')
NEURON_PATH = os.path.join(SWC_PATH, 'Neuron.swc')
NEURON = load_neuron(NEURON_PATH)
def test_section_lengths():
ref_seclen = [n.length for n in iter_sections(NEURON)]
seclen = fst_get('section_lengths', NEURON)
nt.eq_(len(seclen), 84)
assert_allclose(seclen, ref_seclen)
def test_section_lengths_axon():
s = fst_get('section_lengths', NEURON, neurite_type=NeuriteType.axon)
nt.eq_(len(s), 21)
def test_total_lengths_basal():
s = fst_get('section_lengths', NEURON, neurite_type=NeuriteType.basal_dendrite)
nt.eq_(len(s), 42)
def test_section_lengths_apical():
s = fst_get('section_lengths', NEURON, neurite_type=NeuriteType.apical_dendrite)
nt.eq_(len(s), 21)
def test_total_length_per_neurite_axon():
tl = fst_get('total_length_per_neurite', NEURON, neurite_type=NeuriteType.axon)
nt.eq_(len(tl), 1)
assert_allclose(tl, (207.87975221))
def test_total_length_per_neurite_basal():
tl = fst_get('total_length_per_neurite', NEURON, neurite_type=NeuriteType.basal_dendrite)
nt.eq_(len(tl), 2)
assert_allclose(tl, (211.11737442, 207.31504202))
def test_total_length_per_neurite_apical():
tl = fst_get('total_length_per_neurite', NEURON, neurite_type=NeuriteType.apical_dendrite)
__all__ = [
'RunExposure',
'RunFmTest',
]
import tempfile
import os
import csv
import shutil
from itertools import chain
import pandas as pd
from ..base import ComputationStep
from ..generate.keys import GenerateKeysDeterministic
from ..generate.files import GenerateFiles
from ..generate.losses import GenerateLossesDeterministic
from ...preparation.il_inputs import get_oed_hierarchy
from ...utils.exceptions import OasisException
from ...utils.data import (
get_dataframe,
print_dataframe,
)
from ...utils.inputs import str2bool
from ...utils.coverages import SUPPORTED_COVERAGE_TYPES
from ...utils.defaults import (
KTOOLS_ALLOC_IL_DEFAULT,
KTOOLS_ALLOC_RI_DEFAULT,
KTOOLS_ALLOC_FM_MAX,
OASIS_FILES_PREFIXES,
find_exposure_fp
)
class RunExposure(ComputationStep):
"""
Generates insured losses from preexisting Oasis files with specified
loss factors (loss % of TIV).
"""
step_params = [
{'name': 'src_dir', 'flag':'-s', 'is_path': True, 'pre_exist': True, 'help': ''},
{'name': 'run_dir', 'flag':'-r', 'is_path': True, 'pre_exist': False, 'help': ''},
{'name': 'output_file', 'flag':'-f', 'is_path': True, 'pre_exist': False, 'help': '', 'type': str},
{'name': 'loss_factor', 'flag':'-l', 'type' :float, 'nargs':'+', 'help': '', 'default': [1.0]},
{'name': 'ktools_alloc_rule_il', 'flag':'-a', 'default': KTOOLS_ALLOC_IL_DEFAULT, 'type':int, 'help': 'Set the fmcalc allocation rule used in direct insured loss'},
{'name': 'ktools_alloc_rule_ri', 'flag':'-A', 'default': KTOOLS_ALLOC_RI_DEFAULT, 'type':int, 'help': 'Set the fmcalc allocation rule used in reinsurance'},
{'name': 'output_level', 'flag':'-o', 'help': 'Keys files output format', 'choices':['item', 'loc', 'pol', 'acc', 'port'], 'default': 'item'},
{'name': 'num_subperils', 'flag':'-p', 'default': 1, 'type':int, 'help': 'Set the number of subperils returned by deterministic key generator'},
{'name': 'coverage_types', 'type' :int, 'nargs':'+', 'default': list(v['id'] for v in SUPPORTED_COVERAGE_TYPES.values()), 'help': 'Select List of supported coverage_types [1, .. ,4]'},
{'name': 'fmpy', 'default': True, 'type': str2bool, 'const':True, 'nargs':'?', 'help': 'use fmcalc python version instead of c++ version'},
{'name': 'fmpy_low_memory', 'default': False, 'type': str2bool, 'const':True, 'nargs':'?', 'help': 'use memory map instead of RAM to store loss array (may decrease performance but reduce RAM usage drastically)'},
{'name': 'fmpy_sort_output', 'default': True, 'type': str2bool, 'const': True, 'nargs': '?', 'help': 'order fmpy output by item_id'},
{'name': 'stream_type', 'flag':'-t', 'default': 2, 'type':int, 'help': 'Set the IL input stream type, 2 = default loss stream, 1 = deprecated cov/item stream'},
{'name': 'net_ri', 'default': True},
{'name': 'include_loss_factor', 'default': True},
{'name': 'print_summary', 'default': True},
]
def _check_alloc_rules(self):
alloc_ranges = {
'ktools_alloc_rule_il': KTOOLS_ALLOC_FM_MAX,
'ktools_alloc_rule_ri': KTOOLS_ALLOC_FM_MAX}
for rule in alloc_ranges:
alloc_val = getattr(self, rule)
if (alloc_val < 0) or (alloc_val > alloc_ranges[rule]):
raise OasisException(f'Error: {rule}={alloc_val} - Not within valid range [0..{alloc_ranges[rule]}]')
def run(self):
tmp_dir = None
src_dir = self.src_dir if self.src_dir else os.getcwd()
if self.run_dir:
run_dir = self.run_dir
else:
tmp_dir = tempfile.TemporaryDirectory()
run_dir = tmp_dir.name
include_loss_factor = not (len(self.loss_factor) == 1)
self._check_alloc_rules()
try:
location_fp = find_exposure_fp(src_dir, 'loc')
accounts_fp = find_exposure_fp(src_dir, 'acc')
except IndexError as e:
raise OasisException(
f'No location or account file found in source directory "{src_dir}" - '
'files named `location.*` and `account.*` are expected', e
)
ri_info_fp = accounts_fp and find_exposure_fp(src_dir, 'info', required = False)
ri_scope_fp = ri_info_fp and find_exposure_fp(src_dir, 'scope', required=False)
if ri_scope_fp is None: ri_info_fp = None # Need both files for ri
il = bool(accounts_fp)
ril = bool(ri_scope_fp)
self.logger.debug('\nRunning deterministic losses (GUL=True, IL={}, RIL={})\n'.format(il, ril))
if not os.path.exists(run_dir):
os.makedirs(run_dir)
# 1. Create Deterministic keys file
keys_fp = os.path.join(run_dir, 'keys.csv')
GenerateKeysDeterministic(
oed_location_csv=location_fp,
keys_data_csv=keys_fp,
num_subperils=self.num_subperils,
supported_oed_coverage_types=self.coverage_types,
).run()
# 2. Start Oasis files generation
GenerateFiles(
oasis_files_dir=run_dir,
oed_location_csv=location_fp,
oed_accounts_csv=accounts_fp,
oed_info_csv=ri_info_fp,
oed_scope_csv=ri_scope_fp,
keys_data_csv=keys_fp,
).run()
# 3. Run Deterministic Losses
losses = GenerateLossesDeterministic(
oasis_files_dir=run_dir,
output_dir=os.path.join(run_dir, 'output'),
include_loss_factor=include_loss_factor,
loss_factor=self.loss_factor,
num_subperils=self.num_subperils,
net_ri=self.net_ri,
ktools_alloc_rule_il=self.ktools_alloc_rule_il,
ktools_alloc_rule_ri=self.ktools_alloc_rule_ri,
fmpy=self.fmpy,
fmpy_low_memory=self.fmpy_low_memory,
fmpy_sort_output=self.fmpy_sort_output,
il_stream_type=self.stream_type,
).run()
guls_df = losses['gul']
ils_df = losses['il']
rils_df = losses['ri']
# Read in the summary map
summaries_df = get_dataframe(src_fp=os.path.join(run_dir, 'fm_summary_map.csv'))
guls_df.to_csv(path_or_buf=os.path.join(run_dir, 'guls.csv'), index=False, encoding='utf-8')
guls_df.rename(columns={'loss': 'loss_gul'}, inplace=True)
guls_df = guls_df.merge(
right=summaries_df,
left_on=["item_id"],
right_on=["agg_id"]
)
if include_loss_factor:
join_cols = ["event_id", "output_id", "loss_factor_idx"]
else:
join_cols = ["event_id", "output_id"]
if il:
ils_df.to_csv(path_or_buf=os.path.join(run_dir, 'ils.csv'), index=False, encoding='utf-8')
ils_df.rename(columns={'loss': 'loss_il'}, inplace=True)
all_losses_df = guls_df.merge(
how='left',
right=ils_df,
on=join_cols,
suffixes=["_gul", "_il"]
)
if ril:
rils_df.to_csv(path_or_buf=os.path.join(run_dir, 'rils.csv'), index=False, encoding='utf-8')
rils_df.rename(columns={'loss': 'loss_ri'}, inplace=True)
all_losses_df = all_losses_df.merge(
how='left',
right=rils_df,
on=join_cols
)
oed_hierarchy = get_oed_hierarchy()
portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
loc_num = oed_hierarchy['locnum']['ProfileElementName'].lower()
policy_num = oed_hierarchy['polnum']['ProfileElementName'].lower()
if self.output_level == 'port':
summary_cols = [portfolio_num]
elif self.output_level == 'acc':
summary_cols = [portfolio_num, acc_num]
elif self.output_level == 'pol':
summary_cols = [portfolio_num, acc_num, policy_num]
elif self.output_level == 'loc':
summary_cols = [portfolio_num, acc_num, loc_num]
elif self.output_level == 'item':
summary_cols = [
'output_id', portfolio_num, acc_num, loc_num, policy_num,
'coverage_type_id']
if include_loss_factor:
group_by_cols = summary_cols + ['loss_factor_idx']
else:
group_by_cols = summary_cols
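# Illustrative note: output_level='pol' aggregates losses per (portfolio, account, policy),
# while 'item' keeps one row per output_id and coverage type; when several loss factors are
# supplied, loss_factor_idx is added to the grouping so each factor is reported separately.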
guls_df = guls_df.loc[:, group_by_cols + ['loss_gul']]
if not il and not ril:
all_loss_cols = group_by_cols + ['loss_gul']
all_losses_df = guls_df.loc[:, all_loss_cols]
all_losses_df.drop_duplicates(keep=False, inplace=True)
elif not ril:
all_loss_cols = group_by_cols + ['loss_gul', 'loss_il']
all_losses_df = all_losses_df.loc[:, all_loss_cols]
summary_gul_df = pd.DataFrame(
{'loss_gul': guls_df.groupby(group_by_cols)['loss_gul'].sum()}).reset_index()
summary_il_df = pd.DataFrame(
{'loss_il': all_losses_df.groupby(group_by_cols)['loss_il'].sum()}).reset_index()
all_losses_df = summary_gul_df.merge(how='left', right=summary_il_df, on=group_by_cols)
else:
all_loss_cols = group_by_cols + ['loss_gul', 'loss_il', 'loss_ri']
all_losses_df = all_losses_df.loc[:, all_loss_cols]
summary_gul_df = pd.DataFrame(
{'loss_gul': guls_df.groupby(group_by_cols)['loss_gul'].sum()}).reset_index()
summary_il_df = pd.DataFrame(
{'loss_il': all_losses_df.groupby(group_by_cols)['loss_il'].sum()}).reset_index()
summary_ri_df = pd.DataFrame(
{'loss_ri': all_losses_df.groupby(group_by_cols)['loss_ri'].sum()}).reset_index()
all_losses_df = summary_gul_df.merge(how='left', right=summary_il_df, on=group_by_cols)
all_losses_df = all_losses_df.merge(how='left', right=summary_ri_df, on=group_by_cols)
for i in range(len(self.loss_factor)):
if include_loss_factor:
total_gul = guls_df[guls_df.loss_factor_idx == i].loss_gul.sum()
else:
total_gul = guls_df.loss_gul.sum()
if not il and not ril:
all_loss_cols = all_loss_cols + ['loss_gul']
all_losses_df = guls_df.loc[:, all_loss_cols]
all_losses_df.drop_duplicates(keep=False, inplace=True)
header = \
'Losses (loss factor={:.2%}; total gul={:,.00f})'.format(
self.loss_factor[i],
total_gul)
elif not ril:
if include_loss_factor:
total_il = ils_df[ils_df.loss_factor_idx == i].loss_il.sum()
else:
total_il = ils_df.loss_il.sum()
header = \
'Losses (loss factor={:.2%}; total gul={:,.00f}; total il={:,.00f})'.format(
self.loss_factor[i],
total_gul, total_il)
else:
if include_loss_factor:
total_il = ils_df[ils_df.loss_factor_idx == i].loss_il.sum()
total_ri_net = rils_df[rils_df.loss_factor_idx == i].loss_ri.sum()
else:
total_il = ils_df.loss_il.sum()
total_ri_net = rils_df.loss_ri.sum()
total_ri_ceded = total_il - total_ri_net
header = \
'Losses (loss factor={:.2%}; total gul={:,.00f}; total il={:,.00f}; total ri ceded={:,.00f})'.format(
self.loss_factor[i],
total_gul, total_il, total_ri_ceded)
# Convert output cols to strings for formatting
for c in group_by_cols:
all_losses_df[c] = all_losses_df[c].apply(str)
if self.print_summary:
cols_to_print = all_loss_cols.copy()
if False:
cols_to_print.remove('loss_factor_idx')
if include_loss_factor:
print_dataframe(
all_losses_df[all_losses_df.loss_factor_idx == str(i)],
frame_header=header,
cols=cols_to_print)
else:
print_dataframe(
all_losses_df,
frame_header=header,
cols=cols_to_print)
if self.output_file:
all_losses_df.to_csv(self.output_file, index=False, encoding='utf-8')
if tmp_dir:
tmp_dir.cleanup()
return (il, ril)
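# Hedged usage sketch (not part of the original module): RunExposure follows the same
# construct-then-run pattern used above for GenerateKeysDeterministic / GenerateFiles /
# GenerateLossesDeterministic, assuming the ComputationStep base accepts the declared
# step_params as keyword arguments. The paths and loss factors below are placeholders.
def _example_run_exposure():
    step = RunExposure(
        src_dir='/path/to/oed_files',   # directory containing location.* / account.* files
        output_file='/tmp/losses.csv',  # hypothetical output path
        loss_factor=[0.5, 1.0],         # run at 50% and 100% of TIV
        output_level='loc',
    )
    il, ril = step.run()                # flags: were IL and RI levels generated?
    return il, ril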
class RunFmTest(ComputationStep):
"""
Runs an FM test case and validates generated
losses against expected losses.
Only use 'update_expected' for debugging: it replaces the expected results
file with the newly generated output.
"""
step_params = [
{'name': 'test_case_name', 'flag': '-c', 'type': str, 'help': 'Runs a specific test sub-directory from "test_case_dir". If not set then run all tests found.'},
{'name': 'list_tests', 'flag': '-l', 'action': 'store_true', 'help': 'List the valid test cases in the test directory rather than running'},
{'name': 'test_case_dir', 'flag': '-t', 'default': os.getcwd(), 'is_path': True, 'pre_exist': True, 'help': 'Test directory - should contain test directories containing OED files and expected results'},
{'name': 'run_dir', 'flag': '-r', 'help': 'Run directory - where files should be generated. If not set temporary files will not be saved.'},
{'name': 'num_subperils', 'flag': '-p', 'default': 1, 'type':int, 'help': 'Set the number of subperils returned by deterministic key generator'},
{'name': 'test_tolerance', 'type': float, 'help': 'Relative tolerance between expected values and results, default is "1e-4" or 0.0001', 'default': 1e-4},
{'name': 'fmpy', 'default': True, 'type': str2bool, 'const': True, 'nargs': '?', 'help': 'use fmcalc python version instead of c++ version'},
{'name': 'fmpy_low_memory', 'default': False, 'type': str2bool, 'const': True, 'nargs': '?', 'help': 'use memory map instead of RAM to store loss array (may decrease performance but reduce RAM usage drastically)'},
{'name': 'fmpy_sort_output', 'default': True, 'type': str2bool, 'const': True, 'nargs': '?', 'help': 'order fmpy output by item_id'},
{'name': 'update_expected', 'default': False},
{'name': 'expected_output_dir', 'default': "expected"},
]
def search_test_cases(self):
case_names = []
for test_case in os.listdir(path=self.test_case_dir):
if os.path.exists(
os.path.join(self.test_case_dir, test_case, self.expected_output_dir)
):
case_names.append(test_case)
case_names.sort()
return case_names, len(case_names)
def _case_dir_is_valid_test(self):
src_contents = [fn.lower() for fn in os.listdir(self.test_case_dir)]
return all(fn in src_contents for fn in ('location.csv', 'account.csv', 'expected'))
def run(self):
# Run test case given on CLI
if self.test_case_name:
return self.execute_test_case(self.test_case_name)
# If 'test_case_dir' is a valid test run that dir directly
if self._case_dir_is_valid_test():
return self.execute_test_case('')
# Search for valid cases in sub-dirs and run all found
case_names, case_num = self.search_test_cases()
# If '--list-tests' is selected print found cases and exit
if self.list_tests:
for name in case_names:
self.logger.info(name)
exit(0)
if case_num < 1:
raise OasisException(f'No valid FM test cases found in "{self.test_case_dir}"')
else:
# If test_case not selected run all cases
self.logger.info(f"Running: {case_num} Tests from '{self.test_case_dir}'")
self.logger.info(f'Test names: {case_names}')
failed_tests = []
exit_status = 0
for case in case_names:
test_result = self.execute_test_case(case)
if not test_result:
failed_tests.append(case)
exit_status = 1
if len(failed_tests) == 0:
self.logger.info("All tests passed")
else:
self.logger.info("{} test failed: ".format(len(failed_tests)))
'''
screen_kmers.py
Generate kmers of the target sequence and screen out ones that don't pass
the sequence composition rules, that overlap Ns, and that don't fall within
specified Tm range.
'''
from Bio import SeqIO
import sys
import os
import pandas as pd
import numpy as np
from Bio.SeqUtils import MeltingTemp as mt
import math
import primer3
import probe_helpers
import argparse
from collections import defaultdict
import logging
os.environ['NUMEXPR_NUM_THREADS'] = '8'
def Tm_window_filter(df, min_tm, max_tm):
'''Select probes with Tms between min and max'''
sorted_df = df.sort_values('Tm')
passed_indices = np.searchsorted(sorted_df['Tm'].values, [min_tm, max_tm])
passed_df = sorted_df.iloc[passed_indices[0]: passed_indices[1]].copy()
return passed_df
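# Illustrative sketch (not used by the pipeline): Tm_window_filter on a toy probe table.
# Only the 'Tm' column is required; the values are made up.
def _example_tm_window_filter():
    toy = pd.DataFrame({'Tm': [55.0, 61.2, 67.8, 72.4]})
    return Tm_window_filter(toy, min_tm=60, max_tm=70)  # keeps the 61.2 and 67.8 probes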
def sequence_composition_filter(df, min_gc, max_gc, rules, filter = True):
'''
Remove sequences with homopolymeric repeats and other issues described in the rules.
If filter == True, then it will replace the probe_df with filtered probes only.
Set to False if you want to score the probes but not filter.
'''
df['first_half'] = df['length'].apply(lambda x: math.floor(x/2))
df['GC_content'] = df['sequence'].apply(lambda x: (x.count('G') + x.count('C'))/len(x))
df['A_content'] = df['sequence'].apply(lambda x: x.count('A')/len(x))
df['C_content'] = df['sequence'].apply(lambda x: x.count('C')/len(x))
#Set to True if the probe has this characteristic
df['GC_content_rule'] = df.apply(lambda x: min_gc > x['GC_content'] or x['GC_content'] > max_gc, axis = 1)
df['A_composition_rule'] = df.apply(lambda x: x['A_content'] > 0.28, axis = 1)
df['C_composition_rule'] = df.apply(lambda x: 0.22 > x['C_content'] or x['C_content'] > 0.28, axis = 1)
df['4xA_stack_rule'] = df['sequence'].apply(lambda x: 'AAAA' in x)
df['4xC_stack_rule'] = df.apply(lambda x: 'CCCC' in x['sequence'][0:x['first_half']], axis = 1)
df['earlyCs_rule'] = df.apply(lambda x: np.any([(x['sequence'][i:i+6].count('C') >= 4) for i in range(0, x['first_half'] - 5)]), axis = 1)
df['any5_rule'] = df['sequence'].apply(lambda x: np.any([N*5 in x for N in ['A','T','C','G']]))
df['passed_sequence'] = df.apply(lambda row: not row[rules].any(), axis = 1)
if filter == True:
df = df[df['passed_sequence']].copy()
return df
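# Illustrative sketch (not used by the pipeline): score two toy sequences against a subset of
# the composition rules without filtering them out. 'sequence' and 'length' are the only
# input columns the function needs.
def _example_sequence_composition_filter():
    toy = pd.DataFrame({'sequence': ['ATGCATGCATGC', 'AAAACCCCGGGG'], 'length': [12, 12]})
    return sequence_composition_filter(toy, min_gc=0.4, max_gc=0.6,
                                       rules=['GC_content_rule', '4xA_stack_rule'],
                                       filter=False)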
def scan_sequence(target_name, seq, min_len, max_len, Na_conc):
'''
Return probes from sequence for each length.
'''
lens_to_test = range(min_len, max_len + 1)
all_probes = []
for probe_len in lens_to_test:
these_probes = Tm_from_position(seq, probe_len, Na_conc)
all_probes.append(these_probes)
df = pd.concat(all_probes)
df['target_name'] = target_name
#do not change the index in any of the subsequent steps -- used as probe identifier
df.reset_index(drop = True, inplace = True)
df['unique_id'] = df.index.map(lambda x: '%s_%s' % (target_name, x))
df.set_index('unique_id', inplace = True)
return df
def Tm_from_position(seq, length, salt_conc):
'''
Calculate Tm of the probe of indicated length starting at each possible position.
Use Biopython's MeltingTemp functions:
Use probe conc (dnac1) = 250 nM (typical concentration for FISH experiments).
Use template conc (dnac2) = 0 to assume that the probe is in excess.
This procedure goes from left to right along the target (RNA) sequence.
Reverse complement to get the probe (DNA) sequence.
'''
probe_list = []
target_len = len(seq)
for i in range(0, target_len - length + 1):
target_seq = seq[i: i + length]
probe_seq = str(target_seq.reverse_complement())
if 'N' in probe_seq:
#Add nan for probes containing Ns
probe_list.append([i, i + length, length, np.nan, str(target_seq), probe_seq])
else:
#using the R_DNA NN table requires the RNA sequence
Tm = mt.Tm_NN(target_seq.transcribe(), nn_table = mt.R_DNA_NN1, Na = salt_conc, saltcorr = 4, dnac1 = 250, dnac2 = 0)
probe_list.append([i, i + length, length, Tm, str(target_seq), probe_seq])
probe_df = pd.DataFrame(probe_list, columns = ['start', 'end', 'length', 'Tm', 'target_sequence', 'sequence'])
#adjust to 1-based, inclusive indices for output
probe_df['target_start'] = probe_df['start'] + 1
probe_df['target_end'] = probe_df['end']
return probe_df
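# Illustrative sketch (not used by the pipeline): run Tm_from_position on a short placeholder
# DNA target. Requires Biopython's Seq for reverse_complement()/transcribe(), mirroring how
# scan_sequence supplies the target sequence.
def _example_tm_from_position():
    from Bio.Seq import Seq
    target = Seq('ATGGCTACGTTAGCCTAGGCATCG')  # placeholder target, 24 nt
    return Tm_from_position(target, length=20, salt_conc=300)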
def quantile_filter(df, original_df, window_size, Tm_quantile, min_probe_len, max_probe_len, filter = True):
'''
Calculate a rolling Tm quantile and determine if the probe passes the
quantile threshold. Set the Tms of the failed probes to NaN so that
the quantile will be calculated relative to original position in the sequence
but the failed probes will not be included in the calculation.
'''
quantile_df = original_df.sort_values('start', ascending = True)
failed_idx = quantile_df.index.difference(df.index)
quantile_df.loc[failed_idx, 'Tm'] = np.nan
#window size is adjusted here to be scaled to the number of probes per position
window_size = window_size * (max_probe_len - min_probe_len + 1)
#Calculate the Tm of the provided quantile at each position
quantile_df['rolling_Tm_quantile_co'] = quantile_df['Tm'].rolling(
window_size, center = True, min_periods = 1).quantile(Tm_quantile)
quantile_df['passed_Tm_quantile'] = quantile_df['Tm'] > quantile_df['rolling_Tm_quantile_co']
df['rolling_Tm_quantile_co'] = quantile_df['rolling_Tm_quantile_co']
df['passed_Tm_quantile'] = quantile_df['passed_Tm_quantile']
if filter == True:
df = df[df['passed_Tm_quantile']].copy()
return df
def structure_filter(df, hairpin_min, dimer_min, Na_conc, filter = True):
'''
Use primer3 to calculate energy of hairpin structure.
https://libnano.github.io/primer3-py/quickstart.html#thermodynamic-analysis
'''
df['hairpin_dG'] = df['sequence'].apply(lambda x: primer3.calcHairpin(x, mv_conc = Na_conc).dg/1000)
df['homodimer_dG'] = df['sequence'].apply(lambda x: primer3.calcHomodimer(x, mv_conc = Na_conc).dg/1000)
df['passed_structure'] = (df['hairpin_dG'] >= hairpin_min) & (df['homodimer_dG'] >= dimer_min)
if filter == True:
df = df[df['passed_structure']].copy()
return df
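# Illustrative sketch (not used by the pipeline): structure_filter on a single toy probe,
# scoring hairpin/homodimer dG (kcal/mol) without dropping anything.
def _example_structure_filter():
    toy = pd.DataFrame({'sequence': ['ATGCATGCATGCATGCATGCATGCAT']})
    return structure_filter(toy, hairpin_min=-3, dimer_min=-10, Na_conc=300, filter=False)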
def excluded_nt_filter(df, min_probe_len, max_probe_len, excluded = None, excluded_consensus = None, filter = True):
'''
Remove probes in user-provided excluded regions, for example regions of high
tertiary structure. Regions can come from `excluded` (calculated in the pipeline relative to
one transcript) or from `excluded_consensus` (relative to the pre-calculated
consensus sequence).
'''
ll = []
for ranges in [excluded, excluded_consensus]:
if probe_helpers.range_defined(ranges):
#range is already 0-based, but closed interval.
region_dict = defaultdict(set)
for i in ranges:
logging.info('Excluded nts %s-%s in consensus sequence.' % (i[0]+1, i[1]+1))
for j in range(min_probe_len, max_probe_len + 1):
#+1 because we want reads that include that region
#mask any of the start positions that would overlap the excluded region
masked_start = i[0] - j + 1
if masked_start < 0:
masked_start = 0
#endpt is excluded. +1 to cover it.
masked_end = i[1] + 1
new_range = set(range(masked_start, masked_end))
region_dict[j].update(new_range)
else:
region_dict = {}
ll.append(region_dict)
#combine the excluded dicts
bad_start_dict = {k: set() for k in set().union(*ll)}
for l in bad_start_dict:
bad_start_dict[l] = set().union(*[i[l] for i in ll if l in i])
#Check if potential probe start is masked:
df['passed_excluded'] = df.apply(lambda x: (x['start'] not in bad_start_dict[x['length']] if x['length'] in bad_start_dict else True), axis = 1)
if filter == True:
df = df[df['passed_excluded']].copy()
return df
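# Worked example of the start-position masking implemented above (standalone, 0-based
# coordinates): for an excluded region covering positions 10..12 and a probe length of 5,
# every start from 6 through 12 would place the probe over the excluded region.
def _example_masked_starts(excluded_start=10, excluded_end=12, probe_len=5):
    masked_start = max(excluded_start - probe_len + 1, 0)
    return set(range(masked_start, excluded_end + 1))  # {6, 7, ..., 12}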
def main(arglist):
#The sequence composition rules
default_rules = ['GC_content_rule', '4xA_stack_rule', '4xC_stack_rule', 'any5_rule']
possible_rules = ['GC_content_rule', 'A_composition_rule', 'C_composition_rule', '4xA_stack_rule', '4xC_stack_rule', 'earlyCs_rule', 'any5_rule']
parser = argparse.ArgumentParser()
#This set of args is generally provided by snakemake
parser.add_argument('-target_fasta', help = 'fasta file of target sequence with any excluded nts (i.e. non-conserved in set of transcript targets) marked by Ns')
parser.add_argument('-min_probe_len', type = int, default = 26)
parser.add_argument('-max_probe_len', type = int, default = 35)
parser.add_argument('-min_Tm', type = int, default = 0)
parser.add_argument('-max_Tm', type = int, default = 10000)
parser.add_argument('-min_gc', type = float, default = 0.4)
parser.add_argument('-max_gc', type = float, default = 0.6)
parser.add_argument('-outdir', help = 'name of output directory')
parser.add_argument('-Tm_quantile', type = float, default = 0.9, help = 'Tm of probe must be above this quantile to be selected')
parser.add_argument('-Tm_window_size', type = int, default = 200, help ='# nt to include in calculating the Tm quantile')
parser.add_argument('-excluded_regions_consensus', help = 'Regions to avoid placing probes in. Specify as a list of 1-based closed interval regions.')
parser.add_argument('-excluded_regions', help = 'Regions to avoid placing probes in, calculated from one transcript in each target. Not provided directly.')
parser.add_argument('-min_hairpin_dG', default = -3, help = 'deltaG values for probes must be > than this value')
parser.add_argument('-min_dimer_dG', default = -10, help = 'deltaG values for probes must be > than this value')
parser.add_argument('-probe_csv', default = 'potential_probes.csv')
parser.add_argument('-probe_fa', default = 'potential_probes.fa')
parser.add_argument('-logfile', default = 'logfile.txt')
#These args are generally taken from the defaults but they could be overriden by snakemake
#These could be in a list/series (specified at the per target level)
parser.add_argument('-sequence_filter_rules', nargs = '+', default = default_rules, help = 'remove probes not passing these, choose from: %s' % ', '.join(possible_rules))
#These will not be in a list
parser.add_argument('-Na_conc', default = 300, help = 'Na+ concentration of hybridization in mM')
args, unknown = parser.parse_known_args()
if 'snakemake' in globals():
args = probe_helpers.set_snake_args(args, snakemake)
target_name = os.path.basename(args.target_fasta).split('.fa')[0]
logging.basicConfig(level=logging.DEBUG, filename = args.logfile, filemode = 'w', format = '%(message)s')
logging.info('Target %s: ' % target_name)
exdf = pd.read_csv(args.excluded_regions)
if not pd.isnull(args.excluded_regions_consensus):
args.excluded_regions_consensus = probe_helpers.get_subregion_ranges(args.excluded_regions_consensus)
target_seq = next(SeqIO.parse(args.target_fasta, 'fasta')).seq.upper()
kmer_df = scan_sequence(target_name, target_seq, args.min_probe_len, args.max_probe_len, args.Na_conc)
#keep kmer_df as the original one and df as the one to be filtered.
df = kmer_df.copy()
df.dropna(subset = ['Tm'], inplace = True)
logging.info('Starting with %s potential probes.' % len(df))
print('removing probes in excluded regions')
ex_calc = exdf.loc[exdf['region_type'] == 'excluded_regions', ['start', 'end']].values
df = excluded_nt_filter(df, args.min_probe_len, args.max_probe_len, excluded=ex_calc, excluded_consensus=args.excluded_regions_consensus)
self._PolyOutList)
else:
outRec = self._PolyOutList[e.outIdx]
op = outRec.pts
if (toFront and _PointsEqual(pt, op.pt)) or (
not toFront and _PointsEqual(pt, op.prevOp.pt)
):
return
op2 = OutPt(outRec.idx, pt)
op2.nextOp = op
op2.prevOp = op.prevOp
op.prevOp.nextOp = op2
op.prevOp = op2
if toFront:
outRec.pts = op2
def _AppendPolygon(self, e1, e2):
outRec1 = self._PolyOutList[e1.outIdx]
outRec2 = self._PolyOutList[e2.outIdx]
holeStateRec = None
if _Param1RightOfParam2(outRec1, outRec2):
holeStateRec = outRec2
elif _Param1RightOfParam2(outRec2, outRec1):
holeStateRec = outRec1
else:
holeStateRec = _GetLowermostRec(outRec1, outRec2)
p1_lft = outRec1.pts
p2_lft = outRec2.pts
p1_rt = p1_lft.prevOp
p2_rt = p2_lft.prevOp
newSide = EdgeSide.Left
if e1.side == EdgeSide.Left:
if e2.side == EdgeSide.Left:
# z y x a b c
_ReversePolyPtLinks(p2_lft)
p2_lft.nextOp = p1_lft
p1_lft.prevOp = p2_lft
p1_rt.nextOp = p2_rt
p2_rt.prevOp = p1_rt
outRec1.pts = p2_rt
else:
# x y z a b c
p2_rt.nextOp = p1_lft
p1_lft.prevOp = p2_rt
p2_lft.prevOp = p1_rt
p1_rt.nextOp = p2_lft
outRec1.pts = p2_lft
else:
newSide = EdgeSide.Right
if e2.side == EdgeSide.Right:
# a b c z y x
_ReversePolyPtLinks(p2_lft)
p1_rt.nextOp = p2_rt
p2_rt.prevOp = p1_rt
p2_lft.nextOp = p1_lft
p1_lft.prevOp = p2_lft
else:
# a b c x y z
p1_rt.nextOp = p2_lft
p2_lft.prevOp = p1_rt
p1_lft.prevOp = p2_rt
p2_rt.nextOp = p1_lft
outRec1.bottomPt = None
if holeStateRec == outRec2:
if outRec2.FirstLeft != outRec1:
outRec1.FirstLeft = outRec2.FirstLeft
outRec1.isHole = outRec2.isHole
outRec2.pts = None
outRec2.bottomPt = None
outRec2.FirstLeft = outRec1
OKIdx = outRec1.idx
ObsoleteIdx = outRec2.idx
e1.outIdx = -1
e2.outIdx = -1
e = self._ActiveEdges
while e is not None:
if e.outIdx == ObsoleteIdx:
e.outIdx = OKIdx
e.side = newSide
break
e = e.nextInAEL
outRec2.idx = outRec1.idx
def _FixupIntersectionOrder(self):
self._CopyAELToSEL()
inode = self._IntersectNodes
while inode is not None:
if not _EdgesAdjacent(inode):
nextNode = inode.nextIn
while nextNode and not _EdgesAdjacent(nextNode):
nextNode = nextNode.nextIn
if nextNode is None:
return False
e1 = inode.e1
e2 = inode.e2
p = inode.pt
inode.e1 = nextNode.e1
inode.e2 = nextNode.e2
inode.pt = nextNode.pt
nextNode.e1 = e1
nextNode.e2 = e2
nextNode.pt = p
self._SwapPositionsInSEL(inode.e1, inode.e2)
inode = inode.nextIn
return True
def _ProcessEdgesAtTopOfScanbeam(self, topY):
e = self._ActiveEdges
while e is not None:
if _IsMaxima(e, topY) and _GetMaximaPair(e).dx != horizontal:
ePrev = e.prevInAEL
self._DoMaxima(e, topY)
if ePrev is None:
e = self._ActiveEdges
else:
e = ePrev.nextInAEL
else:
intermediateVert = _IsIntermediate(e, topY)
if intermediateVert and e.nextInLML.dx == horizontal:
if e.outIdx >= 0:
self._AddOutPt(e, e.Top)
hj = self._HorzJoins
if hj is not None:
while True:
_1, _2, overlap = _GetOverlapSegment(
hj.edge.Bot,
hj.edge.Top,
e.nextInLML.Bot,
e.nextInLML.Top,
)
if overlap:
self._AddJoin(
hj.edge, e.nextInLML, hj.savedIdx, e.outIdx
)
hj = hj.nextHj
if hj == self._HorzJoins:
break
self._AddHorzJoin(e.nextInLML, e.outIdx)
e = self._UpdateEdgeIntoAEL(e)
self._AddEdgeToSEL(e)
else:
e.Curr = Point(_TopX(e, topY), topY)
if (
self.ForceSimple
and e.prevInAEL is not None
and e.prevInAEL.Curr.x == e.Curr.x
and e.outIdx >= 0
and e.prevInAEL.outIdx >= 0
):
if intermediateVert:
self._AddOutPt(e.prevInAEL, Point(e.Curr.x, topY))
else:
self._AddOutPt(e, Point(e.Curr.x, topY))
e = e.nextInAEL
self._ProcessHorizontals()
e = self._ActiveEdges
while e is not None:
if _IsIntermediate(e, topY):
if e.outIdx >= 0:
self._AddOutPt(e, e.Top)
e = self._UpdateEdgeIntoAEL(e)
ePrev = e.prevInAEL
eNext = e.nextInAEL
if (
ePrev is not None
and ePrev.Curr.x == e.Bot.x
and (ePrev.Curr.y == e.Bot.y)
and (e.outIdx >= 0)
and (ePrev.outIdx >= 0)
and (ePrev.Curr.y > ePrev.Top.y)
and _SlopesEqual2(e, ePrev)
):
self._AddOutPt(ePrev, e.Bot)
self._AddJoin(e, ePrev)
elif (
eNext is not None
and (eNext.Curr.x == e.Bot.x)
and (eNext.Curr.y == e.Bot.y)
and (e.outIdx >= 0)
and (eNext.outIdx >= 0)
and (eNext.Curr.y > eNext.Top.y)
and _SlopesEqual2(e, eNext)
):
self._AddOutPt(eNext, e.Bot)
self._AddJoin(e, eNext)
e = e.nextInAEL
def _Area(self, pts):
# see http://www.mathopenref.com/coordpolygonarea2.html
result = 0.0
p = pts
while True:
result += (p.pt.x + p.prevOp.pt.x) * (p.prevOp.pt.y - p.pt.y)
p = p.nextOp
if p == pts:
break
return result / 2
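# Worked example of the shoelace sum above (illustrative): for the unit square
# (0,0)-(1,0)-(1,1)-(0,1) the summed terms come to +/-2 depending on the direction the
# linked list is traversed, so _Area returns +/-1; the sign encodes ring orientation,
# which _JoinCommonEdges compares against the isHole/ReverseSolution flags.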
def _JoinPoints(self, jr):
p1, p2 = None, None
outRec1 = self._PolyOutList[jr.poly1Idx]
outRec2 = self._PolyOutList[jr.poly2Idx]
if outRec1 is None or outRec2 is None:
return p1, p2, False
pp1a = outRec1.pts
pp2a = outRec2.pts
pt1 = jr.pt2a
pt2 = jr.pt2b
pt3 = jr.pt1a
pt4 = jr.pt1b
pp1a, pt1, pt2, result = _FindSegment(pp1a, pt1, pt2)
if not result:
return p1, p2, False
if outRec1 == outRec2:
pp2a = pp1a.nextOp
pp2a, pt3, pt4, result = _FindSegment(pp2a, pt3, pt4)
if not result or pp2a == pp1a:
return p1, p2, False
else:
pp2a, pt3, pt4, result = _FindSegment(pp2a, pt3, pt4)
if not result:
return p1, p2, False
pt1, pt2, result = _GetOverlapSegment(pt1, pt2, pt3, pt4)
if not result:
return p1, p2, False
prevOp = pp1a.prevOp
if _PointsEqual(pp1a.pt, pt1):
p1 = pp1a
elif _PointsEqual(prevOp.pt, pt1):
p1 = prevOp
else:
p1 = _InsertPolyPtBetween(pp1a, prevOp, pt1)
if _PointsEqual(pp1a.pt, pt2):
p2 = pp1a
elif _PointsEqual(prevOp.pt, pt2):
p2 = prevOp
elif (p1 == pp1a) or (p1 == prevOp):
p2 = _InsertPolyPtBetween(pp1a, prevOp, pt2)
elif _Pt3IsBetweenPt1AndPt2(pp1a.pt, p1.pt, pt2):
p2 = _InsertPolyPtBetween(pp1a, p1, pt2)
else:
p2 = _InsertPolyPtBetween(p1, prevOp, pt2)
prevOp = pp2a.prevOp
if _PointsEqual(pp2a.pt, pt1):
p3 = pp2a
elif _PointsEqual(prevOp.pt, pt1):
p3 = prevOp
else:
p3 = _InsertPolyPtBetween(pp2a, prevOp, pt1)
if _PointsEqual(pp2a.pt, pt2):
p4 = pp2a
elif _PointsEqual(prevOp.pt, pt2):
p4 = prevOp
elif (p3 == pp2a) or (p3 == prevOp):
p4 = _InsertPolyPtBetween(pp2a, prevOp, pt2)
elif _Pt3IsBetweenPt1AndPt2(pp2a.pt, p3.pt, pt2):
p4 = _InsertPolyPtBetween(pp2a, p3, pt2)
else:
p4 = _InsertPolyPtBetween(p3, prevOp, pt2)
if p1.nextOp == p2 and p3.prevOp == p4:
p1.nextOp = p3
p3.prevOp = p1
p2.prevOp = p4
p4.nextOp = p2
return p1, p2, True
elif p1.prevOp == p2 and p3.nextOp == p4:
p1.prevOp = p3
p3.nextOp = p1
p2.nextOp = p4
p4.prevOp = p2
return p1, p2, True
return p1, p2, False
def _FixupFirstLefts1(self, oldOutRec, newOutRec):
for outRec in self._PolyOutList:
if outRec.pts is not None and outRec.FirstLeft == oldOutRec:
if _Poly2ContainsPoly1(outRec.pts, newOutRec.pts):
outRec.FirstLeft = newOutRec
def _FixupFirstLefts2(self, oldOutRec, newOutRec):
for outRec in self._PolyOutList:
if outRec.FirstLeft == oldOutRec:
outRec.FirstLeft = newOutRec
def _GetOutRec(self, idx):
outrec = self._PolyOutList[idx]
while outrec != self._PolyOutList[outrec.idx]:
outrec = self._PolyOutList[outrec.idx]
return outrec
def _JoinCommonEdges(self):
for i in range(len(self._JoinList)):
jr = self._JoinList[i]
outRec1 = self._GetOutRec(jr.poly1Idx)
outRec2 = self._GetOutRec(jr.poly2Idx)
if outRec1.pts is None or outRec2.pts is None:
continue
if outRec1 == outRec2:
holeStateRec = outRec1
elif _Param1RightOfParam2(outRec1, outRec2):
holeStateRec = outRec2
elif _Param1RightOfParam2(outRec2, outRec1):
holeStateRec = outRec1
else:
holeStateRec = _GetLowermostRec(outRec1, outRec2)
p1, p2, result = self._JoinPoints(jr)
if not result:
continue
if outRec1 == outRec2:
outRec1.pts = p1
outRec1.bottomPt = None
outRec2 = self._CreateOutRec()
outRec2.pts = p2
jr.poly2Idx = outRec2.idx
if _Poly2ContainsPoly1(outRec2.pts, outRec1.pts):
outRec2.isHole = not outRec1.isHole
outRec2.FirstLeft = outRec1
self._FixupJoinRecs(jr, p2, i + 1)
if self._UsingPolyTree:
self._FixupFirstLefts2(outRec2, outRec1)
_FixupOutPolygon(outRec1)
_FixupOutPolygon(outRec2)
if (outRec2.isHole ^ self.ReverseSolution) == (self._Area(outRec2) > 0.0):
_ReversePolyPtLinks(outRec2.pts)
elif _Poly2ContainsPoly1(outRec1.pts, outRec2.pts):
outRec2.isHole = outRec1.isHole
outRec1.isHole = not outRec2.isHole
outRec2.FirstLeft = outRec1.FirstLeft
outRec1.FirstLeft = outRec2
self._FixupJoinRecs(jr, p2, i + 1)
if self._UsingPolyTree:
self._FixupFirstLefts2(outRec1, outRec2)
_FixupOutPolygon(outRec1)
_FixupOutPolygon(outRec2)
if (outRec1.isHole ^ self.ReverseSolution) == (self._Area(outRec1) > 0.0):
_ReversePolyPtLinks(outRec1.pts)
else:
outRec2.isHole = outRec1.isHole
outRec2.FirstLeft = outRec1.FirstLeft
self._FixupJoinRecs(jr, p2, i + 1)
if self._UsingPolyTree:
self._FixupFirstLefts1(outRec1, outRec2)
_FixupOutPolygon(outRec1)
_FixupOutPolygon(outRec2)
else:
_FixupOutPolygon(outRec1)
outRec2.pts = None
outRec2.bottomPt = None
outRec2.idx = outRec1.idx
outRec1.isHole = holeStateRec.isHole
if holeStateRec == outRec2:
outRec1.FirstLeft = outRec2.FirstLeft
outRec2.FirstLeft = outRec1
if self._UsingPolyTree:
self._FixupFirstLefts2(outRec2, outRec1)
return
def _DoSimplePolygons(self):
i = 0
while i < len(self._PolyOutList):
outrec = self._PolyOutList[i]
i += 1
op = outrec.pts
if op is None:
continue
while True:
op2 = op.nextOp
while op2 != outrec.pts:
if (
_PointsEqual(op.pt, op2.pt)
and op2.nextOp != op
and op2.prevOp != op
):
# split the polygon into two ...
op3 = op.prevOp
op4 = op2.prevOp
op.prevOp = op4
op4.nextOp = op
op2.prevOp = op3
op3.nextOp = op2
outrec.pts = op
outrec2 = self._CreateOutRec()
outrec2.pts = op2
_UpdateOutPtIdxs(outrec2)
if _Poly2ContainsPoly1(outrec2.pts, outrec.pts):
# OutRec2 is contained by OutRec1 ...
outrec2.isHole = not outrec.isHole
outrec2.FirstLeft = outrec
elif _Poly2ContainsPoly1(outrec.pts, outrec2.pts):
# OutRec1 is contained by OutRec2 ...
outrec2.isHole = outrec.isHole
outrec.isHole = not outrec2.isHole
outrec2.FirstLeft = outrec.FirstLeft
outrec.FirstLeft = outrec2
else:
# the 2 polygons are separate ...
outrec2.isHole = outrec.isHole
outrec2.FirstLeft = outrec.FirstLeft
op2 = op
# ie get ready for the next iteration
op2 = op2.nextOp
op = op.nextOp
if op == outrec.pts:
break
return
def _ExecuteInternal(self):
# try:
d[i]],
[0, 0, 0, 1]])
T = np.dot(T, A)
# print(A)
return T
def invdyn(rb, qc, qcdot, qcddot, grav):
z0 = np.array([[0], [0], [1]])
R = np.identity(3)
Q = np.zeros((rb.ndof, 1))
grav = grav.reshape(3,1)
w = np.dot(np.transpose(R), np.zeros((3, 1)))
wdot = np.dot(np.transpose(R), np.zeros((3, 1)))
vdot = np.dot(np.transpose(R), grav)
Fm = np.empty((3,0))
Nm = np.empty((3,0))
n = np.zeros((3,rb.ndof))
f = np.zeros((3,rb.ndof))
for k in range(1):
q = qc[k, :].reshape((rb.ndof,1))
qdot = qcdot[k, :].reshape((rb.ndof,1))
qddot = qcddot[k, :].reshape((rb.ndof,1))
N_DOFS = rb.ndof
# Forward recursion
for i in range(N_DOFS):
T = rb.calc_transformation(i-1, i, q)
R = T[:3,:3]
p = np.array([[rb.joints[i].a], [rb.joints[i].d* np.sin(rb.joints[i].alpha)],[rb.joints[i].d * np.cos(rb.joints[i].alpha)]])
wdot = np.dot(R.T, (wdot + np.dot(z0,qddot[i,k])) + np.cross(w, np.dot(z0, qdot[i,k]), axis=0))
w = np.dot(R.T,(w + np.dot(z0, qdot[i,k])))
vdot = np.dot(R.T, vdot) + np.cross(wdot, p, axis=0) + np.cross(w, np.cross(w, p, axis=0), axis=0)
vcdot = vdot + np.cross(wdot, rb.joints[i].r, axis=0) + (np.cross(w, np.cross(w, rb.joints[i].r, axis=0), axis=0))
F = np.dot(rb.joints[i].m, vcdot)
N = np.dot(rb.joints[i].inertia, wdot) + np.cross(w, np.dot(rb.joints[i].inertia, w), axis=0)
Fm = np.append(Fm, F, axis=1)
# print "line: ",i,"\nFm: ", Fm, "\n"
Nm = np.append(Nm, N, axis=1)
# print "line: ",i,"\nNm: ", Nm, "\n"
# Backward recursion
for i in reversed(range(N_DOFS)):
p = np.array([[rb.joints[i].a], [rb.joints[i].d * np.sin(rb.joints[i].alpha)],
[rb.joints[i].d * np.cos(rb.joints[i].alpha)]])
if i+1 < N_DOFS:
T = rb.calc_transformation(i, i+1, q)
R = T[:3, :3]
a = np.dot(R, (n[:, i + 1].reshape((3,1)) + np.cross( np.dot(R.T, p), f[:,i+1].reshape((3,1)), axis=0)) )
n[:, i] = np.ravel(a + np.cross( (rb.joints[i].r + p), Fm[:,i].reshape((3,1)), axis=0) + Nm[:,i].reshape((3,1)))
f[:,i] = np.dot(R, f[:,i+1]) + Fm[:,i]
else:
n[:, i] = np.ravel(np.cross(rb.joints[i].r + p, Fm[:, i].reshape((3,1)), axis=0) + Nm[:, i].reshape((3,1)))
f[:, i] = Fm[:, i]
T = rb.calc_transformation(i-1, i, q)
R = T[:3,:3]
# print n[:,i].shape
a = np.dot(np.transpose(n[:, i].reshape((3,1))), np.transpose(R))
# print "line: ", i," = ", n[:,1]
Q[i,k] = np.dot(a, z0)
return Q
def inertiaComp(self, qc):
if qc.shape[0] > self.ndof:
qc = qc[:self.ndof]
grav = np.array([[0],[0],[0]])
qd = np.zeros((1,self.ndof))
qdd = np.eye(self.ndof)
q_in = np.array([qc])
qd_in = np.array([qd])
M = np.zeros((self.ndof, self.ndof))
for i in range(self.ndof):
qdd_in = np.array([qdd[i,:]])
Q = self.inverseDynamics(q_in, qd_in, qdd_in, grav)
M[:,i] = Q
return M
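# Note (illustrative): with gravity and joint velocities set to zero, the inverse dynamics
# reduces to tau = M(q) * qdd, so evaluating it once per unit-acceleration vector e_i
# recovers the i-th column of the joint-space inertia matrix M(q).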
def cgComp(self, qc,qcd, grav):
# grav = np.array([[0],[0],[-9.81]])
qdd = np.zeros((1,self.ndof))
Q = self.inverseDynamics(qc, qcd, qdd, grav)
return Q
def cinertiaComp(rb, q, J):
if q.shape[0] > rb.ndof:
q = q[:rb.ndof]
# # print q.shape
# J = rb.calcJac(q)
# # Jpinv = rb.pinv(J)
# A = rb.quat2Ja(quat)
# B = block_diag(np.eye(3), A[1:,:])
# Ja = np.dot(B,J)
# # Jad = np.dot(B,Jd)
# # Ja = np.dot(B,J)
Jpinv = rb.pinv(J)
M = rb.inertiaComp(q)
# print M.shape
# print J.shape
# print Jpinv.shape
Lambda = np.dot(Jpinv.T, np.dot(M, Jpinv))
return Lambda
def coriolisComp(rb, q, qd):
if q.shape[0] > rb.ndof:
q = q[:rb.ndof]
qd = qd[:rb.ndof]
N = rb.ndof
C = np.zeros((N,N))
Csq = np.zeros((N,N))
grav = np.array([0,0,0])
for j in range(N):
QD = np.zeros((N))
QD[j] = 1
tau = rb.inverseDynamics(q, QD, np.zeros(N), grav)
Csq[:,j] = Csq[:,j] + tau
for j in range(N):
for k in range(j+1,N):
QD = np.zeros((N))
QD[j] = 1
QD[k] = 1
tau = rb.inverseDynamics(q, QD, np.zeros(N), grav)
C[:,k] = C[:,k] + np.dot((tau - Csq[:,k] - Csq[:,j]), (qd[j]/2))
C[:,j] = C[:,j] + np.dot((tau - Csq[:,k] - Csq[:,j]), (qd[k]/2))
C = (C + np.dot(Csq, np.diag(qd)) )
return C
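# Note (illustrative): the loop above evaluates the zero-gravity inverse dynamics with unit
# joint velocities e_j and e_j + e_k; subtracting the single-joint terms Csq isolates the
# velocity-product (Coriolis/centrifugal) couplings, which are then scaled by the actual
# joint rates to form C(q, qd).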
def ccoriolisComp(rb, q, qd, Ja, Jad):
if q.shape[0] > rb.ndof:
q = q[:rb.ndof]
qd = qd[:rb.ndof]
M = rb.inertiaComp(q)
C = rb.coriolisComp(q, qd)
Jpinv = rb.pinv(Ja)
mu = np.dot(Jpinv.T, np.dot((C - np.dot(M, np.dot(Jpinv, Jad))), Jpinv))
return mu
def gravloadComp(rb, q, grav):
if q.shape[0] > rb.ndof:
q = q[:rb.ndof]
qd = np.zeros(rb.ndof)
qdd = np.zeros(rb.ndof)
tau_g = rb.inverseDynamics(q, qd, qdd, grav)
return tau_g
############################################################
def forwardDynamics(self, Q, qc, qcdot, grav):
M = self.inertiaComp(qc)
CG = self.cgComp(qc, qcdot, grav)
qacc = np.dot(np.linalg.inv(M), (Q - CG))
return qacc
def plotX(self, X):
rc('text', usetex=True)
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(X[0, :], X[1, :])
ax.set_title('${X}_{in}(t)$')
# Grid lines
ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)
# removing top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Legends
ax.legend(["Trajectory"])
class traj:
def __init__(self, q0, qf, t0, tf, hz, Robot):
self.q0 = q0
self.qf = qf
self.Robot = Robot
self.p0 = self.initial_position()
self.pf = self.final_position()
self.R0 = self.initial_rotation_matrix()
self.Rf = self.final_rotation_matrix()
self.t0 = t0
self.tf = tf
self.hz = hz
self.dx = 1.0/hz
self.samples = int((tf * hz))
self.samples2 = self.samples*2
self.it = np.linspace(0, 1, self.samples)
self.q1et = quaternion.from_rotation_matrix(self.R0)
self.q2et = quaternion.from_rotation_matrix(self.Rf)
# self.q1et = Robot.rounding_quaternion(quaternion.from_float_array(Robot.mat2quat(self.R0)))
# self.q2et = Robot.rounding_quaternion(quaternion.from_float_array(Robot.mat2quat(self.Rf)))
self.quatf = self.q1et
self.quatf_d = self.quatf * np.log(self.q2et * self.q1et.inverse())
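# Note (assumption): q2et * q1et.inverse() is the rotation taking the initial orientation to
# the final one; its quaternion logarithm acts as a constant rotation-rate term, and quatf_d
# appears to store the corresponding orientation derivative used when interpolating from R0 to Rf.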
def initial_position(self):
# q = np.array([0, 0, 0, 0, 0, 0, 0])
kine = self.Robot.forwardKinematics(self.q0)
rpy = kine.rpy
p0 = cartesian(kine.transl[0], kine.transl[1], kine.transl[2], rpy[0], rpy[1], rpy[2])
return p0
def final_position(self):
kine = self.Robot.forwardKinematics(self.qf)
rpy = kine.rpy
pf = cartesian(kine.transl[0], kine.transl[1], kine.transl[2], rpy[0], rpy[1], rpy[2])
return pf
def initial_rotation_matrix(self):
kine = self.Robot.forwardKinematics(self.q0)
R0 = kine.R
return R0
def final_rotation_matrix(self):
kine = self.Robot.forwardKinematics(self.qf)
Rf = kine.R
return Rf
# Motion planning:
def pathplanning(self):
t0 = self.t0
tf = self.tf
hz = self.hz
samples = self.samples
dx = self.dx
v0 = 0 # Starting velocity
a0 = 0 # Starting acceleration
vf = 0 # Final velocity
af = 0 # Final acceleration
a_mat = np.array([[1, t0, t0 ** 2, t0 ** 3, t0 ** 4, t0 ** 5],
[0, 1, 2 * t0, 3 * t0 ** 2, 4 * t0 ** 3, 5 * t0 ** 4],
[0, 0, 2, 6 * t0, 12 * t0 ** 2, 20 * t0 ** 3],
[1, tf, tf ** 2, tf ** 3, tf ** 4, tf ** 5],
[0, 1, 2 * tf, 3 * tf ** 2, 4 * tf ** 3, 5 * tf ** 4],
[0, 0, 2, 6 * tf, 12 * tf ** 2, 20 * tf ** 3]])
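# The 6x6 matrix above encodes the quintic boundary conditions: rows 1-3 fix position,
# velocity and acceleration at t0, rows 4-6 the same at tf. Solving a_mat * p = [x0, v0, a0,
# xf, vf, af] yields the polynomial coefficients p, lowest order first (they are flipped
# below before evaluation).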
a_mat_1 = np.linalg.inv(a_mat)
cartesianX = np.array([self.p0.x, v0, a0, self.pf.x, vf, af])
cartesianY = np.array([self.p0.y, v0, a0, self.pf.y, vf, af])
cartesianZ = np.array([self.p0.z, v0, a0, self.pf.z, vf, af])
cartesianRoll = np.array([self.p0.roll, v0, a0, self.pf.roll, vf, af])
cartesianPitch = np.array([self.p0.pitch, v0, a0, self.pf.pitch, vf, af])
cartesianYaw = np.array([self.p0.yaw, v0, a0, self.pf.yaw, vf, af])
px = np.transpose(np.dot(a_mat_1, cartesianX))
py = np.transpose(np.dot(a_mat_1, cartesianY))
pz = np.transpose(np.dot(a_mat_1, cartesianZ))
proll = np.transpose(np.dot(a_mat_1, cartesianRoll))
ppitch = np.transpose(np.dot(a_mat_1, cartesianPitch))
pyaw = np.transpose(np.dot(a_mat_1, cartesianYaw))
pseg = np.fliplr(np.array([px,py,pz, proll, ppitch, pyaw]))
plen = np.linspace(0,tf,samples)
time_span = plen
X1 = np.zeros((samples,6))
for j in range(len(plen)):
t = plen[j]
for i in range(6):
X1[j,i] = pseg[i,0]*t**5 + pseg[i,1]*t**4 + pseg[i,2]*t**3 + pseg[i,3]*t**2+pseg[i,4]*t + pseg[i,5]
X1 = np.transpose(X1)
Xd1 = np.gradient(X1[0, :], dx)
Xd2 = np.gradient(X1[1, :], dx)
Xd3 = np.gradient(X1[2, :], dx)
Xd4 = np.gradient(X1[3, :], dx)
Xd5 = np.gradient(X1[4, :], dx)
Xd6 = np.gradient(X1[5, :], dx)
Xd = np.array([Xd1, Xd2, Xd3, Xd4, Xd5, Xd6])
Xdd1 = np.gradient(Xd[0, :], dx)
Xdd2 = np.gradient(Xd[1, :], dx)
Xdd3 = np.gradient(Xd[2, :], dx)
Xdd4 = np.gradient(Xd[3, :], dx)
Xdd5 = np.gradient(Xd[4, :], dx)
Xdd6 = np.gradient(Xd[5, :], dx)
Xdd = np.array([Xdd1, Xdd2, Xdd3, Xdd4, Xdd5, Xdd6])
return X1, Xd, Xdd
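# Self-contained sketch of the quintic time-scaling used by pathplanning(): solve for the
# coefficients of a 5th-order polynomial with zero boundary velocity/acceleration, then
# evaluate the position profile. Values are placeholders and this helper is not used by traj.
def _example_quintic(x0=0.0, xf=1.0, tf=2.0, n=50):
    A = np.array([[1, 0, 0, 0, 0, 0],
                  [0, 1, 0, 0, 0, 0],
                  [0, 0, 2, 0, 0, 0],
                  [1, tf, tf**2, tf**3, tf**4, tf**5],
                  [0, 1, 2*tf, 3*tf**2, 4*tf**3, 5*tf**4],
                  [0, 0, 2, 6*tf, 12*tf**2, 20*tf**3]])
    coeffs = np.linalg.solve(A, np.array([x0, 0.0, 0.0, xf, 0.0, 0.0]))
    t = np.linspace(0, tf, n)
    return np.polyval(coeffs[::-1], t)  # smooth profile from x0 to xf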
def pathplanning3(self):
t0 = self.t0
tf = self.tf
hz = self.hz
samples = self.samples
dx = self.dx
v0 = 0 # Starting velocity
a0 = 0 # Starting acceleration
vf = 0 # Final velocity
af = 0 # Final acceleration
a_mat = np.array([[1, t0, t0 ** 2, t0 ** 3, t0 ** 4, t0 ** 5],
[0, 1, 2 * t0, 3 * t0 ** 2, 4 * t0 ** 3, 5 * t0 ** 4],
[0, 0, 2, 6 * t0, 12 * t0 ** 2, 20 * t0 ** 3],
[1, tf, tf ** 2, tf ** 3, tf ** 4, tf ** 5],
[0, 1, 2 * tf, 3 * tf ** 2, 4 * tf ** 3, 5 * tf ** 4],
[0, 0, 2, 6 * | |
## @file
# This file is used to implement the various binary parsers.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from re import T
import copy
import os
from edk2basetools.FMMT.PI.Common import *
from edk2basetools.FMMT.core.BiosTreeNode import *
from edk2basetools.FMMT.core.BiosTree import *
from edk2basetools.FMMT.core.GuidTools import *
ROOT_TREE = 'ROOT'
ROOT_FV_TREE = 'ROOT_FV_TREE'
ROOT_FFS_TREE = 'ROOT_FFS_TREE'
ROOT_SECTION_TREE = 'ROOT_SECTION_TREE'
FV_TREE = 'FV'
DATA_FV_TREE = 'DATA_FV'
FFS_TREE = 'FFS'
FFS_PAD = 'FFS_PAD'
FFS_FREE_SPACE = 'FFS_FREE_SPACE'
SECTION_TREE = 'SECTION'
SEC_FV_TREE = 'SEC_FV_IMAGE'
BINARY_DATA = 'BINARY'
Fv_count = 0
## Abstract factory
class BinaryFactory():
type:list = []
def Create_Product():
pass
class BinaryProduct():
## Use GuidTool to decompress data.
def DeCompressData(self, GuidTool, Section_Data: bytes) -> bytes:
ParPath = os.path.abspath(os.path.dirname(os.path.abspath(__file__))+os.path.sep+"..")
ToolPath = os.path.join(ParPath, r'FMMTConfig.ini')
guidtool = GUIDTools(ToolPath).__getitem__(struct2stream(GuidTool))
DecompressedData = guidtool.unpack(Section_Data)
return DecompressedData
def ParserData():
pass
class SectionFactory(BinaryFactory):
type = [SECTION_TREE]
def Create_Product():
return SectionProduct()
class FfsFactory(BinaryFactory):
type = [ROOT_SECTION_TREE, FFS_TREE]
def Create_Product():
return FfsProduct()
class FvFactory(BinaryFactory):
type = [ROOT_FFS_TREE, FV_TREE, SEC_FV_TREE]
def Create_Product():
return FvProduct()
class FdFactory(BinaryFactory):
type = [ROOT_FV_TREE, ROOT_TREE]
def Create_Product():
return FdProduct()
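# Hedged sketch (assumption, not from the original file): the factories above are normally
# selected by matching a tree node's type against each factory's 'type' list, after which the
# matching product parses that node's raw data in place.
def _example_parse_node(tree_node, whole_data: bytes = b'') -> None:
    for factory in (SectionFactory, FfsFactory, FvFactory, FdFactory):
        if tree_node.type in factory.type:
            factory.Create_Product().ParserData(tree_node, whole_data)
            break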
class SectionProduct(BinaryProduct):
## Decompress the compressed section.
def ParserData(self, Section_Tree, whole_Data: bytes, Rel_Whole_Offset: int=0) -> None:
if Section_Tree.Data.Type == 0x01:
Section_Tree.Data.OriData = Section_Tree.Data.Data
self.ParserFfs(Section_Tree, b'')
# GUID Defined Section
elif Section_Tree.Data.Type == 0x02:
Section_Tree.Data.OriData = Section_Tree.Data.Data
DeCompressGuidTool = Section_Tree.Data.ExtHeader.SectionDefinitionGuid
Section_Tree.Data.Data = self.DeCompressData(DeCompressGuidTool, Section_Tree.Data.Data)
Section_Tree.Data.Size = len(Section_Tree.Data.Data) + Section_Tree.Data.HeaderLength
self.ParserFfs(Section_Tree, b'')
elif Section_Tree.Data.Type == 0x03:
Section_Tree.Data.OriData = Section_Tree.Data.Data
self.ParserFfs(Section_Tree, b'')
# SEC_FV Section
elif Section_Tree.Data.Type == 0x17:
global Fv_count
Sec_Fv_Info = FvNode(Fv_count, Section_Tree.Data.Data)
Sec_Fv_Tree = BIOSTREE('FV'+ str(Fv_count))
Sec_Fv_Tree.type = SEC_FV_TREE
Sec_Fv_Tree.Data = Sec_Fv_Info
Sec_Fv_Tree.Data.HOffset = Section_Tree.Data.DOffset
Sec_Fv_Tree.Data.DOffset = Sec_Fv_Tree.Data.HOffset + Sec_Fv_Tree.Data.Header.HeaderLength
Sec_Fv_Tree.Data.Data = Section_Tree.Data.Data[Sec_Fv_Tree.Data.Header.HeaderLength:]
Section_Tree.insertChild(Sec_Fv_Tree)
Fv_count += 1
def ParserFfs(self, ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> None:
Rel_Offset = 0
Section_Offset = 0
# Get the data from the parent tree; if the tree has no data, take it from whole_data.
if ParTree.Data != None:
Data_Size = len(ParTree.Data.Data)
Section_Offset = ParTree.Data.DOffset
Whole_Data = ParTree.Data.Data
else:
Data_Size = len(Whole_Data)
# Parse all the data to collect every Section recorded in its parent Section.
while Rel_Offset < Data_Size:
# Create a SectionNode and set it as the SectionTree's Data
Section_Info = SectionNode(Whole_Data[Rel_Offset:])
Section_Tree = BIOSTREE(Section_Info.Name)
Section_Tree.type = SECTION_TREE
Section_Info.Data = Whole_Data[Rel_Offset+Section_Info.HeaderLength: Rel_Offset+Section_Info.Size]
Section_Info.DOffset = Section_Offset + Section_Info.HeaderLength + Rel_Whole_Offset
Section_Info.HOffset = Section_Offset + Rel_Whole_Offset
Section_Info.ROffset = Rel_Offset
if Section_Info.Header.Type == 0:
break
# The final Section in the parent Section needs no padding; otherwise it must be 4-byte aligned with the parent Section start offset.
Pad_Size = 0
if (Rel_Offset+Section_Info.HeaderLength+len(Section_Info.Data) != Data_Size):
Pad_Size = GetPadSize(Section_Info.Size, 4)
Section_Info.PadData = Pad_Size * b'\x00'
if Section_Info.Header.Type == 0x02:
Section_Info.DOffset = Section_Offset + Section_Info.ExtHeader.DataOffset + Rel_Whole_Offset
Section_Info.Data = Whole_Data[Rel_Offset+Section_Info.ExtHeader.DataOffset: Rel_Offset+Section_Info.Size]
if Section_Info.Header.Type == 0x14:
ParTree.Data.Version = Section_Info.ExtHeader.GetVersionString()
if Section_Info.Header.Type == 0x15:
ParTree.Data.UiName = Section_Info.ExtHeader.GetUiString()
Section_Offset += Section_Info.Size + Pad_Size
Rel_Offset += Section_Info.Size + Pad_Size
Section_Tree.Data = Section_Info
ParTree.insertChild(Section_Tree)
class FfsProduct(BinaryProduct):
# ParserFFs / GetSection
def ParserData(self, ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> None:
Rel_Offset = 0
Section_Offset = 0
# Get the data from the parent tree; if the tree has no data, take it from whole_data.
if ParTree.Data != None:
Data_Size = len(ParTree.Data.Data)
Section_Offset = ParTree.Data.DOffset
Whole_Data = ParTree.Data.Data
else:
Data_Size = len(Whole_Data)
# Parse all the data to collect every Section recorded in the Ffs.
while Rel_Offset < Data_Size:
# Create a SectionNode and set it as the SectionTree's Data
Section_Info = SectionNode(Whole_Data[Rel_Offset:])
Section_Tree = BIOSTREE(Section_Info.Name)
Section_Tree.type = SECTION_TREE
Section_Info.Data = Whole_Data[Rel_Offset+Section_Info.HeaderLength: Rel_Offset+Section_Info.Size]
Section_Info.DOffset = Section_Offset + Section_Info.HeaderLength + Rel_Whole_Offset
Section_Info.HOffset = Section_Offset + Rel_Whole_Offset
Section_Info.ROffset = Rel_Offset
if Section_Info.Header.Type == 0:
break
# The final Section in the Ffs needs no padding; otherwise it must be 4-byte aligned with the Ffs start offset.
Pad_Size = 0
if (Rel_Offset+Section_Info.HeaderLength+len(Section_Info.Data) != Data_Size):
Pad_Size = GetPadSize(Section_Info.Size, 4)
Section_Info.PadData = Pad_Size * b'\x00'
if Section_Info.Header.Type == 0x02:
Section_Info.DOffset = Section_Offset + Section_Info.ExtHeader.DataOffset + Rel_Whole_Offset
Section_Info.Data = Whole_Data[Rel_Offset+Section_Info.ExtHeader.DataOffset: Rel_Offset+Section_Info.Size]
# If Section is Version or UI type, it saves the version and UI info of its parent Ffs.
if Section_Info.Header.Type == 0x14:
ParTree.Data.Version = Section_Info.ExtHeader.GetVersionString()
if Section_Info.Header.Type == 0x15:
ParTree.Data.UiName = Section_Info.ExtHeader.GetUiString()
Section_Offset += Section_Info.Size + Pad_Size
Rel_Offset += Section_Info.Size + Pad_Size
Section_Tree.Data = Section_Info
ParTree.insertChild(Section_Tree)
class FvProduct(BinaryProduct):
## ParserFv / GetFfs
def ParserData(self, ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> None:
Ffs_Offset = 0
Rel_Offset = 0
# Get the data from the parent tree; if the tree has no data, take it from whole_data.
if ParTree.Data != None:
Data_Size = len(ParTree.Data.Data)
Ffs_Offset = ParTree.Data.DOffset
Whole_Data = ParTree.Data.Data
else:
Data_Size = len(Whole_Data)
# Parse all the data to collect every Ffs recorded in the Fv.
while Rel_Offset < Data_Size:
# Create a FfsNode and set it as the FFsTree's Data
if Data_Size - Rel_Offset < 24:
Ffs_Tree = BIOSTREE('Free_Space')
Ffs_Tree.type = FFS_FREE_SPACE
Ffs_Tree.Data = FreeSpaceNode(Whole_Data[Rel_Offset:])
Ffs_Tree.Data.HOffset = Ffs_Offset + Rel_Whole_Offset
Ffs_Tree.Data.DOffset = Ffs_Tree.Data.HOffset
ParTree.Data.Free_Space = Data_Size - Rel_Offset
ParTree.insertChild(Ffs_Tree)
Rel_Offset = Data_Size
else:
Ffs_Info = FfsNode(Whole_Data[Rel_Offset:])
Ffs_Tree = BIOSTREE(Ffs_Info.Name)
Ffs_Info.HOffset = Ffs_Offset + Rel_Whole_Offset
Ffs_Info.DOffset = Ffs_Offset + Ffs_Info.Header.HeaderLength + Rel_Whole_Offset
Ffs_Info.ROffset = Rel_Offset
if Ffs_Info.Name == PADVECTOR:
Ffs_Tree.type = FFS_PAD
Ffs_Info.Data = Whole_Data[Rel_Offset+Ffs_Info.Header.HeaderLength: Rel_Offset+Ffs_Info.Size]
Ffs_Info.Size = len(Ffs_Info.Data) + Ffs_Info.Header.HeaderLength
# If the current Ffs is the final Ffs of the Fv and consists entirely of b'\xff', treat it as free space.
if struct2stream(Ffs_Info.Header).replace(b'\xff', b'') == b'':
Ffs_Tree.type = FFS_FREE_SPACE
Ffs_Info.Data = Whole_Data[Rel_Offset:]
Ffs_Info.Size = len(Ffs_Info.Data)
ParTree.Data.Free_Space = Ffs_Info.Size
else:
Ffs_Tree.type = FFS_TREE
Ffs_Info.Data = Whole_Data[Rel_Offset+Ffs_Info.Header.HeaderLength: Rel_Offset+Ffs_Info.Size]
# The final Ffs in the Fv needs no padding; otherwise it must be 8-byte aligned with the Fv start offset.
Pad_Size = 0
if Ffs_Tree.type != FFS_FREE_SPACE and (Rel_Offset+Ffs_Info.Header.HeaderLength+len(Ffs_Info.Data) != Data_Size):
Pad_Size = GetPadSize(Ffs_Info.Size, 8)
Ffs_Info.PadData = Pad_Size * b'\xff'
Ffs_Offset += Ffs_Info.Size + Pad_Size
Rel_Offset += Ffs_Info.Size + Pad_Size
Ffs_Tree.Data = Ffs_Info
ParTree.insertChild(Ffs_Tree)
class FdProduct(BinaryProduct):
type = [ROOT_FV_TREE, ROOT_TREE]
## Create DataTree with first level /fv Info, then parser each Fv.
def ParserData(self, WholeFvTree, whole_data: bytes=b'', offset: int=0) -> None:
# Get all Fv image in Fd with offset and length
Fd_Struct = self.GetFvFromFd(whole_data)
data_size = len(whole_data)
Binary_count = 0
global Fv_count
# If there is binary data before the first Fv image, add it into the tree.
if Fd_Struct[0][1] != 0:
Binary_node = BIOSTREE('BINARY'+ str(Binary_count))
Binary_node.type = BINARY_DATA
Binary_node.Data = BinaryNode(str(Binary_count))
Binary_node.Data.Data = whole_data[:Fd_Struct[0][1]]
Binary_node.Data.Size = len(Binary_node.Data.Data)
Binary_node.Data.HOffset = 0 + offset
WholeFvTree.insertChild(Binary_node)
Binary_count += 1
# Add the first collected Fv image into the tree.
Cur_node = BIOSTREE(Fd_Struct[0][0]+ str(Fv_count))
Cur_node.type = Fd_Struct[0][0]
Cur_node.Data = FvNode(Fv_count, whole_data[Fd_Struct[0][1]:Fd_Struct[0][1]+Fd_Struct[0][2][0]])
Cur_node.Data.HOffset = Fd_Struct[0][1] + offset
Cur_node.Data.DOffset = Cur_node.Data.HOffset+Cur_node.Data.Header.HeaderLength
Cur_node.Data.Data = whole_data[Fd_Struct[0][1]+Cur_node.Data.Header.HeaderLength:Fd_Struct[0][1]+Cur_node.Data.Size]
WholeFvTree.insertChild(Cur_node)
Fv_count += 1
Fv_num = len(Fd_Struct)
# Add all the collected Fv images, and any binary data between them, into the tree.
for i in range(Fv_num-1):
if Fd_Struct[i][1]+Fd_Struct[i][2][0] != Fd_Struct[i+1][1]:
Binary_node = BIOSTREE('BINARY'+ str(Binary_count))
Binary_node.type = BINARY_DATA
Binary_node.Data = BinaryNode(str(Binary_count))
Binary_node.Data.Data = whole_data[Fd_Struct[i][1]+Fd_Struct[i][2][0]:Fd_Struct[i+1][1]]
Binary_node.Data.Size = len(Binary_node.Data.Data)
Binary_node.Data.HOffset = Fd_Struct[i][1]+Fd_Struct[i][2][0] + offset
WholeFvTree.insertChild(Binary_node)
Binary_count += 1
Cur_node = BIOSTREE(Fd_Struct[i+1][0]+ str(Fv_count))
Cur_node.type = Fd_Struct[i+1][0]
Cur_node.Data = FvNode(Fv_count, whole_data[Fd_Struct[i+1][1]:Fd_Struct[i+1][1]+Fd_Struct[i+1][2][0]])
Cur_node.Data.HOffset = Fd_Struct[i+1][1] + offset
Cur_node.Data.DOffset = Cur_node.Data.HOffset+Cur_node.Data.Header.HeaderLength
Cur_node.Data.Data = whole_data[Fd_Struct[i+1][1]+Cur_node.Data.Header.HeaderLength:Fd_Struct[i+1][1]+Cur_node.Data.Size]
WholeFvTree.insertChild(Cur_node)
Fv_count += 1
        # If there is binary data after the final Fv image, add it into the tree as a binary node.
if Fd_Struct[-1][1] + Fd_Struct[-1][2][0] != data_size:
Binary_node = BIOSTREE('BINARY'+ str(Binary_count))
Binary_node.type = BINARY_DATA
Binary_node.Data = BinaryNode(str(Binary_count))
Binary_node.Data.Data = whole_data[Fd_Struct[-1][1]+Fd_Struct[-1][2][0]:]
Binary_node.Data.Size = len(Binary_node.Data.Data)
Binary_node.Data.HOffset = Fd_Struct[-1][1]+Fd_Struct[-1][2][0] + offset
WholeFvTree.insertChild(Binary_node)
Binary_count += 1
    ## Get the first-level Fv images from the Fd file.
def GetFvFromFd(self, whole_data: bytes=b'') -> list:
Fd_Struct = []
data_size = len(whole_data)
cur_index = 0
# Get all the EFI_FIRMWARE_FILE_SYSTEM2_GUID_BYTE FV image offset and length.
while cur_index < data_size:
if EFI_FIRMWARE_FILE_SYSTEM2_GUID_BYTE in whole_data[cur_index:]:
target_index = whole_data[cur_index:].index(EFI_FIRMWARE_FILE_SYSTEM2_GUID_BYTE) + cur_index
if whole_data[target_index+24:target_index+28] == FVH_SIGNATURE and whole_data[target_index-16:target_index] == ZEROVECTOR_BYTE:
Fd_Struct.append([FV_TREE, target_index - 16, unpack("Q", whole_data[target_index+16:target_index+24])])
cur_index = Fd_Struct[-1][1] + Fd_Struct[-1][2][0]
else:
cur_index = target_index + 16
else:
cur_index = data_size
cur_index = 0
# Get all the EFI_FIRMWARE_FILE_SYSTEM3_GUID_BYTE FV image offset and length.
while cur_index < data_size:
if EFI_FIRMWARE_FILE_SYSTEM3_GUID_BYTE in whole_data[cur_index:]:
target_index = whole_data[cur_index:].index(EFI_FIRMWARE_FILE_SYSTEM3_GUID_BYTE) + cur_index
if whole_data[target_index+24:target_index+28] == FVH_SIGNATURE and whole_data[target_index-16:target_index] == ZEROVECTOR_BYTE:
Fd_Struct.append([FV_TREE, target_index - 16, unpack("Q", whole_data[target_index+16:target_index+24])])
cur_index = Fd_Struct[-1][1] + Fd_Struct[-1][2][0]
else:
                    cur_index = target_index + 16
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016-2018 <NAME>
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import os
import logging
import time
import wave
import pyaudio
import wurb_core
def default_settings():
""" Available settings for the this module.
This info is used to define default values and to
generate the wurb_settings_DEFAULT.txt file."""
description = [
'# Settings for the sound recorder.',
]
default_settings = [
{'key': 'rec_directory_path', 'value': '/media/usb0/wurb1_rec'},
{'key': 'rec_filename_prefix', 'value': 'WURB1'},
        {'key': 'rec_format', 'value': 'FS'}, # "TE" (Time Expansion) or "FS" (Full Scan).
{'key': 'rec_max_length_s', 'value': '20'},
        {'key': 'rec_buffers_s', 'value': 2.0}, # Pre- and post-detection sound buffer size, in seconds.
# Hardware.
{'key': 'rec_sampling_freq_khz', 'value': '384'},
{'key': 'rec_microphone_type', 'value': 'USB'}, # "USB" or "M500".
{'key': 'rec_part_of_device_name', 'value': 'Pettersson'},
{'key': 'rec_device_index', 'value': 0}, # Not used if "rec_part_of_device_name" is found.
]
developer_settings = [
{'key': 'rec_source_debug', 'value': 'N'},
{'key': 'rec_proc_debug', 'value': 'N'},
{'key': 'rec_target_debug', 'value': 'N'},
{'key': 'rec_source_adj_time_on_drift', 'value': 'Y'},
]
#
return description, default_settings, developer_settings
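# Hedged sketch (not part of the original module): one way the tuples returned by
# default_settings() could be rendered into a wurb_settings_DEFAULT.txt-style text,
# with the description lines first and one 'key: value' row per setting. The exact
# file format used by WurbSettings is an assumption here.
def _example_render_default_settings():
    description, defaults, _developer = default_settings()
    lines = list(description)
    for item in defaults:
        lines.append('{}: {}'.format(item['key'], item['value']))
    return '\n'.join(lines)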
def get_device_list():
""" Sound source util. Check connected sound cards. """
py_audio = pyaudio.PyAudio()
device_list = []
device_count = py_audio.get_device_count()
for index in range(device_count):
info_dict = py_audio.get_device_info_by_index(index)
# Sound card for input only.
if info_dict['maxInputChannels'] != 0:
device_list.append(info_dict['name'])
#
return device_list
def get_device_index(part_of_device_name):
""" Sound source util. Lookup for device by name. """
py_audio = pyaudio.PyAudio()
device_count = py_audio.get_device_count()
for index in range(device_count):
info_dict = py_audio.get_device_info_by_index(index)
if part_of_device_name in info_dict['name']:
return index
#
return None
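# Minimal usage sketch (assumption, not part of the original module): resolve the PyAudio
# input device index from a partial sound card name, falling back to a fixed index when no
# matching input device is connected. Parameter defaults here are illustrative only.
def _example_resolve_input_device(part_of_name='Pettersson', fallback_index=0):
    index = get_device_index(part_of_name)  # None when no input device name matches.
    return fallback_index if index is None else index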
class WurbRecorder(object):
""" """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
self._sound_manager = None
# self._is_recording = False
def setup_sound_manager(self):
""" """
# Sound stream parts:
# - Source
self._sound_source = None
if self._settings.text('rec_microphone_type') == 'M500':
            # The Pettersson M500 microphone was developed for Windows, so special code is needed to handle it.
self._sound_source = wurb_core.SoundSourceM500(callback_function=self._callback_function)
else:
# Generic USB microphones, including Pettersson M500-384.
self._sound_source = wurb_core.SoundSource(callback_function=self._callback_function)
# - Process.
self._sound_process = wurb_core.SoundProcess(callback_function=self._callback_function)
# - Target.
self._sound_target = wurb_core.SoundTarget(callback_function=self._callback_function)
# - Manager.
self._sound_manager = wurb_core.SoundStreamManager(
self._sound_source,
self._sound_process,
self._sound_target)
def start_recording(self):
""" """
if self._sound_manager:
self._sound_manager.start_streaming()
def stop_recording(self, stop_immediate=False):
""" """
if self._sound_manager:
self._sound_manager.stop_streaming(stop_immediate)
class SoundSource(wurb_core.SoundSourceBase):
""" Subclass of SoundSourceBase. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
super(SoundSource, self).__init__()
#
self._debug = self._settings.boolean('rec_source_debug')
self._rec_source_adj_time_on_drift = self._settings.boolean('rec_source_adj_time_on_drift')
#
self._pyaudio = pyaudio.PyAudio()
self._stream = None
#
self.read_settings()
def read_settings(self):
""" Called from base class. """
if self._settings.text('rec_microphone_type') == 'M500':
# For Pettersson M500. Overrides settings.
self._sampling_freq_hz = 500000
else:
# From settings. Defaults for Pettersson M500-384.
self._sampling_freq_hz = self._settings.integer('rec_sampling_freq_khz') * 1000
# Sound card.
in_device_name = self._settings.text('rec_part_of_device_name')
in_device_index = self._settings.integer('rec_device_index') # Default=0. First recognized sound card.
if in_device_name:
self._in_device_index = wurb_core.get_device_index(in_device_name)
else:
self._in_device_index = in_device_index
self._logger.info('Recorder: Sampling frequency (hz): ' + str(self._sampling_freq_hz))
def _setup_pyaudio(self):
""" """
# Initiate PyAudio.
try:
self._stream = self._pyaudio.open(
format = self._pyaudio.get_format_from_width(2), # 2=16 bits.
channels = 1, # 1=Mono.
rate = self._sampling_freq_hz,
frames_per_buffer = self._sampling_freq_hz, # Buffer 1 sec.
input = True,
output = False,
input_device_index = self._in_device_index,
start = False,
)
except Exception as e:
self._stream = None
self._logger.error('Recorder: Failed to create stream: ' + str(e))
# Report to state machine.
if self._callback_function:
self._callback_function('rec_source_error')
return
def source_exec(self):
""" Called from base class. """
if self._stream is None:
self._setup_pyaudio()
#
if self._stream:
self._active = True
self._stream_active = True
self._stream_time_s = time.time()
self._stream.start_stream()
else:
self._logger.error('Recorder: Failed to read stream.')
return
#
buffer_size = int(self._sampling_freq_hz / 2)
# Main source loop.
try:
data = self._stream.read(buffer_size) #, exception_on_overflow=False)
while self._active and data:
# Add time and check for time drift.
self._stream_time_s += 0.5 # One buffer is 0.5 sec.
if (self._stream_time_s > (time.time() + 10)) or \
(self._stream_time_s < (time.time() - 10)):
#
time_diff_s = int(time.time() - self._stream_time_s)
if self._rec_source_adj_time_on_drift:
self._logger.warning('Recorder: Rec. time adjusted. Diff: ' + str(time_diff_s) + ' sec.')
self._stream_time_s = time.time()
else:
self._logger.debug('Recorder: Rec. time drift. Diff: ' + str(time_diff_s) + ' sec.')
# Push time and data buffer.
self.push_item((self._stream_time_s, data))
#
data = self._stream.read(buffer_size) #, exception_on_overflow=False)
except Exception as e:
self._logger.error('Recorder: Failed to read stream: ' + str(e))
# Main loop terminated.
self._logger.debug('Source: Source terminated.')
self.push_item(None)
#
if self._stream is not None:
try:
self._stream.stop_stream()
self._stream.close()
except:
self._logger.error('Recorder: Pyaudio stream stop/close failed.')
self._stream = None
class SoundSourceM500(SoundSource):
""" Subclass of SoundSource for the Pettersson M500 microphone. """
def __init__(self, callback_function=None):
""" """
super(SoundSourceM500, self).__init__(callback_function)
#
self._debug = self._settings.boolean('rec_source_debug')
self._rec_source_adj_time_on_drift = self._settings.boolean('rec_source_adj_time_on_drift')
#
self._m500batmic = None
def source_exec(self):
""" For the Pettersson M500 microphone. """
self._active = True
#
try:
if not self._m500batmic:
self._m500batmic = wurb_core.PetterssonM500BatMic()
#
self._stream_active = True
#
self._stream_time_s = time.time()
self._m500batmic.start_stream()
self._m500batmic.led_on()
except Exception as e:
self._logger.error('Recorder: Failed to create stream: ' + str(e))
# Report to state machine.
if self._callback_function:
self._callback_function('rec_source_error')
return
#
# buffer_size = int(self._sampling_freq_hz / 2)
        buffer_size = int(self._sampling_freq_hz)  # 0.5 sec of 16-bit samples, counted in bytes.
# Main source loop.
data = self._m500batmic.read_stream().tostring()
data_array = data
while self._active and (len(data) > 0):
# Push 0.5 sec each time. M500 can't deliver that size directly.
if len(data_array) >= buffer_size:
# Add time and check for time drift.
self._stream_time_s += 0.5 # One buffer is 0.5 sec.
if (self._stream_time_s > (time.time() + 10)) or \
(self._stream_time_s < (time.time() - 10)):
#
time_diff_s = int(time.time() - self._stream_time_s)
if self._rec_source_adj_time_on_drift:
self._logger.warning('Recorder: Rec. time adjusted. Diff: ' + str(time_diff_s) + ' sec.')
self._stream_time_s = time.time()
else:
self._logger.debug('Recorder: Rec. time drift. Diff: ' + str(time_diff_s) + ' sec.')
# Push time and data buffer.
self.push_item((self._stream_time_s, data_array[0:buffer_size]))
data_array = data_array[buffer_size:]
#
data = self._m500batmic.read_stream().tostring()
data_array += data
#
self._logger.debug('Source M500: Source terminated.')
self.push_item(None)
#
self._m500batmic.stop_stream()
class SoundProcess(wurb_core.SoundProcessBase):
""" Subclass of SoundProcessBase. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
super(SoundProcess, self).__init__()
#
self._debug = self._settings.boolean('rec_proc_debug')
self._rec_buffers_s = self._settings.float('rec_buffers_s')
def process_exec(self):
""" Called from base class. """
self._active = True
# Get sound detector based on user settings.
sound_detector = None
try:
sound_detector = wurb_core.SoundDetector().get_detector()
except Exception as e:
sound_detector = None
            self._logger.error('Recorder: SoundDetector exception: ' + str(e))
sound_detected = False
#
buffer_size = int(self._rec_buffers_s * 2.0) # Buffers are of 0.5 sec length.
#
silent_buffer = []
silent_counter = 9999 # Don't send before sound detected.
try:
while self._active:
time_and_data = self.pull_item()
if time_and_data is None:
self._logger.debug('Rec-process terminated.')
self._active = False
# Terminated by previous step.
self.push_item(None)
else:
# self.process_buffer(raw_data)
try:
sound_detected = sound_detector.check_for_sound(time_and_data)
except Exception as e:
sound_detected = True
#
if sound_detected:
if self._debug:
print('DEBUG: Sound detected.')
# Send pre buffer if this is the first one.
if len(silent_buffer) > 0:
for silent_time_and_data in silent_buffer:
self.push_item(silent_time_and_data)
#
silent_buffer = []
# Send buffer.
self.push_item(time_and_data)
silent_counter = 0
else:
if self._debug:
print('DEBUG: Sound not detected. Counter: ', silent_counter)
if silent_counter < buffer_size: # Unit 0.5 sec.
# Send after sound detected.
self.push_item(time_and_data)
silent_counter += 1
elif silent_counter < (buffer_size * 2): # Unit 0.5 sec.
# Accept longer silent part between pulses.
silent_buffer.append(time_and_data)
silent_counter += 1
else:
# Silent, but store in pre buffer.
self.push_item(False)
silent_buffer.append(time_and_data)
while len(silent_buffer) > buffer_size: # Unit 0.5sec.
silent_buffer.pop(0)
except Exception as e:
            self._logger.error('Recorder: Sound process_exec exception: ' + str(e))
class SoundTarget(wurb_core.SoundTargetBase):
""" Subclass of SoundTargetBase. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
super(SoundTarget, self).__init__()
# From settings.
self._dir_path = self._settings.text('rec_directory_path')
self._filename_prefix = self._settings.text('rec_filename_prefix')
rec_max_length_s = self._settings.integer('rec_max_length_s')
self._rec_max_length = rec_max_length_s * 2
# Default for latitude/longitude in the decimal degree format.
self._latitude = float(self._settings.float('default_latitude'))
self._longitude = float(self._settings.float('default_longitude'))
# Different microphone types.
if self._settings.text('rec_microphone_type') == 'M500':
# For M500 only.
if self._settings.text('rec_format') == 'TE':
self._filename_rec_type = 'TE500'
self._out_sampling_rate_hz = 50000
else:
self._filename_rec_type = 'FS500'
self._out_sampling_rate_hz = 500000
else:
            # For standard USB microphones, including the M500-384.
if self._settings.text('rec_format') == 'TE':
self._filename_rec_type = 'TE' + self._settings.text('rec_sampling_freq_khz')
self._out_sampling_rate_hz = self._settings.integer('rec_sampling_freq_khz') * 100
else:
self._filename_rec_type = 'FS' + self._settings.text('rec_sampling_freq_khz')
                self._out_sampling_rate_hz = self._settings.integer('rec_sampling_freq_khz') * 1000
                            loss_W3 += self.fc_reg * tf.reduce_sum(tf.square(self.W3[i]))
self.loss_reg = loss_em + loss_W1 + loss_W2 + loss_W3
if self.b_num == 3:
self.score_ipv, self.score_cart, self.score_buy = self._create_inference()
self.loss_ipv = tf.losses.log_loss(self.labels_ipv, self.score_ipv)
self.loss_cart = tf.losses.log_loss(self.labels_cart, self.score_cart)
self.loss_buy = tf.losses.log_loss(self.labels_buy, self.score_buy)
self.loss = self.ipv_loss_coefficient * self.loss_ipv + \
self.cart_loss_coefficient * self.loss_cart + \
self.buy_loss_coefficient * self.loss_buy + self.loss_reg
# used for cascade training
if self.cascade_mode == 'pretrain':
self.loss1 = self.loss_ipv + loss_em + loss_W1
self.loss2 = self.loss_ipv + self.loss_cart + loss_em + loss_W1 + loss_W2
self.loss3 = self.loss_ipv + self.loss_cart + self.loss_buy + self.loss_reg
elif self.cascade_mode == 'frozen':
self.loss1 = self.loss_ipv + loss_em + loss_W1
self.loss2 = 1/2 * (self.loss_ipv + self.loss_cart) + loss_em + loss_W2
self.loss3 = 1/3 * (self.loss_ipv + self.loss_cart + self.loss_buy) + loss_em + loss_W3
else:
self.loss1 = self.loss_ipv + loss_em + loss_W1
self.loss2 = self.loss_cart + loss_em + loss_W2
self.loss3 = self.loss_buy + loss_em + loss_W3
else:
if self.b_2_type == 'cb':
self.score_cart, self.score_buy = self._create_inference()
self.loss_cart = tf.losses.log_loss(self.labels_cart, self.score_cart)
self.loss_buy = tf.losses.log_loss(self.labels_buy, self.score_buy)
self.loss = self.cart_loss_coefficient * self.loss_cart + \
self.buy_loss_coefficient * self.loss_buy + self.loss_reg
elif self.b_2_type == 'vb':
self.score_ipv, self.score_buy = self._create_inference()
self.loss_ipv = tf.losses.log_loss(self.labels_ipv, self.score_ipv)
self.loss_buy = tf.losses.log_loss(self.labels_buy, self.score_buy)
self.loss = self.cart_loss_coefficient * self.loss_ipv + \
self.buy_loss_coefficient * self.loss_buy + self.loss_reg
self.loss1 = self.loss_ipv + loss_em + loss_W1
self.loss2 = self.loss_buy + loss_em + loss_W2
else:
self.score_ipv, self.score_cart = self._create_inference()
self.loss_ipv = tf.losses.log_loss(self.labels_ipv, self.score_ipv)
self.loss_cart = tf.losses.log_loss(self.labels_cart, self.score_cart)
self.loss = self.cart_loss_coefficient * self.loss_ipv + \
self.buy_loss_coefficient * self.loss_cart + self.loss_reg
else:
pass
def build_graph(self):
self.g = tf.Graph()
with self.g.as_default():
self._create_placeholders()
self._create_variables()
self._create_loss()
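# Hedged NumPy sketch (not part of the original classes): the cascaded scoring used in
# Multi_NCF._create_inference() below averages each behaviour-specific tower output with
# the previous behaviour's logit before adding the item-popularity bias b:
#   ipv = h1(z) + b,  cart = (h2(z) + ipv) / 2 + b,  buy = (h3(z) + cart) / 2 + b
# The helper mirrors that arithmetic on plain arrays; all names are illustrative.
def _example_cascaded_scores(tower_ipv, tower_cart, tower_buy, item_bias):
    import numpy as np
    output_ipv = tower_ipv + item_bias
    output_cart = (tower_cart + output_ipv) / 2.0 + item_bias
    output_buy = (tower_buy + output_cart) / 2.0 + item_bias
    def sigmoid(x):
        # Turn the logits into per-behaviour interaction probabilities.
        return 1.0 / (1.0 + np.exp(-x))
    return sigmoid(output_ipv), sigmoid(output_cart), sigmoid(output_buy)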
class Multi_NCF():
def __init__(self, num_users, num_items, args):
self.loss_func = args.loss_func
self.num_users = num_users
self.num_items = num_items
self.embedding_size = args.embed_size
self.learning_rate = args.lr
regs = eval(args.regs)
self.embedding_reg = regs[0]
self.fc_reg = regs[1]
loss_coefficient = eval(args.loss_coefficient)
self.cart_loss_coefficient = loss_coefficient[1]
self.buy_loss_coefficient = loss_coefficient[2]
self.ipv_loss_coefficient = 1 - self.cart_loss_coefficient - self.buy_loss_coefficient
self.opt = args.optimizer
self.layer_num = args.layer_num
self.b_num = args.b_num
self.b_2_type = args.b_2_type
def _create_placeholders(self):
with tf.name_scope('input_data'):
self.user_input = tf.placeholder(tf.int32, shape = [None, 1], name = 'user_input')
self.item_input = tf.placeholder(tf.int32, shape = [None, None], name = 'item_input')
self.labels_ipv = tf.placeholder(tf.float32, shape = [None, 1], name = 'labels_ipv')
self.labels_cart = tf.placeholder(tf.float32, shape = [None, 1], name = 'labels_cart')
self.labels_buy = tf.placeholder(tf.float32, shape = [None, 1], name = 'labels_buy')
def _create_variables(self):
with tf.name_scope('shared_embedding'):
self.embedding_P = tf.Variable(
tf.truncated_normal(shape = [self.num_users, self.embedding_size],
mean = 0.0, stddev = 0.01), name = 'embedding_P', dtype = tf.float32)
self.embedding_Q = tf.Variable(
tf.truncated_normal(shape = [self.num_items, self.embedding_size],
mean = 0.0, stddev = 0.01), name = 'embedding_Q', dtype = tf.float32)
with tf.name_scope('shared_bias'):
self.bias = tf.Variable(
tf.zeros([self.num_items, 1]),
name = 'bias', dtype = tf.float32)
with tf.name_scope('W_b_h'):
# h-vector
h_size = 3 * int(self.embedding_size/(2**self.layer_num))
self.h_1 = tf.Variable(
tf.random_uniform([h_size, 1], minval=-tf.sqrt(3 / h_size),
maxval=tf.sqrt(3 / h_size)), name='h_1')
self.h_2 = tf.Variable(
tf.random_uniform([h_size, 1], minval=-tf.sqrt(3 / h_size),
maxval=tf.sqrt(3 / h_size)), name='h_2')
self.h_3 = tf.Variable(
tf.random_uniform([h_size, 1], minval=-tf.sqrt(3 / h_size),
maxval=tf.sqrt(3 / h_size)), name='h_3')
if self.layer_num == 0:
pass
elif self.layer_num == 1:
# view specific
self.W1 = tf.Variable(tf.random_uniform(shape=[3*self.embedding_size, int(3*self.embedding_size/2)],
minval=-tf.sqrt(1 / self.embedding_size),
maxval=tf.sqrt(1 / self.embedding_size)), name='W1')
self.b1 = tf.Variable(tf.zeros([1, int(3*self.embedding_size/2)]), dtype=tf.float32, name='b1')
# add cart specific
self.W2 = tf.Variable(tf.random_uniform(shape=[3*self.embedding_size, int(3*self.embedding_size/2)],
minval=-tf.sqrt(3 / (2*self.embedding_size)),
maxval=tf.sqrt(3 / (2*self.embedding_size))), name='W2')
self.b2 = tf.Variable(tf.zeros([1, int(3*self.embedding_size/2)]), dtype=tf.float32, name='b2')
# buy specific
self.W3 = tf.Variable(tf.random_uniform(shape=[3*self.embedding_size, int(3*self.embedding_size/2)],
minval=-tf.sqrt(3 / (2*self.embedding_size)),
maxval=tf.sqrt(3 / (2*self.embedding_size))), name='W3')
self.b3 = tf.Variable(tf.zeros([1, int(3*self.embedding_size/2)]), dtype=tf.float32, name='b3')
else:
self.W1, self.b1 = [], []
self.W2, self.b2 = [], []
self.W3, self.b3 = [], []
for i in range(self.layer_num):
input_size = int(3*self.embedding_size/(2**i))
output_size = int(3*self.embedding_size/(2**(i+1)))
self.W1.append(tf.Variable(tf.random_uniform(shape=[input_size, output_size],
minval=-tf.sqrt(3 / input_size),
maxval=tf.sqrt(3 / input_size)), name='W1_%d' %i))
self.b1.append(tf.Variable(tf.zeros([1, output_size]), dtype=tf.float32, name='b1_%d' %i))
self.W2.append(tf.Variable(tf.random_uniform(shape=[input_size, output_size],
minval=-tf.sqrt(3 / input_size),
maxval=tf.sqrt(3 / input_size)), name='W2_%d' %i))
self.b2.append(tf.Variable(tf.zeros([1, output_size]), dtype=tf.float32, name='b2_%d' %i))
self.W3.append(tf.Variable(tf.random_uniform(shape=[input_size, output_size],
minval=-tf.sqrt(3 / input_size),
maxval=tf.sqrt(3 / input_size)), name='W3_%d' %i))
self.b3.append(tf.Variable(tf.zeros([1, output_size]), dtype=tf.float32, name='b3_%d' %i))
def _create_inference(self):
with tf.name_scope('inference'):
# [B, 1] item-popularity
b = tf.reduce_sum(tf.nn.embedding_lookup(self.bias, self.item_input), 1)
# [B, E] shared embeddings
embedding_p = tf.reduce_sum(tf.nn.embedding_lookup(self.embedding_P, self.user_input), 1)
embedding_q = tf.reduce_sum(tf.nn.embedding_lookup(self.embedding_Q, self.item_input), 1)
# [B, 3E]
z = tf.concat([embedding_p, embedding_q, embedding_p * embedding_q], 1, name='z')
if self.layer_num == 0:
pass
elif self.layer_num == 1:
if self.b_num == 3:
# predict ipv
output_ipv = tf.matmul(tf.nn.relu(tf.matmul(z, self.W1) + self.b1), self.h_1) + b
# predict cart
temp_cart = tf.matmul(tf.nn.relu(tf.matmul(z, self.W2) + self.b2), self.h_2)
output_cart = (temp_cart + output_ipv) / 2 + b
# predict buy
temp_buy = tf.matmul(tf.nn.relu(tf.matmul(z, self.W3) + self.b3), self.h_3)
output_buy = (temp_buy + output_cart) / 2 + b
return (tf.sigmoid(output_ipv, name = 'score_ipv'),
tf.sigmoid(output_cart, name = 'score_cart'),
tf.sigmoid(output_buy, name = 'score_buy'))
else:
if self.b_2_type == 'cb':
# predict cart
output_cart = tf.matmul(tf.nn.relu(tf.matmul(z, self.W1) + self.b1), self.h_1) + b
# predict buy
temp_buy = tf.matmul(tf.nn.relu(tf.matmul(z, self.W2) + self.b2), self.h_2)
output_buy = (temp_buy + output_cart) / 2 + b
return (tf.sigmoid(output_cart, name = 'score_cart'),
tf.sigmoid(output_buy, name = 'score_buy'))
elif self.b_2_type == 'vc':
# predict ipv
output_ipv = tf.matmul(tf.nn.relu(tf.matmul(z, self.W1) + self.b1), self.h_1) + b
# predict cart
temp_cart = tf.matmul(tf.nn.relu(tf.matmul(z, self.W2) + self.b2), self.h_2)
output_cart = (temp_cart + output_ipv) / 2 + b
return (tf.sigmoid(output_ipv, name = 'score_ipv'),
tf.sigmoid(output_cart, name = 'score_cart'))
else:
# predict ipv
output_ipv = tf.matmul(tf.nn.relu(tf.matmul(z, self.W1) + self.b1), self.h_1) + b
# predict buy
temp_buy = tf.matmul(tf.nn.relu(tf.matmul(z, self.W2) + self.b2), self.h_2)
output_buy = (temp_buy + output_ipv) / 2 + b
return (tf.sigmoid(output_ipv, name = 'score_ipv'),
tf.sigmoid(output_buy, name = 'score_buy'))
else:
fc_1, fc_2, fc_3 = [], [], []
for i in range(self.layer_num):
if i == 0:
fc_1.append(tf.nn.relu(tf.matmul(z, self.W1[i]) + self.b1[i]))
fc_2.append(tf.nn.relu(tf.matmul(z, self.W2[i]) + self.b2[i]))
fc_3.append(tf.nn.relu(tf.matmul(z, self.W3[i]) + self.b3[i]))
else:
fc_1.append(tf.nn.relu(tf.matmul(fc_1[i-1], self.W1[i]) + self.b1[i]))
fc_2.append(tf.nn.relu(tf.matmul(fc_2[i-1], self.W2[i]) + self.b2[i]))
fc_3.append(tf.nn.relu(tf.matmul(fc_3[i-1], self.W3[i]) + self.b3[i]))
if self.b_num == 3:
# predict ipv
output_ipv = tf.matmul(fc_1[i], self.h_1) + b
# predict cart
temp_cart = tf.matmul(fc_2[i], self.h_2)
output_cart = (temp_cart + output_ipv) / 2 + b
# predict buy
temp_buy = tf.matmul(fc_3[i], self.h_3)
output_buy = (temp_buy + output_cart) / 2 + b
return (tf.sigmoid(output_ipv, name = 'score_ipv'),
tf.sigmoid(output_cart, name = 'score_cart'),
tf.sigmoid(output_buy, name = 'score_buy'))
else:
if self.b_2_type == 'cb':
# predict cart
output_cart = tf.matmul(fc_1[i], self.h_1) + b
# predict buy
temp_buy = tf.matmul(fc_2[i], self.h_2)
output_buy = (temp_buy + output_cart) / 2 + b
return (tf.sigmoid(output_cart, name = 'score_cart'),
tf.sigmoid(output_buy, name = 'score_buy'))
elif self.b_2_type == 'vc':
# predict ipv
output_ipv = tf.matmul(fc_1[i], self.h_1) + b
# predict cart
temp_cart = tf.matmul(fc_2[i], self.h_2)
                                output_cart = (temp_cart + output_ipv) / 2 + b
return (tf.sigmoid(output_ipv, name = 'score_ipv'),
tf.sigmoid(output_cart, name = 'score_cart'))
else:
# predict ipv
output_ipv = tf.matmul(fc_1[i], self.h_1) + b
# predict buy
temp_buy = tf.matmul(fc_2[i], self.h_2)
output_buy = (temp_buy + output_ipv) / 2 + b
return (tf.sigmoid(output_ipv, name = 'score_ipv'),
tf.sigmoid(output_buy, name = 'score_buy'))
def _create_loss(self):
with tf.name_scope('loss'):
if self.loss_func == 'logloss':
loss_em = self.embedding_reg * tf.reduce_sum(tf.square(self.embedding_P)) + \
self.embedding_reg * tf.reduce_sum(tf.square(self.embedding_Q))
loss_W1 = 0
loss_W2 = 0
loss_W3 = 0
if self.layer_num == 1:
loss_W1 = self.fc_reg * tf.reduce_sum(tf.square(self.W1))
loss_W2 = self.fc_reg * tf.reduce_sum(tf.square(self.W2))
if self.b_num == 3:
loss_W3 = self.fc_reg * tf.reduce_sum(tf.square(self.W3))
else:
for i in range(len(self.W1)):
loss_W1 += self.fc_reg * tf.reduce_sum(tf.square(self.W1[i]))
loss_W2 += self.fc_reg * tf.reduce_sum(tf.square(self.W2[i]))
if self.b_num == 3:
loss_W3 += self.fc_reg * tf.reduce_sum(tf.square(self.W3[i]))
self.loss_reg = loss_em + loss_W1 + loss_W2 + loss_W3
if self.b_num == 3:
self.score_ipv, self.score_cart, self.score_buy = self._create_inference()
self.loss_ipv = tf.losses.log_loss(self.labels_ipv, self.score_ipv)
self.loss_cart = tf.losses.log_loss(self.labels_cart, self.score_cart)
self.loss_buy = tf.losses.log_loss(self.labels_buy, self.score_buy)
self.loss = self.ipv_loss_coefficient * self.loss_ipv + \
self.cart_loss_coefficient * self.loss_cart + \
self.buy_loss_coefficient * self.loss_buy + self.loss_reg
else:
if self.b_2_type == 'cb':
self.score_cart, self.score_buy = self._create_inference()
self.loss_cart = tf.losses.log_loss(self.labels_cart, self.score_cart)
self.loss_buy = tf.losses.log_loss(self.labels_buy, self.score_buy)
self.loss = self.cart_loss_coefficient * self.loss_cart + \
self.buy_loss_coefficient * self.loss_buy + self.loss_reg
elif self.b_2_type == 'vb':
self.score_ipv, self.score_buy = self._create_inference()
self.loss_ipv = tf.losses.log_loss(self.labels_ipv, self.score_ipv)
self.loss_buy = tf.losses.log_loss(self.labels_buy, self.score_buy)
self.loss = self.cart_loss_coefficient * self.loss_ipv + \
self.buy_loss_coefficient * self.loss_buy + self.loss_reg
else:
self.score_ipv, self.score_cart = self._create_inference()
                        self.loss_ipv = tf.losses.log_loss(self.labels_ipv, self.score_ipv)
                        self.loss_cart = tf.losses.log_loss(self.labels_cart, self.score_cart)
                        self.loss = self.cart_loss_coefficient * self.loss_ipv + \
                                    self.buy_loss_coefficient * self.loss_cart + self.loss_reg
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import onnx
import onnx.numpy_helper
import struct
from pathlib import Path
import numpy as np
from onnx import onnx_pb as onnx_proto
from onnxruntime import SessionOptions, InferenceSession, GraphOptimizationLevel
from onnxruntime.quantization.quant_utils import QuantizationMode, QuantizedValueType
from onnxruntime.quantization.quant_utils import find_by_name, get_elem_index, get_mul_node, \
generate_identified_filename, attribute_to_kwarg, type_to_name
from onnxruntime.quantization.quant_utils import onnx_domain, __producer__, __version__
from lpot.adaptor.ox_utils.registry import CreateOpQuantizer, CreateDefaultOpQuantizer
from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, quantize_data, \
QuantizedValue, QuantizedInitializer
from lpot.model.onnx_model import ONNXModel
def _get_qrange_for_qType(qType, reduce_range=False):
'''
Helper function to get the quantization range for a type.
parameter qType: quantization type.
return: quantization range.
'''
if qType == onnx_proto.TensorProto.UINT8:
return 127 if reduce_range else 255
elif qType == onnx_proto.TensorProto.INT8:
# [-64, 64] for reduce_range, and [-127, 127] full_range.
return 128 if reduce_range else 254
else:
raise ValueError('unsupported quantization data type')
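# Usage sketch (assumption, not taken from this file): the value returned by
# _get_qrange_for_qType() is typically used as the denominator when mapping a float
# interval [rmin, rmax] onto the integer grid, e.g. scale = (rmax - rmin) / qrange for
# asymmetric uint8 quantization. The helper name below is illustrative.
def _example_uint8_scale(rmin, rmax, reduce_range=False):
    qrange = _get_qrange_for_qType(onnx_proto.TensorProto.UINT8, reduce_range)
    # Guard against a degenerate constant tensor where rmax == rmin.
    return (rmax - rmin) / qrange if rmax > rmin else 1.0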
class ONNXQuantizer:
def __init__(self, model, q_config, mode, static, quantization_params,
op_types_to_quantize):
self.model = ONNXModel(model)
self.config = q_config
self.reduce_range = False
self.mode = mode # QuantizationMode.Value
self.static = static # use static quantization for inputs.
self.fuse_dynamic_quant = False
self.quantization_params = quantization_params
self.op_types_to_quantize = op_types_to_quantize
self.new_nodes = []
self.opset_version = self.check_opset_version()
# QuantizeRange tensor name and zero tensor name for scale and zero point calculation.
# Used when static is False
self.fixed_qrange_uint8_name = "fixed_quantization_range_uint8"
self.fixed_qrange_int8_name = "fixed_quantization_range_int8"
# For uint8 data-type, to compute zero point,
# we subtract rmin from 0 (represented by fixed_zero_name tensor)
self.fixed_zero_name = "fixed_zero"
# For int8 data-type, zero point is always zero
        # (represented by fixed_zero_point_name tensor)
self.fixed_zero_zp_name = "fixed_zero_zp"
# List of quantized weights
self._quantized_weights = []
# Map of all original value names to quantized value names
self.quantized_value_map = {}
def check_opset_version(self):
ai_onnx_domain = [
opset for opset in self.model.model.opset_import if not opset.domain \
or opset.domain == "ai.onnx"
]
if 1 != len(ai_onnx_domain):
raise ValueError('Failed to find proper ai.onnx domain')
opset_version = ai_onnx_domain[0].version
if opset_version == 10:
print(
"Warning: The original model opset version is {}, which does not support node \
fusions. Please update the model to opset >= 11 for better performance."
.format(opset_version))
return 10
if opset_version < 10:
print(
"Warning: The original model opset version is {}, which does not support \
quantization. Please update the model to opset >= 11. Updating the model \
automatically to opset 11. Please verify the quantized model."
.format(opset_version))
self.model.model.opset_import.remove(ai_onnx_domain[0])
self.model.model.opset_import.extend([onnx.helper.make_opsetid("", 11)])
opset_version = 11
self.fuse_dynamic_quant = True
return opset_version
def remove_fake_quantized_nodes(self): # pragma: no cover
'''
Detect and remove the quantize/dequantizelinear node pairs(fake quantized nodes
in Quantization-Aware training) and reconnect and update the nodes.
!!! not supported now !!!
'''
nodes_to_remove = []
initializers_to_remove = []
for curr_node in self.model.nodes():
if curr_node.op_type == 'QuantizeLinear':
next_node, prev_node, succ_node = None, None, None
for child_node in self.model.get_children(curr_node):
if child_node.op_type == 'DequantizeLinear':
next_node = child_node
if next_node is None:
raise ValueError(
"Remove fake-quantized node pair Error: DequantizeLinear node is \
not found for {}.".format(curr_node.name))
prev_node = self.model.get_parent(curr_node, 0)
if prev_node is None:
raise ValueError("Remove fake-quantized node pair Error: Parent node is \
not found for {}.".format(curr_node.name))
succ_nodes = self.model.get_children(next_node)
if len(succ_nodes) == 0:
raise ValueError("Remove fake-quantized node pair Error: No successive \
nodes found for {}.".format(next_node.name))
# TODO: convert it to the specified input_type
scale_tensor_name = curr_node.input[1]
zp_tensor_name = curr_node.input[2]
initializer_scale = find_by_name(scale_tensor_name, self.model.initializer())
initializer_zp = find_by_name(zp_tensor_name, self.model.initializer())
zp_and_scale = [
onnx.numpy_helper.to_array(initializer_zp),
onnx.numpy_helper.to_array(initializer_scale)
]
# connect the previous and successive node input and output
for succ_node in succ_nodes:
succ_idx = get_elem_index(next_node.output[0], succ_node.input)
if succ_idx != -1:
succ_node.input[succ_idx] = curr_node.input[0]
else:
raise ValueError(
"Remove fake-quantized node pair Error: Connection failed. \
No matched successive node input found for {}.".format(next_node.name))
param_name = curr_node.input[0]
if self.quantization_params is None:
self.quantization_params = {}
self.quantization_params[param_name] = zp_and_scale
# remove fake-quantized nodes
nodes_to_remove.extend([curr_node])
nodes_to_remove.extend([next_node])
# remove unused initializers in graph
initializers_to_remove.extend([initializer_scale])
initializers_to_remove.extend([initializer_zp])
self.model.remove_nodes(nodes_to_remove)
self.model.remove_initializers(initializers_to_remove)
return self.model.model
def should_quantize(self, node):
if node.name in self.config:
return self.config[node.name] != 'fp32'
else:
return False
def quantize_model(self):
self.remove_fake_quantized_nodes()
for node in self.model.nodes():
if self.should_quantize(node):
op_quantizer = CreateOpQuantizer(self, node)
else:
op_quantizer = CreateDefaultOpQuantizer(self, node)
op_quantizer.quantize()
self._dequantize_outputs()
# extend is used to append to the list for a protobuf fields
# https://developers.google.com/protocol-buffers/docs/reference
# /python-generated?csw=1#fields
self.model.graph().ClearField('node')
self.model.graph().node.extend(self.new_nodes)
# Remove weights which are already quantized from graph.
self._remove_quantized_weights()
self.model.model.producer_name = __producer__
self.model.model.producer_version = __version__
return self.model.model
@staticmethod
def tensor_proto_to_array(initializer):
if initializer.data_type == onnx_proto.TensorProto.FLOAT:
weights = onnx.numpy_helper.to_array(initializer)
else:
raise ValueError('Only float type quantization is supported. \
Weights {} is {}. '.format(initializer.name, type_to_name[initializer.data_type]))
return weights
def is_input_a_weight(self, input_name):
initializer = find_by_name(input_name, self.model.initializer())
return initializer is not None
def is_valid_quantize_weight(self, weight_name):
weight = find_by_name(weight_name, self.model.initializer())
return weight is not None and weight.data_type == onnx_proto.TensorProto.FLOAT
def _remove_quantized_weights(self):
''' Remove the weights which are already quantized from graph initializer list.
This function assumes that after quantization, all nodes that previously use a weight:
- use output from DequantizeLinear as input if they do not support quantization.
- use quantized weight if they support quantization.
'''
for weight in self._quantized_weights:
# Remove existing weight initializer
self.model.initializer().remove(weight.initializer)
# Removing input weight to a convolution
try:
weight_input = next(val for val in self.model.graph().input \
if val.name == weight.name)
self.model.graph().input.remove(weight_input)
except StopIteration:
if self.model.ir_version() < 4:
print("Warning: invalid weight name {} found in the graph \
(not a graph input)".format(weight.name))
def _update_weight(self, weight):
'''
Given a weight object, update the graph by doing the following:
- remove old initializer, update new initializers for
quantized weight, zero point, and scale
- remove old weight input, update with new inputs for
quantized weight, zero point, and scale
This function does NOT update the nodes in the graph, just initializers and inputs
'''
quantized_value = self.quantized_value_map[weight.name]
assert (quantized_value is not None)
packed_weight_name = quantized_value.q_name
scale_name = quantized_value.scale_name
zero_point_name = quantized_value.zp_name
# Update packed weight, zero point, and scale initializers
packed_weight_np_data = np.asarray(weight.quantized_data,
dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[weight.qType]
).reshape(weight.initializer.dims)
packed_weight_initializer = onnx.numpy_helper.from_array(packed_weight_np_data,\
packed_weight_name)
if weight.axis is not None:
zero_scale_shape = [weight.initializer.dims[weight.axis]]
else: # scale and zero point must be scalar
zero_scale_shape = []
zero_point_type = weight.qType
scale_initializer = onnx.helper.make_tensor(scale_name, onnx_proto.TensorProto.FLOAT,
zero_scale_shape, weight.scales)
zero_initializer = onnx.helper.make_tensor(zero_point_name, zero_point_type,
zero_scale_shape, weight.zero_points)
self.model.initializer().extend([packed_weight_initializer, scale_initializer,
zero_initializer])
self._quantized_weights.append(weight)
def _get_quantized_weight(self, initializer, qType):
'''
:param initializer: TensorProto initializer
:param qType: type to quantize to
:return: Weight class with quantization information
'''
weights_data = self.tensor_proto_to_array(initializer)
rmin, rmax, zero_point, scale, quantized_weights_data = quantize_data(
weights_data.flatten().tolist(), _get_qrange_for_qType(qType, \
self.reduce_range), qType)
weight = QuantizedInitializer(initializer.name,
initializer, [rmin], [rmax], [zero_point], [scale],
weights_data,
quantized_weights_data,
axis=None,
qType=qType)
# Log entry for this quantized weight
assert (weight.name not in self.quantized_value_map)
quantized_value = QuantizedValue(weight.name, weight.name + "_quantized",
weight.name + "_scale",
weight.name + "_zero_point",
QuantizedValueType.Initializer, None, qType)
self.quantized_value_map[weight.name] = quantized_value
return weight
def _get_dynamic_input_quantization_params(self, input_name, nodes_list, qType):
'''
Create nodes for dynamic quantization of input and add them to nodes_list.
parameter input_name: Name of the input.
parameter nodes_list: new nodes are appended to this list.
parameter qType: type to quantize to.
return: scale_name, zero_point_name, scale_shape, zero_point_shape.
'''
if qType == onnx_proto.TensorProto.INT8:
return self._get_dynamic_input_quantization_params_int8(input_name, nodes_list)
return self._get_dynamic_input_quantization_params_uint8(input_name, nodes_list)
def _get_dynamic_input_quantization_params_int8(self, input_name, nodes_list):
'''
Create nodes for dynamic quantization of input to int8 and add them to nodes_list
parameter input_name: Name of the input.
parameter nodes_list: new nodes are appended to this list.
return: scale_name, zero_point_name, scale_shape, zero_point_shape.
'''
qType = onnx_proto.TensorProto.INT8
# Reduce min and Reduce max
input_scale_name = input_name + "_scale"
reduce_min_name = input_name + "_ReduceMin"
reduce_min_node = onnx.helper.make_node("ReduceMin", [input_name],
[reduce_min_name + ":0"],
reduce_min_name,
keepdims=0)
nodes_list.append(reduce_min_node)
reduce_max_name = input_name + "_ReduceMax"
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import io
import json
import os
import shutil
import tempfile
import uuid
import zipfile
try:
import swat
except ImportError:
swat = None
def create_package(table, input=None):
"""Create an importable model package from a CAS table.
Parameters
----------
table : swat.CASTable
The CAS table containing an ASTORE or score code.
input : DataFrame, type, list of type, or dict of str: type, optional
The expected type for each input value of the target function.
Can be omitted if target function includes type hints. If a DataFrame
is provided, the columns will be inspected to determine type information.
If a single type is provided, all columns will be assumed to be that type,
otherwise a list of column types or a dictionary of column_name: type
may be provided.
Returns
-------
BytesIO
A byte stream representing a ZIP archive which can be imported.
See Also
--------
:meth:`model_repository.import_model_from_zip <.ModelRepository.import_model_from_zip>`
"""
if swat is None:
raise RuntimeError("The 'swat' package is required to work with SAS models.")
if not isinstance(table, swat.CASTable):
raise ValueError(
"Parameter 'table' should be an instance of '%r' but "
"received '%r'." % (swat.CASTable, table)
)
if 'DataStepSrc' in table.columns:
# Input only passed to datastep
return create_package_from_datastep(table, input=input)
return create_package_from_astore(table)
def create_package_from_datastep(table, input=None):
"""Create an importable model package from a score code table.
Parameters
----------
table : swat.CASTable
The CAS table containing the score code.
input : DataFrame, type, list of type, or dict of str: type, optional
The expected type for each input value of the target function.
Can be omitted if target function includes type hints. If a DataFrame
is provided, the columns will be inspected to determine type information.
If a single type is provided, all columns will be assumed to be that type,
otherwise a list of column types or a dictionary of column_name: type
may be provided.
Returns
-------
BytesIO
A byte stream representing a ZIP archive which can be imported.
See Also
--------
:meth:`model_repository.import_model_from_zip <.ModelRepository.import_model_from_zip>`
"""
dscode = table.to_frame().loc[0, 'DataStepSrc']
# Extract inputs if provided
input_vars = []
    # Workaround: a SASDataFrame cannot be truth-tested to check whether it exists, so compare its string form with "None".
if str(input) != "None":
from .pymas.python import ds2_variables
variables = None
if hasattr(input, 'columns'):
# Assuming input is a DataFrame representing model inputs. Use to
# get input variables
variables = ds2_variables(input)
elif isinstance(input, dict):
variables = ds2_variables(input)
if variables:
input_vars = [v.as_model_metadata() for v in variables if not v.out]
# Find outputs from ds code
output_vars = []
for sasline in dscode.split('\n'):
if sasline.strip().startswith('label'):
output_var = {}
for tmp in sasline.split('='):
if 'label' in tmp:
ovarname = tmp.split('label')[1].strip()
output_var.update({"name": ovarname})
                    # Determine whether the variable type is decimal or string.
if "length " + ovarname in dscode:
sastype = (
dscode.split("length " + ovarname)[1].split(';')[0].strip()
)
if "$" in sastype:
output_var.update({"type": "string"})
output_var.update({"length": sastype.split("$")[1]})
else:
output_var.update({"type": "decimal"})
output_var.update({"length": sastype})
else:
# If no length for variable, default is decimal, 8
output_var.update({"type": "decimal"})
output_var.update({"length": 8})
else:
output_var.update(
{"description": tmp.split(';')[0].strip().strip("'")}
)
output_vars.append(output_var)
file_metadata = [{'role': 'score', 'name': 'dmcas_scorecode.sas'}]
zip_file = _build_zip_from_files(
{
'fileMetadata.json': file_metadata,
'dmcas_scorecode.sas': dscode,
'ModelProperties.json': {"scoreCodeType": "dataStep"},
'outputVar.json': output_vars,
'inputVar.json': input_vars,
}
)
return zip_file
def create_package_from_astore(table):
"""Create an importable model package from an ASTORE.
Parameters
----------
table : swat.CASTable
The CAS table containing the ASTORE.
Returns
-------
BytesIO
A byte stream representing a ZIP archive which can be imported.
See Also
--------
:meth:`model_repository.import_model_from_zip <.ModelRepository.import_model_from_zip>`
"""
files = create_files_from_astore(table)
return _build_zip_from_files(files)
def create_files_from_astore(table):
"""Generate files for importing a model from an ASTORE.
Parameters
----------
table : swat.CASTable
The CAS table containing the ASTORE.
Returns
-------
dict
Dictionary of filename: content pairs.
"""
if swat is None:
raise RuntimeError(
"The 'swat' package is required to work with " "ASTORE models."
)
if not isinstance(table, swat.CASTable):
raise ValueError(
"Parameter 'table' should be an instance of '%r' but "
"received '%r'." % (swat.CASTable, table)
)
sess = table.session.get_connection()
sess.loadactionset('astore')
result = sess.astore.describe(rstore=table, epcode=True)
# Model Manager expects a 0-byte ASTORE file. Will retrieve actual ASTORE
# from CAS during model publish.
astore = bytes()
# Raise error if describe action fails
if result.status_code != 0:
raise RuntimeError(result)
astore_key = result.Key.Key[0].strip()
# Remove "Keep" sas code from CAS/EP code so full table plus output are
# returned. This is so the MM performance charts and test work.
keepstart = result.epcode.find("Keep")
keepend = result.epcode.find(";", keepstart)
ep_ds2 = result.epcode[0:keepstart] + result.epcode[keepend + 1 :]
package_ds2 = _generate_package_code(result)
model_properties = _get_model_properties(result)
input_vars = [
get_variable_properties(var) for var in result.InputVariables.itertuples()
]
input_vars = [v for v in input_vars if v.get('role', '').upper() == 'INPUT']
output_vars = [
get_variable_properties(var) for var in result.OutputVariables.itertuples()
]
astore_filename = '_' + uuid.uuid4().hex[:25].upper()
# Copy the ASTORE table to the ModelStore.
# Raise an error if the action fails
with swat.options(exception_on_severity=2):
table.save(name=astore_filename, caslib='ModelStore', replace=True)
file_metadata = [
{'role': 'analyticStore', 'name': ''},
{'role': 'score', 'name': 'dmcas_epscorecode.sas'},
]
astore_metadata = [
{
'name': astore_filename,
'caslib': 'ModelStore',
'uri': '/dataTables/dataSources/cas~fs~cas-shared-default~fs~ModelStore/tables/{}'.format(
astore_filename
),
'key': astore_key,
}
]
return {
'dmcas_packagescorecode.sas': '\n'.join(package_ds2),
'dmcas_epscorecode.sas': ep_ds2,
astore_filename: astore,
'ModelProperties.json': model_properties,
'fileMetadata.json': file_metadata,
'AstoreMetadata.json': astore_metadata,
'inputVar.json': input_vars,
'outputVar.json': output_vars,
}
def _build_zip_from_files(files):
"""Create a ZIP file containing the provided files.
Parameters
----------
files : dict
Dictionary of filename: content to be added to the .zip file.
Returns
-------
BytesIO
Byte stream representation of the .zip file.
"""
try:
# Create a temp folder
folder = tempfile.mkdtemp()
for k, v in files.items():
filename = os.path.join(folder, k)
# Write JSON file
if os.path.splitext(k)[-1].lower() == '.json':
with open(filename, 'w') as f:
json.dump(v, f, indent=1)
else:
mode = 'wb' if isinstance(v, bytes) else 'w'
with open(filename, mode) as f:
f.write(v)
files = os.listdir(folder)
with zipfile.ZipFile(os.path.join(folder, 'model.zip'), 'w') as z:
for file in files:
z.write(os.path.join(folder, file), file)
# Need to return the ZIP file data but also need to ensure the
# directory is cleaned up.
# Read the bytes from disk and return an in memory "file".
with open(os.path.join(folder, 'model.zip'), 'rb') as z:
return io.BytesIO(z.read())
finally:
shutil.rmtree(folder)
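# Hedged usage sketch (not part of the original module): _build_zip_from_files() serializes
# dict values as JSON members and writes str/bytes values verbatim, returning an in-memory
# ZIP stream. The member names and score code below are only an illustration.
def _example_package_bytes():
    files = {
        'ModelProperties.json': {'scoreCodeType': 'dataStep'},  # dict -> JSON member
        'dmcas_scorecode.sas': 'data out; set in; run;',        # str -> written as-is
    }
    zip_stream = _build_zip_from_files(files)  # io.BytesIO holding the ZIP archive
    return zip_stream.getvalue()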
def get_variable_properties(var):
type_mapping = {'interval': '', 'num': 'decimal', 'character': 'string'}
meta = {'name': var.Name.strip(), 'length': int(var.Length)}
# Input variable table has Type & RawType columns, but RawType aligns with Type column from Output variable table.
if hasattr(var, 'RawType'):
meta['type'] = type_mapping[var.RawType.strip().lower()]
else:
meta['type'] = type_mapping[var.Type.strip().lower()]
if hasattr(var, 'Role'):
meta['role'] = var.Role.strip().upper()
return meta
def _get_model_properties(result):
properties = {
"custom properties": [],
"externalUrl": "",
"trainTable": "",
"trainCodeType": "",
"description": "",
"tool": 'SAS Visual Data Mining and Machine Learning',
"toolVersion": "",
"targetVariable": '',
"scoreCodeType": "ds2MultiType",
"externalModelId": "",
"function": '',
"eventProbVar": "",
"modeler": "",
"name": "",
"targetEvent": "",
"targetLevel": "",
"algorithm": '',
}
algorithm = result.Description[result.Description.Attribute == 'Analytic Engine']
if algorithm.size > 0:
algorithm = str(algorithm.Value.iloc[0]).lower()
else:
algorithm = None
def is_classification(r):
"""Determine if the ASTORE model describes a classification model."""
return classification_target(r) is not None
def classification_target(r):
"""Get the name of the classification target variable."""
target = r.OutputVariables.Name[r.OutputVariables.Name.str.startswith('I_')]
if target.shape[0] > 0:
return target.iloc[0].replace('I_', '', 1)
return None
def regression_target(r):
"""Get the name of the regression target variable."""
target = r.OutputVariables.Name.str.startswith('P_')
target = r.OutputVariables.Name[target].iloc[0]
return target.replace('P_', '', 1)
if algorithm == 'glm':
properties['algorithm'] = 'Linear regression'
properties['tool'] = 'SAS Visual Analytics'
properties['function'] = 'prediction'
properties['targetVariable'] = regression_target(result)
elif algorithm == 'logistic':
properties['algorithm'] = 'Logistic regression'
properties['tool'] = 'SAS Visual Analytics'
properties['function'] = 'classification'
properties['targetVariable'] = classification_target(result)
elif algorithm == 'forest':
properties['algorithm'] = 'Random forest'
if is_classification(result):
properties['function'] = 'classification'
properties['targetVariable'] = classification_target(result)
else:
properties['function'] = 'prediction'
properties['targetVariable'] = regression_target(result)
elif algorithm == 'gradboost':
properties['algorithm'] = 'Gradient boosting'
if is_classification(result):
properties['function'] = 'classification'
properties['targetVariable'] = classification_target(result)
if result.OutputVariables.Name.str.startswith('P_').sum() == 2:
properties['targetLevel'] = 'binary'
else:
properties['function'] = 'prediction'
properties['targetVariable'] = regression_target(result)
elif algorithm == 'svmachine':
properties['algorithm'] = 'Support vector machine'
if is_classification(result):
properties['function'] = 'classification'
properties['targetVariable'] = classification_target(result)
properties['targetLevel'] = 'binary'
else:
properties['function'] = 'prediction'
properties['targetVariable'] = regression_target(result)
elif algorithm == 'bnet':
properties['algorithm'] = 'Bayesian network'
properties['function'] = 'classification'
properties['targetVariable'] = classification_target(result)
if result.OutputVariables.Name.str.startswith('P_').sum() == 2:
properties['targetLevel'] = 'binary'
else:
. I11i
if 63 - 63: iIii1I11I1II1 / ooOoO0o
if 24 - 24: Oo0Ooo / iIii1I11I1II1 % OOooOOo * OoOoOO00 - iIii1I11I1II1
if 50 - 50: II111iiii
if 39 - 39: II111iiii . OoOoOO00 - Oo0Ooo * i1IIi . OoooooooOO
if 44 - 44: I1IiiI
def oOO0O0O0OO00oo ( ) :
lisp . lisp_set_exception ( )
if 39 - 39: IiII % OoOoOO00 * I1ii11iIi11i - OoooooooOO - Oo0Ooo
return
if 75 - 75: i11iIiiIii . ooOoO0o % i1IIi . I1IiiI - oO0o + Oo0Ooo
if 66 - 66: oO0o % I1ii11iIi11i . II111iiii / OoOoOO00 / OoO0O00
if 47 - 47: iII111i + O0 / II111iiii * I1IiiI - OoooooooOO . Ii1I
if 28 - 28: oO0o . oO0o . iIii1I11I1II1 . OOooOOo . I1ii11iIi11i * i11iIiiIii
if 72 - 72: I11i
if 26 - 26: IiII % Oo0Ooo
if 72 - 72: O0 + o0oOOo0O0Ooo + I1IiiI / Oo0Ooo
if 83 - 83: IiII - I1IiiI . Ii1I
if 34 - 34: OoOoOO00 - oO0o * OoooooooOO
def IiI1I1IIIi1i ( lisp_socket ) :
lisp . lisp_set_exception ( )
i11i1iiiII = { "lisp-itr" : False , "lisp-etr" : False , "lisp-rtr" : False ,
"lisp-mr" : False , "lisp-ms" : False , "lisp-ddt" : False }
if 73 - 73: O0 * I1Ii111 . i1IIi
while ( True ) :
time . sleep ( 1 )
OO00OoOO = i11i1iiiII
i11i1iiiII = { }
if 45 - 45: II111iiii * i1IIi
for iIii11iI1II in OO00OoOO :
i11i1iiiII [ iIii11iI1II ] = lisp . lisp_is_running ( iIii11iI1II )
if ( OO00OoOO [ iIii11iI1II ] == i11i1iiiII [ iIii11iI1II ] ) : continue
if 25 - 25: OoOoOO00 + iIii1I11I1II1 % I11i / Oo0Ooo * Oo0Ooo
lisp . lprint ( "*** Process '{}' has {} ***" . format ( iIii11iI1II ,
"come up" if i11i1iiiII [ iIii11iI1II ] else "gone down" ) )
if 51 - 51: oO0o - OoO0O00 + iII111i - o0oOOo0O0Ooo . OoO0O00 % I1ii11iIi11i
if 14 - 14: I1IiiI / O0
if 43 - 43: oO0o - IiII % i11iIiiIii * II111iiii . I1Ii111 - I11i
if 13 - 13: OoO0O00
if ( i11i1iiiII [ iIii11iI1II ] == True ) :
lisp . lisp_ipc_lock . acquire ( )
lispconfig . lisp_send_commands ( lisp_socket , iIii11iI1II )
lisp . lisp_ipc_lock . release ( )
if 70 - 70: IiII . I1Ii111 * OoO0O00 + I11i - IiII . IiII
if 60 - 60: i11iIiiIii * Oo0Ooo % OoO0O00 + OoO0O00
if 84 - 84: iIii1I11I1II1 + OoooooooOO
return
if 77 - 77: O0 * I1ii11iIi11i * oO0o + OoO0O00 + I1ii11iIi11i - I1Ii111
if 10 - 10: I1ii11iIi11i + IiII
if 58 - 58: I1IiiI + OoooooooOO / iII111i . ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i
if 62 - 62: II111iiii
if 12 - 12: IiII + II111iiii
if 92 - 92: I1Ii111 % iIii1I11I1II1 - iII111i / i11iIiiIii % ooOoO0o * o0oOOo0O0Ooo
if 80 - 80: iII111i
def iI1I1ii11IIi1 ( ) :
lisp . lisp_set_exception ( )
OOo = 60
if 80 - 80: o0oOOo0O0Ooo / oO0o / Ii1I - I1IiiI % I1Ii111
while ( True ) :
time . sleep ( OOo )
if 44 - 44: I1IiiI % OOooOOo * i11iIiiIii * i11iIiiIii - Oo0Ooo . I1Ii111
o00 = [ ]
i111iiIiiIiI = lisp . lisp_get_timestamp ( )
if 59 - 59: OOooOOo + I1IiiI / II111iiii / OoOoOO00
if 80 - 80: OoOoOO00 + iIii1I11I1II1 . IiII
if 76 - 76: I1IiiI * OOooOOo
if 12 - 12: iIii1I11I1II1 / I11i % Ii1I
for iI1i in lisp . lisp_info_sources_by_address :
OooO00 = lisp . lisp_info_sources_by_address [ iI1i ]
if ( OooO00 . no_timeout ) : continue
if ( OooO00 . uptime + OOo < i111iiIiiIiI ) : continue
if 49 - 49: OoO0O00 + II111iiii / IiII - O0 % Ii1I
o00 . append ( iI1i )
if 27 - 27: OoO0O00 + Oo0Ooo
iIi = OooO00 . nonce
if ( iIi == None ) : continue
if ( iIi in lisp . lisp_info_sources_by_nonce ) :
lisp . lisp_info_sources_by_nonce . pop ( iIi )
if 92 - 92: I1IiiI % iII111i
if 31 - 31: OoooooooOO - oO0o / I1Ii111
if 62 - 62: i11iIiiIii - I11i
if 81 - 81: I11i
if 92 - 92: OOooOOo - Oo0Ooo - OoooooooOO / IiII - i1IIi
if 81 - 81: i1IIi / I1Ii111 % i11iIiiIii . iIii1I11I1II1 * OoOoOO00 + OoooooooOO
for iI1i in o00 :
lisp . lisp_info_sources_by_address . pop ( iI1i )
if 31 - 31: i1IIi % II111iiii
if 13 - 13: iIii1I11I1II1 - II111iiii % O0 . Ii1I % OoO0O00
return
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
if 3 - 3: II111iiii / OOooOOo
if 48 - 48: ooOoO0o . I1ii11iIi11i
if 49 - 49: i1IIi - OoOoOO00 . Oo0Ooo + iIii1I11I1II1 - ooOoO0o / Oo0Ooo
if 24 - 24: oO0o - iII111i / ooOoO0o
if 10 - 10: OoOoOO00 * i1IIi
if 15 - 15: I11i + i1IIi - II111iiii % I1IiiI
if 34 - 34: I1IiiI
def o0OoOo0O00(lisp_ipc_control_socket, lisp_sockets):
    lisp.lisp_set_exception()
    while (True):
        try:
            iI1i1iI1iI = lisp_ipc_control_socket.recvfrom(9000)
        except:
            return(["", "", "", ""])

        iiiI11 = iI1i1iI1iI[0].split(b"@")
        oOOo0 = iI1i1iI1iI[1]

        III1iII1I1ii = iiiI11[0].decode()
        O0o0O0O0O = iiiI11[1].decode()
        oo00O00oO = int(iiiI11[2])
        OOoOoO = iiiI11[3::]

        #
        # If the payload itself contained "@" delimiters, re-join the segments.
        #
        if (len(OOoOoO) > 1):
            OOoOoO = lisp.lisp_bit_stuff(OOoOoO)
        else:
            OOoOoO = OOoOoO[0]

        if (III1iII1I1ii != "control-packet"):
            lisp.lprint(("lisp_core_control_packet_process() received " +
                "unexpected control-packet, message ignored"))
            continue

        lisp.lprint(("{} {} bytes from {}, dest/port: {}/{}, control-" +
            "packet: {}").format(lisp.bold("Receive", False), len(OOoOoO),
            oOOo0, O0o0O0O0O, oo00O00oO, lisp.lisp_format_packet(OOoOoO)))

        #
        # Decode the control header and dispatch on message type.
        #
        oO0ooOOO = lisp.lisp_control_header()
        oO0ooOOO.decode(OOoOoO)
        if (oO0ooOOO.type == lisp.LISP_MAP_REPLY):
            ii11Ii1IiiI1 = lisp.lisp_map_reply()
            ii11Ii1IiiI1.decode(OOoOoO)
            if (O0Oo0O00o0oo0OO(None, 0, ii11Ii1IiiI1.nonce)):
                OO0o0o0oo(lisp_sockets, oOOo0, oo00O00oO, OOoOoO)
            continue
(9, 5, 1, -5): (0, 1),
(9, 5, 1, -4): (0, 1),
(9, 5, 1, -3): (0, 1),
(9, 5, 1, -2): (0, 1),
(9, 5, 1, -1): (0, 1),
(9, 5, 1, 0): (-1, 1),
(9, 5, 1, 1): (-1, 1),
(9, 5, 1, 2): (-1, 1),
(9, 5, 1, 3): (-1, 1),
(9, 5, 1, 4): (-1, 1),
(9, 5, 1, 5): (-1, 1),
(9, 5, 2, -5): (0, 1),
(9, 5, 2, -4): (0, 1),
(9, 5, 2, -3): (0, 1),
(9, 5, 2, -2): (0, 1),
(9, 5, 2, -1): (0, 1),
(9, 5, 2, 0): (0, 1),
(9, 5, 2, 1): (0, 1),
(9, 5, 2, 2): (0, 1),
(9, 5, 2, 3): (0, 1),
(9, 5, 2, 4): (0, 1),
(9, 5, 2, 5): (0, 1),
(9, 5, 3, -5): (0, 1),
(9, 5, 3, -4): (0, 1),
(9, 5, 3, -3): (0, 1),
(9, 5, 3, -2): (0, 1),
(9, 5, 3, -1): (0, 1),
(9, 5, 3, 0): (0, 1),
(9, 5, 3, 1): (0, 1),
(9, 5, 3, 2): (0, 1),
(9, 5, 3, 3): (0, 1),
(9, 5, 3, 4): (0, 1),
(9, 5, 3, 5): (0, 1),
(9, 5, 4, -5): (0, 1),
(9, 5, 4, -4): (0, 1),
(9, 5, 4, -3): (0, 1),
(9, 5, 4, -2): (0, 1),
(9, 5, 4, -1): (0, 1),
(9, 5, 4, 0): (0, 1),
(9, 5, 4, 1): (0, 1),
(9, 5, 4, 2): (0, 1),
(9, 5, 4, 3): (0, 1),
(9, 5, 4, 4): (0, 1),
(9, 5, 4, 5): (0, 1),
(9, 5, 5, -5): (0, 1),
(9, 5, 5, -4): (0, 1),
(9, 5, 5, -3): (0, 1),
(9, 5, 5, -2): (0, 1),
(9, 5, 5, -1): (0, 1),
(9, 5, 5, 0): (0, 1),
(9, 5, 5, 1): (0, 1),
(9, 5, 5, 2): (0, 1),
(9, 5, 5, 3): (0, 1),
(9, 5, 5, 4): (0, 1),
(9, 5, 5, 5): (0, 1),
(9, 6, -5, -5): (0, 1),
(9, 6, -5, -4): (0, 1),
(9, 6, -5, -3): (0, 1),
(9, 6, -5, -2): (0, 1),
(9, 6, -5, -1): (0, 1),
(9, 6, -5, 0): (0, 1),
(9, 6, -5, 1): (0, 1),
(9, 6, -5, 2): (0, 1),
(9, 6, -5, 3): (0, 1),
(9, 6, -5, 4): (0, 1),
(9, 6, -5, 5): (0, 1),
(9, 6, -4, -5): (0, 1),
(9, 6, -4, -4): (0, 1),
(9, 6, -4, -3): (0, 1),
(9, 6, -4, -2): (0, 1),
(9, 6, -4, -1): (0, 1),
(9, 6, -4, 0): (0, 1),
(9, 6, -4, 1): (0, 1),
(9, 6, -4, 2): (0, 1),
(9, 6, -4, 3): (1, 1),
(9, 6, -4, 4): (0, 1),
(9, 6, -4, 5): (0, 1),
(9, 6, -3, -5): (-1, 1),
(9, 6, -3, -4): (-1, 1),
(9, 6, -3, -3): (-1, 1),
(9, 6, -3, -2): (-1, 1),
(9, 6, -3, -1): (-1, 1),
(9, 6, -3, 0): (1, 1),
(9, 6, -3, 1): (1, 1),
(9, 6, -3, 2): (1, 1),
(9, 6, -3, 3): (1, 1),
(9, 6, -3, 4): (1, 1),
(9, 6, -3, 5): (1, 0),
(9, 6, -2, -5): (-1, 1),
(9, 6, -2, -4): (-1, 1),
(9, 6, -2, -3): (-1, 0),
(9, 6, -2, -2): (0, 1),
(9, 6, -2, -1): (1, 1),
(9, 6, -2, 0): (1, 1),
(9, 6, -2, 1): (1, 1),
(9, 6, -2, 2): (1, 1),
(9, 6, -2, 3): (1, 1),
(9, 6, -2, 4): (1, 1),
(9, 6, -2, 5): (1, 0),
(9, 6, -1, -5): (-1, 1),
(9, 6, -1, -4): (-1, 1),
(9, 6, -1, -3): (-1, 1),
(9, 6, -1, -2): (-1, 1),
(9, 6, -1, -1): (1, 1),
(9, 6, -1, 0): (1, 1),
(9, 6, -1, 1): (1, 1),
(9, 6, -1, 2): (1, 1),
(9, 6, -1, 3): (1, 1),
(9, 6, -1, 4): (1, 1),
(9, 6, -1, 5): (1, 0),
(9, 6, 0, -5): (1, 1),
(9, 6, 0, -4): (1, 1),
(9, 6, 0, -3): (1, 1),
(9, 6, 0, -2): (1, 1),
(9, 6, 0, -1): (0, 1),
(9, 6, 0, 0): (0, 1),
(9, 6, 0, 1): (0, 1),
(9, 6, 0, 2): (0, 1),
(9, 6, 0, 3): (0, 1),
(9, 6, 0, 4): (0, 1),
(9, 6, 0, 5): (0, 1),
(9, 6, 1, -5): (0, 1),
(9, 6, 1, -4): (0, 1),
(9, 6, 1, -3): (0, 1),
(9, 6, 1, -2): (0, 1),
(9, 6, 1, -1): (0, 1),
(9, 6, 1, 0): (-1, 1),
(9, 6, 1, 1): (-1, 1),
(9, 6, 1, 2): (-1, 1),
(9, 6, 1, 3): (-1, 1),
(9, 6, 1, 4): (-1, 1),
(9, 6, 1, 5): (-1, 1),
(9, 6, 2, -5): (0, 1),
(9, 6, 2, -4): (0, 1),
(9, 6, 2, -3): (0, 1),
(9, 6, 2, -2): (0, 1),
(9, 6, 2, -1): (0, 1),
(9, 6, 2, 0): (0, 1),
(9, 6, 2, 1): (0, 1),
(9, 6, 2, 2): (0, 1),
(9, 6, 2, 3): (0, 1),
(9, 6, 2, 4): (0, 1),
(9, 6, 2, 5): (0, 1),
(9, 6, 3, -5): (0, 1),
(9, 6, 3, -4): (0, 1),
(9, 6, 3, -3): (0, 1),
(9, 6, 3, -2): (0, 1),
(9, 6, 3, -1): (0, 1),
(9, 6, 3, 0): (0, 1),
(9, 6, 3, 1): (0, 1),
(9, 6, 3, 2): (0, 1),
(9, 6, 3, 3): (0, 1),
(9, 6, 3, 4): (0, 1),
(9, 6, 3, 5): (0, 1),
(9, 6, 4, -5): (0, 1),
(9, 6, 4, -4): (0, 1),
(9, 6, 4, -3): (0, 1),
(9, 6, 4, -2): (0, 1),
(9, 6, 4, -1): (0, 1),
(9, 6, 4, 0): (0, 1),
(9, 6, 4, 1): (0, 1),
(9, 6, 4, 2): (0, 1),
(9, 6, 4, 3): (0, 1),
(9, 6, 4, 4): (0, 1),
(9, 6, 4, 5): (0, 1),
(9, 6, 5, -5): (0, 1),
(9, 6, 5, -4): (0, 1),
(9, 6, 5, -3): (0, 1),
(9, 6, 5, -2): (0, 1),
(9, 6, 5, -1): (0, 1),
(9, 6, 5, 0): (0, 1),
(9, 6, 5, 1): (0, 1),
(9, 6, 5, 2): (0, 1),
(9, 6, 5, 3): (0, 1),
(9, 6, 5, 4): (0, 1),
(9, 6, 5, 5): (0, 1),
(9, 7, -5, -5): (0, 1),
(9, 7, -5, -4): (0, 1),
(9, 7, -5, -3): (0, 1),
(9, 7, -5, -2): (0, 1),
(9, 7, -5, -1): (0, 1),
(9, 7, -5, 0): (0, 1),
(9, 7, -5, 1): (0, 1),
(9, 7, -5, 2): (0, 1),
(9, 7, -5, 3): (0, 1),
(9, 7, -5, 4): (0, 1),
(9, 7, -5, 5): (0, 1),
(9, 7, -4, -5): (0, 1),
(9, 7, -4, -4): (0, 1),
(9, 7, -4, -3): (0, 1),
(9, 7, -4, -2): (0, 1),
(9, 7, -4, -1): (0, 1),
(9, 7, -4, 0): (0, 1),
(9, 7, -4, 1): (0, 1),
(9, 7, -4, 2): (0, 1),
(9, 7, -4, 3): (0, 1),
(9, 7, -4, 4): (1, 1),
(9, 7, -4, 5): (1, 0),
(9, 7, -3, -5): (-1, 1),
(9, 7, -3, -4): (-1, 1),
(9, 7, -3, -3): (-1, 1),
(9, 7, -3, -2): (-1, 1),
(9, 7, -3, -1): (-1, 1),
(9, 7, -3, 0): (1, 1),
(9, 7, -3, 1): (1, 1),
(9, 7, -3, 2): (1, 1),
(9, 7, -3, 3): (1, 1),
(9, 7, -3, 4): (1, 1),
(9, 7, -3, 5): (1, 0),
(9, 7, -2, -5): (-1, 1),
(9, 7, -2, -4): (-1, 0),
(9, 7, -2, -3): (0, 1),
(9, 7, -2, -2): (0, 1),
(9, 7, -2,
import time
import numpy as np  # np.linspace is used by several of the plans below

# ls.ch1_read gives motor status, read.value gives only temperature
# NOTE: the detector/motor/plan objects referenced below (pil300KW, pil1M,
# waxs, stage, piezo, ls, sample_id, det_exposure_time, e_grid_scan, escan,
# bps, bp) are expected to come from the beamline's IPython startup profile;
# they are not imported in this file.
def run_articulatus_test1(t=5, name = 'TD'):
x_list = [0.75]
y_range = [4.9, 5.125,10]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [6, 42, 7]
samples = ['RZ_T4_Tooth']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
def run_articulatus_test2(t=5, name = 'TD'):
x_list = [0.65]
y_range = [4.3, 4.9,12]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [6, 42, 7]
samples = ['RZ_T4_Stylus']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
def run_articulatus_test3(t=5, name = 'TD'):
x_list = [0.65]
y_range = [3.825, 3.95,6]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [6, 42, 7]
samples = ['RZ_T5_Tooth']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
def run_articulatus_test4(t=5, name = 'TD'):
x_list = [0.65]
y_range = [3.7, 3.825,6]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [6, 42, 7]
samples = ['RZ_T5_Sytlus']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
def run_chitont7(t=10, name = 'TD', s_name = 'Cryptochiton_stellerit7'):
y_range = [3.725, 3.825, 3]
x_list = np.linspace(-8.225, -7.75, 20)
dets = [pil300KW, pil1M]
waxs_arc = [4, 58, 10]
samples = [s_name]
name_fmt = '{s_name}_{x_position}'
# param = '16.1keV'
#assert len(y_list) == len(samples), f'Number of X coordinates ({len(y_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x in x_list:
yield from bps.mv(stage.x, x)
x = stage.x.position
sample_name = name_fmt.format(s_name=s_name, x_position = x)
print(f'\n\t=== Sample: {sample_name} ===\n')
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_chitont15(t=10, name = 'TD', s_name = 'Cryptochiton_stellerit15'):
y_range = [3.35, 3.5, 4]
x_list = np.linspace(-4.15, -3.125, 42)
dets = [pil300KW, pil1M]
waxs_arc = [4, 58, 10]
samples = [s_name]
name_fmt = '{s_name}_{x_position}'
# param = '16.1keV'
#assert len(y_list) == len(samples), f'Number of X coordinates ({len(y_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x in x_list:
yield from bps.mv(stage.x, x)
x = stage.x.position
sample_name = name_fmt.format(s_name=s_name, x_position = x)
print(f'\n\t=== Sample: {sample_name} ===\n')
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_chitont30(t=10, name = 'TD', s_name = 'Cryptochiton_stellerit30'):
y_range = [3.025, 3.425, 9]
x_list = np.linspace(-1.9, -0.85, 43)
dets = [pil300KW, pil1M]
waxs_arc = [4, 58, 10]
samples = [s_name]
name_fmt = '{s_name}_{x_position}'
# param = '16.1keV'
#assert len(y_list) == len(samples), f'Number of X coordinates ({len(y_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x in x_list:
yield from bps.mv(stage.x, x)
x = stage.x.position
sample_name = name_fmt.format(s_name=s_name, x_position = x)
print(f'\n\t=== Sample: {sample_name} ===\n')
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_chitont45(t=10, name = 'TD', s_name = 'Cryptochiton_stellerit45'):
y_range = [3, 3.45, 19]
x_list = np.linspace(-1.9, -0.85, 43)
dets = [pil300KW, pil1M]
waxs_arc = [4, 58, 10]
samples = [s_name]
name_fmt = '{s_name}_{x_position}'
# param = '16.1keV'
#assert len(y_list) == len(samples), f'Number of X coordinates ({len(y_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x in x_list:
yield from bps.mv(stage.x, x)
x = stage.x.position
sample_name = name_fmt.format(s_name=s_name, x_position = x)
print(f'\n\t=== Sample: {sample_name} ===\n')
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_rz1(t=5, name = 'TD'):
x_list = [9.44]
y_range = [6.209,6.235,14]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [4, 58, 10]
samples = ['RZ_18-0731_Leftsection_line1']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_rz2(t=5, name = 'RZ'):
x_list = [-15.337]
y_range = [5.557,5.569,7]
#y_offset
# Detectors, motors:
dets = [pil300KW,ls.ch1_read]
waxs_arc = [4, 58, 10]
samples = ['18-0731_Leftsection_line2']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
yield from bps.sleep(300)
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from e_grid_scan(dets, stage.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_td1(t=5, name = 'TD'):
x_list = [24.9, 18.6, 12.3, 6, -0.3, -6.6, -12.9, -19.2]
y_list = [-0.3]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [6, 42, 7]
samples = ['NaH2PO4', 'PAN-Ni', 'PAN']
name_fmt = '{sample}_{y_position}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
yield from bps.mv(stage.x, x)
for y in (y_list):
sample_name = name_fmt.format(sample=sample, y_position=y)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(stage.y, y)
sample_id(user_name=name, sample_name=sample_name)
yield from escan(dets, waxs, *waxs_arc)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_td2(t=10, name = 'TD'):
y_range = [-50, 50, 5]
x_list = [25930, 19679, 13311, 6948, 585, -5778, -12141, -18504]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [3, 45, 8]
samples = ['NC400', 'PAN600', 'BM600', 'BM', 'NC', 'PANOx', 'BM400', 'CF']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(piezo.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from bp.grid_scan(dets, piezo.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_td_single(t=10, name = 'TD'):
y_range = [60, 160, 5]
x_list = [6948]
#y_offset
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [3, 45, 8]
samples = ['BM_cont']
name_fmt = '{sample}'
# param = '16.1keV'
assert len(x_list) == len(samples), f'Number of X coordinates ({len(x_list)}) is different from number of samples ({len(samples)})'
det_exposure_time(t)
for x, sample in zip(x_list, samples):
sample_name = name_fmt.format(sample=sample)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bps.mv(piezo.x, x)
sample_id(user_name=name, sample_name=sample_name)
yield from bp.grid_scan(dets, piezo.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_ramyaX1(t=5, name = 'BSpec_RT_Hairs', s_name = 'Exo_FineRes'):
y_range = [-1250, -1050, 101]
x_list = np.linspace(14900, 14800, 5)
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [3, 15, 3]
samples = [s_name]
name_fmt = '{s_name}_{x_position}'
# param = '16.1keV'
det_exposure_time(t)
for x in x_list:
yield from bps.mv(piezo.x, x)
x = piezo.x.position
sample_name = name_fmt.format(s_name=s_name, x_position = x)
print(f'\n\t=== Sample: {sample_name} ===\n')
sample_id(user_name=name, sample_name=sample_name)
yield from bp.grid_scan(dets, piezo.y, *y_range, waxs, *waxs_arc, 1)
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5)
def run_ramyaX2(t=5, name = 'BSpec_RT', s_name = 'MesoEndo3_FineRes'):
y_range = [-975, -1175, 101]
x_list = np.linspace(-6240, -6140, 5)
# Detectors, motors:
dets = [pil300KW]
waxs_arc = [3, 15, 3]
samples = [s_name]
name_fmt = '{s_name}_{x_position}'
# param = '16.1keV'
det_exposure_time(t)
    for x in x_list:
        yield from bps.mv(piezo.x, x)
        x = piezo.x.position
        sample_name = name_fmt.format(s_name=s_name, x_position = x)
        print(f'\n\t=== Sample: {sample_name} ===\n')
        sample_id(user_name=name, sample_name=sample_name)
        yield from bp.grid_scan(dets, piezo.y, *y_range, waxs, *waxs_arc, 1)
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5)
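# The run_* plans above all repeat the same move -> label -> grid-scan pattern.
# Below is a minimal, hypothetical consolidation (not part of the original
# beamline profile); it assumes the same ambient objects used above
# (waxs, sample_id, det_exposure_time, bps) and takes the scan plan
# (e_grid_scan or bp.grid_scan) as an argument.
def run_line_scan(dets, motor_x, x_list, motor_y, y_range, waxs_arc,
                  scan_plan, t=5, user_name='TD', s_name='sample'):
    det_exposure_time(t)
    for x in x_list:
        # move to the next x position and read back the actual position
        yield from bps.mv(motor_x, x)
        sample_name = f'{s_name}_{motor_x.position}'
        print(f'\n\t=== Sample: {sample_name} ===\n')
        sample_id(user_name=user_name, sample_name=sample_name)
        # grid scan in y with the WAXS arc at each x position
        yield from scan_plan(dets, motor_y, *y_range, waxs, *waxs_arc, 1)
    # reset bookkeeping, mirroring the existing plans
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5)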
# initialize moveable object slots.
(try_end),
#MM
(call_script, "script_multiplayer_mm_reset_stuff_after_round"),
#Auto-lower squad-size if too many players
(try_begin),
(multiplayer_is_server),
(assign,":num_players",0),
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active,":player_no"),
(val_add,":num_players",1),
(try_end),
(store_mul,":average_num_bots","$g_squad_size_limit",":num_players"),
(gt,":average_num_bots",600), #Try not to have more than 600 bots
(store_div,"$g_squad_size_limit",600,":num_players"),
(assign,reg1,"$g_squad_size_limit"),
(str_store_string, s0, "@SERVER"),
(str_store_string, s4, "str_admin_set_squad_size_s0_reg1"),
(assign, ":mod_variable", mod_variable_squad_size),
(try_for_range, ":cur_player", 1, multiplayer_player_loops_end), #0 is server and already set...
(player_is_active, ":cur_player"),
(multiplayer_send_2_int_to_player, ":cur_player", multiplayer_event_return_mod_variable, ":mod_variable", "$g_squad_size_limit"),
(try_end),
(call_script, "script_multiplayer_broadcast_message"),
(try_end),
]),
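      # Squad-size auto-scaling above: with N active players and a per-player
      # squad limit L the server expects roughly N*L bots; when that product
      # exceeds 600 the limit is reset to 600/N (integer division), stored in
      # $g_squad_size_limit, pushed to every client via
      # multiplayer_event_return_mod_variable and announced with
      # script_multiplayer_broadcast_message.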
(0, 0, 0, [(neg|multiplayer_is_dedicated_server)], #if there is nobody in any teams do not reduce round time.
[
(call_script,"script_multiplayer_reset_round_time_if_no_agents"),
]),
(1, 0, 0, [(multiplayer_is_dedicated_server)], #if there is nobody in any teams do not reduce round time.
[
(call_script,"script_multiplayer_reset_round_time_if_no_agents"),
]),
(1, 0, 0, [(multiplayer_is_server),
(eq, "$g_round_ended", 0),],
[
#(store_add, ":total_bots", "$g_multiplayer_num_bots_team_1", "$g_multiplayer_num_bots_team_2"),
(store_mission_timer_a, ":round_time"),
(val_sub, ":round_time", "$g_round_start_time"),
(assign,":continue",0),
(try_begin),
(lt, ":round_time", multiplayer_new_agents_finish_spawning_time),
(assign,":continue",1),
(else_try),
(eq, "$g_multiplayer_player_respawn_as_bot", 1),
(assign,":continue",1),
(try_end),
(eq,":continue",1),
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(player_get_team_no, ":player_team", ":player_no"),
(try_begin),
(neg|player_is_busy_with_menus, ":player_no"),
(player_slot_eq, ":player_no", slot_player_spawned_this_round, 0),
(lt, ":round_time", multiplayer_new_agents_finish_spawning_time), #lt
(lt, ":player_team", multi_team_spectator), #if player is currently spectator do not spawn his agent
(player_get_troop_id, ":player_troop", ":player_no"), #if troop is not selected do not spawn his agent
(ge, ":player_troop", 0),
(try_begin),
(eq, ":player_team", 0),
(assign, ":entry_no", multi_initial_spawn_point_team_1),
(else_try),
(eq, ":player_team", 1),
(assign, ":entry_no", multi_initial_spawn_point_team_2),
(try_end),
(call_script, "script_multiplayer_buy_agent_equipment", ":player_no"),
(player_spawn_new_agent, ":player_no", ":entry_no"),
(player_set_slot, ":player_no", slot_player_spawned_this_round, 1),
#Spawn Player Squad
(player_get_slot,":selected_bot_type",":player_no",slot_player_bot_type_wanted),
#(call_script,"script_commander_get_squad_size",":player_no"),
#(assign,":num_bots",reg0),
(call_script,"script_scale_num_bots_after_troop_type",":selected_bot_type","$g_squad_size_limit"),
(assign,":num_bots",reg0),
(store_current_scene, ":cur_scene"),
(modify_visitors_at_site, ":cur_scene"),
(add_visitors_to_current_scene, ":entry_no", ":selected_bot_type", ":num_bots", ":player_team", ":player_no"),
#To ensure any balancing bots becomes the same squad even if player changes bot type before balancing kicks in
(player_set_slot,":player_no",slot_player_bot_type_spawned,":selected_bot_type"),
(else_try), #Spawn additional bots to balance team at the end of spawning time
(eq,"$g_scale_squad_size",1),
(player_slot_eq, ":player_no", slot_player_spawned_this_round, 1), #Only add bots for spawned players
(eq, ":round_time", multiplayer_new_agents_finish_spawning_time), #lt
(lt, ":player_team", multi_team_spectator), #if player is currently spectator do not spawn bots for him...
(player_get_agent_id, ":player_agent", ":player_no"),
(ge, ":player_agent", 0),
(agent_is_active,":player_agent"),
(agent_is_alive,":player_agent"), #Only spawn bots for alive players...
(try_begin),
(eq, ":player_team", 0),
(assign, ":entry_no", multi_initial_spawn_point_team_1),
(else_try),
(eq, ":player_team", 1),
(assign, ":entry_no", multi_initial_spawn_point_team_2),
(try_end),
(call_script,"script_commander_get_additional_bots",":player_no"),
(assign,":num_bots",reg0),
(gt,":num_bots",0),
(player_get_slot,":selected_bot_type",":player_no",slot_player_bot_type_spawned),
(is_between,":selected_bot_type",multiplayer_ai_troops_begin,multiplayer_ai_troops_end), #Bot has to be valid
(call_script,"script_scale_num_bots_after_troop_type",":selected_bot_type",":num_bots"),
(assign,":num_bots",reg0),
(store_current_scene, ":cur_scene"),
(modify_visitors_at_site, ":cur_scene"),
(add_visitors_to_current_scene, ":entry_no", ":selected_bot_type", ":num_bots", ":player_team", ":player_no"),
(else_try), #spawning as a bot (if option ($g_multiplayer_player_respawn_as_bot) is 1)
(neg|player_is_busy_with_menus, ":player_no"),
(eq, "$g_multiplayer_player_respawn_as_bot", 1),
(assign,":continue",0),
(player_get_agent_id, ":player_agent", ":player_no"),
(try_begin),
(agent_is_active, ":player_agent"),
(neg|agent_is_alive, ":player_agent"),
(agent_get_time_elapsed_since_removed, ":elapsed_time", ":player_agent"),
(gt, ":elapsed_time", "$g_multiplayer_respawn_period"),
(assign,":continue",1),
(else_try), #If player is dead too long (busy with menus) agent becomes inactive.
(neg|agent_is_active, ":player_agent"), #No active agent
(player_slot_ge, ":player_no", slot_player_spawned_this_round, 1), #But has actually spawned before
(assign,":continue",1), #Then continue
(try_end),
(eq,":continue",1),
(player_get_slot, ":x_coor", ":player_no", slot_player_death_pos_x),
(player_get_slot, ":y_coor", ":player_no", slot_player_death_pos_y),
(player_get_slot, ":z_coor", ":player_no", slot_player_death_pos_z),
(init_position, pos0),
(position_set_x, pos0, ":x_coor"),
(position_set_y, pos0, ":y_coor"),
(position_set_z, pos0, ":z_coor"),
(assign,":bot_agent",-1),
(assign,":min_distance",999999),
(try_for_agents,":cur_agent"),
(agent_is_active,":cur_agent"),
(agent_is_human,":cur_agent"),
(agent_is_alive,":cur_agent"),
(agent_get_team,":agent_team",":cur_agent"),
(eq,":agent_team",":player_team"),
(agent_get_group,":agent_group",":cur_agent"),
(eq,":agent_group",":player_no"),
(agent_get_position, pos1, ":cur_agent"),
(get_distance_between_positions, ":dist", pos0, pos1),
                     (lt,":dist",":min_distance"),
                     (assign,":min_distance",":dist"), # track the running minimum so the nearest friendly bot is chosen
                     (assign,":bot_agent",":cur_agent"),
(try_end),
#(call_script, "script_find_most_suitable_bot_to_control", ":player_no"),
#(assign,":bot_agent",reg0),
(gt,":bot_agent",-1),
(player_control_agent, ":player_no", ":bot_agent"),
#Replace any fake weapons with real
(try_for_range_backwards,":equipment_slot",ek_item_0,ek_head),
(agent_get_item_slot, ":item_id", ":bot_agent", ":equipment_slot"),
(gt,":item_id",-1), # even have an item there?
(try_begin),
(eq,":item_id","itm_french_briquet_garde_fake"),
(agent_unequip_item, ":bot_agent", "itm_french_briquet_garde_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_french_briquet_garde", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_french_briquet_fake"),
(agent_unequip_item, ":bot_agent", "itm_french_briquet_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_french_briquet", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_russian_briquet_1807_fake"),
(agent_unequip_item, ":bot_agent", "itm_russian_briquet_1807_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_russian_briquet_1807", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_russian_briquet_1807_black_fake"),
(agent_unequip_item, ":bot_agent", "itm_russian_briquet_1807_black_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_russian_briquet_1807_black", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_russian_briquet_1807_black_blackbelt_fake"),
(agent_unequip_item, ":bot_agent", "itm_russian_briquet_1807_black_blackbelt_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_russian_briquet_1807_black_blackbelt", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_russian_briquet_1807_landwehr_fake"),
(agent_unequip_item, ":bot_agent", "itm_russian_briquet_1807_landwehr_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_russian_briquet_1807_landwehr", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_russian_peasant_axe_landwehr_fake"),
(agent_unequip_item, ":bot_agent", "itm_russian_peasant_axe_landwehr_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_russian_peasant_axe_landwehr", ":equipment_slot"),
(else_try),
(eq,":item_id","itm_austrian_infantry_briquet_fake"),
(agent_unequip_item, ":bot_agent", "itm_austrian_infantry_briquet_fake", ":equipment_slot"),
(agent_equip_item, ":bot_agent", "itm_austrian_infantry_briquet", ":equipment_slot"),
(try_end),
(try_end),
(player_get_slot, ":num_spawns", ":player_no", slot_player_spawned_this_round),
(val_add, ":num_spawns", 1),
(player_set_slot, ":player_no", slot_player_spawned_this_round, ":num_spawns"),
(try_end),
(try_end),
]),
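      # The spawn trigger above handles three cases each second while the round
      # is live: (1) during the initial spawn window it spawns newly-joined,
      # non-spectator players at their team entry point and adds a squad of
      # bots sized by $g_squad_size_limit; (2) exactly at the end of the window
      # it tops up surviving players with balancing bots when
      # $g_scale_squad_size is enabled; (3) when respawn-as-bot is enabled, a
      # dead player takes control of the nearest living bot of his own squad
      # and any training ("fake") melee weapons on that bot are swapped for the
      # real items.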
#multiplayer_server_spawn_bots,
#multiplayer_server_manage_bots,
multiplayer_server_check_end_map,
(ti_tab_pressed, 0, 0, [],
[
(try_begin),
(eq, "$g_multiplayer_mission_end_screen", 0),
(assign, "$g_multiplayer_stats_chart_opened_manually", 1),
(start_presentation, "prsnt_multiplayer_stats_chart"),
(try_end),
]),
multiplayer_once_at_the_first_frame,
(ti_escape_pressed, 0, 0, [],
[
(neg|is_presentation_active, "prsnt_multiplayer_escape_menu"),
(neg|is_presentation_active, "prsnt_multiplayer_stats_chart"),
(eq, "$g_waiting_for_confirmation_to_terminate", 0),
(start_presentation, "prsnt_multiplayer_escape_menu"),
]),
# 18
] + mm_multiplayer_common,
),
(
"multiplayer_duel",mtf_battle_mode,-1, #duel mode
"You lead your men to battle.",
[
(0,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(1,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(2,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(3,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(4,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(5,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(6,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(7,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(8,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(9,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(10,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(11,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(12,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(13,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(14,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(15,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(16,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(17,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(18,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(19,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(20,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(21,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(22,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(23,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(24,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(25,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(26,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(27,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(28,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(29,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(30,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(31,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[]),
(32,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(33,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(34,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(35,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(36,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(37,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(38,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(39,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(40,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(41,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(42,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(43,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(44,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(45,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(46,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(47,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(48,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(49,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(50,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(51,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(52,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(53,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(54,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(55,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(56,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(57,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(58,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(59,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(60,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(61,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(62,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
(63,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[]),
],
[
multiplayer_server_check_polls, multiplayer_server_generate_build_points,
(ti_on_agent_spawn, 0, 0, [],
[
(store_trigger_param_1, ":agent_no"),
(call_script, "script_multiplayer_server_on_agent_spawn_common", ":agent_no"),
]),
(ti_server_player_joined, 0, 0, [],
[
(store_trigger_param_1, ":player_no"),
(call_script, "script_multiplayer_server_player_joined_common", ":player_no"),
]),
(ti_before_mission_start, 0, 0, [],
[
(assign, "$g_multiplayer_game_type", multiplayer_game_type_duel),
(call_script, "script_multiplayer_server_before_mission_start_common"),
#make everyone see themselves as allies, no friendly fire
(team_set_relation, 0, 0, 1),
(team_set_relation, 0, 1, 1),
(team_set_relation, 1, 1, 1),
(mission_set_duel_mode, 1),
(call_script, "script_multiplayer_init_mission_variables"),
         (call_script, "script_multiplayer_remove_headquarters_flags"), # comment this line out and open the map in deathmatch mode, using all ladders, first
                                                                        # to be able to edit maps without damaging any headquarters flags etc.
#MM
(call_script, "script_multiplayer_mm_before_mission_start_common"),
]),
(ti_after_mission_start, 0, 0, [],
[
(set_spawn_effector_scene_prop_kind, 0, -1), #during this mission, agents of "team 0" will try to spawn around scene props with kind equal to -1(no effector for this mod)
(set_spawn_effector_scene_prop_kind, 1, -1), #during this mission, agents of "team 1" will try to spawn around scene props with kind equal to -1(no effector for this mod)
(call_script, "script_initialize_all_scene_prop_slots"),
(call_script, "script_multiplayer_move_moveable_objects_initial_positions"),
(assign, "$g_multiplayer_ready_for_spawning_agent", 1),
#MM
(call_script, "script_multiplayer_mm_after_mission_start_common"),
]),
(ti_on_multiplayer_mission_end, 0, 0, [],
[
(neg|multiplayer_is_dedicated_server),
(assign, "$g_multiplayer_stats_chart_opened_manually", 0),
(start_presentation, "prsnt_multiplayer_stats_chart_deathmatch"),
]),
(ti_on_agent_killed_or_wounded, 0, 0, [],
[
(store_trigger_param_1, ":dead_agent_no"),
(store_trigger_param_2, ":killer_agent_no"),
(call_script, "script_multiplayer_server_on_agent_killed_or_wounded_common", ":dead_agent_no", ":killer_agent_no"),
(try_begin),
(call_script,"script_client_get_my_agent"),
(assign,":player_agent",reg0),
(agent_is_active, ":player_agent"),
(agent_slot_ge, ":player_agent", slot_agent_in_duel_with, 0),
(try_begin),
(eq, ":dead_agent_no", ":player_agent"),
(display_message, "str_you_have_lost_a_duel"),
(else_try),
(agent_slot_eq, ":player_agent", slot_agent_in_duel_with, ":dead_agent_no"),
(display_message, "str_you_have_won_a_duel"),
(try_end),
(try_end),
(try_begin),
(agent_slot_ge, ":dead_agent_no", slot_agent_in_duel_with, 0),
(agent_get_slot, ":duelist_agent_no", ":dead_agent_no", slot_agent_in_duel_with),
(agent_set_slot, ":dead_agent_no", slot_agent_in_duel_with, -1),
(try_begin),
(agent_is_active, ":duelist_agent_no"),
(agent_set_slot, ":duelist_agent_no", slot_agent_in_duel_with, -1),
(agent_clear_relations_with_agents, ":duelist_agent_no"),
(try_begin),
(agent_get_player_id, ":duelist_player_no", ":duelist_agent_no"),
(neg|player_is_active, ":duelist_player_no"), #might be AI
(agent_force_rethink, ":duelist_agent_no"),
(try_end),
(try_end),
(try_end),
# Vincenzo begin
# Won duel, set health to 100%
(try_begin),
(multiplayer_is_server),
(agent_is_active,":killer_agent_no"),
(agent_is_active,":dead_agent_no"),
(agent_is_human,":dead_agent_no"),
(agent_set_hit_points, ":killer_agent_no", 100, 0), # Heal the player
(agent_refill_ammo,":killer_agent_no"), # and refill ammo.
(agent_get_horse, ":horse_agent", ":killer_agent_no"),
(agent_is_active,":horse_agent"),
(agent_set_hit_points, ":horse_agent", 100, 0), # Heal the Horse
(try_end),
# Vincenzo end
]),
(1, 0, 0, [(multiplayer_is_server),],
[
(try_for_range, ":player_no", "$g_player_loops_begin", multiplayer_player_loops_end),
(player_is_active, ":player_no"),
(neg|player_is_busy_with_menus, ":player_no"),
(player_get_team_no, ":player_team", ":player_no"), #if player is currently spectator do not spawn his agent
(lt, ":player_team", multi_team_spectator),
(player_get_troop_id, ":player_troop", ":player_no"), #if troop is not selected do not spawn his agent
(ge, ":player_troop", 0),
(player_get_agent_id, ":player_agent", ":player_no"),
(assign, ":spawn_new", 0),
(try_begin),
(player_get_slot, ":player_first_spawn", ":player_no", slot_player_first_spawn),
(eq, ":player_first_spawn", 1),
(assign, ":spawn_new", 1),
(player_set_slot, ":player_no", slot_player_first_spawn, 0),
(else_try),
(try_begin),
(lt, ":player_agent", 0),
(assign, ":spawn_new", 1),
(else_try),
(neg|agent_is_alive, ":player_agent"),
(agent_get_time_elapsed_since_removed, ":elapsed_time", ":player_agent"),
(gt, ":elapsed_time", "$g_multiplayer_respawn_period"),
(assign, ":spawn_new", 1),
(try_end),
(try_end),
(eq, ":spawn_new", 1),
(call_script, "script_multiplayer_buy_agent_equipment", ":player_no"),
(troop_get_inventory_slot, ":has_item", ":player_troop", ek_horse),
(try_begin),
(ge, ":has_item", 0),
(assign, ":is_horseman", 1),
(else_try),
(assign, ":is_horseman", 0),
(try_end),
(call_script, "script_multiplayer_find_spawn_point", ":player_team", 0, ":is_horseman"),
(player_spawn_new_agent, ":player_no", reg0),
(try_end),
]),
(1, 0, 0, [ (multiplayer_is_server),
(this_or_next|gt,"$g_multiplayer_num_bots_team_1",0),
(gt,"$g_multiplayer_num_bots_team_2",0), # are there any bots? :p
], #do this in every new frame, but not at the same time
[
(store_mission_timer_a, ":mission_timer"),
(ge, ":mission_timer", 2),
(assign, ":team_1_count", 0),
(assign, ":team_2_count", 0),
(try_for_agents, ":cur_agent"),
(agent_is_active, ":cur_agent"),
(agent_is_non_player, ":cur_agent"),
(agent_is_human, ":cur_agent"),
(assign, ":will_be_counted", 0),
(try_begin),
(agent_is_alive, ":cur_agent"),
(assign, ":will_be_counted", 1), #alive so will be counted
(else_try),
(agent_get_time_elapsed_since_removed, ":elapsed_time", ":cur_agent"),
(le, ":elapsed_time", "$g_multiplayer_respawn_period"),
(assign, ":will_be_counted", 1),
(try_end),
(eq, ":will_be_counted", 1),
(agent_get_team, ":cur_team", ":cur_agent"),
(try_begin),
(eq, ":cur_team", 0),
(val_add, ":team_1_count", 1),
(else_try),
(eq, ":cur_team", 1),
(val_add, ":team_2_count", 1),
(try_end),
(try_end),
(store_sub, "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_team_1", ":team_1_count"),
(store_sub, "$g_multiplayer_num_bots_required_team_2", "$g_multiplayer_num_bots_team_2", ":team_2_count"),
(val_max, "$g_multiplayer_num_bots_required_team_1", 0),
(val_max, "$g_multiplayer_num_bots_required_team_2", 0),
]),
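      # The per-second trigger above recounts, for each team, AI agents that
      # are alive or were removed less than $g_multiplayer_respawn_period ago,
      # and stores the shortfall against the configured bot counts in
      # $g_multiplayer_num_bots_required_team_1/2 (clamped at zero) for the
      # spawn trigger that follows.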
(0, 0, 0, [ (multiplayer_is_server),
(eq, "$g_multiplayer_ready_for_spawning_agent", 1),
(store_add, ":total_req", "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_required_team_2"),
(gt, ":total_req", 0),
],
[
(try_begin),
(store_add, ":total_req", "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_required_team_2"),
(store_random_in_range, ":random_req", 0, ":total_req"),
(val_sub, ":random_req", "$g_multiplayer_num_bots_required_team_1"),
(try_begin),
ax2=ax.twinx()
ax2.scatter(TimeAxis,
self.Meta['R_Cut'],
linewidths=4, label = 'R_Cut', color='g')
if Species is not None:
for x in Species:
ax2.scatter(TimeAxis,
self.Meta['Cut' + x],
linewidths=4, label = 'R_Cut' + x)
if Errors is True:
ax2.errorbar(TimeAxis, self.Meta['R_Cut'],
self.Err['R_Cut'], color='g',
capsize = 5, capthick = 3)
if Species is not None:
for x in Species:
ax2.errorbar(TimeAxis,self.Meta['Cut' + x],
self.Err['Cut' + x],
capsize = 5, capthick = 3)
except KeyError:
pass
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
ax.set_xlabel('Time (ps)')
ax.set_ylabel(Stat.upper())
if Temp is True:
ax3 = ax.twiny()
            ax1Ticks = ax.get_xticks()
            ax3Ticks = ax1Ticks
            ax3.set_xticks(ax3Ticks)
            ax3.set_xbound(ax.get_xbound())
            ax3.set_xticklabels(self.tick_function(ax3Ticks))
ax3.set_xlabel('Temperature (K)')
plt.savefig(self.Base + self.Images + '/' + str(Stat) + '.png' , dpi = 100, bbox_inches='tight')
plt.close()
def tick_function(self, X):
try:
inc = (max(self.Meta['Temp']) - min(self.Meta['Temp']))/( 10*len(self.Meta['Temp']) )
V = min(self.Meta['Temp']) + X*inc
return ["%.3f" % z for z in V]
except KeyError:
return None
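    # Note on tick_function above: it maps primary-axis (time) tick positions X
    # onto approximate temperatures by offsetting min(Temp) with a fixed
    # increment derived from the recorded temperature span; the
    # 1/(10*len(Temp)) scaling is kept exactly as the original author wrote it.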
def com_plot_bi(self, Dists = None, Species = None, Frames = [0], Errors = False):
if self.Errors is True:
Errors = True
if Dists is None:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nNo distributions requested.\n")
return None
elif type(Dists) is list:
for Dist in Dists:
                if Dist == "MidCoMDist":
                    D = "Cluster Centre"
                elif Dist == "CoMDist":
                    D = "Sub-cluster Centre"
else:
raise KeyError("Invalid distribution.\n")
if Species is None:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nNo chemical species requested.\n")
elif type(Species) is list:
for Specie in Species:
for frame in Frames:
try:
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(self.Meta['CoMSpace'], self.Meta[Dist + Specie][frame], color= 'k', linewidth = 4)
if Errors is True:
ax.fill_between(self.Meta['CoMSpace'],
self.Meta[Dist + Specie][frame] + self.Err[Dist + Specie][frame],
self.Meta[Dist + Specie][frame] - self.Err[Dist + Specie][frame],
color = 'k', alpha = 0.25)
ax.set_xlabel('Distance (Angstrom)')
ax.set_ylabel('Probability')
try:
ax.text(self.Meta['CoMSpace'][5], 0.65*max(self.Meta[Dist + Specie][frame]), "%s to %s\nTime: %sps\nTemp: %sK"
%(Specie, D, self.Meta['SimTime'][frame], "{:.1f}".format(self.Meta['Temp'][frame])))
except KeyError:
pass
plt.savefig(self.Base + self.Images + '/' + Dist+Specie+str(frame) + '.png',
dpi = 100, bbox_inches='tight')
plt.close()
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nThere was an error trying to plot %s.\n" %(Dist+Specie))
pass
def cna_plot(self, Name = 'CNA_Time', Frames = [], Errors = False):
if self.Errors is True:
Errors = True
for Frame in Frames:
try:
X_CNA = [ str(a) for a in self.Meta['masterkey'] ] # Create a set of ticks for the x-axis
fig = plt.figure(figsize = (9,3) )
if Errors is True:
ax = plt.bar( X_CNA, self.Meta['cna_sigs'][Frame], yerr = self.Err['cna_sigs'][Frame], tick_label = X_CNA )
else:
ax = plt.bar( X_CNA, self.Meta['cna_sigs'][Frame], tick_label = X_CNA)
plt.xlabel("CNA Signature", fontsize = 14)
plt.ylabel("Probability", fontsize = 14)
plt.xticks(rotation=90,fontsize = 14)
try:
plt.text( X_CNA[-7], 0.8*np.amax(self.Meta['cna_sigs'][Frame]),
'Time: %sps\nTemp: %sK' %(self.Meta["SimTime"][Frame],
"{:.1f}".format(self.Meta['Temp'][Frame])), fontsize = 14 )
except KeyError:
pass
plt.savefig(self.Base+self.Images+'/'+Name+str(Frame)+'.png', dpi = 100, bbox_inches = 'tight')
plt.close()
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\nThis quantity, cna, does not exist in the metadata.\n")
return None
def agcn_histo(self, Frames = [], Errors = False):
for Frame in Frames:
fig, ax = plt.subplots()
fig.set_size_inches(9,3)
y,binEdges = np.histogram(self.Meta['agcn'][Frame], bins = 40)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
ax.bar(bincenters, y, color='r')
try:
ax.text(bincenters[4], 0.7*np.amax(y), "Time : %sps\nTemp : %sK"%(self.Meta['SimTime'][Frame], "{:.1f}".format(self.Meta['Temp'][Frame])) )
plt.savefig(self.Base + self.Images + '/'+ 'AGCNDist'+str(self.Meta['SimTime'][Frame])+'.png', dpi = 100, bbox_inches='tight')
except KeyError:
plt.savefig(self.Base + self.Images + '/'+ 'AGCNDist.png', dpi = 100, bbox_inches='tight')
plt.close()
def com_full_plot(self, Frames = [], Errors = False):
if self.Errors is True:
Errors = True
for Frame in Frames:
fig, ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(self.Meta['CoMSpace'], self.Meta['CoMDist'][Frame], color='k')
if Errors is True:
ax.fill_between(self.Meta['CoMSpace'] ,
self.Meta['CoMDist'][Frame] + self.Err['CoMDist'][Frame],
self.Meta['CoMDist'][Frame] - self.Err['CoMDist'][Frame],
color='k', alpha = 0.25)
ax.set_xlabel('Distance (Angstrom)')
ax.set_ylabel('RDF')
try:
ax.text(self.Meta['CoMSpace'][5], 0.65*max(self.Meta['CoMDist'][Frame]), "Full System\nTime: %sps\nTemp: %sK" %(self.Meta['SimTime'][Frame], "{:.1f}".format(self.Meta['Temp'][Frame])))
plt.savefig(self.Base + self.Images + '/'+ 'FullCoM'+str(self.Meta['SimTime'][Frame])+'.png',
dpi = 100, bbox_inches='tight')
except KeyError:
plt.savefig(self.Base + self.Images + '/'+ 'FullCoM.png', dpi = 100, bbox_inches='tight')
plt.close()
def Mass(self,r):
return ((4/3) * np.pi * r**3 )
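    # Mass(r) above is the volume of a sphere of radius r, i.e. the enclosed
    # mass up to a constant density factor; cum_com normalises by the largest
    # value, so that factor cancels out.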
def cum_com(self, Frames):
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
for Frame in Frames:
Int = [ np.trapz(self.Meta['CoMDist'][Frame][:x], self.Meta['CoMSpace'][:x]) for x in range(100) ]
try:
ax.plot(self.Meta['CoMSpace'], Int, label = '%sps' %(self.Meta['SimTime'][Frame]))
except KeyError:
ax.plot(self.Meta['CoMSpace'], Int, label = str(Frame))
ax.plot(self.Meta['CoMSpace'], self.Mass(self.Meta['CoMSpace'])/max(self.Mass(self.Meta['CoMSpace'])), label = 'Spherical mass distribution', linestyle = 'dashed')
ax.set_xlabel('Distance from centre (Angstrom)')
ax.set_ylabel('M(r) / M(R)')
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
try:
plt.savefig(self.Base + self.Images + '/'+ 'Cum_CoM'+str(self.Meta['SimTime'][Frame])+'.png',
dpi = 100, bbox_inches='tight')
except KeyError:
plt.savefig(self.Base + self.Images + '/'+ 'Cum_CoM.png',
dpi = 100, bbox_inches='tight')
plt.close()
def cna_traj(self, Sigs = [], Errors = False):
if self.Errors is True:
Errors = True
try:
Time = self.Meta['SimTime']
except KeyError:
Time = range(len(self.Meta['cna_sigs']))
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
for x in Sigs:
try:
ax.plot(Time, self.Collect_CNA(x), label = x)
if Errors is True:
ax.fill_between(Time,
np.asarray(self.Collect_CNA(x)) + np.asarray(self.Collect_CNA_error(x)),
np.asarray(self.Collect_CNA(x)) - np.asarray(self.Collect_CNA_error(x)),
alpha = 0.25)
except ValueError:
print(x, type(x))
with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\nSignature, '{0}', not in metadata.\n".format(x))
ax.set_xlabel('Time (ps)')
ax.set_ylabel('Probability')
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
plt.savefig(self.Base + self.Images + '/'+ 'CNA_Traj'+'.png',
dpi = 100, bbox_inches='tight')
plt.close()
def h_c(self, Errors = False):
if self.Errors is True:
Errors = True
Time = self.Meta['SimTime']
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(Time, self.Meta['h'], label = 'Collectivity')
ax.plot(Time, self.Meta['c'], label = 'Concertedness')
if Errors is True:
ax.fill_between(Time[1:],
self.Meta['h']+self.Err['h'],
self.Meta['h']-self.Err['h'],
alpha = 0.25)
ax.fill_between(Time[2:-1],
self.Meta['c']+self.Err['c'],
self.Meta['c']-self.Err['c'],
alpha = 0.25)
ax.set_xlabel('Time (ps)')
ax.set_ylabel(' H / C')
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
plt.savefig(self.Base + self.Images + '/'+ 'HC_Stats'+'.png',
dpi = 100, bbox_inches='tight')
plt.close()
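    # Hypothetical usage sketch (the owning class and its constructor are not
    # shown in this fragment, so the name 'Plotter' and its arguments are
    # assumptions):
    #
    #   P = Plotter(...)   # must provide self.Meta, self.Err, self.Base, self.Images
    #   P.cna_traj(Sigs=[(4, 2, 2), (4, 2, 1), (3, 1, 1)], Errors=True)
    #   P.h_c(Errors=True)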
def pair_plot(Data, System):
try:
HeAdj = Data['HeAdj']
NewHe = []
except KeyError:
sys.exit()
for x in range(len(HeAdj)):
try:
NewHe.append(sum(HeAdj[x][1]))
except TypeError:
pass
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(Data['SimTime'], [sum(Data['HoAdjPt'][x]) for x in range(len(Data['HoAdjPt']))], 'orange', label='Pt only')
ax2 = ax.twinx()
ax2.plot(Data['SimTime'], [sum(Data['HoAdjAu'][x]) for x in range(len(Data['HoAdjAu']))] , 'blue', label = 'Au only')
ax3 = ax.twiny()
ax3.plot(NewHe, label = 'Hetero pairs only', color='red')
ax2.axes.yaxis.set_visible(False)
ax3.axes.xaxis.set_visible(False)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
ax.set_xlabel('Time (ps)')
ax.set_ylabel('Number of pairs')
fig.legend(bbox_to_anchor=(0, 1.0, 1., 0), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
plt.savefig(System['base_dir']+System['plot_dir'] + '/Pairs.png', dpi = 100, bbox_inches='tight')
def All_CNA_Traj(System, Pipeline, outfile):
CNA = []
for x in System['iter_dir']:
for y in [(4,2,2), (4,2,1), (3,1,1)]:
Index = Pipeline.BigMeta[x]['cna'][0][0].index(y)
Temp = [ Pipeline.BigMeta[x]['cna'][i][1][Index] for i in range(len(Pipeline.BigMeta[x]['cna'])) ]
CNA.append(Temp)
x = Pipeline.BigMeta[System['iter_dir'][0]]['SimTime']
fig, axs = plt.subplots(2, 2, sharex='col', sharey='row')
fig.set_size_inches(9,3)
(ax1, ax2), (ax3, ax4) = axs
ax1.plot(x, CNA[0], label = '(4 2 2)')
ax1.plot(x, CNA[1], label = '(4 2 1)')
ax1.plot(x, CNA[2], label = '(3 1 1)')
ax2.plot(x, CNA[3])
ax2.plot(x, CNA[4])
ax2.plot(x, CNA[5])
ax3.plot(x, CNA[6])
ax3.plot(x, CNA[7])
ax3.plot(x, CNA[8])
ax4.plot(x, CNA[9])
ax4.plot(x, CNA[10])
ax4.plot(x, CNA[11])
for ax in axs.flat:
ax.label_outer()
ax.set_ylim(0, 0.7)
fig.legend( loc='upper center', ncol=3, fontsize = 10)
plt.savefig(outfile, dpi = 100, bbox_inches='tight')
"""
##########################################################################
The following are old functions with little utility but may be
reintroduced if there is demand for such things.
def AGCN_Excess():
Excess = []
for i in range( len( AverageMeta['agcn'] ) ):
Temp = [ a>12 for a in AverageMeta['agcn'][i] ]
Excess.append(np.sum(Temp))
return Excess
def Strange_CNA():
Indices = [ 14, 15, 24, 25, 38 ] #37 and on to the end are all odd
CNA = AverageMeta['cna'] # All of the heights
Strange_Dict = {}
for Index in Indices:
Strange_Dict[AverageMeta['masterkey'][Index]] = np.zeros((len(CNA)), dtype = np.float64)
for Key in AverageMeta['masterkey'][Indices[-1]:]:
Strange_Dict[Key] = np.zeros((len(CNA)), dtype = np.float64)
Key = list(Strange_Dict.keys())
Mast = AverageMeta['masterkey']
for frame in range(len(CNA)):
for Sig in CNA[frame]:
for obj in Key:
if list(CNA[frame]).index(Sig) == Mast.index(obj):
if Sig > 0:
Strange_Dict[obj][frame] = 1
Bar_Heights = []
for Item in Strange_Dict:
Bar_Heights.append( np.sum(Strange_Dict[Item]) )
return (Strange_Dict.keys(), Bar_Heights)
fig, ax = plt.subplots()
fig.set_size_inches((21,7))
ax.plot(New, label = '(4,5,5)', color='k')
    Ticks =
from ratelimit.decorators import ratelimit
from datetime import timedelta
from dateutil.tz import tzutc
from io import StringIO
from link_header import Link as Rel, LinkHeader
from urllib.parse import urlencode
import time
from timegate.utils import closest
from warcio.timeutils import datetime_to_http_date
from werkzeug.http import parse_date
from django.forms import widgets
from django.shortcuts import render, get_object_or_404, redirect
from django.http import (HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect,
JsonResponse, HttpResponseNotFound, HttpResponseBadRequest)
from django.urls import reverse, NoReverseMatch
from django.conf import settings
from django.core.files.storage import default_storage
from django.utils import timezone
from django.views.generic import TemplateView
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import cache_control
from django.views.decorators.clickjacking import xframe_options_exempt
from django.utils.six.moves.http_client import responses
from perma.wsgi_utils import retry_on_exception
from ..models import Link, Registrar, Organization, LinkUser
from ..forms import ContactForm
from ..utils import (if_anonymous, ratelimit_ip_key, redirect_to_download,
protocol, stream_warc_if_permissible, set_options_headers,
timemap_url, timegate_url, memento_url, memento_data_for_url, url_with_qs_and_hash,
get_client_ip, remove_control_characters)
from ..email import send_admin_email, send_user_email_copy_admins
import logging
logger = logging.getLogger(__name__)
valid_serve_types = ['image', 'warc_download']
class DirectTemplateView(TemplateView):
extra_context = None
def get_context_data(self, **kwargs):
""" Override Django's TemplateView to allow passing in extra_context. """
context = super(self.__class__, self).get_context_data(**kwargs)
if self.extra_context is not None:
for key, value in self.extra_context.items():
if callable(value):
context[key] = value()
else:
context[key] = value
return context
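# A minimal usage sketch for DirectTemplateView in a URLconf (the route name
# and template path below are illustrative, not taken from this project):
#
#   path('faq-lite/', DirectTemplateView.as_view(
#       template_name='docs/faq.html',
#       extra_context={'users_count': LinkUser.objects.count}),
#       name='faq_lite'),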
def landing(request):
"""
The landing page
"""
if request.user.is_authenticated and request.get_host() not in request.META.get('HTTP_REFERER',''):
return HttpResponseRedirect(reverse('create_link'))
else:
# orgs_count = Organization.objects.count()
# users_count = LinkUser.objects.count()
# links_count = Link.objects.filter(is_private=False).count()
return render(request, 'landing.html', {
'this_page': 'landing',
# 'orgs_count': orgs_count, 'users_count': users_count, 'links_count': links_count,
})
def about(request):
"""
The about page
"""
partners = sorted(Registrar.objects.filter(show_partner_status=True), key=lambda r: r.partner_display_name or r.name)
halfway_point = int(len(partners)/2)
# sending two sets of arrays so that we can separate them
# into two columns alphabetically, the right way
partners_first_col = partners[:halfway_point] if len(partners) > 0 else []
partners_last_col = partners[halfway_point:] if len(partners) > 0 else []
return render(request, 'about.html', {
'partners': partners,
'partners_first_col': partners_first_col,
'partners_last_col': partners_last_col
})
def faq(request):
"""
The faq page
"""
registrars_count = Registrar.objects.approved().count()
orgs_count = Organization.objects.all().count()
users_count = LinkUser.objects.all().count()
links_count = Link.objects.filter(is_private=False).count()
return render(request, 'docs/faq.html', {'registrars_count': registrars_count,
'orgs_count': orgs_count, 'users_count': users_count, 'links_count': links_count,})
def stats(request):
"""
The global stats
"""
return render(request, 'stats.html')
@if_anonymous(cache_control(max_age=settings.CACHE_MAX_AGES['single_permalink']))
@ratelimit(rate=settings.MINUTE_LIMIT, block=True, key=ratelimit_ip_key)
@ratelimit(rate=settings.HOUR_LIMIT, block=True, key=ratelimit_ip_key)
@ratelimit(rate=settings.DAY_LIMIT, block=True, key=ratelimit_ip_key)
def single_permalink(request, guid):
"""
Given a Perma ID, serve it up.
"""
raw_user_agent = request.META.get('HTTP_USER_AGENT', '')
# Create a canonical version of guid (non-alphanumerics removed, hyphens every 4 characters, uppercase),
# and forward to that if it's different from current guid.
canonical_guid = Link.get_canonical_guid(guid)
# We only do the redirect if the correctly-formatted GUID actually exists --
# this prevents actual 404s from redirecting with weird formatting.
link = get_object_or_404(Link.objects.all_with_deleted(), guid=canonical_guid)
if canonical_guid != guid:
return HttpResponsePermanentRedirect(reverse('single_permalink', args=[canonical_guid]))
# Forward to replacement link if replacement_link is set.
if link.replacement_link_id:
return HttpResponseRedirect(reverse('single_permalink', args=[link.replacement_link_id]))
# If we get an unrecognized archive type (which could be an old type like 'live' or 'pdf'), forward to default version
serve_type = request.GET.get('type')
if serve_type is None:
serve_type = 'source'
elif serve_type not in valid_serve_types:
return HttpResponsePermanentRedirect(reverse('single_permalink', args=[canonical_guid]))
# serve raw WARC
if serve_type == 'warc_download':
return stream_warc_if_permissible(link, request.user)
# handle requested capture type
if serve_type == 'image':
capture = link.screenshot_capture
# not all Perma Links have screenshots; if no screenshot is present,
# forward to primary capture for playback or for appropriate error message
if (not capture or capture.status != 'success') and link.primary_capture:
return HttpResponseRedirect(reverse('single_permalink', args=[guid]))
else:
capture = link.primary_capture
# if primary capture did not work, but screenshot did work, forward to screenshot
if (not capture or capture.status != 'success') and link.screenshot_capture and link.screenshot_capture.status == 'success':
return HttpResponseRedirect(reverse('single_permalink', args=[guid])+"?type=image")
try:
capture_mime_type = capture.mime_type()
except AttributeError:
# If capture is deleted, then mime type does not exist. Catch error.
capture_mime_type = None
# Special handling for mobile pdf viewing because it can be buggy
# Redirecting to a download page if on mobile
redirect_to_download_view = redirect_to_download(capture_mime_type, raw_user_agent)
# If this record was just created by the current user, we want to do some special-handling:
# for instance, show them a message in the template, and give the playback extra time to initialize
new_record = request.user.is_authenticated and link.created_by_id == request.user.id and not link.user_deleted \
and link.creation_timestamp > timezone.now() - timedelta(seconds=300)
# Provide the max upload size, in case the upload form is used
max_size = settings.MAX_ARCHIVE_FILE_SIZE / 1024 / 1024
if not link.submitted_description:
link.submitted_description = "This is an archive of %s from %s" % (link.submitted_url, link.creation_timestamp.strftime("%A %d, %B %Y"))
logger.info(f"Preparing context for {link.guid}")
context = {
'link': link,
'redirect_to_download_view': redirect_to_download_view,
'mime_type': capture_mime_type,
'can_view': request.user.can_view(link),
'can_edit': request.user.can_edit(link),
'can_delete': request.user.can_delete(link),
'can_toggle_private': request.user.can_toggle_private(link),
'capture': capture,
'serve_type': serve_type,
'new_record': new_record,
'this_page': 'single_link',
'max_size': max_size,
'link_url': settings.HOST + '/' + link.guid,
'protocol': protocol(),
}
if context['can_view'] and link.can_play_back():
if new_record:
logger.info(f"Ensuring warc for {link.guid} has finished uploading.")
def assert_exists(filename):
assert default_storage.exists(filename)
try:
retry_on_exception(assert_exists, args=[link.warc_storage_file()], exception=AssertionError, attempts=settings.WARC_AVAILABLE_RETRIES)
except AssertionError:
logger.error(f"Made {settings.WARC_AVAILABLE_RETRIES} attempts to get {link.guid}'s warc; still not available.")
# Let's consider this a HTTP 200, I think...
return render(request, 'archive/playback-delayed.html', context, status=200)
context['client_side_playback'] = request.GET.get('client-side') if (
request.GET.get('client-side') and
settings.OFFER_CLIENT_SIDE_PLAYBACK and
not request.user.is_anonymous and
request.user.offer_client_side_playback
) else ''
if context['client_side_playback']:
logger.info(f'Using client-side playback for {link.guid}')
else:
# Play back using Webrecorder
try:
logger.info(f"Initializing play back of {link.guid}")
wr_username = link.init_replay_for_user(request)
except Exception: # noqa
# We are experiencing many varieties of transient flakiness in playback:
# second attempts, triggered by refreshing the page, almost always seem to work.
# While we debug... let's give playback a second try here, and see if this
# noticeably improves user experience.
logger.exception(f"First attempt to init replay of {link.guid} failed. (Retrying: observe whether this error recurs.)")
time.sleep(settings.WR_PLAYBACK_RETRY_AFTER)
logger.info(f"Initializing play back of {link.guid} (2nd try)")
wr_username = link.init_replay_for_user(request)
logger.info(f"Updating context with WR playback information for {link.guid}")
context.update({
'wr_host': settings.PLAYBACK_HOST,
'wr_prefix': link.wr_iframe_prefix(wr_username),
'wr_url': capture.url,
'wr_timestamp': link.creation_timestamp.strftime('%Y%m%d%H%M%S'),
})
logger.info(f"Rendering template for {link.guid}")
response = render(request, 'archive/single-link.html', context)
# Adjust status code
if link.user_deleted:
response.status_code = 410
elif not context['can_view'] and link.is_private:
response.status_code = 403
# Add memento headers, when appropriate
logger.info(f"Deciding whether to include memento headers for {link.guid}")
if link.is_visible_to_memento():
logger.info(f"Including memento headers for {link.guid}")
response['Memento-Datetime'] = datetime_to_http_date(link.creation_timestamp)
# impose an arbitrary length-limit on the submitted URL, so that this header doesn't become illegally large
url = link.submitted_url[:500]
# strip control characters from url, if somehow they slipped in prior to https://github.com/harvard-lil/perma/commit/272b3a79d94a795142940281c9444b45c24a05db
url = remove_control_characters(url)
response['Link'] = str(
LinkHeader([
Rel(url, rel='original'),
Rel(timegate_url(request, url), rel='timegate'),
Rel(timemap_url(request, url, 'link'), rel='timemap', type='application/link-format'),
Rel(timemap_url(request, url, 'json'), rel='timemap', type='application/json'),
Rel(timemap_url(request, url, 'html'), rel='timemap', type='text/html'),
Rel(memento_url(request, link), rel='memento', datetime=datetime_to_http_date(link.creation_timestamp)),
])
)
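        # Illustrative shape of the resulting Link header (values are hypothetical,
        # for orientation only): each Rel above serializes as '<url>; rel="..."'
        # and the entries are joined with commas, e.g.
        #   Link: <http://example.com/page>; rel="original",
        #         <.../timegate/...>; rel="timegate",
        #         <.../timemap/link/...>; rel="timemap"; type="application/link-format", ...
        # with one entry per Rel listed above, in the same order.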
logger.info(f"Returning response for {link.guid}")
return response
@xframe_options_exempt
def set_iframe_session_cookie(request):
"""
The <iframe> used for Perma Link playback serves content from Webrecorder.
If the Perma Link is private, playback requires a WR session cookie.
The cookie's value is set via a WR api call during Perma's
`link.init_replay_for_user` and is stored in Perma's session data.
If the iframe requests a resource without the cookie,
WR will redirect here. This route in turn redirects back to WR with the
session cookie as a GET param. WR sets the cookie in the browser, and then,
finally, redirects to the originally requested resource.
"""
if request.method == 'OPTIONS':
# no redirects required; subsequent requests from the browser get the cookie
response = HttpResponse()
else:
cookie = urlencode({'cookie': request.session.get('wr_private_session_cookie')})
query = request.META.get('QUERY_STRING', '')
if not cookie:
user = 'Anonymous'
if request.user.is_authenticated:
user = f"User {request.user.id}"
logger.error(f'No WR cookie found in session! User: {user}. Session keys: {request.session.keys()}.')
return render(request, 'archive/archive-error.html', {
'err_url': f'_set_session?{query}',
'timestamp': timezone.now(),
'err_msg': 'Missing cookie',
})
url = protocol() + settings.PLAYBACK_HOST + f'/_set_session?{query}&{cookie}'
response = HttpResponseRedirect(url)
response['Cache-Control'] = 'no-cache'
# set CORS headers (for both OPTIONS and actual redirect)
set_options_headers(request, response)
return response
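    # Hedged example of the non-OPTIONS branch above (all values hypothetical):
    # a request arriving with QUERY_STRING 'next=/replay/resource' while the session
    # holds wr_private_session_cookie='abc123' is redirected to
    #   <protocol><PLAYBACK_HOST>/_set_session?next=/replay/resource&cookie=abc123
    # Webrecorder then sets the cookie in the browser and redirects on to the
    # originally requested resource, as described in the docstring.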
def serve_warc(request, guid):
"""
This is a redundant route for downloading a warc, for use in client-side playback,
which has specific requirements:
- the warc must be served from a URL ending in `.warc`
- the response cannot be streamed
"""
canonical_guid = Link.get_canonical_guid(guid)
link = get_object_or_404(Link.objects.all_with_deleted(), guid=canonical_guid)
return stream_warc_if_permissible(link, request.user, stream=False)
def replay_service_worker(request):
"""
The service worker required for client-side playback:
"""
return HttpResponse(f'importScripts("{ settings.SERVICE_WORKER_URL }");\n', content_type='application/x-javascript')
@if_anonymous(cache_control(max_age=settings.CACHE_MAX_AGES['timemap']))
@ratelimit(rate=settings.MINUTE_LIMIT, block=True, key=ratelimit_ip_key)
@ratelimit(rate=settings.HOUR_LIMIT, block=True, key=ratelimit_ip_key)
@ratelimit(rate=settings.DAY_LIMIT, block=True, key=ratelimit_ip_key)
def timemap(request, response_format, url):
url = url_with_qs_and_hash(url, request.META['QUERY_STRING'])
data = memento_data_for_url(request, url)
if data:
if response_format == 'json':
response | |
s_zj = surfSrc.zjSort
xt = surfTar.xiSort
yt = surfTar.yiSort
zt = surfTar.ziSort
k = surfSrc.sortSource % param.K # Gauss point
aux = numpy.zeros(2)
directKt_sort(Ktx_aux, Kty_aux, Ktz_aux, int(LorY),
numpy.ravel(surfSrc.vertex[surfSrc.triangleSort[:]]),
numpy.int32(k), s_xj, s_yj, s_zj, xt, yt, zt, m, mKc,
surfTar.P2P_list[surf], surfTar.offsetTarget,
surfTar.sizeTarget, surfSrc.offsetSource,
surfTar.offsetTwigs[surf], surfSrc.AreaSort, surfSrc.Xsk,
surfSrc.Wsk, param.kappa, param.threshold, param.eps, aux)
timing.AI_int += int(aux[0])
timing.time_an += aux[1]
toc = time.time()
timing.time_P2P += toc - tic
return Ktx_aux, Kty_aux, Ktz_aux
def P2P_gpu(surfSrc, surfTar, m, mx, my, mz, mKc, mVc, K_gpu, V_gpu, surf,
LorY, K_diag, IorE, L, w, param, timing, kernel):
"""
It computes the near field contribution of the double and single layer
potential using the sorted data and adds it to the far field contribution
given as an input, on the GPU.
Note: In this context when we refer to mass we mean
mass = (vector x gauss weights)
mass-clean = (vector)
where 'vector' is the vector in the matrix-vector multiplication in
the GMRES.
Arguments
----------
surfSrc: class, source surface, the one that contains the gauss points.
surfTar: class, target surface, the one that contains the collocation
points.
m : array, mass of the source particle for the single layer potential
calculation.
mx : array, mass of the source particle times the 'x' component of the
normal vector, for the double layer potential calculation.
my : array, mass of the source particle times the 'y' component of the
normal vector, for the double layer potential calculation.
mz : array, mass of the source particle times the 'z' component of the
normal vector, for the double layer potential calculation.
mKc : array, mass-clean of the source particle for the double layer
potential calculation.
    mVc    : array, mass-clean of the source particle for the single layer
             potential calculation.
K_gpu : array, far field contribution to the double layer potential.
V_gpu : array, far field contribution to the single layer potential.
    surf   : int, position of the source surface in the surface array.
    LorY   : int, Laplace (1) or Yukawa (2).
    K_diag : array, diagonal elements of the double layer integral operator.
IorE : int, internal (1) or external (2).
L : float, representative distance of the triangles. (sqrt{2*Area})
w : array, gauss points.
param : class, parameters related to the surface.
timing : class, it contains timing information for different parts of
the code.
kernel : pycuda source module.
Returns
--------
K_gpu : array, far plus near field contribution to the double layer
potential.
V_gpu : array, far plus near field contribution to the single layer
potential.
"""
if param.GPU == 1:
tic = cuda.Event()
toc = cuda.Event()
else:
tic = Event()
toc = Event()
tic.record()
REAL = param.REAL
mDev = cuda.to_device(m.astype(REAL))
mxDev = cuda.to_device(mx.astype(REAL))
myDev = cuda.to_device(my.astype(REAL))
mzDev = cuda.to_device(mz.astype(REAL))
mKcDev = cuda.to_device(mKc.astype(REAL))
mVcDev = cuda.to_device(mVc.astype(REAL))
toc.record()
toc.synchronize()
timing.time_trans += tic.time_till(toc) * 1e-3
tic.record()
GSZ = int(numpy.ceil(float(param.Nround) / param.NCRIT)) # CUDA grid size
direct_gpu = kernel.get_function("P2P")
AI_int = cuda.to_device(numpy.zeros(param.Nround, dtype=numpy.int32))
# GPU arrays are flattened, need to point to first element
ptr_offset = surf * len(surfTar.offsetTwigs[surf]
) # Pointer to first element of offset arrays
ptr_list = surf * len(surfTar.P2P_list[surf]
) # Pointer to first element in lists arrays
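    # Worked example of the flattened-pointer bookkeeping above (illustrative
    # numbers only): if each target surface stores, say, 4 twig offsets per source
    # surface, then for surf == 2 this pair's slice starts at ptr_offset == 2*4 == 8
    # inside the flattened offTwgDev array on the GPU; ptr_list works the same way
    # for the flattened P2P interaction lists.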
# Check if internal or external to send correct singular integral
if IorE == 1:
sglInt = surfSrc.sglInt_intDev
else:
sglInt = surfSrc.sglInt_extDev
direct_gpu(K_gpu,
V_gpu,
surfSrc.offSrcDev,
surfTar.offTwgDev,
surfTar.P2P_lstDev,
surfTar.sizeTarDev,
surfSrc.kDev,
surfSrc.xjDev,
surfSrc.yjDev,
surfSrc.zjDev,
mDev,
mxDev,
myDev,
mzDev,
mKcDev,
mVcDev,
surfTar.xiDev,
surfTar.yiDev,
surfTar.ziDev,
surfSrc.AreaDev,
sglInt,
surfSrc.vertexDev,
numpy.int32(ptr_offset),
numpy.int32(ptr_list),
numpy.int32(LorY),
REAL(param.kappa),
REAL(param.threshold),
numpy.int32(param.BlocksPerTwig),
numpy.int32(param.NCRIT),
REAL(K_diag),
AI_int,
surfSrc.XskDev,
surfSrc.WskDev,
block=(param.BSZ, 1, 1),
grid=(GSZ, 1))
toc.record()
toc.synchronize()
timing.time_P2P += tic.time_till(toc) * 1e-3
tic.record()
AI_aux = numpy.zeros(param.Nround, dtype=numpy.int32)
AI_aux = cuda.from_device(AI_int, param.Nround, dtype=numpy.int32)
timing.AI_int += sum(AI_aux[surfTar.unsort])
toc.record()
toc.synchronize()
timing.time_trans += tic.time_till(toc) * 1e-3
return K_gpu, V_gpu
def P2PKt_gpu(surfSrc, surfTar, m, mKtc, Ktx_gpu, Kty_gpu, Ktz_gpu, surf, LorY,
w, param, timing, kernel):
"""
    It computes the near field contribution of the adjoint double layer
    potential using the sorted data and adds it to the far field contribution
    given as an input, on the GPU.
Note: In this context when we refer to mass we mean
mass = (vector x gauss weights)
mass-clean = (vector)
where 'vector' is the vector in the matrix-vector multiplication in
the GMRES.
Arguments
----------
surfSrc: class, source surface, the one that contains the gauss points.
surfTar: class, target surface, the one that contains the collocation
points.
m : array, mass of the source particle for the adjoint double layer
potential calculation.
    mKtc   : array, mass-clean of the source particle for the adjoint double
             layer potential calculation.
Ktx_gpu: array, x component of the far field contribution to the adjoint
double layer potential.
Kty_gpu: array, y component of the far field contribution to the adjoint
double layer potential.
Ktz_gpu: array, z component of the far field contribution to the adjoint
double layer potential.
surf : int, position of the source surface in the surface array.
LorY : int, Laplace (1) or Yukawa (2).
w : array, gauss points.
param : class, parameters related to the surface.
timing : class, it contains timing information for different parts of
the code.
kernel : pycuda source module.
Returns
--------
Ktx_gpu: array, x component of the far plus near field contribution to the
adjoint double layer potential.
Kty_gpu: array, y component of the far plus near field contribution to the
adjoint double layer potential.
Ktz_gpu: array, z component of the far plus near field contribution to the
adjoint double layer potential.
"""
if param.GPU == 1:
tic = cuda.Event()
toc = cuda.Event()
else:
tic = Event()
toc = Event()
tic.record()
REAL = param.REAL
mDev = cuda.to_device(m.astype(REAL))
mKtcDev = cuda.to_device(mKtc.astype(REAL))
toc.record()
toc.synchronize()
timing.time_trans += tic.time_till(toc) * 1e-3
tic.record()
GSZ = int(numpy.ceil(float(param.Nround) / param.NCRIT)) # CUDA grid size
directKt_gpu = kernel.get_function("P2PKt")
AI_int = cuda.to_device(numpy.zeros(param.Nround, dtype=numpy.int32))
# GPU arrays are flattened, need to point to first element
ptr_offset = surf * len(surfTar.offsetTwigs[surf]
) # Pointer to first element of offset arrays
ptr_list = surf * len(surfTar.P2P_list[surf]
) # Pointer to first element in lists arrays
directKt_gpu(Ktx_gpu,
Kty_gpu,
Ktz_gpu,
surfSrc.offSrcDev,
surfTar.offTwgDev,
surfTar.P2P_lstDev,
surfTar.sizeTarDev,
surfSrc.kDev,
surfSrc.xjDev,
surfSrc.yjDev,
surfSrc.zjDev,
mDev,
mKtcDev,
surfTar.xiDev,
surfTar.yiDev,
surfTar.ziDev,
surfSrc.AreaDev,
surfSrc.vertexDev,
numpy.int32(ptr_offset),
numpy.int32(ptr_list),
numpy.int32(LorY),
REAL(param.kappa),
REAL(param.threshold),
numpy.int32(param.BlocksPerTwig),
numpy.int32(param.NCRIT),
AI_int,
surfSrc.XskDev,
surfSrc.WskDev,
block=(param.BSZ, 1, 1),
grid=(GSZ, 1))
toc.record()
toc.synchronize()
timing.time_P2P += tic.time_till(toc) * 1e-3
tic.record()
AI_aux = numpy.zeros(param.Nround, dtype=numpy.int32)
AI_aux = cuda.from_device(AI_int, param.Nround, dtype=numpy.int32)
timing.AI_int += sum(AI_aux[surfTar.unsort])
toc.record()
toc.synchronize()
timing.time_trans += tic.time_till(toc) * 1e-3
return Ktx_gpu, Kty_gpu, Ktz_gpu
def M2P_nonvec(Cells, CJ, xq, Kval, Vval, index, par_reac, source, time_M2P):
"""
It computes the far field contribution of the double and single layer
potential without doing the assumption that all the particles in the same
twig cell have the same interaction list.
This is used for the calculation for the reaction potential where the
targets are the point-charges location.
Arguments
----------
Cells : array, cells of the tree.
CJ : int, index of the source cell.
    xq      : array, position of the point charges.
Kval : array, far field contribution to the double layer potential.
Vval : array, far field contribution to the single layer potential.
    index   : list, pointers to the location of the multipole of order i,j,k
in the multipole array.
par_reac: class, fine parameters related to the surface.
    source  : list, P2P interaction list, i.e. the list of cells with which
              each point charge interacts directly (P2P).
    time_M2P: real, time consumed in the M2P_nonvec function.
Returns
--------
Kval : array, far field contribution to the double layer potential.
Vval : array, far field contribution to the single layer potential.
    source  : list, P2P interaction list, i.e. the list of cells with which
              each point charge interacts directly (P2P).
    time_M2P: real, time consumed in the M2P_nonvec function.
"""
if (Cells[CJ].ntarget >= par_reac.NCRIT): # if not a twig
for c in range(8):
if (Cells[CJ].nchild & (1 << c)):
CC = Cells[CJ].child[c] # Points at child cell
dxi = Cells[CC].xc - xq[0]
dyi = Cells[CC].yc - xq[1]
dzi = Cells[CC].zc | |
import requests
from rdflib import Graph, URIRef
from rdflib.namespace import RDF, SKOS
from rdflib.util import guess_format
from rdflib.exceptions import ParserError
from rdflib.plugins.parsers.notation3 import BadSyntax
import skosify
import os
import gzip
import json
import logging
from io import BytesIO
import zipfile
import time
import pygsheets
import googleapiclient.errors
# The MIME Types for the possible rdf file formats. Needed to upload a file on apache jena.
TURTLE_MIME_TYPE = 'application/x-turtle'
N3_MIME_TYPE = 'text/n3; charset=utf-8'
NT_MIME_TYPE = 'application/n-triples'
RDF_MIME_TYPE = 'application/rdf-xml'
JSON_LD_MIME_TYPE = 'application/json'
# Column value of sheet
TITLE = 0
URL = 1
FILE_TYPE = 2
SHORT_NAME = 3
SPARQL_GRAPH_NAME = 4
DEFAULT_LANGUAGE = 5
READY = 6
NAMESPACE = 7
TRIPLE_COUNT = 8
ERROR_TYPE = 9
ERROR = 10
SKOSMOS_ENTRY = 11
class InvalidMIMETypeError(Exception): pass
class DownloadError(Exception): pass
class FusekiUploadError(Exception): pass
class NoNamespaceDetectedError(Exception): pass
class SheetUpdate(object):
"""Holds the changes made across the classes."""
def __init__(self):
self.namespace = ''
self.triple_count = ''
self.error_type = ''
self.error_message = ''
self.skosmos_entry = ''
class SkosifiedGraph(object):
"""
    Loads the graph and makes adjustments to it. These adjustments should help Skosmos display the vocabularies.
"""
def __init__(self, file_name: str, format: str, name: str, namespace: str, temp_path: str, default_language,
update: SheetUpdate, logger=logging.getLogger('bartoc-skosify')):
"""
:param file_name: Name of the file where the vocabulary was saved after download.
        :param format: The format the vocabulary is stored in (rdf+xml, turtle, n-triples, n3, json-ld).
:param name: The title/name of the vocabulary. Comes from the sheet and not the vocabulary.
:param namespace: If the namespace inside of the sheet is defined this is it. Otherwise None.
:param temp_path: Path to the storage for temporary files. Configured in default.cfg
:param default_language: (NIY) When defined skosify will add this language to all labels within the vocabulary.
:param update: The current sheet update object.
:param logger: The logger used.
"""
self.logger = logger
self.namespace = namespace
self.file_name = file_name
self.temp_path = temp_path
self.format = format
self.name = name
self.default_language = default_language
self.update = update
self.rdf = Graph()
def process(self):
"""
This processes the vocabulary with Skosify.
Will first parse it and attempt to detect the namespace if not defined.
Then will load it with skosfiy with various options enabled.
:raises Various errors when the file can't be parsed or serialized.
"""
try:
self.rdf.parse(self.file_name, format='json-ld' if self.format == 'json' else guess_format(self.format))
except (ParserError, BadSyntax) as error:
self.update.error_type = 'PARSER ERROR'
self.update.error_message = str(error)
self.logger.exception('Could not parse vocabulary %s:', self.name)
return
# if parser was not successful there is no point in continuing.
# if no namespace has been defined try to find one.
if self.namespace == '':
self.detect_namespace()
try:
self.rdf.serialize(destination='test.ttl', format='ttl')
# Does some magic to the vocabulary.
# Documentation is somewhat sparse but can be found here: https://github.com/NatLibFi/Skosify
self.rdf = skosify.skosify(self.rdf, label=self.name, namespace=self.namespace,
default_language=self.default_language, mark_top_concepts=True,
eliminate_redundancy=True, break_cycles=True, keep_related=False,
cleanup_classes=True, cleanup_properties=True, cleanup_unreachable=True)
except SystemExit:
# Whenever skosify encounters a fatal/critical error it calls sys.exit(1). This is caught here.
self.logger.critical('Was unable to skosify %s', self.name)
self.update.error_type = 'Skosify Critical Error'
self.update.error_message = 'Skosify was unable to deal with this vocabulary. ' \
                                        'Check the log to see why.'
pass
finally:
# Writes the graph to disk. independent of whether skosify was successful or not.
self.rdf.serialize(destination=self.temp_path + 'upload.ttl', format='ttl', encoding='utf-8')
self.file_name = 'upload.ttl'
self.format = 'ttl'
def detect_namespace(self):
"""
Attempts to extract a base namespace from the vocabulary graph.
Will first find a random concept or concept scheme and then extract the base name space from it.
:raises NoNamespaceDetectedError: If no namespace can be found.
"""
concept = self.rdf.value(None, RDF.type, SKOS.Concept, any=True)
if concept is None:
concept = self.rdf.value(None, RDF.type, SKOS.ConceptScheme, any=True)
if concept is None:
raise NoNamespaceDetectedError('Could not detect a namespace for ' + self.name +
', because there are no SKOS Concepts or SKOS Concept Schemes defined.')
local_name = concept.split('/')[-1].split('#')[-1]
namespace = URIRef(concept.replace(local_name, '').strip('#'))
if namespace.strip() == '':
raise NoNamespaceDetectedError('Could not detect a namespace for ' + self.name +
', because the URI is not valid.')
self.logger.info('Namespace detection successful: %s.', namespace)
self.namespace = namespace
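        # Worked example of the extraction above (hypothetical URIs): for a concept
        # 'http://example.org/vocab/C123' the local name is 'C123' and the detected
        # namespace becomes URIRef('http://example.org/vocab/'); for a hash URI such
        # as 'http://example.org/vocab#C123' the trailing '#' is stripped as well.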
class FusekiUpdate(object):
"""This class handles the download and upload of each vocabulary."""
def __init__(self, title: str, url: str, file_type: str, short_name: str,
sparql_graph: str, namespace: str, default_language: str, temp_path: str,
update: SheetUpdate, logger=logging.getLogger('fuseki-update')):
"""
:param title: Name/Title of the vocabulary. Input from sheet.
:param url: Url to where the vocabulary can be downloaded. Input from sheet.
:param file_type: MIME Type of the downloaded file. Input from sheet.
:param short_name: Short name of the vocabulary. Input from sheet.
:param sparql_graph: Required to name the sparql graph in fuseki.
:param namespace: Namespace to fill in void:uriSpace in Skosmos entry file.
:param temp_path: File path to temporary folders. Input from default.cfg.
:param update: The SheetUpdate object for this vocabulary.
:param logger: The logger...
"""
self.logger = logger
self.title = title.strip()
self.url = url.strip()
self.short_name = short_name.strip()
self.file_end = file_type.strip().lower()
self.sparql_graph = sparql_graph.strip()
self.namespace = namespace.strip()
self.temp_path = temp_path
self.local_file_name = ''
if default_language.strip() != '':
self.default_language = default_language.strip()
else:
self.default_language = None
self.sheet_updates = update
        if self.namespace != '':  # record a namespace that was provided in the sheet
self.sheet_updates.namespace = self.namespace
self.graph = None
self.mime_type = ''
def process(self):
"""Goes through the various steps to process the vocabuarly.
1. Check if the mime type given is valid.
2. Download the file from the given url.
3. Skosify the vocabulary.
4. Upload the file to Fuseki.
5. Clean up temporary files.
"""
self.mime_type = self.check_mime_type(self.file_end)
self.download_file(self.url)
self.graph = SkosifiedGraph(self.local_file_name, self.file_end, self.title, self.namespace, self.temp_path,
self.default_language, self.sheet_updates)
try:
self.graph.process()
except NoNamespaceDetectedError as error:
self.logger.exception(str(error))
self.sheet_updates.error_type = 'NO NAMESPACE DETECTED'
self.sheet_updates.error_message = str(error)
self.sheet_updates.namespace = str(self.graph.namespace)
if self.graph.namespace == '':
raise NoNamespaceDetectedError('Could not determine a namespace. Please provide one.')
# See if the file type has changed. This happens if skosify is successful.
self.mime_type = self.check_mime_type(self.graph.format)
self.local_file_name = self.graph.file_name
self.upload_file()
self.sheet_updates.skosmos_entry = self.create_skosmos_entry()
def check_mime_type(self, file_type):
"""
        Return the MIME type for the given file extension; raise InvalidMIMETypeError for unknown values.
IMPORTANT: THIS DOES NOT CHECK IF THE PROVIDED FILE ACTUALLY HAS THIS MIME TYPE!
"""
if file_type == 'rdf':
return RDF_MIME_TYPE
elif file_type == 'ttl':
return TURTLE_MIME_TYPE
elif file_type == 'n3':
return N3_MIME_TYPE
elif file_type == 'nt':
return NT_MIME_TYPE
elif file_type == 'json':
return JSON_LD_MIME_TYPE
else:
self.sheet_updates.error_type = "FILE TYPE ERROR"
self.sheet_updates.error_message = 'Invalid MIME Type: expected RDF, TTL, N3, NT or JSON, found ' + \
file_type + '.'
raise InvalidMIMETypeError('Invalid MIME Type found: ' + file_type + '.')
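        # Hedged usage sketch (values are illustrative): for a sheet row whose file
        # type column says 'ttl', check_mime_type('ttl') returns TURTLE_MIME_TYPE
        # ('application/x-turtle'), which can then serve as the Content-Type for the
        # upload (see the MIME type constants at the top of this file); an unknown
        # value such as 'pdf' raises InvalidMIMETypeError and is reported back to
        # the sheet.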
def download_file(self, url: str):
"""
Download the file from the given url.
        Will first attempt to download and read the file. Only successful (non-error) responses are accepted.
If the file is archived it is unpacked. Can handle .zip & .gz. All other archives will lead to errors.
Will load all content as binary and then decode to UTF-8.
Write file to disk.
:param url: The url.
        :raises DownloadError: if the download could not be completed.
"""
if url.startswith('http'):
try:
download_file_response = requests.get(url)
except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as error:
self.sheet_updates.error_type = 'CONNECTION ERROR'
self.sheet_updates.error_message = 'Could not connect to ' + url
self.logger.exception(error)
raise DownloadError('Could not download from ' + url + ' because of a connection error.')
if not download_file_response.ok:
self.sheet_updates.error_type = 'DOWNLOAD ERROR (' + str(download_file_response.status_code) + ')'
self.sheet_updates.error_message = download_file_response.text
raise DownloadError('Was unable to download the file from ' + url)
content = download_file_response.content
buffer = BytesIO(download_file_response.content)
elif url.startswith('ftp'):
import urllib.parse
import ftplib
parts = urllib.parse.urlparse(url)
file_name = parts.path.split('/')[-1]
path = parts.path.replace(file_name, '')
ftp = ftplib.FTP(parts.netloc)
ftp.login()
ftp.cwd(path)
            with open(self.temp_path + file_name, 'wb') as ftp_target:
                ftp.retrbinary('RETR ' + file_name, ftp_target.write)
ftp.quit()
with open(self.temp_path + file_name, 'rb') as file:
content = file.read()
buffer = BytesIO(content)
else:
self.sheet_updates.error_type = 'DOWNLOAD ERROR'
self.sheet_updates.error_message = 'Invalid protocol: only HTTP[S] & FTP are supported!'
raise DownloadError('Invalid protocol: only HTTP[S] & FTP are supported!')
# save downloaded file locally to ensure that it is unzipped
# and does not need to be downloaded again for getting an URI
file_name = self.temp_path + 'temporary.' + self.file_end.lower()
if url.endswith('.zip'):
z = zipfile.ZipFile(buffer)
text = z.read(z.infolist()[0]).decode('utf-8')
elif url.endswith('.gz'):
text = gzip.decompress(content).decode('utf-8')
else:
text = content.decode('utf-8')
with open(file_name, 'w', encoding='utf-8') as file:
file.write(text)
self.local_file_name = file_name
def upload_file(self):
"""
Upload the file to the fuseki triple store with | |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=eval-used, redefined-outer-name
"""Models for intrinsic and extrinsic word embedding evaluation"""
import mxnet as mx
from mxnet import registry
from mxnet.gluon import HybridBlock
__all__ = [
'register', 'create', 'list_evaluation_functions',
'WordEmbeddingSimilarityFunction', 'WordEmbeddingAnalogyFunction',
'CosineSimilarity', 'ThreeCosMul', 'ThreeCosAdd',
'WordEmbeddingSimilarity', 'WordEmbeddingAnalogy']
class _WordEmbeddingEvaluationFunction(HybridBlock): # pylint: disable=abstract-method
"""Base class for word embedding evaluation functions."""
pass
class WordEmbeddingSimilarityFunction(_WordEmbeddingEvaluationFunction): # pylint: disable=abstract-method
"""Base class for word embedding similarity functions."""
pass
class WordEmbeddingAnalogyFunction(_WordEmbeddingEvaluationFunction): # pylint: disable=abstract-method
"""Base class for word embedding analogy functions.
Parameters
----------
idx_to_vec : mxnet.ndarray.NDArray
Embedding matrix.
k : int, default 1
Number of analogies to predict per input triple.
eps : float, optional, default=1e-10
A small constant for numerical stability.
"""
pass
###############################################################################
# Similarity and analogy functions registry helpers
###############################################################################
_REGSITRY_KIND_CLASS_MAP = {
'similarity': WordEmbeddingSimilarityFunction,
'analogy': WordEmbeddingAnalogyFunction
}
def register(class_):
"""Registers a new word embedding evaluation function.
Once registered, we can create an instance with
:func:`~gluonnlp.embedding.evaluation.create`.
Examples
--------
>>> @gluonnlp.embedding.evaluation.register
... class MySimilarityFunction(gluonnlp.embedding.evaluation.WordEmbeddingSimilarityFunction):
... def __init__(self, eps=1e-10):
... pass
>>> similarity_function = gluonnlp.embedding.evaluation.create('similarity',
... 'MySimilarityFunction')
>>> print(type(similarity_function))
<class 'MySimilarityFunction'>
>>> @gluonnlp.embedding.evaluation.register
... class MyAnalogyFunction(gluonnlp.embedding.evaluation.WordEmbeddingAnalogyFunction):
... def __init__(self, k=1, eps=1E-10):
... pass
>>> analogy_function = gluonnlp.embedding.evaluation.create('analogy', 'MyAnalogyFunction')
>>> print(type(analogy_function))
<class 'MyAnalogyFunction'>
"""
if issubclass(class_, WordEmbeddingSimilarityFunction):
register_ = registry.get_register_func(
WordEmbeddingSimilarityFunction,
'word embedding similarity evaluation function')
elif issubclass(class_, WordEmbeddingAnalogyFunction):
register_ = registry.get_register_func(
WordEmbeddingAnalogyFunction,
'word embedding analogy evaluation function')
else:
raise RuntimeError(
'The custom function must either subclass '
'WordEmbeddingSimilarityFunction or WordEmbeddingAnalogyFunction')
return register_(class_)
def create(kind, name, **kwargs):
"""Creates an instance of a registered word embedding evaluation function.
Parameters
----------
    kind : ['similarity', 'analogy']
        The kind of evaluation function to create.
name : str
The evaluation function name (case-insensitive).
Returns
-------
An instance of
:class:`gluonnlp.embedding.evaluation.WordEmbeddingAnalogyFunction`:
or
:class:`gluonnlp.embedding.evaluation.WordEmbeddingSimilarityFunction`:
An instance of the specified evaluation function.
"""
if kind not in _REGSITRY_KIND_CLASS_MAP.keys():
raise KeyError(
'Cannot find `kind` {}. Use '
            '`list_evaluation_functions(kind=None).keys()` to get '
'all the valid kinds of evaluation functions.'.format(kind))
create_ = registry.get_create_func(
_REGSITRY_KIND_CLASS_MAP[kind],
'word embedding {} evaluation function'.format(kind))
return create_(name, **kwargs)
def list_evaluation_functions(kind=None):
"""Get valid word embedding functions names.
Parameters
----------
kind : ['similarity', 'analogy', None]
Return only valid names for similarity, analogy or both kinds of functions.
Returns
-------
dict or list:
A list of all the valid evaluation function names for the specified
kind. If kind is set to None, returns a dict mapping each valid name to
its respective output list. The valid names can be plugged in
`gluonnlp.model.word_evaluation_model.create(name)`.
"""
if kind is None:
kind = tuple(_REGSITRY_KIND_CLASS_MAP.keys())
if not isinstance(kind, tuple):
if kind not in _REGSITRY_KIND_CLASS_MAP.keys():
raise KeyError(
'Cannot find `kind` {}. Use '
                '`list_evaluation_functions(kind=None).keys()` to get all the '
'valid kinds of evaluation functions.'.format(kind))
reg = registry.get_registry(_REGSITRY_KIND_CLASS_MAP[kind])
return list(reg.keys())
else:
return {name: list_evaluation_functions(kind=name) for name in kind}
###############################################################################
# Word embedding similarity functions
###############################################################################
@register
class CosineSimilarity(WordEmbeddingSimilarityFunction):
"""Computes the cosine similarity.
Parameters
----------
eps : float, optional, default=1e-10
A small constant for numerical stability.
"""
def __init__(self, eps=1e-10, **kwargs):
super(CosineSimilarity, self).__init__(**kwargs)
self.eps = eps
def hybrid_forward(self, F, x, y): # pylint: disable=arguments-differ
"""Compute the cosine similarity between two batches of vectors.
The cosine similarity is the dot product between the L2 normalized
vectors.
Parameters
----------
x : Symbol or NDArray
y : Symbol or NDArray
Returns
-------
similarity : Symbol or NDArray
The similarity computed by WordEmbeddingSimilarity.similarity_function.
"""
x = F.L2Normalization(x, eps=self.eps)
y = F.L2Normalization(y, eps=self.eps)
x = F.expand_dims(x, axis=1)
y = F.expand_dims(y, axis=2)
return F.batch_dot(x, y).reshape((-1, ))
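        # Shape sketch for the forward pass above (consistent with the batch_dot call):
        # x and y of shape (batch_size, embed_size) are L2-normalized, expanded to
        # (batch_size, 1, embed_size) and (batch_size, embed_size, 1), batch_dot gives
        # (batch_size, 1, 1), and reshape((-1,)) flattens the result to (batch_size,).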
###############################################################################
# Word embedding analogy functions
###############################################################################
@register
class ThreeCosMul(WordEmbeddingAnalogyFunction):
"""The 3CosMul analogy function.
The 3CosMul analogy function is defined as
.. math::
        \\arg\\max_{b^* ∈ V}\\frac{\\cos(b^*, b) \\cos(b^*, a^*)}{\\cos(b^*, a) + ε}
See the following paper for more details:
- <NAME>., & <NAME>. (2014). Linguistic regularities in sparse and
explicit word representations. In <NAME>, & <NAME>, Proceedings of the
Eighteenth Conference on Computational Natural Language Learning, CoNLL 2014,
Baltimore, Maryland, USA, June 26-27, 2014 (pp. 171–180). : ACL.
Parameters
----------
idx_to_vec : mxnet.ndarray.NDArray
Embedding matrix.
k : int, default 1
Number of analogies to predict per input triple.
exclude_question_words : bool, default True
Exclude the 3 question words from being a valid answer.
eps : float, optional, default=1e-10
A small constant for numerical stability.
"""
def __init__(self, idx_to_vec, k=1, eps=1E-10, exclude_question_words=True, **kwargs):
super(ThreeCosMul, self).__init__(**kwargs)
self.k = k
self.eps = eps
self._exclude_question_words = exclude_question_words
self._vocab_size, self._embed_size = idx_to_vec.shape
idx_to_vec = mx.nd.L2Normalization(idx_to_vec, eps=self.eps)
with self.name_scope():
self.weight = self.params.get_constant('weight', idx_to_vec)
def hybrid_forward(self, F, words1, words2, words3, weight): # pylint: disable=arguments-differ
"""Compute ThreeCosMul for given question words.
Parameters
----------
words1 : Symbol or NDArray
Question words at first position. Shape (batch_size, )
words2 : Symbol or NDArray
Question words at second position. Shape (batch_size, )
words3 : Symbol or NDArray
Question words at third position. Shape (batch_size, )
Returns
-------
Symbol or NDArray
Predicted answer words. Shape (batch_size, k).
"""
words123 = F.concat(words1, words2, words3, dim=0)
embeddings_words123 = F.Embedding(words123, weight,
input_dim=self._vocab_size,
output_dim=self._embed_size)
similarities = F.FullyConnected(
embeddings_words123, weight, no_bias=True,
num_hidden=self._vocab_size, flatten=False)
# Map cosine similarities to [0, 1]
similarities = (similarities + 1) / 2
sim_w1w4, sim_w2w4, sim_w3w4 = F.split(similarities, num_outputs=3,
axis=0)
sim = (sim_w2w4 * sim_w3w4) / (sim_w1w4 + self.eps)
if self._exclude_question_words:
for words in [words1, words2, words3]:
sim = sim * F.one_hot(words, self.weight.shape[0], 0, 1)
pred_idxs = F.topk(sim, k=self.k)
return pred_idxs
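        # Flow sketch of the computation above: the three question-word batches are
        # concatenated to shape (3*batch_size,), embedded to (3*batch_size, embed_size),
        # and multiplied against the (vocab_size, embed_size) weight to obtain cosine
        # similarities of shape (3*batch_size, vocab_size); after rescaling to [0, 1]
        # and splitting back into three (batch_size, vocab_size) blocks, the 3CosMul
        # score cos(b*, a*) * cos(b*, b) / (cos(b*, a) + eps) is maximised via topk.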
@register
class ThreeCosAdd(WordEmbeddingAnalogyFunction):
"""The 3CosAdd analogy function.
The 3CosAdd analogy function is defined as
.. math::
\\arg\\max_{b^* ∈ V}[\\cos(b^∗, b - a + a^*)]
See the following paper for more details:
- <NAME>., & <NAME>. (2014). Linguistic regularities in sparse and
explicit word representations. In <NAME>, & <NAME>, Proceedings of the
Eighteenth Conference on Computational Natural Language Learning, CoNLL 2014,
Baltimore, Maryland, USA, June 26-27, 2014 (pp. 171–180). : ACL.
Parameters
----------
idx_to_vec : mxnet.ndarray.NDArray
Embedding matrix.
normalize : bool, default True
Normalize all word embeddings before computing the analogy.
k : int, default 1
Number of analogies to predict per input triple.
exclude_question_words : bool, default True
Exclude the 3 question words from being a valid answer.
eps : float, optional, default=1e-10
A small constant for numerical stability.
"""
def __init__(self,
idx_to_vec,
normalize=True,
k=1,
eps=1E-10,
exclude_question_words=True,
**kwargs):
super(ThreeCosAdd, self).__init__(**kwargs)
self.k = k
self.eps = eps
self.normalize = normalize
self._exclude_question_words = exclude_question_words
self._vocab_size, self._embed_size = idx_to_vec.shape
if self.normalize:
idx_to_vec = mx.nd.L2Normalization(idx_to_vec, eps=self.eps)
with self.name_scope():
self.weight = self.params.get_constant('weight', idx_to_vec)
def hybrid_forward(self, F, words1, words2, words3, weight): # pylint: disable=arguments-differ
"""Compute ThreeCosAdd for given question words.
Parameters
----------
words1 : Symbol or NDArray
Question words at first position. Shape (batch_size, )
words2 : Symbol or NDArray
Question words at second position. Shape (batch_size, )
words3 : Symbol or NDArray
Question words at third position. Shape (batch_size, )
Returns
-------
Symbol or NDArray
Predicted answer words. Shape (batch_size, k).
"""
words123 = F.concat(words1, words2, words3, dim=0)
embeddings_words123 = F.Embedding(words123, weight,
input_dim=self._vocab_size,
output_dim=self._embed_size)
if self.normalize:
similarities = F.FullyConnected(
embeddings_words123, weight, no_bias=True,
num_hidden=self._vocab_size, flatten=False)
sim_w1w4, sim_w2w4, sim_w3w4 = F.split(similarities, num_outputs=3,
axis=0)
pred = sim_w3w4 - sim_w1w4 + sim_w2w4
else:
embeddings_word1, embeddings_word2, embeddings_word3 = F.split(
embeddings_words123, num_outputs=3, axis=0)
vector = (embeddings_word3 - embeddings_word1 + embeddings_word2)
pred = F.FullyConnected(
vector, weight, no_bias=True, num_hidden=self._vocab_size,
flatten=False)
if self._exclude_question_words:
for words in [words1, words2, words3]:
pred = pred * F.one_hot(words, self.weight.shape[0], 0, 1)
pred_idxs = F.topk(pred, k=self.k)
return pred_idxs
###############################################################################
# Evaluation blocks
###############################################################################
class WordEmbeddingSimilarity(HybridBlock):
"""Word embeddings similarity task evaluator.
Parameters
----------
idx_to_vec : mxnet.ndarray.NDArray
Embedding matrix.
similarity_function : str, default 'CosineSimilarity'
Name of a registered WordEmbeddingSimilarityFunction.
eps : float, optional, | |
>>
#@+node:ekr.20090126093408.33: *7* << Create the third column of widgets >>
# The var names must match the names in leoFind class.
table = (
("Entire Outline","entire-outline",wx.RB_GROUP),
("Suboutline Only","suboutline_only_flag",0),
("Node Only","node_only_flag",0),
("Selection Only","selection-only",0))
for label,var,group in table:
id = wx.NewId()
box = wx.RadioButton(self,id,label,
wx.DefaultPosition,(100,25),
group,wx.DefaultValidator,"group2")
col3Sizer.Add(box,0,wx.BORDER | wx.LEFT,20)
self.frame.dict[var] = box,id
#@-<< Create the third column of widgets >>
col4Sizer = wx.BoxSizer(wx.VERTICAL)
#@+<< Create the fourth column of widgets >>
#@+node:ekr.20090126093408.34: *7* << Create the fourth column of widgets >>
# The var names must match the names in leoFind class.
table = (
("search_headline_flag","Search Headline Text"),
("search_body_flag","Search Body Text"),
("mark_finds_flag","Mark Finds"),
("mark_changes_flag","Mark Changes"))
for var,label in table:
id = wx.NewId()
box = wx.CheckBox(self,id,label,
wx.DefaultPosition,(100,25),
0,wx.DefaultValidator,"")
col4Sizer.Add(box,0,wx.BORDER | wx.LEFT,20)
self.frame.dict[var] = box,id
#@-<< Create the fourth column of widgets >>
# Pack the columns
columnSizer = wx.BoxSizer(wx.HORIZONTAL)
columnSizer.Add(col1Sizer)
columnSizer.Add(col2Sizer)
columnSizer.Add(col3Sizer)
columnSizer.Add(col4Sizer)
topSizer.Add(columnSizer)
topSizer.Add(0,10)
#@-<< Create all the find check boxes >>
#@+<< Create all the find buttons >>
#@+node:ekr.20090126093408.35: *6* << Create all the find buttons >>
# The row sizers are a bit dim: they should distribute the buttons automatically.
row1Sizer = wx.BoxSizer(wx.HORIZONTAL)
#@+<< Create the first row of buttons >>
#@+node:ekr.20090126093408.36: *7* << Create the first row of buttons >>
row1Sizer.Add(90,0)
table = (
("findButton","Find",True),
("batch_flag","Show Context",False), # Old batch_flag now means Show Context.
("findAllButton","Find All",True))
for var,label,isButton in table:
id = wx.NewId()
if isButton:
widget = button = wx.Button(self,id,label,
wx.DefaultPosition,(100,25),
0,wx.DefaultValidator,"")
else:
widget = box = wx.CheckBox(self,id,label,
wx.DefaultPosition,(100,25),
0,wx.DefaultValidator,"")
self.frame.dict[var] = box,id
row1Sizer.Add(widget)
row1Sizer.Add((25,0),)
#@-<< Create the first row of buttons >>
row2Sizer = wx.BoxSizer(wx.HORIZONTAL)
#@+<< Create the second row of buttons >>
#@+node:ekr.20090126093408.37: *7* << Create the second row of buttons >>
row2Sizer.Add(90,0)
table = (
("changeButton","Change"),
("changeThenFindButton","Change,Then Find"),
("changeAllButton","Change All"))
for var,label in table:
id = wx.NewId()
button = wx.Button(self,id,label,
wx.DefaultPosition,(100,25),
0,wx.DefaultValidator,"")
row2Sizer.Add(button)
row2Sizer.Add((25,0),)
#@-<< Create the second row of buttons >>
# Pack the two rows
buttonSizer = wx.BoxSizer(wx.VERTICAL)
buttonSizer.Add(row1Sizer)
buttonSizer.Add(0,10)
buttonSizer.Add(row2Sizer)
topSizer.Add(buttonSizer)
topSizer.Add(0,10)
#@-<< Create all the find buttons >>
self.SetAutoLayout(True) # tell dialog to use sizer
self.SetSizer(topSizer) # actually set the sizer
        topSizer.Fit(self)  # set size to minimum size as calculated by the sizer
        topSizer.SetSizeHints(self)  # set size hints to honour minimum size
#@-others
#@+node:ekr.20090126093408.38: *4* wxFindTab class (leoFind.findTab)
class wxFindTab (leoFind.findTab):
'''A subclass of the findTab class containing all wxGui code.'''
#@+others
#@+node:ekr.20090126093408.39: *5* Birth
#@+node:ekr.20090126093408.40: *6* wxFindTab.ctor
if 0: # We can use the base-class ctor.
def __init__ (self,c,parentFrame):
leoFind.findTab.__init__(self,c,parentFrame)
# Init the base class.
# Calls initGui, createFrame, createBindings & init(c), in that order.
#@+node:ekr.20090126093408.41: *6* initGui
# Called from leoFind.findTab.ctor.
def initGui (self):
# g.trace('wxFindTab')
self.svarDict = {} # Keys are ivar names, values are svar objects.
for key in self.intKeys:
self.svarDict[key] = self.svar()
for key in self.newStringKeys:
self.svarDict[key] = self.svar()
#@+node:ekr.20090126093408.42: *6* init (wxFindTab)
# Called from leoFind.findTab.ctor.
# We must override leoFind.init to init the checkboxes 'by hand' here.
def init (self,c):
# Separate c.ivars are much more convenient than a svarDict.
for key in self.intKeys:
# Get ivars from @settings.
val = c.config.getBool(key)
setattr(self,key,val)
val = g.choose(val,1,0)
svar = self.svarDict.get(key)
if svar: svar.set(val)
#g.trace(key,val)
#@+<< set find/change widgets >>
#@+node:ekr.20090126093408.43: *7* << set find/change widgets >>
self.find_ctrl.delete(0,"end")
self.change_ctrl.delete(0,"end")
# Get setting from @settings.
for w,setting,defaultText in (
(self.find_ctrl,"find_text",'<find pattern here>'),
(self.change_ctrl,"change_text",''),
):
s = c.config.getString(setting)
if not s: s = defaultText
w.insert("end",s)
#@-<< set find/change widgets >>
#@+<< set radio buttons from ivars >>
#@+node:ekr.20090126093408.44: *7* << set radio buttons from ivars >>
# In Tk, setting the var also sets the widget.
# Here, we do so explicitly.
d = self.widgetsDict
for ivar,key in (
("pattern_match","pattern-search"),
#("script_search","script-search")
):
svar = self.svarDict[ivar].get()
if svar:
self.svarDict["radio-find-type"].set(key)
w = d.get(key)
if w: w.SetValue(True)
break
else:
self.svarDict["radio-find-type"].set("plain-search")
for ivar,key in (
("suboutline_only","suboutline-only"),
("node_only","node-only"),
# ("selection_only","selection-only")
):
svar = self.svarDict[ivar].get()
if svar:
self.svarDict["radio-search-scope"].set(key)
break
else:
key = 'entire-outline'
self.svarDict["radio-search-scope"].set(key)
w = self.widgetsDict.get(key)
if w: w.SetValue(True)
#@-<< set radio buttons from ivars >>
#@+<< set checkboxes from ivars >>
#@+node:ekr.20090126093408.45: *7* << set checkboxes from ivars >>
for ivar in (
'ignore_case',
'mark_changes',
'mark_finds',
'pattern_match',
'reverse',
'search_body',
'search_headline',
'whole_word',
'wrap',
):
svar = self.svarDict[ivar].get()
if svar:
w = self.widgetsDict.get(ivar)
if w: w.SetValue(True)
#@-<< set checkboxes from ivars >>
#@+node:ekr.20090126093408.46: *5* class svar
class svar:
'''A class like Tk's IntVar and StringVar classes.'''
def __init__(self):
self.val = None
def get (self):
return self.val
def set (self,val):
self.val = val
#@+node:ekr.20090126093408.47: *5* createFrame (wxFindTab)
def createFrame (self,parentFrame):
self.parentFrame = self.top = parentFrame
self.createFindChangeAreas()
self.createBoxes()
self.createButtons()
self.layout()
self.createBindings()
#@+node:ekr.20090126093408.48: *6* createFindChangeAreas
def createFindChangeAreas (self):
f = self.top
self.fLabel = wx.StaticText(f,label='Find', style=wx.ALIGN_RIGHT)
self.cLabel = wx.StaticText(f,label='Change',style=wx.ALIGN_RIGHT)
self.find_ctrl = plainTextWidget(self.c,f,name='find-text', size=(300,-1))
self.change_ctrl = plainTextWidget(self.c,f,name='change-text',size=(300,-1))
#@+node:ekr.20090126093408.49: *6* layout
def layout (self):
f = self.top
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(10)
sizer2 = wx.FlexGridSizer(2, 2, vgap=10,hgap=5)
sizer2.Add(self.fLabel,0,wx.EXPAND)
sizer2.Add(self.find_ctrl.widget,1,wx.EXPAND,border=5)
sizer2.Add(self.cLabel,0,wx.EXPAND)
sizer2.Add(self.change_ctrl.widget,1,wx.EXPAND,border=5)
sizer.Add(sizer2,0,wx.EXPAND)
sizer.AddSpacer(10)
#label = wx.StaticBox(f,label='Find Options')
#boxes = wx.StaticBoxSizer(label,wx.HORIZONTAL)
boxes = wx.BoxSizer(wx.HORIZONTAL)
lt_col = wx.BoxSizer(wx.VERTICAL)
rt_col = wx.BoxSizer(wx.VERTICAL)
for w in self.boxes [:6]:
lt_col.Add(w,0,wx.EXPAND,border=5)
lt_col.AddSpacer(5)
for w in self.boxes [6:]:
rt_col.Add(w,0,wx.EXPAND,border=5)
rt_col.AddSpacer(5)
boxes.Add(lt_col,0,wx.EXPAND)
boxes.AddSpacer(20)
boxes.Add(rt_col,0,wx.EXPAND)
sizer.Add(boxes,0) #,wx.EXPAND)
f.SetSizer(sizer)
#@+node:ekr.20090126093408.50: *6* createBoxes
def createBoxes (self):
'''Create two columns of radio buttons & check boxes.'''
c = self.c ; f = self.parentFrame
self.boxes = []
self.widgetsDict = {} # Keys are ivars, values are checkboxes or radio buttons.
data = ( # Leading star denotes a radio button.
('Whole &Word', 'whole_word',),
('&Ignore Case','ignore_case'),
('Wrap &Around','wrap'),
('&Reverse', 'reverse'),
('Rege&xp', 'pattern_match'),
('Mark &Finds', 'mark_finds'),
("*&Entire Outline","entire-outline"),
("*&Suboutline Only","suboutline-only"),
("*&Node Only","node-only"),
('Search &Headline','search_headline'),
('Search &Body','search_body'),
('Mark &Changes','mark_changes'),
)
# Important: changing these controls merely changes entries in self.svarDict.
# First, leoFind.update_ivars sets the find ivars from self.svarDict.
# Second, self.init sets the values of widgets from the ivars.
inGroup = False
for label,ivar in data:
if label.startswith('*'):
label = label[1:]
style = g.choose(inGroup,0,wx.RB_GROUP)
inGroup = True
w = wx.RadioButton(f,label=label,style=style)
self.widgetsDict[ivar] = w
def radioButtonCallback(event=None,ivar=ivar):
svar = self.svarDict["radio-search-scope"]
svar.set(ivar)
w.Bind(wx.EVT_RADIOBUTTON,radioButtonCallback)
else:
w = wx.CheckBox(f,label=label)
self.widgetsDict[ivar] = w
def checkBoxCallback(event=None,ivar=ivar):
svar = self.svarDict.get(ivar)
val = svar.get()
svar.set(g.choose(val,False,True))
# g.trace(ivar,val)
w.Bind(wx.EVT_CHECKBOX,checkBoxCallback)
self.boxes.append(w)
#@+node:ekr.20090126093408.51: *6* createBindings TO DO
def createBindings (self):
return ### not ready yet
def setFocus(w):
c = self.c
c.widgetWantsFocusNow(w)
w.setSelectionRange(0,0)
return "break"
def toFind(event,w=ftxt): return setFocus(w)
def toChange(event,w=ctxt): return setFocus(w)
def insertTab(w):
data = w.getSelectionRange()
if data: start,end = data
else: start = end = w.getInsertPoint()
w.replace(start,end,"\t")
return "break"
def insertFindTab(event,w=ftxt): return insertTab(w)
def insertChangeTab(event,w=ctxt): return insertTab(w)
ftxt.bind("<Tab>",toChange)
ctxt.bind("<Tab>",toFind)
ftxt.bind("<Control-Tab>",insertFindTab)
ctxt.bind("<Control-Tab>",insertChangeTab)
#@+node:ekr.20090126093408.52: *6* createButtons (does nothing)
def createButtons (self):
'''Create two columns of buttons.'''
#@+node:ekr.20090126093408.53: *5* createBindings (wsFindTab) TO DO
def createBindings (self):
pass
#@+node:ekr.20090126093408.54: *5* Support for minibufferFind class (wxFindTab)
# This is the same as the Tk code because we simulate Tk svars.
#@+node:ekr.20090126093408.55: *6* getOption
def getOption (self,ivar):
var = self.svarDict.get(ivar)
if var:
val = var.get()
# g.trace('%s = %s' % (ivar,val))
return val
else:
g.trace('bad ivar name: %s' % ivar)
return None
#@+node:ekr.20090126093408.56: *6* setOption
def setOption (self,ivar,val):
if ivar in self.intKeys:
if val is not None:
var = self.svarDict.get(ivar)
var.set(val)
# g.trace('%s = %s' % (ivar,val))
elif not g.app.unitTesting:
g.trace('oops: bad find ivar %s' % ivar)
#@+node:ekr.20090126093408.57: *6* toggleOption
def toggleOption (self,ivar):
if ivar in self.intKeys:
var = self.svarDict.get(ivar)
val = not var.get()
var.set(val)
# g.trace('%s = %s' % (ivar,val),var)
else:
g.trace('oops: bad find ivar %s' % ivar)
#@-others
#@+node:ekr.20090126093408.58: *4* class wxSpellTab TO DO
class wxSpellTab:
#@+others
#@+node:ekr.20090126093408.59: *5* wxSpellTab.__init__
def __init__ (self,c,tabName):
self.c = c
self.tabName = tabName
self.createFrame()
self.createBindings()
###self.fillbox([])
#@+node:ekr.20090126093408.60: *5* createBindings TO DO
def createBindings (self):
return ###
c = self.c ; k = c.k
widgets = (self.listBox, self.outerFrame)
for w in widgets:
# Bind shortcuts for the following commands...
for commandName,func in (
('full-command', k.fullCommand),
('hide-spell-tab', self.handler.hide),
('spell-add', self.handler.add),
('spell-find', self.handler.find),
('spell-ignore', self.handler.ignore),
('spell-change-then-find', self.handler.changeThenFind),
):
junk, bunchList = c.config.getShortcut(commandName)
for bunch in bunchList:
accel = bunch.val
shortcut = k.shortcutFromSetting(accel)
if shortcut:
# g.trace(shortcut,commandName)
w.bind(shortcut,func)
self.listBox.bind("<Double-1>",self.onChangeThenFindButton)
self.listBox.bind("<Button-1>",self.onSelectListBox)
self.listBox.bind("<Map>",self.onMap)
#@+node:ekr.20090126093408.61: *5* createFrame TO DO
def createFrame | |
# Source: GanstaKingofSA/RenPy-Universal-Player - python-packages/ost.py
# Copyright (C) 2021 GanstaKingofSA (Hanaka)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The main code behind the music room. Pardon any mess within this PY file.
import random
import re
import os
import json
import logging
import renpy
import pygame_sdl2
from tinytag import TinyTag
from minimalRPATool import RenPyArchive
from renpy.text.text import Text
from renpy.display.im import image
import renpy.audio.music as music
import renpy.display.behavior as displayBehavior
# Creation of Music Room and Code Setup
version = 1.7
music.register_channel("music_room", mixer="music_room_mixer", loop=False)
if renpy.windows:
gamedir = renpy.config.gamedir.replace("\\", "/")
elif renpy.android:
try:
os.mkdir(os.path.join(os.environ["ANDROID_PUBLIC"], "game"))
except:
pass
gamedir = os.path.join(os.environ["ANDROID_PUBLIC"], "game")
else:
gamedir = renpy.config.gamedir
if renpy.android:
logdir = os.path.join(os.environ["ANDROID_PUBLIC"], "ost_log.txt")
else:
logdir = os.path.join(renpy.config.basedir, "ost_log.txt")
if os.path.exists(logdir):
os.remove(logdir)
# Lists for holding media types
autoDefineList = []
manualDefineList = []
soundtracks = []
file_types = (".mp3", ".ogg", ".opus", ".wav")
# Stores soundtrack in progress
game_soundtrack = False
# Stores positions of track/volume/default priority
time_position = 0.0
time_duration = 3.0
old_volume = 0.0
priorityScan = 2
scale = 1.0
# Stores paused track/player controls
game_soundtrack_pause = False
prevTrack = False
randomSong = False
loopSong = False
organizeAZ = False
organizePriority = True
pausedstate = False
random.seed()
class soundtrack:
"""
    Class responsible for defining songs for the music player.
"""
def __init__(
self,
name="",
path="",
priority=2,
author="",
byteTime=False,
description="",
cover_art=False,
unlocked=True,
):
self.name = name
self.path = path
self.priority = priority
self.author = author
if byteTime:
self.byteTime = byteTime
else:
self.byteTime = get_duration(path)
self.description = description
if not cover_art:
self.cover_art = "images/music_room/nocover.png"
else:
self.cover_art = cover_art
self.unlocked = unlocked
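    # Hedged usage sketch (the song metadata below is made up, not from this file):
    # a track defined by hand could be appended to the manual list roughly like
    #     manualDefineList.append(soundtrack(
    #         name="Main Theme", path="audio/main_theme.ogg", priority=1,
    #         author="Some Composer", description="Title screen music",
    #         cover_art="images/music_room/main_theme.png"))
    # Tracks without cover_art fall back to "images/music_room/nocover.png".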
@renpy.exports.pure
class AdjustableAudioPositionValue(renpy.ui.BarValue):
"""
Class that replicates a music progress bar in Ren'Py.
"""
def __init__(self, channel="music_room", update_interval=0.0):
self.channel = channel
self.update_interval = update_interval
self.adjustment = None
self._hovered = False
def get_pos_duration(self):
if not music.is_playing(self.channel):
pos = time_position
else:
pos = music.get_pos(self.channel) or 0.0
duration = time_duration
return pos, duration
def get_song_options_status(self):
return loopSong, randomSong
def get_adjustment(self):
pos, duration = self.get_pos_duration()
self.adjustment = renpy.ui.adjustment(
value=pos, range=duration, changed=self.set_pos, adjustable=True
)
return self.adjustment
def hovered(self):
self._hovered = True
def unhovered(self):
self._hovered = False
def set_pos(self, value):
loopThis = self.get_song_options_status()
if self._hovered and pygame_sdl2.mouse.get_pressed()[0]:
music.play("<from {}>".format(value) + game_soundtrack.path, self.channel)
if loopThis:
music.queue(game_soundtrack.path, self.channel, loop=True)
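        # Note on the play() call above: "<from X>" is Ren'Py's partial-playback
        # prefix, so seeking is implemented by restarting the file from the chosen
        # offset, e.g. "<from 42.5>audio/track.ogg" starts playback 42.5 seconds in
        # (the path here is illustrative).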
def periodic(self, st):
pos, duration = self.get_pos_duration()
loopThis, doRandom = self.get_song_options_status()
if pos and pos <= duration:
self.adjustment.set_range(duration)
self.adjustment.change(pos)
if pos > duration - 0.20:
if loopThis:
music.play(game_soundtrack.path, self.channel, loop=True)
elif doRandom:
random_song()
else:
next_track()
return self.update_interval
if renpy.config.screen_width != 1280:
scale = renpy.config.screen_width / 1280.0
else:
scale = 1.0
def music_pos(style_name, st, at):
"""
Returns the track position to Ren'Py.
"""
global time_position
if music.get_pos(channel="music_room") is not None:
time_position = music.get_pos(channel="music_room")
readableTime = convert_time(time_position)
d = Text(readableTime, style=style_name)
return d, 0.20
def get_duration(songPath=None):
if game_soundtrack and game_soundtrack.byteTime and not songPath:
return game_soundtrack.byteTime
else:
try:
if songPath:
pathToSong = songPath
else:
pathToSong = game_soundtrack.path
tags = TinyTag.get_renpy(pathToSong, image=False)
if tags.duration:
return tags.duration
else:
if not songPath:
return music.get_duration(channel="music_room") or time_duration
except:
if not songPath:
return music.get_duration(channel="music_room") or time_duration
def music_dur(style_name, st, at):
"""
Returns the track duration to Ren'Py.
"""
global time_duration
time_duration = get_duration()
readableDuration = convert_time(time_duration)
d = Text(readableDuration, style=style_name)
return d, 0.20
def dynamic_title_text(style_name, st, at):
"""
Returns a resized song title text to Ren'Py.
"""
title = len(game_soundtrack.name)
if title <= 21:
songNameSize = int(37 * scale)
elif title <= 28:
songNameSize = int(29 * scale)
else:
songNameSize = int(23 * scale)
d = Text(
game_soundtrack.name, style=style_name, substitute=False, size=songNameSize
)
return d, 0.20
def dynamic_author_text(style_name, st, at):
"""
Returns a resized song artist text to Ren'Py.
"""
author = len(game_soundtrack.author)
if author <= 32:
authorNameSize = int(25 * scale)
elif author <= 48:
authorNameSize = int(23 * scale)
else:
authorNameSize = int(21 * scale)
d = Text(
game_soundtrack.author, style=style_name, substitute=False, size=authorNameSize
)
return d, 0.20
def refresh_cover_data(st, at):
"""
Returns the song cover art to Ren'Py.
"""
d = image(game_soundtrack.cover_art)
return d, 0.20
def dynamic_description_text(style_name, st, at):
"""
Returns a resized song album/comment to Ren'Py.
"""
desc = len(game_soundtrack.description)
if desc <= 32:
descSize = int(25 * scale)
elif desc <= 48:
descSize = int(23 * scale)
else:
descSize = int(21 * scale)
d = Text(
game_soundtrack.description, style=style_name, substitute=False, size=descSize
)
return d, 0.20
def auto_play_pause_button(st, at):
"""
Returns either a play or pause button to Ren'Py based on the song's play status.
"""
if music.is_playing(channel="music_room"):
if pausedstate:
d = displayBehavior.ImageButton("images/music_room/pause.png")
else:
d = displayBehavior.ImageButton(
"images/music_room/pause.png", action=current_music_pause
)
else:
d = displayBehavior.ImageButton(
"images/music_room/play.png", action=current_music_play
)
return d, 0.20
def convert_time(x):
"""
Converts track position and duration to human-readable time.
"""
hour = ""
if int(x / 3600) > 0:
hour = str(int(x / 3600))
if hour != "":
if int((x % 3600) / 60) < 10:
minute = ":0" + str(int((x % 3600) / 60))
else:
minute = ":" + str(int((x % 3600) / 60))
else:
minute = "" + str(int(x / 60))
if int(x % 60) < 10:
second = ":0" + str(int(x % 60))
else:
second = ":" + str(int(x % 60))
return hour + minute + second
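# For example, convert_time(75) returns "1:15" and convert_time(3675) returns
# "1:01:15"; the hour component is only prepended once the position passes 3600 seconds.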
def current_music_pause():
"""
Pauses the current song playing.
"""
global game_soundtrack_pause, pausedstate
pausedstate = True
if not music.is_playing(channel="music_room"):
return
else:
soundtrack_position = music.get_pos(channel="music_room")
if soundtrack_position is not None:
game_soundtrack_pause = (
"<from " + str(soundtrack_position + 1.6) + ">" + game_soundtrack.path
)
music.stop(channel="music_room", fadeout=2.0)
def current_music_play():
"""
Resumes the current song from its paused position, or plays it from the
beginning if it was not paused.
"""
global pausedstate
pausedstate = False
if not game_soundtrack_pause:
music.play(game_soundtrack.path, channel="music_room", fadein=2.0)
else:
music.play(game_soundtrack_pause, channel="music_room", fadein=2.0)
def current_music_forward():
"""
Fast-forwards the song by 5 seconds or advances to the next song.
"""
global game_soundtrack_pause
if music.get_pos(channel="music_room") is None:
soundtrack_position = time_position + 5
else:
soundtrack_position = music.get_pos(channel="music_room") + 5
if soundtrack_position >= time_duration:
game_soundtrack_pause = False
if randomSong:
random_song()
else:
next_track()
else:
game_soundtrack_pause = (
"<from " + str(soundtrack_position) + ">" + game_soundtrack.path
)
music.play(game_soundtrack_pause, channel="music_room")
def current_music_backward():
"""
Rewinds the song by 5 seconds or goes back to the previous song.
"""
global game_soundtrack_pause
if music.get_pos(channel="music_room") is None:
soundtrack_position = time_position - 5
else:
soundtrack_position = music.get_pos(channel="music_room") - 5
if soundtrack_position <= 0.0:
game_soundtrack_pause = False
next_track(True)
else:
game_soundtrack_pause = (
"<from " + str(soundtrack_position) + ">" + game_soundtrack.path
)
music.play(game_soundtrack_pause, channel="music_room")
def next_track(back=False):
"""
Advances to the next or previous song in the playlist, wrapping around at the start/end.
"""
global game_soundtrack
for index, item in enumerate(soundtracks):
if (
game_soundtrack.description == item.description
and game_soundtrack.name == item.name
):
try:
if back:
game_soundtrack = soundtracks[index - 1]
else:
game_soundtrack = soundtracks[index + 1]
except IndexError:
if back:
game_soundtrack = soundtracks[-1]
else:
game_soundtrack = soundtracks[0]
break
if game_soundtrack is not False:
music.play(game_soundtrack.path, channel="music_room", loop=loopSong)
def random_song():
"""
Advances to the next song with pure randomness.
"""
global game_soundtrack
unique = 1
if soundtracks[-1].path == game_soundtrack.path:
pass
else:
while unique != 0:
a = random.randrange(0, len(soundtracks) - 1)
if game_soundtrack != soundtracks[a]:
unique = 0
game_soundtrack = soundtracks[a]
if game_soundtrack is not False:
music.play(game_soundtrack.path, channel="music_room", loop=loopSong)
def mute_player():
"""
Mutes the music player.
"""
global old_volume
logging.info("Muting the audio player.")
if renpy.game.preferences.get_volume("music_room_mixer") != 0.0:
old_volume = renpy.game.preferences.get_volume("music_room_mixer")
renpy.game.preferences.set_volume("music_room_mixer", 0.0)
else:
if old_volume == 0.0:
renpy.game.preferences.set_volume("music_room_mixer", 0.5)
else:
renpy.game.preferences.set_volume("music_room_mixer", old_volume)
def refresh_list():
"""
Refreshes the song list.
"""
logging.info("Refreshing the music player list.")
scan_song()
resort()
def resort():
"""
Adds songs to the song list and resorts them by priority or
(
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
traffic_mode VARCHAR(7) NOT NULL,
traffic_profile VARCHAR(4) NOT NULL,
host_ip VARCHAR(128) NOT NULL,
cpu INTEGER NOT NULL,
start_time TIMESTAMP NOT NULL,
end_time TIMESTAMP )"""
self.__tables['tcp_client_vip_metrics'] = """CREATE TABLE tcp_client_vip_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
connections NUMERIC(20,1) NOT NULL,
good_connections NUMERIC(20,1) NOT NULL,
failed_connections NUMERIC(20,1) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, res_hash, ses_hash) )"""
self.__tables['udp_client_vip_metrics'] = """CREATE TABLE udp_client_vip_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
good_connections NUMERIC(20,1) NOT NULL,
failed_connections NUMERIC(20,1) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, res_hash, ses_hash) )"""
self.__tables['udp_server_vip_metrics'] = """CREATE TABLE udp_server_vip_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
dg_rcvd NUMERIC(20,1) NOT NULL,
dg_recv_timedout NUMERIC(20,1) NOT NULL,
dg_size_rcvd NUMERIC(20,1) NOT NULL,
dg_sent NUMERIC(20,1) NOT NULL,
dg_send_fail NUMERIC(20,1) NOT NULL,
dg_size_sent NUMERIC(20,1) NOT NULL,
request_rcvd NUMERIC(20,1) NOT NULL,
request_recv_timedout NUMERIC(20,1) NOT NULL,
response_sent NUMERIC(20,1) NOT NULL,
response_send_fail NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip) )"""
self.__tables['ses_bucket_metrics'] = """CREATE TABLE ses_bucket_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
metrics JSON NOT NULL,
PRIMARY KEY(ts, host_ip, vip, res_hash, ses_hash) )"""
self.__tables['tcp_client_url_metrics'] = """CREATE TABLE tcp_client_url_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
uri VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
http_gets_sent NUMERIC(20,1) NOT NULL,
http_gets_rcvd NUMERIC(20,1) NOT NULL,
http_posts_sent NUMERIC(20,1) NOT NULL,
http_posts_rcvd NUMERIC(20,1) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
responses_1xx NUMERIC(20,1) NOT NULL,
responses_2xx NUMERIC(20,1) NOT NULL,
responses_200 NUMERIC(20,1) NOT NULL,
responses_3xx NUMERIC(20,1) NOT NULL,
responses_4xx NUMERIC(20,1) NOT NULL,
responses_404 NUMERIC(20,1) NOT NULL,
responses_5xx NUMERIC(20,1) NOT NULL,
failed_reqs NUMERIC(20,1) NOT NULL,
len_fail NUMERIC(20,1) NOT NULL,
persist_fail NUMERIC(20,1) NOT NULL,
tcp_failures NUMERIC(20,1) NOT NULL,
mean_latency NUMERIC(20,15) NOT NULL,
var_latency NUMERIC(20,15) NOT NULL,
latency_min NUMERIC(20,15) NOT NULL,
latency_max NUMERIC(20,15) NOT NULL,
bytes_download NUMERIC(30,10) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, uri, res_hash, ses_hash) )"""
self.__tables['udp_client_url_metrics'] = """CREATE TABLE udp_client_url_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
reqs_failed NUMERIC(20,1) NOT NULL,
dg_sent NUMERIC(20,1) NOT NULL,
dg_size_sent NUMERIC(20,1) NOT NULL,
dg_send_fail NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
resp_timedout NUMERIC(20,1) NOT NULL,
dg_recd NUMERIC(20,1) NOT NULL,
dg_size_recd NUMERIC(20,1) NOT NULL,
dg_recv_timedout NUMERIC(20,1) NOT NULL,
latency_min NUMERIC(20,15) NOT NULL,
latency_max NUMERIC(20,15) NOT NULL,
mean_latency NUMERIC(20,15) NOT NULL,
var_latency NUMERIC(20,15) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, res_hash, ses_hash) )"""
self.__tables['url_bucket_metrics'] = """CREATE TABLE url_bucket_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
uri VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
metrics JSON NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, uri, res_hash, ses_hash) )"""
self.__tables['tcp_client_ses_metrics'] = """CREATE TABLE tcp_client_ses_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
open_connections NUMERIC(20,1) NOT NULL,
total_connections NUMERIC(20,1) NOT NULL,
cycles_complete NUMERIC(20,1) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
http_gets_sent NUMERIC(20,1) NOT NULL,
http_gets_rcvd NUMERIC(20,1) NOT NULL,
http_posts_sent NUMERIC(20,1) NOT NULL,
http_posts_rcvd NUMERIC(20,1) NOT NULL,
failed_reqs NUMERIC(20,1) NOT NULL,
len_fail NUMERIC(20,1) NOT NULL,
persist_fail NUMERIC(20,1) NOT NULL,
post_fnf NUMERIC(20,1) NOT NULL,
bytes_download NUMERIC(30,10) NOT NULL,
complete_time NUMERIC(30,15) NOT NULL,
PRIMARY KEY(ts, host_ip, res_hash, ses_hash) )"""
self.__tables['udp_client_ses_metrics'] = """CREATE TABLE udp_client_ses_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
cycles_complete NUMERIC(20,1) NOT NULL,
good_connections NUMERIC(20,1) NOT NULL,
failed_connections NUMERIC(20,1) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
reqs_failed NUMERIC(20,1) NOT NULL,
dg_sent NUMERIC(20,1) NOT NULL,
dg_size_sent NUMERIC(20,1) NOT NULL,
dg_send_fail NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
resp_timedout NUMERIC(20,1) NOT NULL,
dg_recd NUMERIC(20,1) NOT NULL,
dg_size_recd NUMERIC(20,1) NOT NULL,
dg_recv_timedout NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, res_hash, ses_hash) )"""
self.__tables['memory_metrics'] = """CREATE TABLE memory_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
pid INTEGER NOT NULL,
malloc INTEGER[] NOT NULL,
free INTEGER[] NOT NULL,
PRIMARY KEY(ts, host_ip, res_hash, ses_hash, pid) )"""
self.__tables['error_metrics'] = """CREATE TABLE error_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
uri VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
error_type VARCHAR(128) NOT NULL,
ts_range TSRANGE NOT NULL,
counter NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, uri, res_hash, error_type, ses_hash) )"""
create_table_connection = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
try:
with create_table_connection.cursor() as cursor:
for table_name, query_to_create in self.__tables.items():
cursor.execute(query_to_create)
create_table_connection.commit()
self.__lgr.info("Created table {}".format(table_name))
except:
create_table_connection.rollback()
self.__lgr.error("Rollback during creation of {} ERROR={}".format(table_name, \
traceback.format_exc()))
finally:
create_table_connection.close()
try:
self.__initialize_row_counters()
#Specifies the keys to group by, by default
self.__default_select_keys = {}
self.__default_select_keys['tcp_client_vip_metrics'] = ['vip']
self.__default_select_keys['tcp_client_url_metrics'] = ['vip']
self.__default_select_keys['udp_client_vip_metrics'] = ['vip']
self.__default_select_keys['udp_server_vip_metrics'] = ['vip']
self.__default_select_keys['udp_client_url_metrics'] = ['vip']
self.__default_select_keys['error_metrics'] = ['vip', 'error_type']
self.__default_select_keys['memory_metrics'] = ['index']
self.__default_select_keys['tcp_client_ses_metrics'] = []
self.__default_select_keys['udp_client_ses_metrics'] = []
#Specifies the order in which the group by operation has to be performed
self.__ORDER_OF_GROUP_BY = ['res_hash', 'res_tag', 'ses_hash', 'ses_tag', 'host_ip', 'vip',
'method', 'uri', #Only for tcp_client_url_metrics
'error_type', #Only for error_metrics
'pid', 'index' #Only for memory_metrics
]
self.__lgr.debug("ORDER: %s" %str(self.__ORDER_OF_GROUP_BY))
except:
self.__lgr.error(traceback.format_exc())
def __initialize_row_counters(self):
self.__last_read_row = {}
for key in self.__tables:
self.__last_read_row[key] = 0
def __insert_with_ts(self, conn, table_name, *args):
try:
values = ", ".join(map(str,args))
insert_query = "INSERT INTO {} VALUES (DEFAULT, {}, {})".format(table_name,
"'{}'".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")), values)
self.__lgr.debug("__insert_with_ts={}".format(insert_query))
with conn.cursor() as cursor:
cursor.execute(insert_query)
conn.commit()
return True
except:
self.__lgr.error(traceback.format_exc())
conn.rollback()
return False
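# Illustrative note (not part of the original file): the generated statement has
# the form  INSERT INTO <table> VALUES (DEFAULT, '<now>', <arg1>, <arg2>, ...),
# where DEFAULT fills the BIGSERIAL id column, '<now>' fills ts_ctrl, and the
# caller is expected to pre-quote any string arguments it passes in *args.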
def __insert(self, conn, table_name, *args):
try:
values = ", ".join(map(str,args))
insert_query = "INSERT INTO {} VALUES ({})".format(table_name, values)
with conn.cursor() as cursor:
cursor.execute(insert_query)
conn.commit()
return True
except:
self.__lgr.error(traceback.format_exc())
conn.rollback()
return False
def __execute_query(self, conn, query, fetch=True):
try:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute(query)
if not fetch:
conn.commit()
return True
else:
result = cursor.fetchall()
conn.commit()
return result
except:
conn.rollback()
self.__lgr.error("Error during executing {}. ERROR={}".format(query, traceback.format_exc()))
return None
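# Illustrative usage (the query text is made up for this sketch):
#   rows = self.__execute_query(conn, "SELECT * FROM error_metrics LIMIT 10")
# returns a list of DictRow results on success, or None if execution failed.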
def __insert_server_vip_metrics(self, db_connection, ts, host_ip, metrics_dict):
table_name = 'udp_server_vip_metrics'
for vip, metric_json in metrics_dict.items():
self.__insert_with_ts(db_connection, table_name,
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
metric_json['dg_rcvd'], metric_json['dg_recv_timedout'], metric_json['dg_size_rcvd'],
metric_json['dg_sent'], metric_json['dg_send_fail'], metric_json['dg_size_sent'],
metric_json['request_rcvd'], metric_json['request_recv_timedout'],
metric_json['response_sent'], metric_json['response_send_fail'])
def __insert_vip_metrics(self, db_connection, ts, host_ip, metrics_dict, is_bucketed=False):
try:
if(is_bucketed):
table_name = 'ses_bucket_metrics'
else:
table_name = {
HTTP_PROFILE : 'tcp_client_vip_metrics',
UDP_CLIENT_PROFILE : 'udp_client_vip_metrics'
}
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for vip, metric_json in ses_hash_values.items():
profile_type = metric_json.get("profile_type", -1)
if(profile_type == UDP_CLIENT_PROFILE):
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
res_hash, ses_hash, metric_json['good_connections'], \
metric_json['failed_connections'], metric_json['sessions'])
elif(profile_type == HTTP_PROFILE):
if is_bucketed:
metric_json = {"buckets" : metric_json}
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
res_hash, ses_hash, "'{}'".format(metric_json))
else:
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
res_hash, ses_hash, metric_json['connections'], \
metric_json['good_connections'], metric_json['failed_connections'], \
metric_json['sessions'])
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def __insert_memory_metrics(self, db_connection, ts, host_ip, metrics_dict):
try:
table_name = 'memory_metrics'
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for pid, metric_json in ses_hash_values.items():
self.__insert_with_ts(db_connection, table_name,
"'{}'".format(ts), "'{}'".format(host_ip), res_hash, ses_hash, pid,
"array{}".format(metric_json['malloc_metric']),
"array{}".format(metric_json['free_metric']))
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def __insert_error_metrics(self, db_connection, ts, host_ip, metrics_dict):
try:
table_name = 'error_metrics'
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for vip, vip_values in ses_hash_values.items():
for method, method_values in vip_values.items():
for uri, error_values in method_values.items():
for error_type, metric_json in error_values.items():
self.__insert_with_ts(db_connection, table_name,
"'{}'".format(ts), "'{}'".format(host_ip), \
"'{}'".format(vip), "'{}'".format(method), \
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import re
from google.appengine.api import users
from gae_libs.handlers.base_handler import BaseHandler, Permission
from gae_libs.dashboard_util import GetPagedResults
from handlers.code_coverage import utils
from libs.time_util import ConvertUTCToPST
from model.code_coverage import CoverageReportModifier
from model.code_coverage import FileCoverageData
from model.code_coverage import PostsubmitReport
from model.code_coverage import PresubmitCoverageData
from model.code_coverage import SummaryCoverageData
from services.code_coverage import code_coverage_util
from waterfall import waterfall_config
# The regex to extract the luci project name from the url path.
_LUCI_PROJECT_REGEX = re.compile(r'^/coverage/p/([^/]+)')
# The regex to extract the year since which referenced coverage is desired
_REFERENCED_COVERAGE_YEAR_REGEX = re.compile(r'.*/referenced([0-9]+)')
def _GetPostsubmitDefaultReportConfig(luci_project):
"""Returns a tuple of (host, project, ref, platform) to serve default report.
Following is an example config:
{
'default_postsubmit_report_config': {
'chromium': {
'host': 'chromium.googlesource.com',
'project': 'chromium/src',
'ref': 'refs/heads/main',
'platform': 'linux',
}
}
}
"""
return waterfall_config.GetCodeCoverageSettings().get(
'default_postsubmit_report_config', {}).get(luci_project, None)
def _GetSameOrMostRecentReportForEachPlatform(luci_project, host, project, ref,
revision):
"""Find the matching report on other platforms, or the most recent.
The intent of this function is to help the UI list the platforms that are
available, and let the user switch. If a report with the same revision exists
and is supposed to be visible to the public users, use it, otherwise use the
most recent visible one.
"""
result = {}
for platform, info in utils.GetPostsubmitPlatformInfoMap(
luci_project).iteritems():
# Some 'platforms' are hidden from the selection to avoid confusion, as they
# may be custom reports that do not make sense outside a certain team.
# They should still be reachable via a url.
if (info.get('hidden') and not users.is_current_user_admin()):
continue
bucket = info['bucket']
builder = info['builder']
same_report = PostsubmitReport.Get(
server_host=host,
project=project,
ref=ref,
revision=revision,
bucket=bucket,
builder=builder)
if same_report and same_report.visible:
result[platform] = same_report
continue
query = PostsubmitReport.query(
PostsubmitReport.gitiles_commit.project == project,
PostsubmitReport.gitiles_commit.server_host == host,
PostsubmitReport.bucket == bucket, PostsubmitReport.builder == builder,
PostsubmitReport.visible == True, PostsubmitReport.modifier_id ==
0).order(-PostsubmitReport.commit_timestamp)
entities = query.fetch(limit=1)
if entities:
result[platform] = entities[0]
return result
def _MakePlatformSelect(luci_project, host, project, ref, revision, path,
current_platform):
"""Populate values needed to render a form to let the user switch platforms.
This will produce parameters needed for the form to post to the same page so
that upon submission it loads the report at the same path, and it will also
provide the options that can be selected in the dropdown.
"""
result = {
'params': {
'host': host,
'project': project,
'ref': ref,
},
'options': [],
}
if path:
result['params']['path'] = path
for platform, report in _GetSameOrMostRecentReportForEachPlatform(
luci_project, host, project, ref, revision).iteritems():
option = {
'platform':
platform,
'ui_name':
utils.GetPostsubmitPlatformInfoMap(luci_project)[platform]
['ui_name'],
'selected':
platform == current_platform,
}
if report.gitiles_commit.revision == revision:
# If the same revision is available in the target platform, add it to the
# option s.t. the form can populate this revision field before
# submission.
option['revision'] = revision
result['options'].append(option)
result['options'].sort(key=lambda x: x['ui_name'])
return result
def _IsServePresubmitCoverageDataEnabled():
"""Returns True if the feature to serve presubmit coverage data is enabled.
Returns:
Returns True if it is enabled, otherwise, False.
"""
# Unless the flag is explicitly set, assuming disabled by default.
return waterfall_config.GetCodeCoverageSettings().get(
'serve_presubmit_coverage_data', False)
def _GetBanner(project):
"""If there is a service banner for a given project landing page, return it.
E.g. a maintenance announcement or outage acknowledgement, etc.
The setting is expected to be a dict mapping a project to the contents of the
div tag for the banner. If no project banner is defined, return the default
one.
This is expected to be None if no banner is to be shown.
"""
banners = waterfall_config.GetCodeCoverageSettings().get(
'project_banners', {})
return banners.get(project, banners.get('default'))
def _GetPathRootAndSeparatorFromDataType(data_type):
"""Returns the path of the root and path separator for the given data type."""
if data_type in ('files', 'dirs'):
return '//', '/'
elif data_type == 'components':
return '>>', '>'
return None, None
def _GetNameToPathSeparator(path, data_type):
"""Returns a list of [name, sub_path] for the given path.
Example:
1. //root/src/file.cc -> [
['root/', '//root/'],
['src/', '//root/src/'],
['file.cc', '//root/src/file.cc']
]
2. //root/src/path1/ -> [
['root/', '//root/'],
['src/', '//root/src/'],
['path1/', '//root/src/path1/']
]
3. component1>component2 -> [
['component1', 'component1'],
['component2', 'component1>component2'],
]
"""
path_parts = []
if not path:
return path_parts
path_root, path_separator = _GetPathRootAndSeparatorFromDataType(data_type)
if path == path_root:
return path_parts
if data_type == 'components':
index = 0
else:
index = 2 # Skip the leading '//' in the path.
while index >= 0:
next_index = path.find(path_separator, index)
if next_index >= 0:
name = path[index:next_index + 1]
if data_type == 'components':
sub_path = path[:next_index]
else:
sub_path = path[:next_index + 1]
next_index += 1
else:
name = path[index:]
sub_path = path
path_parts.append([name, sub_path])
index = next_index
return path_parts
def _SplitLineIntoRegions(line, uncovered_blocks):
"""Returns a list of regions for a line of code.
The structure of the output is as follows:
[
{
'is_covered': True/False # Whether this region is actually covered.
'text': string # The source text for this region.
}
]
The regions in the output list are in the order they appear in the line.
For example, the following loop reconstructs the entire line:
text = ''
for region in _SplitLineIntoRegions(line, uncovered_blocks):
text += region['text']
assert text == line
"""
if not uncovered_blocks:
return [{'is_covered': True, 'text': line}]
regions = []
region_start = 0
for block in uncovered_blocks:
# Change from 1-indexing to 0-indexing
first = block['first'] - 1
last = block['last']
if last < 0:
last = len(line)
else:
last -= 1
# Generate the covered region that precedes this uncovered region.
preceding_text = line[region_start:first]
if preceding_text:
regions.append({'is_covered': True, 'text': preceding_text})
regions.append({
'is_covered': False,
# `last` is inclusive
'text': line[first:last + 1]
})
region_start = last + 1
# If there is any text left on the line, it must be covered. If it were
# uncovered, it would have been part of the final entry in uncovered_blocks.
remaining_text = line[region_start:]
if remaining_text:
regions.append({'is_covered': True, 'text': remaining_text})
return regions
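# Worked example (illustrative): for line = 'abcdef' and
# uncovered_blocks = [{'first': 3, 'last': 4}], the function returns
# [{'is_covered': True, 'text': 'ab'},
#  {'is_covered': False, 'text': 'cd'},
#  {'is_covered': True, 'text': 'ef'}].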
class ServeCodeCoverageData(BaseHandler):
PERMISSION_LEVEL = Permission.ANYONE
def _ServePerCLCoverageData(self):
"""Serves per-cl coverage data.
There are two types of requests: 'lines' and 'percentages', and the reason
why they're separate is that:
1. Calculating lines takes much longer than percentages, especially when
data needs to be shared between two equivalent patchsets, while for
percentages, it's assumed that incremental coverage percentages would be
the same for equivalent patchsets and no extra work is needed.
2. Percentages are usually requested much earlier than lines by the Gerrit
plugin because the latter won't be displayed until the user actually
expands the diff view.
The format of the returned data conforms to:
https://chromium.googlesource.com/infra/gerrit-plugins/code-coverage/+/213d226a5f1b78c45c91d49dbe32b09c5609e9bd/src/main/resources/static/coverage.js#93
"""
def _ServeLines(lines_data):
"""Serves lines coverage data."""
lines_data = lines_data or []
formatted_data = {'files': []}
for file_data in lines_data:
formatted_data['files'].append({
'path':
file_data['path'][2:],
'lines':
code_coverage_util.DecompressLineRanges(file_data['lines']),
})
return {'data': {'data': formatted_data,}, 'allowed_origin': '*'}
def _ServePercentages(abs_coverage, inc_coverage, abs_unit_tests_coverage,
inc_unit_tests_coverage):
"""Serves percentages coverage data."""
def _GetCoverageMetricsPerFile(coverage):
coverage_per_file = {}
for e in coverage:
coverage_per_file[e.path] = {
'covered': e.covered_lines,
'total': e.total_lines,
}
return coverage_per_file
abs_coverage_per_file = _GetCoverageMetricsPerFile(abs_coverage)
inc_coverage_per_file = _GetCoverageMetricsPerFile(inc_coverage)
abs_unit_tests_coverage_per_file = _GetCoverageMetricsPerFile(
abs_unit_tests_coverage)
inc_unit_tests_coverage_per_file = _GetCoverageMetricsPerFile(
inc_unit_tests_coverage)
formatted_data = {'files': []}
for p in set(abs_coverage_per_file.keys() +
abs_unit_tests_coverage_per_file.keys()):
formatted_data['files'].append({
'path':
p[2:],
'absolute_coverage':
abs_coverage_per_file.get(p, None),
'incremental_coverage':
inc_coverage_per_file.get(p, None),
'absolute_unit_tests_coverage':
abs_unit_tests_coverage_per_file.get(p, None),
'incremental_unit_tests_coverage':
inc_unit_tests_coverage_per_file.get(p, None),
})
return {'data': {'data': formatted_data,}, 'allowed_origin': '*'}
host = self.request.get('host')
project = self.request.get('project')
try:
change = int(self.request.get('change'))
patchset = int(self.request.get('patchset'))
except ValueError, ve:
return BaseHandler.CreateError(
error_message=(
'Invalid value for change(%r) or patchset(%r): need int, %s' %
(self.request.get('change'), self.request.get('patchset'),
ve.message)),
return_code=400,
allowed_origin='*')
data_type = self.request.get('type', 'lines')
logging.info('Serving coverage data for CL:')
logging.info('host=%s', host)
logging.info('change=%d', change)
logging.info('patchset=%d', patchset)
logging.info('type=%s', data_type)
configs = utils.GetAllowedGitilesConfigs()
if project not in configs.get(host.replace('-review', ''), {}):
return BaseHandler.CreateError(
error_message='"%s/%s" is not supported.' % (host, project),
return_code=400,
allowed_origin='*',
is_project_supported=False)
if data_type not in ('lines', 'percentages'):
return BaseHandler.CreateError(
error_message=(
'Invalid type: "%s", must be "lines" (default) or "percentages"' %
data_type),
return_code=400,
allowed_origin='*')
if not _IsServePresubmitCoverageDataEnabled():
# TODO(crbug.com/908609): Switch to 'is_service_enabled'.
kwargs = {'is_project_supported': False}
return BaseHandler.CreateError(
error_message='The functionality has been temporarily disabled.',
return_code=400,
allowed_origin='*',
**kwargs)
entity = PresubmitCoverageData.Get(
server_host=host, change=change, patchset=patchset)
is_serving_percentages = (data_type == 'percentages')
if entity:
if is_serving_percentages:
return _ServePercentages(entity.absolute_percentages,
entity.incremental_percentages,
entity.absolute_percentages_unit,
entity.incremental_percentages_unit)
return _ServeLines(entity.data)
# If
# coding: utf-8
"""
weasyprint.tests.test_api
-------------------------
Test the public API.
:copyright: Copyright 2011-2014 <NAME> and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import os
import io
import sys
import math
import contextlib
import threading
import gzip
import zlib
import lxml.html
import lxml.etree
import cairocffi as cairo
import pytest
from .testing_utils import (
resource_filename, assert_no_logs, capture_logs, TestHTML,
http_server, temp_directory)
from .test_draw import image_to_pixels
from ..compat import urljoin, urlencode, urlparse_uses_relative, iteritems
from ..urls import path2url
from .. import HTML, CSS, default_url_fetcher
from .. import __main__
from .. import navigator
from ..document import _TaggedTuple
CHDIR_LOCK = threading.Lock()
@contextlib.contextmanager
def chdir(path):
"""Change the current directory in a context manager."""
with CHDIR_LOCK:
old_dir = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_dir)
def read_file(filename):
"""Shortcut for reading a file."""
with open(filename, 'rb') as fd:
return fd.read()
def write_file(filename, content):
"""Shortcut for reading a file."""
with open(filename, 'wb') as fd:
fd.write(content)
def _test_resource(class_, basename, check, **kwargs):
"""Common code for testing the HTML and CSS classes."""
absolute_filename = resource_filename(basename)
url = path2url(absolute_filename)
check(class_(absolute_filename, **kwargs))
check(class_(guess=absolute_filename, **kwargs))
check(class_(filename=absolute_filename, **kwargs))
check(class_(url, **kwargs))
check(class_(guess=url, **kwargs))
check(class_(url=url, **kwargs))
with open(absolute_filename, 'rb') as fd:
check(class_(fd, **kwargs))
with open(absolute_filename, 'rb') as fd:
check(class_(guess=fd, **kwargs))
with open(absolute_filename, 'rb') as fd:
check(class_(file_obj=fd, **kwargs))
with open(absolute_filename, 'rb') as fd:
content = fd.read()
with chdir(os.path.dirname(__file__)):
relative_filename = os.path.join('resources', basename)
check(class_(relative_filename, **kwargs))
check(class_(string=content, base_url=relative_filename, **kwargs))
encoding = kwargs.get('encoding') or 'utf8'
check(class_(string=content.decode(encoding), # unicode
base_url=relative_filename, **kwargs))
with pytest.raises(TypeError):
class_(filename='foo', url='bar')
@assert_no_logs
def test_html_parsing():
"""Test the constructor for the HTML class."""
def check_doc1(html, has_base_url=True):
"""Check that a parsed HTML document looks like resources/doc1.html"""
assert html.root_element.tag == 'html'
assert [child.tag for child in html.root_element] == ['head', 'body']
_head, body = html.root_element
assert [child.tag for child in body] == ['h1', 'p', 'ul', 'div']
h1 = body[0]
assert h1.text == 'WeasyPrint test document (with Ünicōde)'
if has_base_url:
url = urljoin(html.base_url, 'pattern.png')
assert url.startswith('file:')
assert url.endswith('weasyprint/tests/resources/pattern.png')
else:
assert html.base_url is None
_test_resource(TestHTML, 'doc1.html', check_doc1)
_test_resource(TestHTML, 'doc1_UTF-16BE.html', check_doc1,
encoding='UTF-16BE')
with chdir(os.path.dirname(__file__)):
filename = os.path.join('resources', 'doc1.html')
tree = lxml.html.parse(filename)
check_doc1(TestHTML(tree=tree, base_url=filename))
check_doc1(TestHTML(tree=tree), has_base_url=False)
head, _body = tree.getroot()
assert head.tag == 'head'
lxml.etree.SubElement(head, 'base', href='resources/')
check_doc1(TestHTML(tree=tree, base_url='.'))
@assert_no_logs
def test_css_parsing():
"""Test the constructor for the CSS class."""
def check_css(css):
"""Check that a parsed stylsheet looks like resources/utf8-test.css"""
# Using 'encoding' adds a CSSCharsetRule
rule = css.stylesheet.rules[-1]
assert rule.selector.as_css() == 'h1::before'
content, background = rule.declarations
assert content.name == 'content'
string, = content.value
assert string.value == 'I løvë Unicode'
assert background.name == 'background-image'
url_value, = background.value
assert url_value.type == 'URI'
url = urljoin(css.base_url, url_value.value)
assert url.startswith('file:')
assert url.endswith('weasyprint/tests/resources/pattern.png')
_test_resource(CSS, 'utf8-test.css', check_css)
_test_resource(CSS, 'latin1-test.css', check_css, encoding='latin1')
def check_png_pattern(png_bytes, x2=False, blank=False, rotated=False):
from .test_draw import _, r, B, assert_pixels_equal
if blank:
expected_pixels = [
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
]
size = 8
elif x2:
expected_pixels = [
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+r+r+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+r+r+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+B+B+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+B+B+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+B+B+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+B+B+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+B+B+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+B+B+B+B+B+B+B+B+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_,
]
size = 16
elif rotated:
expected_pixels = [
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+B+B+B+B+_+_,
_+_+B+B+B+B+_+_,
_+_+B+B+B+B+_+_,
_+_+r+B+B+B+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
]
size = 8
else:
expected_pixels = [
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
_+_+r+B+B+B+_+_,
_+_+B+B+B+B+_+_,
_+_+B+B+B+B+_+_,
_+_+B+B+B+B+_+_,
_+_+_+_+_+_+_+_,
_+_+_+_+_+_+_+_,
]
size = 8
surface = cairo.ImageSurface.create_from_png(io.BytesIO(png_bytes))
assert_pixels_equal('api_png', size, size,
image_to_pixels(surface, size, size),
b''.join(expected_pixels))
@assert_no_logs
def test_python_render():
"""Test rendering with the Python API."""
base_url = resource_filename('dummy.html')
html_string = '<body><img src=pattern.png>'
css_string = '''
@page { margin: 2px; size: 8px; background: #fff }
body { margin: 0; font-size: 0 }
img { image-rendering: optimizeSpeed }
@media screen { img { transform: rotate(-90deg) } }
'''
html = TestHTML(string=html_string, base_url=base_url)
css = CSS(string=css_string)
png_bytes = html.write_png(stylesheets=[css])
pdf_bytes = html.write_pdf(stylesheets=[css])
assert png_bytes.startswith(b'\211PNG\r\n\032\n')
assert pdf_bytes.startswith(b'%PDF')
check_png_pattern(png_bytes)
# TODO: check PDF content? How?
class fake_file(object):
def __init__(self):
self.chunks = []
def write(self, data):
self.chunks.append(bytes(data[:]))
def getvalue(self):
return b''.join(self.chunks)
png_file = fake_file()
html.write_png(png_file, stylesheets=[css])
assert png_file.getvalue() == png_bytes
pdf_file = fake_file()
html.write_pdf(pdf_file, stylesheets=[css])
assert pdf_file.getvalue() == pdf_bytes
with temp_directory() as temp:
png_filename = os.path.join(temp, '1.png')
pdf_filename = os.path.join(temp, '1.pdf')
html.write_png(png_filename, stylesheets=[css])
html.write_pdf(pdf_filename, stylesheets=[css])
assert read_file(png_filename) == png_bytes
assert read_file(pdf_filename) == pdf_bytes
png_filename = os.path.join(temp, '2.png')
pdf_filename = os.path.join(temp, '2.pdf')
with open(png_filename, 'wb') as png_file:
html.write_png(png_file, stylesheets=[css])
with open(pdf_filename, 'wb') as pdf_file:
html.write_pdf(pdf_file, stylesheets=[css])
assert read_file(png_filename) == png_bytes
assert read_file(pdf_filename) == pdf_bytes
x2_png_bytes = html.write_png(stylesheets=[css], resolution=192)
check_png_pattern(x2_png_bytes, x2=True)
screen_css = CSS(string=css_string, media_type='screen')
rotated_png_bytes = html.write_png(stylesheets=[screen_css])
check_png_pattern(rotated_png_bytes, rotated=True)
assert TestHTML(
string=html_string, base_url=base_url, media_type='screen'
).write_png(
stylesheets=[io.BytesIO(css_string.encode('utf8'))]
) == rotated_png_bytes
assert TestHTML(
string='<style>%s</style>%s' % (css_string, html_string),
base_url=base_url, media_type='screen'
).write_png() == rotated_png_bytes
@assert_no_logs
def test_command_line_render():
"""Test rendering with the command-line API."""
css = b'''
@page { margin: 2px; size: 8px; background: #fff }
@media screen { img { transform: rotate(-90deg) } }
body { margin: 0; font-size: 0 }
'''
html = b'<body><img src=pattern.png>'
combined = b'<style>' + css + b'</style>' + html
linked = b'<link rel=stylesheet href=style.css>' + html
with chdir(resource_filename('')):
# Reference
html_obj = TestHTML(string=combined, base_url='dummy.html')
pdf_bytes = html_obj.write_pdf()
png_bytes = html_obj.write_png()
x2_png_bytes = html_obj.write_png(resolution=192)
rotated_png_bytes = TestHTML(string=combined, base_url='dummy.html',
media_type='screen').write_png()
empty_png_bytes = TestHTML(
string=b'<style>' + css + b'</style>').write_png()
check_png_pattern(png_bytes)
check_png_pattern(rotated_png_bytes, rotated=True)
check_png_pattern(empty_png_bytes, blank=True)
def run(args, stdin=b''):
stdin = io.BytesIO(stdin)
stdout = io.BytesIO()
try:
__main__.HTML = TestHTML
__main__.main(args.split(), stdin=stdin, stdout=stdout)
finally:
__main__.HTML = HTML
return stdout.getvalue()
with temp_directory() as temp:
with chdir(temp):
pattern_bytes = read_file(resource_filename('pattern.png'))
write_file('pattern.png', pattern_bytes)
write_file('no_css.html', html)
write_file('combined.html', combined)
write_file('combined-UTF-16BE.html',
combined.decode('ascii').encode('UTF-16BE'))
write_file('linked.html', linked)
write_file('style.css', css)
run('combined.html out1.png')
run('combined.html out2.pdf')
assert read_file('out1.png') == png_bytes
assert read_file('out2.pdf') == pdf_bytes
run('combined-UTF-16BE.html out3.png --encoding UTF-16BE')
assert read_file('out3.png') == png_bytes
combined_absolute = os.path.join(temp, 'combined.html')
run(combined_absolute + ' out4.png')
assert read_file('out4.png') == png_bytes
combined_url = path2url(os.path.join(temp, 'combined.html'))
run(combined_url + ' out5.png')
assert read_file('out5.png') == png_bytes
run('linked.html out6.png') # test relative URLs
assert read_file('out6.png') == png_bytes
run('combined.html out7 -f png')
run('combined.html out8 --format pdf')
assert read_file('out7') == png_bytes
assert read_file('out8') == pdf_bytes
run('no_css.html out9.png')
run('no_css.html out10.png -s style.css')
assert read_file('out9.png') != png_bytes
assert read_file('out10.png') == png_bytes
stdout = run('--format png combined.html -')
assert stdout == png_bytes
run('- out11.png', stdin=combined)
check_png_pattern(read_file('out11.png'))
assert read_file('out11.png') == png_bytes
stdout = run('--format png - -', stdin=combined)
assert stdout == png_bytes
run('combined.html out13.png --media-type screen')
run('combined.html out12.png -m screen')
run('linked.html out14.png -m screen')
assert read_file('out12.png') == rotated_png_bytes
assert read_file('out13.png') == rotated_png_bytes
assert read_file('out14.png') == rotated_png_bytes
stdout = run('-f pdf combined.html -')
assert stdout.count(b'attachment') == 0
stdout = run('-f pdf -a pattern.png combined.html -')
assert stdout.count(b'attachment') == 1
stdout = run('-f pdf -a style.css -a pattern.png combined.html -')
assert stdout.count(b'attachment') == 2
stdout = run('-f png -r 192 linked.html -')
assert stdout == x2_png_bytes
stdout = run('-f png --resolution 192 linked.html -')
assert run('linked.html - -f png --resolution 192') == x2_png_bytes
assert stdout == x2_png_bytes
os.mkdir('subdirectory')
os.chdir('subdirectory')
with capture_logs() as logs:
stdout = run('--format png - -', stdin=combined)
assert len(logs) == 1
assert logs[0].startswith('WARNING: Failed to load image')
assert stdout == empty_png_bytes
stdout = run('--format png --base-url .. - -', stdin=combined)
assert stdout == png_bytes
@assert_no_logs
def test_unicode_filenames():
"""Test non-ASCII filenames both in Unicode or bytes form."""
# Replicate pattern.png in CSS so that base_url does not matter.
html = b'''
<style>
@page { margin: 2px; size: 8px; background: #fff }
html { background: #00f; }
body { background: #f00; width: 1px; height: 1px }
</style>
<body>
'''
png_bytes = TestHTML(string=html).write_png()
check_png_pattern(png_bytes)
# Remember we have __future__.unicode_literals
unicode_filename = 'Unicödé'
with temp_directory() as temp:
with chdir(temp):
write_file(unicode_filename, html)
assert os.listdir('.') == [unicode_filename]
# This should be independent of the encoding used by the filesystem
bytes_filename, = os.listdir(b'.')
assert TestHTML(unicode_filename).write_png() == png_bytes
assert TestHTML(bytes_filename).write_png() == png_bytes
os.remove(unicode_filename)
assert os.listdir('.') == []
TestHTML(string=html).write_png(unicode_filename)
assert read_file(bytes_filename) == png_bytes
# Surface.write_to_png does not accept bytes filenames
# on Python 3
if sys.version_info[0] < 3:
os.remove(unicode_filename)
assert os.listdir('.') == []
TestHTML(string=html).write_png(bytes_filename)
assert read_file(unicode_filename) == png_bytes
@assert_no_logs
def test_low_level_api():
html = TestHTML(string='<body>')
css = CSS(string='''
@page { margin: 2px; size: 8px; background: #fff }
html { background: #00f; }
body { background: #f00; width: 1px; height: 1px }
''')
pdf_bytes = html.write_pdf(stylesheets=[css])
assert pdf_bytes.startswith(b'%PDF')
assert html.render([css]).write_pdf() == pdf_bytes
png_bytes = html.write_png(stylesheets=[css])
document = html.render([css], enable_hinting=True)
page, = document.pages
assert page.width == 8
assert page.height == 8
assert document.write_png() == (png_bytes, 8, 8)
assert document.copy([page]).write_png() == (png_bytes, 8, 8)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 8, 8)
page.paint(cairo.Context(surface))
file_obj = io.BytesIO()
surface.write_to_png(file_obj)
check_png_pattern(file_obj.getvalue())
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 8, 8)
context =
``tile_type``.
:param tags: A list of strings holding tag values.
:param clip: A Boolean indicating if the result should be clipped
(default: False).
:param margin: ...
:param limit: A max. number of features to return in the result.
:param params: ...
:param selection: ...
:param skip_cache: ...
:param clustering: ...
:param clusteringParams: ...
:param force_2d: If set to True the features in the response
will have only X and Y components, by default all
x,y,z coordinates will be returned.
:param mode: A string to indicate how to optimize the resultset and
geometries for display. Allowed values are ``raw`` and ``viz``.
:param viz_sampling: A string to indicate the sampling strength in
case of ``mode=viz``. Allowed values are: ``low``, ``med``,
``high``, and ``off``, default: ``med``.
:return: A dict representing a feature collection.
Available tile types are:
- quadkeys, `Virtual Earth Tile System
<https://www.rigacci.org/wiki/doku.php/tecnica/gps_cartografia_gis/ve>`_,
- web, `Tiled Web Map <https://en.wikipedia.org/wiki/Tiled_web_map>`_.
- tms, `OSGEO Tile Map Service
<https://wiki.osgeo.org/wiki/Tile_Map_Service_Specification>`_,
- here, ?
"""
path = f"/hub/spaces/{space_id}/tile/{tile_type}/{tile_id}"
q_params: Dict[str, str] = {"clientId": _CLIENT_ID}
if tags:
q_params["tags"] = ",".join(tags)
if clip:
q_params["clip"] = str(clip).lower()
if params:
q_params.update(params)
if selection:
q_params["selection"] = ",".join(selection)
if skip_cache:
q_params["skipCache"] = str(skip_cache).lower() # pragma: no cover
if clustering:
q_params["clustering"] = clustering
if clusteringParams:
d = dict((f"clustering.{k}", v) for (k, v) in clusteringParams.items())
q_params.update(d)
if margin:
q_params["margin"] = str(margin)
if limit:
q_params["limit"] = str(limit)
if force_2d:
q_params["force2D"] = str(force_2d).lower()
if mode:
q_params["mode"] = str(mode).lower()
if viz_sampling:
q_params["vizSampling"] = str(viz_sampling).lower()
return self.get(path=path, params=q_params).json()
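# Illustrative call (space id and tile id are made-up placeholder values):
#   fc = api.get_space_tile(space_id="a1b2c3", tile_type="web",
#                           tile_id="10_550_335", clip=True, limit=100)
#   print(len(fc["features"]))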
def get_space_search(
self,
space_id: str,
tags: Optional[List[str]] = None,
limit: Optional[int] = None,
params: Optional[dict] = None,
selection: Optional[List[str]] = None,
skip_cache: Optional[bool] = None,
force_2d: Optional[bool] = None,
) -> dict:
"""Search for features.
:param space_id: A string with the ID of the desired XYZ space.
:param tags: A list of strings holding tag values.
:param limit: A max. number of features to return in the result.
:param params: ...
:param selection: ...
:param skip_cache: ...
:param force_2d: If set to True the features in the response
will have only X and Y components, by default all
x,y,z coordinates will be returned.
:return: A dict representing a feature collection.
Example:
>>> feats = api.get_space_search(space_id=space_id)
>>> print(feats["type"] )
>>> print(len(feats["features"]) )
"""
q_params: Dict[str, str] = {"clientId": _CLIENT_ID}
if tags:
q_params["tags"] = ",".join(tags)
if limit:
q_params["limit"] = str(limit)
if params:
q_params.update(params)
if selection:
q_params["selection"] = ",".join(selection)
if skip_cache:
q_params["skipCache"] = str(skip_cache).lower() # pragma: no cover
if force_2d:
q_params["force2D"] = str(force_2d).lower()
path = f"/hub/spaces/{space_id}/search"
return self.get(path=path, params=q_params).json()
# FIXME
def get_space_iterate(
self,
space_id: str,
limit: int,
force_2d: Optional[bool] = None,
) -> Generator:
"""Iterate features in the space (yielding them one by one).
:param space_id: A string representing desired space ID.
:param limit: A max. number of features to return in the result.
:param force_2d: If set to True the features in the response
will have only X and Y components, by default all
x,y,z coordinates will be returned.
:yields: A feature in space.
"""
path = f"/hub/spaces/{space_id}/iterate"
params = {"limit": limit, "clientId": _CLIENT_ID}
if force_2d:
params["force2D"] = str(force_2d).lower()
while True:
res: dict = self.get(path=path, params=params).json()
handle = res.get("handle", None)
feats = res["features"]
for feat in feats:
yield feat
if handle:
params = {"limit": limit, "handle": handle}
if handle is None or len(feats) < limit:
break
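# Illustrative usage (the space id is a made-up placeholder):
#   for feat in api.get_space_iterate(space_id="a1b2c3", limit=100):
#       print(feat["id"])
# Iteration transparently follows the paging 'handle' until no handle is
# returned or a page comes back with fewer than `limit` features.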
def get_space_all(self, space_id: str, limit: int, max_len=1000) -> dict:
"""Get all features as one single GeoJSON feature collection.
This is a convenience method, not directly available in the XYZ API.
It hides the API paging mechanism and returns all data in one chunk.
So be careful if you don't know how much data you will get.
:param space_id: A string representing desired space ID.
:param limit: A max. number of features to fetch per request (page size).
:param max_len: A max. total number of features to return in the result.
:return: A dict representing a feature collection.
Example:
>>> fc = api.get_space_all(space_id=space_id, limit=100)
>>> print(len(fc["features"]) )
>>> print(fc["type"])
"""
feature_it = self.get_space_iterate(space_id=space_id, limit=limit)
gj = geojson.FeatureCollection(list(feature_it)[:max_len])
return gj
def get_space_count(self, space_id: str) -> dict:
"""Get feature count.
:param space_id: A string with the ID of the desired XYZ space.
:return: A dict containing the number of features inside the specified
space.
"""
path = f"/hub/spaces/{space_id}/count"
params = {"clientId": _CLIENT_ID}
return self.get(path=path, params=params).json()
# Edit Features
def put_space_features(
self,
space_id: str,
data: dict,
add_tags: Optional[List[str]] = None,
remove_tags: Optional[List[str]] = None,
) -> dict:
"""Create or replace multiple features.
:param space_id: A string with the ID of the desired XYZ space.
:param data: A JSON object describing one or more features to add.
:param add_tags: A list of strings describing tags to be added to
the features.
:param remove_tags: A list of strings describing tags to be removed
from the features.
:return: A dict representing a feature collection.
Example:
>>> from xyzspaces.datasets import get_countries_data
>>> gj_countries = get_countries_data()
>>> features = api.put_space_features(
... space_id=space_id,
... data=gj_countries,
... add_tags=["foo", "bar"],
... remove_tags=["bar"],
... )
>>> print(features)
"""
path = f"/hub/spaces/{space_id}/features"
params = join_string_lists(addTags=add_tags, removeTags=remove_tags)
params.update({"clientId": _CLIENT_ID})
return self.put(path=path, params=params, json=data, headers=self.headers).json()
def post_space_features(
self,
space_id: str,
data: dict, # must be a feature collection
add_tags: Optional[List[str]] = None,
remove_tags: Optional[List[str]] = None,
) -> dict:
"""Modify multiple features in the space.
:param space_id: A string with the ID of the desired XYZ space.
:param data: A JSON object describing one or more features to
modify.
:param add_tags: A list of strings describing tags to be added to
the features.
:param remove_tags: A list of strings describing tags to be removed
from the features.
:return: A dict representing a feature collection.
Example:
>>> data = dict(type="FeatureCollection", features=[deu, ita])
>>> space_features = api.post_space_features(
... space_id=space_id, data=data)
>>> print(space_features)
"""
path = f"/hub/spaces/{space_id}/features"
params = join_string_lists(addTags=add_tags, removeTags=remove_tags)
params.update({"clientId": _CLIENT_ID})
return self.post(path=path, params=params, json=data, headers=self.headers).json()
def delete_space_features(
self,
space_id: str,
id: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
) -> str:
"""Delete multiple features from the space.
:param space_id: A string with the ID of the desired XYZ space.
:param id: A list of feature IDs to delete.
:param tags: A list of strings describing tags the features to
be deleted must have.
:return: A response from API call.
Example:
>>> deu = api.get_space_feature(space_id=space_id, feature_id="DEU")
>>> ita = api.get_space_feature(space_id=space_id, feature_id="ITA")
>>> deleted_features = api.delete_space_features(
... space_id=space_id, id=["DEU", "ITA"]) # noqa: E501
"""
path = f"/hub/spaces/{space_id}/features"
params = {"clientId": _CLIENT_ID}
if id:
# TODO: The wildcard sign(*) could be used to delete all features
# in the space.
params["id"] = ",".join(id)
if tags:
params["tags"] = ",".join(tags)
return self.delete(path=path, params=params, headers=self.headers).text
def put_space_feature(
self,
space_id: str,
data: dict,
feature_id: Optional[str] = None,
add_tags: Optional[List[str]] = None,
remove_tags: Optional[List[str]] = None,
) -> dict:
"""Create or replace a single feature.
:param space_id: A string with the ID of the desired XYZ space.
:param data: A JSON object describing the feature to be added.
:param feature_id: A string with the ID of the feature to be created.
:param add_tags: A list of strings describing tags to be added to
the feature.
:param remove_tags: A list of strings describing tags to be removed
from the feature.
:return: A dict representing a feature.
Example:
>>> api.put_space_feature(
... space_id=space_id, feature_id=feature_id, data=fra)
"""
if feature_id is not None:
path = f"/hub/spaces/{space_id}/features/{feature_id}"
else:
path = f"/hub/spaces/{space_id}/features/"
params = join_string_lists(addTags=add_tags, removeTags=remove_tags)
params.update({"clientId": _CLIENT_ID})
return self.put(path=path, params=params, json=data, headers=self.headers).json()
def patch_space_feature(
self,
space_id: str,
feature_id: str,
data: dict,
add_tags: Optional[List[str]] = None,
remove_tags: Optional[List[str]] = None,
) -> dict:
"""Patch a single feature in the space.
:param space_id: A string with the ID of the desired XYZ space.
:param feature_id: A string with the ID of the feature to be modified.
:param data: A JSON object describing the feature to be changed.
:param add_tags: A list of strings describing tags to be added to
the feature.
:param remove_tags: A list of strings describing tags to be removed
from the feature.
:return: A dict representing a feature.
"""
# tests/test_dataframe/test_set.py
import pytest
import raccoon as rc
from raccoon.utils import assert_frame_equal
def test_set_cell():
actual = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=[10, 11, 12], columns=['a', 'b', 'c'],
sort=False)
# change existing value
actual.set(11, 'b', 55)
assert actual.get(11, 'b') == 55
actual.set(10, 'a', 11)
assert actual.get(10, 'a') == 11
actual.set(10, 'c', 13)
assert actual.get(10, 'c') == 13
assert actual.data == [[11, 2, 3], [4, 55, 6], [13, 8, 9]]
# add a new row
actual.set(13, 'b', 14)
assert actual.data == [[11, 2, 3, None], [4, 55, 6, 14], [13, 8, 9, None]]
# add a new column
actual.set(13, 'd', 88)
assert actual.data == [[11, 2, 3, None], [4, 55, 6, 14], [13, 8, 9, None], [None, None, None, 88]]
# add a new row and column
actual.set(14, 'e', 999)
assert actual.data == [[11, 2, 3, None, None], [4, 55, 6, 14, None], [13, 8, 9, None, None],
[None, None, None, 88, None], [None, None, None, None, 999]]
# add a new row note that index does not sort
actual.set(1, 'a', -100)
assert actual.data == [[11, 2, 3, None, None, -100], [4, 55, 6, 14, None, None], [13, 8, 9, None, None, None],
[None, None, None, 88, None, None], [None, None, None, None, 999, None]]
assert actual.index == [10, 11, 12, 13, 14, 1]
assert all([isinstance(actual.data[x], list) for x in range(len(actual.columns))])
def test_set_cell_sorted():
actual = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=[10, 12, 13], columns=['a', 'b', 'c'],
sort=True)
# change existing value
actual.set(12, 'b', 55)
assert actual.get(12, 'b') == 55
actual.set(10, 'a', 11)
assert actual.get(10, 'a') == 11
actual.set(10, 'c', 13)
assert actual.get(10, 'c') == 13
assert actual.data == [[11, 2, 3], [4, 55, 6], [13, 8, 9]]
# add a new row
actual.set(14, 'b', 14)
assert actual.index == [10, 12, 13, 14]
assert actual.data == [[11, 2, 3, None], [4, 55, 6, 14], [13, 8, 9, None]]
actual.set(11, 'a', -1)
assert actual.index == [10, 11, 12, 13, 14]
assert actual.data == [[11, -1, 2, 3, None], [4, None, 55, 6, 14], [13, None, 8, 9, None]]
# add a new column
actual.set(13, 'd', 88)
assert actual.data == [[11, -1, 2, 3, None], [4, None, 55, 6, 14], [13, None, 8, 9, None],
[None, None, None, 88, None]]
# add a new row and column
actual.set(15, 'e', 999)
assert actual.index == [10, 11, 12, 13, 14, 15]
assert actual.data == [[11, -1, 2, 3, None, None], [4, None, 55, 6, 14, None], [13, None, 8, 9, None, None],
[None, None, None, 88, None, None], [None, None, None, None, None, 999]]
assert all([isinstance(actual.data[x], list) for x in range(len(actual.columns))])
# fails for mixed index type
with pytest.raises(TypeError):
actual.set('Z', 'e', 60)
def test_set_row():
actual = rc.DataFrame({'a': [1, 3], 'b': [4, 6], 'c': [7, 9]}, index=[10, 12], columns=['a', 'b', 'c'],
sort=True)
# change existing row
actual.set(indexes=10, values={'a': 11, 'b': 44, 'c': 77})
assert actual.data == [[11, 3], [44, 6], [77, 9]]
actual.set(indexes=12, values={'a': 33, 'b': 66, 'c': 99})
assert actual.data == [[11, 33], [44, 66], [77, 99]]
# insert new row in the middle
actual.set(indexes=11, values={'a': 22, 'b': 5, 'c': 88})
assert actual.data == [[11, 22, 33], [44, 5, 66], [77, 88, 99]]
# add a new row to end
actual.set(indexes=13, values={'a': 4, 'b': 7, 'c': 10})
assert actual.data == [[11, 22, 33, 4], [44, 5, 66, 7], [77, 88, 99, 10]]
actual.set(indexes=14, values={'b': 8, 'c': 11})
assert actual.data == [[11, 22, 33, 4, None], [44, 5, 66, 7, 8], [77, 88, 99, 10, 11]]
assert actual.index == [10, 11, 12, 13, 14]
# add a new row to beginning
actual.set(indexes=9, values={'a': -1, 'b': -2, 'c': -3})
assert actual.data == [[-1, 11, 22, 33, 4, None], [-2, 44, 5, 66, 7, 8], [-3, 77, 88, 99, 10, 11]]
assert actual.index == [9, 10, 11, 12, 13, 14]
actual.set(indexes=8, values={'b': -3, 'c': -4})
assert actual.data == [[None, -1, 11, 22, 33, 4, None], [-3, -2, 44, 5, 66, 7, 8], [-4, -3, 77, 88, 99, 10, 11]]
assert actual.index == [8, 9, 10, 11, 12, 13, 14]
# bad column names
with pytest.raises(ValueError):
actual.set(indexes=14, values={'a': 0, 'bad': 1})
# bad values type
with pytest.raises(TypeError):
actual.set(indexes=14, values=[1, 2, 3, 4, 5])
def test_set_row_sorted():
actual = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=[10, 11, 12], columns=['a', 'b', 'c'],
sort=False)
# change existing row
actual.set(indexes=10, values={'a': 11, 'b': 44, 'c': 77})
assert actual.data == [[11, 2, 3], [44, 5, 6], [77, 8, 9]]
actual.set(indexes=12, values={'a': 33, 'b': 66, 'c': 99})
assert actual.data == [[11, 2, 33], [44, 5, 66], [77, 8, 99]]
# change subset of existing row
actual.set(indexes=11, values={'a': 22, 'c': 88})
assert actual.data == [[11, 22, 33], [44, 5, 66], [77, 88, 99]]
# add a new row
actual.set(indexes=13, values={'a': 4, 'b': 7, 'c': 10})
assert actual.data == [[11, 22, 33, 4], [44, 5, 66, 7], [77, 88, 99, 10]]
actual.set(indexes=14, values={'b': 8, 'c': 11})
assert actual.data == [[11, 22, 33, 4, None], [44, 5, 66, 7, 8], [77, 88, 99, 10, 11]]
assert actual.index == [10, 11, 12, 13, 14]
# bad column names
with pytest.raises(ValueError):
actual.set(indexes=14, values={'a': 0, 'bad': 1})
# bad values type
with pytest.raises(TypeError):
actual.set(indexes=14, values=[1, 2, 3, 4, 5])
def test_set_column():
actual = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=[10, 11, 12], columns=['a', 'b', 'c'],
sort=False)
# change existing column
actual.set(columns='b', values=[44, 55, 66])
assert actual.data == [[1, 2, 3], [44, 55, 66], [7, 8, 9]]
# add a new column
actual.set(columns='e', values=[10, 11, 12])
assert actual.data == [[1, 2, 3], [44, 55, 66], [7, 8, 9], [10, 11, 12]]
assert all([isinstance(actual.data[x], list) for x in range(len(actual.columns))])
# not enough values
with pytest.raises(ValueError):
actual.set(columns='e', values=[1, 2])
# number of values must equal number of True indexes
with pytest.raises(ValueError):
actual.set(indexes=[True, False, True], columns='e', values=[1, 2, 3])
# too many values
with pytest.raises(ValueError):
actual.set(columns='e', values=[1, 2, 3, 4])
def test_set_column_sorted():
actual = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=[10, 11, 12], columns=['a', 'b', 'c'],
sort=True)
# change existing column
actual.set(columns='b', values=[44, 55, 66])
assert actual.data == [[1, 2, 3], [44, 55, 66], [7, 8, 9]]
# add a new column
actual.set(columns='e', values=[10, 11, 12])
assert actual.data == [[1, 2, 3], [44, 55, 66], [7, 8, 9], [10, 11, 12]]
assert all([isinstance(actual.data[x], list) for x in range(len(actual.columns))])
# not enough values
with pytest.raises(ValueError):
actual.set(columns='e', values=[1, 2])
# too many values
with pytest.raises(ValueError):
actual.set(columns='e', values=[1, 2, 3, 4])
def test_set_col_index_subset():
actual = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}, index=[10, 11, 12], columns=['a', 'b', 'c'],
sort=False)
# by index value
actual.set(columns='b', indexes=[12, 11, 10], values=[66, 55, 44])
assert actual.data == [[1, 2, 3], [44, 55, 66], [7, 8, 9]]
actual.set(columns='a', indexes=[12, 10], values=[33, 11])
assert actual.data == [[11, 2, 33], [44, 55, 66], [7, 8, 9]]
# new rows
actual.set(columns='c', indexes=[12, 13, 14], values=[120, 130, 140])
assert actual.data == [[11, 2, 33, None, None], [44, 55, 66, None, None], [7, 8, 120, 130, 140]]
assert actual.index == [10, 11, 12, 13, 14]
# new row new columns
actual.set(columns='z', indexes=[14, 15, 16], values=['zoo', 'boo', 'hoo'])
assert actual.data == [[11, 2, 33, None, None, None, None], [44, 55, 66, None, None, None, None],
[7, 8, 120, 130, 140, None, None], [None, None, None, None, 'zoo', 'boo', 'hoo']]
assert actual.index == [10, 11, 12, 13, 14, 15, 16]
assert all([isinstance(actual.data[x], list) for x in range(len(actual.columns))])
# values list shorter than indexes, raise error
with pytest.raises(ValueError):
actual.set(indexes=[10, 11], columns='a', values=[1])
# by boolean list
actual = rc.DataFrame({'c': [1, 2], 'a': [4, 5], 'b': [7, 8]}, index=['first', 'second'], columns=['a', 'b', 'c'],
sort=False)
actual.set(columns='c', indexes=[False, True], values=[99])
assert actual.data == [[4, 5], [7, 8], [1, 99]]
# boolean list not size of existing index
with pytest.raises(ValueError):
actual.set(indexes=[True, False, True], columns='c', values=[1, 2, 3])
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
isbg scans an IMAP Inbox and runs every entry against SpamAssassin.
For any entries that match, the message is copied to another folder,
and the original marked or deleted.
This software was mainly written by <NAME> <<EMAIL>>
and has been maintained by <NAME> <<EMAIL>> since
November 2009. You may use isbg under any OSI approved open source
license such as those listed at http://opensource.org/licenses/alphabetical
Usage:
isbg.py [options]
isbg.py (-h | --help)
isbg.py --version
Options:
--delete The spams will be marked for deletion from your inbox
--deletehigherthan # Delete any spam with a score higher than #
--exitcodes Use exitcodes to detail what happened
--expunge Cause marked for deletion messages to also be deleted
(only useful if --delete is specified)
--flag The spams will be flagged in your inbox
--gmail Delete by copying to '[Gmail]/Trash' folder
--help Show the help screen
--ignorelockfile Don't stop if lock file is present
--imaphost hostname IMAP server name
--imaplist List imap directories
--imappasswd passwd IMAP account password
--imapport port Use a custom port
--imapuser username Who you login as
--imapinbox mbox Name of your inbox folder
--learnspambox mbox Name of your learn spam folder
--learnhambox mbox Name of your learn ham folder
--learnthendestroy Mark learnt messages for deletion
--lockfilegrace # Set the lifetime of the lock file to # (in minutes)
--lockfilename file Override the lock file name
--maxsize numbytes Messages larger than this will be ignored as they are
unlikely to be spam
--movehamto mbox Move ham to folder
--noninteractive Prevent interactive requests
--noreport Don't include the SpamAssassin report in the message
copied to your spam folder
--nostats Don't print stats
--partialrun num Stop operation after scanning 'num' unseen emails
--passwdfilename Use a file to supply the password
--savepw Store the password to be used in future runs
--spamc Use spamc instead of standalone SpamAssassin binary
--spaminbox mbox Name of your spam folder
--nossl Don't use SSL to connect to the IMAP server
--teachonly Don't search spam, just learn from folders
--trackfile file Override the trackfile name
--verbose Show IMAP stuff happening
--version Show the version information
(Your inbox will remain untouched unless you specify --flag or --delete)
"""
import sys  # Because sys.stderr.write() is called below
try:
from docopt import docopt # Creating command-line interface
except ImportError:
sys.stderr.write("Missing dependency: docopt")
from subprocess import Popen, PIPE
import imaplib
import re
import os
import getpass
import string
import time
import atexit
try:
from hashlib import md5
except ImportError:
from md5 import md5
imapuser = ''
imaphost = 'localhost'
imappasswd = None
imapinbox = "INBOX"
spaminbox = "INBOX.spam"
interactive = sys.stdin.isatty()
maxsize = 120000 # messages larger than this aren't considered
pastuidsfile = None
lockfilegrace = 240
alreadylearnt = "Message was already un/learned"
# satest is the command that is used to test if the message is spam
satest = ["spamassassin", "--exit-code"]
# sasave is the one that dumps out a munged message including report
sasave = ["spamassassin"]
# what we use to set flags on the original spam in imapbox
spamflagscmd = "+FLAGS.SILENT"
# and the flags we set them to (none by default)
spamflags = "("
# exclude the spamassassin report in the message placed in spaminbox
noreport = False
# ###
# ### exitcode maps
# ###
exitcodeok = 0 # all went well
exitcodenewmsgs = 1 # there were new messages - none of them spam
exitcodenewspam = 2 # they were all spam
exitcodenewmsgspam = 3 # there were new messages and new spam
exitcodeflags = 10 # there were errors in the command line arguments
exitcodeimap = 11 # there was an IMAP level error
exitcodespamc = 12 # error of communication between spamc and spamd
exitcodetty = 20  # error because of a non-interactive terminal
exitcodelocked = 30 # there's certainly another isbg running
# IMAP implementation detail
# Courier IMAP ignores uid fetches where more than a certain number are listed
# so we break them down into smaller groups of this size
uidfetchbatchsize = 25
# password saving stuff. A vague level of obfuscation
passwdfilename = None
passwordhash = None
passwordhashlen = 256 # should be a multiple of 16
partialrun = None
def errorexit(msg, exitcode=exitcodeflags):
sys.stderr.write(msg)
sys.stderr.write("\nUse --help to see valid options and arguments\n")
sys.exit(exitcode)
def addspamflag(flag):
global spamflags
if len(spamflags) > 1:
spamflags = spamflags + " "
spamflags = spamflags + flag
def hexof(x):
res = ""
for i in x:
res = res + ("%02x" % ord(i))
return res
def hexdigit(c):
if c >= '0' and c <= '9':
return ord(c)-ord('0')
if c >= 'a' and c <= 'f':
return 10 + ord(c) - ord('a')
if c >= 'A' and c <= 'F':
return 10 + ord(c) - ord('A')
raise ValueError(repr(c) + " is not a valid hexadecimal digit")
def dehexof(x):
res = ""
while(len(x)):
res = res + chr(16 * hexdigit(x[0]) + hexdigit(x[1]))
x = x[2:]
return res
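# A minimal illustration (not part of the original script): hexof and dehexof are
# inverses, mapping each byte to two hex digits and back, e.g.
#   hexof("ab")              -> "6162"
#   dehexof("6162")          -> "ab"
#   dehexof(hexof("secret")) -> "secret"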
# Argument processing
try:
opts = docopt(__doc__, version="isbg version 1.00")
except Exception, e:
errorexit("Option processing failed - " + str(e))
if opts["--delete"] is True:
if opts["--gmail"] is True:
pass
else:
addspamflag("\\Deleted")
if opts["--deletehigherthan"] is not None:
try:
deletehigherthan = float(opts["--deletehigherthan"])
except:
errorexit("Unrecognized score - " + opts["--deletehigherthan"])
if deletehigherthan < 1:
errorexit("Score " + repr(deletehigherthan) + " is too small")
if opts["--flag"] is True:
addspamflag("\\Flagged")
if opts["--imaphost"] is not None:
imaphost = opts["--imaphost"]
if opts["--imappasswd"] is not None:
imappasswd = opts["--imappasswd"]
if opts["--imapport"] is not None:
imapport = int(opts["--imapport"])
if opts["--imapuser"] is not None:
imapuser = opts["--imapuser"]
if opts["--imapinbox"] is not None:
imapinbox = opts["--imapinbox"]
if opts["--learnspambox"] is not None:
learnspambox = opts["--learnspambox"]
if opts["--learnhambox"] is not None:
learnhambox = opts["--learnhambox"]
if opts["--lockfilegrace"] is not None:
lockfilegrace = int(opts["--lockfilegrace"])
if opts["--maxsize"] is not None:
try:
maxsize = int(opts["--maxsize"])
except:
errorexit("Unrecognised size - " + opts["--maxsize"])
if maxsize < 1:
errorexit("Size " + repr(maxsize) + " is too small")
if opts["--movehamto"] is not None:
movehamto = opts["--movehamto"]
if opts["--noninteractive"] is True:
interactive = 0
if opts["--noreport"] is True:
noreport = True
if opts["--spamc"] is True:
spamc = True
satest = ["spamc", "-c"]
sasave = ["spamc"]
if opts["--spaminbox"] is not None:
spaminbox = opts["--spaminbox"]
if opts["--lockfilename"] is not None:
lockfilename = opts["--lockfilename"]
if opts["--trackfile"] is not None:
pastuidsfile = opts["--trackfile"]
if opts["--partialrun"] is not None:
partialrun = int(opts["--partialrun"])
if partialrun < 1:
errorexit("Partial run number must be equal to 1 or higher")
# fixup any arguments
if spamflags[-1] != ')':
spamflags = spamflags + ')'
if opts["--imapport"] is None:
if opts["--nossl"] is True:
imapport = 143
else:
imapport = 993
if pastuidsfile is None:
pastuidsfile = os.path.expanduser("~" + os.sep + ".isbg-track")
m = md5()
m.update(imaphost)
m.update(imapuser)
m.update(repr(imapport))
res = hexof(m.digest())
pastuidsfile = pastuidsfile + res
if opts["--lockfilename"] is None:
lockfilename = os.path.expanduser("~" + os.sep + ".isbg-lock")
# Delete lock file
def removelock():
os.remove(lockfilename)
atexit.register(removelock)
# Password stuff
def getpw(data, hash):
res = ""
for i in range(0, passwordhashlen):
c = ord(data[i]) ^ ord(hash[i])
if c == 0:
break
res = res + chr(c)
return res
def setpw(pw, hash):
if len(pw) > passwordhashlen:
raise ValueError("""Password of length %d is too long to
store (max accepted is %d)"""
% (len(pw), passwordhashlen))
res = list(hash)
for i in range(0, len(pw)):
res[i] = chr(ord(res[i]) ^ ord(pw[i]))
return string.join(res, '')
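# A small sketch (assumed values, not in the original) of how the XOR obfuscation
# round-trips: setpw() XORs the password against the hash, and getpw() XORs the
# stored bytes against the same hash, stopping at the first zero byte.
#   hash = "\x10" * passwordhashlen   # illustrative hash value only
#   stored = setpw("secret", hash)
#   getpw(stored, hash)               -> "secret"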
if passwdfilename is None:
m = md5()
m.update(imaphost)
m.update(imapuser)
m.update(repr(imapport))
passwdfilename = os.path.expanduser("~" + os.sep +
".isbg-" + hexof(m.digest()))
if passwordhash is None:
# We make hash that the password is xor'ed against
m = md5()
m.update(imaphost)
m.update(m.digest())
m.update(imapuser)
m.update(m.digest())
m.update(repr(imapport))
m.update(m.digest())
passwordhash = m.digest()
while len(passwordhash) < passwordhashlen:
m.update(passwordhash)
passwordhash = passwordhash + m.digest()
if opts["--verbose"] is True:
print("Lock file is", lockfilename)
print("Trackfile is", pastuidsfile)
print("SpamFlags are", spamflags)
print("Password file is", passwdfilename)
# Acquire lockfilename or exit
if opts["--ignorelockfile"] is True:
if opts["--verbose"] is True:
print("Lock file is ignored. Continue.")
else:
if os.path.exists(lockfilename) and (os.path.getmtime(lockfilename) +
(lockfilegrace * 60) > time.time()):
if opts["--verbose"] is True:
print("""\nLock file is present. Guessing isbg
is already running. Exit.""")
exit(exitcodelocked)
else:
lockfile = open(lockfilename, 'w')
lockfile.write(repr(os.getpid()))
lockfile.close()
# Figure out the password
if imappasswd is None:
if opts["--savepw"] is False and os.path.exists(passwdfilename) is True:
try:
imappasswd = getpw(dehexof(open(passwdfilename, "rb").read()),
passwordhash)
if opts["--verbose"] is True:
print("Successfully read password file")
except:
pass
# do we have to prompt?
if imappasswd is None:
if not interactive:
errorexit("""You need to specify your imap password and save it
with the --savepw switch""", exitcodeok)
imappasswd = getpass.getpass("IMAP password for %s@%s: "
% (imapuser, imaphost))
# Should we save the password?
assert np.allclose(expected_grad_3, grad_out_3)
def test_changing_param_quantizer_settings(self):
""" Test that changing param quantizer settings takes effect after computing encodings is run """
model = SmallMnist()
# Skew weights of conv1
old_weight = model.conv1.weight.detach().clone()
model.conv1.weight = torch.nn.Parameter(old_weight + .9 * torch.abs(torch.min(old_weight)), requires_grad=False)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
# Check that no encoding is present for param quantizer
assert not sim.model.conv1.param_quantizers['weight'].encoding
# Compute encodings
sim.compute_encodings(dummy_forward_pass, None)
asym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
asym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert 8 == sim.model.conv1.param_quantizers['weight'].encoding.bw
# Check that offset is not relatively symmetric
assert not sim.model.conv1.param_quantizers['weight'].encoding.offset in [-127, -128]
# Change param quantizer to symmetric and new bitwidth
sim.model.conv1.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1.param_quantizers['weight'].bitwidth = 4
sim.compute_encodings(dummy_forward_pass, None)
sym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
sym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert 4 == sim.model.conv1.param_quantizers['weight'].encoding.bw
# Check that offset is still symmetric
assert sim.model.conv1.param_quantizers['weight'].encoding.offset in [-7, -8]
# Check that mins and maxes have been recomputed
assert not asym_min == sym_min
assert not asym_max == sym_max
def test_compute_encodings_on_subset_of_modules(self):
""" Test that computing encodings on a subset of modules causes remaining quantized modules to be set to
passThrough mode. """
def dummy_forward_pass(model, _):
conv1_out = model.conv1(torch.randn((1, 1, 28, 28)))
relu1_out = model.relu1(conv1_out)
model = SmallMnist()
model.eval()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
sim.compute_encodings(dummy_forward_pass, None)
for name, module in sim.model.named_modules():
if isinstance(module, StaticGridQuantWrapper):
assert QcQuantizeOpMode.ACTIVE == module._mode
if name == 'relu1':
assert module.output_quantizers[0].enabled
elif name in ['conv2', 'conv2_drop', 'relu2', 'relu3', 'dropout', 'fc2', 'log_softmax']:
assert not module.output_quantizers[0].enabled
def test_connected_graph_is_none(self):
""" Test that an assertion is thrown when connected graph is not able to be built. """
def raise_trace_error(_self, _model, _inputs):
raise torch.jit.TracingCheckError(None, None)
model = SmallMnist()
model.eval()
with unittest.mock.patch.object(ConnectedGraph, '__init__', raise_trace_error):
with unittest.mock.patch.object(ConnectedGraph, '__del__', lambda _self: None):
with pytest.raises(AssertionError):
_ = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
def test_rnn_quantization(self):
""" Test quantizing a model with rnn layer """
model = SingleLayerRNNModel()
dummy_input = torch.randn(10, 1, 3)
sim = QuantizationSimModel(model, dummy_input)
assert isinstance(sim.model.rnn, QcQuantizeRecurrent)
def test_quantizing_qc_quantize_module(self):
""" Test that qc_quantize_module is identified as not quantizable """
qc_quantize_module = QcQuantizeRecurrent(torch.nn.RNN(input_size=3, hidden_size=5, num_layers=1), weight_bw=16,
activation_bw=16, quant_scheme=QuantScheme.post_training_tf,
round_mode='nearest', data_type=QuantizationDataType.int)
assert not QuantizationSimModel._is_quantizable_module(qc_quantize_module)
def test_export_recurrent_model(self):
""" Test export functionality with recurrent models """
# models = [TwoLayerBidirectionaRNNModel(), TwoLayerBidirectionalLSTMModel(), TwoLayerBidirectionalGRUModel()]
models = [TwoLayerBidirectionalLSTMModel()]
dummy_input = torch.randn(10, 1, 3)
def forward_pass(model, args):
model.eval()
model(dummy_input)
for model in models:
sim = QuantizationSimModel(model, dummy_input)
# Quantize
sim.compute_encodings(forward_pass, None)
# Edit part of weights tensor to compare with original model before and after removal of quantize module
with torch.no_grad():
sim.model.recurrent.weight_ih_l0[0][0] = 1
edited_weight = sim.model.recurrent.weight_ih_l0.detach().clone()
# Check that edited weight is different than original weight in module_to_quantize
assert not torch.equal(edited_weight, sim.model.recurrent.module_to_quantize.weight_ih_l0)
sim.export('./data', 'recurrent_save', dummy_input)
exported_model = torch.load('./data/recurrent_save.pth')
# Check that weight from quantized module was copied to original module successfully
assert isinstance(exported_model.recurrent, (torch.nn.RNN, torch.nn.LSTM, torch.nn.GRU))
assert torch.equal(edited_weight, exported_model.recurrent.weight_ih_l0)
with open('./data/recurrent_save.encodings') as f:
encodings = json.load(f)
# verifying the encoding against default eAI HW cfg
# activation encoding (input only w/o cell state) -- x_l0, h_l0, x_l1 & h_l1
assert 8 == len(encodings['activation_encodings'])
# param encoding (weight only w/o bias) -- W_l0, R_l0, W_l1 & R_l1
assert 4 == len(encodings['param_encodings'])
os.remove('./data/recurrent_save.pth')
os.remove('./data/recurrent_save.onnx')
os.remove('./data/recurrent_save.encodings')
def test_compute_encoding_with_given_bitwidth(self):
"""
Test functionality to compute encoding for given bitwidth
"""
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(
torch.as_tensor(np.array([1.203197181224823, 0], dtype='float32')),
data_type=QuantizationDataType.int)
assert -2147483648 == encoding_dict['offset']
assert -1.2031972414 == round(encoding_dict['min'], 10)
assert 1.2031972408 == round(encoding_dict['max'], 10)
assert round(encoding_dict['scale'], 14) == 5.6028e-10
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(
torch.as_tensor(np.array([0.7796169519533523, -0.9791506528745285], dtype='float32')),
data_type=QuantizationDataType.int)
assert -2147483648 == encoding_dict['offset']
assert -0.9791506533 == round(encoding_dict['min'], 10)
assert 0.9791506529 == round(encoding_dict['max'], 10)
assert round(encoding_dict['scale'], 14) == 4.5595e-10
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(
torch.as_tensor(np.array([-0.7796169519533523, -0.9791506528745285], dtype='float32')),
data_type=QuantizationDataType.int)
assert -2147483648 == encoding_dict['offset']
assert round(encoding_dict['scale'], 14) == 4.5595e-10
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(
torch.as_tensor(np.array([-0.7796169519533523, -0.9791506528745285], dtype='float32')),
data_type=QuantizationDataType.float)
assert 16 == encoding_dict['bitwidth']
assert 'float' == encoding_dict['dtype']
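# Note on the asserted scale values above (a reading of the numbers, not a claim
# about AIMET internals): for a symmetric 32-bit encoding, scale is roughly
# max(abs(tensor)) / (2**31 - 1), e.g.
#   1.203197181224823 / 2147483647  ~= 5.6028e-10
#   0.9791506528745285 / 2147483647 ~= 4.5595e-10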
def test_export_dict_input_output(self):
""" test export functionality on dictionary input and output """
dummy_input = {'a': torch.randn(1, 10, 10, 10),
'b': torch.randn(1, 10, 10, 10),
'c': torch.randn(1, 10, 10, 10) }
model = InputOutputDictModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.mul1.output_quantizers[0].enabled = True
sim.model.mul2.output_quantizers[0].enabled = True
sim.model.mul3.output_quantizers[0].enabled = True
# Quantize
sim.compute_encodings(forward_pass, None)
o_names = ['ab', 'bc', 'ca']
sim.export('./data/', 'dict_input_output_model', dummy_input,
onnx_export_args=OnnxExportApiArgs(input_names=list(dummy_input.keys()),
output_names=o_names,
opset_version=12
))
with open('./data/dict_input_output_model.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
onnx_model = onnx.load('./data/dict_input_output_model.onnx')
for inp in onnx_model.graph.input:
assert inp.name in ['a', 'b', 'c']
for exp, act in zip(o_names, onnx_model.graph.output):
assert exp == act.name
for tensor_name in encoding_data["activation_encodings"].keys():
assert tensor_name in o_names
def test_compute_encoding_fp16(self):
"""
Test encodings generated for fp16
"""
dummy_input = {'a': torch.randn(1, 10, 10, 10),
'b': torch.randn(1, 10, 10, 10),
'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input,
default_data_type=QuantizationDataType.float)
quantizer = sim.model.mul1.input_quantizer
enc_dict = sim._create_encoding_dict(encoding=None, quantizer=quantizer, propagate_encodings=False)
assert enc_dict['dtype'] == 'float'
assert enc_dict['bitwidth'] == 16
assert 'min' not in enc_dict
assert 'max' not in enc_dict
assert 'scale' not in enc_dict
assert 'offset' not in enc_dict
assert 'is_symmetric' not in enc_dict
def test_mapping_encoding_for_torch_module_with_multiple_onnx_ops(self):
"""
Test that the input and output encodings map to the sub-graph level input/output when a torch module generates
multiple onnx ops, i.e. a sub-graph
"""
dummy_input = torch.randn(1, 4, 256, 512)
model = SoftMaxAvgPoolModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.sfmax.output_quantizers[0].enabled = True
sim.model.sfmax.input_quantizers[0].enabled = True
sim.model.avgpool.output_quantizers[0].enabled = True
sim.model.avgpool.input_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
sim.export('./data', 'sfmaxavgpool_model', dummy_input)
with open('./data/sfmaxavgpool_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert not set(encoding_data["activation_encodings"].keys()).symmetric_difference(('4', '9', 't.1'))
def test_transformer_mask_override_tf(self):
"""
test logic to override mask for a custom block with mask op for tf mode
:return:
"""
torch.manual_seed(10)
class AttnBlock(nn.Module):
def __init__(self):
super(AttnBlock, self).__init__()
self.add = elementwise_ops.Add()
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x = self.add(x1, x2)
return self.softmax(x)
class DummyAttnBlockModel(nn.Module):
def __init__(self):
super(DummyAttnBlockModel, self).__init__()
self.block = AttnBlock()
def forward(self, x1, x2):
return self.block(x1, x2)
# update data input to reflect a range at the add op of -10000 to ~16.xx
# this results in max being mapped to zero when the encoding grid is computed with 8 bits for the mask add
dummy_input = (torch.FloatTensor(32, 1, 100, 100).uniform_(-6000, 15),
torch.FloatTensor(32, 1, 100, 100).uniform_(-5000, 17))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
# use some dummy custom block type
model = DummyAttnBlockModel()
from aimet_common.defs import QuantScheme
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
old_encoding_min = sim.model.block.add.output_quantizer.encoding.min
old_encoding_max = sim.model.block.add.output_quantizer.encoding.max
print("old encoding min = ", old_encoding_min)
print("old encoding max = ", old_encoding_max)
assert int(old_encoding_min) == -11013
assert int(old_encoding_max) == 0
# use override registration function
transformer_utils.register_attention_mask_override('AttnBlock', 'add')
sim2 = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=dummy_input)
# compute encodings again to check override takes effect
sim2.compute_encodings(forward_pass, None)
new_encoding_min = sim2.model.block.add.output_quantizer.encoding.min
new_encoding_max = sim2.model.block.add.output_quantizer.encoding.max
print("encoding min = ", new_encoding_min)
print("encoding max = ", new_encoding_max)
# validate override
assert int(new_encoding_min) == -6
assert int(new_encoding_max) == 17
assert sim2.model.block.add.output_quantizer.encoding.bw == 8
def test_transformer_mask_override_tf_enhanced(self):
"""
test logic to override mask for a custom block with mask op for tf enhanced mode
:return:
"""
torch.manual_seed(10)
class AttnBlock(nn.Module):
def __init__(self):
super(AttnBlock, self).__init__()
self.add_2 = elementwise_ops.Add()
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x = self.add_2(x1, x2)
return self.softmax(x)
class DummyAttnBlockModel(nn.Module):
def __init__(self):
super(DummyAttnBlockModel, self).__init__()
self.block = AttnBlock()
def forward(self, x1, x2):
return self.block(x1, x2)
# update data input to reflect a range at the add op of -10000 to ~16.xx
# this results in max being mapped to zero when the encoding grid is computed with 8 bits for the mask add
dummy_input = (torch.FloatTensor(32, 1, 100, 100).uniform_(-6000, 15),
torch.FloatTensor(32, 1, 100, 100).uniform_(-5000, 17))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
# use some dummy custom block type
model = DummyAttnBlockModel()
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
old_encoding_min = sim.model.block.add_2.output_quantizer.encoding.min
old_encoding_max = sim.model.block.add_2.output_quantizer.encoding.max
print("old encoding min = ", old_encoding_min)
print("old encoding max = ", old_encoding_max)
assert int(old_encoding_min) == -10974
assert int(old_encoding_max) == 0
# use override registration function
transformer_utils.register_attention_mask_override('AttnBlock', 'add_2')
sim2 = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, dummy_input=dummy_input)
# compute encodings again to check override takes effect
sim2.compute_encodings(forward_pass, None)
new_encoding_min = sim2.model.block.add_2.output_quantizer.encoding.min
new_encoding_max = sim2.model.block.add_2.output_quantizer.encoding.max
print("encoding min = ", new_encoding_min)
print("encoding max = ", new_encoding_max)
assert int(new_encoding_min) == -6
assert int(new_encoding_max) == 17
assert sim2.model.block.add_2.output_quantizer.encoding.bw == 8
def test_transformer_mask_override_transformers_tf_enhanced(self):
"""
test logic to override mask for a DistilBERT, RoBERTa, GPT-2 models.
:return:
"""
torch.manual_seed(10)
class MultiHeadSelfAttention(nn.Module):
def
# vritxii/machine_learning_labs
# -*- coding:utf-8 -*-
from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.framework import ops
import os
import time
from glob import glob
'''
Run command sample:
python dcgan.py --input_height=28 --output_height=28 --train
DCGAN Features:
1. Use strided convolutions instead of spatial pooling in the discriminator, and fractionally-strided (transposed/deconvolution) convolutions in the generator.
2. Apply Batch Normalization to all layers except the generator's output layer and the discriminator's input layer.
BN stabilizes learning and helps deal with training problems caused by poor initialization.
3. Remove the fully connected layers; the generator input and the discriminator output connect directly to convolutional layers.
4. Use the Tanh activation at the generator's output layer and ReLU at its other layers; use leaky ReLU in the discriminator.
'''
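# A quick illustration of point 4 above (not part of the original code): the leaky
# ReLU used in the discriminator, lrelu() defined further down in this file, is
#   lrelu(x) = max(x, 0.2 * x)   e.g. lrelu(3.0) -> 3.0, lrelu(-3.0) -> -0.6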
pp = pprint.PrettyPrinter()
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_image(image_path, input_height, input_width,
resize_height=64, resize_width=64,
crop=True, grayscale=False):
image = imread(image_path, grayscale)
return transform(image, input_height, input_width,
resize_height, resize_width, crop)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, grayscale = False):
if (grayscale):
return scipy.misc.imread(path, flatten = True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(
x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width,
resize_height=64, resize_width=64, crop=True):
if crop:
cropped_image = center_crop(
image, input_height, input_width,
resize_height, resize_width)
else:
cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
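# Illustrative note (not in the original): transform() maps pixel values from
# [0, 255] to [-1, 1] via x / 127.5 - 1, matching the generator's tanh output
# range, and inverse_transform() maps back to [0, 1] for saving:
#   0 / 127.5 - 1 -> -1.0,  255 / 127.5 - 1 -> 1.0
#   inverse_transform(np.array([-1.0, 1.0])) -> array([0., 1.])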
def visualize(sess, dcgan, config):
image_frame_dim = int(math.ceil(config.batch_size**.5))
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.random.uniform(-1, 1, size=(config.batch_size , dcgan.z_dim))
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
y = np.random.choice(10, config.batch_size)
y_one_hot = np.zeros((config.batch_size, 10))
y_one_hot[np.arange(config.batch_size), y] = 1
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))
try:
image_summary = tf.image_summary
scalar_summary = tf.scalar_summary
histogram_summary = tf.histogram_summary
merge_summary = tf.merge_summary
SummaryWriter = tf.train.SummaryWriter
except:
image_summary = tf.summary.image
scalar_summary = tf.summary.scalar
histogram_summary = tf.summary.histogram
merge_summary = tf.summary.merge
SummaryWriter = tf.summary.FileWriter
if "concat_v2" in dir(tf):
def concat(tensors, axis, *args, **kwargs):
return tf.concat_v2(tensors, axis, *args, **kwargs)
else:
def concat(tensors, axis, *args, **kwargs):
return tf.concat(tensors, axis, *args, **kwargs)
class batch_norm(object):
def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
return tf.contrib.layers.batch_norm(x,
decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True,
is_training=train,
scope=self.name)
def conv_cond_concat(x, y):
"""Concatenate conditioning vector on feature map axis."""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return concat([
x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
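# Shape sketch (assumed, for illustration): with feature maps x of shape
# [N, H, W, C] and labels y reshaped to [N, 1, 1, y_dim], conv_cond_concat
# broadcasts y over H and W and returns [N, H, W, C + y_dim], e.g.
#   [64, 14, 14, 11] concat [64, 1, 1, 10] -> [64, 14, 14, 21]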
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
'''
Convolution kernel: 5 x 5, strides: [1, 2, 2, 1]. Using stride-2 convolutions instead of pooling
for downsampling, with padding='SAME', the convolution output has dimension [64, 14, 14, 11].
After batch normalization and leaky ReLU, the output is concatenated with yb, giving h0 with
dimension [64, 14, 14, 21]. Similarly, h1 has dimension [64, 7 * 7 * 74 + 10] after flattening and
concatenation, h2 has dimension [64, 1024 + 10], and a final linear layer produces h3 with
dimension [64, 1]. Since the discriminator output should represent a probability, a sigmoid
activation is applied at the end.
'''
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
# Support for versions of TensorFlow before 0.7.0
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return deconv, w, biases
else:
return deconv
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
def image_manifold_size(num_images):
manifold_h = int(np.floor(np.sqrt(num_images)))
manifold_w = int(np.ceil(np.sqrt(num_images)))
assert manifold_h * manifold_w == num_images
return manifold_h, manifold_w
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
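# Illustrative use (assuming output_height=28 as in the MNIST run above): a DCGAN
# generator typically derives its intermediate spatial sizes by working backwards
# from the output size with stride 2, e.g.
#   s_h  = 28
#   s_h2 = conv_out_size_same(s_h, 2)   # -> 14
#   s_h4 = conv_out_size_same(s_h2, 2)  # -> 7
#   s_h8 = conv_out_size_same(s_h4, 2)  # -> 4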
class DCGAN(object):
def __init__(self, sess, input_height=108, input_width=108, crop=True,
batch_size=64, sample_num = 64, output_height=64, output_width=64,
y_dim=10, z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',
input_fname_pattern='*.jpg', checkpoint_dir=None, sample_dir=None):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
y_dim: (optional) Dimension of dim for y. [10]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.crop = crop
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.y_dim = y_dim
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
# batch normalization : deals with poor initialization helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
if not self.y_dim:
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
if not self.y_dim:
self.g_bn3 = batch_norm(name='g_bn3')
self.dataset_name = dataset_name
self.input_fname_pattern = input_fname_pattern
self.checkpoint_dir = checkpoint_dir
self.data_X, self.data_y = self.load_mnist()
self.c_dim = self.data_X[0].shape[-1]
self.grayscale = (self.c_dim == 1)
self.build_model()
def build_model(self):
if self.y_dim:
self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
else:
self.y = None
if self.crop:
image_dims = [self.output_height, self.output_width, self.c_dim]
else:
image_dims = [self.input_height, self.input_width, self.c_dim]
self.inputs = tf.placeholder(
tf.float32, [self.batch_size] + image_dims, name='real_images')
inputs = self.inputs
self.z = tf.placeholder(
tf.float32, [None, self.z_dim], name='z')
self.z_sum = histogram_summary("z", self.z)
self.G = self.generator(self.z, self.y)
self.D, self.D_logits = self.discriminator(inputs, self.y, reuse=False)
self.sampler = self.sampler(self.z, self.y)
self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)
self.d_sum = histogram_summary("d", self.D)
self.d__sum = histogram_summary("d_", self.D_)
self.G_sum = image_summary("G", self.G)
def sigmoid_cross_entropy_with_logits(x, y):
'''
For real data, the discriminator loss d_loss_real is the cross-entropy between the
discriminator output and 1; for generated data, d_loss_fake is the cross-entropy
between the output and 0, so the total discriminator loss is
d_loss = d_loss_real + d_loss_fake. The generator loss g_loss is the cross-entropy
between the discriminator's output on the generated data and 1.
'''
try:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
except:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)
self.d_loss_real = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(
sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_)))
self.g_loss = tf.reduce_mean(
    sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))
action_size = self.num_opt_phase
action_space = spaces.Box(0.0, 1.0, (action_size,), dtype=np.float32)
else:
print("internal error : not supported action type: action_type={}".format(self.action_type))
# If expressed as ratios within the signal cycle.... PPO...
#
# action_size = number of phases above the minimum duration
# self.cycle_length, self.min_sum, self.optimizing_phase_cnt,
return action_space
def makeObservationSpace(self):
# 1. Compute the size of the observation feature
feature_length = 0
# extension-advice: must be extended according to the state fields.
for field in self.state_fields:
if field == "current_phase":
feature_length += self.num_phases
elif field == "current_phase_spent_duration":
feature_length += 2 # spent/max, min/max
elif field == "duration_rate_per_phase":
feature_length += self.num_phases
elif field in self.RETRIEVAL_FIELDS_WITH_LANES:
feature_length += len(self.in_comming_lanes)
else:
print("Internal Error : {} is not supported as an observation.\n".format(field),
"\t\tYou should extend makeObsevationSpace/compute_observation(), DIC_RETRIEVAL_FUNC_MAP,...")
# 2. Create the observation space.
obs_space = spaces.Box(low=np.zeros(feature_length), high=np.ones(feature_length))
if "obs" in self.print_debug_msg:
print("feature_length={} observation space={}".format(feature_length, obs_space))
return obs_space
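# Illustrative calculation (assumed values, not from the original): with
# state_fields = ["current_phase", "current_phase_spent_duration"] and
# num_phases = 4, feature_length = 4 (one-hot phase) + 2 (spent/max, min/max) = 6,
# so the space is spaces.Box(low=np.zeros(6), high=np.ones(6)).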
def getNextActionTime(self):
next_time = 0
if self.next_action_time == None:
if self.action_type == DYNAMIC:
next_time = self.delta_time
else: # SPLIT_PHASE or STATIC
current_phase_index = self.phase_index
phase_duration = self.phase_duration_info_list[current_phase_index][DUR_CUR]
current_spent = self.env.te_connector.getSpentDuration(self.id, phase_duration)
next_time = phase_duration - current_spent
for p_idx in range(current_phase_index+1, self.num_phases):
phase_duration = self.phase_duration_info_list[p_idx][DUR_CUR]
next_time += phase_duration
else:
if self.action_type == DYNAMIC:
next_time = self.next_action_time + self.delta_time
else:
next_time = self.next_action_time + self.cycle_length
return next_time
@property
def phase_index(self):
# self.current_phase_index
# todo: keep current_phase_index locally so it can be read without querying the simulator...
# We need to think about how to maintain it when delta_time is greater than 1.
# The signal may change mid-step, so the cached value could diverge from the actual one.
return self.env.te_connector.getCurrentPhaseIndex(self.id)
@property
def time_to_act(self):
return self.next_action_time == self.env.currentSimulationStep
def update(self):
self.spent += 1 # increase the time the current signal has been held
def applyAction(self, action):
self.applyActionNow(self, action)
# self.applyActionTest(action)
# For comparison against the static test.
def applyActionTest(self, action):
# print("type(action)={} action={}".format(type(action), action)) # type(action)=<class 'numpy.ndarray'> action=[18] or [ -1, 0, 0, 1]
# raise Exception
if self.action_type == STATIC:
self.next_action_time += self.delta_time
# Update the locally maintained state.
self.spent = 0
else: # PHASE_SPLIT
# Split the phase durations according to the action.
# Set the signal.
# Update the locally maintained state: change the duration info of the current logic.
# assert self.num_opt_phase == len(action)
# 1. Adjust the signal phase durations.
# split_mode = {SM_FIXED_TUMBLING, SM_DYNAMIC_TUMBLING}
if self.split_mode==SM_FIXED_SPLIT: # fixed split
if 1:
for i in range(self.num_opt_phase):
idx = self.phases_to_optimize[i]
self.phase_duration_info_list[idx][DUR_CUR] = self.phase_duration_info_list[idx][DUR_FIX]
else :
print("error........ can not happen")
raise Exception
# 2. Apply the change to the current logic of the environment's TrafficLight.
new_phase_duration = [dur[DUR_CUR] for dur in self.phase_duration_info_list]
self.env.te_connector.changeTLPhase(self.id, self.current_logic_index, new_phase_duration)
self.next_action_time += self.delta_time
# Update the locally maintained state.
self.spent = 0
# todo: check how next_action_time is set/updated
def applyActionNow(self, action):
# print("type(action)={} action={}".format(type(action), action)) # type(action)=<class 'numpy.ndarray'> action=[18] or [ -1, 0, 0, 1]
# raise Exception
if self.action_type == STATIC:
self.next_action_time += self.delta_time
# Update the locally maintained state.
self.spent = 0
elif self.action_type == DYNAMIC:
# Get the current signal phase.
current_phase_index = self.phase_index
# Determine the signal phase to apply.
new_phase_index = (current_phase_index + action) % self.num_phases
# Set the chosen signal phase.
self.env.te_connector.setPhase(self.id, new_phase_index)
self.next_action_time += self.delta_time
# Update the locally maintained state.
if current_phase_index != new_phase_index:
self.spent = 0
else: # PHASE_SPLIT
# Split the phase durations according to the action.
# Set the signal.
# Update the locally maintained state: change the duration info of the current logic.
# assert self.num_opt_phase == len(action)
# 1. Adjust the signal phase durations.
# split_mode = {SM_FIXED_TUMBLING, SM_DYNAMIC_TUMBLING}
if self.split_mode==SM_FIXED_SPLIT: # fixed split
if 0:
for i in range(self.num_opt_phase):
delta_duration = action[i] * self.unit_of_duration_adjustment
idx = self.phases_to_optimize[i]
self.phase_duration_info_list[idx][DUR_CUR] += delta_duration
# todo: need to check that the values stay within min...max; the cycle_length as well.
# Apply a penalty if they fall outside the range....
# The value below is not used.
new_cycle_length = np.sum([self.phase_duration_info_list[i][DUR_CUR] for i in range(self.num_phases)])
elif 0:
new_phase_duration_info = copy.deepcopy(self.phase_duration_info_list)
convert_map = DIC_ACTION_CONVERT_MAP[self.num_opt_phase][action[0]]
for i in range(self.num_opt_phase):
delta_duration = convert_map[i] * self.unit_of_duration_adjustment
idx = self.phases_to_optimize[i]
new_phase_duration_info[idx][DUR_CUR] = new_phase_duration_info[idx][DUR_CUR] + delta_duration
# The cycle length and min/max constraints are guaranteed.
# Check whether the minimum and maximum green times are satisfied.
try:
assert(new_phase_duration_info[idx][DUR_CUR]>=new_phase_duration_info[idx][DUR_MIN]) # minimum green
assert(new_phase_duration_info[idx][DUR_CUR]<=new_phase_duration_info[idx][DUR_MAX]) # maximum green
except:
print("tsid={} idx={} org={} conv_map={} new={}".format(self.id, idx,
self.phase_duration_info_list, convert_map, new_phase_duration_info))
else :
try :
convert_map = DIC_ACTION_CONVERT_MAP[self.num_opt_phase][action[0]]
for i in range(self.num_opt_phase):
delta_duration = convert_map[i] * self.unit_of_duration_adjustment
idx = self.phases_to_optimize[i]
self.phase_duration_info_list[idx][DUR_CUR] = self.phase_duration_info_list[idx][DUR_FIX] + delta_duration
# The cycle length and min/max constraints are guaranteed.
# Check whether the minimum and maximum green times are satisfied.
try:
assert(self.phase_duration_info_list[idx][DUR_CUR]>=self.phase_duration_info_list[idx][DUR_MIN]) # minimum green
assert(self.phase_duration_info_list[idx][DUR_CUR]<=self.phase_duration_info_list[idx][DUR_MAX]) # maximum green
except:
print("tsid={} idx={} conv_map={} phase_dur_info={}".format(self.id, idx, convert_map,
self.phase_duration_info_list))
except :
print("error...... tid={}".format(self.id))
print("i={} phase_to_optimize={}".format(i, self.phases_to_optimize))
a = 1/0
elif self.split_mode==SM_DYNAMIC_SPLIT: # dynamic split
# todo: adjust taking the action space into account.
action = (action - self.action_space.low[0]) / (self.action_space.high[0] - self.action_space.low[0])
total = np.sum(action)
## 1. Compute the new durations.
for i in range(self.num_opt_phase):
idx = self.phases_to_optimize[i] # phase index
if total:
cvted_additional_duration = np.round(action[i] / total * self.available_room)
else: # total is 0 (zero)
cvted_additional_duration = np.round(self.available_room / self.num_opt_phase)
if np.isnan(cvted_additional_duration) :
print("SM_DYNAMIC_SPLIT ____ action[{}]={} total={} room={}".format(i, action[i], total, self.available_room))
raise Exception
min_duration = self.phase_duration_info_list[idx][DUR_MIN]
self.phase_duration_info_list[idx][DUR_CUR] = int(min_duration + cvted_additional_duration)
# todo: can this be kept within min...max?
# Apply a penalty if it falls outside the range....
# Find the index of the phase that receives the largest share of the available time.
idx = self.phases_to_optimize[np.argmax(action)]
# Compute the difference between the cycle length and that of the new signal logic.
spare = int(self.cycle_length - np.sum([self.phase_duration_info_list[i][DUR_CUR] for i in range(self.num_phases)]))
# Apply the difference to the new signal logic so the cycle lengths match.
self.phase_duration_info_list[idx][DUR_CUR] += spare
## Because of rounding (fractions), they cannot always be exactly equal.
try:
assert self.cycle_length==np.sum([self.phase_duration_info_list[i][DUR_CUR] for i in range(self.num_phases)])
except:
print("cycle length is not equal .... \n\tcycle_length={}\n\tphase_dur_info={}"
.format(self.cycle_length, self.phase_duration_info_list))
# new_phase_duration_info = self.phase_duration_info_list.copy()
# print("cvted duration={}".format([dur[DUR_CUR] for dur in self.phase_duration_info_list]))
elif self.split_mode == SM_DYNAMIC_NC_CYCLE_LENGTH: # dynamic_tumbling without considering cycle length
## 1. Compute the new durations.
total = np.sum(action)
for i in range(self.num_opt_phase):
idx = self.phases_to_optimize[i] # phase index
if total:
cvted_additional_duration = np.round(action[i] * self.phase_duration_info_list[idx][DUR_BUF])
else: # total is 0 (zero)
cvted_additional_duration = np.round(self.available_room / self.num_opt_phase)
if np.isnan(cvted_additional_duration) :
print("SM_DYNAMIC_NC_CYCLE_LENGTH ____ action[{}]={} total={} room={}".format(i, action[i], total, self.available_room))
raise Exception
min_duration = self.phase_duration_info_list[idx][DUR_MIN]
self.phase_duration_info_list[idx][DUR_CUR] = int(min_duration + cvted_additional_duration)
# todo: what about the cycle_length?....
current_cycle_length = sum([self.phase_duration_info_list[i][DUR_CUR] for i in range(self.num_phases)])
self.cycle_length = current_cycle_length
self.delta_time = self.cycle_length
# new_phase_duration_info = self.phase_duration_info_list.copy()
# 2. Apply the change to the current logic of the environment's TrafficLight.
new_phase_duration = [dur[DUR_CUR] for dur in self.phase_duration_info_list]
self.env.te_connector.changeTLPhase(self.id, self.current_logic_index, new_phase_duration)
self.next_action_time += self.delta_time
# Update the locally maintained state.
self.spent = 0
def _getDensityPerLane(self, lanes):
"""
Return the current vehicle density of each lane.
density = current number of vehicles on the lane / (lane length / vehicle length)
todo: what is the difference from occupancy?
todo: in SUMO this can be obtained with traci.lane.getLastStepOccupancy(lane_id).
"""
return [min(1, self.env.te_connector.getCurrentVolumeOnLane(lane)
/ (self.env.te_connector.getLaneLength(lane) / self.vehicle_size_min_gap))
for lane in lanes]
def _getHaltingDensityPerLane(self, lanes):
"""
Ratio of halted, waiting vehicles on the lane to the lane capacity.
halting_density = halting_vehicle_num / capacity
"""
return [min(1, self.env.te_connector.getCurrentHaltingNumberOnLane(lane)
/ (self.env.te_connector.getLaneLength(lane) / self.vehicle_size_min_gap))
for lane in lanes]
def _getHaltingRatePerLane(self, lanes):
"""
Ratio of halted, waiting vehicles to the total number of vehicles on the lane.
halting_rate = halting_vehicle_num / vehicle_num
"""
halting_rate_list = []
for lane in lanes:
vehicle_num = self.env.te_connector.getCurrentVolumeOnLane(lane)
halting_num = self.env.te_connector.getCurrentHaltingNumberOnLane(lane)
halting_rate = halting_num / vehicle_num if vehicle_num > 0 else 0
halting_rate_list.append(halting_rate)
return halting_rate_list
def _getCurrentPhase(self, current_phase_index):
"""
return one-hot encoded phase index
"""
return [1 if current_phase_index==i else 0
for i in range(self.num_phases)]
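# Example (illustrative): with num_phases = 4 and current_phase_index = 2,
# _getCurrentPhase returns the one-hot list [0, 0, 1, 0].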
def _getCurrentPhaseSpentDuration(self, current_phase_index):
"""
return [phase spent duration rate, min/max ratio]
[spent/max, min/max]
"""
# spent/max, min/max
minDur = self.phase_duration_info_list[current_phase_index][DUR_MIN]
maxDur = self.phase_duration_info_list[current_phase_index][DUR_MAX]
return [np.minimum(1.0, self.spent / maxDur), minDur / maxDur]
# In the dynamic case the elapsed time (spent) can exceed the maximum duration (maxDur),
# which would fall outside the observation space; the minimum() above prevents that.
def _getDurationRatePerPhase(self):
# duration_rate_list = []
# for i in range(self.num_phase):
# duration_rate = self.phase_duration_info_list[i][DUR_CUR]/self.cycle_length
# duration_rate_list.append(duration_rate)
# return duration_rate_list
return [self.phase_duration_info_list[i][DUR_CUR]/self.cycle_length
for i in range(self.num_phases)]
def compute_observation(self):
"""
Return the state information.
todo: if information about neighboring intersections is needed, fetch it here.
First obtain the incoming lanes of the neighboring intersections and then use them.
this_obs = []
for n_tl_id in self.knn_neighbors:
in_lanes = self.env.neighbors_info_dic[n_tl_id]
sub_obs = self.DIC_RETRIEVAL_FUNC_MAP[field](in_lanes)
this_obs += sub_obs
makeObservationSpace() must also be modified.
"""
obs = []
current_phase_index = self.phase_index
for field in self.state_fields:
if field in self.RETRIEVAL_FIELDS_WITH_PHASE_INDEX: # meaningless when action_type is phase_split
this_obs = self.DIC_RETRIEVAL_FUNC_MAP[field](current_phase_index)
# else if field == "current_phase": # meaningless in the phase_split case.
# this_obs = [1 | |
# ARM-DOE/warno
import datetime
import requests
import logging
import psutil
import json
import os
import dateutil.parser
from flask import Flask, request, render_template
from flask_migrate import Migrate, upgrade
from flask_migrate import migrate as db_migrate
from flask_migrate import downgrade
from WarnoConfig import config
from WarnoConfig import utility
from WarnoConfig import redis_interface
from WarnoConfig.models import db
from WarnoConfig.models import EventWithValue, EventWithText, ProsensingPAF, InstrumentDataReference, User
from WarnoConfig.models import Instrument, Site, InstrumentLog, PulseCapture, EventCode
# Set up logging
LOG_PATH = os.environ.get("LOG_PATH")
if LOG_PATH is None:
LOG_PATH = "/vagrant/logs/"
# Logs to the main log
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(module)s:%(lineno)d: %(message)s',
filename='%scombined.log' % LOG_PATH,
filemode='a', level=logging.DEBUG)
# Logs to the event manager log
EM_LOGGER = logging.getLogger(__name__)
EM_HANDLER = logging.FileHandler("%sevent_manager_server.log" % LOG_PATH, mode="a")
EM_HANDLER.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s:%(module)s:%(lineno)d: %(message)s'))
EM_LOGGER.addHandler(EM_HANDLER)
# Add event manager handler to the main werkzeug logger
logging.getLogger("werkzeug").addHandler(EM_HANDLER)
# Located http://flask.pocoo.org/snippets/35/
class ReverseProxied(object):
"""Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
Parameters
----------
given_app: the WSGI application
"""
def __init__(self, given_app):
self.app = given_app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
server = environ.get('HTTP_X_FORWARDED_SERVER', '')
if server:
environ['HTTP_HOST'] = server
return self.app(environ, start_response)
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Database Setup
db_cfg = config.get_config_context()['database']
s_db_cfg = config.get_config_context()['s_database']
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%s:%s@%s:%s/%s' % (db_cfg['DB_USER'], s_db_cfg['DB_PASS'],
db_cfg['DB_HOST'], db_cfg['DB_PORT'],
db_cfg['DB_NAME'])
# Redis setup. This whole setup section feels pretty wrong. It probably direly needs a rework.
redint = redis_interface.RedisInterface()
db.init_app(app)
migrate = Migrate(app, db)
migration_path = os.environ.get("MIGRATION_PATH")
is_central = 0
cf_url = ""
cfg = None
headers = {'Content-Type': 'application/json'}
cert_verify = False
def save_json_db_info():
"""Saves database tables containing more permanent information, such as sites or instruments, to a json file.
File name has the format 'db_info_*day*_*month*_*year*', where the date corresponds to the date the function was run
because it shows the current status of the database.
Example File (indentation unnecessary):
[
{
"definition":
{
"table_name":
*database table name*,
"columns":
[(column_name_1, column_type_1), ..., (column_name_N, column_type_N)]
}
"data":
[
[val_1, val_2, ..., val_N],
[val_1, val_2, ..., val_N],
...,
[val_1, val_2, ..., val_N] ]
},
{ *table_2* },
...,
{ *table_N* }
]
"""
save_time = datetime.datetime.now()
filename = "db_info_" + str(save_time.day) + "_" + str(save_time.month) + "_" + str(save_time.year) + "_t_" +\
str(save_time.hour) + "_" + str(save_time.minute) + ".json"
tables = ["event_codes", "users", "sites", "instruments", "instrument_data_references"]
first_table = True
with open(filename, "w") as datafile:
# Begin the list of tables
datafile.write("[")
# Only prepend a comma separation if it is not the first table to be saved
for table in tables:
if not first_table:
datafile.write(", ")
else:
first_table = False
datafile.write('{"definition": ')
definition = dict()
definition['table_name'] = table
rows = db.session.execute("SELECT column_name, data_type FROM information_schema.columns "
"WHERE table_name = :table", dict(table=table)).fetchall()
definition['columns'] = [(row[0], row[1]) for row in rows]
# Write the definition and start the data section, with its list of records
json.dump(definition, datafile)
datafile.write(', "data": [')
first_record = True
rows = db.session.execute("SELECT * FROM %s" % (table,)).fetchall()
data = [list(row) for row in rows]
# Datetimes must be converted to iso compliant time format for json.dump
# Different tables have time in different places
for record in data:
# If the record is not the first, prepends a comma separation to separate the list elements
if not first_record:
datafile.write(", ")
else:
first_record = False
json.dump(record, datafile)
# Close the JSON for this table section
datafile.write("]}")
# Close the list of tables
datafile.write("]")
@app.route("/eventmanager/archive_data")
def save_json_db_data():
"""Saves database tables containing data information, such as 'events_with_value' or 'prosensing_paf' events, to a
json file. 'num_entries' for each table specifies how many data rows are in the file for the table, making
iterative parsing much easier.
Example File (indentation unnecessary):
[
{
"definition":
{
"table_name":
*database table name*,
"columns":
[(column_name_1, column_type_1), ..., (column_name_N, column_type_N)]
}
"data":
[
[val_1, val_2, ..., val_N],
[val_1, val_2, ..., val_N],
...,
[val_1, val_2, ..., val_N] ]
},
{ *table_2* },
...,
{ *table_N* }
]
"""
# Get the cutoff time for the data. Any data recorded before this time will be saved to json and deleted from the
# database
cutoff_time = datetime.datetime.now() + datetime.timedelta(-db_cfg['days_retained'])
# First save off the supplementary database table information (users, instruments, etc.)
# File name format described next
save_json_db_info()
# Each data file saved will use this extension, resulting in "*id*_archived_*day*_*month*_*year*_t_*hour*_*minute*.json".
# For example, for an instrument id of 5 the filename would start with "5_archived_30_12_1999", meaning all the data
# in the archived file is dated on or before the 30th of December, 1999
filename_extension = "_archived_" + str(cutoff_time.day) + "_" + str(cutoff_time.month) + \
"_" + str(cutoff_time.year) + "_t_" + str(cutoff_time.hour) + "_" + \
str(cutoff_time.minute) + ".json"
# Names of the tables to be saved.
tables = ["prosensing_paf", "events_with_value", "events_with_text", "instrument_logs", "pulse_captures"]
# Get a list of instrument_ids, so that the data can be archived according to the instrument the data is for
rows = db.session.execute("SELECT instrument_id FROM instruments").fetchall()
instrument_ids = [row[0] for row in rows]
for instrument_id in instrument_ids:
filename = str(instrument_id) + filename_extension
first_table = True
with open(filename, "w") as datafile:
# Begin the list of tables
datafile.write("[")
# Only prepend a comma separation if it is not the first table to be saved
for table in tables:
if not first_table:
datafile.write(", ")
else:
first_table = False
datafile.write('{\n"definition": ')
definition = dict()
definition['table_name'] = table
# List of (column_name, data_type) pairs to define the format of each data row
rows = db.session.execute("SELECT column_name, data_type FROM information_schema.columns WHERE table_name = :table",
dict(table=table)).fetchall()
definition['columns'] = [(row[0], row[1]) for row in rows]
# If there are any values to be saved, defines the oldest as the 'start_time' and the newest as the
# 'end_time'. Together they allow anyone reading the file to easily get the time range of the values
rows = db.session.execute("SELECT time FROM %s WHERE instrument_id = :id ORDER BY time ASC LIMIT 1" % (table,),
dict(id=instrument_id)).fetchall()
if len(rows) > 0:
# A little extra work to make the database time JSON serializable UTC
definition['start_time'] = rows[0][0].isoformat() + "Z"
else:
definition['start_time'] = None
rows = db.session.execute("SELECT time FROM %s WHERE instrument_id = :id ORDER BY time DESC LIMIT 1" % (table,),
dict(id=instrument_id)).fetchall()
if len(rows) > 0:
# A little extra work to make the database time JSON serializable UTC
definition['end_time'] = rows[0][0].isoformat() + "Z"
else:
definition['end_time'] = None
# Count the number of rows to be saved off. Having this defined makes reading in the data easier
count = db.session.execute("SELECT count(*) FROM %s WHERE time < :time AND instrument_id = :id" % (table,),
dict(time=cutoff_time.isoformat(), id=instrument_id)).fetchall()[0][0]
definition['num_entries'] = count
# Write the definition and start the data section, with its list of records
json.dump(definition, datafile)
datafile.write(', "data": [')
offset = 0
chunk_size = 5000
first_record = True
while True:
# Using offset and chunk_size in conjunction, retrieves and saves chunks of 5000 records at a time
# Loop runs until no records are returned, then breaks
rows = db.session.execute("SELECT * FROM %s WHERE time < :time AND instrument_id = :id ORDER BY time ASC OFFSET :offset LIMIT :chunk_size" % (table,),
dict(offset=offset, chunk_size=chunk_size, time=cutoff_time.isoformat(), id=instrument_id)).fetchall()
if len(rows) <= 0:
break
# The data must be a list to update values by index
data = [list(row) for row in rows]
# Datetimes must be converted to iso compliant time format for json.dump
# Different tables have time in different places
if table in ["prosensing_paf", "instrument_logs"]:
time_index = 1
elif table == "pulse_captures":
time_index = 2
else:
time_index = 3
for item in data:
item[time_index] = item[time_index].isoformat() + "Z"
# Encode each data row into a JSON string
data = [json.dumps(item) for item in data]
# If the record is not the first, prepends a comma separation to separate the list elements.
# The rows are joined by commas to make them JSON compliant
if not first_record:
subprocess.check_output(executables, universal_newlines=True).splitlines()
pass
res_ = [x for x in set(candidates) - set(exclusions) if self.should_copy(x)]
with open(cache_filename, 'w', encoding='utf-8') as lf:
lf.write('\n'.join(res_))
return res_
def add(self, what, to_=None, recursive=True):
if 'java-11' in what:
pass  # no-op placeholder kept as a debugging hook for java-11 paths
try:
if not to_:
to_ = what
if to_.startswith('/'):
to_ = to_[1:]
dir_, _ = os.path.split(to_)
pathlib.Path(os.path.join(self.root_dir, dir_)).mkdir(parents=True, exist_ok=True)
# ar.add(f)
if os.path.isdir(what):
# copy_tree(what, os.path.join(root_dir, to_))
# What on earth is this for?!
# if not os.path.exists(os.path.join(self.root_dir, to_)):
# shutil.copytree(what, os.path.join(self.root_dir, to_), symlinks=True, copy_function=self.mycopy)
# #, exist_ok=True)
pass
else:
self.mycopy(what, os.path.join(self.root_dir, to_))
pass
except Exception as ex_:
print("Troubles on adding", to_ , "<-", what)
pass
#raise ex_
pass
def projects(self):
"""
Return the combined list of all projects (Python, Go, etc.)
"""
projects_ = []
if self.pp:
projects_ += self.pp.projects()
if self.gp:
projects_ += self.gp.projects()
return projects_
def process_binary(self, binpath):
'''
Fix up the binary (patch its rpath and interpreter).
'''
for wtf_ in ['libldap']:
if wtf_ in binpath:
return
# m = magic.detect_from_filename(binpath)
m = fucking_magic(binpath)
if m in ['inode/symlink', 'text/plain']:
return
# if m.mime_type not in ['application/x-sharedlib', 'application/x-executable']
if not 'ELF' in m:
return
pyname = os.path.basename(binpath)
try:
patched_binary = fix_binary(binpath, '$ORIGIN/../lib64/')
except Exception as ex_:
print("Mime type ", m)
print("Cannot fix", binpath)
raise ex_
try:
interpreter = subprocess.check_output(['patchelf',
'--print-interpreter',
patched_binary], universal_newlines=True).splitlines()[0]
self.add(os.path.realpath(interpreter), os.path.join("pbin", "ld.so"))
except Exception as ex_:
print('Cannot get interpreter for binary', binpath)
# raise ex_
pass
self.add(patched_binary, os.path.join("pbin", pyname))
os.remove(patched_binary)
def fix_sharedlib(self, binpath, targetpath):
relpath = os.path.join(os.path.relpath("lib64", targetpath), "lib64")
patched_binary = fix_binary(binpath, '$ORIGIN/' + relpath)
self.add(patched_binary, targetpath)
os.remove(patched_binary)
pass
def get_all_sources(self):
for td_ in self.projects() + self.spec.templates_dirs:
git_url, git_branch, path_to_dir_, _ = self.explode_pp_node(td_)
yield git_url, git_branch, path_to_dir_
def folder_command(self):
'''
Run the command given by args.folder_command in every checked-out source directory.
This stage should be done after the sources have been checked out.
'''
if not self.pp:
return
curdir = os.getcwd()
args = self.args
in_src = os.path.relpath(self.src_dir, start=self.curdir)
# for td_ in self.projects() + self.spec.templates_dirs:
# git_url, git_branch, path_to_dir_, _ = self.explode_pp_node(td_)
for git_url, git_branch, path_to_dir_ in self.get_all_sources():
os.chdir(curdir)
if os.path.exists(path_to_dir_):
os.chdir(path_to_dir_)
print('*'*10 + f' Git «{args.folder_command}» for {git_url} ')
scmd = f'''{args.folder_command}'''
os.system(scmd)
pass
def checkout_sources(self):
'''
Just checking out sources.
This stage should be done when we have authorization to check them out.
'''
if not self.pp:
return
args = self.args
lines = []
lines2 = []
in_src = os.path.relpath(self.src_dir, start=self.curdir)
# lines.add("rm -rf %s " % in_src)
lines.append(f"""
mkdir -p tmp/snaphots-src
snapshotdir=$(date +"tmp/snaphots-src/snapshot-src-before-%Y-%m-%d-%H-%M-%S")
mv in/src $snapshotdir
mkdir -p {in_src}
""")
already_checkouted = set()
for td_ in self.projects() + self.spec.templates_dirs:
git_url, git_branch, path_to_dir_, _ = self.explode_pp_node(td_)
if path_to_dir_ not in already_checkouted:
probably_package_name = os.path.split(path_to_dir_)[-1]
already_checkouted.add(path_to_dir_)
path_to_dir = os.path.relpath(path_to_dir_, start=self.curdir)
newpath = path_to_dir + '.new'
lines.append('rm -rf "%(newpath)s"' % vars())
# scmd = 'git --git-dir=/dev/null clone --single-branch --branch %(git_branch)s --depth=1 %(git_url)s %(newpath)s ' % vars()
scmd = '''
git --git-dir=/dev/null clone %(git_url)s %(newpath)s
pushd %(newpath)s
git checkout %(git_branch)s
git config core.fileMode false
git config core.autocrlf input
git lfs install
git lfs pull
popd
''' % vars()
lines.append(scmd)
lines2.append('''
pushd "%(path_to_dir)s"
git config core.fileMode false
git config core.autocrlf input
git pull
pipenv run python -m pip uninstall %(probably_package_name)s -y
pipenv run python setup.py develop
popd
''' % vars())
# Workaround for https://www.virtualbox.org/ticket/19086 and https://www.virtualbox.org/ticket/8761
lines.append("""
if [ -d "%(newpath)s" ]; then
echo 2 > /proc/sys/vm/drop_caches
find "%(path_to_dir)s" -type f -delete;
find "%(path_to_dir)s" -type f -exec rm -rf {} \;
rm -rf "%(path_to_dir)s"
mv "%(newpath)s" "%(path_to_dir)s"
rm -rf "%(newpath)s"
fi
""" % vars())
self.lines2sh("06-checkout", lines, 'checkout')
# self.lines2sh("96-pullall", lines2)
pass
def explode_pp_node(self, td_):
'''
Converts an ambiguous package yaml-node description into git_url and branch.
'''
git_url = None
git_branch = 'master'
if isinstance(td_, str):
git_url = td_
else:
git_url = td_.url
if 'branch' in td_:
git_branch = td_.branch
if 'cache' in td_:
git_url = expandpath(td_.cache)
path_to_dir = os.path.join(self.src_dir, giturl2folder(git_url))
setup_path = path_to_dir
if 'subdir' in td_:
subdir = td_.subdir
setup_path = os.path.join(path_to_dir, subdir)
return git_url, git_branch, path_to_dir, setup_path
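# Illustrative only (these node values are hypothetical, not taken from real spec files):
# explode_pp_node accepts either a plain URL string or a mapping-like node, e.g.
#   "https://github.com/example/project.git"
#   {url: "https://github.com/example/project.git", branch: "develop", cache: "~/mirrors/project", subdir: "src"}
# Both forms resolve to the tuple (git_url, git_branch, path_to_dir, setup_path).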
def pip_install_offline_cmd(self, target):
'''
Get options for installing by pip only using offline downloaded wheel packages
'''
our_whl_path = os.path.relpath(self.our_whl_path, self.curdir)
ext_whl_path = os.path.relpath(self.ext_whl_path, self.curdir)
scmd = f' -m pip install {target} --no-index --no-cache-dir --use-deprecated=legacy-resolver --find-links="{ext_whl_path}" --find-links="{our_whl_path}" --force-reinstall --ignore-installed '
return scmd
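# For example (illustrative placeholder paths, not from a real run), pip_install_offline_cmd("mypkg")
# returns roughly:
#   -m pip install mypkg --no-index --no-cache-dir --use-deprecated=legacy-resolver
#     --find-links="<ext_whl_path>" --find-links="<our_whl_path>" --force-reinstall --ignore-installed
# which pip_install_offline() then prefixes with "<root_dir>/ebin/python3".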
def pip_install_offline(self, target):
'''
Installing by pip only using offline downloaded wheel packages
'''
opts_ = self.pip_install_offline_cmd(target)
scmd = f'{self.root_dir}/ebin/python3 {opts_} '
self.cmd(scmd)
pass
def install_terra_pythons(self):
# if not self.pp.terra.pip and not self.pp.terra.projects:
# return
# For now we hard-code installing our own Python pip; eventually this should be removed.
root_dir = self.root_dir
os.chdir(self.curdir)
os.chdir(os.path.join('in', 'src', 'pip'))
scmd = f'''{self.root_dir}/ebin/python3 setup.py install --single-version-externally-managed --root / '''
os.system(scmd)
os.chdir(self.curdir)
args = self.args
terra_ = True
if self.args.debug:
terra_ = False
pip_args_ = self.pip_args_from_sources(terra=terra_)
our_whl_path = os.path.relpath(self.our_whl_path, self.curdir)
ext_whl_path = os.path.relpath(self.ext_whl_path, self.curdir)
# os.system(f'''{self.root_dir}/ebin/python3 -m pip install {pip_args_} --find-links="{our_whl_path}" --find-links="{ext_whl_path}"''')
scmd = f'''
{self.root_dir}/ebin/python3 -m pip install setuptools --find-links="{our_whl_path}" --find-links="{ext_whl_path}" --force-reinstall --ignore-installed --no-warn-script-location
'''
self.cmd(scmd)
if self.args.debug:
scmd = f'''
{self.root_dir}/ebin/python3 -m pip install pip {our_whl_path}/*.whl {ext_whl_path}/*.whl --find-links="{our_whl_path}" --find-links="{ext_whl_path}" --force-reinstall --ignore-installed --no-warn-script-location
'''
else:
scmd = f'''
{self.root_dir}/ebin/python3 -m pip install {pip_args_} --find-links="{our_whl_path}" --find-links="{ext_whl_path}" --force-reinstall --ignore-installed --no-warn-script-location
'''
os.chdir(self.curdir)
self.cmd(scmd)
if self.tvars.fc_version == '32':
os.system(f"rm -f {root_dir}/local/lib/python3.8/site-packages/typing.*")
# if self.args.debug:
# pl_ = self.get_wheel_list_to_install()
# # pls_ = " ".join(pl_)
# for pls_ in pl_:
# if 'urllib3' in pls_:
# wt_ = 1
# scmd = '%(root_dir)s/ebin/python3 -m pip install %(pls_)s --no-deps --force-reinstall --no-dependencies --ignore-installed ' % vars()
# print(scmd)
# os.system(scmd)
# wtf_path = f'{root_dir}/local/lib/python3.8/site-packages/enum'
# if os.path.exists(wtf_path):
# print('Fucking enum34 here')
# sys.exit(0)
# # ext_whl_path = os.path.join(self.in_bin, "extwheel")
# if self.pp.terra.pip:
# for pip_ in self.pp.terra.pip:
# self.pip_install_offline(pip_)
# # scmd = f'{root_dir}/ebin/python3 -m pip install {pip_} --no-index --no-cache-dir --find-links="{ext_whl_path}" --force-reinstall --ignore-installed '
# # print(scmd)
# # os.system(scmd)
# os.system(f"rm -f {root_dir}/local/lib/python3.8/site-packages/typing.*")
if self.pp.terra.projects:
nodes_ = self.pp.terra.projects
if self.args.debug:
nodes_ += (self.pp.build.projects or [])
for td_ in nodes_:
git_url, git_branch, path_to_dir, setup_path = self.explode_pp_node(td_)
os.chdir(setup_path)
# make_setup_if_not_exists()
if setup_path.endswith('pip'):
continue
# if 'dm-psi' in setup_path:
# wrrr = 1
# if '18' in setup_path:
# wrrr = 1
release_mod = ''
# scmd = "%(root_dir)s/ebin/python3 setup.py install --single-version-externally-managed %(release_mod)s --root / --force " % vars()
self.cmd(f"{root_dir}/ebin/python3 setup.py install --single-version-externally-managed {release_mod} --root / --force ") #--no-deps
# os.chdir(setup_path)
# for reqs_ in glob.glob(f'**/package.json', recursive=True):
# if not 'node_modules' in reqs_:
# os.chdir(setup_path)
# dir_ = os.path.split(reqs_)[0]
# if dir_:
# os.chdir(dir_)
# os.system(f"yarn install ")
# os.system(f"yarn build ")
if self.tvars.fc_version == '32':
scmd = f"rm -f {root_dir}/local/lib/python3.8/site-packages/typing.*"
print(scmd)
os.system(scmd)
pass
def install_repos(self):
root_dir = self.root_dir
args = self.args
packages = []
lines = []
for rp_ in self.ps.repos or []:
if rp_.lower().endswith('.gpg'):
lines.append(f'sudo rpm --import {rp_} ')
elif rp_.endswith('.rpm'):
lines.append(f'sudo dnf install --nogpgcheck {rp_} -y ')
else:
lines.append(f'sudo dnf config-manager --add-repo {rp_} -y ')
pass
self.lines2sh("00-install-repos", lines, "install-repos")
pass
def download_packages(self):
root_dir = self.root_dir
args = self.args
packages = []
lines = []
base = dnf.Base()
base.fill_sack()
q_ = base.sack.query()
self.installed_packages = q_.installed()
lines = []
lines_src = []
in_bin = os.path.relpath(self.in_bin, start=self.curdir)
scmd = f"rm -rf '{in_bin}/rpms'"
lines.append(scmd)
scmd = "sudo yum-config-manager --enable remi"
lines.append(scmd)
pls_ = [p for p in self.need_packages + self.ps.build + self.ps.terra if isinstance(p, str)]
purls_ = [p.url for p in self.need_packages + self.ps.build + self.ps.terra if not isinstance(p, str)]
packages = " ".join(self.dependencies(pls_, local=False) + purls_)
scmd = 'dnf download --skip-broken --downloaddir "%(in_bin)s/rpms" --arch=x86_64 --arch=noarch %(packages)s -y ' % vars()
lines.append(scmd)
scmd = 'dnf download --skip-broken --downloaddir "%(in_bin)s/src-rpms" --arch=x86_64 --arch=noarch --source %(packages)s -y ' % vars()
lines_src.append(scmd)
for pack_ in self.ps.remove_from_download or []:
scmd = f'rm -f {in_bin}/rpms/{pack_}* '
lines.append(scmd)
# for package in self.dependencies(pls_, local=False) + purls_:
# # TODO: add idempotency and installed-package checks; for now it is slow, installing one package at a time
# scmd = 'dnf download --downloaddir "%(in_bin)s/rpms" --arch=x86_64 "%(package)s" -y ' % vars()
# lines.append(scmd)
# scmd = 'dnf download --downloaddir "%(in_bin)s/src-rpms" --arch=x86_64 --source "%(package)s" -y ' % vars()
# lines_src.append(scmd)
self.lines2sh("01-download-rpms", lines, "download-rpms")
self.lines2sh("90-download-sources-for-rpms", lines_src, "download-sources-for-rpms")
shfilename = "02-install-rpms"
ilines = [
"""
sudo dnf install --nogpgcheck --skip-broken %(in_bin)s/rpms/*.rpm -y --allowerasing
""" % | |
{}".format(type(not_found_)))
if payload_ is not None and not isinstance(payload_, (dict, Payload)):
raise Exception("Expected payload_ to be a Payload, received: {}".format(type(payload_)))
if tag_ is not None and not isinstance(tag_, (bytes, str)):
raise Exception("Expected tag_ to be a str, received: {}".format(type(tag_)))
self.entity = entity_
self.error = error_
self.not_found = not_found_
self.payload = payload_
self.tag = tag_
self.unknown_fields = unknown_fields
class PayloadResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~PayloadResult]
'''
results_ = [PayloadResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class PhaseResult(Type):
_toSchema = {'error': 'error', 'phase': 'phase'}
_toPy = {'error': 'error', 'phase': 'phase'}
def __init__(self, error=None, phase=None, **unknown_fields):
'''
error : Error
phase : str
'''
error_ = Error.from_json(error) if error else None
phase_ = phase
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if phase_ is not None and not isinstance(phase_, (bytes, str)):
raise Exception("Expected phase_ to be a str, received: {}".format(type(phase_)))
self.error = error_
self.phase = phase_
self.unknown_fields = unknown_fields
class PhaseResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~PhaseResult]
'''
results_ = [PhaseResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class PinApplicationResult(Type):
_toSchema = {'application_name': 'application-name', 'error': 'error'}
_toPy = {'application-name': 'application_name', 'error': 'error'}
def __init__(self, application_name=None, error=None, **unknown_fields):
'''
application_name : str
error : Error
'''
application_name_ = application_name
error_ = Error.from_json(error) if error else None
# Validate arguments against known Juju API types.
if application_name_ is not None and not isinstance(application_name_, (bytes, str)):
raise Exception("Expected application_name_ to be a str, received: {}".format(type(application_name_)))
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
self.application_name = application_name_
self.error = error_
self.unknown_fields = unknown_fields
class PinApplicationsResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~PinApplicationResult]
'''
results_ = [PinApplicationResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class PinnedLeadershipResult(Type):
_toSchema = {'result': 'result'}
_toPy = {'result': 'result'}
def __init__(self, result=None, **unknown_fields):
'''
result : typing.Sequence[str]
'''
result_ = result
# Validate arguments against known Juju API types.
if result_ is not None and not isinstance(result_, (bytes, str, list)):
raise Exception("Expected result_ to be a Sequence, received: {}".format(type(result_)))
self.result = result_
self.unknown_fields = unknown_fields
class Placement(Type):
_toSchema = {'directive': 'directive', 'scope': 'scope'}
_toPy = {'directive': 'directive', 'scope': 'scope'}
def __init__(self, directive=None, scope=None, **unknown_fields):
'''
directive : str
scope : str
'''
directive_ = directive
scope_ = scope
# Validate arguments against known Juju API types.
if directive_ is not None and not isinstance(directive_, (bytes, str)):
raise Exception("Expected directive_ to be a str, received: {}".format(type(directive_)))
if scope_ is not None and not isinstance(scope_, (bytes, str)):
raise Exception("Expected scope_ to be a str, received: {}".format(type(scope_)))
self.directive = directive_
self.scope = scope_
self.unknown_fields = unknown_fields
class PortRange(Type):
_toSchema = {'from_port': 'from-port', 'protocol': 'protocol', 'to_port': 'to-port'}
_toPy = {'from-port': 'from_port', 'protocol': 'protocol', 'to-port': 'to_port'}
def __init__(self, from_port=None, protocol=None, to_port=None, **unknown_fields):
'''
from_port : int
protocol : str
to_port : int
'''
from_port_ = from_port
protocol_ = protocol
to_port_ = to_port
# Validate arguments against known Juju API types.
if from_port_ is not None and not isinstance(from_port_, int):
raise Exception("Expected from_port_ to be a int, received: {}".format(type(from_port_)))
if protocol_ is not None and not isinstance(protocol_, (bytes, str)):
raise Exception("Expected protocol_ to be a str, received: {}".format(type(protocol_)))
if to_port_ is not None and not isinstance(to_port_, int):
raise Exception("Expected to_port_ to be a int, received: {}".format(type(to_port_)))
self.from_port = from_port_
self.protocol = protocol_
self.to_port = to_port_
self.unknown_fields = unknown_fields
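# Illustrative only (hypothetical values): like the other generated wrappers in this module,
# a PortRange can be populated from wire-format keys, assuming the from_json helper used
# throughout this file behaves as in the calls above, e.g.
#   pr = PortRange.from_json({'from-port': 8000, 'to-port': 8090, 'protocol': 'tcp'})
#   pr.from_port  # -> 8000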
class PrivateAddress(Type):
_toSchema = {'target': 'target'}
_toPy = {'target': 'target'}
def __init__(self, target=None, **unknown_fields):
'''
target : str
'''
target_ = target
# Validate arguments against known Juju API types.
if target_ is not None and not isinstance(target_, (bytes, str)):
raise Exception("Expected target_ to be a str, received: {}".format(type(target_)))
self.target = target_
self.unknown_fields = unknown_fields
class PrivateAddressResults(Type):
_toSchema = {'private_address': 'private-address'}
_toPy = {'private-address': 'private_address'}
def __init__(self, private_address=None, **unknown_fields):
'''
private_address : str
'''
private_address_ = private_address
# Validate arguments against known Juju API types.
if private_address_ is not None and not isinstance(private_address_, (bytes, str)):
raise Exception("Expected private_address_ to be a str, received: {}".format(type(private_address_)))
self.private_address = private_address_
self.unknown_fields = unknown_fields
class ProfileChangeResult(Type):
_toSchema = {'error': 'error', 'new_profile_name': 'new-profile-name', 'old_profile_name': 'old-profile-name', 'profile': 'profile', 'subordinate': 'subordinate'}
_toPy = {'error': 'error', 'new-profile-name': 'new_profile_name', 'old-profile-name': 'old_profile_name', 'profile': 'profile', 'subordinate': 'subordinate'}
def __init__(self, error=None, new_profile_name=None, old_profile_name=None, profile=None, subordinate=None, **unknown_fields):
'''
error : Error
new_profile_name : str
old_profile_name : str
profile : CharmLXDProfile
subordinate : bool
'''
error_ = Error.from_json(error) if error else None
new_profile_name_ = new_profile_name
old_profile_name_ = old_profile_name
profile_ = CharmLXDProfile.from_json(profile) if profile else None
subordinate_ = subordinate
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if new_profile_name_ is not None and not isinstance(new_profile_name_, (bytes, str)):
raise Exception("Expected new_profile_name_ to be a str, received: {}".format(type(new_profile_name_)))
if old_profile_name_ is not None and not isinstance(old_profile_name_, (bytes, str)):
raise Exception("Expected old_profile_name_ to be a str, received: {}".format(type(old_profile_name_)))
if profile_ is not None and not isinstance(profile_, (dict, CharmLXDProfile)):
raise Exception("Expected profile_ to be a CharmLXDProfile, received: {}".format(type(profile_)))
if subordinate_ is not None and not isinstance(subordinate_, bool):
raise Exception("Expected subordinate_ to be a bool, received: {}".format(type(subordinate_)))
self.error = error_
self.new_profile_name = new_profile_name_
self.old_profile_name = old_profile_name_
self.profile = profile_
self.subordinate = subordinate_
self.unknown_fields = unknown_fields
class ProfileChangeResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~ProfileChangeResult]
'''
results_ = [ProfileChangeResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class ProfileInfoResult(Type):
_toSchema = {'application_name': 'application-name', 'error': 'error', 'profile': 'profile', 'revision': 'revision'}
_toPy = {'application-name': 'application_name', 'error': 'error', 'profile': 'profile', 'revision': 'revision'}
def __init__(self, application_name=None, error=None, profile=None, revision=None, **unknown_fields):
'''
application_name : str
error : Error
profile : CharmLXDProfile
revision : int
'''
application_name_ = application_name
error_ = Error.from_json(error) if error else None
profile_ = CharmLXDProfile.from_json(profile) if profile else None
revision_ = revision
# Validate arguments against known Juju API types.
if application_name_ is not None and not isinstance(application_name_, (bytes, str)):
raise Exception("Expected application_name_ to be a str, received: {}".format(type(application_name_)))
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if profile_ is not None and not isinstance(profile_, (dict, CharmLXDProfile)):
raise Exception("Expected profile_ to be a CharmLXDProfile, received: {}".format(type(profile_)))
if revision_ is not None and not isinstance(revision_, int):
raise Exception("Expected revision_ to be a int, received: {}".format(type(revision_)))
self.application_name = application_name_
self.error = error_
self.profile = profile_
self.revision = revision_
self.unknown_fields = unknown_fields
class ProviderInterfaceInfo(Type):
_toSchema = {'interface_name': 'interface-name', 'mac_address': 'mac-address', 'provider_id': 'provider-id'}
_toPy = {'interface-name': 'interface_name', 'mac-address': 'mac_address', 'provider-id': 'provider_id'}
def __init__(self, interface_name=None, mac_address=None, provider_id=None, **unknown_fields):
'''
interface_name : str
mac_address : str
provider_id : str
'''
interface_name_ = interface_name
mac_address_ = mac_address
provider_id_ = provider_id
# Validate arguments against known Juju API types.
if interface_name_ is not None and not isinstance(interface_name_, (bytes, str)):
raise Exception("Expected interface_name_ to be a str, received: {}".format(type(interface_name_)))
if mac_address_ is not None and not isinstance(mac_address_, (bytes, str)):
raise Exception("Expected mac_address_ to be a str, received: {}".format(type(mac_address_)))
# Free rotor entropy evaluation - used for low frequencies below the cut-off if qh=grimme is specified
def calc_freerot_entropy(frequency_wn, temperature, freq_scale_factor):
"""
Entropic contributions (J/(mol*K)) according to a free-rotor description for a list of vibrational modes
Sr = R * (1/2 + ln((8 * pi^3 * u' * k_B * T / h^2)^(1/2)))
"""
# This is the average moment of inertia used by Grimme
Bav = 10.0e-44
mu = [PLANCK_CONSTANT / (8 * math.pi**2 * freq * SPEED_OF_LIGHT * freq_scale_factor) for freq in frequency_wn]
mu_primed = [entry * Bav /(entry + Bav) for entry in mu]
factor = [8 * math.pi**3 * entry * BOLTZMANN_CONSTANT * temperature / PLANCK_CONSTANT**2 for entry in mu_primed]
entropy = [(0.5 + math.log(entry**0.5)) * GAS_CONSTANT for entry in factor]
return entropy
# A damping function to interpolate between RRHO and free rotor vibrational entropy values
def calc_damp(frequency_wn, FREQ_CUTOFF):
alpha = 4
damp = [1 / (1+(FREQ_CUTOFF/entry)**alpha) for entry in frequency_wn]
return damp
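# Worked example (illustrative numbers, not from the original script): the weights returned by
# calc_damp are used below as S_qh[i] = damp[i]*S_rrho[i] + (1 - damp[i])*S_freerot[i].
# For a 50 cm-1 mode with a 100 cm-1 cut-off, damp = 1/(1 + (100/50)**4) ~= 0.059, so the
# free-rotor entropy dominates; for a 400 cm-1 mode, damp = 1/(1 + (100/400)**4) ~= 0.996,
# so the harmonic (RRHO) value is essentially recovered.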
# The function to compute the "black box" entropy values (and all other thermochemical quantities)
class calc_bbe:
def __init__(self, file, QH, FREQ_CUTOFF, temperature, conc, freq_scale_factor, solv, spc):
# List of frequencies and default values
frequency_wn, rotemp, linear_mol, link, freqloc, linkmax, symmno = [], [0.0,0.0,0.0], 0, 0, 0, 0, 1
with open(file) as f: g_output = f.readlines()
# read any single point energies if requested
if spc != False and spc != 'link':
name, ext = os.path.splitext(file)
try: self.sp_energy = sp_energy(name+'_'+spc+ext)
except IOError: pass
if spc == 'link': self.sp_energy = sp_energy(file)
#count number of links
for line in g_output:
# only read first link + freq not other link jobs
if line.find("Normal termination") != -1: linkmax += 1
if line.find('Frequencies --') != -1: freqloc = linkmax
# Iterate over output
if freqloc == 0: freqloc = len(g_output)
for line in g_output:
# link counter
if line.find("Normal termination")!= -1:
link += 1
# reset frequencies if in final freq link
if link == freqloc: frequency_wn = []
# if spc specified will take last Energy from file, otherwise will break after freq calc
if link > freqloc: break
# Iterate over output: look out for low frequencies
if line.strip().startswith('Frequencies -- '):
for i in range(2,5):
try:
x = float(line.strip().split()[i])
# only deal with real frequencies
if x > 0.00: frequency_wn.append(x)
except IndexError: pass
# For QM calculations look for SCF energies, last one will be the optimized energy
if line.strip().startswith('SCF Done:'): self.scf_energy = float(line.strip().split()[4])
# For MP2 calculations replace with EUMP2
if line.strip().find('EUMP2 =') > -1: self.scf_energy = float((line.strip().split()[5]).replace('D', 'E'))
# For ONIOM calculations use the extrapolated value rather than SCF value
if line.strip().find("ONIOM: extrapolated energy") > -1: self.scf_energy = (float(line.strip().split()[4]))
# For Semi-empirical or Molecular Mechanics calculations
if line.strip().find("Energy= ") > -1 and line.strip().find("Predicted")==-1 and line.strip().find("Thermal")==-1: self.scf_energy = (float(line.strip().split()[1]))
# look for thermal corrections, paying attention to point group symmetry
if line.strip().startswith('Zero-point correction='): self.zero_point_corr = float(line.strip().split()[2])
if line.strip().find('Multiplicity') > -1: mult = float(line.strip().split()[5])
if line.strip().startswith('Molecular mass:'): molecular_mass = float(line.strip().split()[2])
if line.strip().startswith('Rotational symmetry number'): symmno = int((line.strip().split()[3]).split(".")[0])
if line.strip().startswith('Full point group'):
if line.strip().split()[3] == 'D*H' or line.strip().split()[3] == 'C*V': linear_mol = 1
if line.strip().startswith('Rotational temperature '): rotemp = [float(line.strip().split()[3])]
if line.strip().startswith('Rotational temperatures'): rotemp = [float(line.strip().split()[3]), float(line.strip().split()[4]), float(line.strip().split()[5])]
# skip the next steps if unable to parse the frequencies or zpe from the output file
if hasattr(self, "zero_point_corr"):
# create a list of frequencies equal to cut-off value
cutoffs = [FREQ_CUTOFF for freq in frequency_wn]
# Translational and electronic contributions to the energy and entropy do not depend on frequencies
Utrans = calc_translational_energy(temperature)
Strans = calc_translational_entropy(molecular_mass, conc, temperature, solv)
Selec = calc_electronic_entropy(mult)
# Rotational and Vibrational contributions to the energy entropy
if len(frequency_wn) > 0:
ZPE = calc_zeropoint_energy(frequency_wn, freq_scale_factor)
Urot = calc_rotational_energy(self.zero_point_corr, symmno, temperature, linear_mol)
Uvib = calc_vibrational_energy(frequency_wn, temperature, freq_scale_factor)
Srot = calc_rotational_entropy(self.zero_point_corr, linear_mol, symmno, rotemp, temperature)
# Calculate harmonic entropy, free-rotor entropy and damping function for each frequency
Svib_rrho = calc_rrho_entropy(frequency_wn, temperature, freq_scale_factor)
if FREQ_CUTOFF > 0.0: Svib_rrqho = calc_rrho_entropy(cutoffs, temperature, 1.0)
Svib_free_rot = calc_freerot_entropy(frequency_wn, temperature, freq_scale_factor)
damp = calc_damp(frequency_wn, FREQ_CUTOFF)
# Compute entropy (cal/mol/K) using the two values and damping function
vib_entropy = []
for j in range(0,len(frequency_wn)):
if QH == "grimme": vib_entropy.append(Svib_rrho[j] * damp[j] + (1-damp[j]) * Svib_free_rot[j])
elif QH == "truhlar":
if FREQ_CUTOFF > 0.0:
if frequency_wn[j] > FREQ_CUTOFF: vib_entropy.append(Svib_rrho[j])
else: vib_entropy.append(Svib_rrqho[j])
else: vib_entropy.append(Svib_rrho[j])
qh_Svib, h_Svib = sum(vib_entropy), sum(Svib_rrho)
# monatomic species have no vibrational or rotational degrees of freedom
else: ZPE, Urot, Uvib, Srot, h_Svib, qh_Svib = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
# Add terms (converted to au) to get Free energy - perform separately for harmonic and quasi-harmonic values out of interest
self.enthalpy = self.scf_energy + (Utrans + Urot + Uvib + GAS_CONSTANT * temperature) / j_to_au
# single point correction replaces energy from optimization with single point value
if hasattr(self, 'sp_energy'):
try: self.enthalpy = self.enthalpy - self.scf_energy + self.sp_energy
except TypeError: pass
self.zpe = ZPE / j_to_au
self.entropy, self.qh_entropy = (Strans + Srot + h_Svib + Selec) / j_to_au, (Strans + Srot + qh_Svib + Selec) / j_to_au
self.gibbs_free_energy, self.qh_gibbs_free_energy = self.enthalpy - temperature * self.entropy, self.enthalpy - temperature * self.qh_entropy
def main():
# Start a log for the results
log = Logger("Goodvibes","dat", "output")
# get command line inputs. Use -h to list all possible arguments and default values
parser = OptionParser(usage="Usage: %prog [options] <input1>.log <input2>.log ...")
parser.add_option("-t", dest="temperature", action="store", help="temperature (K) (default 298.15)", default="298.15", type="float", metavar="TEMP")
parser.add_option("-q", dest="QH", action="store", help="Type of quasi-harmonic correction (Grimme or Truhlar) (default Grimme)", default="grimme", type="string", metavar="QH")
parser.add_option("-f", dest="freq_cutoff", action="store", help="Cut-off frequency (wavenumbers) (default = 100)", default="100.0", type="float", metavar="FREQ_CUTOFF")
parser.add_option("-c", dest="conc", action="store", help="concentration (mol/l) (default 1 atm)", default="0.040876", type="float", metavar="CONC")
parser.add_option("-v", dest="freq_scale_factor", action="store", help="Frequency scaling factor (default 1)", default=False, type="float", metavar="SCALE_FACTOR")
parser.add_option("-s", dest="solv", action="store", help="Solvent (H2O, toluene, DMF, AcOH, chloroform) (default none)", default="none", type="string", metavar="SOLV")
parser.add_option("--spc", dest="spc", action="store", help="Indicates single point corrections (default False)", type="string", default=False, metavar="SPC")
parser.add_option("--ti", dest="temperature_interval", action="store", help="initial temp, final temp, step size (K)", default=False, metavar="TI")
parser.add_option("--ci", dest="conc_interval", action="store", help="initial conc, final conc, step size (mol/l)", default=False, metavar="CI")
parser.add_option("--xyz", dest="xyz", action="store_true", help="write Cartesians to an xyz file (default False)", default=False, metavar="XYZ")
(options, args) = parser.parse_args()
options.QH = options.QH.lower() # case insensitive
# if necessary create an xyz file for Cartesians
if options.xyz == True: xyz = XYZout("Goodvibes","xyz", "output")
# Get the filenames from the command line prompt
files = []
if len(sys.argv) > 1:
for elem in sys.argv[1:]:
try:
if os.path.splitext(elem)[1] in [".out", ".log"]:
for file in glob(elem):
if options.spc == False or options.spc == 'link': files.append(file)
else:
if file.find('_'+options.spc+".") == -1: files.append(file)
except IndexError: pass
# Start printing results
start = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
log.Write(" GoodVibes v" + __version__ + " " + start + "\n REF: " + goodvibes_ref +"\n\n")
if options.temperature_interval == False: log.Write(" Temperature = "+str(options.temperature)+" Kelvin")
# If not at standard temp, need to correct the molarity of 1 atmosphere (assuming Pressure is still 1 atm)
if options.conc == 0.040876:
options.conc = atmos/(GAS_CONSTANT*options.temperature); log.Write(" Pressure = 1 atm")
else: log.Write(" Concentration = "+str(options.conc)+" mol/l")
# attempt to automatically obtain frequency scale factor. Requires all outputs to be same level of theory
if options.freq_scale_factor == False:
l_o_t = [level_of_theory(file) for file in files]
def all_same(items): return all(x == items[0] for x in items)
if all_same(l_o_t) == True:
for scal in scaling_data: # search through database of scaling factors
if l_o_t[0].upper().find(scal['level'].upper()) > -1 or l_o_t[0].upper().find(scal['level'].replace("-","").upper()) > -1:
options.freq_scale_factor = scal['zpe_fac']; ref = scaling_refs[scal['zpe_ref']]
log.Write("\n\n " + "Found vibrational scaling factor for " + l_o_t[0] + " level of theory" + "\n REF: " + ref)
elif all_same(l_o_t) == False: log.Write("\n " + (textwrap.fill("CAUTION: different levels of theory found - " + '|'.join(l_o_t), 128, subsequent_indent=' ')))
if options.freq_scale_factor == False: options.freq_scale_factor = 1.0 # if no scaling factor is found use 1.0
log.Write("\n Frequency scale factor "+str(options.freq_scale_factor))
# checks to see whether the available free space of a requested solvent is defined
freespace = get_free_space(options.solv)
if freespace != 1000.0: log.Write("\n Specified solvent "+options.solv+": free volume | |
m.c1741 = Constraint(expr= m.x241 - m.x246 + 728.4*m.x971 + 364.2*m.x976 + 121.4*m.x981 == 0)
m.c1742 = Constraint(expr= m.x242 - m.x247 + 728.4*m.x972 + 364.2*m.x977 + 121.4*m.x982 == 0)
m.c1743 = Constraint(expr= m.x243 - m.x248 + 728.4*m.x973 + 364.2*m.x978 + 121.4*m.x983 == 0)
m.c1744 = Constraint(expr= m.x244 - m.x249 + 728.4*m.x974 + 364.2*m.x979 + 121.4*m.x984 == 0)
m.c1745 = Constraint(expr= m.x245 - m.x250 + 728.4*m.x975 + 364.2*m.x980 + 121.4*m.x985 == 0)
m.c1747 = Constraint(expr=(m.x2502 + m.x2503)*m.x1001 + m.x1751 == 0)
m.c1748 = Constraint(expr=(m.x2502 + m.x2503)*m.x1006 + m.x1756 == 0)
m.c1749 = Constraint(expr=(m.x2502 + m.x2503)*m.x1011 + m.x1761 == 0)
m.c1750 = Constraint(expr=(m.x2502 + m.x2503)*m.x1016 + m.x1766 == 0)
m.c1751 = Constraint(expr=(m.x2502 + m.x2503)*m.x1021 + m.x1771 == 0)
m.c1752 = Constraint(expr=(m.x2502 + m.x2503)*m.x1026 + m.x1776 == 0)
m.c1753 = Constraint(expr=(m.x2502 + m.x2503)*m.x1031 + m.x1781 == 0)
m.c1754 = Constraint(expr=(m.x2502 + m.x2503)*m.x1036 + m.x1786 == 0)
m.c1755 = Constraint(expr=(m.x2502 + m.x2503)*m.x1041 + m.x1791 == 0)
m.c1756 = Constraint(expr=(m.x2502 + m.x2503)*m.x1046 + m.x1796 == 0)
m.c1757 = Constraint(expr=(m.x2502 + m.x2503)*m.x1051 + m.x1801 == 0)
m.c1758 = Constraint(expr=(m.x2502 + m.x2503)*m.x1056 + m.x1806 == 0)
m.c1759 = Constraint(expr=(m.x2502 + m.x2503)*m.x1061 + m.x1811 == 0)
m.c1760 = Constraint(expr=(m.x2502 + m.x2503)*m.x1066 + m.x1816 == 0)
m.c1761 = Constraint(expr=(m.x2502 + m.x2503)*m.x1071 + m.x1821 == 0)
m.c1762 = Constraint(expr=(m.x2502 + m.x2503)*m.x1076 + m.x1826 == 0)
m.c1763 = Constraint(expr=(m.x2502 + m.x2503)*m.x1081 + m.x1831 == 0)
m.c1764 = Constraint(expr=(m.x2502 + m.x2503)*m.x1086 + m.x1836 == 0)
m.c1765 = Constraint(expr=(m.x2502 + m.x2503)*m.x1091 + m.x1841 == 0)
m.c1766 = Constraint(expr=(m.x2502 + m.x2503)*m.x1096 + m.x1846 == 0)
m.c1767 = Constraint(expr=(m.x2502 + m.x2503)*m.x1101 + m.x1851 == 0)
m.c1768 = Constraint(expr=(m.x2502 + m.x2503)*m.x1106 + m.x1856 == 0)
m.c1769 = Constraint(expr=(m.x2502 + m.x2503)*m.x1111 + m.x1861 == 0)
m.c1770 = Constraint(expr=(m.x2502 + m.x2503)*m.x1116 + m.x1866 == 0)
m.c1771 = Constraint(expr=(m.x2502 + m.x2503)*m.x1121 + m.x1871 == 0)
m.c1772 = Constraint(expr=(m.x2502 + m.x2503)*m.x1126 + m.x1876 == 0)
m.c1773 = Constraint(expr=(m.x2502 + m.x2503)*m.x1131 + m.x1881 == 0)
m.c1774 = Constraint(expr=(m.x2502 + m.x2503)*m.x1136 + m.x1886 == 0)
m.c1775 = Constraint(expr=(m.x2502 + m.x2503)*m.x1141 + m.x1891 == 0)
m.c1776 = Constraint(expr=(m.x2502 + m.x2503)*m.x1146 + m.x1896 == 0)
m.c1777 = Constraint(expr=(m.x2502 + m.x2503)*m.x1151 + m.x1901 == 0)
m.c1778 = Constraint(expr=(m.x2502 + m.x2503)*m.x1156 + m.x1906 == 0)
m.c1779 = Constraint(expr=(m.x2502 + m.x2503)*m.x1161 + m.x1911 == 0)
m.c1780 = Constraint(expr=(m.x2502 + m.x2503)*m.x1166 + m.x1916 == 0)
m.c1781 = Constraint(expr=(m.x2502 + m.x2503)*m.x1171 + m.x1921 == 0)
m.c1782 = Constraint(expr=(m.x2502 + m.x2503)*m.x1176 + m.x1926 == 0)
m.c1783 = Constraint(expr=(m.x2502 + m.x2503)*m.x1181 + m.x1931 == 0)
m.c1784 = Constraint(expr=(m.x2502 + m.x2503)*m.x1186 + m.x1936 == 0)
m.c1785 = Constraint(expr=(m.x2502 + m.x2503)*m.x1191 + m.x1941 == 0)
m.c1786 = Constraint(expr=(m.x2502 + m.x2503)*m.x1196 + m.x1946 == 0)
m.c1787 = Constraint(expr=(m.x2502 + m.x2503)*m.x1201 + m.x1951 == 0)
m.c1788 = Constraint(expr=(m.x2502 + m.x2503)*m.x1206 + m.x1956 == 0)
m.c1789 = Constraint(expr=(m.x2502 + m.x2503)*m.x1211 + m.x1961 == 0)
m.c1790 = Constraint(expr=(m.x2502 + m.x2503)*m.x1216 + m.x1966 == 0)
m.c1791 = Constraint(expr=(m.x2502 + m.x2503)*m.x1221 + m.x1971 == 0)
m.c1792 = Constraint(expr=(m.x2502 + m.x2503)*m.x1226 + m.x1976 == 0)
m.c1793 = Constraint(expr=(m.x2502 + m.x2503)*m.x1231 + m.x1981 == 0)
m.c1794 = Constraint(expr=(m.x2502 + m.x2503)*m.x1236 + m.x1986 == 0)
m.c1795 = Constraint(expr=(m.x2502 + m.x2503)*m.x1241 + m.x1991 == 0)
m.c1796 = Constraint(expr=(m.x2502 + m.x2503)*m.x1246 + m.x1996 == 0)
m.c1797 = Constraint(expr=(m.x2502 + m.x2503)*m.x1251 + m.x2001 == 0)
m.c1798 = Constraint(expr=(m.x2502 + m.x2503)*m.x1256 + m.x2006 == 0)
m.c1799 = Constraint(expr=(m.x2502 + m.x2503)*m.x1261 + m.x2011 == 0)
m.c1800 = Constraint(expr=(m.x2502 + m.x2503)*m.x1266 + m.x2016 == 0)
m.c1801 = Constraint(expr=(m.x2502 + m.x2503)*m.x1271 + m.x2021 == 0)
m.c1802 = Constraint(expr=(m.x2502 + m.x2503)*m.x1276 + m.x2026 == 0)
m.c1803 = Constraint(expr=(m.x2502 + m.x2503)*m.x1281 + m.x2031 == 0)
m.c1804 = Constraint(expr=(m.x2502 + m.x2503)*m.x1286 + m.x2036 == 0)
m.c1805 = Constraint(expr=(m.x2502 + m.x2503)*m.x1291 + m.x2041 == 0)
m.c1806 = Constraint(expr=(m.x2502 + m.x2503)*m.x1296 + m.x2046 == 0)
m.c1807 = Constraint(expr=(m.x2502 + m.x2503)*m.x1301 + m.x2051 == 0)
m.c1808 = Constraint(expr=(m.x2502 + m.x2503)*m.x1306 + m.x2056 == 0)
m.c1809 = Constraint(expr=(m.x2502 + m.x2503)*m.x1311 + m.x2061 == 0)
m.c1810 = Constraint(expr=(m.x2502 + m.x2503)*m.x1316 + m.x2066 == 0)
m.c1811 = Constraint(expr=(m.x2502 + m.x2503)*m.x1321 + m.x2071 == 0)
m.c1812 = Constraint(expr=(m.x2502 + m.x2503)*m.x1326 + m.x2076 == 0)
m.c1813 = Constraint(expr=(m.x2502 + m.x2503)*m.x1331 + m.x2081 == 0)
m.c1814 = Constraint(expr=(m.x2502 + m.x2503)*m.x1336 + m.x2086 == 0)
m.c1815 = Constraint(expr=(m.x2502 + m.x2503)*m.x1341 + m.x2091 == 0)
m.c1816 = Constraint(expr=(m.x2502 + m.x2503)*m.x1346 + m.x2096 == 0)
m.c1817 = Constraint(expr=(m.x2502 + m.x2503)*m.x1351 + m.x2101 == 0)
m.c1818 = Constraint(expr=(m.x2502 + m.x2503)*m.x1356 + m.x2106 == 0)
m.c1819 = Constraint(expr=(m.x2502 + m.x2503)*m.x1361 + m.x2111 == 0)
m.c1820 = Constraint(expr=(m.x2502 + m.x2503)*m.x1366 + m.x2116 == 0)
m.c1821 = Constraint(expr=(m.x2502 + m.x2503)*m.x1371 + m.x2121 == 0)
m.c1822 = Constraint(expr=(m.x2502 + m.x2503)*m.x1376 + m.x2126 == 0)
m.c1823 = Constraint(expr=(m.x2502 + m.x2503)*m.x1381 + m.x2131 == 0)
m.c1824 = Constraint(expr=(m.x2502 + m.x2503)*m.x1386 + m.x2136 == 0)
m.c1825 = Constraint(expr=(m.x2502 + m.x2503)*m.x1391 + m.x2141 == 0)
m.c1826 = Constraint(expr=(m.x2502 + m.x2503)*m.x1396 + m.x2146 == 0)
m.c1827 = Constraint(expr=(m.x2502 + m.x2503)*m.x1401 + m.x2151 == 0)
m.c1828 = Constraint(expr=(m.x2502 + m.x2503)*m.x1406 + m.x2156 == 0)
m.c1829 = Constraint(expr=(m.x2502 + m.x2503)*m.x1411 + m.x2161 == 0)
m.c1830 = Constraint(expr=(m.x2502 + m.x2503)*m.x1416 + m.x2166 == 0)
m.c1831 = Constraint(expr=(m.x2502 + m.x2503)*m.x1421 + m.x2171 == 0)
m.c1832 = Constraint(expr=(m.x2502 + m.x2503)*m.x1426 + m.x2176 == 0)
m.c1833 = Constraint(expr=(m.x2502 + m.x2503)*m.x1431 + m.x2181 == 0)
m.c1834 = Constraint(expr=(m.x2502 + m.x2503)*m.x1436 + m.x2186 == 0)
m.c1835 = Constraint(expr=(m.x2502 + m.x2503)*m.x1441 + m.x2191 == 0)
m.c1836 = Constraint(expr=(m.x2502 + m.x2503)*m.x1446 + m.x2196 == 0)
m.c1837 = Constraint(expr=(m.x2502 + m.x2503)*m.x1451 + m.x2201 == 0)
m.c1838 = Constraint(expr=(m.x2502 + m.x2503)*m.x1456 + m.x2206 == 0)
m.c1839 = Constraint(expr=(m.x2502 + m.x2503)*m.x1461 + m.x2211 == 0)
m.c1840 = Constraint(expr=(m.x2502 + m.x2503)*m.x1466 + m.x2216 == 0)
m.c1841 = Constraint(expr=(m.x2502 + m.x2503)*m.x1471 + m.x2221 == 0)
m.c1842 = Constraint(expr=(m.x2502 + m.x2503)*m.x1476 + m.x2226 == 0)
m.c1843 = Constraint(expr=(m.x2502 + m.x2503)*m.x1481 + m.x2231 == 0)
m.c1844 = Constraint(expr=(m.x2502 + m.x2503)*m.x1486 + m.x2236 == 0)
m.c1845 = Constraint(expr=(m.x2502 + m.x2503)*m.x1491 + m.x2241 == 0)
m.c1846 = Constraint(expr=(m.x2502 + m.x2503)*m.x1496 + m.x2246 == 0)
m.c1847 = Constraint(expr=(m.x2502 + m.x2503)*m.x1501 + m.x2251 == 0)
m.c1848 = Constraint(expr=(m.x2502 + m.x2503)*m.x1506 + m.x2256 == 0)
m.c1849 = Constraint(expr=(m.x2502 + m.x2503)*m.x1511 + m.x2261 == 0)
m.c1850 = Constraint(expr=(m.x2502 + m.x2503)*m.x1516 + m.x2266 == 0)
m.c1851 = Constraint(expr=(m.x2502 + m.x2503)*m.x1521 + m.x2271 == 0)
m.c1852 = Constraint(expr=(m.x2502 + m.x2503)*m.x1526 + m.x2276 == 0)
m.c1853 = Constraint(expr=(m.x2502 + m.x2503)*m.x1531 + m.x2281 == 0)
m.c1854 = Constraint(expr=(m.x2502 + m.x2503)*m.x1536 + m.x2286 == 0)
m.c1855 = Constraint(expr=(m.x2502 + m.x2503)*m.x1541 + m.x2291 == 0)
m.c1856 = Constraint(expr=(m.x2502 + m.x2503)*m.x1546 + m.x2296 == 0)
m.c1857 = Constraint(expr=(m.x2502 + m.x2503)*m.x1551 + m.x2301 == 0)
m.c1858 = Constraint(expr=(m.x2502 + m.x2503)*m.x1556 + m.x2306 == 0)
m.c1859 = Constraint(expr=(m.x2502 + m.x2503)*m.x1561 + m.x2311 == 0)
m.c1860 = Constraint(expr=(m.x2502 + m.x2503)*m.x1566 + m.x2316 == 0)
m.c1861 = Constraint(expr=(m.x2502 + m.x2503)*m.x1571 + m.x2321 == 0)
m.c1862 = Constraint(expr=(m.x2502 + m.x2503)*m.x1576 + m.x2326 == 0)
m.c1863 = Constraint(expr=(m.x2502 + m.x2503)*m.x1581 + m.x2331 == 0)
m.c1864 = Constraint(expr=(m.x2502 + m.x2503)*m.x1586 + m.x2336 == 0)
m.c1865 = Constraint(expr=(m.x2502 + m.x2503)*m.x1591 + m.x2341 == 0)
m.c1866 = Constraint(expr=(m.x2502 + m.x2503)*m.x1596 + m.x2346 == 0)
m.c1867 = Constraint(expr=(m.x2502 + m.x2503)*m.x1601 + m.x2351 == 0)
m.c1868 = Constraint(expr=(m.x2502 + m.x2503)*m.x1606 + m.x2356 == 0)
m.c1869 = Constraint(expr=(m.x2502 + m.x2503)*m.x1611 + m.x2361 == 0)
m.c1870 = Constraint(expr=(m.x2502 + m.x2503)*m.x1616 + m.x2366 == 0)
m.c1871 = Constraint(expr=(m.x2502 + m.x2503)*m.x1621 + m.x2371 == 0)
m.c1872 = Constraint(expr=(m.x2502 + m.x2503)*m.x1626 + m.x2376 == 0)
m.c1873 = Constraint(expr=(m.x2502 + m.x2503)*m.x1631 + m.x2381 == 0)
m.c1874 = Constraint(expr=(m.x2502 + m.x2503)*m.x1636 + m.x2386 == 0)
m.c1875 = Constraint(expr=(m.x2502 + m.x2503)*m.x1641 + m.x2391 == 0)
m.c1876 = Constraint(expr=(m.x2502 + m.x2503)*m.x1646 + m.x2396 == 0)
m.c1877 = Constraint(expr=(m.x2502 + m.x2503)*m.x1651 + m.x2401 == 0)
m.c1878 = Constraint(expr=(m.x2502 + m.x2503)*m.x1656 + m.x2406 == 0)
m.c1879 = Constraint(expr=(m.x2502 + m.x2503)*m.x1661 + m.x2411 == 0)
m.c1880 = Constraint(expr=(m.x2502 + m.x2503)*m.x1666 + m.x2416 == 0)
m.c1881 = Constraint(expr=(m.x2502 + m.x2503)*m.x1671 + m.x2421 == 0)
m.c1882 = Constraint(expr=(m.x2502 + m.x2503)*m.x1676 + m.x2426 == 0)
m.c1883 = Constraint(expr=(m.x2502 + m.x2503)*m.x1681 + m.x2431 == 0)
m.c1884 = Constraint(expr=(m.x2502 + m.x2503)*m.x1686 + m.x2436 == 0)
m.c1885 = Constraint(expr=(m.x2502 + m.x2503)*m.x1691 + m.x2441 == 0)
m.c1886 = Constraint(expr=(m.x2502 + m.x2503)*m.x1696 + m.x2446 == 0)
m.c1887 = Constraint(expr=(m.x2502 + m.x2503)*m.x1701 + m.x2451 == 0)
m.c1888 = Constraint(expr=(m.x2502 + m.x2503)*m.x1706 + m.x2456 == 0)
m.c1889 = Constraint(expr=(m.x2502 + m.x2503)*m.x1711 + m.x2461 == 0)
m.c1890 = Constraint(expr=(m.x2502 + m.x2503)*m.x1716 + m.x2466 == 0)
m.c1891 = Constraint(expr=(m.x2502 + m.x2503)*m.x1721 + m.x2471 == 0)
m.c1892 = Constraint(expr=(m.x2502 + m.x2503)*m.x1726 + m.x2476 == 0)
m.c1893 = Constraint(expr=(m.x2502 + m.x2503)*m.x1731 + m.x2481 == 0)
m.c1894 = Constraint(expr=(m.x2502 + m.x2503)*m.x1736 + m.x2486 == 0)
m.c1895 = Constraint(expr=(m.x2502 + m.x2503)*m.x1741 + m.x2491 == 0)
m.c1896 = Constraint(expr=(m.x2502 + m.x2503)*m.x1746 + m.x2496 == 0)
m.c1897 = Constraint(expr=-m.x2502*m.x1001 + m.x1752 == 0)
m.c1898 = Constraint(expr=-m.x2502*m.x1006 + m.x1757 == 0)
m.c1899 = Constraint(expr=-m.x2502*m.x1011 + m.x1762 == 0)
m.c1900 = Constraint(expr=-m.x2502*m.x1016 + m.x1767 == 0)
# File: src/oci/data_catalog/models/term_relationship_summary.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TermRelationshipSummary(object):
"""
Summary of a term relationship. Business term relationship between two terms in a business glossary.
"""
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a TermRelationshipSummary.
#: This constant has a value of "MOVING"
LIFECYCLE_STATE_MOVING = "MOVING"
def __init__(self, **kwargs):
"""
Initializes a new TermRelationshipSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this TermRelationshipSummary.
:type key: str
:param display_name:
The value to assign to the display_name property of this TermRelationshipSummary.
:type display_name: str
:param description:
The value to assign to the description property of this TermRelationshipSummary.
:type description: str
:param related_term_key:
The value to assign to the related_term_key property of this TermRelationshipSummary.
:type related_term_key: str
:param related_term_display_name:
The value to assign to the related_term_display_name property of this TermRelationshipSummary.
:type related_term_display_name: str
:param related_term_description:
The value to assign to the related_term_description property of this TermRelationshipSummary.
:type related_term_description: str
:param related_term_path:
The value to assign to the related_term_path property of this TermRelationshipSummary.
:type related_term_path: str
:param related_term_glossary_key:
The value to assign to the related_term_glossary_key property of this TermRelationshipSummary.
:type related_term_glossary_key: str
:param uri:
The value to assign to the uri property of this TermRelationshipSummary.
:type uri: str
:param parent_term_key:
The value to assign to the parent_term_key property of this TermRelationshipSummary.
:type parent_term_key: str
:param parent_term_display_name:
The value to assign to the parent_term_display_name property of this TermRelationshipSummary.
:type parent_term_display_name: str
:param parent_term_description:
The value to assign to the parent_term_description property of this TermRelationshipSummary.
:type parent_term_description: str
:param parent_term_path:
The value to assign to the parent_term_path property of this TermRelationshipSummary.
:type parent_term_path: str
:param parent_term_glossary_key:
The value to assign to the parent_term_glossary_key property of this TermRelationshipSummary.
:type parent_term_glossary_key: str
:param time_created:
The value to assign to the time_created property of this TermRelationshipSummary.
:type time_created: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this TermRelationshipSummary.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
"""
self.swagger_types = {
'key': 'str',
'display_name': 'str',
'description': 'str',
'related_term_key': 'str',
'related_term_display_name': 'str',
'related_term_description': 'str',
'related_term_path': 'str',
'related_term_glossary_key': 'str',
'uri': 'str',
'parent_term_key': 'str',
'parent_term_display_name': 'str',
'parent_term_description': 'str',
'parent_term_path': 'str',
'parent_term_glossary_key': 'str',
'time_created': 'datetime',
'lifecycle_state': 'str'
}
self.attribute_map = {
'key': 'key',
'display_name': 'displayName',
'description': 'description',
'related_term_key': 'relatedTermKey',
'related_term_display_name': 'relatedTermDisplayName',
'related_term_description': 'relatedTermDescription',
'related_term_path': 'relatedTermPath',
'related_term_glossary_key': 'relatedTermGlossaryKey',
'uri': 'uri',
'parent_term_key': 'parentTermKey',
'parent_term_display_name': 'parentTermDisplayName',
'parent_term_description': 'parentTermDescription',
'parent_term_path': 'parentTermPath',
'parent_term_glossary_key': 'parentTermGlossaryKey',
'time_created': 'timeCreated',
'lifecycle_state': 'lifecycleState'
}
self._key = None
self._display_name = None
self._description = None
self._related_term_key = None
self._related_term_display_name = None
self._related_term_description = None
self._related_term_path = None
self._related_term_glossary_key = None
self._uri = None
self._parent_term_key = None
self._parent_term_display_name = None
self._parent_term_description = None
self._parent_term_path = None
self._parent_term_glossary_key = None
self._time_created = None
self._lifecycle_state = None
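    # Illustrative usage (values are placeholders): the constructor above only
    # initializes the attributes to None; they are populated afterwards through
    # the property setters defined below, e.g.
    #   summary = TermRelationshipSummary()
    #   summary.display_name = "is-a"
    #   summary.lifecycle_state = TermRelationshipSummary.LIFECYCLE_STATE_ACTIVE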
@property
def key(self):
"""
**[Required]** Gets the key of this TermRelationshipSummary.
Unique term relationship key that is immutable.
:return: The key of this TermRelationshipSummary.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this TermRelationshipSummary.
Unique term relationship key that is immutable.
:param key: The key of this TermRelationshipSummary.
:type: str
"""
self._key = key
@property
def display_name(self):
"""
Gets the display_name of this TermRelationshipSummary.
A user-friendly display name. Does not have to be unique, and it's changeable.
        Avoid entering confidential information. This is the same as relationshipType for termRelationship.
:return: The display_name of this TermRelationshipSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this TermRelationshipSummary.
A user-friendly display name. Does not have to be unique, and it's changeable.
        Avoid entering confidential information. This is the same as relationshipType for termRelationship.
:param display_name: The display_name of this TermRelationshipSummary.
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""
Gets the description of this TermRelationshipSummary.
Detailed description of the term relationship usually defined at the time of creation.
:return: The description of this TermRelationshipSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this TermRelationshipSummary.
Detailed description of the term relationship usually defined at the time of creation.
:param description: The description of this TermRelationshipSummary.
:type: str
"""
self._description = description
@property
def related_term_key(self):
"""
Gets the related_term_key of this TermRelationshipSummary.
Unique id of the related term.
:return: The related_term_key of this TermRelationshipSummary.
:rtype: str
"""
return self._related_term_key
@related_term_key.setter
def related_term_key(self, related_term_key):
"""
Sets the related_term_key of this TermRelationshipSummary.
Unique id of the related term.
:param related_term_key: The related_term_key of this TermRelationshipSummary.
:type: str
"""
self._related_term_key = related_term_key
@property
def related_term_display_name(self):
"""
Gets the related_term_display_name of this TermRelationshipSummary.
Name of the related term.
:return: The related_term_display_name of this TermRelationshipSummary.
:rtype: str
"""
return self._related_term_display_name
@related_term_display_name.setter
def related_term_display_name(self, related_term_display_name):
"""
Sets the related_term_display_name of this TermRelationshipSummary.
Name of the related term.
:param related_term_display_name: The related_term_display_name of this TermRelationshipSummary.
:type: str
"""
self._related_term_display_name = related_term_display_name
@property
def related_term_description(self):
"""
Gets the related_term_description of this TermRelationshipSummary.
Description of the related term.
:return: The related_term_description of this TermRelationshipSummary.
:rtype: str
"""
return self._related_term_description
@related_term_description.setter
def related_term_description(self, related_term_description):
"""
Sets the related_term_description of this TermRelationshipSummary.
Description of the related term.
:param related_term_description: The related_term_description of this TermRelationshipSummary.
:type: str
"""
self._related_term_description = related_term_description
@property
def related_term_path(self):
"""
Gets the related_term_path of this TermRelationshipSummary.
Full path of the related term.
:return: The related_term_path of this TermRelationshipSummary.
:rtype: str
"""
return self._related_term_path
@related_term_path.setter
def related_term_path(self, related_term_path):
"""
Sets the related_term_path of this TermRelationshipSummary.
Full path of the related term.
:param related_term_path: The related_term_path of this TermRelationshipSummary.
:type: str
"""
self._related_term_path = related_term_path
@property
def related_term_glossary_key(self):
"""
Gets the related_term_glossary_key of this TermRelationshipSummary.
Glossary key of the related term.
:return: The related_term_glossary_key of this TermRelationshipSummary.
:rtype: str
"""
return self._related_term_glossary_key
@related_term_glossary_key.setter
def related_term_glossary_key(self, related_term_glossary_key):
"""
Sets the related_term_glossary_key of this TermRelationshipSummary.
Glossary key of the related term.
:param related_term_glossary_key: The related_term_glossary_key of this TermRelationshipSummary.
:type: str
"""
self._related_term_glossary_key = related_term_glossary_key
@property
def uri(self):
"""
Gets the uri of this TermRelationshipSummary.
URI to the term relationship instance in the API.
:return: The uri of this TermRelationshipSummary.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this TermRelationshipSummary.
URI to the term relationship instance in the API.
:param uri: The uri of this TermRelationshipSummary.
:type: str
"""
self._uri = uri
@property
def parent_term_key(self):
"""
Gets the parent_term_key of | |
from pyteal import *
import sys
class Constants:
"""
Constant strings used in the smart contracts
"""
    Creator = Bytes("Creator")  # Identifies the account of the Asset creator, stored globally
AssetId = Bytes("AssetId") # ID of the asset, stored globally
amountPayment = Bytes("amountPayment") # Amount to be paid for the asset, stored locally on the seller's account
amountASA = Bytes("amountASA") # Amount of asset sold, stored locally on the seller's account
approveTransfer = Bytes("approveTransfer") # Approval variable, stored on the seller's and the buyer's accounts
setupSale = Bytes("setupSale") # Method call
buyASA = Bytes("buyASA") # Method call
executeTransfer = Bytes("executeTransfer") # Method call
royaltyFee = Bytes("royaltyFee") # Royalty fee in thousands
claimFees = Bytes("claimFees") # Method call
collectedFees = Bytes("collectedFees") # Amount of collected fees, stored globally
refund = Bytes("refund") # Method call
@Subroutine(TealType.none)
def sendPayment(receiver: Addr, amount: Int) -> Expr:
"""
This subroutine can be used to send payments from the smart
contract to other accounts using inner transactions
:param Addr receiver : The receiver of the payment
:param Int amount : Amount to send in microalgos
"""
return Seq([
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields({
TxnField.type_enum: TxnType.Payment,
TxnField.amount: amount,
TxnField.receiver: receiver,
TxnField.fee: Int(1000)
}),
InnerTxnBuilder.Submit(),
])
@Subroutine(TealType.none)
def transferAsset(sender: Addr, receiver: Addr, assetId: Int, amount: Int) -> Expr:
"""
This subroutine can be used to transfer an asset
from an account to another.
This subroutine can also be used to opt in an asset if ``amount``
is 0 and ``sender`` is equal to ``receiver``.
:param Addr sender : Asset sender
:param Addr receiver : Asset receiver
:param Int assetId : ID of the asset. Note that the id must also be passed in the ``foreignAssets``
field in the outer transaction (otherwise you will get a reference error)
:param Int amount : The amount of the asset to be transferred. A zero amount transferred to self allocates
that asset in the account's Asset map.
"""
return Seq([
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields({
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.asset_amount: amount,
TxnField.asset_receiver: receiver,
TxnField.asset_sender: sender,
TxnField.xfer_asset: assetId,
TxnField.fee: Int(1000)
}),
InnerTxnBuilder.Submit(),
])
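# Minimal sketch (not used by the contract below; the helper name is ours): the
# opt-in case described in the transferAsset docstring, i.e. a zero-amount
# transfer where sender == receiver, applied to the application account.
def _optInToASA_sketch() -> Expr:
    appAddr = Global.current_application_address()
    return transferAsset(appAddr, appAddr, App.globalGet(Constants.AssetId), Int(0))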
@Subroutine(TealType.uint64)
def getAccountASABalance(account: Addr, assetId: Int) -> TealType.uint64:
"""
This subroutine returns the amount of ASA held by a certain
account. Note that the asset id must also be passed in the ``foreignAssets``
field in the outer transaction (otherwise you will get a reference error)
:param Addr account : The account to verify
:param Int assetId : ASA Id
:return : Amount of ASA held by the account
Returns 0 if the account does not have
any ASA of type ``assetId``.
:rtype : Int
"""
AssetAccountBalance = AssetHolding.balance(account, assetId)
return Seq([
AssetAccountBalance,
If(AssetAccountBalance.hasValue() == Int(1)) \
.Then(AssetAccountBalance.value()) \
.Else(Int(0))
])
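# Client-side note (an assumption about the calling convention, not checked by
# this subroutine): the application call that reaches getAccountASABalance must
# list the ASA id in its foreign assets array, e.g. in py-algorand-sdk
# ApplicationNoOpTxn(..., foreign_assets=[asset_id]); otherwise
# AssetHolding.balance fails with a reference error, as noted in the docstring.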
@Subroutine(TealType.uint64)
def computeRoyaltyFee(amount: Int, royaltyFee: Int) -> TealType.uint64:
"""
This subroutine computes the fee given a specific ``amount`` and the
predefined ``royaltyFee``.
The ``royaltyFee`` variable must be expressed in thousands.
Note that we assume that amount * royaltyFee will not overflow.
In case it does, it will trigger an error and the transaction will
fail.
:param Int amount : The amount paid
:param Int royaltyFee : The royalty fee (in thousands)
:return : Fee to be paid in microAlgos
:rtype : Int
"""
# If Mul() overflows the transaction will fail
remainder = Mod(Mul(amount, royaltyFee), Int(1000))
division = Div(Mul(amount, royaltyFee), Int(1000))
# Computes the royalty fee. If the fee is equal to 0, or the amount is very small
# the fee will be 0.
# If the royalty fee is larger or equal to 1000 then we return the original amount.
# If the remainder of royaltyFee * amount / 1000 is larger than 500 we round up the
# result and return 1 + royaltyFee * amount / 1000. Otherwise we just return
# royaltyFee * amount / 1000.
return If(Or(royaltyFee == Int(0), division == Int(0))).Then(Int(0)) \
.ElseIf(royaltyFee >= Int(1000)).Then(amount) \
.ElseIf(remainder > Int(500)).Then(division + Int(1)) \
.Else(division)
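# Off-chain reference sketch (not part of the contract; the helper name and
# example values are ours): the same rounding rule as computeRoyaltyFee,
# written in plain Python so the arithmetic can be checked easily.
def _royalty_fee_reference(amount: int, royalty_fee: int) -> int:
    # royalty_fee is expressed in thousands (parts per 1000), as above
    division, remainder = divmod(amount * royalty_fee, 1000)
    if royalty_fee == 0 or division == 0:
        return 0
    if royalty_fee >= 1000:
        return amount
    return division + 1 if remainder > 500 else division
# e.g. _royalty_fee_reference(1_000_000, 25) == 25_000, and
# _royalty_fee_reference(101, 25) == 3 because 101 * 25 = 2525 leaves a
# remainder of 525 > 500 and rounds up.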
def approval_program():
serviceCost = Int(2000) # cost of 2 inner transactions
# [Step 1] Sequence used to initialize the smart contract. Should be called only at creation
royaltyFeeArg = Btoi(Txn.application_args[2])
initialize = Seq([
Assert(Txn.type_enum() == TxnType.ApplicationCall), # Check if it's an application call
Assert(Txn.application_args.length() == Int(3)), # Check that there are 3 arguments, Creator, AssetId and Royalty Fee
        Assert(And(royaltyFeeArg >= Int(0), royaltyFeeArg <= Int(1000))),  # verify that the Royalty fee is between 0 and 1000
App.globalPut(Constants.Creator, Txn.application_args[0]), # Save the initial creator
App.globalPut(Constants.AssetId, Btoi(Txn.application_args[1])), # Save the asset ID
App.globalPut(Constants.royaltyFee, royaltyFeeArg), # Save the royalty fee
Approve()
])
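    # Client-side sketch (assumption, not enforced here): at creation the three
    # arguments would be encoded as [creator_address_bytes,
    # asset_id.to_bytes(8, "big"), royalty_fee.to_bytes(8, "big")] so that the
    # Btoi() calls above recover the integers.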
# [Step 2] Sequence that sets up the sale of an ASA
# There should be 3 arguments:
# 1. The first argument is the command to execute, in this case "setupSale"
# 2. The second one is the payment amount
    # 3. The third one is the amount of ASA transferred
    # We first verify that the seller has enough ASA to sell, and then we locally save the arguments
priceArg = Btoi(Txn.application_args[1])
amountOfASAArg = Btoi(Txn.application_args[2])
setupSale = Seq([
Assert(Txn.application_args.length() == Int(3)), # Check that there are 3 arguments
Assert(Global.group_size() == Int(1)), # Verify that it is only 1 transaction
Assert(priceArg != Int(0)), # Check that the price is different than 0
Assert(amountOfASAArg != Int(0)), # Check that the amount of ASA to transfer is different than 0
Assert( # Verify that the seller has enough ASA to sell
getAccountASABalance(Txn.sender(), App.globalGet(Constants.AssetId))
>= amountOfASAArg),
Assert(priceArg > serviceCost), # Check that the price is greater than the service cost
App.localPut(Txn.sender(), Constants.amountPayment, priceArg), # Save the price
App.localPut(Txn.sender(), Constants.amountASA, amountOfASAArg), # Save the amount of ASA to transfer
App.localPut(Txn.sender(), Constants.approveTransfer, Int(0)), # Reject transfer until payment is done
Approve()
])
# [Step 3] Sequence that approves the payment for the ASA
    # This step requires 2 transactions.
# The first transaction is a NoOp App call transaction. There should be 3 arguments:
# 1. The first argument is the command to execute, in this case "buyASA"
# 2. The second argument is the asset id
# 3. The third argument is the amount of ASA to buy
# Moreover, in the first transaction we also pass the seller's address
# The second transaction is a payment (the receiver is the app).
# Save some useful variables
seller = Gtxn[0].accounts[1] # Save seller's address
amountToBePaid = App.localGet(seller, Constants.amountPayment) # Amount to be paid
amountAssetToBeTransfered = App.localGet(seller, Constants.amountASA) # Amount of ASA
    approval = App.localGet(seller, Constants.approveTransfer)  # Variable that checks if the transfer has already been approved
buyer = Gtxn[0].sender()
buyASA = Seq([
Assert(Gtxn[0].application_args.length() == Int(3)), # Check that there are 3 arguments
Assert(Global.group_size() == Int(2)), # Check that there are 2 transactions
Assert(Gtxn[1].type_enum() == TxnType.Payment), # Check that the second transaction is a payment
Assert(App.globalGet(Constants.AssetId) == Btoi(Gtxn[0].application_args[1])), # Check that the assetId is correct
Assert(approval == Int(0)), # Check that the transfer has not been issued yet
Assert(amountToBePaid == Gtxn[1].amount()), # Check that the amount to be paid is correct
        Assert(amountAssetToBeTransfered == Btoi(Gtxn[0].application_args[2])),  # Check that the amount of ASA to sell is correct
Assert(Global.current_application_address() == Gtxn[1].receiver()), # Check that the receiver of the payment is the App
Assert( # Verify that the seller has enough ASA to sell
getAccountASABalance(seller, App.globalGet(Constants.AssetId))
>= amountAssetToBeTransfered),
        App.localPut(seller, Constants.approveTransfer, Int(1)),  # Approve the transfer from the seller's side
        App.localPut(buyer, Constants.approveTransfer, Int(1)),  # Approve the transfer from the buyer's side
Approve()
])
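    # Client-side sketch (assumption): the group would be
    #   Gtxn[0] = ApplicationCall("buyASA", itob(assetId), itob(amount)), with the
    #             seller's address passed in accounts[1] and the ASA id in foreign assets,
    #   Gtxn[1] = Payment from the buyer to the application address for exactly amountToBePaid.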
# [Step 4] Sequence that transfers the ASA, pays the seller and sends royalty fees to the creator
# This step requires 1 transaction.
    # The transaction is a NoOp App call transaction. There should be 1 argument:
    # 1. The first argument is the command to execute, in this case "executeTransfer"
    # We also account for the serviceCost to pay for the inner transactions
royaltyFee = App.globalGet(Constants.royaltyFee)
collectedFees = App.globalGet(Constants.collectedFees)
feesToBePaid = computeRoyaltyFee(amountToBePaid - serviceCost, royaltyFee)
executeTransfer = Seq([
Assert(Gtxn[0].application_args.length() == Int(1)), # Check that there is only 1 argument
Assert(Global.group_size() | |
from unittest.mock import MagicMock, patch
from scheduleServer import app
import unittest
from helperFunctions.helperFunctions import stdRet, AuthenticatedUser
class TestSchedule_addNewDuty(unittest.TestCase):
def setUp(self):
# Set up a number of items that will be used for these tests.
# -- Mock the os.environ method so that we can create the server. --
# Helper Dict for holding the os.environ configuration
self.helper_osEnviron = {
"CLIENT_ID": "TEST CLIENT_ID",
"PROJECT_ID": "TEST PROJECT_ID",
"AUTH_URI": "TEST AUTH_URI",
"TOKEN_URI": "TEST TOKEN_URI",
"AUTH_PROVIDER_X509_CERT_URL": "TEST AUTH_PROVIDER_X509_CERT_URL",
"CLIENT_SECRET": "TEST CLIENT_SECRET",
"REDIRECT_URIS": "TEST1,TEST2,TEST3,TEST4",
"JAVASCRIPT_ORIGINS": "TEST5,TEST6",
"EXPLAIN_TEMPLATE_LOADING": "FALSE",
"LOG_LEVEL": "WARNING",
"USE_ADHOC": "FALSE",
"SECRET_KEY": "TEST SECRET KEY",
"OAUTHLIB_RELAX_TOKEN_SCOPE": "1",
"OAUTHLIB_INSECURE_TRANSPORT": "1",
"HOST_URL": "https://localhost:5000",
"DATABASE_URL": "postgres://ra_sched"
}
# Create a dictionary patcher for the os.environ method
self.patcher_osEnviron = patch.dict("os.environ",
self.helper_osEnviron)
# Start the os patchers (No mock object is returned since we used patch.dict())
self.patcher_osEnviron.start()
# -- Create an instance of ScheduleServer that we may test against. --
# Mark the application as being tested
app.config["TESTING"] = True
# Disable the login_required decorator
app.config["LOGIN_DISABLED"] = True
# Reinitialize the Login Manager to accept the new configuration
app.login_manager.init_app(app)
# Create the test server
self.server = app.test_client()
# -- Create a patcher for the getAuth() method from helperFunctions --
# since we have disabled the login manager for testing
# First we must create an object for the auth_level that we can manipulate
# as needed for the tests. By default, the auth_level is set to 1.
self.mocked_authLevel = MagicMock(return_value=1)
# In order for the authLevel to respond to __lt__, __gt__, and __eq__ calls,
# we need to create lambda functions that can effectively implement the
# respective magic methods.
self.mocked_authLevel_ltMock = lambda me, other: me.return_value < other
self.mocked_authLevel_gtMock = lambda me, other: me.return_value > other
self.mocked_authLevel_eqMock = lambda me, other: me.return_value == other
# We then set the auth_level mock to return the __lt__ Mock
self.mocked_authLevel.__lt__ = self.mocked_authLevel_ltMock
# We then set the auth_level mock to return the __gt__ Mock
        self.mocked_authLevel.__gt__ = self.mocked_authLevel_gtMock
        # We then set the auth_level mock to return the __eq__ Mock
        self.mocked_authLevel.__eq__ = self.mocked_authLevel_eqMock
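        # With the default return_value of 1, the mock now behaves like an int
        # in comparisons, e.g. `self.mocked_authLevel < 2` and
        # `self.mocked_authLevel == 1` both evaluate to True.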
# Set the ra_id and hall_id to values that can be used throughout
self.user_ra_id = 1
self.user_hall_id = 1
self.associatedResHalls = [
{
"id": self.user_hall_id,
"auth_level": self.mocked_authLevel,
"name": "Test Hall"
}
]
# Assemble all of the desired values into an Authenticated User Object
self.helper_getAuth = AuthenticatedUser(
"<EMAIL>",
self.user_ra_id,
"Test",
"User",
self.associatedResHalls
)
# Create the patcher for the getAuth() method
self.patcher_getAuth = patch("schedule.schedule.getAuth", autospec=True)
# Start the patcher - mock returned
self.mocked_getAuth = self.patcher_getAuth.start()
# Configure the mocked_getAuth to return the helper_getAuth dictionary
self.mocked_getAuth.return_value = self.helper_getAuth
# -- Create a patcher for the appGlobals file --
self.patcher_appGlobals = patch("schedule.schedule.ag", autospec=True)
# Start the patcher - mock returned
self.mocked_appGlobals = self.patcher_appGlobals.start()
# Configure the mocked appGlobals as desired
self.mocked_appGlobals.baseOpts = {"HOST_URL": "https://localhost:5000"}
self.mocked_appGlobals.conn = MagicMock()
self.mocked_appGlobals.UPLOAD_FOLDER = "./static"
self.mocked_appGlobals.ALLOWED_EXTENSIONS = {"txt", "csv"}
# -- Create a patchers for the logging --
self.patcher_loggingDEBUG = patch("logging.debug", autospec=True)
self.patcher_loggingINFO = patch("logging.info", autospec=True)
self.patcher_loggingWARNING = patch("logging.warning", autospec=True)
self.patcher_loggingCRITICAL = patch("logging.critical", autospec=True)
self.patcher_loggingERROR = patch("logging.error", autospec=True)
# Start the patcher - mock returned
self.mocked_loggingDEBUG = self.patcher_loggingDEBUG.start()
self.mocked_loggingINFO = self.patcher_loggingINFO.start()
self.mocked_loggingWARNING = self.patcher_loggingWARNING.start()
self.mocked_loggingCRITICAL = self.patcher_loggingCRITICAL.start()
self.mocked_loggingERROR = self.patcher_loggingERROR.start()
def tearDown(self):
# Stop all of the patchers
self.patcher_getAuth.stop()
self.patcher_appGlobals.stop()
self.patcher_osEnviron.stop()
# Stop all of the logging patchers
self.patcher_loggingDEBUG.stop()
self.patcher_loggingINFO.stop()
self.patcher_loggingWARNING.stop()
self.patcher_loggingCRITICAL.stop()
self.patcher_loggingERROR.stop()
def resetAuthLevel(self):
# This function serves to reset the auth_level of the session
# to the default value which is 1.
self.mocked_authLevel.return_value = 1
def test_withoutAuthorizedUser_returnsNotAuthorizedResponse(self):
# Test to ensure that when an unauthorized user attempts to reach this API
# endpoint, a NOT AUTHORIZED response is returned to the user. An authorized
# user is one whose auth_level is at least 2 (AHD).
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
# Reset the auth_level to 1
self.resetAuthLevel()
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/schedule/api/addNewDuty",
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
# Assert that we received a json response
self.assertTrue(resp.is_json)
# Assert that the json is formatted as expected
self.assertEqual(resp.json, stdRet(-1, "NOT AUTHORIZED"))
# Assert that we received a 200 status code
self.assertEqual(resp.status_code, 200)
# Assert that no additional call to the DB was made
self.mocked_appGlobals.conn.cursor().execute.assert_not_called()
def test_withAuthorizedUser_withoutValidRA_returnsInvalidRASelectionResponse(self):
# Test to ensure that when an authorized user attempts to use this API,
# if an invalid RA is provided, this method will return an Invalid RA
# Selection response.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
# Set the auth_level of this session to 2
self.mocked_authLevel.return_value = 2
# Generate the various objects that will be used in this test
desiredRAID = 8
desiredDateStr = "2021-01-26"
desiredPointVal = 1
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
self.mocked_appGlobals.conn.cursor().fetchone.side_effect = [
None, # First query is for the RA ID
None, # Second query is for the day info
None # Third query is for the schedule
]
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/schedule/api/addNewDuty",
json=dict(
id=desiredRAID,
pts=desiredPointVal,
dateStr=desiredDateStr
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
# Assert that the last time appGlobals.conn.cursor().execute was called,
# it was a query for the RA.
self.mocked_appGlobals.conn.cursor().execute.assert_called_with(
"SELECT ra_id FROM staff_membership WHERE ra_id = %s AND res_hall_id = %s;",
(desiredRAID, self.user_hall_id)
)
# Assert that we received the expected response
self.assertEqual(resp.json, stdRet(-1, "Chosen RA is not a Valid Selection"))
# Assert that appGlobals.conn.cursor().close was called
self.mocked_appGlobals.conn.cursor().close.assert_called_once()
def test_withAuthorizedUser_withoutValidDay_returnsInvalidDayResponse(self):
# Test to ensure that when an authorized user attempts to use this API,
# if an invalid Day is provided, this method will return an Invalid Day
# Selection response.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
# Set the auth_level of this session to 2
self.mocked_authLevel.return_value = 2
# Generate the various objects that will be used in this test
desiredRAID = 15
desiredDateStr = "2021-01-26"
desiredPointVal = 4
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
self.mocked_appGlobals.conn.cursor().fetchone.side_effect = [
(desiredRAID,), # First query is for the RA ID
None, # Second query is for the day info
None # Third query is for the schedule
]
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/schedule/api/addNewDuty",
json=dict(
id=desiredRAID,
pts=desiredPointVal,
dateStr=desiredDateStr
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
# Assert that the last time appGlobals.conn.cursor().execute was called,
        # it was a query for the day.
self.mocked_appGlobals.conn.cursor().execute.assert_called_with(
"SELECT id, month_id FROM day WHERE date = TO_DATE(%s, 'YYYY-MM-DD');",
(desiredDateStr,)
)
# Assert that we received the expected response
self.assertEqual(resp.json, stdRet(0, "Invalid Date"))
# Assert that appGlobals.conn.cursor().close was called
self.mocked_appGlobals.conn.cursor().close.assert_called_once()
def test_withAuthorizedUser_withoutValidSchedule_createsNewScheduleEntry(self):
# Test to ensure that when an authorized user attempts to use this API,
# if no schedule is available, this will create a new schedule record.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
# Set the auth_level of this session to 2
self.mocked_authLevel.return_value = 2
# Generate the various objects that will be used in this test
desiredRAID = 1
desiredDateStr = "2021-01-26"
desiredPointVal = 3
expectedMonthID = 9
expectedDayID = 17
expectedScheduleID = 60
desiredFlagState = False
duplicateCheck = False
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
self.mocked_appGlobals.conn.cursor().fetchone.side_effect = [
(desiredRAID,), # First query is for the RA ID
(expectedDayID, expectedMonthID), # Second query is for the day info
None, # Third query is for the schedule
(expectedScheduleID,), # Fourth call will return the new schedule ID
(duplicateCheck,)
]
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/schedule/api/addNewDuty",
json=dict(
id=desiredRAID,
pts=desiredPointVal,
dateStr=desiredDateStr,
flag=desiredFlagState
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
| |
else 1
dist_hs = kwargs['dist_hs']
center = kwargs['center']
matrix_method = kwargs['matrix_method']
lh = ph * ch # length of helix
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
# create tail
movez = np.array((0, 0, rs1 + dist_hs + lh / 2))
tkwargs = kwargs.copy()
tkwargs['left_hand'] = False
tail_list1 = create_ecoli_tail(-movez, **tkwargs)
tkwargs['left_hand'] = True
tail_list2 = create_ecoli_tail(movez, **tkwargs)
# create head
vsgeo = ellipse_base_geo() # velocity node geo of sphere
vsgeo.create_delta(ds, rs1, rs2)
vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
fsgeo = vsgeo.copy() # force node geo of sphere
fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
vsobj = objtype()
vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
vsobj.zoom(zoom_factor)
return vsobj, tail_list1, tail_list2
def createEcoliComp_ellipse(name='...', **kwargs):
vsobj, tail_list = createEcoli_ellipse(name=name, **kwargs)
vsgeo = vsobj.get_u_geo()
center = kwargs['center']
rel_Us = kwargs['rel_Us']
rel_Uh = kwargs['rel_Uh']
ecoli_comp = sf.ForceFreeComposite(center=center.copy(), norm=vsgeo.get_geo_norm().copy(),
name=name)
ecoli_comp.add_obj(vsobj, rel_U=rel_Us)
for ti in tail_list:
ecoli_comp.add_obj(ti, rel_U=rel_Uh)
rot_norm = kwargs['rot_norm']
rot_theta = kwargs['rot_theta'] * np.pi
ecoli_comp.node_rotation(norm=rot_norm.copy(), theta=rot_theta, rotation_origin=center.copy())
return ecoli_comp
def createEcoli_tunnel(**kwargs):
ch = kwargs['ch']
rh1 = kwargs['rh1']
rh2 = kwargs['rh2']
ph = kwargs['ph']
ds = kwargs['ds']
rs1 = kwargs['rs1']
rs2 = kwargs['rs2']
ls = kwargs['ls']
es = kwargs['es']
# sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
zoom_factor = kwargs['zoom_factor']
dist_hs = kwargs['dist_hs']
center = kwargs['center']
rT1 = kwargs['rT1']
rT2 = kwargs['rT2']
ntT = kwargs['ntT']
eT = kwargs['eT']
Tfct = kwargs['Tfct']
matrix_method = kwargs['matrix_method']
lh = ph * ch # length of helix
movesz = 0.5 * (dist_hs - ls + lh) + ls / 2
movehz = -1 * (0.5 * (dist_hs + ls - lh) + lh / 2)
# movesz = (ls + dist_hs) / 2
# movehz = (lh + dist_hs) / 2
moves = np.array((0, 0, movesz)) + center # move distance of sphere
moveh = np.array((rT1 - rh1, 0, movehz)) + center # move distance of helix
lT = (rT1 + rh2) * 2
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
err_msg = 'the regularized family methods requires eT==0. '
assert np.isclose(eT, 0), err_msg
# create helix
tail_list = create_ecoli_tail(moveh, **kwargs)
# create head
vsobj = objtype()
node_dof = vsobj.get_n_unknown()
vsgeo = create_capsule(rs1, rs2, ls, ds, node_dof)
fsgeo = vsgeo.copy() # force node geo of sphere
fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
fsgeo.node_zoom_z(1 - ds / (0.5 * (rs1 + rs2)) * es)
vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
vsobj.zoom(zoom_factor)
vsobj.move(moves * zoom_factor)
# create T shape
dtT = 2 * np.pi / ntT
vTobj = objtype()
node_dof = vTobj.get_n_unknown()
# # dbg
# OptDB = PETSc.Options( )
# factor = OptDB.getReal('dbg_move_factor', 1)
# PETSc.Sys.Print('--------------------> DBG: dbg_move_factor = %f' % factor)
# moveT = np.array((0, 0, moveh[-1] + lh / 2 + rh2 * factor))
moveT = np.array((0, 0, movehz + lh / 2)) + center
vTgeo = tunnel_geo()
if 'dualPotential' in matrix_method:
vTgeo.set_check_epsilon(False)
vTgeo.set_dof(node_dof)
fTgeo = vTgeo.create_deltatheta(dth=dtT, radius=rT2, factor=Tfct, length=lT, epsilon=eT,
with_cover=1)
vTobj.set_data(fTgeo, vTgeo, name='T_shape_0')
theta = -np.pi / 2
vTobj.node_rotation(norm=np.array((0, 1, 0)), theta=theta)
vTobj.zoom(zoom_factor)
vTobj.move(moveT * zoom_factor)
theta = np.pi / 4 - ch * np.pi
vsobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
for ti in tail_list:
ti.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
vTobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
return vsobj, tail_list, vTobj
def createEcoliComp_tunnel(name='...', **kwargs):
with_T_geo = kwargs['with_T_geo'] if 'with_T_geo' in kwargs.keys() else 0
center = kwargs['center']
rel_Us = kwargs['rel_Us']
rel_Uh = kwargs['rel_Uh']
if not with_T_geo:
kwargs['rT1'] = kwargs['rh1']
vsobj, tail_list, vTobj = createEcoli_tunnel(**kwargs)
ecoli_comp = sf.ForceFreeComposite(center, norm=vsobj.get_u_geo().get_geo_norm(), name=name)
ecoli_comp.add_obj(vsobj, rel_U=rel_Us)
for ti in tail_list:
ecoli_comp.add_obj(ti, rel_U=rel_Uh)
if with_T_geo:
ecoli_comp.add_obj(vTobj, rel_U=rel_Uh)
return ecoli_comp
def create_ecoli_2part(**problem_kwargs):
    # create an ecoli that contains two parts: a head and a tail.
rel_Us = problem_kwargs['rel_Us']
rel_Uh = problem_kwargs['rel_Uh']
center = problem_kwargs['center']
update_order = problem_kwargs['update_order'] if 'update_order' in problem_kwargs.keys() else 1
update_fun = problem_kwargs['update_fun'] if 'update_fun' in problem_kwargs.keys() \
else Adams_Bashforth_Methods
with_T_geo = problem_kwargs['with_T_geo']
err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
assert not with_T_geo, err_msg
head_obj, tail_obj_list = createEcoli_ellipse(name='ecoli0', **problem_kwargs)
head_obj.set_name('head_obj')
tail_obj = sf.StokesFlowObj()
tail_obj.set_name('tail_obj')
tail_obj.combine(tail_obj_list)
head_geo = head_obj.get_u_geo()
# ecoli_comp = sf.ForceFreeComposite(center=head_geo.get_center(), norm=head_geo.get_geo_norm(), name='ecoli_0')
ecoli_comp = sf.ForceFreeComposite(center=center, norm=head_geo.get_geo_norm(), name='ecoli_0')
ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
ecoli_comp.add_obj(obj=tail_obj, rel_U=rel_Uh)
ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return ecoli_comp
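# Hedged usage note: the kwargs consumed here are rel_Us, rel_Uh, center,
# with_T_geo and optionally update_order / update_fun, plus the head/tail
# geometry kwargs read by createEcoli_ellipse; e.g. (placeholder values)
#   create_ecoli_2part(rel_Us=np.zeros(6), rel_Uh=np.array((0, 0, 0, 0, 0, 1)),
#                      center=np.zeros(3), with_T_geo=0, **geometry_kwargs)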
def create_rotlets_tail_2part(rotlet_strength=0, **problem_kwargs):
    # create a swimmer with an infinitely small head (the limit is a rotlet) and tail(s).
ch = problem_kwargs['ch']
ph = problem_kwargs['ph']
dist_hs = problem_kwargs['dist_hs']
lh = ph * ch # length of helix
with_T_geo = problem_kwargs['with_T_geo']
err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
assert not with_T_geo, err_msg
tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
tail_obj0 = sf.StokesFlowObj()
tail_obj0.combine(tail_list)
tail_obj = sf.FundSoltObj()
tail_obj.set_data(tail_obj0.get_u_geo(), tail_obj0.get_f_geo(), name='rotlets_tail_obj')
location = np.array((0, 0, lh / 2 + dist_hs))
tnorm = tail_obj0.get_u_geo().get_geo_norm()
torque = tnorm * rotlet_strength
tail_obj.add_point_force(location=location, force=torque,
StokesletsHandle=light_rotlets_matrix_3d)
givenT = np.hstack((np.zeros(3), -1 * torque))
ecoli_comp = sf.GivenForceComposite(center=np.zeros(3), norm=tnorm,
name='rotlets_tail_comp', givenF=givenT)
ecoli_comp.add_obj(obj=tail_obj, rel_U=np.zeros(6))
update_order = problem_kwargs['update_order'] \
if 'update_order' in problem_kwargs.keys() \
else 1
update_fun = problem_kwargs['update_fun'] \
if 'update_fun' in problem_kwargs.keys() \
else Adams_Bashforth_Methods
ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return ecoli_comp
def create_ecoli_2part_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
ti = problem_kwargs['ti'] if 'ti' in problem_kwargs.keys() else 0
omega_tail = problem_kwargs['omega_tail'] if 'omega_tail' in problem_kwargs.keys() else 0
ecoli_comp = create_ecoli_2part(**problem_kwargs)
ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
head_obj = ecoli_comp.get_obj_list()[0]
tail_obj = ecoli_comp.get_obj_list()[1]
head_obj.node_rotation(head_obj.get_u_geo().get_geo_norm(), psi_tail - omega_tail * ti)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
ecoli_comp.move(now_center)
return ecoli_comp
def get_ecoli_nodes_2part_at(*args, **kwargs):
ecoli_comp = create_ecoli_2part_at(*args, **kwargs)
return [i0.get_u_geo().get_nodes() for i0 in ecoli_comp.get_obj_list()]
def get_ecoli_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
n_tail = problem_kwargs['n_tail']
ti = problem_kwargs['ti'] if 'ti' in problem_kwargs.keys() else 0
omega_tail = problem_kwargs['omega_tail'] if 'omega_tail' in problem_kwargs.keys() else 0
ecoli_comp = create_ecoli_2part(**problem_kwargs)
ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
head_obj = ecoli_comp.get_obj_list()[0]
tail_obj = ecoli_comp.get_obj_list()[1]
head_obj.node_rotation(head_obj.get_u_geo().get_geo_norm(), psi_tail - omega_tail * ti)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
ecoli_comp.move(now_center)
t0 = np.split(tail_obj.get_u_nodes(), 2 * n_tail)
t1 = np.vstack(t0[1::2])
t2 = np.vstack(t0[0::2])
t3 = ecoli_comp.get_obj_list()[0].get_u_nodes()
return t1, t2, t3
def get_ellipsoid_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
ds = problem_kwargs['ds']
rs1 = problem_kwargs['rs1']
rs2 = problem_kwargs['rs2']
vsgeo = ellipse_base_geo()
vsgeo.create_delta(ds, rs1, rs2)
vsgeo.set_geo_norm(vsgeo.get_geo_norm() * -1)
vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=np.pi / 2)
vsgeo.node_rotation(np.array((0, 1, 0)), theta)
vsgeo.node_rotation(np.array((0, 0, 1)), phi)
vsgeo.node_rotation(vsgeo.get_geo_norm(), psi_tail)
vsgeo.move(now_center - vsgeo.get_center())
return [vsgeo.get_nodes(), ]
def create_ecoli_dualTail(**problem_kwargs):
    # create a swimmer with two tails at the ends: one is left-handed and one is right-handed.
    # the swimmer contains three parts, i.e. head, upper tail and lower tail.
rel_Us = problem_kwargs['rel_Us']
rel_Uh = problem_kwargs['rel_Uh']
update_order = problem_kwargs['update_order'] if 'update_order' in problem_kwargs.keys() else 1
update_fun = problem_kwargs['update_fun'] if 'update_fun' in problem_kwargs.keys() \
else Adams_Bashforth_Methods
with_T_geo = problem_kwargs['with_T_geo']
err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
assert not with_T_geo, err_msg
head_obj, tail_obj_l1, tail_obj_l2 = createEcoli_2tails(name='ecoli0', **problem_kwargs)
head_obj.set_name('head_obj')
tail_obj1 = sf.StokesFlowObj()
tail_obj1.set_name('tail_obj1')
tail_obj1.combine(tail_obj_l1)
tail_obj2 = sf.StokesFlowObj()
tail_obj2.set_name('tail_obj2')
tail_obj2.combine(tail_obj_l2)
head_geo = head_obj.get_u_geo()
tnorm = head_geo.get_geo_norm()
ecoli_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=tnorm, name='ecoli_0')
ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
ecoli_comp.add_obj(obj=tail_obj1, rel_U=rel_Uh)
ecoli_comp.add_obj(obj=tail_obj2, rel_U=-rel_Uh)
ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return ecoli_comp
def create_ecoli_dualTail_at(theta, phi, psi_tail1, psi_tail2, center=np.zeros(3),
**problem_kwargs):
assert 1 == 2
ecoli_comp = create_ecoli_dualTail(**problem_kwargs)
# ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
# ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
# tail_obj1 = ecoli_comp.get_obj_list()[1]
# tail_obj1.node_rotation(tail_obj1.get_u_geo().get_geo_norm(), psi_tail1)
# tail_obj2 = ecoli_comp.get_obj_list()[2]
# tail_obj2.node_rotation(tail_obj2.get_u_geo().get_geo_norm(), psi_tail2)
return ecoli_comp
def create_sphere(namehandle='sphereObj', **kwargs):
matrix_method = kwargs['matrix_method']
rs = kwargs['rs']
sphere_velocity = kwargs['sphere_velocity']
ds = kwargs['ds']
es = kwargs['es']
sphere_coord = kwargs['sphere_coord']
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
obj_sphere = objtype()
sphere_geo0 = sphere_geo() # force geo
sphere_geo0.set_dof(obj_sphere.get_n_unknown())
sphere_geo0.create_delta(ds, rs)
sphere_geo0.set_rigid_velocity([0, 0, 0, 0, 0, 0])
sphere_geo1 = sphere_geo0.copy()
if 'pf' in matrix_method:
sphere_geo1.node_zoom((rs + ds * es) / rs)
obj_sphere.set_data(sphere_geo1, sphere_geo0)
obj_list = []
for i0, (t_coord, t_velocity) in enumerate(zip(sphere_coord, sphere_velocity)):
obj2 = obj_sphere.copy()
obj2.set_name('%s_%d' % (namehandle, i0))
obj2.move(t_coord)
obj2.get_u_geo().set_rigid_velocity(t_velocity)
obj_list.append(obj2)
return obj_list
def create_one_ellipse(namehandle='ellipseObj', **kwargs):
matrix_method = kwargs['matrix_method']
rs1 = kwargs['rs1']
rs2 = kwargs['rs2']
sphere_velocity = kwargs['sphere_velocity']
ds = kwargs['ds']
es = kwargs['es']
sphere_coord = kwargs['sphere_coord']
objtype | |
range(borders):
core.ChangeGC(
gc, xcffib.xproto.GC.Foreground, [self.conn.color_pixel(colors[i])]
)
rect = xcffib.xproto.RECTANGLE.synthetic(
coord, coord, outer_w - coord * 2, outer_h - coord * 2
)
core.PolyFillRectangle(pixmap, gc, 1, [rect])
coord += borderwidths[i]
self._set_borderpixmap(depth, pixmap, gc, borderwidth, width, height)
def _set_borderpixmap(self, depth, pixmap, gc, borderwidth, width, height):
core = self.conn.conn.core
outer_w = width + borderwidth * 2
outer_h = height + borderwidth * 2
with PixmapID(self.conn.conn) as border:
core.CreatePixmap(depth, border, self.wid, outer_w, outer_h)
most_w = outer_w - borderwidth
most_h = outer_h - borderwidth
core.CopyArea(pixmap, border, gc, borderwidth, borderwidth, 0, 0, most_w, most_h)
core.CopyArea(pixmap, border, gc, 0, 0, most_w, most_h, borderwidth, borderwidth)
core.CopyArea(pixmap, border, gc, borderwidth, 0, 0, most_h, most_w, borderwidth)
core.CopyArea(pixmap, border, gc, 0, borderwidth, most_w, 0, borderwidth, most_h)
core.ChangeWindowAttributes(self.wid, xcffib.xproto.CW.BorderPixmap, [border])
class _Window:
_window_mask = 0 # override in child class
def __init__(self, window, qtile):
base.Window.__init__(self)
self.window, self.qtile = window, qtile
self.hidden = True
self.icons = {}
window.set_attribute(eventmask=self._window_mask)
self._group = None
try:
g = self.window.get_geometry()
self._x = g.x
self._y = g.y
self._width = g.width
self._height = g.height
self._depth = g.depth
except xcffib.xproto.DrawableError:
# Whoops, we were too early, so let's ignore it for now and get the
# values on demand.
self._x = None
self._y = None
self._width = None
self._height = None
self._depth = None
self.float_x: Optional[int] = None
self.float_y: Optional[int] = None
self._float_width: int = self._width
self._float_height: int = self._height
self.bordercolor = None
self.state = NormalState
self._float_state = FloatStates.NOT_FLOATING
self._demands_attention = False
self.hints = {
"input": True,
"icon_pixmap": None,
"icon_window": None,
"icon_x": 0,
"icon_y": 0,
"icon_mask": 0,
"window_group": None,
"urgent": False,
# normal or size hints
"width_inc": None,
"height_inc": None,
"base_width": 0,
"base_height": 0,
}
self.update_hints()
x = property(fset=_geometry_setter("x"), fget=_geometry_getter("x"))
y = property(fset=_geometry_setter("y"), fget=_geometry_getter("y"))
width = property(
fset=_geometry_setter("width"),
fget=_geometry_getter("width"),
)
height = property(
fset=_geometry_setter("height"),
fget=_geometry_getter("height"),
)
depth = property(
fset=_geometry_setter("depth"),
fget=_geometry_getter("depth"),
)
@property
def wid(self):
return self.window.wid
@property
def group(self):
return self._group
def has_fixed_ratio(self) -> bool:
try:
if (
"PAspect" in self.hints["flags"]
and self.hints["min_aspect"] == self.hints["max_aspect"]
):
return True
except KeyError:
pass
return False
def has_fixed_size(self) -> bool:
try:
if (
"PMinSize" in self.hints["flags"]
and "PMaxSize" in self.hints["flags"]
and 0 < self.hints["min_width"] == self.hints["max_width"]
and 0 < self.hints["min_height"] == self.hints["max_height"]
):
return True
except KeyError:
pass
return False
def has_user_set_position(self):
try:
if "USPosition" in self.hints["flags"] or "PPosition" in self.hints["flags"]:
return True
except KeyError:
pass
return False
def update_name(self):
try:
self.name = self.window.get_name()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
hook.fire("client_name_updated", self)
def update_wm_class(self) -> None:
self._wm_class = self.window.get_wm_class()
def get_wm_class(self) -> Optional[List[str]]:
return self._wm_class
def get_wm_type(self):
return self.window.get_wm_type()
def get_wm_role(self):
return self.window.get_wm_window_role()
def is_transient_for(self):
"""What window is this window a transient windor for?"""
wid = self.window.get_wm_transient_for()
return self.qtile.windows_map.get(wid)
def update_hints(self):
"""Update the local copy of the window's WM_HINTS
See http://tronche.com/gui/x/icccm/sec-4.html#WM_HINTS
"""
try:
h = self.window.get_wm_hints()
normh = self.window.get_wm_normal_hints()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
if normh:
self.hints.update(normh)
if h and "UrgencyHint" in h["flags"]:
if self.qtile.current_window != self:
self.hints["urgent"] = True
hook.fire("client_urgent_hint_changed", self)
elif self.urgent:
self.hints["urgent"] = False
hook.fire("client_urgent_hint_changed", self)
if h and "InputHint" in h["flags"]:
self.hints["input"] = h["input"]
if getattr(self, "group", None):
if self.group.floating_layout.match(self):
self.floating = True
self.group.layout_all()
return
def update_state(self):
triggered = ["urgent"]
if self.qtile.config.auto_fullscreen:
triggered.append("fullscreen")
state = self.window.get_net_wm_state()
for s in triggered:
setattr(self, s, (s in state))
@property
def urgent(self):
return self.hints["urgent"] or self._demands_attention
@urgent.setter
def urgent(self, val):
self._demands_attention = val
# TODO unset window hint as well?
if not val:
self.hints["urgent"] = False
def info(self):
if self.group:
group = self.group.name
else:
group = None
float_info = {
"x": self.float_x,
"y": self.float_y,
"width": self._float_width,
"height": self._float_height,
}
return dict(
name=self.name,
x=self.x,
y=self.y,
width=self.width,
height=self.height,
group=group,
id=self.window.wid,
wm_class=self.get_wm_class(),
floating=self._float_state != FloatStates.NOT_FLOATING,
float_info=float_info,
maximized=self._float_state == FloatStates.MAXIMIZED,
minimized=self._float_state == FloatStates.MINIMIZED,
fullscreen=self._float_state == FloatStates.FULLSCREEN,
)
@property
def state(self):
return self.window.get_wm_state()[0]
@state.setter
def state(self, val):
if val in (WithdrawnState, NormalState, IconicState):
self.window.set_property("WM_STATE", [val, 0])
@property
def opacity(self):
assert hasattr(self, "window")
opacity = self.window.get_property("_NET_WM_WINDOW_OPACITY", unpack=int)
if not opacity:
return 1.0
else:
value = opacity[0]
# 2 decimal places
as_float = round(value / 0xFFFFFFFF, 2)
return as_float
@opacity.setter
def opacity(self, opacity: float) -> None:
if 0.0 <= opacity <= 1.0:
real_opacity = int(opacity * 0xFFFFFFFF)
assert hasattr(self, "window")
self.window.set_property("_NET_WM_WINDOW_OPACITY", real_opacity)
def kill(self):
if "WM_DELETE_WINDOW" in self.window.get_wm_protocols():
data = [
self.qtile.core.conn.atoms["WM_DELETE_WINDOW"],
xcffib.xproto.Time.CurrentTime,
0,
0,
0,
]
u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
e = xcffib.xproto.ClientMessageEvent.synthetic(
format=32,
window=self.window.wid,
type=self.qtile.core.conn.atoms["WM_PROTOCOLS"],
data=u,
)
self.window.send_event(e)
else:
self.window.kill_client()
self.qtile.core.conn.flush()
def hide(self):
# We don't want to get the UnmapNotify for this unmap
with self.disable_mask(EventMask.StructureNotify):
with self.qtile.core.disable_unmap_events():
self.window.unmap()
self.hidden = True
def unhide(self):
self.window.map()
self.state = NormalState
self.hidden = False
@contextlib.contextmanager
def disable_mask(self, mask):
self._disable_mask(mask)
yield
self._reset_mask()
def _disable_mask(self, mask):
self.window.set_attribute(eventmask=self._window_mask & (~mask))
def _reset_mask(self):
self.window.set_attribute(eventmask=self._window_mask)
def _grab_click(self):
# Grab button 1 to focus upon click when unfocussed
for amask in self.qtile.core._auto_modmasks():
self.qtile.core.conn.conn.core.GrabButton(
True,
self.window.wid,
EventMask.ButtonPress,
xcffib.xproto.GrabMode.Sync,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
1,
amask,
)
def _ungrab_click(self):
# Ungrab button 1 when focussed
self.qtile.core.conn.conn.core.UngrabButton(
xcffib.xproto.Atom.Any,
self.window.wid,
xcffib.xproto.ModMask.Any,
)
def get_pid(self):
return self.window.get_net_wm_pid()
def place(
self,
x,
y,
width,
height,
borderwidth,
bordercolor,
above=False,
margin=None,
respect_hints=False,
):
"""
Places the window at the specified location with the given size.
Parameters
==========
x: int
y: int
width: int
height: int
borderwidth: int
bordercolor: string
above: bool, optional
margin: int or list, optional
space around window as int or list of ints [N E S W]
        respect_hints: bool, optional
If True, the geometry will be adjusted to respect hints provided by the
client.
"""
# TODO: self.x/y/height/width are updated BEFORE
# place is called, so there's no way to know if only
# the position is changed, so we are sending
# the ConfigureNotify every time place is called
#
# # if position change and size don't
# # send a configure notify. See ICCCM 4.2.3
# send_notify = False
# if (self.x != x or self.y != y) and \
# (self.width == width and self.height == height):
# send_notify = True
# #for now, we just:
send_notify = True
# Adjust the placement to account for layout margins, if there are any.
if margin is not None:
if isinstance(margin, int):
margin = [margin] * 4
x += margin[3]
y += margin[0]
width -= margin[1] + margin[3]
height -= margin[0] + margin[2]
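            # e.g. margin=[4, 8, 4, 8] (N E S W) on an 800x600 slot at (0, 0)
            # gives x=8, y=4, width=784, height=592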
# Optionally adjust geometry to respect client hints
if respect_hints:
flags = self.hints.get("flags", {})
if "PMinSize" in flags:
width = max(width, self.hints.get("min_width", 0))
height = max(height, self.hints.get("min_height", 0))
if "PMaxSize" in flags:
width = min(width, self.hints.get("max_width", 0)) or width
height = min(height, self.hints.get("max_height", 0)) or height
if "PAspect" in flags and self._float_state == FloatStates.FLOATING:
min_aspect = self.hints["min_aspect"]
max_aspect = self.hints["max_aspect"]
if width / height < min_aspect[0] / min_aspect[1]:
height = width * min_aspect[1] // min_aspect[0]
elif width / height > max_aspect[0] / max_aspect[1]:
height = width * max_aspect[1] // max_aspect[0]
if self.hints["base_width"] and self.hints["width_inc"]:
width_adjustment = (width - self.hints["base_width"]) % self.hints["width_inc"]
width -= width_adjustment
if self.fullscreen:
x += int(width_adjustment / 2)
if self.hints["base_height"] and self.hints["height_inc"]:
height_adjustment = (height - self.hints["base_height"]) % self.hints[
"height_inc"
]
height -= height_adjustment
if self.fullscreen:
y += int(height_adjustment / 2)
# save x and y float offset
if self.group is not None and self.group.screen is not None:
self.float_x = x - self.group.screen.x
self.float_y = y - self.group.screen.y
self.x = x
self.y = y
self.width = width
self.height = height
kwarg = dict(
x=x,
y=y,
width=width,
height=height,
)
if above:
kwarg["stackmode"] = StackMode.Above
self.window.configure(**kwarg)
self.paint_borders(bordercolor, borderwidth)
if send_notify:
self.send_configure_notify(x, y, width, height)
def paint_borders(self, color, width):
self.borderwidth = width
self.bordercolor = color
self.window.configure(borderwidth=width)
self.window.paint_borders(self.depth, color, width, self.width, self.height)
def send_configure_notify(self, x, y, width, height):
"""Send a synthetic ConfigureNotify"""
window = self.window.wid
above_sibling = False
override_redirect = False
event = xcffib.xproto.ConfigureNotifyEvent.synthetic(
event=window,
window=window,
above_sibling=above_sibling,
x=x,
y=y,
width=width,
height=height,
border_width=self.borderwidth,
override_redirect=override_redirect,
)
self.window.send_event(event, mask=EventMask.StructureNotify)
@property
def can_steal_focus(self):
return self.window.get_wm_type() != "notification"
def _do_focus(self):
"""
Focus the window if we can, and return whether or not it was successful.
"""
# don't focus hidden windows, they should be mapped. this is generally
# a bug somewhere in the qtile code, but | |
import angr
import pyvex
import claripy
from angr.errors import SimReliftException, UnsupportedIRStmtError, SimStatementError, SimUninitializedAccessError
from angr.state_plugins.inspect import BP_AFTER, BP_BEFORE
from angr.state_plugins.sim_action_object import SimActionObject
from angr.state_plugins.sim_action import SimActionData
from angr.engines import vex
import collections
import logging
l = logging.getLogger(name=__name__)
from utils import isDefinitelyEqual_Solver, isDefinitelyNotEqual_Solver, describeAst
def makeSpeculative(proj, state, window=250, misforwarding=False):
"""
window: size of speculative window (~ROB) in x86 instructions.
misforwarding: whether to enable misforwarding features, i.e., speculatively
missing a forward from an inflight store.
"""
proj.engines.register_plugin('specvex', SimEngineSpecVEX())
proj.engines.order = ['specvex' if x=='vex' else x for x in proj.engines.order] # replace 'vex' with 'specvex'
if proj.engines.has_plugin('vex'): proj.engines.release_plugin('vex')
#state.options.discard(angr.options.LAZY_SOLVES) # turns out LAZY_SOLVES is not on by default
state.register_plugin('spec', SpecState(window))
state.spec.arm(state, misforwarding=misforwarding)
assert state.spec.ins_executed == 0
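# Rough usage sketch (the binary path and window size are placeholders; assumes
# the SpecState plugin defined later in this module):
def _example_speculative_run(binary_path="./a.out", window=250):
    proj = angr.Project(binary_path)
    state = proj.factory.entry_state()
    makeSpeculative(proj, state, window=window, misforwarding=False)
    simgr = proj.factory.simulation_manager(state)
    simgr.run()
    return simgr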
class SimEngineSpecVEX(angr.SimEngineVEX):
"""
Execution engine which allows bounded wrong-path speculation.
Based on the default SimEngineVEX.
"""
def lift(self, **kwargs):
"""
An override of the lift method in SimEngineVEX base class.
Ensures that any instruction containing a load is considered the end of its irsb.
This is necessary in order for us to be able to fork during loads, because jumping
into the middle of an irsb causes problems (VEX temp variables won't be correct)
"""
firsttry = super().lift(**kwargs)
def isLoad(stmt):
if type(stmt) == pyvex.IRStmt.WrTmp and type(stmt.data) == pyvex.IRExpr.Load: return True
if type(stmt) == pyvex.IRStmt.LoadG: return True
return False
stops = [nextInstruction(firsttry, stmt) for stmt in firsttry.statements if isLoad(stmt)]
stops = list(set(addr for (addr, _) in stops if addr is not None)) # list(set()) removes duplicates
if stops:
l.debug("Adding stop points {}".format([hex(stop) for stop in stops]))
extra_stop_points = kwargs.pop('extra_stop_points', [])
if extra_stop_points is None: extra_stop_points = []
extra_stop_points.extend(stops)
return super().lift(extra_stop_points=extra_stop_points, **kwargs)
else:
return firsttry
def _handle_statement(self, state, successors, stmt):
"""
An override of the _handle_statement method in SimEngineVEX base class.
Much code copied from there; see SimEngineVEX class for more information/docs.
"""
if type(stmt) == pyvex.IRStmt.IMark:
ins_addr = stmt.addr + stmt.delta
state.scratch.ins_addr = ins_addr
# Raise an exception if we're suddenly in self-modifying code
for subaddr in range(stmt.len):
if subaddr + stmt.addr in state.scratch.dirty_addrs:
raise SimReliftException(state)
state._inspect('instruction', BP_AFTER)
#l.debug("IMark: %#x", stmt.addr)
state.scratch.num_insns += 1
state._inspect('instruction', BP_BEFORE, instruction=ins_addr)
if state.spec.mispredicted:
return False # report path as deadended
if state.spec.hook_loads and type(stmt) == pyvex.IRStmt.WrTmp and type(stmt.data) == pyvex.IRExpr.Load:
self._handleWrTmpLoadWithPossibleForwarding(state, successors, stmt)
# we've now completely handled this statement manually, we're done
return True
if state.spec.hook_loads and type(stmt) == pyvex.IRStmt.LoadG:
self._handleLoadGWithPossibleForwarding(state, successors, stmt)
# we've now completely handled this statement manually, we're done
return True
# now for everything else
try:
stmt_handler = self.stmt_handlers[stmt.tag_int]
except IndexError:
l.error("Unsupported statement type %s", (type(stmt)))
if angr.options.BYPASS_UNSUPPORTED_IRSTMT not in state.options:
raise UnsupportedIRStmtError("Unsupported statement type %s" % (type(stmt)))
state.history.add_event('resilience', resilience_type='irstmt', stmt=type(stmt).__name__, message='unsupported IRStmt')
return None
else:
exit_data = stmt_handler(self, state, stmt)
# handling conditional exits is where the magic happens
if exit_data is not None:
target, guard, jumpkind = exit_data
l.debug("time {}: forking for conditional exit to {} under guard {}".format(state.spec.ins_executed, target, guard))
# Unlike normal SimEngineVEX, we always proceed down both sides of the branch
# (to simulate possible wrong-path execution, i.e. branch misprediction)
# and add the path constraints later, only after _spec_window_size instructions have passed
branchcond = guard
notbranchcond = claripy.Not(branchcond)
exit_state = None
cont_state = None
if hasattr(state.spectre, 'takepath') and state.spectre.takepath:
npath = state.spectre.takepath.popleft()
if npath == '1':
exit_state = state
elif npath == '0':
cont_state = state
else:
exit_state = state.copy()
cont_state = state
if exit_state is not None:
exit_state.spec.path.append('1')
if not state.solver.is_true(branchcond): exit_state.spec.conditionals.append(branchcond) # don't bother adding a deferred 'True' constraint
successors.add_successor(exit_state, target, guard, jumpkind, add_guard=False,
exit_stmt_idx=state.scratch.stmt_idx, exit_ins_addr=state.scratch.ins_addr)
if cont_state is not None:
cont_state.spec.path.append('0')
if not state.solver.is_true(notbranchcond): cont_state.spec.conditionals.append(notbranchcond) # don't bother adding a deferred 'True' constraint
return True
else:
return False
# We don't add the guard for the exit_state (add_guard=False).
# Unfortunately, the call to add the 'default' successor at the end of an irsb
# (line 313 in vex/engine.py as of this writing) leaves add_guard as default (True).
# For the moment, rather than patching this, we just don't record the guard at
# all on the cont_state.
# TODO not sure if this will mess us up. Is scratch.guard used for merging?
# Haven't thought about how speculation should interact with merging.
# More fundamentally, what is scratch.guard used for when add_guard=False? Anything?
#cont_state.scratch.guard = claripy.And(cont_state.scratch.guard, notbranchcond)
return True
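# Editor's note (illustrative sketch, not from the original source): for a hypothetical
# branch guard like (x < 10), the code above keeps BOTH successors running and only
# records the branch conditions for later:
#   exit_state.spec.conditionals  <- (x < 10)        # taken side
#   cont_state.spec.conditionals  <- Not(x < 10)     # fall-through side
# The conditions become real solver constraints only after _spec_window_size further
# instructions, so a side whose deferred condition turns out unsatisfiable behaves like
# a squashed misprediction. Variable names and values here are purely for illustration.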
def _handleWrTmpLoadWithPossibleForwarding(self, state, successors, stmt):
# we duplicate the processing for WrTmp loads ourselves, because we potentially need to fork during load processing
# this is basically an inlined version of what goes on in angr for a WrTmp load, patched to handle possible forwarding
load = stmt.data
with state.history.subscribe_actions() as data_deps:
state._inspect('expr', BP_BEFORE, expr=load)
load_size_bits = pyvex.const.get_type_size(load.type)
load_size_bytes = load_size_bits // state.arch.byte_width
with state.history.subscribe_actions() as addr_actions:
addr = self.handle_expression(state, load.addr)
if angr.options.UNINITIALIZED_ACCESS_AWARENESS in state.options:
if getattr(addr._model_vsa, 'uninitialized', False):
raise SimUninitializedAccessError('addr', addr)
if angr.options.DO_LOADS not in state.options:
results = (state, state.solver.Unconstrained("load_expr_%#x_%d" % (state.scratch.ins_addr, state.scratch.stmt_idx), load_size_bits))
else:
results = performLoadWithPossibleForwarding(state, addr, load_size_bytes, load_endness=load.endness)
for (l_state, l_value) in results:
if load.type.startswith('Ity_F'):
l_value = l_value.raw_to_fp()
if angr.options.TRACK_MEMORY_ACTIONS in l_state.options:
addr_ao = SimActionObject(addr, deps=addr_actions, state=l_state)
r = SimActionData(l_state, l_state.memory.id, SimActionData.READ, addr=addr_ao, size=load_size_bits, data=l_value)
l_state.history.add_action(r)
if angr.options.SIMPLIFY_EXPRS in l_state.options:
l_value = state.solver.simplify(l_value)
if l_state.solver.symbolic(l_value) and angr.options.CONCRETIZE in l_state.options:
concrete_value = l_state.solver.BVV(l_state.solver.eval(l_value), len(l_value))
l_state.add_constraints(l_value == concrete_value)
l_value = concrete_value
l_state._inspect('expr', BP_AFTER, expr=load, expr_result=l_value)
l_state.scratch.store_tmp(stmt.tmp, l_value, deps=data_deps)
# now we tell angr about the fork, so it continues executing the state
if l_state is not state:
# For these "new states" (which angr currently doesn't know about), we
# also have to finish the current instruction for the state: we will be
# "branching" to the next instruction, and don't want to skip the rest
# of the VEX statements in this instruction
# we do this by executing the entire current irsb (basic block), but with
# arguments to _handle_irsb such that only a few statements (those
# between where we are and where the next instruction starts) are executed
(next_instr_addr, next_instr_stmt_idx) = nextInstruction(state.scratch.irsb, stmt)
self._handle_irsb(l_state, successors, l_state.scratch.irsb, state.scratch.stmt_idx+1, next_instr_stmt_idx-1 if next_instr_stmt_idx is not None else None, None)
# finally, we tell angr about the new state, so it will continue executing it
# (and we tell it to start executing at whatever the next instruction is)
l.debug("time {}: forking for misforwarding on a load of addr {}".format(state.spec.ins_executed, addr))
target = next_instr_addr if next_instr_addr is not None else self.handle_expression(l_state, l_state.scratch.irsb.next) # if next_instr_addr is None, then target the first instruction of the next irsb
jumpkind = 'Ijk_Boring' # seems like a reasonable choice? what is this used for?
guard = claripy.BVV(1, 1) # boolean True
successors.add_successor(l_state, target, guard, jumpkind, add_guard=False, exit_stmt_idx=None, exit_ins_addr=None)
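# Editor's note: performLoadWithPossibleForwarding (defined elsewhere in this module) is
# used above as if it returns an iterable of (state, value) pairs -- one per plausible
# source of the loaded value, e.g. committed memory versus an in-flight store that could
# be (mis)forwarded. Roughly:
#   [(state_committed, value_from_memory), (forked_state, value_from_pending_store)]
# Only the extra, forked states need the manual "finish this IRSB, then add_successor"
# treatment above; the original state keeps flowing through the normal engine loop.
# The pair names shown are illustrative, not taken from the original code.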
def _handleLoadGWithPossibleForwarding(self, state, successors, stmt):
# Like for WrTmpLoads, we also duplicate the processing for LoadG's ourselves, because we potentially need to fork during load processing
# this is again basically an inlined version of what goes on in angr for a LoadG, patched to handle possible forwarding
with state.history.subscribe_actions() as addr_deps:
addr = self.handle_expression(state, stmt.addr)
with state.history.subscribe_actions() as alt_deps:
alt = self.handle_expression(state, stmt.alt)
with state.history.subscribe_actions() as guard_deps:
guard = self.handle_expression(state, stmt.guard)
if guard is not None and state.solver.satisfiable(extra_constraints=[claripy.Not(guard)]):
raise ValueError("not implemented yet: conditional load with condition that could be false")
read_type, converted_type = stmt.cvt_types
read_size_bits = pyvex.const.get_type_size(read_type)
converted_size_bits = pyvex.const.get_type_size(converted_type)
read_size = read_size_bits // state.arch.byte_width
results = performLoadWithPossibleForwarding(state, addr, read_size, load_endness=stmt.end)
for (l_state, l_value) in results:
if read_size_bits == converted_size_bits:
converted_expr = l_value
elif "S" in stmt.cvt:
converted_expr = l_value.sign_extend(converted_size_bits - read_size_bits)
elif "U" in stmt.cvt:
converted_expr = l_value.zero_extend(converted_size_bits - read_size_bits)
else:
raise SimStatementError("Unrecognized IRLoadGOp %s!" % stmt.cvt)
l_value = l_state.solver.If(guard != 0, converted_expr, alt)
l_state.scratch.store_tmp(stmt.dst, l_value, deps=addr_deps + alt_deps + guard_deps)
if angr.options.TRACK_MEMORY_ACTIONS in l_state.options:
data_ao = SimActionObject(converted_expr)
alt_ao = SimActionObject(alt, deps=alt_deps, state=l_state)
addr_ao = SimActionObject(addr, deps=addr_deps, state=l_state)
guard_ao = SimActionObject(guard, deps=guard_deps, state=l_state)
size_ao = SimActionObject(converted_size_bits)
r = SimActionData(l_state, l_state.memory.id, SimActionData.READ, addr=addr_ao, data=data_ao, condition=guard_ao, size=size_ao, fallback=alt_ao)
l_state.history.add_action(r)
# for comments on the below, see comments in our handling of WrTmp loads above
if l_state is not state:
(next_instr_addr, next_instr_stmt_idx) = nextInstruction(state.scratch.irsb, stmt)
self._handle_irsb(l_state, successors, l_state.scratch.irsb, state.scratch.stmt_idx+1, next_instr_stmt_idx-1 if next_instr_stmt_idx is not None else None, None)
if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
or br[0] < 0 or br[1] < 0:
# If not, just return the image as is
target_weight[joint_id] = 0
continue
# Generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, target_weight
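# Editor's sketch (values are illustrative, not from the dataset): the Gaussian pasting
# above can be reproduced standalone for one keypoint.
#   import numpy as np
#   sigma = 2; tmp_size = 3 * sigma; size = 2 * tmp_size + 1          # 13x13 kernel
#   x = np.arange(0, size, 1, np.float32); y = x[:, np.newaxis]
#   g = np.exp(-((x - tmp_size) ** 2 + (y - tmp_size) ** 2) / (2 * sigma ** 2))
#   # g peaks at exactly 1.0 in its centre (the kernel is deliberately unnormalized);
#   # g_x/g_y and img_x/img_y above then clip the kernel against the heatmap borders
#   # before pasting it at the keypoint location.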
def __getitem__(self, idx):
vid_info = self.samples[idx]
base_path = vid_info['base_path']
vid_size = vid_info['frame_size']
input_data = []
np.zeros((self.clip_length, self.final_shape[0], self.final_shape[1], 3)) - 1
bbox_data = np.zeros((self.clip_length, 4)) - 1
hand_crops = np.zeros((self.clip_length, 4)) - 1
hand_pts_coords = np.zeros((self.clip_length, self.num_keypoints, 3)) - 1
org_hand_pts = np.zeros((self.clip_length, self.num_keypoints, 2)) - 1
obj_ids = np.zeros(self.clip_length, dtype=np.int64) - 1
labels = np.zeros(self.clip_length) - 1
unannotated = np.zeros((self.clip_length, 21), dtype=np.int32) - 1 # 21 keypoints
np.zeros((self.clip_length, 21), dtype=np.int32)
padding = np.zeros((self.clip_length, 4), dtype=np.int32) # pl, pt, pr, pb
target = np.zeros((self.clip_length, self.num_keypoints, self.heatmap_size[1], self.heatmap_size[0]), dtype=np.float32) - 1
target_weight = np.zeros((self.clip_length, self.num_keypoints, 1), dtype=np.float32) - 1
frame_ids = np.zeros(self.clip_length, dtype=np.int64)
frame_paths = []
for frame_ind in range(len(vid_info['frames'])):
frame = vid_info['frames'][frame_ind]
width, height = vid_info['frame_size']
frame_path = frame['img_path']
frame['vid_id']
frame_id = frame['frame_id']
# Extract bbox and label data from video info
frame_paths.append(frame_path)
frame_ids[frame_ind] = frame_id
# Load frame, convert to RGB from BGR and normalize from 0 to 1
input_data = cv2.imread(frame_path)[..., ::-1]
for obj in frame['objs']:
trackid = obj['trackid'] # Let's ignore trackid for now, only one annotation per image
obj_id = obj['id']
label = 0 if obj['c'] == 'left' else 1 # 0: left hand, 1: right hand
unann = obj['occ']
obj_bbox = obj['bbox'] # [xmin, ymin, xmax, ymax]
hand_pts = obj['hand_pts'] # 21 points (x,y,visibility)
xmin, ymin, xmax, ymax = obj_bbox
# ensure bounding box encompasses all keypoints - error occurs otherwise
hand_pts = np.array(hand_pts).reshape((self.num_keypoints, 3))
_mask = hand_pts[:, -1] > 0
xpt_max, ypt_max, _ = np.max(hand_pts[_mask], axis=0)
xpt_min, ypt_min, _ = np.min(hand_pts[_mask], axis=0)
xtl_adjust = np.clip(xmin - xpt_min, a_min=0, a_max=None)
ytl_adjust = np.clip(ymin - ypt_min, a_min=0, a_max=None)
xbr_adjust = np.clip(xpt_max - xmax, a_min=0, a_max=None)
ybr_adjust = np.clip(ypt_max - ymax, a_min=0, a_max=None)
xmin -= xtl_adjust
ymin -= ytl_adjust
xmax += xbr_adjust
ymax += ybr_adjust
# expand area around bbox
sc = self.sc
w = xmax - xmin
h = ymax - ymin
cx = xmin + w / 2
cy = ymin + h / 2
w *= sc
h *= sc
xmin = int(cx - (w / 2))
ymin = int(cy - (h / 2))
xmax = int(cx + (w / 2))
ymax = int(cy + (h / 2))
# Pad images so hand is still in center of crop
pl = pt = pr = pb = 0
if xmin < 0:
pl = abs(xmin)
if ymin < 0:
pt = abs(ymin)
if xmax > (width + pl):
pr = abs(width - xmax)
if ymax > (height + pt):
pb = abs(height - ymax)
hand_crop = [xmin + pl, ymin + pt, xmax, ymax]
# in case annotations include invalid coords
vis = hand_pts[:, -1]
if self.mask_occ:
for i, v in enumerate(vis):
if v == 1:
unann[i] = True
org_hand_pts[frame_ind] = hand_pts[:, :2]
hand_pts += np.array([[pl, pt, 0]]) # Adjust keypoints by padding
# hand_pts[:,0] = np.clip(hand_pts[:,0], 0, width)
# hand_pts[:,1] = np.clip(hand_pts[:,1], 0, height)
hand_pts[:, 2] = np.clip(hand_pts[:, 2], 0, 1)
# Let's make the obj_id numeric only
obj_id = int(''.join((obj_id.split('_')[-4:])))
bbox_data[frame_ind] = obj_bbox
obj_ids[frame_ind] = obj_id
labels[frame_ind] = label
hand_pts_coords[frame_ind] = hand_pts
hand_crops[frame_ind] = hand_crop
unannotated[frame_ind] = unann
padding[frame_ind] = [pl, pt, pr, pb]
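# Editor's note (illustrative numbers): with sc = 2.2 and a tight 100x100 px hand box
# centred at (50, 50), the expanded box spans (-60, -60) to (160, 160); the negative
# corner is absorbed by padding the image with pl = pt = 60 black pixels, and the
# keypoints are shifted by the same (pl, pt) above so the hand stays centred in the
# crop that is taken next.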
# Crop hand and resize, perform same transforms to ground truth keypoints
mask = [True if (1 - o) else False for o in unann] # need a mask because invalid keypoints mess up the preprocessing
vid_data, temp, out_params = self.transforms(cv2.copyMakeBorder(input_data, pt, pb, pl, pr, cv2.BORDER_CONSTANT, value=0)[None], {'bbox_data': hand_pts_coords[None, :, mask, :2], 'hand_crop': hand_crop, 'label': labels})
flipped = out_params['flip']
angle = out_params.get('out_rot', None)
hand_pts_coords[None, :, mask, :2] = temp
obj_trgt, obj_trgt_wght = self.generate_target(hand_pts_coords[0])
target[frame_ind] = obj_trgt
target_weight[frame_ind] = obj_trgt_wght
np.zeros((height, width, 3), dtype=np.float32)
aux_input = np.zeros((self.num_keypoints, self.heatmap_size[1], self.heatmap_size[0]), dtype=np.float32)
aux_data = np.zeros((self.clip_length, self.final_shape[0], self.final_shape[1], 3), dtype=np.float32)
aux_pts_coords = np.zeros((self.clip_length, self.num_keypoints, 3)) - 1
obj_crop = obj_pad = [-1, -1, -1, -1]
if self.t1_to_t0[frame_id] is not None and self.t1_to_t0[frame_id]['frame_id'] is not None: # 1st frame may not have a prior
# Extract and resize this object's keypoints on given frame
img_objs = self.img_id_to_kpts[self.t1_to_t0[frame_id]['frame_id']]
for key, obj in img_objs.items():
if key != trackid: # Only this current object
continue
np.copy(obj['hand_pts'])
aux_frame_path = self.t1_to_t0[frame_id]['frame_path']
# print('Aux frame path: {}'.format(aux_frame_path))
'''
obj_kpts[:,2] = np.clip(obj_kpts[:,2], 0, 1)
#Apply same transformation to keypoints TODO
#flip prior if target if flip augmentation was applied
if flipped:
#flip all x-positions
obj_kpts[:,0] = self.image_width - obj_kpts[:,0]
obj_trgt, _ = self.generate_target(obj_kpts)
aux_input = obj_trgt
'''
try:
obj_kpts = np.copy(img_objs[trackid]['hand_pts'])
obj_mask = img_objs[trackid]['mask']
img_objs[trackid]['bbox']
img_objs[trackid]['center']
obj_crop = img_objs[trackid]['crop']
pl, pt, pr, pb = img_objs[trackid]['padding']
obj_pad = [pl, pt, pr, pb]
obj_kpts[:, 2] = np.clip(obj_kpts[:, 2], 0, 1)
aux_pts_coords[0] = obj_kpts
aux_input_data = cv2.imread(os.path.join(base_path, aux_frame_path))[..., ::-1]
aux_data, temp, out_params = self.transforms(cv2.copyMakeBorder(aux_input_data, pt, pb, pl, pr, cv2.BORDER_CONSTANT, value=0)[None], {'bbox_data': obj_kpts[None, None, obj_mask, :2], 'hand_crop': obj_crop, 'label': labels, 'in_rot': angle})
aux_data = np.array(aux_data)
# this section may be unnecessary
'''
aux_pts_coords[None,:,obj_mask,:2] = temp
#flip prior, if target flip augmentation was applied
if flipped and not out_params['flip']:
aux_data[0] = cv2.flip(aux_data[0], 1)
#flip all x-positions
aux_pts_coords[...,0] = self.image_width - aux_pts_coords[...,0]
elif not flipped and out_params['flip']:
#Not flipped in target, but flipped in prior
#ideally remove this randomization
aux_data[0] = cv2.flip(aux_data[0], 1)
#flip all x-positions
aux_pts_coords[...,0] = self.image_width - aux_pts_coords[...,0]
#transform keypoints to fit current image crop, and then generate that as a heatmap prior
#important for small variations in bounding box placement and aspect ratio
###Unscale from time t-1 params###
if flipped:
aux_pts_coords[...,0] = (self.image_width - aux_pts_coords[...,0]) #undo any flipping
#scale coordinates to crop size
obj_crop_h = (obj_crop[3]-obj_crop[1])
obj_crop_w = (obj_crop[2]-obj_crop[0])
aux_pts_coords[:,:,0] *= (obj_crop_w/self.image_width)
aux_pts_coords[:,:,1] *= (obj_crop_h/self.image_height)
#approx to int
aux_pts_coords = np.ceil(aux_pts_coords)
#Undo crop
aux_pts_coords[:,:,0] += obj_crop[0]
aux_pts_coords[:,:,1] += obj_crop[1]
'''
# Subtract padding if was added
aux_pts_coords = np.copy(obj_kpts)[None]
aux_pts_coords[:, :, 0] -= pl
aux_pts_coords[:, :, 1] -= pt
###Rescale to time t properties###
crop_xmin, crop_ymin, crop_xmax, crop_ymax = hand_crop
pl, pt, pb, pr = padding[frame_ind]
mask = np.array(aux_pts_coords[..., -1], dtype=bool)
# adjust keypoints by crop
aux_pts_coords[mask, 0], aux_pts_coords[mask, 1] = crop_coords(aux_pts_coords[mask, 0] + pl, aux_pts_coords[mask, 1] + pt, crop_xmin, crop_ymin, crop_xmax, crop_ymax)
# add rotation if necessary
if angle is not None:
aux_pts_coords[:, mask.squeeze(), :2] = rotate_coords(aux_pts_coords[:, mask.squeeze(), :2], \
(crop_ymax - crop_ymin, crop_xmax - crop_xmin), angle)
# adjust for resized input image
aux_pts_coords[mask, 0], aux_pts_coords[mask, 1] = resize_pt_coords(aux_pts_coords[mask, 0], aux_pts_coords[mask, 1], (crop_ymax - crop_ymin, crop_xmax - crop_xmin), (self.image_height, self.image_width))
if flipped:
if not out_params['flip']:
aux_data[0] = cv2.flip(aux_data[0], 1)
aux_pts_coords[..., 0] = (self.image_width - aux_pts_coords[..., 0]) # Add flipping, if any
temp[..., 0] = (self.image_width - temp[..., 0])
elif not flipped and out_params['flip']:
# Not flipped in target, but flipped in prior
# ideally remove this randomization
aux_data[0] = cv2.flip(aux_data[0], 1)
obj_trgt, _ = self.generate_target(aux_pts_coords[0])
aux_input = obj_trgt
except KeyError: # No kpts to crop around image or object doesn't exist at frame
pass
'''
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig = plt.figure(1)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(334)
pl, pt, pr, pb = padding[frame_ind]
ax1.imshow(cv2.copyMakeBorder(input_data, pt, pb, pl, pr, cv2.BORDER_CONSTANT, value=0))
va='center')
plt.show()
def save_to_netcdf(self, out_path):
"""Saves the present state of the grid to a netCDF4 file
:param out_path: Path to the output file
:type out_path: str
"""
# Save the data to NetCDF:
ncout = Dataset(out_path, mode='w', format='NETCDF4')
# Create data dimensions:
ncout.createDimension('lat', self.ydim)
ncout.createDimension('lon', self.xdim)
# create lon axis:
lon = ncout.createVariable('lon', np.float32, ('lon',))
lon.units = 'degrees_east'
lon.long_name = 'longitude'
lon[:] = self.out_lon
# create lat axis:
lat = ncout.createVariable('lat', np.float32, ('lat',))
lat.units = 'degrees_north'
lat.long_name = 'latitude'
lat[:] = self.out_lat
# create data axes:
# (1) Cloud-sliced NO2:
csutno2 = ncout.createVariable('csutno2', np.float32, ('lon', 'lat'))
csutno2.units = 'pptv'
csutno2.long_name = 'UT NO2 mixing ratio (180-450 hPa) obtained using cloud-slicing'
csutno2[:] = self.g_no2_vmr
# (2a) Double-weighting error:
utdblerr = ncout.createVariable('utdblerr', np.float32, ('lon', 'lat'))
utdblerr.units = 'pptv'
utdblerr.long_name = 'Standard error of the NO2 mixing ratios in the UT (180-450 hPa) obtained using cloud-slicing'
utdblerr[:] = self.g_slope_err
# (2b) Gaussian-weighting error:
utgauserr = ncout.createVariable('utgauserr', np.float32, ('lon', 'lat'))
utgauserr.units = 'pptv'
utgauserr.long_name = 'Standard error of the NO2 mixing ratios in the UT (180-450 hPa) obtained using cloud-slicing'
utgauserr[:] = self.g_gaus_wgt
# (3) Number of observations in each gridsquare:
nobs = ncout.createVariable('nobs', np.float32, ('lon', 'lat'))
nobs.units = 'unitless'
nobs.long_name = 'Number of observations in each gridsquare used to obtain cloud-sliced UT NO2 mixing ratios'
nobs[:] = self.g_cnt
# (4) Mean cloud pressure for season between 450-180 hPa:
utcld = ncout.createVariable('utcld', np.float32, ('lon', 'lat'))
utcld.units = 'hPa'
utcld.long_name = 'Mean cloud pressure between 450 and 180 hPa'
utcld[:] = self.g_cld_p
# (5) Mean NO2 mixing ratio at 450-180 hPa for scenes with clouds:
cldutno2 = ncout.createVariable('cldutno2', np.float32, ('lon', 'lat'))
cldutno2.units = 'pptv'
cldutno2.long_name = 'UT NO2 mixing ratio (180-450 hPa) obtained if clouds are present'
cldutno2[:] = self.true_no2
# (6) Mean NO2 mixing ratio at 450-180 hPa under all conditions (all-sky):
askutno2 = ncout.createVariable('askutno2', np.float32, ('lon', 'lat'))
askutno2.units = 'pptv'
askutno2.long_name = 'UT NO2 mixing ratio (180-450 hPa) obtained under all conditions (all-sky)'
askutno2[:] = self.g_askut_no2
# (7) Cloud fraction:
utcldfrc = ncout.createVariable('utcldfrc', np.float32, ('lon', 'lat'))
utcldfrc.units = 'unitless'
utcldfrc.long_name = 'GEOS-FP cloud fraction obtained as sum of 3D cloud fractions across range of interest (180-450 hPa)'
utcldfrc[:] = self.g_cld_fr
# (8) O3 sampled coincident with cloud-slicing retrieval:
uto3 = ncout.createVariable('uto3', np.float32, ('lon', 'lat'))
uto3.units = 'ppbv'
uto3.long_name = 'GEOS-Chem ozone obtained coincident with cloud-sliced NO2'
uto3[:] = self.true_o3
# Close the file:
ncout.close()
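# Editor's sketch: a file written by save_to_netcdf can be read back with the netCDF4
# API already imported in this module (the path below is hypothetical; variable names
# are the ones created above):
#   nc = Dataset('cloud-sliced-ut-no2.nc', mode='r')
#   ut_no2 = nc.variables['csutno2'][:]   # cloud-sliced UT NO2, pptv
#   n_obs = nc.variables['nobs'][:]       # observations per grid square
#   nc.close()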
class GeosChemDay:
"""A class for reading, preprocessing and accessing Geoschem data on a given day
"""
def __init__(self, file_path, temperature_correction=False, cloud_height_test=False):
"""Reads the data at file_path and returns a GeosChemDay object containing that data
:param file_path: Path to the netcdf4 file containing the GeosChem data
:type file_path: str
:param temperature_correction: Whether to apply temperature correction
:type temperature_correction: bool
:param cloud_height_test: Whether to test effect of systematic underestimate in cloud height
:type cloud_height_test: bool
:returns: A GeosChemDay class
:rtype: GeosChemDay
"""
print('File path: ',file_path, flush=True)
self.temperature_correction = temperature_correction
self.cloud_height_test = cloud_height_test
# Read dataset:
fh = Dataset(file_path, mode='r')
# Extract data of interest:
tlon, tlat, tgcno2, tcldfr, tcldhgt, tadn, tbxhgt, tpedge, tpause, tgco3, tdegk = \
fh.variables['LON'], fh.variables['LAT'], \
fh.variables['IJ-AVG-S__NO2'], fh.variables['TIME-SER__CF'], \
fh.variables['TIME-SER__CThgt'], fh.variables['TIME-SER__AIRDEN'], \
fh.variables['BXHGHT-S__BXHEIGHT'], fh.variables['PEDGE-S__PSURF'], \
fh.variables['TR-PAUSE__TP-PRESS'], fh.variables['IJ-AVG-S__O3'], \
fh.variables['DAO-3D-S__TMPU']
self.t_lon = tlon[:]
self.t_lat = tlat[:]
self.t_gc_no2 = tgcno2[:]
self.t_cld_fr = tcldfr[:]
self.t_cld_hgt = tcldhgt[0, :, :]
self.t_adn = tadn[:] # in molec/cm3
self.t_bx_hgt = tbxhgt[:]
self.t_p_edge = tpedge[:]
self.t_pause = tpause[0, :, :]
self.t_gc_o3 = tgco3[:]
self.t_deg_k = tdegk[:]
# Convert box height from m to cm:
self.t_bx_hgt = self.t_bx_hgt * 1e2
if self.cloud_height_test:
# Lower cloud heights by 1 km to roughly mimic lower altitude
# clouds retrieved for TROPOMI assuming clouds are reflective
# boundaries with uniform reflectivity:
# Calculate cloud top height in m:
t_cld_hgt = pres2alt(self.t_cld_hgt*1e2)
# Lower the clouds by 1 km (1000 m) (this won't work for low-altitude
# clouds):
t_cld_hgt = t_cld_hgt - 1e3
# Convert back to Pa and convert that to hPa:
self.t_cld_hgt = alt2pres(t_cld_hgt)*1e-2
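# Editor's note: pres2alt/alt2pres (defined elsewhere in this codebase) are assumed here
# to be mutually inverse pressure<->altitude conversions, so the block above is simply
# "pressure -> altitude, subtract 1 km, altitude -> pressure". As an illustrative
# magnitude under a standard atmosphere, a 500 hPa cloud top (~5.6 km) lowered by 1 km
# comes back as roughly 570 hPa.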
# Get outputs ready here for tidiness:
self.no2_2d = None
self.trop_col = None
self.strat_col = None
self.gcutno2 = None
self.gascnt = None
self.grad = None
self.level_min = None
self.level_max = None
self.askind = None
def prepare_no2_pixel(self, x, y):
"""Extracts preprocesed no2 from the geoschem pixel at x,y
:param x: The x index of the pixel
:type x: int
:param y: The y index of the pixel
:type y: int
"""
# Calculate corresponding mid-pressure values:
tp_mid = np.zeros(len(self.t_p_edge[:, y, x]))
# Get mid-pressure values, except for highest layer:
for k in range(len(self.t_p_edge[:, y, x]) - 1):
tp_mid[k] = np.multiply(0.5, (self.t_p_edge[k, y, x] + self.t_p_edge[k + 1, y, x]))
# Estimate mid-pressure for the highest layer (doesn't need to
# be accurate, as it lies beyond the range of interest):
# Data output from the model includes 47 vertical layers. This means that only 46 pressure centres can be calculated as the calculation requires pressure edges.
tp_mid[46] = np.multiply(0.5, (self.t_p_edge[46, y, x] + (self.t_p_edge[46, y, x] - 0.1)))
# Get model layer of tropopause:
tppind = np.argmin(abs(tp_mid - self.t_pause[y, x]))
# Get indices that fall between 450 and 180 hPa for estimating
# "true' all-sky UT NO2 and partial columns:
lind = np.where((tp_mid >= P_MIN) & (tp_mid <= P_MAX))[0]
# Get UT NO2 under "true" all-sky conditions:
# Make sure this is below the tropopause:
# If below tropopause, use full extent (180-450 hPa):
if lind[len(lind) - 1] <= tppind:
self.askind = lind
# If above tropopause, trim to tropopause-450 hPa:
if lind[len(lind) - 1] > tppind:
self.askind = lind[np.where(lind <= tppind)[0]]
# If tropopause below 450 hPa, skip entirely:
if self.t_pause[y, x] > P_MAX:
#print("Tropopause less than P_MAX in geoschem pixel x:{}, y:{}".format(x,y))
return # continue
# Get Gaussian weights that allocate higher weights to points
# closest to the pressure centre (315 hPa):
# Equation is:
# w = exp( -(p - 315)^2 / (2 * 135^2) ), where 315 hPa is the centre and
# 135 hPa is the standard deviation.
self.twgt = np.exp((-(tp_mid[self.askind] - 315) ** 2) / (2 * 135 ** 2))
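# Editor's note (illustrative values): with a 315 hPa centre and 135 hPa standard
# deviation, the weight is 1.0 at 315 hPa and exp(-0.5) ~ 0.61 at both the 180 and
# 450 hPa edges, so levels near the middle of the window dominate the weighted mean.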
# Get model level of cloud top height closest to lowest
# pressure-altitude of interest (P_MIN):
self.lcld = np.argmin(abs(self.t_cld_hgt[y, x] - tp_mid))
# Skip if cloud top height outside pressure range of interest:
self.level_min, self.level_max = np.amin(lind), np.amax(lind)
if (self.temperature_correction):
# Equation is from the TROPOMI product ATBD (p. 32, Eqn 18)
# (product document abbreviation: S5P-KNMI-L2-0005-RP)
self.temp_corr = 1 - (3.16e-3 * (self.t_deg_k[self.level_min:, y, x] - 220.)) + \
(3.39e-6 * ((self.t_deg_k[self.level_min:, y, x] - 220) ** 2))
else:
# Set to 1 so that no scaling is applied:
# (there might be a more elegant way to do this)
self.temp_corr = np.ones(len(self.t_gc_no2[self.level_min:, y, x]))
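# Editor's note (illustrative values): the correction defined above equals 1.0 at the
# 220 K reference temperature, ~0.92 at 245 K and ~1.06 at 200 K, i.e. a few-percent
# rescaling of the NO2 profile either side of 220 K.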
# Calculate NO2 gradient:
regr = LinearRegression()
# Pressure (hPa)
x_vals=tp_mid[self.level_min:self.level_max].reshape(-1,1)
# NO2 (ppbv)
y_vals=self.t_gc_no2[self.level_min:self.level_max, y, x].reshape(-1,1)
# NO2 (pptv)
y_vals=y_vals*1e3
# Perform regression:
regr.fit(x_vals,y_vals)
# Define gradient from regression slope (pptv/hPa):
self.grad = regr.coef_
# Get partial NO2 column in molec/m2 from cloud top height
# to highest model level (output up to level 47):
# print(t_gc_no2[self.level_min:tppind,y,x])
# print(t_gc_no2[self.level_min:tppind,y,x]*1.5)
self.no2_2d = np.sum(self.t_gc_no2[self.level_min:, y, x]
* 1e-5
* self.temp_corr
* self.t_adn[self.level_min:, y, x]
* self.t_bx_hgt[self.level_min:, y, x])
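# Editor's note: the 1e-5 factor above is consistent with converting ppbv (1e-9 mole
# fraction) x air number density (molec/cm3) x box height (cm) from molec/cm2 to
# molec/m2, since 1e-9 (ppbv) * 1e4 (cm2 -> m2) = 1e-5.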
# Get stratospheric column from 180 hPa aloft:
# Previous approach (remove when model simulations done):
# tppind=np.where(tpmid<180.)[0]
self.strat_col = np.sum(self.t_gc_no2[tppind:, y, x]
* 1e-5 * self.t_adn[tppind:, y, x]
* self.t_bx_hgt[tppind:, y, x])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Shorten directory name to up to "GC/", then define the subdirectory
# as 'geosfp' + dirreg + 'iccw/' in get_file_list.
# This is now done in get_gc_file_list
parser.add_argument("--gc_dir")
parser.add_argument("--out_dir")
parser.add_argument("--resolution", default="4x5", help="Can be 8x10, 4x5, 2x25 or 1x1")
parser.add_argument("--region", default="EU", help="Can be EU, NA, or CH")
parser.add_argument("--strat_filter_threshold", default="002", help="")
#parser.add_argument("--start_date", default="2016-06-01")
#parser.add_argument("--end_date", default="2017-08-31")
parser.add_argument("-p", "--plot", type=bool, default=False)
parser.add_argument("--do_temp_correct", type=bool)
parser.add_argument("--apply_cld_frac_filter", type=bool)
parser.add_argument("--do_cld_hght_test", type=bool)
args = parser.parse_args()
# Get files:
gc_dir = args.gc_dir
STR_RES = args.resolution
REGION = args.region
# desktop-rucio: tests/grid/test_datasets.py
# Test out everything with datasets.
from src.grid.datasets import dataset_mgr, DatasetQueryStatus
from src.grid.rucio import RucioException
from tests.grid.utils_for_tests import simple_dataset, dummy_logger
from time import sleep
import datetime
import os
import pytest
@pytest.fixture()
def rucio_2file_dataset(simple_dataset):
class rucio_dummy:
def __init__(self, ds):
self._ds = ds
self.CountCalled = 0
self.CountCalledDL = 0
self._cache_mgr = None
def get_file_listing(self, ds_name, log_func = None):
self.CountCalled += 1
if ds_name == self._ds.Name:
return self._ds.FileList
return None
def download_files(self, ds_name, data_dir, log_func = None):
if self._cache_mgr is not None:
self._cache_mgr.add_ds(self._ds)
if log_func is not None:
log_func('downloading ' + ds_name)
self.CountCalledDL += 1
return rucio_dummy(simple_dataset)
@pytest.fixture()
def rucio_do_nothing():
class rucio_dummy:
def __init__(self):
self.CountCalled = 0
self.CountCalledDL = 0
def get_file_listing(self, ds_name, log_func = None):
self.CountCalled += 1
sleep(1)
return None
def download_files(self, ds_name, data_dir, log_func = None):
self.CountCalledDL += 1
sleep(1)
return rucio_dummy()
@pytest.fixture()
def rucio_2file_dataset_take_time(simple_dataset):
class rucio_dummy:
def __init__(self, ds):
self._ds = ds
self.CountCalled = 0
self.CountCalledDL = 0
self._cache_mgr = None
self.DLCalled = False
def get_file_listing(self, ds_name, log_func = None):
sleep(0.005)
self.CountCalled += 1
if ds_name == self._ds.Name:
return self._ds.FileList
return None
def download_files(self, ds_name, data_dir, log_func = None):
self.DLCalled = True
sleep(0.005)
if self._cache_mgr is not None:
self._cache_mgr.add_ds(self._ds)
self.CountCalledDL += 1
return rucio_dummy(simple_dataset)
@pytest.fixture()
def rucio_2file_dataset_with_fails(simple_dataset):
class rucio_dummy:
def __init__(self, ds):
self._ds = ds
self.CountCalled = 0
self.CountCalledDL = 0
self._cache_mgr = None
self.DLSleep = None
def get_file_listing(self, ds_name, log_func = None):
self.CountCalled += 1
if self.CountCalled < 5:
raise RucioException("Please Try again Due To Internet Being Out")
if ds_name == self._ds.Name:
return self._ds.FileList
return None
def download_files(self, ds_name, data_dir, log_func = None):
self.CountCalledDL += 1
if self.DLSleep is not None:
sleep(self.DLSleep)
if self.CountCalledDL < 5:
raise RucioException("Please try again due to internet being out")
if self._cache_mgr is not None:
self._cache_mgr.add_ds(self._ds)
return rucio_dummy(simple_dataset)
@pytest.fixture()
def rucio_2file_dataset_shows_up_later(simple_dataset):
class rucio_dummy:
def __init__(self, ds):
self._ds = ds
self.CountCalled = 0
def get_file_listing(self, ds_name, log_func = None):
self.CountCalled += 1
if self.CountCalled < 2:
return None
if ds_name == self._ds.Name:
return self._ds.FileList
return None
return rucio_dummy(simple_dataset)
@pytest.fixture()
def cache_empty():
'Create an empty cache that will save anything saved in it.'
class cache_good_dummy():
def __init__(self):
self._ds_list = {}
self._in_progress = []
self._in_download = []
self._downloaded_ds = {}
def get_download_directory(self):
return 'totally-bogus'
def add_ds(self, ds_info):
self._downloaded_ds[ds_info.Name] = ds_info
def get_listing(self, ds_name):
if ds_name in self._ds_list:
return self._ds_list[ds_name]
return None
def save_listing(self, ds_info):
self._ds_list[ds_info.Name] = ds_info
self._in_progress.remove(ds_info.Name)
def mark_query(self, ds_name):
self._in_progress.append(ds_name)
def query_in_progress(self, ds_name):
return ds_name in self._in_progress
def get_queries(self):
return self._in_progress
def get_ds_contents(self, ds_name):
if ds_name in self._downloaded_ds:
return [f.filename for f in self._downloaded_ds[ds_name].FileList]
return None
def mark_downloading(self, ds_name):
self._in_download.append(ds_name)
def download_in_progress(self, ds_name):
return ds_name in self._in_download
def get_downloading(self):
return self._in_download
def mark_download_done(self, ds_name):
self._in_download.remove(ds_name)
return cache_good_dummy()
@pytest.fixture()
def cache_with_ds(cache_empty, simple_dataset):
'Create a cache with a dataset called dataset1'
cache_empty.add_ds(simple_dataset)
return cache_empty
def test_dataset_query_queued(rucio_2file_dataset, cache_empty):
'Queue a dataset'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
status, files = dm.get_ds_contents('a_dataset')
# Should have queued the result since this was a new ds manager
assert status == DatasetQueryStatus.query_queued
assert None is files
def wait_some_time(check):
'Simple method to wait until check returns false. Will wait up to about a second so as not to delay things before throwing an assert.'
counter = 0
while check():
sleep(0.01)
counter += 1
assert counter < 100
def test_dataset_query_resolved(rucio_2file_dataset, cache_empty, simple_dataset):
'Queue and look for a dataset query result'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
_ = dm.get_ds_contents(simple_dataset.Name)
# Wait for the dataset query to run
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 0)
# Now, make sure that we get back what we want here.
status, files = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
assert len(simple_dataset.FileList) == len(files)
# Make sure we didn't re-query for this.
assert 1 == rucio_2file_dataset.CountCalled
_ = cache_empty.get_listing(simple_dataset.Name)
def test_query_for_bad_dataset(rucio_2file_dataset, cache_empty, simple_dataset):
'Ask for a bad dataset, and get back a null'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
_ = dm.get_ds_contents('bogus_ds')
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 0)
# Make sure it comes back as bad.
status, files = dm.get_ds_contents('bogus_ds')
assert DatasetQueryStatus.does_not_exist == status
assert None is files
# Make sure that a timeout of an hour has been set on the dataset.
info = cache_empty.get_listing('bogus_ds')
assert datetime.datetime.now() == info.Created
def test_look_for_good_dataset_that_fails_a_bunch(rucio_2file_dataset_with_fails, cache_empty, simple_dataset):
'Queue and look for a good dataset that takes a few queries to show up with results'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset_with_fails, seconds_between_retries=0.01)
_ = dm.get_ds_contents(simple_dataset.Name)
# Wait for the dataset query to run
wait_some_time(lambda: rucio_2file_dataset_with_fails.CountCalled < 5)
# Now, make sure that we get back what we want and that the number of tries matches what we think
# it should have.
status, files = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
assert 5 == rucio_2file_dataset_with_fails.CountCalled
def test_two_queries_for_good_dataset(rucio_2file_dataset_take_time, cache_empty, simple_dataset):
'Make sure second query does not trigger second web download'
# Query twice, make sure we don't forget as we are doing this!
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset_take_time)
_ = dm.get_ds_contents(simple_dataset.Name)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.query_queued == status
# Wait for the dataset query to run
wait_some_time(lambda: rucio_2file_dataset_take_time.CountCalled == 0)
# Now, make sure that we get back what we want here.
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
# Make sure we didn't re-query for this, and the expiration date is not set.
# Make sure to wait long enough for other timing stuff above to fall apart.
sleep(0.02)
assert 1 == rucio_2file_dataset_take_time.CountCalled
def test_dataset_appears(rucio_2file_dataset_shows_up_later, cache_empty, simple_dataset):
'After a bad dataset has aged, automatically queue a new query'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset_shows_up_later)
_ = dm.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_2file_dataset_shows_up_later.CountCalled == 0)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.does_not_exist == status
# Query, but demand a quick re-check
status, _ = dm.get_ds_contents(simple_dataset.Name, maxAgeIfNotSeen=datetime.timedelta(seconds=0))
assert DatasetQueryStatus.query_queued == status
wait_some_time(lambda: rucio_2file_dataset_shows_up_later.CountCalled == 1)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
def test_dataset_always_missing_noretry(rucio_2file_dataset_shows_up_later, cache_empty, simple_dataset):
'Do not requery for the dataset'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset_shows_up_later)
_ = dm.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_2file_dataset_shows_up_later.CountCalled == 0)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.does_not_exist == status
# Query, but demand a quick re-check
status, _ = dm.get_ds_contents(simple_dataset.Name, maxAgeIfNotSeen=None)
assert DatasetQueryStatus.does_not_exist == status
assert 1 == rucio_2file_dataset_shows_up_later.CountCalled
def test_dataset_always_missing_longretry(rucio_2file_dataset_shows_up_later, cache_empty, simple_dataset):
'Do not requery for the dataset'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset_shows_up_later)
_ = dm.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_2file_dataset_shows_up_later.CountCalled == 0)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.does_not_exist == status
# Query, but demand a quick re-check
status, _ = dm.get_ds_contents(simple_dataset.Name, maxAgeIfNotSeen=datetime.timedelta(seconds=1000))
assert DatasetQueryStatus.does_not_exist == status
assert 1 == rucio_2file_dataset_shows_up_later.CountCalled
def test_good_dataset_retry(rucio_2file_dataset, cache_empty, simple_dataset):
'Do a requery for the dataset'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
_ = dm.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 0)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
# Query, but demand a quick re-check
status, _ = dm.get_ds_contents(simple_dataset.Name, maxAge=datetime.timedelta(seconds=0))
assert DatasetQueryStatus.query_queued == status
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 1)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
assert 2 == rucio_2file_dataset.CountCalled
def test_good_dataset_longretry(rucio_2file_dataset, cache_empty, simple_dataset):
'Do not requery for the dataset'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
_ = dm.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 0)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
# Query, but demand a quick re-check
status, _ = dm.get_ds_contents(simple_dataset.Name, maxAge=datetime.timedelta(seconds=1000))
assert DatasetQueryStatus.results_valid == status
assert 1 == rucio_2file_dataset.CountCalled
def test_good_dataset_maxAgeIfNotSeenNoEffect(rucio_2file_dataset, cache_empty, simple_dataset):
'Do not requery for the dataset'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
_ = dm.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 0)
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
# Query, but demand a quick re-check
status, _ = dm.get_ds_contents(simple_dataset.Name, maxAgeIfNotSeen=datetime.timedelta(seconds=0))
assert DatasetQueryStatus.results_valid == status
assert 1 == rucio_2file_dataset.CountCalled
def test_good_dataset_content_restart(rucio_do_nothing, rucio_2file_dataset, cache_empty, simple_dataset):
dm0 = dataset_mgr(cache_empty, rucio_mgr=rucio_do_nothing)
_ = dm0.get_ds_contents(simple_dataset.Name)
wait_some_time(lambda: rucio_do_nothing.CountCalled == 0)
# Start up a new one that should pick up the ball where it was dropped.
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
wait_some_time(lambda: rucio_2file_dataset.CountCalled == 0)
# Now, make sure that we get back what we want here.
status, _ = dm.get_ds_contents(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
def test_dataset_download_query(rucio_2file_dataset, cache_empty, simple_dataset):
'Queue a download and look for it to show up'
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
status, files = dm.download_ds(simple_dataset.Name)
assert files is None
assert DatasetQueryStatus.query_queued == status
def test_dataset_download_good(rucio_2file_dataset, cache_empty, simple_dataset):
'Queue a download and look for it to show up'
rucio_2file_dataset._cache_mgr = cache_empty
dm = dataset_mgr(cache_empty, rucio_mgr=rucio_2file_dataset)
_ = dm.download_ds(simple_dataset.Name)
# Wait for the dataset query to run
wait_some_time(lambda: rucio_2file_dataset.CountCalledDL == 0)
# Now, make sure that we get back what we want here.
status, files = dm.download_ds(simple_dataset.Name)
assert DatasetQueryStatus.results_valid == status
# osmnx
"""Interact with the OSM APIs."""
import datetime as dt
import json
import logging as lg
import re
import socket
import time
from collections import OrderedDict
from hashlib import sha1
from pathlib import Path
from urllib.parse import urlparse
import numpy as np
import requests
from dateutil import parser as date_parser
from . import projection
from . import settings
from . import utils
from . import utils_geo
from ._errors import CacheOnlyModeInterrupt
# capture getaddrinfo function to use original later after mutating it
_original_getaddrinfo = socket.getaddrinfo
def _get_osm_filter(network_type):
"""
Create a filter to query OSM for the specified network type.
Parameters
----------
network_type : string {"all_private", "all", "bike", "drive", "drive_service", "walk"}
what type of street network to get
Returns
-------
string
"""
# define built-in queries to send to the API. specifying way["highway"]
# means that all ways returned must have a highway tag. the filters then
# remove ways by tag/value.
filters = dict()
# driving: filter out un-drivable roads, service roads, private ways, and
# anything specifying motor=no. also filter out any non-service roads that
# are tagged as providing certain services
filters["drive"] = (
f'["highway"]["area"!~"yes"]{settings.default_access}'
f'["highway"!~"abandoned|bridleway|bus_guideway|construction|corridor|cycleway|elevator|'
f"escalator|footway|path|pedestrian|planned|platform|proposed|raceway|service|"
f'steps|track"]'
f'["motor_vehicle"!~"no"]["motorcar"!~"no"]'
f'["service"!~"alley|driveway|emergency_access|parking|parking_aisle|private"]'
)
# drive+service: allow ways tagged 'service' but filter out certain types
filters["drive_service"] = (
f'["highway"]["area"!~"yes"]{settings.default_access}'
f'["highway"!~"abandoned|bridleway|bus_guideway|construction|corridor|cycleway|elevator|'
f'escalator|footway|path|pedestrian|planned|platform|proposed|raceway|steps|track"]'
f'["motor_vehicle"!~"no"]["motorcar"!~"no"]'
f'["service"!~"emergency_access|parking|parking_aisle|private"]'
)
# walking: filter out cycle ways, motor ways, private ways, and anything
# specifying foot=no. allow service roads, permitting things like parking
# lot lanes, alleys, etc that you *can* walk on even if they're not
# exactly pleasant walks. some cycleways may allow pedestrians, but this
# filter ignores such cycleways.
filters["walk"] = (
f'["highway"]["area"!~"yes"]{settings.default_access}'
f'["highway"!~"abandoned|bus_guideway|construction|cycleway|motor|planned|platform|'
f'proposed|raceway"]'
f'["foot"!~"no"]["service"!~"private"]'
)
# biking: filter out foot ways, motor ways, private ways, and anything
# specifying biking=no
filters["bike"] = (
f'["highway"]["area"!~"yes"]{settings.default_access}'
f'["highway"!~"abandoned|bus_guideway|construction|corridor|elevator|escalator|footway|'
f'motor|planned|platform|proposed|raceway|steps"]'
f'["bicycle"!~"no"]["service"!~"private"]'
)
# to download all ways, just filter out everything not currently in use or
# that is private-access only
filters["all"] = (
f'["highway"]["area"!~"yes"]{settings.default_access}'
f'["highway"!~"abandoned|construction|planned|platform|proposed|raceway"]'
f'["service"!~"private"]'
)
# to download all ways, including private-access ones, just filter out
# everything not currently in use
filters[
"all_private"
] = '["highway"]["area"!~"yes"]["highway"!~"abandoned|construction|planned|platform|proposed|raceway"]'
if network_type in filters:
osm_filter = filters[network_type]
else: # pragma: no cover
raise ValueError(f'Unrecognized network_type "{network_type}"')
return osm_filter
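# Editor's sketch (internal helper, shown for illustration only): the returned string is
# pasted directly into an Overpass query, e.g.
#   _get_osm_filter("drive")
#   # -> '["highway"]["area"!~"yes"]...["service"!~"alley|driveway|...|private"]'
# (string abbreviated here), while an unrecognized network_type raises ValueError.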
def _save_to_cache(url, response_json, sc):
"""
Save a HTTP response JSON object to a file in the cache folder.
Function calculates the checksum of url to generate the cache file's name.
If the request was sent to server via POST instead of GET, then URL should
be a GET-style representation of request. Response is only saved to a
cache file if settings.use_cache is True, response_json is not None, and
sc = 200.
Users should always pass OrderedDicts instead of dicts of parameters into
request functions, so the parameters remain in the same order each time,
producing the same URL string, and thus the same hash. Otherwise the cache
will eventually contain multiple saved responses for the same request
because the URL's parameters appeared in a different order each time.
Parameters
----------
url : string
the URL of the request
response_json : dict
the JSON response
sc : int
the response's HTTP status code
Returns
-------
None
"""
if settings.use_cache:
if sc != 200:
utils.log(f"Did not save to cache because status code is {sc}")
elif response_json is None:
utils.log("Did not save to cache because response_json is None")
else:
# create the folder on the disk if it doesn't already exist
cache_folder = Path(settings.cache_folder)
cache_folder.mkdir(parents=True, exist_ok=True)
# hash the url to make the filename succinct but unique
# sha1 digest is 160 bits = 20 bytes = 40 hexadecimal characters
filename = sha1(url.encode("utf-8")).hexdigest() + ".json"
cache_filepath = cache_folder / filename
# dump to json, and save to file
cache_filepath.write_text(json.dumps(response_json), encoding="utf-8")
utils.log(f'Saved response to cache file "{cache_filepath}"')
def _url_in_cache(url):
"""
Determine if a URL's response exists in the cache.
Calculates the checksum of url to determine the cache file's name.
Parameters
----------
url : string
the URL to look for in the cache
Returns
-------
filepath : pathlib.Path
path to cached response for url if it exists, otherwise None
"""
# hash the url to generate the cache filename
filename = sha1(url.encode("utf-8")).hexdigest() + ".json"
filepath = Path(settings.cache_folder) / filename
# if this file exists in the cache, return its full path
return filepath if filepath.is_file() else None
def _retrieve_from_cache(url, check_remark=False):
"""
Retrieve a HTTP response JSON object from the cache, if it exists.
Parameters
----------
url : string
the URL of the request
check_remark : string
if True, only return filepath if cached response does not have a
remark key indicating a server warning
Returns
-------
response_json : dict
cached response for url if it exists in the cache, otherwise None
"""
# if the tool is configured to use the cache
if settings.use_cache:
# return cached response for this url if exists, otherwise return None
cache_filepath = _url_in_cache(url)
if cache_filepath is not None:
response_json = json.loads(cache_filepath.read_text(encoding="utf-8"))
# return None if check_remark is True and there is a server
# remark in the cached response
if check_remark and "remark" in response_json:
utils.log(f'Found remark, so ignoring cache file "{cache_filepath}"')
return None
utils.log(f'Retrieved response from cache file "{cache_filepath}"')
return response_json
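# Editor's sketch of the cache round trip implemented by the helpers above (the URL and
# payload are hypothetical; settings.use_cache must be True):
#   url = "https://overpass-api.de/api/interpreter?data=..."
#   _save_to_cache(url, {"elements": []}, sc=200)  # writes <cache_folder>/<sha1(url)>.json
#   _url_in_cache(url)                             # -> pathlib.Path of that file
#   _retrieve_from_cache(url)                      # -> {"elements": []}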
def _get_http_headers(user_agent=None, referer=None, accept_language=None):
"""
Update the default requests HTTP headers with OSMnx info.
Parameters
----------
user_agent : string
the user agent string, if None will set with OSMnx default
referer : string
the referer string, if None will set with OSMnx default
accept_language : string
make accept-language explicit e.g. for consistent nominatim result
sorting
Returns
-------
headers : dict
"""
if user_agent is None:
user_agent = settings.default_user_agent
if referer is None:
referer = settings.default_referer
if accept_language is None:
accept_language = settings.default_accept_language
headers = requests.utils.default_headers()
headers.update(
{"User-Agent": user_agent, "referer": referer, "Accept-Language": accept_language}
)
return headers
def _get_host_by_name(host):
"""
Resolve IP address from host using Google's public API for DNS over HTTPS.
Necessary fallback as socket.gethostbyname will not always work when using
a proxy. See https://developers.google.com/speed/public-dns/docs/doh/json
Parameters
----------
host : string
the host to consistently resolve the IP address of
Returns
-------
ip_address : string
resolved IP address
"""
dns_url = f"https://dns.google/resolve?name={host}"
response = requests.get(dns_url)
data = response.json()
# status = 0 means NOERROR: standard DNS response code
if response.ok and data["Status"] == 0:
ip_address = data["Answer"][0]["data"]
utils.log(f"Google resolved '{host}' to '{ip_address}'")
return ip_address
# in case host could not be resolved return the host itself
else:
utils.log(f"Google could not resolve '{host}'. Response status: {data['Status']}")
return host
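# Editor's note: a successful response from the DoH endpoint used above typically looks
# like (IP shown is illustrative):
#   {"Status": 0, "Answer": [{"name": "overpass-api.de.", "type": 1, "data": "192.0.2.10"}]}
# A non-zero "Status" means the name could not be resolved, in which case the host
# string itself is returned unchanged.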
def _config_dns(url):
"""
Force socket.getaddrinfo to use IP address instead of host.
Resolves the URL's domain to an IP address so that we use the same server
for both 1) checking the necessary pause duration and 2) sending the query
itself even if there is round-robin redirecting among multiple server
machines on the server-side. Mutates the getaddrinfo function so it uses
the same IP address every time it finds the host name in the URL.
For example, the domain overpass-api.de just redirects to one of its
subdomains (currently z.overpass-api.de and lz4.overpass-api.de). So if we
check the status endpoint of overpass-api.de, we may see results for
subdomain z, but when we submit the query itself it gets redirected to
subdomain lz4. This could result in violating server lz4's slot management
timing.
Parameters
----------
url : string
the URL to consistently resolve the IP address of
Returns
-------
None
"""
host = urlparse(url).netloc.split(":")[0]
try:
ip = socket.gethostbyname(host)
except socket.gaierror: # pragma: no cover
# this error occurs sometimes when using a proxy. instead, you must
# get IP address using google's public JSON API for DNS over HTTPS
ip = _get_host_by_name(host)
def _getaddrinfo(*args):
if args[0] == host:
utils.log(f"Resolved {host} to {ip}")
return _original_getaddrinfo(ip, *args[1:])
else:
return _original_getaddrinfo(*args)
socket.getaddrinfo = _getaddrinfo
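# Editor's note: _config_dns mutates the global socket.getaddrinfo; the unpatched
# function is kept in _original_getaddrinfo (captured at import time above), so normal
# resolution could be restored with:
#   socket.getaddrinfo = _original_getaddrinfo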
def _get_pause(base_endpoint, recursive_delay=5, default_duration=60):
"""
Get a pause duration from the Overpass API status endpoint.
Check the Overpass API status endpoint to determine how long to wait until
the next slot is available. You can disable this via the `ox.config`
function's `overpass_rate_limit` argument.
Parameters
----------
base_endpoint : string
base Overpass API | |
cloud_name = self.cmd.cli_ctx.cloud.name
if cloud_name.lower() == "azurecloud":
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=self.context.get_subscription_id(),
resource_group=self.context.get_resource_group_name(),
namespace="Microsoft.ContainerService",
type="managedClusters",
name=self.context.get_name(),
)
self.context.external_functions.add_monitoring_role_assignment(
cluster, cluster_resource_id, self.cmd
)
else:
# Create the DCR Association here
addon_consts = self.context.get_addon_consts()
CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME")
self.context.external_functions.ensure_container_insights_for_monitoring(
self.cmd,
cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],
self.context.get_subscription_id(),
self.context.get_resource_group_name(),
self.context.get_name(),
self.context.get_location(),
remove_monitoring=False,
aad_route=self.context.get_enable_msi_auth_for_monitoring(),
create_dcr=False,
create_dcra=True,
)
# ingress appgw addon
ingress_appgw_addon_enabled = self.context.get_intermediate("ingress_appgw_addon_enabled", default_value=False)
if ingress_appgw_addon_enabled:
self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)
# virtual node addon
virtual_node_addon_enabled = self.context.get_intermediate("virtual_node_addon_enabled", default_value=False)
if virtual_node_addon_enabled:
self.context.external_functions.add_virtual_node_role_assignment(
self.cmd, cluster, self.context.get_vnet_subnet_id()
)
# attach acr
enable_managed_identity = self.context.get_enable_managed_identity()
attach_acr = self.context.get_attach_acr()
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or cluster.identity_profile["kubeletidentity"] is None:
logger.warning(
"Your cluster is successfully created, but we failed to attach "
"acr to it, you can manually grant permission to the identity "
"named <ClUSTER_NAME>-agentpool in MC_ resource group to give "
"it permission to pull from ACR."
)
else:
kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id
self.context.external_functions.ensure_aks_acr(
self.cmd,
assignee=kubelet_identity_object_id,
acr_name_or_id=attach_acr,
subscription_id=self.context.get_subscription_id(),
is_service_principal=False,
)
def put_mc(self, mc: ManagedCluster) -> ManagedCluster:
if self.check_is_postprocessing_required(mc):
# send request
poller = self.client.begin_create_or_update(
resource_group_name=self.context.get_resource_group_name(),
resource_name=self.context.get_name(),
parameters=mc,
headers=self.context.get_aks_custom_headers(),
)
self.immediate_processing_after_request(mc)
# poll until the result is returned
cluster = LongRunningOperation(self.cmd.cli_ctx)(poller)
self.postprocessing_after_mc_created(cluster)
else:
cluster = sdk_no_wait(
self.context.get_no_wait(),
self.client.begin_create_or_update,
resource_group_name=self.context.get_resource_group_name(),
resource_name=self.context.get_name(),
parameters=mc,
headers=self.context.get_aks_custom_headers(),
)
return cluster
def create_mc(self, mc: ManagedCluster) -> ManagedCluster:
"""Send request to create a real managed cluster.
:return: the ManagedCluster object
"""
self._ensure_mc(mc)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
error_msg = ""
for _ in range(0, max_retry):
try:
cluster = self.put_mc(mc)
return cluster
# CloudError was raised before, but since the adoption of track 2 SDK,
# HttpResponseError would be raised instead
except (CloudError, HttpResponseError) as ex:
error_msg = str(ex)
if "not found in Active Directory tenant" in ex.message:
time.sleep(3)
else:
raise map_azure_error_to_cli_error(ex)
raise AzCLIError("Maximum number of retries exceeded. " + error_msg)
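# Editor's note: with max_retry = 30 and a 3-second sleep on each "not found in Active
# Directory tenant" failure, the loop above tolerates roughly 90 seconds of service
# principal replication latency (plus request time) before surfacing the accumulated
# error via AzCLIError.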
class AKSManagedClusterUpdateDecorator(BaseAKSManagedClusterDecorator):
def __init__(
self, cmd: AzCliCommand, client: ContainerServiceClient, raw_parameters: Dict, resource_type: ResourceType
):
"""Internal controller of aks_update.
Break down the all-in-one aks_update function into several relatively independent functions (some of them have
a certain order dependency) that only focus on a specific profile or process a specific piece of logic.
In addition, an overall control function is provided. By calling the aforementioned independent functions one
by one, a complete ManagedCluster object is gradually updated and finally requests are sent to update an
existing cluster.
"""
super().__init__(cmd, client)
self.__raw_parameters = raw_parameters
self.resource_type = resource_type
self.init_models()
self.init_context()
self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER
self.init_agentpool_decorator_context()
def init_models(self) -> None:
"""Initialize an AKSManagedClusterModels object to store the models.
:return: None
"""
self.models = AKSManagedClusterModels(self.cmd, self.resource_type)
def init_context(self) -> None:
"""Initialize an AKSManagedClusterContext object to store the context in the process of assemble the
ManagedCluster object.
:return: None
"""
self.context = AKSManagedClusterContext(
self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE
)
def init_agentpool_decorator_context(self) -> None:
"""Initialize an AKSAgentPoolAddDecorator object to assemble the AgentPool profile.
:return: None
"""
self.agentpool_decorator = AKSAgentPoolUpdateDecorator(
self.cmd, self.client, self.__raw_parameters, self.resource_type, self.agentpool_decorator_mode
)
self.agentpool_context = self.agentpool_decorator.context
self.context.attach_agentpool_context(self.agentpool_context)
def check_raw_parameters(self):
"""Helper function to check whether any parameters are set.
        If the values of all the parameters are the default values, the command execution will be terminated early
        with a RequiredArgumentMissingError. Neither the request to fetch nor the request to update the
        ManagedCluster object will be sent.
:return: None
"""
# exclude some irrelevant or mandatory parameters
excluded_keys = ("cmd", "client", "resource_group_name", "name")
# check whether the remaining parameters are set
# the default value None or False (and other empty values, like empty string) will be considered as not set
is_changed = any(v for k, v in self.context.raw_param.items() if k not in excluded_keys)
# special cases
# some parameters support the use of empty string or dictionary to update/remove previously set values
is_default = (
self.context.get_cluster_autoscaler_profile() is None and
self.context.get_api_server_authorized_ip_ranges() is None and
self.context.get_nodepool_labels() is None
)
if not is_changed and is_default:
            # Note: Uncomment the following to automatically generate the error message.
# option_names = [
# '"{}"'.format(format_parameter_name_to_option_name(x))
# for x in self.context.raw_param.keys()
# if x not in excluded_keys
# ]
# error_msg = "Please specify one or more of {}.".format(
# " or ".join(option_names)
# )
# raise RequiredArgumentMissingError(error_msg)
raise RequiredArgumentMissingError(
'Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--load-balancer-outbound-ports" or '
'"--load-balancer-idle-timeout" or '
'"--nat-gateway-managed-outbound-ip-count" or '
'"--nat-gateway-idle-timeout" or '
'"--auto-upgrade-channel" or '
'"--attach-acr" or "--detach-acr" or '
'"--uptime-sla" or '
'"--no-uptime-sla" or '
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--windows-admin-password" or '
'"--enable-managed-identity" or '
'"--assign-identity" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac" or '
'"--enable-public-fqdn" or '
'"--disable-public-fqdn" or '
'"--tags" or '
'"--nodepool-labels" or '
'"--enble-windows-gmsa".'
)
def _ensure_mc(self, mc: ManagedCluster) -> None:
"""Internal function to ensure that the incoming `mc` object is valid and the same as the attached `mc` object
in the context.
        If the incoming `mc` is not valid or is inconsistent with the `mc` in the context, raise a CLIInternalError.
:return: None
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
if self.context.mc != mc:
raise CLIInternalError(
"Inconsistent state detected. The incoming `mc` is not the same as the `mc` in the context."
)
def fetch_mc(self) -> ManagedCluster:
"""Get the ManagedCluster object currently in use and attach it to internal context.
        Internally sends a request using ContainerServiceClient with the cluster name and resource group name as parameters.
:return: the ManagedCluster object
"""
mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())
# attach mc to AKSContext
self.context.attach_mc(mc)
return mc
def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Update agentpool profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
self._ensure_mc(mc)
if not mc.agent_pool_profiles:
raise UnknownError(
"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of "
"updating agentpool profile."
)
agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)
mc.agent_pool_profiles[0] = agentpool_profile
# update nodepool labels for all nodepools
nodepool_labels = self.context.get_nodepool_labels()
if nodepool_labels is not None:
for agent_profile in mc.agent_pool_profiles:
agent_profile.node_labels = nodepool_labels
return mc
def update_auto_scaler_profile(self, mc):
"""Update autoscaler profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
self._ensure_mc(mc)
cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()
if cluster_autoscaler_profile is not None:
# update profile (may clear profile with empty dictionary)
mc.auto_scaler_profile = cluster_autoscaler_profile
return mc
def update_tags(self, mc: ManagedCluster) -> ManagedCluster:
"""Update tags for the ManagedCluster object.
:return: the ManagedCluster object
"""
self._ensure_mc(mc)
tags = self.context.get_tags()
if tags is not None:
mc.tags = tags
return mc
def process_attach_detach_acr(self, mc: ManagedCluster) -> None:
"""Attach or detach acr for the cluster.
The function "ensure_aks_acr" will be called to create or delete an AcrPull role assignment for the acr, which
internally used AuthorizationManagementClient to send the request.
:return: None
"""
self._ensure_mc(mc)
subscription_id = self.context.get_subscription_id()
assignee, is_service_principal = self.context.get_assignee_from_identity_or_sp_profile()
attach_acr = self.context.get_attach_acr()
detach_acr = self.context.get_detach_acr()
if attach_acr:
self.context.external_functions.ensure_aks_acr(
self.cmd,
assignee=assignee,
acr_name_or_id=attach_acr,
subscription_id=subscription_id,
is_service_principal=is_service_principal,
)
if detach_acr:
self.context.external_functions.ensure_aks_acr(
self.cmd,
assignee=assignee,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True,
is_service_principal=is_service_principal,
)
def update_sku(self, mc: ManagedCluster) -> ManagedCluster:
"""Update sku (uptime sla) for the ManagedCluster object.
:return: the ManagedCluster object
"""
self._ensure_mc(mc)
if self.context.get_uptime_sla():
mc.sku = self.models.ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if self.context.get_no_uptime_sla():
mc.sku = self.models.ManagedClusterSKU(
name="Basic",
tier="Free"
)
return mc
def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Update load balancer profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
self._ensure_mc(mc)
if not mc.network_profile:
raise UnknownError(
"Encounter an unexpected error while getting network profile from the cluster in the process of "
"updating its load balancer profile."
)
load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()
load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()
load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()
load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()
load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()
# In the internal function "_update_load_balancer_profile", it will check whether the provided parameters
# have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will
# remain unchanged.
mc.network_profile.load_balancer_profile = _update_load_balancer_profile(
managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,
outbound_ips=load_balancer_outbound_ips,
outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,
outbound_ports=load_balancer_outbound_ports,
idle_timeout=load_balancer_idle_timeout,
profile=mc.network_profile.load_balancer_profile,
models=self.models.load_balancer_models)
return mc
def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Update nat gateway profile for the ManagedCluster object.
        :return: the ManagedCluster object
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import wx
#import wx.lib.buttons as buttons
import PyDatabase
import images
import string
import MyValidator
from PhrResource import ALPHA_ONLY, DIGIT_ONLY, strVersion, g_UnitSnNum
import MyThread
import time
import types
import os
class FrameMaintain(wx.Frame):
def __init__(self):
title = strVersion
wx.Frame.__init__(self, None, -1, title)
self.panel = panel = wx.Panel(self)
self.Maximize()
icon=images.getProblemIcon()
self.SetIcon(icon)
PersonFix(panel)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
def OnCloseWindow(self, event):
self.Destroy()
class PersonFix(object):
def __init__(self, panel):
self.panel = panel
panel.Hide()
panel.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.NORMAL, face=u'宋体'))
try:
color = panel.GetParent().GetMenuBar().GetBackgroundColour()
except:
color = (236, 233, 216)
panel.SetBackgroundColour(color)
titleText = wx.StaticText(panel, -1, u"人员信息维护")
self.dispText = wx.StaticText(panel, -1, u"当前浏览人数:")
self.g1 = wx.Gauge(panel, -1, 50, size=(-1,10))
titleText.SetFont(wx.Font(20, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.NORMAL, face=u'黑体'))
topsizer0 = wx.BoxSizer(wx.HORIZONTAL)
topsizer0.Add(titleText, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL)
numsizer = wx.BoxSizer(wx.HORIZONTAL)
numsizer.Add(self.dispText, 0, wx.ALL| wx.ALIGN_BOTTOM)
numsizer.Add(self.g1, 1, wx.ALL|wx.EXPAND| wx.ALIGN_BOTTOM)
self.ckbRank = wx.CheckBox(panel, -1, u"军衔")
self.cbRank = wx.ComboBox(panel, size=(100,-1))
self.ckbRankTime = wx.CheckBox(panel, -1, u"时间")
self.cbRankTime = wx.ComboBox(panel, size=(60,-1))
self.ckbAddr = wx.CheckBox(panel, -1, u"籍贯")
self.cbAddr = wx.ComboBox(panel, size=(60,-1))
self.ckbSex = wx.CheckBox(panel, -1, u"性别")
self.cbSex = wx.ComboBox(panel, -1, u"男", choices=[u"男", u"女"], size=(50,-1))
self.ckbMarried = wx.CheckBox(panel, -1, u"婚否")
self.cbMarried = wx.ComboBox(panel, -1, u"未婚", choices=[u"未婚", u"已婚"], size=(50,-1))
lblname = wx.StaticText(panel, -1, u"姓名:")
self.Text_Select = wx.TextCtrl(panel, size=(100,-1))
btn_Select = wx.Button(panel, -1, u"查询", size=(60,-1))
btn_Select.SetDefault()
topsizer = wx.BoxSizer(wx.HORIZONTAL)
itemSelect = [self.ckbRank, self.cbRank, (-1,-1), self.ckbRankTime, self.cbRankTime, (-1,-1), self.ckbAddr, self.cbAddr, (-1,-1), self.ckbSex, self.cbSex, (-1,-1), self.ckbMarried, self.cbMarried]
for item in itemSelect:
if types.TupleType == type(item):
topsizer.Add(item, 1, wx.ALL| wx.EXPAND)
else:
topsizer.Add(item, 0, wx.ALL| wx.ALIGN_CENTER_VERTICAL)
topsizer.Add((-1,-1), 5, wx.ALL| wx.EXPAND)
topsizer.Add(lblname, 0, wx.ALL| wx.ALIGN_CENTER_VERTICAL)
topsizer.Add(self.Text_Select, 0, wx.ALL)
topsizer.Add(btn_Select, 0, wx.ALL)
lbl5 = wx.StaticText(panel, -1, u"姓 名:")
self.Text_Name = wx.TextCtrl(panel)
lbl6 = wx.StaticText(panel, -1, u"联系电话:")
self.Text_Tel = wx.TextCtrl(panel,validator = MyValidator.MyValidator(DIGIT_ONLY))
lbl7 = wx.StaticText(panel, -1, u"详细地址:")
self.Text_AddrAll = wx.TextCtrl(panel)
iteminfo = [lbl5, self.Text_Name]
infosizer1 = wx.BoxSizer(wx.HORIZONTAL)
for item in iteminfo:
infosizer1.Add(item, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
infosizer1.Add((-1,-1), 1, wx.ALL| wx.EXPAND)
infosizer1.Add(lbl6, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
infosizer1.Add(self.Text_Tel, 3, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
infosizer1.Add((-1,-1), 1, wx.ALL| wx.EXPAND)
infosizer2 = wx.BoxSizer(wx.HORIZONTAL)
infosizer2.Add(lbl7, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
infosizer2.Add(self.Text_AddrAll, 1, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5)
btn_Add = wx.Button(panel, -1, u"添加")
btn_Delete = wx.Button(panel, -1, u"删除")
btn_Modify = wx.Button(panel, -1, u"修改")
btn_Help = wx.Button(panel, -1, u"帮助")
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
for item in [btn_Add, btn_Delete, btn_Modify, btn_Help]:
btnSizer.Add((20,-1), 1)
btnSizer.Add(item)
btnSizer.Add((20,-1), 1)
self.list = wx.ListCtrl(panel, -1, style=wx.LC_REPORT| wx.LC_VRULES | wx.LC_HRULES)
self.list.SetImageList(wx.ImageList(1, 20), wx.IMAGE_LIST_SMALL)
listsizer = wx.BoxSizer(wx.VERTICAL)
listsizer.AddSizer(topsizer,0, wx.ALL| wx.EXPAND, 0)
listsizer.Add(self.list, 1, wx.ALL|wx.EXPAND, 0)
lbltree = wx.StaticText(panel, -1, u"单位树")
self.ckbModify = wx.CheckBox(panel, -1, u"编辑模式")
self.tree = wx.TreeCtrl(panel, size=(180, -1))
self.root = self.tree.AddRoot(u"单位")
treetopsizer = wx.BoxSizer(wx.HORIZONTAL)
treetopsizer.Add(lbltree, 0, wx.ALL)
treetopsizer.Add((-1,-1), 1, wx.ALL| wx.EXPAND)
treetopsizer.Add(self.ckbModify, 0, wx.ALL)
treesizer = wx.BoxSizer(wx.VERTICAL)
treesizer.AddSizer(treetopsizer, 0, wx.ALL| wx.EXPAND, 5)
treesizer.Add(self.tree, 1, wx.ALL|wx.EXPAND, 0)
midsizer = wx.BoxSizer(wx.HORIZONTAL)
midsizer.AddSizer(treesizer, 0, wx.ALL| wx.EXPAND, 5)
midsizer.AddSizer(listsizer, 1, wx.ALL| wx.EXPAND, 5)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.AddSizer(topsizer0, 0, wx.ALL| wx.ALIGN_CENTER_HORIZONTAL, 5)
mainSizer.AddSizer(numsizer, 0, wx.ALL| wx.ALIGN_LEFT | wx.EXPAND, 0)
mainSizer.Add(wx.StaticLine(panel), 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5)
mainSizer.AddSizer(midsizer, 1, wx.ALL| wx.EXPAND, 0)
mainSizer.AddSizer(infosizer1, 0, wx.ALL| wx.EXPAND, 5)
mainSizer.AddSizer(infosizer2, 0, wx.ALL| wx.EXPAND, 5)
mainSizer.Add(wx.StaticLine(panel), 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5)
mainSizer.Add(btnSizer, 0, wx.EXPAND|wx.BOTTOM, 5)
panel.SetSizer(mainSizer)
mainSizer.Fit(panel)
panel.popMenuPPFix = wx.Menu()
pmList_1 = panel.popMenuPPFix.Append(1151, u"删除")
panel.popMenuPPFix.AppendSeparator()
pmList_2 = panel.popMenuPPFix.Append(1152, u"将所有结果导出为xls文件")
self.list.Bind(wx.EVT_MENU, self.OnPopItemSelected, pmList_1)
self.list.Bind(wx.EVT_MENU, self.OnPopItemSelected, pmList_2)
self.list.Bind(wx.EVT_CONTEXT_MENU, self.OnShowPop)
self.treeDict = {}
self.treeSel = None
self.listIndex = 0
self.unitDict = {}
self.rankDict = {}
self.roadDict = {}
self.DTable = 'PersonInfo'
panel.Bind(MyThread.EVT_UPDATE_BARGRAPH, self.OnUpdate)
panel.Bind(MyThread.EVT_EXPORT_XLS, self.OnExport)
self.thread = []
self.thread.append(MyThread.CalcGaugeThread(panel, 0))
self.lstHead = [u"序号", u"编号", u"姓名",u"性别",u"军衔",u"单位",u"籍贯", u"详细地址",u"婚否",u"电话", u"军衔时间"]
[self.list.InsertColumn(i, item) for (i, item) in zip(range(len(self.lstHead)), self.lstHead)]
self.list.SetColumnWidth(0, 50)
self.list.SetColumnWidth(3, 60)
infoPerson = [self.cbSex, self.cbMarried, self.cbRank, self.cbAddr]
[item.SetEditable(False) for item in infoPerson]
self.InitData()
panel.Show()
itemchecklst = [self.ckbRank, self.ckbRankTime, self.ckbAddr, self.ckbSex, self.ckbMarried]
[panel.Bind(wx.EVT_CHECKBOX, self.OnCkbInfo, item) for item in itemchecklst]
panel.Bind(wx.EVT_CHECKBOX, self.OnCkbModify, self.ckbModify)
panel.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, self.tree)
panel.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivate, self.tree)
panel.Bind(wx.EVT_COMBOBOX, self.OnCbChanged, self.cbRank)
panel.Bind(wx.EVT_COMBOBOX, self.OnCbChanged, self.cbRankTime)
panel.Bind(wx.EVT_COMBOBOX, self.OnCbChanged, self.cbAddr)
panel.Bind(wx.EVT_COMBOBOX, self.OnCbChanged, self.cbSex)
panel.Bind(wx.EVT_COMBOBOX, self.OnCbChanged, self.cbMarried)
panel.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.list)
panel.Bind(wx.EVT_BUTTON, self.OnAdd, btn_Add)
panel.Bind(wx.EVT_BUTTON, self.OnDelete, btn_Delete)
panel.Bind(wx.EVT_BUTTON, self.OnModify, btn_Modify)
panel.Bind(wx.EVT_BUTTON, self.OnHelp, btn_Help)
panel.Bind(wx.EVT_BUTTON, self.OnSelect, btn_Select)
panel.Bind(wx.EVT_PAINT, self.OnPaint)
def OnExport(self, evt):
if evt.flag:
for item in self.thread:
item.Stop()
running = 1
while running:
running = 0
for t in self.thread:
running = running + t.IsRunning()
time.sleep(0.1)
self.g1.Show(False)
wx.MessageBox(u"导出成功!", u"提示")
def OnUpdate(self, evt):
self.g1.SetValue(evt.count)
def InitData(self):
lstType = PyDatabase.DBSelect(u"ID like '%%%%'", "RankDays", ['RankSn', 'LevelRank'], 1)
list_Type = []
for item in lstType:
self.rankDict[item[0]] = item[1]
list_Type.append(item[1])
self.cbRank.SetItems(list_Type)
self.cbRank.Select(0)
lstRTime = []
for i in range(2020, 2000, -1):
            lstRTime.append(str(i))
self.cbRankTime.SetItems(lstRTime)
self.cbRankTime.Select(12)
lstAddr = PyDatabase.DBSelect(u"ID like '%%%%'", "RoadDays", ['AddrSn','Address'], 1)
list_Addr = []
for item in lstAddr:
self.roadDict[item[0]] = item[1]
list_Addr.append(item[1])
self.cbAddr.SetItems(list_Addr)
self.cbAddr.Select(0)
self.InitTree()
self.OnSelect(None)
self.g1.Show(False)
def OnDispPPNum(self):
strDisp = self.list.GetItemCount()
        self.dispText.SetLabel(u"当前浏览人数:" + str(strDisp))
def OnCkbInfo(self, event):
self.OnSelect(None)
def OnCkbModify(self, event):
itemchecklst = [self.ckbRank, self.ckbRankTime, self.ckbAddr, self.ckbSex, self.ckbMarried]
[item.SetValue(False) for item in itemchecklst]
[item.Enable(not self.ckbModify.GetValue()) for item in itemchecklst]
def InitTree(self):
strResult = PyDatabase.DBSelect('', 'UnitTab', ['UnitSn'], 0)
# self.unitDict = []
lstunitname = []
for row in strResult:
self.unitDict[row[1]] = row[2]
lstunitname.append((row[1], row[2].split('|->')))
self.CreateTreeByList(lstunitname)
self.tree.Expand(self.root)
def CreateTreeByList(self, lststr):
'''Create Tree by list'''
if len(lststr) == 0:
return
flagModRoot = True
if len(lststr) >= 2:
if lststr[0][1][0] != lststr[1][1][0]:
flagModRoot = False
for item in lststr:
parentItem = self.root
if flagModRoot:
itemlst = item[1][1:]
else:
itemlst = item[1]
for ichild in itemlst:
sibitem, cookie = self.tree.GetFirstChild(parentItem)
while sibitem.IsOk():
'''parent node is the same'''
if self.GetItemText(sibitem) == ichild:
break
sibitem = self.tree.GetNextSibling(sibitem)
if self.GetItemText(sibitem) != ichild:
parentItem = self.tree.AppendItem(parentItem, ichild)
else:
parentItem = sibitem
# Save the TreeItemId
self.treeDict[item[0]] = parentItem
if flagModRoot:
self.tree.SetItemText(self.root, lststr[0][1][0])
def GetItemText(self, item):
if item:
return self.tree.GetItemText(item)
else:
return ""
def OnSelChanged(self, event):
item = event.GetItem()
if item in self.treeDict.values():
self.treeSel = item
else:
self.treeSel = None
def OnActivate(self, event):
item = event.GetItem()
if item in self.treeDict.values():
curUsn = self.treeDict.keys()[self.treeDict.values().index(item)]
strResult = PyDatabase.DBSelect(curUsn, self.DTable, ['UnitSn'], 2)
self.treeSel = item
else:
strResult = PyDatabase.DBSelect("", self.DTable, ['Sn'], 0)
self.treeSel = None
self.FlashList(strResult)
itemchecklst = [self.ckbRank, self.ckbAddr, self.ckbSex, self.ckbMarried,self.ckbModify]
[item.SetValue(False) for item in itemchecklst]
[item.Enable(not self.ckbModify.GetValue()) for item in itemchecklst[:-1]]
self.OnDispPPNum()
def OnOutXls(self):
dlg = wx.DirDialog(self.panel, u"请选择一个保存目录:", style=wx.DD_DEFAULT_STYLE)
pathXls = ""
if dlg.ShowModal() == wx.ID_OK:
pathXls = dlg.GetPath()
dlg.Destroy()
if pathXls == "":
wx.MessageBox(u"未选择保存目录!", u"提示")
return
head = [self.list.GetColumn(index).GetText() for index in range(self.list.GetColumnCount())]
lstStr = []
for index in range(self.list.GetItemCount()):
lstStr.append([self.list.GetItem(index, col=icol).GetText() for icol in range(self.list.GetColumnCount())])
self.thread.append(MyThread.ExportXlsThread(self.panel, lstStr, head, pathXls))
self.g1.Show(True)
for item in self.thread:
item.Start()
def OnShowPop(self, event):
if self.list.GetItemCount() != 0:
self.list.PopupMenu(self.panel.popMenuPPFix)
def OnPopItemSelected(self, event):
try:
item = self.panel.popMenuPPFix.FindItemById(event.GetId())
text = item.GetText()
if text == u"删除":
self.OnDelete(None)
if text == u"将所有结果导出为xls文件":
self.OnOutXls()
except:
pass
def DispColorList(self, list):
for i in range(list.GetItemCount()):
if i%4 == 0: list.SetItemBackgroundColour(i, (233, 233, 247))
if i%4 == 1: list.SetItemBackgroundColour(i, (247, 247, 247))
if i%4 == 2: list.SetItemBackgroundColour(i, (247, 233, 233))
if i%4 == 3: list.SetItemBackgroundColour(i, (233, 247, 247))
def OnCbChanged(self, event):
itemchecklst = [self.ckbRank, self.ckbRankTime, self.ckbAddr, self.ckbSex, self.ckbMarried]
flagNum = 0
for item in itemchecklst:
if not item.GetValue():
flagNum += 1
if flagNum == len(itemchecklst):
return
if self.ckbModify.GetValue():
return
self.OnSelect(None)
def OnSelect(self, event):
# Clear last select result
if len(self.rankDict) == 0:
return
self.ClearTxt()
strName = self.Text_Select.GetValue()
# fuzzy select
lstsql = []
if self.ckbRank.GetValue():
lstsql.append("RankSn = '" + self.rankDict.keys()[self.rankDict.values().index(self.cbRank.GetValue())] + "'")
if self.ckbRankTime.GetValue():
lstsql.append("RankTime = '" + self.cbRankTime.GetValue() + "-12-1" + "'")
if self.ckbAddr.GetValue():
lstsql.append("AddrSn = '" + self.roadDict.keys()[self.roadDict.values().index(self.cbAddr.GetValue())] + "'")
if self.ckbMarried.GetValue():
lstsql.append("Married = '" + self.cbMarried.GetValue() + "'")
if self.ckbSex.GetValue():
lstsql.append("Sex = '" + self.cbSex.GetValue() + "'")
if self.treeSel is not None:
curUsn = self.treeDict.keys()[self.treeDict.values().index(self.treeSel)]
lstsql.append("UnitSn = '" + curUsn + "'")
strsql = ""
for item in lstsql:
strsql += item + " and "
strsql += "Name"
strResult = PyDatabase.DBSelect(strName, self.DTable, [strsql], 0)
self.FlashList(strResult)
self.listIndex = -1
self.OnDispPPNum()
lstPInfo = [self.cbRank, self.cbRankTime, self.cbAddr, self.cbSex, self.cbMarried, self.tree, self.Text_Name, self.Text_Tel, self.Text_AddrAll]
[item.Enable(True) for item in lstPInfo]
def FlashList(self, strResult):
self.list.DeleteAllItems()
        for row in strResult:
import os
import math
import yaml
import sys
import argparse
import pathlib
import itertools
from copy import copy
from enum import Enum, IntEnum
from abc import ABC
import PIL
from reportlab.lib import utils
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase.ttfonts import TTFError
from reportlab.lib.units import mm
from reportlab.lib.enums import TA_CENTER
from reportlab.pdfgen import canvas
from reportlab.graphics import renderPDF
from reportlab.platypus import Frame, Paragraph, Table, TableStyle
from reportlab.platypus.flowables import Flowable, Spacer, Image
from reportlab.lib.styles import ParagraphStyle, StyleSheet1
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.fonts import addMapping
from reportlab.platypus.doctemplate import LayoutError
from svglib.svglib import svg2rlg
ASSET_DIR = pathlib.Path(__file__).parent.resolve() / "assets"
def ExistingFile(p):
"""Argparse type for absolute paths that exist"""
p = pathlib.Path(p).absolute()
if p.exists():
return p
else:
raise argparse.ArgumentTypeError(f"`{p}` does not exist")
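# Usage sketch (illustrative only, not part of the original script): ExistingFile
# can be used as an argparse `type=` converter so missing paths are rejected at
# parse time. The parser and argument names below are hypothetical.
def _example_existing_file_argument():
    parser = argparse.ArgumentParser(description="ExistingFile usage sketch")
    parser.add_argument("config", type=ExistingFile, help="a path that must already exist")
    # Parse the path of this very file, which is guaranteed to exist at runtime.
    args = parser.parse_args([str(pathlib.Path(__file__))])
    return args.config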
# Returns the best orientation for the given image aspect ratio
def best_orientation(image_path, card_width, card_height):
image = PIL.Image.open(image_path)
image_width, image_height = image.size
if (image_width > image_height) == (card_width > card_height):
return Orientation.NORMAL
else:
return Orientation.TURN90
# Returns the width and height an image should be to fit into the available
# space, while maintaining aspect ratio
def get_image_size(path, available_width, available_height):
img = utils.ImageReader(path)
image_width, image_height = img.getSize()
width_ratio = available_width / image_width
height_ratio = available_height / image_height
best_ratio = min(width_ratio, height_ratio)
return (image_width * best_ratio, image_height * best_ratio)
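# Worked example (illustrative only): get_image_size keeps the aspect ratio by
# scaling with the smaller of the two ratios. For a 400x300 image in a 60x40 box
# (arbitrary units), width_ratio = 0.15 and height_ratio is about 0.133, so the
# image is drawn at roughly 53.3 x 40.
def _example_fit_to_box():
    image_width, image_height = 400, 300
    available_width, available_height = 60, 40
    best_ratio = min(available_width / image_width, available_height / image_height)
    return (image_width * best_ratio, image_height * best_ratio)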
# TODO: Clean up the font object, it seems a bit crude
# TODO: Also manage colours
class Fonts(ABC):
styles = {}
# Scaling factor between the font size and its actual height in mm
FONT_SCALE = None
FONT_DIR = ASSET_DIR / "fonts"
def __init__(self):
self._register_fonts()
self.paragraph_styles = StyleSheet1()
self.paragraph_styles.add(
ParagraphStyle(
name="title",
fontName=self.styles["title"][0],
fontSize=self.styles["title"][1] * self.FONT_SCALE,
leading=self.styles["title"][1] * self.FONT_SCALE + 0.5 * mm,
spaceAfter=0.5 * mm,
alignment=TA_CENTER,
textTransform="uppercase",
)
)
self.paragraph_styles.add(
ParagraphStyle(
name="subtitle",
fontName=self.styles["subtitle"][0],
fontSize=self.styles["subtitle"][1] * self.FONT_SCALE,
textColor=self.styles["subtitle"][2],
backColor="red",
leading=self.styles["subtitle"][1] * self.FONT_SCALE + 0.5 * mm,
alignment=TA_CENTER,
borderPadding=(0, 6),
)
)
self.paragraph_styles.add(
ParagraphStyle(
name="text",
fontName=self.styles["text"][0],
fontSize=self.styles["text"][1] * self.FONT_SCALE,
leading=self.styles["text"][1] * self.FONT_SCALE + 0.5 * mm,
spaceBefore=1 * mm,
)
)
self.paragraph_styles.add(
ParagraphStyle(
name="legendary_action",
fontName=self.styles["text"][0],
fontSize=self.styles["text"][1] * self.FONT_SCALE,
leading=self.styles["text"][1] * self.FONT_SCALE + 0.5 * mm,
spaceBefore=0,
)
)
self.paragraph_styles.add(
ParagraphStyle(
name="modifier",
fontName=self.styles["text"][0],
fontSize=self.styles["text"][1] * self.FONT_SCALE,
leading=self.styles["text"][1] * self.FONT_SCALE + 0.5 * mm,
alignment=TA_CENTER,
)
)
self.paragraph_styles.add(
ParagraphStyle(
name="action_title",
fontName=self.styles["modifier_title"][0],
fontSize=self.styles["modifier_title"][1] * self.FONT_SCALE,
leading=self.styles["modifier_title"][1] * self.FONT_SCALE + 0.5 * mm,
spaceBefore=1 * mm,
)
)
self.paragraph_styles.add(
ParagraphStyle(
name="modifier_title",
fontName=self.styles["modifier_title"][0],
fontSize=self.styles["modifier_title"][1] * self.FONT_SCALE,
leading=self.styles["modifier_title"][1] * self.FONT_SCALE + 0.5 * mm,
alignment=TA_CENTER,
)
)
def set_font(self, canvas, section, custom_scale=1.0):
canvas.setFont(
self.styles[section][0],
self.styles[section][1] * self.FONT_SCALE * custom_scale,
)
return self.styles[section][1]
def _register_fonts(self):
        raise NotImplementedError
class FreeFonts(Fonts):
FONT_SCALE = 1.41
styles = {
"title": ("Universal Serif", 2.5 * mm, "black"),
"subtitle": ("ScalySans", 1.5 * mm, "white"),
"challenge": ("Universal Serif", 2.25 * mm, "black"),
"category": ("Universal Serif", 2.25 * mm, "black"),
"subcategory": ("Universal Serif", 1.5 * mm, "black"),
"heading": ("ScalySansBold", 1.5 * mm, "black"),
"text": ("ScalySans", 1.5 * mm, "black"),
"artist": ("ScalySans", 1.5 * mm, "white"),
"modifier_title": ("Universal Serif", 1.5 * mm, "black"),
}
def _register_fonts(self):
pdfmetrics.registerFont(
TTFont("Universal Serif", self.FONT_DIR / "Universal Serif.ttf")
)
pdfmetrics.registerFont(TTFont("ScalySans", self.FONT_DIR / "ScalySans.ttf"))
pdfmetrics.registerFont(
TTFont("ScalySansItalic", self.FONT_DIR / "ScalySans-Italic.ttf")
)
pdfmetrics.registerFont(
TTFont("ScalySansBold", self.FONT_DIR / "ScalySans-Bold.ttf")
)
pdfmetrics.registerFont(
TTFont("ScalySansBoldItalic", self.FONT_DIR / "ScalySans-BoldItalic.ttf")
)
addMapping("ScalySans", 0, 0, "ScalySans") # normal
addMapping("ScalySans", 0, 1, "ScalySansItalic") # italic
addMapping("ScalySans", 1, 0, "ScalySansBold") # bold
addMapping("ScalySans", 1, 1, "ScalySansBoldItalic") # italic and bold
class AccurateFonts(Fonts):
FONT_SCALE = 1.41
styles = {
"title": ("ModestoExpanded", 2.5 * mm, "black"),
"subtitle": ("ModestoTextLight", 1.5 * mm, "white"),
"challenge": ("ModestoExpanded", 2.25 * mm, "black"),
"category": ("ModestoExpanded", 2.25 * mm, "black"),
"subcategory": ("ModestoExpanded", 1.5 * mm, "black"),
"heading": ("ModestoTextBold", 1.5 * mm, "black"),
"text": ("ModestoTextLight", 1.5 * mm, "black"),
"artist": ("ModestoTextLight", 1.25 * mm, "white"),
"modifier_title": ("ModestoExpanded", 1.5 * mm, "black"),
}
def _register_fonts(self):
pdfmetrics.registerFont(
TTFont("ModestoExpanded", self.FONT_DIR / "ModestoExpanded-Regular.ttf")
)
pdfmetrics.registerFont(
TTFont("ModestoTextLight", self.FONT_DIR / "ModestoText-Light.ttf")
)
pdfmetrics.registerFont(
TTFont(
"ModestoTextLightItalic",
self.FONT_DIR / "ModestoText-LightItalic.ttf",
)
)
pdfmetrics.registerFont(
TTFont("ModestoTextBold", self.FONT_DIR / "ModestoText-Bold.ttf")
)
pdfmetrics.registerFont(
TTFont(
"ModestoTextBoldItalic",
self.FONT_DIR / "ModestoText-BoldItalic.ttf",
)
)
addMapping("ModestoTextLight", 0, 0, "ModestoTextLight") # normal
addMapping("ModestoTextLight", 0, 1, "ModestoTextLightItalic") # italic
addMapping("ModestoTextLight", 1, 0, "ModestoTextBold") # bold
addMapping("ModestoTextLight", 1, 1, "ModestoTextBoldItalic") # italic and bold
# Draws a line across the frame, unless it is at the top of the frame, in which
# case nothing is drawn
class LineDivider(Flowable):
def __init__(
self,
xoffset=0,
width=None,
fill_color="red",
line_height=0.25 * mm,
spacing=1 * mm,
):
self.xoffset = xoffset
self.width = width
self.fill_color = fill_color
self.spacing = spacing
self.line_height = line_height
self.height = self.line_height + self.spacing
def _at_top(self):
at_top = False
frame = getattr(self, "_frame", None)
if frame:
at_top = getattr(frame, "_atTop", None)
return at_top
def wrap(self, *args):
if self._at_top():
return (0, 0)
else:
return (self.width, self.height)
def draw(self):
if not self._at_top():
canvas = self.canv
canvas.setFillColor(self.fill_color)
canvas.rect(self.xoffset, 0, self.width, self.line_height, stroke=0, fill=1)
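# Usage sketch (illustrative, not from the original file): LineDivider is meant
# to sit between flowables in a Frame; at the very top of the frame it collapses
# to zero height, otherwise it draws a colored rule. The file name and style
# below are hypothetical.
def _example_line_divider(pdf_path="line_divider_demo.pdf"):
    demo_canvas = canvas.Canvas(pdf_path, pagesize=(80 * mm, 60 * mm))
    frame = Frame(5 * mm, 5 * mm, 70 * mm, 50 * mm)
    style = ParagraphStyle(name="demo", fontName="Helvetica", fontSize=8)
    story = [
        Paragraph("Above the divider", style),
        LineDivider(width=60 * mm, fill_color="red"),
        Paragraph("Below the divider", style),
    ]
    frame.addFromList(story, demo_canvas)
    demo_canvas.save()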
class KeepTogether(Flowable):
def __init__(self, flowables):
self.flowables = flowables
self._available_height = None
self._available_width = None
def wrap(self, aW, aH):
self._available_width = aW
self._available_height = aH
height = 0
width = 0
for flowable in self.flowables:
w, h = flowable.wrap(aW, 0xFFFFFFFF)
height += flowable.getSpaceBefore()
height += h
height += flowable.getSpaceAfter()
if w > width:
width = w
return width, height
def drawOn(self, canvas, x, y, _sW=0):
y -= self.flowables[0].getSpaceBefore()
for flowable in self.flowables[::-1]:
y += flowable.getSpaceBefore()
width, height = flowable.wrap(self._available_width, self._available_height)
flowable.drawOn(canvas, x, y, _sW=_sW)
y += height
y += flowable.getSpaceAfter()
            self._available_height -= (
                flowable.getSpaceBefore() + height + flowable.getSpaceAfter()
            )
class Orientation(Enum):
NORMAL = 1
TURN90 = 2
class Border(IntEnum):
LEFT = 0
RIGHT = 1
BOTTOM = 2
TOP = 3
class TemplateTooSmall(Exception):
pass
class CardLayout(ABC):
CARD_CORNER_DIAMETER = 3 * mm
BACKGROUND_CORNER_DIAMETER = 2 * mm
LOGO_WIDTH = 42 * mm
STANDARD_BORDER = 2.5 * mm
STANDARD_MARGIN = 1.0 * mm
TEXT_MARGIN = 2 * mm
BASE_WIDTH = 63 * mm
BASE_HEIGHT = 89 * mm
TITLE_BAR_HEIGHT = 4.8 * mm
def __init__(
self,
title,
subtitle,
artist,
image_path,
background,
border_color="red",
border_front=(0, 0, 0, 0), # uninitialized
border_back=(0, 0, 0, 0), # uninitialized
width=0, # uninitialized
height=0, # uninitialized
bleed=0, # uninitialized
fonts=FreeFonts(),
):
self.frames = []
self.title = title
self.subtitle = subtitle
self.artist = artist
self.fonts = fonts
self.background_image_path = background
self.border_color = border_color
self.border_front = tuple([v + bleed for v in border_front])
self.border_back = tuple([v + bleed for v in border_back])
self.width = width + 2 * bleed
self.height = height + 2 * bleed
self.bleed = bleed
self.front_image_path = os.path.abspath(image_path)
self.front_orientation = best_orientation(
self.front_image_path, self.width, self.height
)
self.elements = []
self.front_margins = tuple(
[x + self.STANDARD_MARGIN for x in self.border_front]
)
def set_size(self, canvas):
canvas.setPageSize((self.width * 2, self.height))
def draw(self, canvas, split):
self.set_size(canvas)
self._draw_front(canvas)
self._draw_back(canvas)
self.fill_frames(canvas)
self._draw_frames(canvas, split)
def fill_frames(self, canvas):
pass
def _draw_front_frame(self, canvas, width, height):
front_frame = Frame(
self.border_front[Border.LEFT],
self.border_front[Border.BOTTOM],
width - self.border_front[Border.LEFT] - self.border_front[Border.RIGHT],
height - self.border_front[Border.TOP] - self.border_front[Border.BOTTOM],
leftPadding=self.TEXT_MARGIN,
bottomPadding=self.TEXT_MARGIN,
rightPadding=self.TEXT_MARGIN,
topPadding=self.TEXT_MARGIN,
)
# DEBUG
# front_frame.drawBoundary(canvas)
title_paragraph = self._get_title_paragraph()
# Nasty hack alert!
# There is no way to know how big the text will be and Frame only
# supports top to bottom layout. This means we have no way of
# knowing the maximum image size.
#
# As a hack to get around this, we have to:
# 1. mock out the paragraphs drawOn method
# 2. "draw" the paragraph
# 3. Calculate how tall it was
# 4. Reset the frame and restore the original drawOn
def mock(*args, **kwargs):
pass
original_drawOn = title_paragraph.drawOn
title_paragraph.drawOn = mock
result = front_frame.add(title_paragraph, canvas)
if not result:
raise Exception("Failed to draw title in front frame")
title_height = (
front_frame.y1 + front_frame.height - front_frame._y + self.TEXT_MARGIN
)
title_paragraph.drawOn = original_drawOn
front_frame._reset()
available_height = front_frame.height - title_height - self.TEXT_MARGIN * 2
image_width, image_height = get_image_size(
self.front_image_path,
front_frame.width,
available_height,
)
elements = []
# Add spacer if image doesn't fully fill frame
space = front_frame.height - (image_height + title_height)
if space > 0:
elements.append(Spacer(front_frame.width, space / 2))
elements.append(Image(self.front_image_path, image_width, image_height))
# Add second spacer
if space > 0:
elements.append(Spacer(front_frame.width, space / 2))
elements.append(title_paragraph)
front_frame.addFromList(elements, canvas)
def _draw_frames(self, canvas, split=False):
frames = iter(self.frames)
current_frame = next(frames)
# Draw the elements
while len(self.elements) > 0:
element = self.elements.pop(0)
if type(element) == LineDivider:
# Don't place a Line Divider if there is nothing after it
if len(self.elements) == 0:
break
# | |
1.00 12.82 ? 67 PRO B CB 1
ATOM 2259 C CG . PRO B 1 66 ? 60.392 -37.253 -0.989 1.00 13.96 ? 67 PRO B CG 1
ATOM 2260 C CD . PRO B 1 66 ? 59.044 -37.723 -1.305 1.00 13.95 ? 67 PRO B CD 1
ATOM 2261 N N . MET B 1 67 ? 58.515 -38.127 3.253 1.00 11.76 ? 68 MET B N 1
ATOM 2262 C CA . MET B 1 67 ? 57.708 -37.557 4.292 1.00 11.60 ? 68 MET B CA 1
ATOM 2263 C C . MET B 1 67 ? 58.278 -36.239 4.862 1.00 12.46 ? 68 MET B C 1
ATOM 2264 O O . MET B 1 67 ? 57.518 -35.369 5.269 1.00 12.82 ? 68 MET B O 1
ATOM 2265 C CB . MET B 1 67 ? 57.560 -38.512 5.430 1.00 11.12 ? 68 MET B CB 1
ATOM 2266 C CG . MET B 1 67 ? 56.717 -39.747 5.079 1.00 11.28 ? 68 MET B CG 1
ATOM 2267 S SD . MET B 1 67 ? 56.571 -40.910 6.464 1.00 12.16 ? 68 MET B SD 1
ATOM 2268 C CE . MET B 1 67 ? 58.212 -41.611 6.409 1.00 11.73 ? 68 MET B CE 1
ATOM 2269 N N . CYS B 1 68 ? 59.586 -36.162 4.876 1.00 13.39 ? 69 CYS B N 1
ATOM 2270 C CA . CYS B 1 68 ? 60.352 -35.149 5.633 1.00 14.51 ? 69 CYS B CA 1
ATOM 2271 C C . CYS B 1 68 ? 59.761 -35.009 7.025 1.00 13.65 ? 69 CYS B C 1
ATOM 2272 O O . CYS B 1 68 ? 59.428 -36.032 7.695 1.00 12.52 ? 69 CYS B O 1
ATOM 2273 C CB . CYS B 1 68 ? 60.424 -33.831 4.851 1.00 14.28 ? 69 CYS B CB 1
ATOM 2274 S SG . CYS B 1 68 ? 61.026 -34.037 3.165 1.00 16.46 ? 69 CYS B SG 1
ATOM 2275 N N . SER B 1 69 ? 59.535 -33.771 7.509 1.00 13.88 ? 70 SER B N 1
ATOM 2276 C CA . SER B 1 69 ? 59.031 -33.627 8.882 1.00 12.36 ? 70 SER B CA 1
ATOM 2277 C C . SER B 1 69 ? 57.617 -34.109 9.197 1.00 11.95 ? 70 SER B C 1
ATOM 2278 O O . SER B 1 69 ? 57.326 -34.211 10.381 1.00 8.91 ? 70 SER B O 1
ATOM 2279 C CB . SER B 1 69 ? 59.107 -32.184 9.373 1.00 13.34 ? 70 SER B CB 1
ATOM 2280 O OG . SER B 1 69 ? 60.468 -31.826 9.613 1.00 15.73 ? 70 SER B OG 1
ATOM 2281 N N . THR B 1 70 ? 56.825 -34.550 8.242 1.00 9.08 ? 71 THR B N 1
ATOM 2282 C CA . THR B 1 70 ? 55.516 -35.113 8.571 1.00 10.52 ? 71 THR B CA 1
ATOM 2283 C C . THR B 1 70 ? 55.687 -36.453 9.304 1.00 9.73 ? 71 THR B C 1
ATOM 2284 O O . THR B 1 70 ? 54.786 -36.886 10.031 1.00 10.78 ? 71 THR B O 1
ATOM 2285 C CB . THR B 1 70 ? 54.600 -35.301 7.397 1.00 8.98 ? 71 THR B CB 1
ATOM 2286 O OG1 . THR B 1 70 ? 55.105 -36.257 6.471 1.00 10.25 ? 71 THR B OG1 1
ATOM 2287 C CG2 . THR B 1 70 ? 54.324 -33.916 6.633 1.00 9.13 ? 71 THR B CG2 1
ATOM 2288 N N . SER B 1 71 ? 56.887 -37.046 9.171 1.00 10.47 ? 72 SER B N 1
ATOM 2289 C CA . SER B 1 71 ? 57.239 -38.233 9.929 1.00 10.29 ? 72 SER B CA 1
ATOM 2290 C C . SER B 1 71 ? 57.290 -37.995 11.431 1.00 10.18 ? 72 SER B C 1
ATOM 2291 O O . SER B 1 71 ? 57.239 -38.953 12.255 1.00 10.87 ? 72 SER B O 1
ATOM 2292 C CB . SER B 1 71 ? 58.582 -38.766 9.449 1.00 10.80 ? 72 SER B CB 1
ATOM 2293 O OG . SER B 1 71 ? 59.674 -37.901 9.754 1.00 13.69 ? 72 SER B OG 1
ATOM 2294 N N . LYS B 1 72 ? 57.447 -36.738 11.845 1.00 12.25 ? 73 LYS B N 1
ATOM 2295 C CA . LYS B 1 72 ? 57.491 -36.418 13.302 1.00 12.15 ? 73 LYS B CA 1
ATOM 2296 C C . LYS B 1 72 ? 56.180 -36.848 13.987 1.00 12.52 ? 73 LYS B C 1
ATOM 2297 O O . LYS B 1 72 ? 56.174 -37.193 15.186 1.00 10.46 ? 73 LYS B O 1
ATOM 2298 C CB . LYS B 1 72 ? 57.762 -34.920 13.491 1.00 13.83 ? 73 LYS B CB 1
ATOM 2299 C CG . LYS B 1 72 ? 59.201 -34.571 13.185 1.00 15.89 ? 73 LYS B CG 1
ATOM 2300 C CD . LYS B 1 72 ? 59.466 -33.096 13.171 1.00 18.00 ? 73 LYS B CD 1
ATOM 2301 C CE . LYS B 1 72 ? 60.945 -32.695 13.137 1.00 20.85 ? 73 LYS B CE 1
ATOM 2302 N NZ . LYS B 1 72 ? 61.383 -32.758 11.745 1.00 23.85 ? 73 LYS B NZ 1
ATOM 2303 N N . VAL B 1 73 ? 55.079 -36.929 13.255 1.00 10.71 ? 74 VAL B N 1
ATOM 2304 C CA . VAL B 1 73 ? 53.817 -37.353 13.861 1.00 11.50 ? 74 VAL B CA 1
ATOM 2305 C C . VAL B 1 73 ? 53.879 -38.817 14.288 1.00 11.03 ? 74 VAL B C 1
ATOM 2306 O O . VAL B 1 73 ? 53.464 -39.178 15.403 1.00 11.18 ? 74 VAL B O 1
ATOM 2307 C CB . VAL B 1 73 ? 52.638 -37.132 12.908 1.00 10.45 ? 74 VAL B CB 1
ATOM 2308 C CG1 . VAL B 1 73 ? 51.368 -37.720 13.496 1.00 10.62 ? 74 VAL B CG1 1
ATOM 2309 C CG2 . VAL B 1 73 ? 52.506 -35.620 12.613 1.00 11.22 ? 74 VAL B CG2 1
ATOM 2310 N N . MET B 1 74 ? 54.477 -39.678 13.425 1.00 10.44 ? 75 MET B N 1
ATOM 2311 C CA . MET B 1 74 ? 54.621 -41.094 13.821 1.00 11.00 ? 75 MET B CA 1
ATOM 2312 C C . MET B 1 74 ? 55.501 -41.307 15.002 1.00 12.21 ? 75 MET B C 1
ATOM 2313 O O . MET B 1 74 ? 55.191 -42.055 15.947 1.00 10.69 ? 75 MET B O 1
ATOM 2314 C CB . MET B 1 74 ? 55.068 -41.902 12.606 1.00 12.47 ? 75 MET B CB 1
ATOM 2315 C CG . MET B 1 74 ? 54.855 -43.381 12.810 1.00 12.40 ? 75 MET B CG 1
ATOM 2316 S SD . MET B 1 74 ? 53.149 -43.978 12.561 1.00 14.38 ? 75 MET B SD 1
ATOM 2317 C CE . MET B 1 74 ? 52.976 -43.845 10.790 1.00 15.81 ? 75 MET B CE 1
ATOM 2318 N N . ALA B 1 75 ? 56.615 -40.550 15.026 1.00 9.88 ? 76 ALA B N 1
ATOM 2319 C CA . ALA B 1 75 ? 57.493 -40.651
# Source: fyndata/lib-cl-sii-python, file cl_sii/cte/f29/data_models.py
"""
CTE Form 29 Data Models
=======================
"""
from __future__ import annotations
import dataclasses
import logging
from dataclasses import field as dc_field
from datetime import date
from typing import (
Any,
ClassVar,
Iterator,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Type,
Union,
)
from cl_sii.rcv.data_models import PeriodoTributario
from cl_sii.rut import Rut
logger = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class CteForm29NaturalKey:
contribuyente_rut: Rut
periodo_tributario: PeriodoTributario
folio: int
@dataclasses.dataclass(frozen=True)
class CteForm29:
contribuyente_rut: Rut
periodo_tributario: PeriodoTributario
folio: int
apellido_paterno_o_razon_social: Optional[str] = dc_field(default=None, repr=False)
apellido_materno: Optional[str] = dc_field(default=None, repr=False)
nombres: Optional[str] = dc_field(default=None, repr=False)
calle_direccion: Optional[str] = dc_field(default=None, repr=False)
numero_direccion: Optional[str] = dc_field(default=None, repr=False)
comuna_direccion: Optional[str] = dc_field(default=None, repr=False)
telefono: Optional[str] = dc_field(default=None, repr=False)
correo_electronico: Optional[str] = dc_field(default=None, repr=False)
representante_legal_rut: Optional[Rut] = dc_field(default=None, repr=False)
total_a_pagar_en_plazo_legal: Optional[int] = dc_field(default=None, repr=False)
total_a_pagar_con_recargo: Optional[int] = dc_field(default=None, repr=False)
pct_condonacion: Optional[int] = dc_field(default=None, repr=False)
num_res_condonacion: Optional[str] = dc_field(default=None, repr=False)
fecha_condonacion: Optional[date] = dc_field(default=None, repr=False)
tipo_declaracion: Optional[str] = dc_field(default=None, repr=False)
banco: Optional[str] = dc_field(default=None, repr=False)
medio_pago: Optional[str] = dc_field(default=None, repr=False)
fecha_presentacion: Optional[date] = dc_field(default=None, repr=False)
extra: Mapping[int, object] = dc_field(default_factory=dict, repr=False)
"""
Any SII Form 29 codes that do not have their own field go in `extra`.
"""
_strict_codes: bool = dc_field(default=False, repr=False)
"""
Consider unknown codes as invalid and reject them.
The default is `False` because the SII Form 29 has a large number of codes and not all of them
are known to this class. Also, the SII may add new codes at any time in the future.
"""
CODE_FIELD_MAPPING: ClassVar[Mapping[int, Optional[str]]]
"""
Map SII Form 29 numeric codes to their respective field names.
If a numeric code is valid, but no field has been created for it, use `None` so that this class
considers it as "known". Numeric codes that are not included here are considered unknown or
invalid, even if they appear in the SII Form 29.
"""
CODE_FIELD_MAPPING = {
1: 'apellido_paterno_o_razon_social', # "APELLIDO PATERNO O RAZÓN SOCIAL"
2: 'apellido_materno', # Apellido Materno
3: 'contribuyente_rut', # "N DE RUT"
5: 'nombres', # Nombres
6: 'calle_direccion', # "DIRECCION"
7: 'folio', # "FOLIO"
8: 'comuna_direccion', # "COMUNA"
9: 'telefono', # Teléfono
15: 'periodo_tributario', # "PERIODO TRIBUTARIO"
20: None, # Exportaciones | Monto Neto
30: None, # "PPM ART. 84, A) PERD. ART. 90"
48: None, # "RET. IMP. ÚNICO TRAB. ART. 74 N 1 LIR"
55: 'correo_electronico', # Correo Electrónico
60: 'pct_condonacion', # "PORCENTAJE CONDONACION TGR"
62: None, # "PPM NETO DET."
77: None, # "REMANENTE DE CRÉDITO FISC."
89: None, # "IMP. DETERM. IVA DETERM."
91: 'total_a_pagar_en_plazo_legal', # "TOTAL A PAGAR DENTRO DEL PLAZO"
92: None, # "REAJUSTES"
93: None, # "Intereses y multas"
94: 'total_a_pagar_con_recargo', # "Total a pagar con recargo"
110: None, # Boletas | Cantidad
111: None, # Boletas | Débitos
115: None, # "TASA PPM 1ra. CATEGORIA"
142: None, # "VENTAS Y/O SERV. EXENTOS O NO GRAVADOS"
151: None, # "RET, TASAS DE 10 % SOBRE LAS RENT."
153: None, # "RET, TASAS DE 10% o 20% SOBRE LAS RENT."
314: 'representante_legal_rut', # Rut Representante Legal
315: 'fecha_presentacion', # "FECHA TIMBRE CAJA"
500: None, # "CANTIDAD FACTURAS"
501: None, # "LIQUIDACION DE FACTURAS"
502: None, # "DÉBITOS FACTURAS EMITIDAS"
503: None, # "CANTIDAD FACTURAS EMITIDAS"
504: None, # "REMANENTE CREDITO MES ANTERIOR"
509: None, # "CANT. DCTOS. NOTAS DE CRÉDITOS EMITIDAS"
510: None, # "DÉBITOS NOTAS DE CRÉDITOS EMITIDAS"
511: None, # "CRÉD. IVA POR DCTOS. ELECTRONICOS"
512: None, # "CANT. DE DCTOS. NOTAS DE DÉBITO EMIT."
513: None, # "NOTAS DE DÉBITOS EMITIDAS"
514: None, # IVA por documentos electrónicos recibidos | Sin derecho a Crédito
515: None, # Facturas de Compra recibidas c/ret. total y Fact. de Inicio emitida | Cantidad
516: None, # Facturas de Compra recibidas con retención parcial | Cantidad
517: None, # Facturas de Compra recibidas con retención parcial | Débitos
519: None, # "CANT. DE DCTOS. FACT. RECIB. DEL GIRO"
520: None, # "CRÉDITO REC. Y REINT./FACT. DEL GIRO"
521: None, # "MONTO NETO / INTERNAS AFECTAS"
524: None, # "CANT. FACT. ACTIVO FIJO"
525: None, # "CRÉD. RECUP. Y REINT. FACT. ACTIVO FIJO"
527: None, # "CANT. NOTAS DE CRÉDITO RECIBIDAS"
528: None, # "CRÉDITO RECUP. Y REINT NOTAS DE CRÉD"
531: None, # "CANT. NOTAS DE DÉBITO RECIBIDAS"
532: None, # "NOTAS DE DÉBITO CRÉD, RECUP. Y REINT."
534: None, # Declaraciones de Ingreso (DIN) importaciones del giro | Cantidad
535: None, # Declaraciones de Ingreso (DIN) importaciones del giro | Créd., Recup. y Reint.
536: None, # Declaraciones de Ingreso (DIN) import. activo fijo | Cantidad
537: None, # "TOTAL CRÉDITOS"
538: None, # "TOTAL DÉBITOS"
547: None, # "TOTAL DETERMINADO"
553: None, # Declaraciones de Ingreso (DIN) import. activo fijo | Créd., Recup. y Reint.
562: None, # "MONTO SIN DER. A CRED. FISCAL"
563: None, # "BASE IMPONIBLE"
564: None, # "CANT. DOC. SIN DER. A CRED. FISCAL"
584: None, # "CANT.INT.EX.NO GRAV.SIN DER. CRED.FISCAL"
585: None, # Exportaciones | Cantidad
586: None, # "CANT. VTAS. Y/O SERV. PREST. INT. EXENT."
587: None, # Facturas de Compra recibidas c/ret. total y Fact. de Inicio emitida | Monto
595: None, # "SUB TOTAL IMP. DETERMINADO ANVERSO"
596: None, # "RETENCION CAMBIO DE SUJETO"
601: None, # Fax
610: 'numero_direccion', # Nº Dirección
708: None, # "CANT. NOTAS CRED. EMIT. VALES MAQ. IVA"
709: None, # "MONTO NOTAS CRED. EMIT. VALES MAQ. IVA."
755: None, # IVA Postergado
756: None, # Casillero (checkbox) "Postergación pago de IVA"
761: None, # "CANT. FACT. SUPERMERCADOS Y SIMILARES"
762: None, # "CRÉD. FACT. SUPERMERCADOS Y SIMILARES"
763: None, # "CANT. FACT. POR VENTA BIENES INMUEBLES"
764: None, # "DÉB. FACT. POR VENTA BIENES INMUEBLES"
765: None, # "CANT. FACT. ADQ. O CONSTR. B. INMUEBLES"
766: None, # "DÉB. FACT. ADQ. O CONSTR. B. INMUEBLES"
795: None, # "MONTO DE CONDONACION SII"
915: 'fecha_condonacion', # "Fecha de Vigencia de Condonacion"
922: 'num_res_condonacion', # "NUMERO RESOLUCION SII AUTO. CONDONACION"
9906: None, # "FECHA PRESENTACION DECL. PRIMITIVA"
}
def __post_init__(self) -> None:
# -----Set Fields from Extra-----
new_extra: MutableMapping[int, object] = {}
for code, value in self.extra.items():
field_name = self.get_field_name(code, strict=self._strict_codes)
if field_name is None:
# If there's no field for the code; leave it in `extra`.
new_extra[code] = value
else:
# There's a field for the code; remove it from `extra`.
current_field_value = getattr(self, field_name)
if current_field_value is None:
# The current field's value is empty, so we set it.
object.__setattr__(self, field_name, value)
else:
# The current field's value is not empty and we do not overwrite it. We give
# precedence to the current field's value because it may have been previously
# converted to a different data type (e.g. `periodo_tributario` has a code, but
# the code's value must be parsed and converted to an instance of
# `PeriodoTributario`).
pass
object.__setattr__(self, 'extra', new_extra)
# -----Validations-----
validate_field_type(self, 'contribuyente_rut', Rut)
validate_field_type(self, 'periodo_tributario', PeriodoTributario)
validate_field_type(self, 'folio', int)
validate_field_type(self, 'representante_legal_rut', (Rut, type(None)))
validate_field_type(self, 'fecha_presentacion', (date, type(None)))
if not all(isinstance(code, int) for code in self.extra):
raise TypeError("All codes in 'extra' must be integers")
# TODO: Validate the type of the other fields.
# -----Warn About Unknown Codes-----
if not self._strict_codes:
unknown_codes = self.get_all_codes(strict=False) - self.get_all_codes(strict=True)
if unknown_codes:
logger.warning(
"%s(contribuyente_rut=%r, periodo_tributario=%r, folio=%r)"
" contains invalid or unknown SII Form 29 codes: %s.",
self.__class__.__name__,
self.contribuyente_rut,
self.periodo_tributario,
self.folio,
', '.join(str(code) for code in sorted(unknown_codes)),
)
@classmethod
def get_field_name(cls, code: int, strict: bool = True) -> Optional[str]:
"""
Return the field name for the SII Form 29 code if a field name has been defined for the
code. Return ``None`` if the code is known, but no field name is associated with it.
:param code: SII Form 29 code.
:param strict: Whether to consider unknown codes as invalid and raise an exception.
:raises KeyError: If ``code`` is invalid and ``strict`` is ``True``.
"""
if not isinstance(code, int):
raise TypeError(f"An integer is required (got type '{code.__class__.__name__}')")
try:
return cls.CODE_FIELD_MAPPING[code]
except KeyError as e:
if strict:
raise KeyError(f"Invalid | |
# Source: hashberg-io/bases
"""
Functions to generate random data.
"""
# pylint: disable = global-statement
from contextlib import contextmanager
from itertools import chain, islice
from random import Random # pylint: disable = import-self
from types import MappingProxyType
from typing import Any, Dict, Iterator, Mapping, Optional
from typing_validation import validate
from .alphabet import Alphabet
from .encoding import BaseEncoding, SimpleBaseEncoding, ZeropadBaseEncoding, BlockBaseEncoding, FixcharBaseEncoding
_default_options: Mapping[str, Any] = MappingProxyType({
"min_bytes": 0,
"max_bytes": 16,
"min_chars": 0,
"max_chars": 16,
})
_options: Mapping[str, Any] = MappingProxyType(_default_options)
_rand: Random = Random(0)
def reset_options() -> None:
"""
Resets random generation options to their default values.
"""
global _options
global _rand
_options = _default_options
_rand = Random(0)
def default_options() -> Mapping[str, Any]:
"""
Readonly view of the default random generation options.
"""
return _default_options
def get_options() -> Mapping[str, Any]:
"""
Readonly view of the current random generation options.
"""
return _options
@contextmanager
def options(*,
seed: Optional[int] = None,
min_bytes: Optional[int] = None,
max_bytes: Optional[int] = None,
min_chars: Optional[int] = None,
max_chars: Optional[int] = None,) -> Iterator[None]:
"""
Returns with-statement context manager for temporary option setting:
.. code-block:: python
with options(**options):
for value in rand_data(num_samples, encoding):
...
See :func:`set_options` for a description of the options.
"""
# pylint: disable = too-many-locals
for arg in (seed, min_bytes, max_bytes, min_chars, max_chars):
validate(arg, Optional[int])
global _options
global _rand
try:
_old_options = _options
_old_rand = _rand
set_options(seed=seed,
min_bytes=min_bytes, max_bytes=max_bytes,
min_chars=min_chars, max_chars=max_chars,)
yield
finally:
_options = _old_options
_rand = _old_rand
def set_options(*,
seed: Optional[int] = None,
min_bytes: Optional[int] = None,
max_bytes: Optional[int] = None,
min_chars: Optional[int] = None,
max_chars: Optional[int] = None,) -> None:
"""
Permanently sets random generation options:
.. code-block:: python
seed: int # set new random number generator, with this seed
min_bytes: int # min length of `bytes` value
max_bytes: int # max length of `bytes` value
min_chars: int # min length of `str` value
max_chars: int # max length of `str` value
"""
# pylint: disable = too-many-branches, too-many-locals, too-many-statements
for arg in (seed, min_bytes, max_bytes, min_chars, max_chars):
validate(arg, Optional[int])
global _options
global _rand
# set newly passed options
_new_options: Dict[str, Any] = {}
if seed is not None:
_rand = Random(seed)
if min_bytes is not None:
if min_bytes < 0:
raise ValueError("Value for min_bytes is negative.")
_new_options["min_bytes"] = min_bytes
if max_bytes is not None:
if max_bytes < 0:
raise ValueError("Value for max_bytes is negative.")
_new_options["max_bytes"] = max_bytes
if min_chars is not None:
if min_chars < 0:
raise ValueError("Value for min_chars is negative.")
_new_options["min_chars"] = min_chars
if max_chars is not None:
if max_chars < 0:
raise ValueError("Value for max_chars is negative.")
_new_options["max_chars"] = max_chars
# pass-through other options with former values
for k, v in _options.items():
if k not in _new_options:
_new_options[k] = v
# check compatibility conditions
if _new_options["min_bytes"] > _new_options["max_bytes"]:
raise ValueError("Value for min_bytes is larger than value for max_bytes.")
if _new_options["min_chars"] > _new_options["max_chars"]:
raise ValueError("Value for min_chars is larger than value for max_chars.")
# update options
_options = MappingProxyType(_new_options)
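# Usage sketch (illustrative only): reseed the generator and cap generated
# bytestrings at 8 bytes. The helper name is hypothetical and the function is
# only defined here, never called at import time.
def _example_set_options():
    set_options(seed=42, min_bytes=1, max_bytes=8)
    return dict(get_options())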
def rand_bytes(n: Optional[int] = None, *, encoding: Optional[BaseEncoding] = None) -> Iterator[bytes]:
"""
Generates a stream of random :obj:`bytes` objects.
    If a number ``n`` is given, that number of samples is yielded.
If an encoding ``encoding`` is given, only bytes valid for that encoding are yielded.
Example usage:
>>> my_random_bytes = list(random.rand_bytes(4, encoding=base10))
>>> [list(b) for b in my_random_bytes]
[[0, 30, 135, 156, 223, 90, 134, 83, 6, 243, 245],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, 216, 87, 1, 2],
[70, 98, 190, 187, 66, 224, 178],
[0, 96, 63]]
:param n: the number of samples
:type n: :obj:`int` or :obj:`None`, *optional*
:param encoding: optional encoding for which the bytestrings must be valid
:type encoding: :obj:`~bases.encoding.base.BaseEncoding` or :obj:`None`, *optional*
"""
validate(n, Optional[int])
validate(encoding, Optional[BaseEncoding])
if encoding is None:
return rand_raw_bytes(n)
if isinstance(encoding, SimpleBaseEncoding):
return _rand_bytes_simple_enc(n, encoding)
if isinstance(encoding, ZeropadBaseEncoding):
return _rand_bytes_zeropad_enc(n, encoding)
if isinstance(encoding, BlockBaseEncoding):
return _rand_bytes_block_enc(n, encoding)
if isinstance(encoding, FixcharBaseEncoding):
return _rand_bytes_fixedchar_enc(n, encoding)
raise ValueError(f"Unsupported encoding type {type(encoding)}")
def rand_raw_bytes(n: Optional[int] = None, *, min_bytes: Optional[int] = None, max_bytes: Optional[int] = None) -> Iterator[bytes]:
"""
Generates a stream of random :obj:`bytes` objects.
    If a number ``n`` is given, that number of samples is yielded.
The optional ``min_bytes`` and ``max_bytes`` parameters can be used to set a minimum/maximum length
for the :obj:`bytes` objects: if :obj:`None`, the values are fetched from :func:`get_options`.
:param n: the number of samples
:type n: :obj:`int` or :obj:`None`, *optional*
:param min_bytes: the minimum length for the bytestrings
:type min_bytes: :obj:`int` or :obj:`None`, *optional*
:param max_bytes: the maximum length for the bytestrings
:type max_bytes: :obj:`int` or :obj:`None`, *optional*
"""
validate(n, Optional[int])
validate(min_bytes, Optional[int])
validate(max_bytes, Optional[int])
if n is not None and n < 0:
raise ValueError()
if min_bytes is None:
min_bytes = _options["min_bytes"]
if max_bytes is None:
max_bytes = _options["max_bytes"]
rand = _rand
# main yielding loop
yielded = 0
while n is None or yielded < n:
# sample random length
l = rand.randint(min_bytes, max_bytes)
# yield random unsigned integer filling l bytes
i = rand.randrange(0, 256**l)
yield i.to_bytes(l, byteorder="big")
yielded += 1
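# Usage sketch (illustrative only): with n=None rand_raw_bytes is an endless
# generator, so it is typically consumed with islice; forcing min_bytes and
# max_bytes to the same value pins every sample to that length. The helper name
# is hypothetical.
def _example_rand_raw_bytes():
    samples = list(islice(rand_raw_bytes(min_bytes=4, max_bytes=4), 3))
    assert all(len(b) == 4 for b in samples)
    return samples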
def _rand_bytes_simple_enc(n: Optional[int], _: SimpleBaseEncoding) -> Iterator[bytes]:
if n is not None and n < 0:
raise ValueError()
min_bytes = _options["min_bytes"]
max_bytes = _options["max_bytes"]
rand = _rand
# main yielding loop
yielded = 0
while n is None or yielded < n:
# sample random length
l = rand.randint(min_bytes, max_bytes)
# yield random unsigned integer filling l bytes with no leading zero bytes
if l == 0:
i = 0
else:
i = rand.randrange(256**(l-1), 256**l)
yield i.to_bytes(l, byteorder="big")
yielded += 1
def _rand_bytes_zeropad_enc(n: Optional[int], _: ZeropadBaseEncoding) -> Iterator[bytes]:
if n is not None and n < 0:
raise ValueError()
min_bytes = _options["min_bytes"]
max_bytes = _options["max_bytes"]
rand = _rand
# main yielding loop
yielded = 0
while n is None or yielded < n:
# sample random length
l = rand.randint(min_bytes, max_bytes)
# sample random number of leading zero bytes
z = rand.randint(0, l)
# yield random unsigned integer filling l-z bytes
if l == z:
i = 0
else:
i = rand.randrange(256**(l-z-1), 256**(l-z))
yield i.to_bytes(l, byteorder="big")
yielded += 1
def _rand_bytes_block_enc(n: Optional[int], encoding: BlockBaseEncoding) -> Iterator[bytes]:
if n is not None and n < 0:
raise ValueError()
min_bytes = _options["min_bytes"]
max_bytes = _options["max_bytes"]
rand = _rand
# pre-compute valid bytestring lengths for block base encoding
block_nbytes = encoding.block_nbytes
nbytes2nchars = encoding.nbytes2nchars
valid_lengths = [l for l in range(min_bytes, max_bytes+1)
if l%block_nbytes == 0 or l%block_nbytes in nbytes2nchars]
# main yielding loop
yielded = 0
while n is None or yielded < n:
# sample random valid length
l = rand.choice(valid_lengths)
# yield random unsigned integer filling l bytes
i = rand.randrange(0, 256**l)
yield i.to_bytes(l, byteorder="big")
yielded += 1
def _rand_bytes_fixedchar_enc(n: Optional[int], _: FixcharBaseEncoding) -> Iterator[bytes]:
return rand_raw_bytes(n)
def rand_str(n: Optional[int] = None, *, encoding: Optional[BaseEncoding]=None, alphabet: Optional[Alphabet]=None) -> Iterator[str]:
"""
Generates a stream of random strings.
    If a number ``n`` is given, that number of samples is yielded.
Exactly one of ``encoding`` or ``alphabet`` must be given:
- if an ``encoding`` is given, only strings valid for that encoding are yielded
- if an ``alphabet`` is given, only strings valid for that alphabet are yielded
Example usage:
>>> my_random_strings = list(random.rand_str(4, encoding=base32))
>>> my_random_strings
['2CQ7ZT6WNI', 'IGQJTGA', 'V6GW3UN64QDAFZA7', 'PUEMOPJ4']
:param n: the number of samples
:type n: :obj:`int` or :obj:`None`, *optional*
:param encoding: optional encoding for which the strings must be valid
:type encoding: :obj:`~bases.encoding.base.BaseEncoding` or :obj:`None`, *optional*
    :param alphabet: optional alphabet for which the strings must be valid
:type alphabet: :obj:`~bases.alphabet.abstract.Alphabet` or :obj:`None`, *optional*
:raises ValueError: unless exactly one of ``encoding`` or ``alphabet`` is specified
    :raises ValueError: if an instance of an unsupported (i.e. custom) base encoding subclass is passed to ``encoding``
"""
validate(n, Optional[int])
validate(encoding, Optional[BaseEncoding])
validate(alphabet, Optional[Alphabet])
if encoding is None:
if alphabet is None:
raise ValueError("One of 'encoding' or 'alphabet' must be specified.")
return _rand_alphabet_string(n, alphabet)
if alphabet is not None:
raise ValueError("Exactly one of 'encoding' or 'alphabet' must be specified.")
if isinstance(encoding, SimpleBaseEncoding):
return _rand_str_simple_enc(n, encoding)
if isinstance(encoding, ZeropadBaseEncoding):
return _rand_str_zeropad_enc(n, encoding)
import numpy as np
import sys
import time
import gc
from sklearn import neighbors
from sklearn import svm
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#from svmutil import *
from collections import Counter
from log_io import init_logging
###
def gene_projected_lda_feature(train_x_matrix, train_y_vector):
norm_time = 0
start_time = time.time()
train_norm_vector = np.linalg.norm(train_x_matrix, axis=0, ord=np.inf)[None, :]
train_x_matrix = np.true_divide(train_x_matrix, train_norm_vector, where=(train_norm_vector!=0))
norm_time = time.time() - start_time
train_x_matrix[np.isnan(train_x_matrix)] = 0
train_x_matrix[np.isinf(train_x_matrix)] = 1
min_class = min(train_y_vector)
max_class = max(train_y_vector)
ret_feature_matrix = []
lda_time = 0
start_time = time.time()
clf = LinearDiscriminantAnalysis()
lda_time = lda_time + time.time() - start_time
for i in range(min_class, max_class+1):
temp_y_vector = np.where(train_y_vector==i, 0, 1)
#print "FIT"
#print len(train_x_matrix)
#print len(temp_y_vector)
start_time = time.time()
clf.fit(train_x_matrix, temp_y_vector)
lda_time = lda_time + time.time() - start_time
ret_feature_matrix.append(clf.coef_)
ret_feature_matrix = np.squeeze(np.array(ret_feature_matrix))
ret_feature_matrix = np.absolute(ret_feature_matrix)
#print ret_feature_matrix
#print "Function end: gen_projected_lda_feature"
return ret_feature_matrix, norm_time, lda_time
def bi_gene_lda_model(train_x_matrix, train_y_vector):
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
#print train_x_matrix.shape
#print train_y_vector.shape
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
return clf, train_time
def gene_lda_model(train_x_matrix, train_y_vector):
clf = LinearDiscriminantAnalysis()
#print train_x_matrix.shape
#print train_y_vector.shape
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
return clf, train_time
def run_lda(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, proba=False):
clf, train_time = gene_lda_model(train_x_matrix, train_y_vector)
if proba == True:
predict_y = clf.predict(test_x_matrix)
start_time = time.time()
predict_y_proba = clf.predict_proba(test_x_matrix)
test_time = time.time() - start_time
else:
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
predict_y_proba = None
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
def run_rf(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, samples_leaf=20, proba=False):
np.random.seed(0)
#positive_index = np.where(train_y_vector==1)
#negative_index = np.where(train_y_vector==0)
#len_positive = len(np.where(train_y_vector == 1)[0])
#len_negative = len(train_y_vector) - len_positive
#logger.info("positive: " + str(len_positive))
#logger.info("negative: " + str(len_negative))
#if len_positive > len_negative:
# add_pare = '-w0 ' + str(len_positive/len_negative) + ' -w1 1'
#else:
# add_pare = '-w1 ' + str(len_negative/len_positive) + ' -w0 1'
clf = RandomForestClassifier(min_samples_leaf=samples_leaf, class_weight='balanced')
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba is False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
# train_x_matrix: row_num * col_num, train_y_vector: vector
def run_dt(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors, proba=False):
clf = DecisionTreeClassifier(random_state=0, class_weight='balanced')
#n_estimators = 10
#clf = OneVsRestClassifier(BaggingClassifier(neighbors.KNeighborsClassifier(n_neighbors, weights="distance"), max_samples=1.0 / n_estimators, n_estimators=n_estimators))
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba == False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
# train_x_matrix: row_num * col_num, train_y_vector: vector
def run_knn(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors, proba=False):
clf = neighbors.KNeighborsClassifier(n_neighbors, weights="distance")
#n_estimators = 10
#clf = OneVsRestClassifier(BaggingClassifier(neighbors.KNeighborsClassifier(n_neighbors, weights="distance"), max_samples=1.0 / n_estimators, n_estimators=n_estimators))
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba == False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
# run knn method with returning distance values
# train_x_matrix: numpy matrix with N * A: N: is number of training instances, A is number of attributes
# train_y_vector: numpy vector N * 1
# test_x_matrix: numpy matrix with N1 * A: N1 is the number of testing instances
# test_y_vector: numpy vector N1 * 1
# n_neighbors: top K value
# it returns three values
# distances: a numpy matrix D with N1 * n_neighbors, D_ij means the distance from test instance i to the jth nearest training instance
# indexes: a numpy matrix I with N1 * n_neighbors, it records the corresponding index for the jth nearest training instance
# the distance calculation: from [A11, A12, A13] to [A21, A22, A23] is dist = sqrt((A11-A21)^2 + (A12-A22)^2 + (A13-A23)^2)
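# Illustrative sketch of the returned shapes (made-up numbers): with 2 test instances
# and n_neighbors=3, both `distances` and `indexes` are 2 x 3 arrays, e.g.
#   distances = [[0.0, 0.7, 1.2], [0.3, 0.9, 1.1]]
#   indexes   = [[5,   0,  17],   [2,   8,   4]]
# and, for example, the distance from [1, 2, 3] to [4, 6, 3] is sqrt((1-4)^2 + (2-6)^2 + (3-3)^2) = 5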
def run_knn_with_dist(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors=1):
clf = neighbors.KNeighborsClassifier(n_neighbors, weights="distance")
min_class = min(train_y_vector)
max_class = max(train_y_vector)
distance_matrix = []
for i in range(min_class, max_class+1):
train_index = np.where(train_y_vector==i)[0]
knn_model = clf.fit(train_x_matrix[train_index, :], train_y_vector[train_index])
distances, indexes = knn_model.kneighbors(test_x_matrix, n_neighbors, True)
distance_matrix.append(distances)
distance_matrix = np.array(distance_matrix).reshape(max_class-min_class+1, len(test_y_vector))
distance_matrix = distance_matrix.T
start_time = time.time()
knn_model = clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y_vector = knn_model.predict(test_x_matrix)
test_time = time.time() - start_time
return distance_matrix, predict_y_vector, train_time, test_time
def get_pred_matrix(train_y_vector, index_matrix):
x_row, x_col = index_matrix.shape
pred_matrix = np.zeros([x_row, x_col]).astype(int)
for i in range(0, x_row):
pred_matrix[i] = train_y_vector[index_matrix[i]]
return pred_matrix
# For each test instance, let d1 be the distance to the nearest training instance of the current class
# and d2 the distance to the nearest training instance of any other class; the probability of belonging
# to the current class is then 1 - d1/(d1 + d2), and of not belonging is 1 - d2/(d1 + d2).
# This function shifts all class labels onto a contiguous range starting at 0.
# Returns a test_x_matrix_row * num_classes matrix holding the probability distribution over classes;
# a smaller value corresponds to a higher probability.
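# Worked example (illustrative numbers): if the nearest same-class training instance is at
# d1 = 1.0 and the nearest other-class instance is at d2 = 3.0, then
#   P(current class)     = 1 - 1.0/(1.0 + 3.0) = 0.75
#   P(not current class) = 1 - 3.0/(1.0 + 3.0) = 0.25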
def run_knn_with_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector):
train_row, train_col = train_x_matrix.shape
test_row, test_col = test_x_matrix.shape
min_class = min(train_y_vector)
if min_class != 0:
train_y_vector = train_y_vector - min_class
min_class = 0
max_class = max(train_y_vector)
num_classes = max_class + 1
    # n_neighbors was left undefined in the original call; default to 1, matching run_knn_with_dist
    n_neighbors = 1
    dist_matrix, predict_y_vector, train_time, test_time = run_knn_with_dist(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors)
    start_time = time.time()
    # refit a single model on the full training set to recover neighbour indexes and class probabilities
    knn_model = neighbors.KNeighborsClassifier(n_neighbors, weights="distance").fit(train_x_matrix, train_y_vector)
    _, index_matrix = knn_model.kneighbors(test_x_matrix, n_neighbors, True)
    pred_matrix = get_pred_matrix(train_y_vector, index_matrix)
    max_dist = dist_matrix.max() + 1.0
    predict_proba_matrix = np.full([test_row, num_classes], max_dist)
    predict_proba_matrix = knn_model.predict_proba(test_x_matrix)
#for i in range(0, test_row):
# instance_pred_vector = pred_matrix[i]
# pred_len = len(instance_pred_vector)
# for j in range(0, pred_len):
# c = instance_pred_vector[j]
# if predict_proba_matrix[i][c] != max_dist:
# continue
# predict_proba_matrix[i][c] = dist_matrix[i][j]
#predict_proba_matrix = predict_proba_matrix
#test_time = test_time + time.time() - start_time
#for i in range(0, test_row):
# proba_vector = predict_proba_matrix[i]
# vector_min = proba_vector.min()
# predict_proba_matrix[i] = 1- (predict_proba_matrix[i] - vector_min)/(max_dist - vector_min)
#predict_proba_matrix = (predict_proba_matrix - predict_proba_matrix.min(axis=0))/ (predict_proba_matrix.max(axis=0) - predict_proba_matrix.min(axis=0))
#print predict_proba_matrix
#for i in range(0, test_row):
# proba_vector = predict_proba_matrix[i]
# null_index = np.where(proba_vector==-1)
# not_null_index = np.where(proba_vector!=-1)[0]
# if len(not_null_index) == 1:
# predict_proba_matrix[i][not_null_index] = 1
# else:
# proba_vector = np.delete(proba_vector, null_index)
# sum_proba = sum(proba_vector)
# for j in not_null_index:
# predict_proba_matrix[i][j] = predict_proba_matrix[i][j]/sum_proba
# predict_proba_matrix[i][null_index] = 0
return predict_proba_matrix, train_time, test_time
# Libsvm
def run_sklearn_libsvm(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, proba=False):
train_y_vector = train_y_vector- min(train_y_vector)
test_y_vector = test_y_vector - min(test_y_vector)
train_x_matrix = train_x_matrix.astype(np.float64)
train_y_vector = train_y_vector.astype(np.float64)
test_x_matrix = test_x_matrix.astype(np.float64)
test_y_vector = test_y_vector.astype(np.float64)
weight_array = []
unique, counts = np.unique(train_y_vector, return_counts=True)
count_all = len(train_y_vector)
for i in counts:
weight_array.append(float(1)/i)
weight_array = np.array(weight_array)
start_time = time.time()
model = svm.libsvm.fit(train_x_matrix, train_y_vector, class_weight=weight_array)
train_time = time.time() - start_time
start_time = time.time()
predict_y = svm.libsvm.predict(test_x_matrix, *model)
test_time = time.time() - start_time
if proba is False:
predict_y_proba = None
else:
predict_y_proba = svm.libsvm.predict_proba(test_x_matrix, *model)
#predict_y_proba = None
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
#return accuracy_score(test_y_vector, predict_y), predict_y, train_time, test_time
def banlanced_binary_processing(train_x_matrix, train_y_vector, banlanced_ratio=3):
positive_index = np.where(train_y_vector==0.0)[0]
negative_index = np.where(train_y_vector==1.0)[0]
positive_len = len(positive_index)
negative_len = len(negative_index)
if positive_len > negative_len:
select_len = banlanced_ratio * negative_len
if positive_len > select_len:
select_index = np.random.choice(positive_len, select_len, replace=False)
positive_index = positive_index[select_index]
all_index = np.append(positive_index, negative_index)
train_x_matrix = train_x_matrix[all_index, :]
train_y_vector = train_y_vector[all_index]
else:
select_len = banlanced_ratio * positive_len
if negative_len > select_len:
select_index = np.random.choice(negative_len, select_len, replace=False)
negative_index = negative_index[select_index]
all_index = np.append(negative_index, positive_index)
train_x_matrix = train_x_matrix[all_index, :]
train_y_vector = train_y_vector[all_index]
return train_x_matrix, train_y_vector
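# Usage sketch (illustrative): cap the majority class at 3x the minority class before
# training a binary classifier.
#   bal_x, bal_y = banlanced_binary_processing(train_x_matrix, train_y_vector, banlanced_ratio=3)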
def libsvm_load_predict(test_x_matrix, test_y_vector, save_file):
model = svm_load_model(save_file)
predict_y, predict_acc, predict_y_proba = svm_predict(test_y_vector, test_x_matrix, model, '-b 1')
print(predict_acc, predict_y, predict_y_proba)
#libsvm from the author's website
def run_libsvm(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, logger, proba=False, save_file='', weight=True):
train_y_vector = train_y_vector- min(train_y_vector)
test_y_vector = test_y_vector - min(test_y_vector)
#train_x_matrix = train_x_matrix.astype(np.float64)
#train_y_vector = train_y_vector.astype(np.float64)
#test_x_matrix = test_x_matrix.astype(np.float64)
#test_y_vector = test_y_vector.astype(np.float64)
if weight == True:
positive_index = np.where(train_y_vector==1)
negative_index = np.where(train_y_vector==0)
len_positive = len(np.where(train_y_vector == 1)[0])
len_negative = len(train_y_vector) - len_positive
logger.info("positive: " + str(len_positive))
logger.info("negative: " + str(len_negative))
if len_positive > len_negative:
add_pare = '-w0 ' + str(len_positive/len_negative) + ' -w1 1'
else:
add_pare = '-w1 ' + str(len_negative/len_positive) + ' -w0 1'
else:
add_pare = ''
train_x_matrix = train_x_matrix.tolist()
train_y_vector = train_y_vector.astype(np.integer).tolist()
test_x_matrix = test_x_matrix.tolist()
test_y_vector = test_y_vector.astype(np.integer).tolist()
#svm_model.predict = lambda self, x: svm_predict([0], [x], self)[0][0]
#prob = svm_problem([1,-1], [[1,0,1], [-1,0,-1]])
prob = svm_problem(train_y_vector, train_x_matrix)
#logger.info("libsvm parameter: " | |
in PHYSICAL kpc; deltaC is in units of critical density """
# return (3.0 * mass / (4.0 * np.pi * critical_density * deltaC))**(1./3.)
        collectRadii = np.zeros(len(sim.Densities), dtype = np.float64)  # radii (in kpc) at which each target density in sim.Densities (Msun/kpc**3) is reached
collectMasses = np.zeros(len(sim.Densities), dtype = np.float64) # empty array of masses at desired radii
get_virial_mr(sim.Densities.d, pmass[::-1], periodic_r[::-1], collectRadii, collectMasses)
#self.radii['virial'] = self.obj.yt_dataset.quan(collectRadii[0], 'kpc')
PiFac = 4./3. * np.pi
for ir,rvname in enumerate(['200c','500c','2500c']):
self.radii['r'+rvname] = self.obj.yt_dataset.quan(collectRadii[ir], 'kpc')
self.masses['m'+rvname] = self.obj.yt_dataset.quan(collectMasses[ir], 'Msun')
#self.masses['virial'] = 100.*critical_density * PiFac*self.radii['virial']**3
#self.masses['m200c'] = 200.*critical_density * PiFac*self.radii['r200c']**3
#self.masses['m500c'] = 500.*critical_density * PiFac*self.radii['r500c']**3
#self.masses['m2500c'] = 2500.*critical_density * PiFac*self.radii['r2500c']**3
#print('radii:',collectMasses,collectRadii,self.radii['r200c'],self.radii['r500c'],self.masses['m200c'],self.masses['m500c'],collectMasses[1]/self.masses['m200c'],collectMasses[2]/self.masses['m500c'])
# eq 1 of Mo et al 2002
self.radii['r200'] = (sim.G * mass / (100.0 * sim.Om_z * sim.H_z**2))**(1./3.)
# eq 1 of Mo et al 2002
vc = (np.sqrt( sim.G * mass / self.radii['r200'] )).to('km/s')
# eq 4 of Mo et al 2002 (K)
vT = self.obj.yt_dataset.quan(3.6e5 * (vc.d / 100.0)**2, 'K')
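        # written out, the relations used above are:
        #   r200  = ( G * M / (100 * Omega_m(z) * H(z)^2) )**(1/3)
        #   V_c   = sqrt( G * M / r200 )
        #   T_vir = 3.6e5 K * (V_c / 100 km/s)**2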
# convert units
#self.radii['virial'] = self.radii['virial'].to(self.obj.units['length'])
self.radii['r200c'] = self.radii['r200c'].to(self.obj.units['length'])
self.radii['r500c'] = self.radii['r500c'].to(self.obj.units['length'])
self.radii['r2500c'] = self.radii['r2500c'].to(self.obj.units['length'])
self.radii['r200'] = self.radii['r200'].to(self.obj.units['length'])
vc = vc.to(self.obj.units['velocity'])
vT = vT.to(self.obj.units['temperature'])
self.temperatures['virial'] = vT
for k in self.masses:
if isinstance(self.masses[k], float):
self.masses[k] = self.obj.yt_dataset.quan(self.masses[k], self.obj.units['mass'])
else:
self.masses[k] = self.masses[k].to(self.obj.units['mass'])
self.virial_quantities = dict(
#radius = self.radii['virial'],
r200c = self.radii['r200c'],
r500c = self.radii['r500c'],
r2500c = self.radii['r2500c'],
r200 = self.radii['r200'],
circular_velocity = vc,
temperature = vT
)
def _calculate_velocity_dispersions(self):
"""Calculate velocity dispersions for the various components."""
def get_sigma(filtered_v,filtered_m):
if len(filtered_v) == 0: return 0.0
mv = np.array([filtered_m[i]*filtered_v[i] for i in range(len(filtered_v))])
v_std = np.std(mv,axis=0)/np.mean(filtered_m)
return np.sqrt(v_std.dot(v_std))
ptypes = self.obj.data_manager.ptype[self.global_indexes]
v = self.obj.data_manager.vel[self.global_indexes]
m = self.obj.data_manager.mass[self.global_indexes]
self.velocity_dispersions['all'] = get_sigma(v,m)
self.velocity_dispersions['dm'] = get_sigma(v[ ptypes == ptype_ints['dm']],m[ ptypes == ptype_ints['dm']])
self.velocity_dispersions['baryon'] = get_sigma(v[(ptypes == ptype_ints['gas']) | (ptypes == ptype_ints['star'])],m[(ptypes == ptype_ints['gas']) | (ptypes == ptype_ints['star'])])
self.velocity_dispersions['gas'] = get_sigma(v[ ptypes == ptype_ints['gas']],m[ ptypes == ptype_ints['gas']])
self.velocity_dispersions['stellar'] = get_sigma(v[ ptypes == ptype_ints['star']],m[ ptypes == ptype_ints['star']])
#if np.log10(self.masses['total'])>12: print 'sigma',np.log10(self.masses['total']),self.velocity_dispersions['all'],self.velocity_dispersions['dm'],self.velocity_dispersions['gas'],self.velocity_dispersions['stellar']
for k,v in six.iteritems(self.velocity_dispersions):
self.velocity_dispersions[k] = self.obj.yt_dataset.quan(v, self.obj.units['velocity'])
def _calculate_angular_quantities(self):
"""Calculate angular momentum, spin, max_vphi and max_vr."""
pos = self.obj.yt_dataset.arr(self.obj.data_manager.pos[self.global_indexes], self.obj.units['length'])
vel = self.obj.yt_dataset.arr(self.obj.data_manager.vel[self.global_indexes], self.obj.units['velocity'])
mass = self.obj.yt_dataset.arr(self.obj.data_manager.mass[self.global_indexes], self.obj.units['mass'])
ptype = self.obj.yt_dataset.arr(self.obj.data_manager.ptype[self.global_indexes], self.obj.units['mass'])
px = mass * vel[:,0]
py = mass * vel[:,1]
pz = mass * vel[:,2]
x = (pos[:,0] - self.pos[0]).to('km')
y = (pos[:,1] - self.pos[1]).to('km')
z = (pos[:,2] - self.pos[2]).to('km')
Lx = np.sum( y*pz - z*py )
Ly = np.sum( z*px - x*pz )
Lz = np.sum( x*py - y*px )
L = np.sqrt(Lx**2 + Ly**2 + Lz**2)
#self.angular_momentum = self.obj.yt_dataset.quan(L, Lx.units)
self.angular_momentum_vector = self.obj.yt_dataset.arr([Lx.d,Ly.d,Lz.d], Lx.units)
# Bullock spin or lambda prime
#self.spin = self.angular_momentum / (1.4142135623730951 *
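        # Bullock-style spin parameter: lambda' = |L| / (sqrt(2) * M_total * V_circ * R200)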
if self.virial_quantities['r200'] > 0:
self.virial_quantities['spin_param'] = self.obj.yt_dataset.quan(L, Lx.units) / (1.4142135623730951 *
self.masses['total'] *
self.virial_quantities['circular_velocity'].to('km/s') *
self.virial_quantities['r200'].to('km'))
else:
self.virial_quantities['spin_param'] = self.obj.yt_dataset.quan(0.0, '')
PHI = np.arctan2(Ly.d,Lx.d)
THETA = np.arccos(Lz.d/L.d)
ex = np.sin(THETA) * np.cos(PHI)
ey = np.sin(THETA) * np.sin(PHI)
ez = np.cos(THETA)
from caesar.utils import rotator
ALPHA = np.arctan2(Ly.d, Lz.d)
p = rotator(np.array([ex,ey,ez]), ALPHA)
BETA = np.arctan2(p[0],p[2])
self.rotation_angles = dict(ALPHA=ALPHA, BETA=BETA)
## need max_vphi and max_vr
rotated_pos = rotator(pos.d, ALPHA, BETA)
rotated_vel = rotator(vel.d, ALPHA, BETA)
r = np.sqrt(rotated_pos[:,0]**2 + rotated_pos[:,1]**2)
vphi = (rotated_vel[:,0] * -1. * rotated_pos[:,1] + rotated_vel[:,1] * rotated_pos[:,0]) / r
vr = (rotated_vel[:,0] * rotated_pos[:,0] + rotated_vel[:,1] * rotated_pos[:,1]) / r
self.max_vphi = self.obj.yt_dataset.quan(np.max(vphi), self.obj.units['velocity'])
self.max_vr = self.obj.yt_dataset.quan(np.max(vr) , self.obj.units['velocity'])
def _calculate_radial_quantities(self):
""" Calculate various component radii and half radii """
from caesar.group_funcs import get_half_mass_radius, get_full_mass_radius
r = np.empty(len(self.global_indexes), dtype=np.float64)
get_periodic_r(self.obj.simulation.boxsize.d, self.pos.d, self.obj.data_manager.pos[self.global_indexes], r)
rsort = np.argsort(r)
r = r[rsort]
mass = self.obj.data_manager.mass[self.global_indexes][rsort]
ptype = self.obj.data_manager.ptype[self.global_indexes][rsort]
radial_categories = dict(
total = [ptype_ints['gas'],ptype_ints['star'],ptype_ints['dm'],ptype_ints['bh'],ptype_ints['dust']],
baryon = [ptype_ints['gas'],ptype_ints['star']],
gas = [ptype_ints['gas']],
stellar = [ptype_ints['star']],
dm = [ptype_ints['dm']],
)
half_masses = {}
for k,v in six.iteritems(self.masses):
half_masses[k] = 0.5 * v
for k,v in six.iteritems(radial_categories):
if k == 'dm' and self.obj_type == 'galaxy': continue
binary = 0
for p in v:
binary += 2**p
full_r = get_full_mass_radius(r[::-1], ptype[::-1], binary)
self.radii[k] = self.obj.yt_dataset.quan(full_r, self.obj.units['length'])
half_r = get_half_mass_radius(mass, r, ptype, half_masses[k], binary)
self.radii['%s_half_mass' % k] = self.obj.yt_dataset.quan(half_r, self.obj.units['length'])
def write_IC_mask(self, ic_ds, filename, search_factor = 2.5,radius_type='total'):
"""Write MUSIC initial condition mask to disk. If called on
a galaxy it will look for the parent halo in the IC.
Parameters
----------
ic_ds : yt dataset
The initial condition dataset via ``yt.load()``.
filename : str
The filename of which to write the mask to. If a full
path is not supplied then it will be written in the
current directory.
search_factor : float, optional
How far from the center to select DM particles. Default is
2.5
print_extents : bool, optional
Print MUSIC extents for cuboid after mask creation
Examples
--------
>>> import yt
>>> import caesar
>>>
>>> snap = 'my_snapshot.hdf5'
>>> ic = 'IC.dat'
>>>
>>> ds = yt.load(snap)
>>> ic_ds = yt.load(ic)
>>>
>>> obj = caesar.load('caesar_my_snapshot.hdf5', ds)
>>> obj.galaxies[0].write_IC_mask(ic_ds, 'mymask.txt')
"""
from caesar.zoom_funcs import write_IC_mask
write_IC_mask(self, ic_ds, filename, search_factor,radius_type=radius_type)
def vtk_vis(self, rotate=False):
"""Method to render this group's points via VTK.
Parameters
----------
rotate : boolean
Align angular momentum vector with the z-axis before
rendering?
Notes
-----
Opens up a pyVTK window; you must have VTK installed to use
this method. It is easiest to install via
``conda install vtk``.
"""
self.obj.data_manager.load_particle_data()
from caesar.vtk_funcs import group_vis
group_vis(self, rotate=rotate)
def info(self):
"""Method to quickly print out object attributes."""
pdict = {}
for k,v in six.iteritems(self.__dict__):
if k in info_blacklist: continue
pdict[k] = v
from pprint import pprint
pprint(pdict)
pdict = None
def contamination_check(self, lowres=[2,3,5], search_factor=2.5,
printer=True):
"""Check for low resolution particle contamination.
This method checks for low-resolution particles within
``search_factor`` of the maximum halo radius. When this
method is called on a galaxy, it refers to the parent halo.
Parameters
----------
lowres : list, optional
Particle types to be considered low-res. Defaults to
[2,3,5]; if your simulation contains blackholes you will
want to pass in [2,3]; if your simulation contains active
dust particles you will not include 3.
search_factor : float, optional
Factor to expand the maximum halo radius search distance
by. Default is 2.5
printer : boolean, optional
Print results?
Notes
-----
This method currently ONLY works on GADGET/GIZMO HDF5 files.
"""
from yt.funcs import mylog
from caesar.zoom_funcs import construct_lowres_tree
construct_lowres_tree(self, lowres)
if self.obj_type == 'halo':
halo = self
ID = 'Halo %d' % self.GroupID
elif self.obj_type == 'galaxy':
            if self.halo is None:
raise Exception('Galaxy %d has no halo!' % self.GroupID)
halo = self.halo
ID = "Galaxy %d's halo (ID %d)" % (self.GroupID, halo.GroupID)
r = halo.radii['virial'].d * search_factor
result = self.obj._lowres['TREE'].query_ball_point(halo.pos.d, r)
ncontam = len(result)
lrmass = np.sum(self.obj._lowres['MASS'][result])
self.contamination = lrmass / halo.masses['total'].d
if not printer:
return
if ncontam > 0:
mylog.warning('%s has %0.2f%% mass contamination ' \
                          '(%d LR particles with %0.2e %s)' %
(ID, self.contamination * 100.0, ncontam,
lrmass, halo.masses['total'].units))
else:
mylog.info('%s has NO contamination!' % ID)
class Galaxy(Group):
"""Galaxy class which has the central boolean."""
obj_type = 'galaxy'
def __init__(self,obj):
super(Galaxy, self).__init__(obj)
self.central = False
self.halo = None
self.clouds = []
self.cloud_index_list = np.array([])
class Halo(Group):
"""Halo class which has the dmlist attribute, and child boolean."""
obj_type = 'halo'
dmlist = GroupList('dmlist')
def __init__(self,obj):
super(Halo, self).__init__(obj)
self.child = False
self.galaxies = []
self.central_galaxy = None
self.satellite_galaxies = []
self.galaxy_index_list = np.array([])
class Cloud(Group):
"""Cloud class which has the central boolean."""
obj_type = 'cloud'
def __init__(self,obj):
super(Cloud, self).__init__(obj)
self.central = False
self.galaxy = None
self.halo = None
def create_new_group(obj, group_type):
"""Simple function to create a new instance of a specified
:class:`group.Group`.
Parameters
----------
obj : :class:`main.CAESAR`
Main caesar object.
group_type : {'halo', 'galaxy','cloud'}
        Which type of group? Options are: `halo`, `galaxy`, and `cloud`.
Returns
-------
group : :class:`group.Group`
        Subclass :class:`group.Halo`, :class:`group.Galaxy`, or :class:`group.Cloud`.
"""
if group_type == 'halo':
return Halo(obj)
elif group_type == 'galaxy':
return Galaxy(obj)
elif group_type == 'cloud':
return Cloud(obj)
''' New group functions from | |
"""network3.py
~~~~~~~~~~~~~~
A Theano-based program for training and running simple neural
networks.
Supports several layer types (fully connected, convolutional, max
pooling, softmax), and activation functions (sigmoid, tanh, and
rectified linear units, with more easily added).
When run on a CPU, this program is much faster than network.py and
network2.py. However, unlike network.py and network2.py it can also
be run on a GPU, which makes it faster still.
Because the code is based on Theano, the code is different in many
ways from network.py and network2.py. However, where possible I have
tried to maintain consistency with the earlier programs. In
particular, the API is similar to network2.py. Note that I have
focused on making the code simple, easily readable, and easily
modifiable. It is not optimized, and omits many desirable features.
This program incorporates ideas from the Theano documentation on
convolutional neural nets (notably,
http://deeplearning.net/tutorial/lenet.html ), from <NAME>'s
implementation of dropout (https://github.com/mdenil/dropout ), and
from <NAME> (http://colah.github.io ).
"""
#### Libraries
# Standard library
import cPickle
import gzip
# Third-party libraries
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import softmax
from theano.tensor import shared_randomstreams
from theano.tensor.signal import downsample
# Activation functions for neurons
def linear(z): return z
def ReLU(z): return T.maximum(0.0, z)
from theano.tensor.nnet import sigmoid
from theano.tensor import tanh
#### Constants
GPU = True
if GPU:
print "Trying to run under a GPU. If this is not desired, then modify "+\
"network3.py\nto set the GPU flag to False."
try: theano.config.device = 'gpu'
except: pass # it's already set
theano.config.floatX = 'float32'
else:
print "Running with a CPU. If this is not desired, then the modify "+\
"network3.py to set\nthe GPU flag to True."
def generate_input_normalizer(training_data):
    #Normalise inputs using the mean and standard deviation of the training data (the same
    #statistics are reused for the validation and test data); keeping inputs roughly standard-normal
    #helps stop neurons saturating at the start of training, as with the weight/bias initialisation.
#See our written notes
    #Flatten every frame of every sample in training_data[0] into one list so the
    #statistics are computed over all input values.
    input_x = [frame for sample in training_data[0] for frame in sample]
mean = sum(input_x)/float(len(input_x))
stddev = np.linalg.norm(input_x-mean)/np.sqrt(len(input_x))
return mean, stddev
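# Worked example (illustrative numbers): if the flattened training inputs are [1.0, 2.0, 3.0],
# then mean = 2.0 and stddev = ||[-1, 0, 1]|| / sqrt(3) = sqrt(2/3) ~= 0.816, so
# normalize_input maps each input value x to (x - 2.0) / 0.816.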
def normalize_input(data, mean, stddev):
data[0] = (data[0]-mean)/stddev
return data
#### Load the data
def load_data_shared(filename="../data/mnist.pkl.gz", normalize_x=False):
f = gzip.open(filename, 'rb')
training_data, validation_data, test_data = cPickle.load(f)
if normalize_x:
#normalize input data.
input_normalizer_mean, input_normalizer_stddev = generate_input_normalizer(training_data)
training_data = normalize_input(training_data, input_normalizer_mean, input_normalizer_stddev)
validation_data = normalize_input(validation_data, input_normalizer_mean, input_normalizer_stddev)
test_data = normalize_input(test_data, input_normalizer_mean, input_normalizer_stddev)
def shared(data):
"""Place the data into shared variables. This allows Theano to copy
the data to the GPU, if one is available.
"""
shared_x = theano.shared(
np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
shared_y = theano.shared(
np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
return shared_x, T.cast(shared_y, "int32")
return [shared(training_data), shared(validation_data), shared(test_data)]
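# Typical call (sketch; the default path assumes ../data/mnist.pkl.gz is present):
#   training_data, validation_data, test_data = load_data_shared(normalize_x=True)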
#### Main class used to construct and train networks
class Network(object):
def __init__(self, layers, mini_batch_size):
"""Takes a list of `layers`, describing the network architecture, and
a value for the `mini_batch_size` to be used during training
by stochastic gradient descent.
"""
self.layers = layers
self.mini_batch_size = mini_batch_size
#self.params = [param for layer in self.layers for param in layer.params]
        #BEGIN MOMENTUM IMPLEMENTATION
        #Velocities get their own zero-initialised shared variables (one per parameter);
        #they are never written directly into the weights and biases, they are only
        #used to update the params.
#self.velocities = [velocity for layer in self.layers for velocity in layer.params]
#print np.array(self.params).shape
'''
self.w = theano.shared(
np.asarray(
np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
dtype=theano.config.floatX),
borrow=True)
'''
'''
self.velocities = theano.shared(
np.zeros(
shape=self.params,
dtype=theano.config.floatX
),
borrow=True)
'''
#self.velocities = theano.shared(np.zeros_like(self.params))
self.params = [param for layer in self.layers for param in layer.params]
        self.velocities = [theano.shared(np.zeros_like(param.get_value()),
                                         borrow=True) for param in self.params]
#END MOMENTUM IMPLEMENTATION
self.x = T.matrix("x")
self.y = T.ivector("y")
init_layer = self.layers[0]
init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
for j in xrange(1, len(self.layers)):
prev_layer, layer = self.layers[j-1], self.layers[j]
layer.set_inpt(
prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
self.output = self.layers[-1].output
self.output_dropout = self.layers[-1].output_dropout
def output_config(self, output_filename, training_data_subsections, early_stopping, automatic_scheduling, output_training_cost, output_training_accuracy, output_validation_accuracy, output_test_accuracy, print_results, config_index, config_count, run_index, run_count, output_types):
#Set all our things for graph output
self.output_filename=output_filename
        self.training_data_subsections=training_data_subsections
        self.early_stopping=early_stopping
        self.automatic_scheduling=automatic_scheduling
self.output_training_cost=output_training_cost
self.output_training_accuracy=output_training_accuracy
self.output_validation_accuracy=output_validation_accuracy
self.output_test_accuracy=output_test_accuracy
self.print_results=print_results
self.config_index=config_index
self.config_count=config_count
self.run_index=run_index
self.run_count=run_count
self.output_types=output_types
def SGD(self, output_dict, training_data, epochs, mini_batch_size, eta,
validation_data, test_data, lmbda=0.0, momentum_coefficient=0.0,
scheduler_check_interval=10, param_decrease_rate=10):#Initialize early stopping stuff to reasonable defaults
"""Train the network using mini-batch stochastic gradient descent."""
training_x, training_y = training_data
validation_x, validation_y = validation_data
test_x, test_y = test_data
# compute number of minibatches for training, validation and testing
num_training_batches = size(training_data)/mini_batch_size
num_validation_batches = size(validation_data)/mini_batch_size
num_test_batches = size(test_data)/mini_batch_size
# define the (regularized) cost function, symbolic gradients, and updates
l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])
cost = self.layers[-1].cost(self)+\
0.5*lmbda*l2_norm_squared/num_training_batches
#grads = T.grad(cost, self.params)
#updates = [(param, param-eta*grad) for param, grad in zip(self.params, grads)]
        #BEGIN MOMENTUM IMPLEMENTATION
        #need to compute our changes relative to params, never the velocities;
        #each velocity is a shared variable updated in place (v <- mu*v - eta*grad,
        #param <- param + v) so momentum accumulates across minibatches
        grads = T.grad(cost, self.params)
        updates = []
        for param, velocity, grad in zip(self.params, self.velocities, grads):
            new_velocity = momentum_coefficient*velocity - eta*grad
            updates.append((velocity, new_velocity))
            updates.append((param, param + new_velocity))
#updates = [(param, param-eta*grad) for param, grad in zip(self.params, grads)]
#grads = T.grad(cost, self.velocities)
        #this line changed the type of self.velocities in an unexpected way
#self.velocities = [(param, param+momentum_coefficient*velocity-eta*grad) for velocity, grad in zip(self.velocities, grads)]
#then we update our velocities, trigger this with our functions
'''
def get_momentum_updates(cost, params, eta, momentum_coefficient):
grads = T.grad(cost, params)
self.velocities = [momentum_coefficient*velocity-eta*grad for velocity, grad in zip(self.velocities, grads)]
updates = [(param, param+velocity) for param, velocity in zip(params, self.velocities)]
return updates
'''
#updates = [(param, param+(momentum_coefficient*velocity-eta*grad)) for param, velocity, grad in zip(self.params, self.velocities, grads)]
#END MOMENTUM IMPLEMENTATION
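        # Worked example of the momentum rule (illustrative numbers): with
        # momentum_coefficient=0.9, eta=0.1 and a constant gradient of 1.0, the
        # velocity evolves 0 -> -0.1 -> -0.19 -> -0.271 -> ..., approaching the
        # limiting step -eta/(1 - momentum_coefficient) = -1.0.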
# define functions to train a mini-batch, and to compute the
# accuracy in validation and test mini-batches.
i = T.lscalar() # mini-batch index
train_mb = theano.function(
[i], cost, updates=updates,
givens={
self.x:
training_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
self.y:
training_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
train_mb_accuracy = theano.function(
[i], self.layers[-1].accuracy(self.y),
givens={
self.x:
training_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
self.y:
training_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
train_mb_cost = theano.function(
[i], self.layers[-1].cost(self) + 0.5*lmbda*l2_norm_squared/num_training_batches,
givens={
self.x:
training_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
self.y:
training_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
validate_mb_accuracy = theano.function(
[i], self.layers[-1].accuracy(self.y),
givens={
self.x:
validation_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
self.y:
validation_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
test_mb_accuracy = theano.function(
[i], self.layers[-1].accuracy(self.y),
givens={
self.x:
test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
self.y:
test_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
self.test_mb_predictions = theano.function(
[i], self.layers[-1].y_out,
givens={
self.x:
test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
# Do the actual training
best_training_accuracy = 0.0
best_validation_accuracy = 0.0
if self.early_stopping and self.output_training_cost:
scheduler_results = []
#Arange so we can do our vector multiplication
scheduler_x = np.arange(1, scheduler_check_interval+1)
param_stop_threshold = eta * param_decrease_rate**-6
for epoch in xrange(epochs):
for minibatch_index in xrange(num_training_batches):
cost_ij = train_mb(minibatch_index)
            #No guard is needed here: if no output types are enabled there is nothing to graph,
            #so an unused entry in output_dict is harmless.
output_dict[self.run_index][epoch] = []
#output types
if self.output_training_cost:
#The rest of this line is already explained in the notes with good reason, but I added the float()
#So that we don't get json serialization errors for using the numpy.float32 type.
training_cost = float(np.mean([train_mb_cost(j) for j in xrange(num_training_batches)]))
output_dict[self.run_index][epoch].append(training_cost)
if self.output_training_accuracy:
training_accuracy = np.mean([train_mb_accuracy(j) for j in xrange(num_training_batches)])
training_accuracy *= 100#Percentage formatting
output_dict[self.run_index][epoch].append(training_accuracy)
if self.output_validation_accuracy:
validation_accuracy = np.mean([validate_mb_accuracy(j) for j in xrange(num_validation_batches)])
validation_accuracy *= 100#Percentage formatting
output_dict[self.run_index][epoch].append(validation_accuracy)
if self.output_test_accuracy:
test_accuracy = np.mean([test_mb_accuracy(j) for j in xrange(num_test_batches)])
test_accuracy *= 100#Percentage formatting
output_dict[self.run_index][epoch].append(test_accuracy)
#So we don't print until we've already computed calculations for this epoch
print "Epoch %i" % (epoch)
if self.print_results:
if self.output_training_cost:
print "\tTraining Cost: %f" % (training_cost)
if self.output_training_accuracy:
print "\tTraining Accuracy: %f%%" % (training_accuracy)
if self.output_validation_accuracy:
print "\tValidation Accuracy: %f%%" % (validation_accuracy)
if self.output_test_accuracy:
print "\tTest Accuracy: %f%%" % (test_accuracy)
if (self.early_stopping or self.automatic_scheduling) and self.output_training_cost:
#This is where we change things according to the parameter we want to schedule, since I think it would take a hell of a lot to make | |
Entering".format(self.username))
if self.user_token is not None:
headers = {"Content-Type" : "application/json"}
data = {"token": self.user_token}
res = requests.post(BASEURL + "editor/loadtasks", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
print(res.status_code)
pkg = res.json()
if len(pkg['editor']) > 0:
self.all_tasks = pkg["editor"]
self.this_task = None
for this_task in pkg['editor']:
if not clearerror:
if this_task["jobid"] is None and this_task["errstatus"] is None:
self.this_task = this_task
break
else:
if this_task["jobid"] is None:
self.this_task = this_task
if self.this_task is None:
raise RuntimeError("Cannot select a task!")
self.taskid = self.this_task['taskid']
self.projectid = self.this_task['projectid']
print(self.taskid, self.projectid)
LOG.info("username={}: loadtasks(): Taskid={} & Projectid={}".format(self.username, self.taskid, self.projectid))
else:
print('No tasks to select')
LOG.info("username={}: loadtasks(): No tasks to select!".format(self.username))
else:
print("User not logged in!")
LOG.error("username={}: loadtasks(): User not logged in!".format(self.username))
print('')
def loadtask(self):
"""
Load data from a specific task
"""
LOG.info("username={}: loadtask(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
            data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.post(BASEURL + "editor/loadtask", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: loadtask(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: loadtask(): User not logged in!".format(self.username))
print('')
def getaudio(self):
"""
Return a portion of audio for the task
"""
LOG.info("username={}: getaudio(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
params = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.get(BASEURL + "editor/getaudio", params=params)
print(res.status_code)
if res.status_code == 200:
with open('taskrange.ogg', 'wb') as f:
f.write(res.content)
LOG.info("username={}: getaudio(): Save audio to taskrange.ogg".format(self.username))
else:
print('SERVER SAYS:', res.text)
LOG.error("username={}: getaudio(): ".format(self.username), res.text)
else:
print("User not logged in!")
LOG.error("username={}: getaudio(): User not logged in!".format(self.username))
print('')
def savetext(self):
"""
Save text to task text file
"""
LOG.info("username={}: savetext(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
text = codecs.open(self._html, "r", "utf-8").read()
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid, "text" : text}
res = requests.post(BASEURL + "editor/savetext", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: savetext(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: savetext(): User not logged in!".format(self.username))
print('')
def savealltext(self):
"""
        Save text to all files in the project
"""
LOG.info("username={}: savealltext(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
for opt in self.all_tasks:
taskid = opt["taskid"]
projectid = opt["projectid"]
LOG.info("SAVE: tid={} pid={}".format(taskid, projectid))
headers = {"Content-Type" : "application/json"}
text = codecs.open(self._html, "r", "utf-8").read()
data = {'token' : self.user_token, 'projectid' : projectid, 'taskid' : taskid, "text" : text}
res = requests.post(BASEURL + "editor/savetext", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: savealltext(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: savealltext(): User not logged in!".format(self.username))
print('')
def cleartext(self):
"""
Remove text from file
"""
LOG.info("username={}: cleartext(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid, "text" : ""}
res = requests.post(BASEURL + "editor/savetext", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: cleartext(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: cleartext(): User not logged in!".format(self.username))
print('')
def gettext(self):
"""
Return the task's text
"""
LOG.info("username={}: gettext(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.post(BASEURL + "editor/gettext", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: gettext(): {}".format(self.username, res.text))
print(res.status_code)
pkg = res.json()
print('TEXT', pkg['text'])
else:
print("User not logged in!")
LOG.error("username={}: gettext(): User not logged in!".format(self.username))
print('')
def loadusers(self):
"""
Return the registered users
"""
LOG.info("username={}: loadusers(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token}
res = requests.post(BASEURL + "editor/loadusers", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: loadusers: {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: loadusers(): User not logged in!".format(self.username))
print('')
def taskdone(self):
"""
Assign the task to collator
"""
LOG.info("username={}: taskdone(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
            data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.post(BASEURL + "editor/taskdone", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: taskdone(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: taskdone(): User not logged in!".format(self.username))
print('')
def reassigntask(self):
"""
Re-assign task to editor
"""
LOG.info("username={}: reassigntask(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.post(BASEURL + "editor/reassigntask", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: reassigntask(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: reassigntask(): User not logged in!".format(self.username))
print('')
def buildmaster(self):
"""
Build master docx MS-WORD
"""
LOG.info("username={}: buildmaster(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
            data = {'token' : self.user_token, 'projectid' : self.projectid}
res = requests.post(BASEURL + "editor/buildmaster", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: buildmaster(): {}".format(self.username, res.text))
print(res.status_code)
if res.status_code == 200:
LOG.info("buildmaster(): Downloading MS-WORD document")
pkg = res.json()
LOG.info("Requesting URL: {}".format(BASEURL + "editor/{}".format(pkg["url"])))
res = requests.get(BASEURL + "editor/{}".format(pkg["url"]), stream=True)
with open(self._docx, "wb") as out_file:
shutil.copyfileobj(res.raw, out_file)
LOG.info("buildmaster(): Saved - {} {} bytes".format(self._docx, os.path.getsize(self._docx)))
else:
print("User not logged in!")
LOG.error("username={}: buildmaster(): User not logged in!".format(self.username))
print('')
def unlocktask(self):
"""
Cancel a scheduled job
"""
LOG.info("username={}: unlocktask(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.post(BASEURL + "editor/unlocktask", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: unlocktask(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: unlocktask(): User not logged in!".format(self.username))
print('')
def diarize(self):
"""
        Submit a speaker diarization job
"""
LOG.info("username={}: diarize(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid, 'subsystem' : 'default'}
res = requests.post(BASEURL + "editor/diarize", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: diarize(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: diarize(): User not logged in!".format(self.username))
print('')
def recognize(self):
"""
        Submit a speech recognition job
"""
LOG.info("username={}: recognize(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid, 'subsystem' : self.subsystem}
res = requests.post(BASEURL + "editor/recognize", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: recognize(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: recognize(): User not logged in!".format(self.username))
print('')
def align(self):
"""
        Submit a speech alignment job
"""
LOG.info("username={}: align(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid, 'subsystem' : 'en_ZA_16000'}
res = requests.post(BASEURL + "editor/align", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: align(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: align(): User not logged in!".format(self.username))
print('')
def clearerror(self):
"""
Clear error status
"""
LOG.info("username={}: clearerror(): Entering".format(self.username))
if self.user_token is not None and self.projectid is not None:
headers = {"Content-Type" : "application/json"}
data = {'token' : self.user_token, 'projectid' : self.projectid, 'taskid' : self.taskid}
res = requests.post(BASEURL + "editor/clearerror", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: clearerror(): {}".format(self.username, res.text))
print(res.status_code)
pkg = res.json()
else:
print("User not logged in!")
LOG.error("username={}: clearerror(): User not logged in!".format(self.username))
print('')
def speechsubsystems(self):
"""
List speech subsystems
"""
LOG.info("username={}: speechsubsystems(): Entering".format(self.username))
if self.user_token is not None:
headers = {"Content-Type" : "application/json"}
services = ["diarize", "recognize", "align"]
data = {'token' : self.user_token, "service" : random.choice(services)}
res = requests.post(BASEURL + "editor/speechsubsystems", headers=headers, data=json.dumps(data))
print('SERVER SAYS:', res.text)
LOG.info("username={}: speechsubsystems(): {}".format(self.username, res.text))
print(res.status_code)
else:
print("User not logged in!")
LOG.error("username={}: speechsubsystems(): User not logged in!".format(self.username))
print('')
class Worker(threading.Thread):
def __init__(self, paths, user, number):
threading.Thread.__init__(self)
self.paths = paths
self.user = user
self.running = True
self.editor = Editor(user)
self.editor.gen_users()
self.thread_number = number
def run(self):
state = "login"
while self.running:
LOG.info("user={} thread#={} state={}".format(self.user, self.thread_number, state))
meth = getattr(self.editor, state)
res, text = meth()
LOG.info("user={} thread#={} | |
# data-utils/config.py
resources = [
{
"name": "mousephenotype.org",
"keywords": [],
"pmids": [
"27626380",
"24652767",
"24197666",
"25127743",
"25343444",
"24642684",
"21677750",
"22968824",
"22940749",
"22991088",
"25992600",
"22566555",
"23519032",
"22211970",
"24194600",
"26147094",
"24634472",
"24932005",
"25093073",
"24046361",
"24033988",
"23315689",
"22926223",
"21185382",
"21737429",
"19933761",
"19689210",
"17905814",
"17218247",
"16933996",
"16254554",
"15908916",
"15340423",
"15340424",
"28650954",
"28650483",
"29026089",
"29348434",
"29352221",
"29396915",
"29626206",
"25129317",
"24667524",
"19864252",
"21051359",
"21822254",
"26292834",
"27012318",
"27829658",
"25311550",
"27827997",
"28026023",
"27175020",
"27170656",
"27357688",
"27423879",
"27482817",
"26774483",
"26748089",
"26876172",
"26869263",
"27348751",
"26214591",
"25811986",
"25480920",
"25463925",
"25831528",
"27335633",
"25108913",
"25202984",
"24793995",
"25002511",
"25024228",
"25347472",
"24771530",
"24599962",
"24795147",
"23918801",
"22781001",
"22728827",
"22801502",
"27580957",
"26563511",
"26876963",
"26603179",
"25833316",
"25849460",
"25825651",
"25414328",
"25179226",
"25256713",
"24850858",
"24552652",
"24386367",
"23844102",
"22996130",
"22961258",
"22872090",
"22415892",
"21185380",
"21930503",
"20929875",
"27150100",
"26512121",
"24795431",
"23855728",
"17448981",
"27262858",
"26314589",
"19783817",
"26092691",
"23673683",
"23760815",
"23261053",
"22422677",
"21334922",
"20095055",
"18781157",
"23195311",
"22028326",
"22035782",
"22078115",
"22121025",
"22127858",
"22170288",
"22190154",
"22331800",
"22396661",
"22422470",
"22424222",
"22429026",
"22570807",
"22628574",
"22713295",
"22719993",
"22736716",
"22761243",
"22815272",
"22826106",
"22853787",
"22877670",
"22878107",
"22886688",
"22899600",
"22935533",
"23029233",
"23044826",
"23185269",
"23362344",
"23378250",
"23408557",
"23443757",
"23550020",
"23640545",
"23712496",
"23713592",
"23792051",
"23834320",
"23860911",
"23884334",
"23913666",
"23996268",
"23996269",
"24007626",
"24014359",
"24078022",
"24127137",
"24151200",
"24185425",
"24268689",
"24309014",
"24413636",
"24427525",
"24557902",
"24647359",
"24667445",
"24814481",
"24820404",
"24867377",
"24875755",
"25064097",
"25064103",
"25065329",
"25092533",
"25161872",
"25184674",
"25271304",
"25284787",
"25340944",
"25352636",
"25427919",
"25446316",
"25596827",
"25605974",
"25623066",
"25651181",
"25678226",
"25752829",
"25838550",
"25944819",
"25994645",
"26013980",
"26085669",
"26125289",
"26140590",
"26331883",
"26377222",
"26394634",
"26426199",
"26441957",
"26498761",
"26677226",
"26746790",
"26752685",
"26808208",
"26828201",
"26911675",
"26954549",
"26977737",
"27183626",
"27210042",
"27300315",
"27317170",
"27328812",
"27338829",
"27377588",
"27458697",
"27459389",
"27478939",
"27494674",
"27612175",
"27705781",
"27760311",
"27768725",
"27775065",
"27826253",
"27718356",
"25870538",
"25087875",
"25250333",
"24349617",
"25398347",
"23660285",
"24376223",
"24391517",
"24444057",
"24518983",
"24699242",
"24792211",
"24866031",
"24866043",
"25053480",
"25137590",
"25182530",
"25281652",
"25447135",
"25481835",
"25591789",
"25609838",
"25645994",
"25862663",
"25948089",
"26013677",
"26106147",
"26178246",
"26223525",
"26265594",
"26286857",
"26340938",
"26350507",
"26420780",
"26544146",
"26627871",
"26719974",
"27087500",
"27135601",
"27267125",
"27350602",
"27383011",
"27392675",
"27496052",
"27535945",
"27334371",
"27380911",
"27534441",
"27221665",
"27374120",
"27261274",
"27075447",
"26659567",
"26611327",
"26208973",
"26263558",
"26147683",
"26206923",
"25144769",
"24740057",
"29116375",
"22282595",
"24619649",
"23226398",
"28158580",
"28177569",
"28525556",
"29618580",
"24064296",
"24703702",
"29651144",
"28082865",
"28089251",
"25526314",
"26742488",
"26680259",
"26795843",
"27033150",
"27725247",
"27880902",
"27009967",
"25550471",
"27141965",
"29474905",
"21398221",
"28627522",
"29343685",
"25920679",
"27098031",
"24685140",
"24162188",
"24085518",
"24327959",
"24096374",
"27412785",
"28123938",
"28069890",
"28164172",
"28072389",
"28198431",
"27754481",
"28251139",
"28264694",
"28222099",
"23175610",
"27996060",
"28051178",
"28719620",
"28666327",
"28658614",
"28663252",
"28746873",
"25085416",
"28186492",
"29748257",
"29136647",
"26989177",
"27215660",
"21763481",
"25869670",
"23185619",
"23817550",
"26671148",
"28552963",
"24726384",
"25215489",
"28446465",
"28598329",
"25527643",
"27529348",
"23827709",
"25183173",
"28165338",
"28317875",
"28363792",
"25562316",
"27003440",
"28095415",
"23031443",
"19048073",
"27822537",
"23193260",
"24058528",
"27194947",
"24316575",
"28461138",
"28355569",
"28333135",
"28343629",
"28494243",
"26269093",
"27510895",
"19481682",
"28372532",
"19151073",
"28562614",
"28388617",
"28555647",
"28323137",
"28623308",
"28649442",
"28619992",
"25683716",
"28973536",
"26341401",
"26752518",
"27666810",
"26525805",
"26078274",
"27050515",
"27956603",
"29180567",
"29618490",
"28286049",
"26438361",
"28424523",
"26494788",
"26878175",
"28495971",
"28107648",
"28875501",
"29274878",
"29519758",
"27302397",
"29282851",
"22144916",
"23100620",
"23583643",
"25026077",
"25282160",
"27010430",
"28700664",
"28663585",
"28414305",
"28416808",
"28369035",
"28319090",
"28659616",
"27340223",
"28403548",
"28414800",
"28445457",
"27940919",
"28630322",
"28717248",
"28087629",
"29352114",
"23281428",
"26825527",
"29118104",
"26053317",
"26579598",
"29100074",
"29274432",
"24509082",
"27032818",
"27537840",
"27070821",
"27195053",
"27385014",
"26919979",
"28302748",
"26324926",
"27897969",
"27153923",
"28296893",
"29192248",
"28355570",
"28739881",
"29317670",
"29114251",
"28475893",
"29447283",
"29520062",
"27517863",
"22664872",
"23103166",
"27965109",
"28720882",
"27899602",
"28729650",
"28740830",
"28511701",
"28246328",
"28704368",
"28775312",
"28835921",
"28645892",
"28768202",
"28811661",
"28456633",
"28881709",
"28931858",
"28850611",
"28864826",
"28740264",
"28895944",
"25356849",
"27841869",
"28052056",
"24173031",
"27699209",
"28167615",
"28439028",
"29929962",
"25248098",
"25451192",
"28737513",
"25805620",
"25880340",
"27341128",
"25852484",
"27008858",
"27231886",
"27504968",
"27151949",
"26747696",
"26276633",
"27177420",
"28392070",
"27637148",
"27857073",
"27886188",
"27936095",
"26768662",
"27965440",
"28419211",
"28409408",
"28791750",
"29035278",
"28844881",
"28706256",
"26716412",
"29180626",
"29263935",
"28120642",
"29203676",
"28368047",
"28430962",
"29627378",
"29784793",
"26637354",
"28330868",
"25087892",
"26935106",
"28600325",
"24529376",
"22520467",
"23954160",
"23973919",
"25308334",
"25816775",
"26910010",
"26929451",
"29059324",
"29231808",
"28394359",
"29255262",
"29396648",
"29125601",
"29233884",
"29290552",
"29357359",
"29410655",
"29423269",
"29391390",
"29337116",
"29451229",
"29369447",
"29539633",
"29552027",
"29719497",
"29549365",
"29662492",
"29170794",
"29563332",
"29323723",
"29643507",
"29554219",
"29472638",
"29740613",
"28694387",
"24895296",
"28132686",
"25737282",
"25645914",
"26522984",
"26665171",
"28878098",
"26344101",
"29126303",
"27348266",
"26544067",
"26909310",
"27783937",
"28636595",
"27184846",
"28507547",
"29049287",
"29126234",
"28811369",
"29149604",
"25737499",
"28536097",
"28660243",
"28827723",
"29217689",
"29751817",
"29739947",
"29674722",
"29618611",
"29703891",
"29224110",
"29773801",
"29633501",
"29632206",
"29736013",
"29459250",
"29717274",
"29737391",
"29698489",
"29859945",
"29198724",
"29581196",
"29799007",
"29910734",
"29809135",
"29910804",
"29953487",
"29955040",
"29925374",
"29665134",
"27006114",
"22729249",
"26634935",
"23675350",
"24646995",
"25690012",
"26242575",
"27910899",
"23972988",
"28488815",
"26769314",
"25712475",
"26496195",
"26195727",
"26808229",
"25774914",
"26640945",
"27075691",
"27901122",
"28719641",
"26037925",
"27647347",
"24836425",
"26214740",
"26365183",
"26490636",
"27487330",
"27501248",
"28930662",
"29632143",
"27738106",
"27189934",
"29378767",
"27760146",
"27986806",
"28363983",
"27390154",
"28476888",
"28395340",
"29078390",
"26826102",
"29101244",
"29045386",
"29272704",
"28836307",
"26481684",
"28537559",
"29317263",
"24606902",
"28985337",
"28179430",
"29546395",
"23028370",
"25101834",
"29073101",
"29143738",
"29254825",
"26699479",
"29511023",
"25129145",
"25713362",
"28445726",
"26437366",
"26168216",
"27797339",
"27013679",
"26980189",
"27551157",
"25806685",
"28846071",
"27810922",
"28120397",
"29695863",
"25669152",
"27655694",
"27729468",
"28288114",
"28855737",
"29382756",
"29526553",
"26344103",
"28812582",
"28628032",
"28550160",
"28779009",
"23580231",
"29247201",
"29163535",
"29184203",
"29368690",
"29321277",
"29229903",
"28951199",
"25222142",
"26807733",
"27453048",
"23166506",
"23300663",
"24706806",
"25282615",
"26431380",
"24284070",
"27445138",
"29034508",
"24139043",
"25961792",
"26863613",
"28115237",
"25393878",
"25198012",
"27330028",
"22579044",
"22802351",
"26903602",
"24784138",
"28533230",
"24856900",
"27892467",
"26119235",
"29293958",
"24274065",
"24844465",
"25100583",
"28007900",
"29625592",
"26119739",
"27764156",
"27879317",
"28696225",
"26046524",
"26582389",
"27511831",
"22138310",
"26696638",
"23620107",
"23902802",
"24721909",
"28729419",
"27264173",
"27640147",
"26890063",
"25824033",
"28722352",
"24284630",
"29030458",
"25228648",
"26994968",
"27414999",
"28468635",
"26138248",
"25071156",
"27297878",
"25829425",
"26187040",
"28859855",
"26993806",
"29691435",
"28330720",
"28774592",
"25153088",
"25150276",
"26616005",
"28121514",
"24346171",
"24357318",
"24453213",
"25066055",
"26551274",
"27568564",
"28920920",
"29457782",
"29558472",
"24210661",
"25757017",
"25378478",
"29361558",
"24646517",
"25568313",
"25773539",
"27500495",
"27939640",
"22308354",
"23596309",
"27548528",
"28716903",
"23762465",
"23349524",
"23941873",
"24517230",
"25456069",
"26015548",
"26950202",
"27035649",
"27732088",
"25242043",
"24876386",
"23277569",
"29163060",
"24719406",
"22020125",
"23637629",
"26023081",
"24550447",
"22319578",
"23267101",
"26391396",
"27214556",
"28169399",
"23418344",
"24013503",
"25519955",
"25970242",
"26028225",
"26258414",
"27827819",
"25446516",
"29626631",
"27330059",
"22513779",
"26842965",
"24396064",
"27257215",
"27266524",
"28439006",
"29581031",
"28092655",
"24413433",
"26548954",
"29083414",
"23793062",
"23870131",
"24687849",
"24293546",
"25985275",
"26102480",
"28931573",
"26398943",
"25299188",
"25662603",
"26402843",
"27460150",
"27470444",
"23717213",
"23912999",
"24739963",
"29844188",
"25715795",
"26553930",
"26965651",
"29545368",
"25985299",
"27635635",
"26709698",
"27568546",
"27435297",
"25119035",
"26582950",
"27542690",
"28322331",
"28899994",
"28698371",
"26607718",
"29371594",
"29118343",
"29113991",
"29031721",
"28334068",
"29045729",
"27554475",
"28878117",
"25594701",
"26604141",
"27418600",
"23608191",
"26687990",
"21925315",
"24923387",
"26215701",
"24557841",
"23911318",
"24066180",
"28746349",
"22487427",
"22890841",
"24519931",
"26192035",
"26586820",
"25187265",
"21122816",
"28114291",
"25220394",
"27381259",
"24452334",
"25059425",
"26903600",
"25990470",
"25526730",
"22876197",
"24860998",
"26385183",
"25368024",
"29193599",
"29024665",
"29229982",
"29263365",
"27178841",
"29483358",
"24726366",
"29686099",
"21625469",
"26169051",
"23775847",
"23990365",
"23028652",
"21734703",
"22922648",
"25155611",
"23180825",
"26011558",
"23472195",
"23341775",
"28266638",
"25782772",
"26188089",
"26727661",
"25892012",
"24362311",
"24849454",
"25802402",
"28287403",
"23454480",
"24075906",
"26305884",
"26590424",
"27203244",
"26392540",
"22550345",
"24825892",
"27428826",
"29155878",
"25605924",
"25992553",
"22745161",
"25827072",
"25211221",
"26595655",
"25263220",
"26329040",
"25981042",
"26457795",
"25126785",
"28957665",
"24179230",
"25959730",
"23820044",
"25581363",
"26833026",
"24135232",
"26755700",
"27660326",
"29229865",
"26042409",
"24091014",
"25754822",
"29053101",
"27210752",
"28851744",
"25680095",
"25614626",
"23989956",
"25525875",
"26223655",
"26229117",
"26320659",
"26880576",
"27477283",
"27496731",
"28137885",
"28533362",
"28801234",
"29056340",
"22447450",
"23142661",
"28781169",
"25729399",
"27502165",
"25298527",
"28702328",
"27481093",
"24391134",
"25521379",
"28791777",
"23791195",
"26346620",
"25259925",
"23626854",
"23818578",
"27621462",
"29467163",
"27524794",
"25838543",
"24334608",
"26401052",
"28031293",
"20799038",
"22184403",
"24014243",
"24062447",
"25217698",
"25340873",
"25613381",
"28978033",
"24737000",
"28732206",
"21473986",
"21240276",
"21655083",
"23468651",
"24726326",
"23765990",
"26278034",
"29321172",
"21606493",
"21949881",
"21983290",
"22499153",
"22915101",
"22956847",
"23742824",
"23827947",
"23885286",
"24668173",
"24860098",
"25070895",
"25080488",
"25170954",
"26002464",
"26035172",
"26136251",
"26553756",
"26839965",
"27076682",
"23028378",
"24336247",
"29880681",
"27226319",
"19525957",
"26072710",
"23401851",
"28041877",
"23897886",
"25180231",
"28104815",
"25980009",
"24339795",
"25347473",
"24391757",
"28609438",
"25605782",
"26023097",
"26177727",
"24853502",
"27466187",
"23929668",
"29551634",
"25901318",
"28238654",
"26924503",
"26053665",
"28442549",
"28981838",
"24302573",
"28341548",
"28427419",
"26913567",
"28418018",
"27841881",
"28448534",
"29031607",
"28577909",
"26703212",
"26450969",
"25195104",
"27189937",
"27374498",
"29670287",
"24855946",
"26459636",
"29082311",
"25712206",
"25712208",
"25770910",
"25843683",
"24412371",
"24995796",
"29234490",
"25425145",
"29632205",
"28614717",
"29281837",
"27995894",
"27013612",
"28683291",
"24825920",
"29311767",
"28826497",
"28094771",
"23516444",
"28082284",
"29180574",
"26740569",
"27001068",
"24818823",
"25026213",
"26044960",
"27493188",
"26095358",
"26246171",
"27524624",
"25056906",
"28239655",
"27048792",
"27699213",
"27444544",
"26947074",
"28751656",
"28445731",
"24777781",
"28206698",
"28077655",
"28846084",
"28966054",
"29768502",
"29615573",
"27866708",
"23936238",
"25340345",
"24009529",
"26275310",
"29084768",
"24695226",
"20208559",
"28351984",
"24700869",
"28291836",
"28054009",
"29056298",
"29072697",
"26862784",
"23908241",
"24239741",
"26523868",
"27184849",
"24807221",
"27064284",
"27053112",
"22595669",
"28430876",
"26785054",
"24826990",
"24356961",
"28900165",
"26748701",
"26975724",
"28733485",
"26711119",
"28530661",
"29731414",
"27038752",
"27375112",
"24078251",
"25918388",
"27310661",
"28183797",
"28621310",
"25038227",
"27362409",
"26697887",
"28530678",
"27769071",
"27882344",
"24080084",
"29426904",
"23690620",
"28007585",
"24844244",
"24811384",
"28192372",
"25731822",
"29909984",
"26130057",
"27020856",
"27693848",
"27889626",
"26902431",
"27445989",
"25915623",
"27482814",
"21750680",
"22005280",
"23934451",
"26660102",
"26822507",
"26952749",
"27943094",
"28892649",
"29133259",
"29259219",
"29551636",
"26335643",
"22179047",
"27986456",
"27013243",
"28578315",
"28578316",
"26601958",
"28986324",
"24833352",
"26268777",
"29150602",
"24613482",
"26829592",
"27106110",
"27798843",
"21652635",
"27497298",
"27915416",
"29126797",
"29391195",
"26321200",
"26201991",
"27019864",
"27143109",
"27233670",
"27159393",
"28213441",
"28422719",
"29606301",
"29290589",
"27633994",
"27759003",
| |
# Parent class to MassConservationApproach, SemiDiffusiveApproach, and GeneralApproach
import scipy
import numpy.linalg
import time
import math
import sys
class BistabilityFinder(object):
def __parent_run_optimization(self):
# build the name-mangled prefix needed to read the current subclass's private attributes
self.__mangled_name = "self._" + self.__class__.__name__
self.__method = eval(self.__mangled_name + "__method")
self.__parallel_flag = eval(self.__mangled_name + "__parallel_flag")
self.__confidence_level_flag = eval(self.__mangled_name + "__confidence_level_flag")
self.__print_flag = eval(self.__mangled_name + "__print_flag")
self.__change_in_rel_error = eval(self.__mangled_name + "__change_in_rel_error")
self.__iterations = eval(self.__mangled_name + "__iterations")
if self.__parallel_flag:
samples = self.__initialize_mpi_optimization()
det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value = self.__main_optimization_routine(samples)
return self.__finalize_mpi_optimization(det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value)
else:
self.__my_rank = None
self.__comm = None
samples = self.__initialize_optimization()
det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value = self.__main_optimization_routine(samples)
return self.__finalize_optimization(det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value)
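# Note on the access pattern above: the private attributes live on the subclasses
# (MassConservationApproach, SemiDiffusiveApproach, GeneralApproach), so Python name
# mangling stores them as "_<SubclassName>__<attr>" and the parent reaches them by
# building that prefix. A minimal sketch of the same idea via getattr(), assuming the
# subclass has already defined a "__method" attribute:
#
#     prefix = "_" + self.__class__.__name__ + "__"
#     method = getattr(self, prefix + "method")   # equivalent to the eval() form above
#
# getattr() avoids evaluating arbitrary strings, but either form relies on the
# subclass setting the attribute before this parent routine runs.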
def __initialize_optimization(self):
# creating initial decision vectors for feasible point method
if self.__method == "GeneralApproach":
samples = self.__initialize_optimization_ga()
elif self.__method == "MassConservationApproach":
samples = self.__initialize_optimization_mca()
elif self.__method == "SemiDiffusiveApproach":
samples = self.__initialize_optimization_sda()
return samples
def __finalize_optimization(self, det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value):
important_info = ''
if self.__confidence_level_flag:
important_info = self.__confidence_level(obtained_minimums, self.__change_in_rel_error, important_info)
important_info += str(len(det_point_sets_fun)) + " point(s) passed the optimization criteria. " + "\n"
else:
important_info += "Smallest value achieved by objective function: " + str(smallest_value) + "\n"
important_info += str(len(det_point_sets_fun)) + " point(s) passed the optimization criteria. " + "\n"
return det_point_sets, det_point_sets_fun, important_info
def __initialize_mpi_optimization(self):
# initializing the MPI process
global MPI
from mpi4py import MPI
global mpi_mod
from .mpi_routines import MPIRoutines as mpi_mod
self.__comm = MPI.COMM_WORLD
self.__my_rank = self.__comm.Get_rank()
self.__num_cores = self.__comm.Get_size()
self.__comm.Barrier()
# creating initial decision vectors for feasible point method
if self.__method == "GeneralApproach":
samples = self.__initialize_optimization_ga()
elif self.__method == "MassConservationApproach":
samples = self.__initialize_optimization_mca()
elif self.__method == "SemiDiffusiveApproach":
samples = self.__initialize_optimization_sda()
return samples
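# Sketch of the rank-0 pattern used here with mpi4py: only the root rank draws the
# samples, then distribute_points/bcast hand each worker its share plus the shared
# read-only variables (schematic, assuming a hypothetical draw_samples() helper):
#
#     comm = MPI.COMM_WORLD
#     rank, size = comm.Get_rank(), comm.Get_size()
#     samples = draw_samples() if rank == 0 else None
#     my_chunk = mpi_mod.distribute_points(samples, rank, size, comm)
#     shared = comm.bcast(shared if rank == 0 else None, root=0)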
def __finalize_mpi_optimization(self, det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value):
important_info = ''
self.__comm.Barrier()
if self.__confidence_level_flag:
full_obtained_minimums = mpi_mod.gather_single_value(obtained_minimums, self.__iterations, self.__comm, self.__my_rank)
if self.__my_rank == 0:
important_info = self.__confidence_level(full_obtained_minimums, self.__change_in_rel_error, important_info)
else:
smallest_values = self.__comm.gather(smallest_value, root=0)
if self.__my_rank == 0:
min_value = min(smallest_values)
important_info += "Smallest value achieved by objective function: " + str(min_value) + "\n"
list_det_point_sets = mpi_mod.gather_list_of_values(det_point_sets, self.__comm, self.__my_rank)
list_det_point_sets_fun = mpi_mod.gather_list_of_values(det_point_sets_fun, self.__comm, self.__my_rank)
self.__comm.Barrier()
if self.__my_rank == 0:
important_info += str(len(list_det_point_sets)) + " point(s) passed the optimization criteria. " + "\n"
return list_det_point_sets, list_det_point_sets_fun, important_info
def __initialize_optimization_ga(self):
if self.__my_rank == 0 or self.__my_rank is None:
numpy.random.seed(self._GeneralApproach__seed)
if self._GeneralApproach__fix_reactions:
samples = numpy.random.rand(self.__iterations, len(self._GeneralApproach__bounds) -
len(self._GeneralApproach__fixed_reaction_indices))
self._GeneralApproach__temp_bounds = [self._GeneralApproach__bounds[i] for i in
range(len(self._GeneralApproach__bounds)) if i not in
self._GeneralApproach__fixed_reaction_indices]
ranges = numpy.asarray(self._GeneralApproach__temp_bounds, dtype=numpy.float64)
samples = samples * (ranges[:, 1] - ranges[:, 0]) + ranges[:, 0]
else:
self._GeneralApproach__temp_bounds = None
samples = numpy.random.rand(self.__iterations, len(self._GeneralApproach__bounds))
ranges = numpy.asarray(self._GeneralApproach__bounds, dtype=numpy.float64)
samples = samples * (ranges[:, 1] - ranges[:, 0]) + ranges[:, 0]
self._GeneralApproach__x_full = numpy.zeros(len(self._GeneralApproach__lagrangian_vars), dtype=numpy.float64)
# setting up equality and inequality constraints provided by the user
if self._GeneralApproach__constraints:
self._GeneralApproach__full_constraints = []
for i in self._GeneralApproach__constraints:
if i["type"] == "ineq":
self._GeneralApproach__full_constraints.append([lambda x, func: numpy.maximum(numpy.float64(0.0), numpy.float64(-1.0 * func(x))),
i["fun"]]) # TODO: make it be 0.5 times greater or something
elif i["type"] == "eq":
self._GeneralApproach__full_constraints.append([lambda x, func: numpy.float64(numpy.abs(func(x))), i["fun"]])
else:
print("The type of constraint provided is unknown. Please review the entered constraints.")
sys.exit()
else:
self._GeneralApproach__full_constraints = []
else:
samples = None
self._GeneralApproach__temp_bounds = None
self._GeneralApproach__x_full = None
self._GeneralApproach__full_constraints = None
if self.__parallel_flag:
sample_portion = mpi_mod.distribute_points(samples, self.__my_rank, self.__num_cores, self.__comm)
# broadcast important variables
self._GeneralApproach__temp_bounds = self.__comm.bcast(self._GeneralApproach__temp_bounds, root=0)
self._GeneralApproach__x_full = self.__comm.bcast(self._GeneralApproach__x_full, root=0)
self._GeneralApproach__full_constraints = self.__comm.bcast(self._GeneralApproach__full_constraints, root=0)
self.__comm.Barrier()
return sample_portion
else:
return samples
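# The sampling above maps numpy.random.rand() draws from [0, 1) onto the user bounds
# per dimension with an affine rescale: sample * (upper - lower) + lower. A small
# standalone illustration with assumed bounds:
#
#     bounds = numpy.asarray([(0.0, 10.0), (1e-3, 1.0)])
#     u = numpy.random.rand(5, len(bounds))            # 5 decision vectors in [0, 1)
#     scaled = u * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
#     # each column of `scaled` now lies inside its corresponding (lower, upper) pair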
def __initialize_optimization_mca(self):
if self.__my_rank == 0 or self.__my_rank is None:
# Generate starting points uniformly within the bound ranges
numpy.random.seed(self._MassConservationApproach__seed)
samples = numpy.random.rand(self.__iterations, len(self._MassConservationApproach__bounds) -
len(self._MassConservationApproach__equality_bounds_indices)).astype(self._MassConservationApproach__numpy_dtype)
x_candidates = []
self._MassConservationApproach__x_full = numpy.zeros(len(self._MassConservationApproach__bounds), dtype=self._MassConservationApproach__numpy_dtype)
self._MassConservationApproach__non_equality_bounds_indices = [i for i in range(len(
self._MassConservationApproach__bounds)) if i not in self._MassConservationApproach__equality_bounds_indices]
self._MassConservationApproach__true_bounds = [(self._MassConservationApproach__numpy_dtype(self._MassConservationApproach__bounds[j][0]),
self._MassConservationApproach__numpy_dtype(self._MassConservationApproach__bounds[j][1]))
for j in self._MassConservationApproach__non_equality_bounds_indices]
ranges = numpy.asarray(self._MassConservationApproach__true_bounds, dtype=self._MassConservationApproach__numpy_dtype)
samples = samples * (ranges[:, 1] - ranges[:, 0]) + ranges[:, 0]
else:
samples = None
self._MassConservationApproach__x_full = None
self._MassConservationApproach__non_equality_bounds_indices = None
self._MassConservationApproach__true_bounds = None
if self.__parallel_flag:
sample_portion = mpi_mod.distribute_points(samples, self.__my_rank, self.__num_cores, self.__comm)
# broadcast important variables
self._MassConservationApproach__non_equality_bounds_indices = self.__comm.bcast(self._MassConservationApproach__non_equality_bounds_indices, root=0)
self._MassConservationApproach__x_full = self.__comm.bcast(self._MassConservationApproach__x_full, root=0)
self._MassConservationApproach__true_bounds = self.__comm.bcast(self._MassConservationApproach__true_bounds, root=0)
self.__comm.Barrier()
return sample_portion
else:
return samples
def __initialize_optimization_sda(self):
if self.__my_rank == 0 or self.__my_rank is None:
# Generate starting points uniformly within the bound ranges
numpy.random.seed(self._SemiDiffusiveApproach__seed)
samples = numpy.random.rand(self.__iterations, len(self._SemiDiffusiveApproach__bounds) -
len(self._SemiDiffusiveApproach__equality_bounds_indices)).astype(self._SemiDiffusiveApproach__numpy_dtype)
x_candidates = []
self._SemiDiffusiveApproach__x_full = numpy.zeros(len(self._SemiDiffusiveApproach__bounds), dtype=self._SemiDiffusiveApproach__numpy_dtype)
self._SemiDiffusiveApproach__non_equality_bounds_indices = [i for i in range(len(self._SemiDiffusiveApproach__bounds))
if i not in self._SemiDiffusiveApproach__equality_bounds_indices]
self._SemiDiffusiveApproach__true_bounds = [(self._SemiDiffusiveApproach__numpy_dtype(self._SemiDiffusiveApproach__bounds[j][0]),
self._SemiDiffusiveApproach__numpy_dtype(self._SemiDiffusiveApproach__bounds[j][1]))
for j in self._SemiDiffusiveApproach__non_equality_bounds_indices]
ranges = numpy.asarray(self._SemiDiffusiveApproach__true_bounds, dtype=self._SemiDiffusiveApproach__numpy_dtype)
samples = samples * (ranges[:, 1] - ranges[:, 0]) + ranges[:, 0]
else:
samples = None
self._SemiDiffusiveApproach__x_full = None
self._SemiDiffusiveApproach__non_equality_bounds_indices = None
self._SemiDiffusiveApproach__true_bounds = None
if self.__parallel_flag:
sample_portion = mpi_mod.distribute_points(samples, self.__my_rank, self.__num_cores, self.__comm)
# broadcast important variables
self._SemiDiffusiveApproach__non_equality_bounds_indices = self.__comm.bcast(self._SemiDiffusiveApproach__non_equality_bounds_indices, root=0)
self._SemiDiffusiveApproach__x_full = self.__comm.bcast(self._SemiDiffusiveApproach__x_full, root=0)
self._SemiDiffusiveApproach__true_bounds = self.__comm.bcast(self._SemiDiffusiveApproach__true_bounds, root=0)
self.__comm.Barrier()
return sample_portion
else:
return samples
def __main_optimization_routine(self, samples):
if self.__method == "GeneralApproach":
feasible_point_sets = [samples[i] for i in range(len(samples))]
else:
feasible_point_sets = self.__feasible_point_method(samples)
det_point_sets = []
det_point_sets_fun = []
smallest_value = numpy.float(1e8)
if self.__confidence_level_flag:
obtained_minimums = numpy.zeros(len(feasible_point_sets), dtype=numpy.float64)
else:
obtained_minimums = None
if len(feasible_point_sets) != 0:
if self.__comm is not None:
if self.__my_rank == 0:
print("")
print("Running the multistart optimization method ...")
self.__start_time = MPI.Wtime()
else:
print("")
print("Running the multistart optimization method ...")
self.__start_time = time.time()
for i in range(len(feasible_point_sets)):
with numpy.errstate(divide='ignore', invalid='ignore'):
if self.__method == "GeneralApproach":
result = self._GeneralApproach__run_global_optimization_routine(feasible_point_sets[i])
elif self.__method == "MassConservationApproach":
result = self._MassConservationApproach__run_global_optimization_routine(feasible_point_sets[i])
elif self.__method == "SemiDiffusiveApproach":
result = self._SemiDiffusiveApproach__run_global_optimization_routine(feasible_point_sets[i])
if self.__print_flag:
print("Global function value: " + str(result.fun))
print("Decision vector used: ")
print(result.x)
if abs(result.fun) > numpy.float64(1e-100):
if self.__method == "GeneralApproach":
result1 = self._GeneralApproach__run_local_optimization_routine(result.x)
elif self.__method == "MassConservationApproach":
result1 = self._MassConservationApproach__run_local_optimization_routine(result.x)
elif self.__method == "SemiDiffusiveApproach":
result1 = self._SemiDiffusiveApproach__run_local_optimization_routine(result.x)
if self.__print_flag:
print("Local function value: " + str(result1.fun))
print("Decision vector used: ")
print(result1.x)
if smallest_value > result1.fun:
smallest_value = result1.fun
if self.__confidence_level_flag:
obtained_minimums[i] = result1.fun
if abs(result1.fun) <= numpy.finfo(float).eps:
if self.__method == "GeneralApproach":
out = self._GeneralApproach__create_final_points(result1.x)
elif self.__method == "MassConservationApproach":
out = self._MassConservationApproach__create_final_points(result1.x)
elif self.__method == "SemiDiffusiveApproach":
out = self._SemiDiffusiveApproach__create_final_points(result1.x)
if out is not None:
det_point_sets.append(out)
det_point_sets_fun.append(result1.fun)
else:
if smallest_value > result.fun:
smallest_value = result.fun
if self.__method == "GeneralApproach":
out = self._GeneralApproach__create_final_points(result.x)
elif self.__method == "MassConservationApproach":
out = self._MassConservationApproach__create_final_points(result.x)
elif self.__method == "SemiDiffusiveApproach":
out = self._SemiDiffusiveApproach__create_final_points(result.x)
if out is not None:
det_point_sets.append(out)
det_point_sets_fun.append(result.fun)
if self.__confidence_level_flag:
obtained_minimums[i] = result.fun
if self.__print_flag:
print("")
if self.__comm is not None:
self.__comm.Barrier()
if self.__my_rank == 0:
self.__end_time = MPI.Wtime()
print("Elapsed time for multistart method: " + str(self.__end_time - self.__start_time))
print("")
else:
self.__end_time = time.time()
print("Elapsed time for multistart method: " + str(self.__end_time - self.__start_time))
print("")
else:
raise Exception("Optimization needs to be run with more iterations or different bounds.")
return det_point_sets, det_point_sets_fun, obtained_minimums, smallest_value
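# The routine above follows a standard multistart pattern: for each feasible starting
# point run a global optimizer, polish the candidate with a local optimizer, and accept
# the point only if the polished objective is at machine precision (numerically zero).
# A schematic sketch with hypothetical solver names, not the project's actual routines:
#
#     for x0 in feasible_points:
#         x_global = global_solver(objective, x0)       # e.g. a differential-evolution step
#         x_local = local_solver(objective, x_global)   # e.g. a scipy.optimize.minimize polish
#         if abs(objective(x_local)) <= numpy.finfo(float).eps:
#             accepted.append(x_local)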
def __feasible_point_method(self, samples):
x_candidates = []
if self.__comm is not None:
if self.__my_rank == 0:
print("")
print("Running feasible point method for " + str(self.__iterations) + " iterations ...")
self.__start_time = MPI.Wtime()
else:
print("")
print("Running feasible point method for " + str(self.__iterations) + " iterations ...")
self.__start_time = time.time()
for n in range(len(samples)):
with numpy.errstate(divide='ignore', invalid='ignore'):
if self.__method == "MassConservationApproach":
result = self._MassConservationApproach__run_local_optimization_routine_penalty_1(samples[n])
elif self.__method == "SemiDiffusiveApproach":
result = self._SemiDiffusiveApproach__run_local_optimization_routine_penalty_1(samples[n])
if abs(result.fun) > numpy.float64(1e-100):
if self.__method == "MassConservationApproach":
result0 = self._MassConservationApproach__run_local_optimization_routine_penalty_2(result.x)
output = self._MassConservationApproach__feasible_point_check(result0.x, result0.fun)
elif self.__method == "SemiDiffusiveApproach":
result0 = self._SemiDiffusiveApproach__run_local_optimization_routine_penalty_2(result.x)
output = self._SemiDiffusiveApproach__feasible_point_check(result0.x, result0.fun)
if self.__print_flag:
print("Objective function value: " + str(result0.fun))
print("Decision vector used: ")
print(result0.x)
print("")
if output or self.__confidence_level_flag:
x_candidates.append(result0.x)
else:
if self.__method == "MassConservationApproach":
output = self._MassConservationApproach__feasible_point_check(result.x, result.fun)
elif self.__method == "SemiDiffusiveApproach":
output = self._SemiDiffusiveApproach__feasible_point_check(result.x, result.fun)
if self.__print_flag:
print("Objective function value: " + str(result.fun))
print("Decision vector used: ")
print(result.x)
print("")
if output or self.__confidence_level_flag:
x_candidates.append(result.x)
if self.__comm is not None:
self.__comm.Barrier()
if self.__my_rank == 0:
self.__end_time = MPI.Wtime()
print("Elapsed time for feasible point method: " + str(self.__end_time - self.__start_time))
# checking number of elements of feasible_point_sets for each core to see if we need to redistribute them
redistribute_flag = len(x_candidates) == len(samples)
val = self.__comm.allreduce(redistribute_flag, op=MPI.LAND)
if not val:
len_total = self.__comm.allreduce(len(x_candidates), op=MPI.SUM)
if len_total > 0:
array_of_feasibles = mpi_mod.gather_numpy_array_of_values(x_candidates, self.__comm, self.__my_rank)
x_candidates = mpi_mod.distribute_points(array_of_feasibles, self.__my_rank, self.__num_cores,
self.__comm)
else:
if self.__my_rank == 0:
print("No feasible points were found, please rerun the optimization | |
# Repository: nomadcoder-a/sedr -- File: lemur/manage.py
from __future__ import unicode_literals # at top of module
import os
import sys
import base64
import time
import requests
import json
from gunicorn.config import make_settings
from cryptography.fernet import Fernet
from lockfile import LockFile, LockTimeout
from flask import current_app
from flask.ext.script import Manager, Command, Option, prompt_pass
from flask.ext.migrate import Migrate, MigrateCommand, stamp
from flask_script.commands import ShowUrls, Clean, Server
from lemur import database
from lemur.users import service as user_service
from lemur.roles import service as role_service
from lemur.certificates import service as cert_service
from lemur.sources import service as source_service
from lemur.notifications import service as notification_service
from lemur.certificates.verify import verify_string
from lemur.sources.service import sync
from lemur import create_app
# Needed to be imported so that SQLAlchemy create_all can find our models
from lemur.users.models import User # noqa
from lemur.roles.models import Role # noqa
from lemur.authorities.models import Authority # noqa
from lemur.certificates.models import Certificate # noqa
from lemur.destinations.models import Destination # noqa
from lemur.domains.models import Domain # noqa
from lemur.notifications.models import Notification # noqa
from lemur.sources.models import Source # noqa
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config')
migrate = Migrate(create_app)
KEY_LENGTH = 40
DEFAULT_CONFIG_PATH = '~/.lemur/lemur.conf.py'
DEFAULT_SETTINGS = 'lemur.conf.server'
SETTINGS_ENVVAR = 'LEMUR_CONF'
CONFIG_TEMPLATE = """
# This is just Python which means you can inherit and tweak settings
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
ADMINS = frozenset([''])
THREADS_PER_PAGE = 8
# General
# These will need to be set to `True` if you are developing locally
CORS = False
debug = False
# this is the secret key used by flask session management
SECRET_KEY = '{flask_secret_key}'
# You should consider storing these separately from your config
LEMUR_TOKEN_SECRET = '{secret_token}'
LEMUR_ENCRYPTION_KEY = '{encryption_key}'
# this is a list of domains as regexes that only admins can issue
LEMUR_RESTRICTED_DOMAINS = []
# Mail Server
LEMUR_EMAIL = ''
LEMUR_SECURITY_TEAM_EMAIL = []
# Certificate Defaults
LEMUR_DEFAULT_COUNTRY = ''
LEMUR_DEFAULT_STATE = ''
LEMUR_DEFAULT_LOCATION = ''
LEMUR_DEFAULT_ORGANIZATION = ''
LEMUR_DEFAULT_ORGANIZATIONAL_UNIT = ''
# Logging
LOG_LEVEL = "DEBUG"
LOG_FILE = "lemur.log"
# Database
# modify this if you are not using a local database
SQLALCHEMY_DATABASE_URI = 'postgresql://lemur:lemur@localhost:5432/lemur'
# AWS
#LEMUR_INSTANCE_PROFILE = 'Lemur'
# Issuers
# These will be dependent on which 3rd party that Lemur is
# configured to use.
# CLOUDCA_URL = ''
# CLOUDCA_PEM_PATH = ''
# CLOUDCA_BUNDLE = ''
# number of years to issue if not specified
# CLOUDCA_DEFAULT_VALIDITY = 2
# VERISIGN_URL = ''
# VERISIGN_PEM_PATH = ''
# VERISIGN_FIRST_NAME = ''
# VERISIGN_LAST_NAME = ''
# VERISIGN_EMAIL = ''
"""
@MigrateCommand.command
def create():
database.db.create_all()
stamp(revision='head')
@MigrateCommand.command
def drop_all():
database.db.drop_all()
@manager.command
def check_revoked():
"""
Function attempts to update Lemur's internal cache with revoked
certificates. This is called periodically by Lemur. It checks both
CRLs and OCSP to see if a certificate is revoked. If Lemur encounters an
issue with verification, it marks the certificate status
as `unknown`.
"""
for cert in cert_service.get_all_certs():
try:
if cert.chain:
status = verify_string(cert.body, cert.chain)
else:
status = verify_string(cert.body, "")
cert.status = 'valid' if status else 'invalid'
except Exception as e:
cert.status = 'unknown'
database.update(cert)
@manager.shell
def make_shell_context():
"""
Creates a python REPL with several default imports
in the context of the current_app
:return:
"""
return dict(current_app=current_app)
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
output = CONFIG_TEMPLATE.format(
encryption_key=base64.b64encode(os.urandom(KEY_LENGTH)),
secret_token=base64.b64encode(os.urandom(KEY_LENGTH)),
flask_secret_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
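# Note: with the Python 2 era stack used here (flask.ext.*, iteritems below),
# base64.b64encode() returns a str that drops straight into CONFIG_TEMPLATE. A
# Python 3 port would need to decode the bytes, e.g.:
#
#     flask_secret_key=base64.b64encode(os.urandom(KEY_LENGTH)).decode('utf-8'),
#
# otherwise the rendered config would contain literal b'...' values.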
@manager.option('-s', '--sources', dest='labels')
def sync_sources(labels):
"""
Attempts to run several methods of certificate discovery. This is
run on a periodic basis and updates the Lemur datastore with the
information it discovers.
"""
if not labels:
sys.stdout.write("Active\tLabel\tDescription\n")
for source in source_service.get_all():
sys.stdout.write(
"{active}\t{label}\t{description}!\n".format(
label=source.label,
description=source.description,
active=source.active
)
)
else:
start_time = time.time()
lock_file = "/tmp/.lemur_lock"
sync_lock = LockFile(lock_file)
while not sync_lock.i_am_locking():
try:
sync_lock.acquire(timeout=10) # wait up to 10 seconds
sys.stdout.write("[+] Staring to sync sources: {labels}!\n".format(labels=labels))
labels = labels.split(",")
if labels[0] == 'all':
sync()
else:
sync(labels=labels)
sys.stdout.write(
"[+] Finished syncing sources. Run Time: {time}\n".format(
time=(time.time() - start_time)
)
)
except LockTimeout:
sys.stderr.write(
"[!] Unable to acquire file lock on {file}, is there another sync running?\n".format(
file=lock_file
)
)
sync_lock.break_lock()
sync_lock.acquire()
sync_lock.release()
sync_lock.release()
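# The loop above retries sync_lock.acquire(timeout=10) until this process owns the
# lock, breaking a stale lock on LockTimeout. If simply blocking until the lock frees
# is acceptable, the lockfile context-manager form is a simpler sketch:
#
#     with LockFile(lock_file):
#         sync(labels=labels)
#
# which guarantees release even if sync() raises.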
@manager.command
def notify():
"""
Runs Lemur's notification engine, which looks for expiring certificates and sends
notifications out to those that have subscribed to them.
:return:
"""
sys.stdout.write("Starting to notify subscribers about expiring certificates!\n")
count = notification_service.send_expiration_notifications()
sys.stdout.write(
"Finished notifying subscribers about expiring certificates! Sent {count} notifications!\n".format(
count=count
)
)
class InitializeApp(Command):
"""
This command will bootstrap our database with any destinations as
specified by our config.
Additionally a Lemur user will be created as a default user
and be used when certificates are discovered by Lemur.
"""
option_list = (
Option('-p', '--password', dest='password'),
)
def run(self, password):
create()
user = user_service.get_by_username("lemur")
if not user:
if not password:
sys.stdout.write("We need to set Lemur's password to continue!\n")
password = prompt_pass("Password")
password1 = prompt_pass("<PASSWORD>")
if password != password1:
sys.stderr.write("[!] Passwords do not match!\n")
sys.exit(1)
role = role_service.get_by_name('admin')
if role:
sys.stdout.write("[-] Admin role already created, skipping...!\n")
else:
# we create an admin role
role = role_service.create('admin', description='this is the lemur administrator role')
sys.stdout.write("[+] Created 'admin' role\n")
user_service.create("lemur", password, '<PASSWORD>', True, None, [role])
sys.stdout.write("[+] Added a 'lemur' user and added it to the 'admin' role!\n")
else:
sys.stdout.write("[-] Default user has already been created, skipping...!\n")
sys.stdout.write("[+] Creating expiration email notifications!\n")
sys.stdout.write("[!] Using {0} as specified by LEMUR_SECURITY_TEAM_EMAIL for notifications\n".format("LEMUR_SECURITY_TEAM_EMAIL"))
intervals = current_app.config.get("LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS", [])
sys.stdout.write(
"[!] Creating {num} notifications for {intervals} days as specified by LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS\n".format(
num=len(intervals),
intervals=",".join([str(x) for x in intervals])
)
)
recipients = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
notification_service.create_default_expiration_notifications("DEFAULT_SECURITY", recipients=recipients)
sys.stdout.write("[/] Done!\n")
class CreateUser(Command):
"""
This command allows for the creation of a new user within Lemur
"""
option_list = (
Option('-u', '--username', dest='username', required=True),
Option('-e', '--email', dest='email', required=True),
Option('-a', '--active', dest='active', default=True),
Option('-r', '--roles', dest='roles', default=[])
)
def run(self, username, email, active, roles):
role_objs = []
for r in roles:
role_obj = role_service.get_by_name(r)
if role_obj:
role_objs.append(role_obj)
else:
sys.stderr.write("[!] Cannot find role {0}".format(r))
sys.exit(1)
password1 = prompt_pass("Password")
password2 = prompt_pass("<PASSWORD>")
if password1 != password2:
sys.stderr.write("[!] Passwords do not match")
sys.exit(1)
user_service.create(username, password1, email, active, None, role_objs)
sys.stdout.write("[+] Created new user: {0}".format(username))
class CreateRole(Command):
"""
This command allows for the creation of a new role within Lemur
"""
option_list = (
Option('-n', '--name', dest='name', required=True),
Option('-u', '--users', dest='users', default=[]),
Option('-d', '--description', dest='description', required=True)
)
def run(self, name, users, description):
user_objs = []
for u in users:
user_obj = user_service.get_by_username(u)
if user_obj:
user_objs.append(user_obj)
else:
sys.stderr.write("[!] Cannot find user {0}".format(u))
sys.exit(1)
role_service.create(name, description=description, users=user_objs)
sys.stdout.write("[+] Created new role: {0}".format(name))
class LemurServer(Command):
"""
This is the main Lemur server, it runs the flask app with gunicorn and
uses any configuration options passed to it.
You can pass all standard gunicorn flags to this command as if you were
running gunicorn itself.
For example:
lemur start -w 4 -b 127.0.0.0:8002
Will start gunicorn with 4 workers bound to 127.0.0.0:8002
"""
description = 'Run the app within Gunicorn'
def get_options(self):
settings = make_settings()
options = (
Option(*klass.cli, action=klass.action)
for setting, klass in settings.iteritems() if klass.cli
)
return options
def run(self, *args, **kwargs):
from gunicorn.app.wsgiapp import WSGIApplication
app = WSGIApplication()
app.app_uri = 'lemur:create_app(config="{0}")'.format(kwargs.get('config'))
return app.run()
@manager.command
def create_config(config_path=None):
"""
Creates a new configuration file if one does not already exist
"""
if not config_path:
config_path = DEFAULT_CONFIG_PATH
config_path = os.path.expanduser(config_path)
dir = os.path.dirname(config_path)
if not os.path.exists(dir):
os.makedirs(dir)
config = generate_settings()
with open(config_path, 'w') as f:
f.write(config)
sys.stdout.write("[+] Created a new configuration file {0}\n".format(config_path))
@manager.command
def lock(path=None):
"""
Encrypts a given path. This directory can be used to store secrets needed for normal
Lemur operation. This is especially useful for storing secrets needed for communication
with third parties (e.g. external certificate authorities).
Lemur does not assume anything about the contents of the directory and will attempt to
encrypt all files contained within. Currently this has only been tested against plain
text files.
Path defaults to ~/.lemur/keys
:param: path
"""
if not path:
path = os.path.expanduser('~/.lemur/keys')
dest_dir = os.path.join(path, "encrypted")
sys.stdout.write("[!] Generating a new key...\n")
key = Fernet.generate_key()
if not os.path.exists(dest_dir):
sys.stdout.write("[+] Creating encryption directory: {0}\n".format(dest_dir))
os.makedirs(dest_dir)
for root, dirs, files in os.walk(os.path.join(path, 'decrypted')):
for f in files:
source = os.path.join(root, f)
dest = os.path.join(dest_dir, f + ".enc")
with open(source, 'rb') as in_file, open(dest, 'wb') as out_file:
f = Fernet(key)
data = f.encrypt(in_file.read())
out_file.write(data)
sys.stdout.write("[+] Writing file: {0} Source: {1}\n".format(dest, source))
sys.stdout.write("[+] Keys have been encrypted with key {0}\n".format(key))
@manager.command
def unlock(path=None):
"""
Decrypts all of the files in a given directory with provided password.
This is most commonly used during the startup sequence of Lemur.
"""
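# unlock() presumably mirrors lock() above with Fernet decryption; a hedged sketch of
# that assumption (the key prompt wording and directory layout are guesses):
#
#     key = prompt_pass("Encryption key")
#     f = Fernet(key)
#     for root, dirs, files in os.walk(os.path.join(path, 'encrypted')):
#         for name in files:
#             with open(os.path.join(root, name), 'rb') as in_file:
#                 data = f.decrypt(in_file.read())
#             # write the plaintext alongside the encrypted copy, dropping the .enc suffix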
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 11:23:19 2019
@author: Lee
"""
"""Goal is to run the surrounding 4 models based on the geometry input:
The surrounding 4 models will have varying secondary air flow rate:
25%, 50%, 125%, 150%.
The output will be static, and will be used to compare the 100% secondary airflow model
"""
import os
from os import path
# Input velocity
U_100 = 100 #m/s ---> to achieve 0.033 m3/s
def compute_velocities(U_100):
"""Compute the secondary air flow rates (25-150%) based on the 100% airflow rate"""
quarter_scale = 0.25 # 25 percent scale
half_scale = 0.50 # half scale
five_quarter_scale = 1.25 # adding 25% to U
six_quarter_scale = 1.5 # adding 50% to U
# Surrounding velocities RHS
U_25_RHS = U_100*quarter_scale
U_50_RHS = U_100*half_scale
U_100_RHS = U_100
U_125_RHS = U_100*five_quarter_scale
U_150_RHS = U_100*six_quarter_scale
# Surrounding velocities LHS
U_25_LHS = -1*U_100*quarter_scale
U_50_LHS = -1*U_100*half_scale
U_100_LHS = -1*U_100
U_125_LHS = -1*U_100*five_quarter_scale
U_150_LHS = -1*U_100*six_quarter_scale
# format RHS velocities as strings with max length 5
U_25_RHS_str = str(U_25_RHS)[:5]
U_50_RHS_str = str(U_50_RHS)[:5]
U_100_RHS_str = str(U_100_RHS)[:5]
U_125_RHS_str = str(U_125_RHS)[:5]
U_150_RHS_str = str(U_150_RHS)[:5]
# format LHS velocities as strings with max length 5
U_25_LHS_str = str(U_25_LHS)[:6]
U_50_LHS_str = str(U_50_LHS)[:6]
U_100_LHS_str = str(U_100_LHS)[:6]
U_125_LHS_str = str(U_125_LHS)[:6]
U_150_LHS_str = str(U_150_LHS)[:6]
return U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str
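# Design note: the per-scale variables above could be generated in one pass; a compact
# sketch using the same string-truncation rule (5 characters for RHS, 6 for LHS):
#
#     scales = {"25": 0.25, "50": 0.50, "100": 1.0, "125": 1.25, "150": 1.5}
#     rhs = {k: str(U_100 * s)[:5] for k, s in scales.items()}
#     lhs = {k: str(-1 * U_100 * s)[:6] for k, s in scales.items()}
#
# The explicit variables are kept here to match the rest of the script's interfaces.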
def locate_directories():
"""Locate the directories for the 100% and all surrounding cases"""
# Get the current working directory --should be the StoveOpt one
current_working_dir = os.getcwd() # absolute path of current working direcrtory
print("here is your current WD:" + current_working_dir)
# Steps from the StoveOpt parent folder to the counterFlowFlame2D folder
dir_steps = "/foamfiles/counterFlowFlame2D/"
# Extra steps to the various cases
step_25 = "case_25/"
step_50 = "case_50/"
step_100 = "case_100/"
step_125 = "case_125/"
step_150 = "case_150/"
# Full filepaths for the various cases
path_25 = current_working_dir + dir_steps + step_25
path_50 = current_working_dir + dir_steps + step_50
path_125 = current_working_dir + dir_steps + step_125
path_150 = current_working_dir + dir_steps + step_150
path_100 = current_working_dir + dir_steps + step_100
# return the case directory paths for the 100% case and the four surrounding cases
return path_100, path_25, path_50, path_125, path_150
def locate_zero_files(path_100, path_25, path_50, path_125, path_150):
"""locate and return the full path of the boundary condition velocity files for each of the five cases"""
# path step for the zero folder
zero_step = "0/"
# Full respective paths for the zero folders
path_0_100 = path_100 + zero_step
path_0_25 = path_25 + zero_step
path_0_50 = path_50 + zero_step
path_0_125 = path_125 + zero_step
path_0_150 = path_150 + zero_step
# filenames for the velocity boundary conditions
fname_0_100 = path_0_100 + "U"
fname_0_25 = path_0_25 + "U"
fname_0_50 = path_0_50 + "U"
fname_0_125 = path_0_125 + "U"
fname_0_150 = path_0_150 + "U"
return fname_0_100, fname_0_25, fname_0_50, fname_0_125, fname_0_150, path_0_100, path_0_25, path_0_50, path_0_125, path_0_150
def write_velocity_files(U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str, path_0_100, path_0_125, path_0_150, path_0_25, path_0_50):
"""Create the details file for the surrounding cases, and write the velocities in line two"""
fname = "details" # Filename
file_25_path = path_0_25
file_50_path = path_0_50
file_100_path = path_0_100
file_125_path = path_0_125
file_150_path = path_0_150
details_file_25 = file_25_path + fname
details_file_50 = file_50_path + fname
details_file_100 = file_100_path + fname
details_file_125 = file_125_path + fname
details_file_150 = file_150_path + fname
with open(details_file_25, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_25_RHS_str)
with open(details_file_50, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_50_RHS_str)
with open(details_file_100, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_100_RHS_str)
with open(details_file_125, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_125_RHS_str)
with open(details_file_150, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_150_RHS_str)
return details_file_25, details_file_50, details_file_100, details_file_125, details_file_150
def edit_boundary_conditions(fname_0_100, fname_0_25, fname_0_50, fname_0_125, fname_0_150, U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str):
"""Open each of the boundary condition files and insert the velocity strings into the respective files"""
#LC: might have to add in some functionality to ensure the pre-existing values are shorter than the overwrite
# Byte offset in the U file where the secondary air RHS boundary entry, e.g. (0 0.1 0), is written
RHS_pos = 893
# Creating full strings for each surrounding case RHS
U_RHS_25 = "(0 " + U_25_RHS_str + " 0);"
U_RHS_50 = "(0 " + U_50_RHS_str + " 0);"
U_RHS_100 = "(0 " + U_100_RHS_str + " 0);"
U_RHS_125 = "(0 " + U_125_RHS_str + " 0);"
U_RHS_150 = "(0 " + U_150_RHS_str + " 0);"
# This is a problem---need to find the length of the string added and solve for the position of the next inlet
# The position for the beginning of the secondary air condition LHS (0 -0.1 0) is 1098
LHS_pos = 1098
# Creating full strings for each surrounding case LHS
U_LHS_25 = "(0 " + U_25_LHS_str + " 0);"
U_LHS_50 = "(0 " + U_50_LHS_str + " 0);"
U_LHS_100 = "(0 " + U_100_LHS_str + " 0);"
U_LHS_125 = "(0 " + U_125_LHS_str + " 0);"
U_LHS_150 = "(0 " + U_150_LHS_str + " 0);"
# Write the RHS inlet first
with open(fname_0_100, 'r+') as f:
f.seek(RHS_pos)
f.write(" ")
f.write("Secondary_air_RHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_RHS_100 + '\n')
f.write(" ")
f.write("}" + '\n')
f.write('\n')
f.write(" ")
f.write("Secondary_air_LHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_LHS_100 + '\n')
f.write(" ")
f.write("}" + '\n')
with open(fname_0_25, 'r+') as f:
f.seek(RHS_pos)
f.write(" ")
f.write("Secondary_air_RHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_RHS_25 + '\n')
f.write(" ")
f.write("}" + '\n')
f.write('\n')
f.write(" ")
f.write("Secondary_air_LHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_LHS_25 + '\n')
f.write(" ")
f.write("}" + '\n')
with open(fname_0_50, 'r+') as f:
f.seek(RHS_pos)
f.write(" ")
f.write("Secondary_air_RHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_RHS_50 + '\n')
f.write(" ")
f.write("}" + '\n')
f.write('\n')
f.write(" ")
f.write("Secondary_air_LHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_LHS_50 + '\n')
f.write(" ")
f.write("}" + '\n')
with open(fname_0_125, 'r+') as f:
f.seek(RHS_pos)
f.write(" ")
f.write("Secondary_air_RHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_RHS_125 + '\n')
f.write(" ")
f.write("}" + '\n')
f.write('\n')
f.write(" ")
f.write("Secondary_air_LHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_LHS_125 + '\n')
f.write(" ")
f.write("}" + '\n')
with open(fname_0_150, 'r+') as f:
f.seek(RHS_pos)
f.write(" ")
f.write("Secondary_air_RHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_RHS_150 + '\n')
f.write(" ")
f.write("}" + '\n')
f.write('\n')
f.write(" ")
f.write("Secondary_air_LHS" +'\n')
f.write(" ")
f.write("{" + '\n')
f.write(" ")
f.write("type" + " " + "fixedValue;" + '\n')
f.write(" ")
f.write("value" + " " + "uniform" + " " + U_LHS_150 + '\n')
f.write(" ")
f.write("}" + '\n')
# Find where to enter the next data
# Write the LHS secondary inlet data
return U_RHS_25, U_RHS_50, U_RHS_100, U_RHS_125, U_RHS_150, U_LHS_25, U_LHS_50, U_LHS_100, U_LHS_125, U_LHS_150
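# Design note: the five nearly identical `with open(...)` blocks above differ only in
# the target file and the two velocity strings. A hedged sketch of a helper that could
# collapse them (hypothetical name, same file layout assumptions as above):
#
#     def write_secondary_air_blocks(fname, u_rhs, u_lhs, rhs_pos=893):
#         """Write the Secondary_air_RHS/LHS fixedValue entries starting at rhs_pos in fname."""
#         with open(fname, 'r+') as f:
#             f.seek(rhs_pos)
#             for name, value in (("Secondary_air_RHS", u_rhs), ("Secondary_air_LHS", u_lhs)):
#                 f.write("    {0}\n    {{\n        type            fixedValue;\n"
#                         "        value           uniform {1}\n    }}\n\n".format(name, value))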
turns)
turns = getattr(target_ex[0], ModelOutput.TOK)[:predict_turn + 1]
setattr(target_ex[0], ModelOutput.TOK, turns)
if self._support_batch_size == 0:
if self._meta_specs:
batch, = batch
_, ex, predict_turn = batch
ex = [self._dataset[i] for i in ex]
if predict_turn is not None:
truncate_target(ex, predict_turn)
return self._wrap_batch(textdata.Batch(ex, self._dataset), max_n_turns)
else:
return self._wrap_batch(
textdata.Batch([self._dataset[i] for i in batch if i is not None], self._dataset),
max_n_turns)
mb = MetaBatch(max_n_turns=max_n_turns)
for domain_batch in batch:
support_ex, target_ex, predict_turn = domain_batch
support_ex = [self._dataset[i] for i in support_ex]
target_ex = [self._dataset[i] for i in target_ex]
if predict_turn is not None:
truncate_target(target_ex, predict_turn)
mb.append((self._wrap_batch(textdata.Batch(support_ex, self._dataset), max_n_turns),
self._wrap_batch(textdata.Batch(target_ex, self._dataset), max_n_turns)))
return mb
def __iter__(self):
"""
loop over batches in the dataset
"""
while True:
for batch in self.create_batches():
yield self._examples_to_batch(batch, self.max_n_turns)
if not self._repeat:
break
def __len__(self):
""" Returns how many batches are in one pass of the dataset """
if self._support_batch_size == 0 and not self._meta_specs:
# plain batches
return self.n_batches(len(self._dataset.examples), self._batch_size, self._allow_incomplete)
if self._meta_specs:
# meta batches with target set size 1 specified in _meta_specs
id2dlg = {dlg.dlg_id: i for i, dlg in enumerate(self._dataset)}
return sum(1 for dlg in self._meta_specs if dlg.target_dlg in id2dlg)
n_examples = min([len(self._domain_dlgs[domain]) for domain in self._domains])
return self.n_batches(n_examples, self._batch_size, False)
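# `n_batches` is defined elsewhere in this class; from its call sites it presumably
# counts how many batches one pass yields, switching between ceiling and floor
# division depending on whether an incomplete final batch is allowed. A sketch of
# that assumption:
#
#     @staticmethod
#     def n_batches(n_examples, batch_size, allow_incomplete):
#         full, rest = divmod(n_examples, batch_size)
#         return full + (1 if allow_incomplete and rest else 0)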
class BPEField(RawField):
def __init__(self, text_embedder: EmbedderInterface, is_target=False):
self.is_target = is_target
self.meta = FieldMeta()
self.meta.vocab_size = text_embedder.n_vocab
self.meta.pad_token_idx = text_embedder.pad_idx # This is set to 0 in SPM train
self.meta.unk_token_idx = text_embedder.unk_idx
self.meta.bos_token_idx = text_embedder.bos_idx
self.meta.eos_token_idx = text_embedder.eos_idx
self.use_vocab = False
self.postprocessing = None
def load_meta(self, meta):
self.meta = meta
def preprocessing(self, dlg):
return dlg
def get_meta(self) -> FieldMeta:
return self.meta
class CustomBatchProcessor(BatchProcessor):
"""
Runs in a separate thread and does some preprocessing on the example dialogues
- selecting how many turns the dialogues passed to the network should contain
- cut off turns that are too long
- pad dialogues on the turn axis and the word axis
- embed dialogues using the text_embedder (e.g. fasttext, BERT)
"""
def __init__(self, embedder_cfg: EmbedderInterface.Config,
fixed_n_turns: bool = False,
all_responses: bool = False):
# Common setup in the process for it's lifetime
self.text_embedder = EmbedderInterface.from_config(embedder_cfg)
self.pad_token_idx = self.text_embedder.pad_idx
self.unk_token_idx = self.text_embedder.unk_idx
self.bos_token_idx = self.text_embedder.bos_idx
self.eos_token_idx = self.text_embedder.eos_idx
self.fixed_n_turns = fixed_n_turns
self.all_responses = all_responses
def process_batch(self, batch: Union[textdata.Batch, MetaBatch]):
"""
Processes a batch. If it is a meta-batch, independently process support and target sets.
"""
if isinstance(batch, (textdata.Batch, BatchLike)):
return self.process_batch_nometa(batch, self.fixed_n_turns, False)
meta_batch = MetaBatch(
# support set, target set.
# the target set may be fixed to predict the last turn available
# and always predicts exactly one response.
[(self.process_batch_nometa(domain[0], False, self.all_responses),
self.process_batch_nometa(domain[1], self.fixed_n_turns, False)) for domain in batch],
max_n_turns=getattr(batch, ModelInput.DLG_LEN)
)
return meta_batch
def process_batch_nometa(self, batch: Union[textdata.Batch, BatchLike], fixed_n_turns: bool, all_responses: bool):
"""
This does what `BPEField.postprocess` used to do, with some caveats
- returns a `Batch`-like object instead of raw tensors, since `postprocess` should be called during `Batch`
creation. The `Batch` is created before this call, so we must operate on it.
- nothing is allocated to the GPU here since `BatchQueue`'s dependency isn't implemented with
`torch.multiprocessing`.
"""
inputs, targets = [], []
n_turns = getattr(batch, ModelInput.DLG_LEN)
for input_turns, target_turns in zip(getattr(batch, ModelInput.SEQ), getattr(batch, ModelOutput.TOK)):
# ensure that the last turn is a user turn, i.e. the number of turns is even
if len(input_turns) % 2 == 1:
input_turns = input_turns[:-1]
target_turns = target_turns[:-1]
if fixed_n_turns:
endpoint = len(input_turns) - 1
startpoint = 0
elif len(input_turns) >= n_turns:
# Sample endpoint from [n_turns - 1, len(turns))
endpoint = np.random.randint(n_turns - 1, len(input_turns))
startpoint = endpoint - n_turns + 1
else:
# If the dialogue is too short, just sample a random length of the end of the dialogue
endpoint = len(input_turns) - 1
startpoints = list(range(endpoint - 1, -1, -2))
startpoint = startpoints[np.random.randint(0, len(startpoints))]
# INPUT
inputs.append([turn.tolist()[self.text_embedder.pieces_slice] for turn in input_turns[startpoint:endpoint]])
# TARGET
if all_responses:
# answers to all input turns
ep_slice = slice(startpoint + 1, endpoint + 1, 2)
else:
# answer to last input turn
ep_slice = slice(endpoint, endpoint + 1)
targets.append([turn.tolist()[self.text_embedder.pieces_slice] for turn in target_turns[ep_slice]])
batchsz = len(batch)
def make_batch_tensors(dlgs, is_input=True):
# there's an extra dimension to accommodate the retrieval case
n_seqs = [len(dlg) for dlg in dlgs]
n_words = [[len(t) for t in turns] for turns in dlgs]
max_n_turns = max(n_seqs)
max_n_tokens = max([max(dlg) for dlg in n_words])
embed_dim = self.text_embedder.embed_dim
max_shape = (batchsz, max_n_turns, max_n_tokens, embed_dim)
n_words = right_pad_fixed_shape(n_words, max_shape=max_shape[:2], dtype=int)
padded_turns = right_pad_fixed_shape(dlgs, max_shape=max_shape[:3], dtype=int, value=self.pad_token_idx)
emb = None
if is_input:
emb = self.text_embedder.embed_ids_batch(padded_turns.reshape(-1, max_n_tokens)).reshape(*max_shape)
return LongTensor(padded_turns), LongTensor(n_seqs), LongTensor(n_words), emb
def make_batch_target_tensors(dlgs):
padded_turns, _, n_words, _ = make_batch_tensors(dlgs, is_input=False)
if not all_responses:
# remove turn dimension
padded_turns = torch.squeeze(padded_turns, 1)
n_words = torch.squeeze(n_words, 1)
return padded_turns, n_words
fields = {
ModelInput.SEQ: make_batch_tensors(inputs),
ModelOutput.TOK: make_batch_target_tensors(targets)
}
assert fields[ModelInput.SEQ][0].shape[0] == fields[ModelOutput.TOK][0].shape[0]
return BatchLike(batch, updated_fields=fields)
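# `right_pad_fixed_shape` is imported elsewhere in this module; from its usage above it
# presumably right-pads a nested Python list out to a fixed numpy shape filled with
# `value`. A hedged sketch of that behaviour for the 2-D case:
#
#     def right_pad_fixed_shape(nested, max_shape, dtype, value=0):
#         out = np.full(max_shape, value, dtype=dtype)
#         for i, row in enumerate(nested):
#             out[i, :len(row)] = row
#         return out
#
# The real helper also handles the 3-D (batch, turn, token) case used for padded_turns.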
class MetaData(CommonMetadata):
source_dict: FairseqDict
target_dict: FairseqDict
class DialogueDataHandler(DataHandler):
class Config(DataHandler.Config):
# determines by which field data is sorted
text_feature_name: str = ModelInput.SEQ
# shuffle: bool = False
# sort_within_batch: bool = False
max_turns: int = 12
n_workers: int = 4
max_load_workers: int = 4
seed: int = 42
featurized_cache_dir: str = ''
train_domains: List[str] = []
eval_domains: List[str] = []
test_domains: List[str] = []
# dictates how many samples go to a process, and determines the lifetime of the process
# make this larger for larger datasets
preproc_chunksize: int = 1000
all_responses: bool = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metadata_cls: Type = MetaData
self.metadata: MetaData = MetaData()
# hint for mypy (otherwise it assumes pytext's class Featurizer from parent
# class)
self.featurizer: TokenIdFeaturizer = kwargs['featurizer']
self.n_workers: int = kwargs['n_workers']
self.max_load_workers: int = kwargs['max_load_workers']
@classmethod
def from_config(cls, config: Config,
feature_config: ModelInputConfig,
target_config: ModelOutputConfig,
text_embedder_config: EmbedderInterface.Config,
**kwargs):
text_embedder: EmbedderInterface = EmbedderInterface.from_config(text_embedder_config)
features: Dict[str, Field] = {
ModelInput.SEQ: BPEField(text_embedder)
}
assert len(features)
targets: Dict[str, Field] = {
ModelOutputConfig._name: BPEField(text_embedder, is_target=True),
}
extra_fields = {
RAW_TEXT: RawField(),
ModelInput.DLG_LEN: RawField(),
ModelInput.DLG_ID: RawField(),
ModelInput.DOMAIN_ID: RawField(),
ModelInput.TASK_ID: RawField()
}
kwargs.update(config.items())
self = cls(
raw_columns=[], # ignored in our read function
features=features,
labels=targets,
extra_fields=extra_fields,
**kwargs,
)
self.max_turns = config.max_turns
self.text_embedder_cfg = text_embedder_config
self.all_responses = config.all_responses
self.preproc_chunksize = config.preproc_chunksize
self.train_domains = config.train_domains
self.eval_domains = config.eval_domains
self.featurized_cache_dir = config.featurized_cache_dir
self.test_domains = config.test_domains
self.text_embedder = text_embedder
self.seed = config.seed
return self
def preprocess_row(self, row_data: MetaDlgDataDialog) -> Dict[str, Any]:
featurized = self.featurizer.featurize(row_data)
res = {
# features
ModelInput.SEQ: featurized.token_ids,
# target
ModelOutputConfig._name: featurized.token_ids,
RAW_TEXT: featurized.token_ids,
}
return res
def _get_batch_iter(
self,
dataset: textdata.Dataset,
batch_size: int,
meta_batch_size: int = 1,
rank: int = 0,
world_size: int = 1,
repeat: bool = True,
n_workers: int = 4,
is_train: bool = True,
is_predict: bool = False,
**kwargs
) -> BatchIterator:
if world_size > 1 and kwargs.get("meta_batch_spec_file"):
raise RuntimeError("sharding not supported if meta_batch_spec_file is given")
dataset_shard, max_num_examples = self._get_dataset_shard(
dataset, rank, world_size
)
assert not (is_train and is_predict)
# Compute the per-worker batch size
assert (
batch_size >= world_size
), "batch size needs to be >= the distributed world size"
# TODO should we modify meta_batch_size here?
batch_size = batch_size // world_size
diter = DataIterator(
dataset_shard,
batch_size=batch_size,
repeat=repeat,
shuffle=self.shuffle,
allow_incomplete=not repeat,
meta_batch_size=meta_batch_size,
random_state=self.seed,
**kwargs
) # yields either textdata.Batch or MetaBatch (containing textdata.Batch objects)
n_batches = len(diter)
# enqueues BatchLike or a nested structure of BatchLike, because Batch is not pickleable
# CustomBatchProcessor produces the same.
bq = BatchQueue(
diter,
n_batches,
CustomBatchProcessor,
n_workers=n_workers,
qcap=3,
embedder_cfg=self.text_embedder_cfg,
fixed_n_turns=is_predict,
all_responses=self.all_responses,
)
return BatchPreparationPipeline(
diter, bq, processor=self._to_cuda_postprocess_batch, is_train=is_train)
def gen_dataset(
self, data: Iterable[MetaDlgDataDialog], include_label_fields: bool = True,
featurized_path: str = ''
) -> textdata.Dataset:
"""
Generate torchtext Dataset from raw in memory data.
Returns:
dataset(TorchText.Dataset)
*NOTE*: order will vary between `data` and what's loaded from `featurized_path`, or generated from
`parallel_featurize_batch`. This is fine since the featurized data encompasses everything needed for the
torchtext Dataset anyway.
"""
to_process = {}
to_process.update(self.features)
to_process.update(self.extra_fields)
if include_label_fields:
to_process.update(self.labels)
fields = {name: (name, field) for name, field in to_process.items()}
# Optimizations for parallel preproc and cached preproc
if featurized_path and os.path.exists(featurized_path):
with open(featurized_path, 'rb') as f:
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_config.forms import SystemImporterFileCsvConfigForm
from dfirtrack_main.models import Analysisstatus, Systemstatus, Tag, Tagcolor
class SystemImporterFileCsvConfigFormTagSpecificTestCase(TestCase):
"""system importer file CSV config form tests"""
@classmethod
def setUpTestData(cls):
# create user
User.objects.create_user(
username='testuser_system_importer_file_csv_config',
password='<PASSWORD>',
)
# create objects
Analysisstatus.objects.create(analysisstatus_name='analysisstatus_1')
Systemstatus.objects.create(systemstatus_name='systemstatus_1')
tagcolor_1 = Tagcolor.objects.create(tagcolor_name='tagcolor_1')
Tag.objects.create(
tag_name='tag_1',
tagcolor=tagcolor_1,
)
""" tag """
def test_system_importer_file_csv_config_form_tag_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': True,
'csv_column_tag': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_tag'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_tag_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': False,
'csv_column_tag': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_tag'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_tag_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': True,
'csv_default_tag': [
str(tag_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_tag'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_tag_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_tag': '2',
'csv_default_tag': [
str(tag_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_tag'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_tag_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': True,
'csv_column_tag': '2',
'csv_default_tag': [
str(tag_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_tag'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_tag_from_csv_no_prefix(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': True,
'csv_column_tag': '2',
'csv_tag_prefix_delimiter': 'tag_prefix_underscore',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_tag_prefix'],
[
'Choose prefix and delimiter for tag import from CSV to distinguish between manual set tags.'
],
)
def test_system_importer_file_csv_config_form_tag_from_csv_no_prefix_delimiter(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': True,
'csv_column_tag': '2',
'csv_tag_prefix': 'AUTO',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_tag_prefix'],
[
'Choose prefix and delimiter for tag import from CSV to distinguish between manual set tags.'
],
)
def test_system_importer_file_csv_config_form_tag_from_csv_valid(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_tag': True,
'csv_column_tag': '2',
'csv_tag_prefix': 'AUTO',
'csv_tag_prefix_delimiter': 'tag_prefix_underscore',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_tag_from_db_and_prefix(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_all',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_tag': [
str(tag_1),
],
'csv_tag_prefix': 'AUTO',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_tag_prefix'],
['Prefix and delimiter are not available when setting tags from database.'],
)
def test_system_importer_file_csv_config_form_tag_from_db_and_prefix_delimiter(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_all',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_tag': [
str(tag_1),
],
'csv_tag_prefix_delimiter': 'tag_prefix_underscore',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_tag_prefix'],
['Prefix and delimiter are not available when setting tags from database.'],
)
def test_system_importer_file_csv_config_form_tag_from_db_prefix_and_prefix_delimiter(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_all',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_tag': [
str(tag_1),
],
'csv_tag_prefix': 'AUTO',
'csv_tag_prefix_delimiter': 'tag_prefix_underscore',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_tag_prefix'],
['Prefix and delimiter are not available when setting tags from database.'],
)
def test_system_importer_file_csv_config_form_tag_from_db_and_remove_prefix(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_tag': [
str(tag_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_remove_tag'],
['Removing tags with prefix is only available when setting tags from CSV.'],
)
def test_system_importer_file_csv_config_form_tag_from_db_and_tagfree_systemstatus(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# create object
systemstatus_2 = Systemstatus.objects.create(
systemstatus_name='systemstatus_2'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_choice_tagfree_systemstatus': True,
'csv_default_tagfree_systemstatus': str(systemstatus_2),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_all',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_tag': [
str(tag_1),
],
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_tagfree_systemstatus'],
['Alternative systemstatus only available with tags from CSV.'],
)
def test_system_importer_file_csv_config_form_tag_from_db_and_tagfree_analysisstatus(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
tag_1 = Tag.objects.get(tag_name='tag_1').tag_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# create object
analysisstatus_2 = Analysisstatus.objects.create(
analysisstatus_name='analysisstatus_2'
).analysisstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_choice_tagfree_analysisstatus': True,
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_2),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_all',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_tag': [
str(tag_1),
],
t1 = (0,) * size
h1 = hash(t1)
del t1
t2 = (0,) * (size + 1)
self.failIf(h1 == hash(t2))
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
t = (None,) * size
self.assertEquals(len(t), size)
self.assertEquals(t[-1], None)
self.assertEquals(t[5], None)
self.assertEquals(t[size - 1], None)
self.assertRaises(IndexError, operator.getitem, t, size)
self.assertEquals(t[:5], (None,) * 5)
self.assertEquals(t[-5:], (None,) * 5)
self.assertEquals(t[20:25], (None,) * 5)
self.assertEquals(t[-25:-20], (None,) * 5)
self.assertEquals(t[size - 5:], (None,) * 5)
self.assertEquals(t[size - 5:size], (None,) * 5)
self.assertEquals(t[size - 6:size - 2], (None,) * 4)
self.assertEquals(t[size:size], ())
self.assertEquals(t[size:size+5], ())
# Like test_concat, split in two.
def basic_test_repeat(self, size):
t = ('',) * size
self.assertEquals(len(t), size)
t = t * 2
self.assertEquals(len(t), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_1G - 1, memuse=12)
def test_repeat_large_2(self, size):
return self.basic_test_repeat(size)
@precisionbigmemtest(size=_1G - 1, memuse=9)
def test_from_2G_generator(self, size):
try:
t = tuple(xrange(size))
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for item in t:
self.assertEquals(item, count)
count += 1
self.assertEquals(count, size)
@precisionbigmemtest(size=_1G - 25, memuse=9)
def test_from_almost_2G_generator(self, size):
try:
t = tuple(xrange(size))
count = 0
for item in t:
self.assertEquals(item, count)
count += 1
self.assertEquals(count, size)
except MemoryError:
pass # acceptable, expected on 32-bit
# Like test_concat, split in two.
def basic_test_repr(self, size):
t = (0,) * size
s = repr(t)
# The repr of a tuple of 0's is exactly three times the tuple length.
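        # e.g. repr((0, 0, 0)) == '(0, 0, 0)', which is 9 characters for a 3-tuple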
self.assertEquals(len(s), size * 3)
self.assertEquals(s[:5], '(0, 0')
self.assertEquals(s[-5:], '0, 0)')
self.assertEquals(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
# Like tuples, lists have a small, fixed-sized head and an array of
# pointers to data, so 8 bytes per size. Also like tuples, we make the
# lists hold references to various objects to test their refcount
# limits.
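    # For example, test_compare keeps two [u''] * size lists alive at once; at 8 bytes per
    # pointer per list that gives memuse=16.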
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_compare(self, size):
l1 = [u''] * size
l2 = [u''] * size
self.failUnless(l1 == l2)
del l2
l2 = [u''] * (size + 1)
self.failIf(l1 == l2)
del l2
l2 = [2] * size
self.failIf(l1 == l2)
# Test concatenating into a single list of more than 2G in length,
# and concatenating a list of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_test_concat(self, size):
l = [[]] * size
self.assertEquals(len(l), size)
l = l + l
self.assertEquals(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_test_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_test_concat(size)
def basic_test_inplace_concat(self, size):
l = [sys.stdout] * size
l += l
self.assertEquals(len(l), size * 2)
self.failUnless(l[0] is l[-1])
self.failUnless(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_inplace_concat_small(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_inplace_concat_large(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEquals(len(l), size * 5)
self.failUnless(5 in l)
self.failIf([1, 2, 3, 4, 5] in l)
self.failIf(0 in l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_hash(self, size):
l = [0] * size
self.failUnlessRaises(TypeError, hash, l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
l = [None] * size
self.assertEquals(len(l), size)
self.assertEquals(l[-1], None)
self.assertEquals(l[5], None)
self.assertEquals(l[size - 1], None)
self.assertRaises(IndexError, operator.getitem, l, size)
self.assertEquals(l[:5], [None] * 5)
self.assertEquals(l[-5:], [None] * 5)
self.assertEquals(l[20:25], [None] * 5)
self.assertEquals(l[-25:-20], [None] * 5)
self.assertEquals(l[size - 5:], [None] * 5)
self.assertEquals(l[size - 5:size], [None] * 5)
self.assertEquals(l[size - 6:size - 2], [None] * 4)
self.assertEquals(l[size:size], [])
self.assertEquals(l[size:size+5], [])
l[size - 2] = 5
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], [None, 5, None])
self.assertEquals(l.count(5), 1)
self.assertRaises(IndexError, operator.setitem, l, size, 6)
self.assertEquals(len(l), size)
l[size - 7:] = [1, 2, 3, 4, 5]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[-7:], [None, None, 1, 2, 3, 4, 5])
l[:7] = [1, 2, 3, 4, 5]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[:7], [1, 2, 3, 4, 5, None, None])
del l[size - 1]
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[-1], 4)
del l[-2:]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[-1], 2)
del l[0]
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[0], 2)
del l[:2]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[0], 4)
# Like test_concat, split in two.
def basic_test_repeat(self, size):
l = [] * size
self.failIf(l)
l = [''] * size
self.assertEquals(len(l), size)
l = l * 2
self.assertEquals(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
def basic_test_inplace_repeat(self, size):
l = ['']
l *= size
self.assertEquals(len(l), size)
self.failUnless(l[0] is l[-1])
del l
l = [''] * size
l *= 2
self.assertEquals(len(l), size * 2)
self.failUnless(l[size - 1] is l[-1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_inplace_repeat_small(self, size):
return self.basic_test_inplace_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_inplace_repeat_large(self, size):
return self.basic_test_inplace_repeat(size)
def basic_test_repr(self, size):
l = [0] * size
s = repr(l)
# The repr of a list of 0's is exactly three times the list length.
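        # e.g. repr([0, 0, 0]) == '[0, 0, 0]', which is 9 characters for a 3-element list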
self.assertEquals(len(s), size * 3)
self.assertEquals(s[:5], '[0, 0')
self.assertEquals(s[-5:], '0, 0]')
self.assertEquals(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
# list overallocates ~1/8th of the total size (on first expansion) so
# the single list.append call puts memuse at 9 bytes per size.
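    # (8 bytes per pointer plus roughly 1/8 of that in spare capacity, i.e. about 1 extra byte per element)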
@bigmemtest(minsize=_2G, memuse=9)
def test_append(self, size):
l = [object()] * size
l.append(object())
self.assertEquals(len(l), size+1)
self.failUnless(l[-3] is l[-2])
self.failIf(l[-2] is l[-1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_count(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEquals(l.count(1), size)
self.assertEquals(l.count("1"), 0)
def basic_test_extend(self, size):
l = [file] * size
l.extend(l)
self.assertEquals(len(l), size * 2)
self.failUnless(l[0] is l[-1])
self.failUnless(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_extend_small(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_extend_large(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_index(self, size):
l = [1L, 2L, 3L, 4L, 5L] * size
size *= 5
self.assertEquals(l.index(1), 0)
self.assertEquals(l.index(5, size - 5), size - 1)
self.assertEquals(l.index(5, size - 5, size), size - 1)
self.assertRaises(ValueError, l.index, 1, size - 4, size)
self.assertRaises(ValueError, l.index, 6L)
    # This test suffers from overallocation, just like test_append.
@bigmemtest(minsize=_2G + 10, memuse=9)
def test_insert(self, size):
l = [1.0] * size
l.insert(size - 1, "A")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], [1.0, "A", 1.0])
l.insert(size + 1, "B")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], ["A", 1.0, "B"])
l.insert(1, "C")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[:3], [1.0, "C", 1.0])
self.assertEquals(l[size - 3:], ["A", 1.0, "B"])
@bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
def test_pop(self, size):
l = [u"a", u"b", u"c", u"d", u"e"] * size
size *= 5
self.assertEquals(len(l), size)
item = l.pop()
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, u"e")
self.assertEquals(l[-2:], [u"c", u"d"])
item = l.pop(0)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, u"a")
self.assertEquals(l[:2], [u"b", u"c"])
item = l.pop(size - 2)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, u"c")
self.assertEquals(l[-2:], [u"b", u"d"])
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_remove(self, size):
l = [10] * size
self.assertEquals(len(l), size)
l.remove(10)
size -= 1
self.assertEquals(len(l), size)
# Because of the earlier l.remove(), this append doesn't trigger
# a resize.
l.append(5)
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-2:], [10, 5])
l.remove(5)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[-2:], [10, 10])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_reverse(self, size):
l = [1, 2, 3, 4, 5] * size
l.reverse()
self.assertEquals(len(l), size * 5)
self.assertEquals(l[-5:], [5, 4, 3, 2, 1])
self.assertEquals(l[:5], [5, 4, 3, 2, 1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_sort(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.sort()
        self.assertEquals(len(l), size * 5)
        self.assertEquals(l[:5], [1, 1, 1, 1, 1])
        self.assertEquals(l[-5:], [5, 5, 5, 5, 5])
from typing import List, Optional, Tuple
from framework import CardType, DeckList, Disruption, Manager, Card, Game
class LynaManager(Manager):
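    # Deck manager for an Invoked/Dogmatika Lyna list; generate_stats reports how often the
    # end board reaches Winda, Mechaba, 3+ disruptions, or bricks.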
# Invoked
aleister = Card("Aleister the Invoker", CardType.MONSTER)
invocation = Card("Invocation", CardType.SPELL)
meltdown = Card("Magical Meltdown", CardType.SPELL)
terraforming = Card("Terraforming", CardType.SPELL)
# Dogma
    ecclesia = Card("Dogmatika Ecclesia, the Virtuous", CardType.MONSTER)
maximus = Card("Dogmatika Maximus", CardType.MONSTER)
fleur = Card("Dogmatika Fleurdelis, the Knighted", CardType.MONSTER)
punishment = Card("Dogmatika Punishment", CardType.TRAP)
servant = Card("<NAME>", CardType.SPELL)
schism = Card("Sh<NAME>", CardType.TRAP)
# Lyna
fp_lyna = Card("Familiar-Possessed - Lyna", CardType.MONSTER)
lc_lyna = Card("Lyna the Light Charmer", CardType.MONSTER)
# Extenders
jester = Card("<NAME>", CardType.MONSTER)
souls = Card("Magicians' Souls", CardType.MONSTER)
# Trickstar Engine
candina = Card("Trickstar Candina", CardType.MONSTER)
corobane = Card("Trickstar Corobane", CardType.MONSTER)
lightstage = Card("Trickstar Lightstage", CardType.SPELL)
set_rotation = Card("Set Rotation", CardType.SPELL)
# Draw
desires = Card("Pot of Desires", CardType.SPELL)
upstart = Card("Upstart Goblin", CardType.SPELL)
knowledge = Card("Spellbook of Knowledge", CardType.SPELL)
# Hand Traps
nibiru = Card("Nibiru, the Primal Being", CardType.MONSTER)
ash = Card("Ash Blossom & Joyous Spring", CardType.MONSTER)
ogre = Card("Ghost Ogre & Snow Rabbit", CardType.MONSTER)
droll = Card("Droll & Lock Bird", CardType.MONSTER)
veiler = Card("Effect Veiler", CardType.MONSTER)
gamma = Card("PSY-Framegear Gamma", CardType.MONSTER)
driver = Card("PSY-Frame Driver", CardType.MONSTER)
crow = Card("D.D. Crow", CardType.MONSTER)
belle = Card("Ghost Belle & Haunted Mansion", CardType.MONSTER)
meister = Card("Skull Meister", CardType.MONSTER)
imperm = Card("Infinite Impermanence", CardType.TRAP)
# Misc
droplet = Card("Forbidden Droplet", CardType.SPELL)
called = Card("Called by the Grave", CardType.SPELL)
cyclone = Card("Cosmic Cyclone", CardType.SPELL)
prison = Card("Ice Dragon's Prison", CardType.TRAP)
# Extra Deck
carrier = Card("Union Carrier", CardType.EXTRA_DECK)
almiraj = Card("Salamangreat Almiraj", CardType.EXTRA_DECK)
gardna = Card("Secure Gardna", CardType.EXTRA_DECK)
artemis = Card("Artemis, the Magistus Moon Maiden", CardType.EXTRA_DECK)
mechaba = Card("Invoked Mechaba", CardType.EXTRA_DECK)
augoeides = Card("Invoked Augoeides", CardType.EXTRA_DECK)
purgatrio = Card("Invoked Purgatrio", CardType.EXTRA_DECK)
titaniklad = Card("Titaniklad the Ash Dragon", CardType.EXTRA_DECK)
apkalone = Card("El Shaddoll Apkalone", CardType.EXTRA_DECK)
ntss = Card("Elder Entity N'tss", CardType.EXTRA_DECK)
skull_knight = Card("Fossil Warrior Skull Knight", CardType.EXTRA_DECK)
winda = Card("El Shaddoll Winda", CardType.EXTRA_DECK)
construct = Card("El Shaddoll Construct", CardType.EXTRA_DECK)
omega = Card("Psy-framelord Omega", CardType.EXTRA_DECK)
# Disruptions
disr_winda = Disruption(repr(winda), 10)
disr_mechaba_m = Disruption(f"{repr(mechaba)} (M)", 2)
disr_mechaba_s = Disruption(f"{repr(mechaba)} (S)", 1)
disr_mechaba_t = Disruption(f"{repr(mechaba)} (T)", 1)
disr_fleur = Disruption(repr(fleur), 2)
disr_punishment = Disruption(repr(punishment), 2)
disr_prison = Disruption(repr(prison), 2)
disr_aleister = Disruption(repr(aleister), 1)
# Lists
hand_traps = (ash, ogre, veiler, imperm, nibiru, droll, crow, belle, meister, gamma)
protection = (belle, called)
card_to_set = (droplet, called, imperm, punishment, prison, cyclone)
spellcasters = (
artemis,
ecclesia,
maximus,
souls,
jester,
fp_lyna,
lc_lyna,
aleister,
) # for knowledge
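    # presumably the legal costs for Spellbook of Knowledge, which needs a face-up Spellcaster to send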
discards = (driver, lc_lyna, fp_lyna, upstart, cyclone)
carrier_materials = (ecclesia, maximus, artemis, souls, aleister, jester)
dark_monsters = (
aleister,
jester,
souls,
titaniklad,
) # edge case where construct is in grave but apkalone is only dark was ignored
shaddolls = (construct, apkalone)
not_opt = (imperm, crow, meister, veiler, cyclone)
save_for_next_turn = (ecclesia, servant)
lyna_decklist = DeckList(
(
(aleister, 3),
(invocation, 2),
(meltdown, 3),
(terraforming, 1),
(ecclesia, 3),
(maximus, 2),
(fleur, 2),
(punishment, 2),
(servant, 3),
(schism, 1),
(prison, 2),
(imperm, 3),
(ash, 3),
(souls, 3),
(jester, 3),
(desires, 2),
(fp_lyna, 1),
(lc_lyna, 1),
),
(
(almiraj, 1),
(artemis, 1),
(gardna, 1),
(carrier, 1),
(mechaba, 1),
(purgatrio, 1),
(augoeides, 1),
(titaniklad, 1),
(ntss, 2),
(skull_knight, 1),
(apkalone, 1),
(winda, 1),
(construct, 1),
(omega, 1),
),
)
no_lyna_decklist = DeckList(
(
(aleister, 3),
(invocation, 3),
(meltdown, 3),
(terraforming, 1),
(ecclesia, 3),
(maximus, 2),
(fleur, 2),
(punishment, 2),
(servant, 3),
(schism, 1),
(prison, 2),
(imperm, 3),
(ash, 3),
(droll, 3),
(cyclone, 2),
(desires, 2),
(knowledge, 2),
),
(
(almiraj, 1),
(artemis, 1),
(gardna, 1),
(mechaba, 2),
(purgatrio, 1),
(augoeides, 1),
(titaniklad, 1),
(ntss, 2),
(skull_knight, 1),
(apkalone, 1),
(winda, 1),
(construct, 1),
(omega, 1),
),
)
default_decklist = lyna_decklist
#########
# Helpers
#########
@classmethod
def generate_stats(cls, end_games: List[Game]) -> List[List[str]]:
return [
["Winda", cls.percent_with_flags(end_games, ["winda"])],
["Mechaba", cls.percent_with_flags(end_games, ["mechaba"])],
["Both", cls.percent_with_flags(end_games, ["winda", "mechaba"])],
["3+ Disruptions", cls.percent_with_flags(end_games, ["3+ disruptions"])],
["Bricks", cls.percent_with_flags(end_games, ["brick"])],
]
def postprocess(self, game: Game):
if self.apkalone in game.grave and game.hopt_available(self.apkalone):
game.use_hopt(self.apkalone)
if self.schism in game.deck:
game.move(game.deck, game.hand, self.schism)
discard = self.select_schism_discard(game)
game.move(game.hand, game.grave, discard)
if self.construct in game.grave and game.hopt_available(self.construct):
game.use_hopt(self.construct)
if self.schism in game.grave:
game.move(game.grave, game.hand, self.schism)
return game
def endphase(self, game: Game):
for card in game.hand:
if card in self.card_to_set:
game.move(game.hand, game.backrow, card)
if self.schism in game.hand and self.can_summon_winda(game):
game.move(game.hand, game.backrow, self.schism)
if self.titaniklad in game.grave and game.hopt_available(self.titaniklad):
target = self.select_titaniklad_search_target(game)
if target:
game.use_hopt(self.titaniklad)
if target == self.ecclesia and game.hopt_available(
self.ecclesia, "search"
):
# we can summon ecclesia and then search
game.move(game.deck, game.monsters, target)
second_target = self.select_ecclesia_search_target(game)
if second_target:
game.move(game.deck, game.hand, second_target)
game.use_hopt(self.ecclesia, "search")
else:
game.move(game.deck, game.hand, target)
# Process Disruptions
if self.schism in game.backrow and self.can_summon_winda(game):
game.add_flag("winda")
game.disruptions.add(self.disr_winda)
pure_distruptions = 0
if self.mechaba in game.monsters:
for card in game.hand:
game.add_flag("mechaba")
if card.card_type == CardType.MONSTER:
game.disruptions.add(self.disr_mechaba_m)
elif card.card_type == CardType.SPELL:
game.disruptions.add(self.disr_mechaba_s)
elif card.card_type == CardType.TRAP:
game.disruptions.add(self.disr_mechaba_t)
if game.has_flag("mechaba"):
pure_distruptions += 1
if self.fleur in game.hand and any(
card in [self.ecclesia, self.maximus, self.fleur] for card in game.monsters
):
game.add_flag("fleur")
pure_distruptions += 1
game.disruptions.add(Disruption(repr(self.fleur), 1))
for card in game.hand:
if card in self.hand_traps:
if card == self.gamma and self.driver in game.banished:
continue
pure_distruptions += 1
game.disruptions.add(Disruption(repr(card), 1))
for card in game.backrow:
if card in self.card_to_set:
pure_distruptions += 1
if card == self.prison:
game.disruptions.add(self.disr_prison)
elif card == self.punishment:
game.disruptions.add(self.disr_punishment)
else:
game.disruptions.add(Disruption(repr(card), 1))
if pure_distruptions >= 3:
game.add_flag("3+ disruptions")
if pure_distruptions < 3 and not game.has_flag("winda"):
game.add_flag("brick")
return game
def can_summon_winda(self, game: Game) -> bool:
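        # Schism fuses Winda from the grave, so it needs both a "Shaddoll" monster and a DARK monster there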
return any(card in self.shaddolls for card in game.grave) and any(
card in self.dark_monsters for card in game.grave
)
def get_redundant_cards_in_hand(
self, game: Game, include_useful: bool = False
) -> List[Card]:
redundant_cards = {} # higher value means more redundant
hand = game.hand.cards[:]
for card in hand:
            if (count := hand.count(card)) == 1:
if game.hopt_available(card):
redundant_cards[card] = 0
else:
redundant_cards[card] = 1 if card in self.save_for_next_turn else 2
elif count == 2:
if card in self.not_opt:
redundant_cards[card] = 1
else:
redundant_cards[card] = 2
else:
redundant_cards[card] = 3
to_return = sorted(
redundant_cards.keys(), key=lambda x: redundant_cards[x], reverse=True
)
if include_useful:
return to_return
else:
return [card for card in to_return if redundant_cards[card] > 1]
#########
# Selects
#########
def select_ecclesia_search_target(self, game: Game) -> Optional[Card]:
if game.hopt_available(self.titaniklad):
# ecclesia summoned during main phase
if any(card.card_type == CardType.EXTRA_DECK for card in game.grave):
# we can summon maximus
if self.maximus not in game.hand and self.maximus in game.deck:
return self.maximus
elif self.punishment not in game.hand and self.punishment in game.deck:
return self.punishment
elif self.fleur in game.deck:
return self.fleur
else:
# we cannot summon maximus, would fleur be live?
if any(card.card_type == CardType.EXTRA_DECK for card in game.monsters):
# fleur live, search it
if self.fleur not in game.hand and self.fleur in game.deck:
return self.fleur
elif self.punishment in game.deck:
return self.punishment
else:
# fleur not live, search punishment and pray
if (
self.punishment not in game.hand
and self.punishment in game.deck
):
return self.punishment
elif self.fleur in game.deck:
return self.fleur
else:
# ecclesia summoned during end phase
if self.fleur not in game.hand and self.fleur in game.deck:
# only fleur makes a difference
return self.fleur
elif self.maximus not in game.hand and self.maximus in game.deck:
return self.maximus
elif self.punishment not in game.hand and self.punishment in game.deck:
return self.punishment
elif self.fleur in game.deck:
# might as well get a second fleur
return self.fleur
# no targets somehow
return None
def select_titaniklad_search_target(self, game: Game) -> Optional[Card]:
if self.ecclesia in game.deck and game.hopt_available(self.ecclesia, "search"):
# we can summon eccelsia and search
return self.ecclesia
else:
# ecclesia has been summoned already
if self.fleur not in game.hand and self.fleur in game.deck:
# only fleur makes a difference
return self.fleur
elif self.ecclesia not in game.hand and self.ecclesia in game.deck:
# search another ecclesia for next turn
return self.ecclesia
elif self.maximus not in game.hand and self.maximus in game.deck:
return self.maximus
elif self.ecclesia in game.deck:
# might as well get a second ecclesia
return self.ecclesia
elif self.fleur in game.deck:
# might as well get a second fleur
return self.fleur
elif self.maximus in game.deck:
                # might as well get a second maximus
                return self.maximus
        # no targets somehow
        return None
# Copyright (c) 2021 Qianyun, Inc. All rights reserved.
import time
import os
import copy
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.utils import decrypt_password
from abstract_plugin.platforms.common.utils import validate_parameter
import abstract_plugin.platforms.common.constants as common_constants
from .base import Base
from .handler import client_error_handler
from abstract_plugin.platforms.common.compute import CommonCompute
from abstract_plugin.platforms.ksyun.restclient import Helper
from . import constants as ksyun_constants
class Compute(Base, CommonCompute):
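    # Lifecycle handling for Ksyun KEC instances: create/start/stop/reboot/delete plus EIP
    # association and snapshot helpers.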
def __init__(self):
super(Compute, self).__init__()
self.action_mapper = {
"create_snapshot": self.create_snapshot,
"restore_snapshot": self.restore_snapshot,
"delete_snapshot": self.delete_snapshot,
"modify_display_name": self.modify_display_name,
"resize": self.resize
}
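        # action_mapper routes custom operation names to the handler methods defined below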
def prepare_params(self):
nsg_id = self.get_nsg_id()
if not nsg_id:
raise NonRecoverableError("Can not get security group id, please set in environment variable, "
"the environment variable key is 'KSYUN_NSG'.")
instance_name = self.resource_config.get('instance_name')
hostname = None
if not instance_name:
instance_name, hostname = self.get_instance_names()
hostname = self.resource_config.get('hostname') or hostname or instance_name
params_info = {
'ImageId': validate_parameter('image_id', self.resource_config),
'InstanceType': validate_parameter('flavor', self.resource_config),
'SubnetId': self.get_subnet(),
'MaxCount': 1,
'MinCount': 1,
'SecurityGroupId': nsg_id,
'InstancePassword': decrypt_password(validate_parameter('password', self.resource_config)),
'InstanceName': instance_name,
'ChargeType': self.resource_config.get('charge_type') or 'HourlyInstantSettlement',
'PurchaseTime': self.resource_config.get('purchase_time') or 0,
'SystemDisk.DiskType': self.resource_config.get('system_disk_config').get('volume_type'),
'SystemDisk.DiskSize': self.resource_config.get('system_disk_config').get('size')
}
if hostname:
params_info['HostName'] = hostname
if os.environ.get('KSYUN_SYSTEM_DISK_TYPE'):
params_info.update({
'SystemDisk.DiskType': os.environ.get('KSYUN_SYSTEM_DISK_TYPE'),
'SystemDisk.DiskSize': int(os.environ.get('KSYUN_SYSTEM_DISK_SIZE'))})
ip_address = self.get_ip()
if ip_address:
ip_address_info = {
'PrivateIpAddress': ip_address
}
params_info.update(ip_address_info)
return params_info
def describe_vm(self, instance_id):
res = Helper().execute_request('kec', 'describe_instances', {"InstanceId.1": instance_id})
return None if not res.get("InstancesSet") else res['InstancesSet'][0]
def get_vm_state(self, instance_id):
vm_info = self.describe_vm(instance_id)
return None if not vm_info else vm_info['InstanceState']['Name']
def wait_for_target_state(self, instance_id, target_state, timeout=600, sleep_interval=10):
timeout = time.time() + timeout
while time.time() < timeout:
instance_state = self.get_vm_state(instance_id)
ctx.logger.info('Waiting for server "{0}" to be {1}. current state: {2}'
.format(instance_id, target_state, instance_state))
if isinstance(target_state, tuple):
if instance_state in target_state:
return
else:
if instance_state == target_state:
return
time.sleep(sleep_interval)
raise NonRecoverableError("Waiting server to target state failed! the current "
"state is {0}, the target state is {1}".format(instance_state, target_state))
def update_runtime_properties(self, instance_id):
vm = self.describe_vm(instance_id)
raw_status = vm['InstanceState']['Name']
vm_status = ksyun_constants.KS_INSTANCE_STATE_CONVERT.get(raw_status, raw_status)
ctx.instance.runtime_properties.update({
common_constants.EXTERNAL_ID: vm.get('InstanceId'),
common_constants.EXTERNAL_NAME: vm.get('InstanceName'),
'external_hostname': vm.get('HostName'),
'VpcId': vm.get('NetworkInterfaceSet', [{}])[0].get('VpcId'),
'network_interface_id': vm['NetworkInterfaceSet'][0]['NetworkInterfaceId'],
'vm_info': vm,
'status': vm_status
})
self.set_ip_info(vm)
ctx.instance.update()
def set_ip_info(self, vm_info):
net_info = vm_info['NetworkInterfaceSet'][0]
network_info = {'ip': net_info.get('PrivateIpAddress'), 'name': 'PrivateIpAddress'}
ctx.instance.runtime_properties['ip'] = net_info.get('PrivateIpAddress')
if self.use_external_resource:
# There is not network connected to instance,instance is external.
ctx.instance.runtime_properties['networks'] = {'Network': network_info}
else:
# Create by CMP.
related_network = self.get_primary_network()
networks_runtime = ctx.instance.runtime_properties.get('networks')
networks_runtime[related_network.node.id].update(network_info)
if net_info.get('PublicIp'):
public_ip = net_info.get('PublicIp')
public_ip_info = {
'name': 'public_ip',
'ip': public_ip
}
ctx.instance.runtime_properties['networks'].update({'public_ip': public_ip_info})
ctx.instance.runtime_properties['ip'] = public_ip
ips = []
if net_info.get('PublicIp'):
ips.append(net_info.get('PublicIp'))
ips.append(net_info.get('PrivateIpAddress'))
ctx.instance.runtime_properties['ips'] = ips
ctx.instance.update()
def _create(self):
params = self.prepare_params()
display_params = copy.deepcopy(params)
ctx.instance.runtime_properties[common_constants.EXTERNAL_HOSTNAME] = params.get('HostName')
display_params['InstancePassword'] = '********'
ctx.logger.info("VM creating params is {0}".format(display_params))
return Helper().execute_request('kec', 'run_instances', params)['InstancesSet'][0]['InstanceId']
def create(self):
if self.use_external_resource is True:
instance_id = validate_parameter('resource_id', self.node_properties)
else:
instance_id = self._create()
self.wait_for_target_state(instance_id, ksyun_constants.KS_INSTANCE_STATE_ACTIVE)
self.update_runtime_properties(instance_id)
self.associate_eip()
def is_allocated_eip(self):
eip_node = self.get_eip_node()
if eip_node and eip_node.properties.get('resource_config').get('allocate_eip'):
return True
return False
def associate_eip(self):
if not self.is_allocated_eip():
return
eip_obj = self.get_eip()
vm = ctx.instance
eip_id = eip_obj.runtime_properties[common_constants.EXTERNAL_ID]
instance_id = vm.runtime_properties[common_constants.EXTERNAL_ID]
interface_id = vm.runtime_properties['network_interface_id']
ctx.logger.info(
'Start associate EIP:{} to Instance:{},interface_id:{}'.format(eip_id, instance_id, interface_id))
request_body = {
'AllocationId': eip_id,
'InstanceId': instance_id,
'InstanceType': 'Ipfwd',
'NetworkInterfaceId': interface_id
}
Helper().execute_request('eip', 'associate_address', request_body)
eip_obj = self.wait_eip_for_target_state(eip_id, [ksyun_constants.KS_EIP_STATE_ASSOCIATE])
networks = vm.runtime_properties['networks']
networks['public_ip'] = {'ip': eip_obj['PublicIp'], 'name': 'public_ip'}
vm.runtime_properties['networks'] = networks
vm.runtime_properties['ip'] = eip_obj['PublicIp']
ctx.instance.update()
self.update_eip_runtime()
ctx.logger.info('Associate EIP successfully...')
def disassociate_eip(self):
if not self.is_allocated_eip():
return
eip_obj = self.get_eip()
eip_id = eip_obj.runtime_properties[common_constants.EXTERNAL_ID]
ctx.logger.info('Disassociate EIP id:{}'.format(eip_id))
request_body = {
'AllocationId': eip_id
}
ctx.logger.info('Start to disassociate EIP:{}...'.format(eip_id))
Helper().execute_request('eip', 'disassociate_address', request_body)
self.wait_eip_for_target_state(eip_id, [ksyun_constants.KS_EIP_STATE_DISASSOCIATE])
vm = ctx.instance
networks = vm.runtime_properties['networks']
networks.pop('public_ip')
vm.runtime_properties['networks'] = networks
vm_info = self.describe_vm(vm.runtime_properties[common_constants.EXTERNAL_ID])
if vm_info:
vm.runtime_properties['ip'] = vm_info['PrivateIpAddress']
vm.update()
self.update_eip_runtime()
ctx.logger.info('Disassociate EIP successfully...')
def wait_eip_for_target_state(self, eip_id, statuses, timeout=600, interval=15):
request_body = {
'AllocationId.1': eip_id
}
eip_info = Helper().execute_request('eip', 'describe_addresses', request_body)['AddressesSet'][0]
        while timeout > 0:
            if eip_info['State'] in statuses:
                return eip_info
            ctx.logger.info(
                'Wait Eip:{} to be status:{},current status:{}...'.format(eip_id, ','.join(statuses),
                                                                          eip_info['State']))
            time.sleep(interval)
            timeout -= interval
            # re-query the EIP so the loop can observe state changes
            eip_info = Helper().execute_request('eip', 'describe_addresses', request_body)['AddressesSet'][0]
raise NonRecoverableError("Waiting eip to target state failed! the current "
"state is {0}, the target state:{1}".format(eip_info['State'], ','.join(statuses)))
def update_eip_runtime(self):
eip_instance = self.get_eip()
eip_id = eip_instance.runtime_properties[common_constants.EXTERNAL_ID]
eip_instance.runtime_properties = {}
request_body = {
'AllocationId.1': eip_id
}
eip_info = Helper().execute_request('eip', 'describe_addresses', request_body)['AddressesSet'][0]
eip_instance.runtime_properties[common_constants.EXTERNAL_ID] = eip_info['AllocationId']
eip_instance.runtime_properties[common_constants.EIP_ADDRESS] = eip_info['PublicIp']
eip_instance.runtime_properties[common_constants.EIP_STATUS] = eip_info['State']
eip_instance.runtime_properties[ksyun_constants.KS_EIP_TYPE] = eip_info.get('InstanceType')
instance_id = eip_info.get('InstanceId')
if instance_id and eip_info.get('InstanceType') == 'Ipfwd':
vm_info = self.describe_vm(eip_info['InstanceId'])
if vm_info:
eip_instance.runtime_properties[common_constants.EIP_RELATED_INSTANCE_ID] = eip_info['InstanceId']
eip_instance.runtime_properties[common_constants.EIP_RELATED_INSTANCE_NAME] = vm_info.get(
'InstanceName')
eip_instance.update()
def _start(self, instance_id):
vm_state = self.get_vm_state(instance_id)
if vm_state == ksyun_constants.KS_INSTANCE_STATE_ACTIVE:
ctx.logger.info("The virtual machine is active, No need to start!")
return
if vm_state != ksyun_constants.KS_INSTANCE_STATE_STOPPED:
raise NonRecoverableError("Only virtual machines that are in a stopped state can be started")
else:
Helper().execute_request('kec', 'start_instances', {"InstanceId.1": instance_id})
def start(self):
instance_id = ctx.instance.runtime_properties['external_id']
self._start(instance_id)
self.wait_for_target_state(instance_id, ksyun_constants.KS_INSTANCE_STATE_ACTIVE)
self.update_runtime_properties(instance_id)
def _stop(self, instance_id):
vm_state = self.get_vm_state(instance_id)
if not vm_state:
ctx.logger.info("The virtual machine isnot exist, No need to stop!")
return "not exist"
if vm_state == ksyun_constants.KS_INSTANCE_STATE_STOPPED:
ctx.logger.info("The virtual machine is stopped, No need to stop!")
return
stop_params = {
"InstanceId.1": instance_id,
"ForceStop": True,
"StoppedMode": "StopCharging"}
Helper().execute_request('kec', 'stop_instances', stop_params)
def stop(self):
instance_id = ctx.instance.runtime_properties['external_id']
if self._stop(instance_id) == "not exist":
return
self.wait_for_target_state(instance_id, ksyun_constants.KS_INSTANCE_STATE_STOPPED)
self.update_runtime_properties(instance_id)
def _reboot(self, instance_id):
vm_state = self.get_vm_state(instance_id)
if vm_state == ksyun_constants.KS_INSTANCE_STATE_STOPPED:
raise NonRecoverableError("Can not reboot virtual machine which state is stopped, you can start it!")
reboot_params = {
'InstanceId.1': instance_id,
'ForceReboot': True}
Helper().execute_request('kec', 'reboot_instances', reboot_params)
def reboot(self):
instance_id = ctx.instance.runtime_properties['external_id']
self._reboot(instance_id)
self.wait_for_target_state(instance_id, ksyun_constants.KS_INSTANCE_STATE_ACTIVE)
self.update_runtime_properties(instance_id)
def delete(self):
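        """Disassociate the EIP, terminate the instance, release the IP, then wait briefly before returning."""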
instance_id = ctx.instance.runtime_properties['external_id']
self.disassociate_eip()
Helper().execute_request('kec', 'terminate_instances', {"InstanceId.1": instance_id})
self.release_ip_in_delete_operation()
        # Environment variables are strings; convert before sleeping to avoid a TypeError.
        time.sleep(int(os.environ.get("KSYUN_VM_DELETE_WAIT_TIME_SECOND") or 5))
def process_external_resource(self):
instance_id = ctx.instance.runtime_properties['external_id']
self.update_runtime_properties(instance_id)
def describe_instance_local_volumes(self):
instance_id = ctx.instance.runtime_properties['external_id']
local_volumes = Helper().execute_request('kec', 'describe_local_volumes', {"InstanceId": instance_id})
local_volumes = local_volumes.get('LocalVolumeSet') or []
return local_volumes
def describe_instance_cloud_volumes(self):
instance_id = ctx.instance.runtime_properties['external_id']
cloud_volumes = Helper().execute_request('ebs', 'describe_instance_volumes', {"InstanceId": instance_id})
cloud_volumes = cloud_volumes.get('Attachments') or []
return cloud_volumes
@client_error_handler
def describe_local_volume_snapshot_by_id(self, snapshot_id):
snapshot = \
Helper().execute_request('kec', 'describe_local_volume_snapshots',
{'LocalVolumeSnapshotId': snapshot_id}).get('LocalVolumeSnapshotSet')[0]
return snapshot
@client_error_handler
def describe_cloud_volume_snapshot_by_id(self, snapshot_id):
snapshot = Helper().execute_request('ebs', 'describe_snapshots',
{'SnapshotId': snapshot_id}).get('Snapshots')[0]
return snapshot
def get_local_volume_snapshot_state(self, snapshot_id):
snapshot = self.describe_local_volume_snapshot_by_id(snapshot_id)
return snapshot['State']
def get_cloud_volume_snapshot_state(self, snapshot_id):
snapshot = self.describe_cloud_volume_snapshot_by_id(snapshot_id)
return snapshot['SnapshotStatus']
def wait_for_snapshot_available(self, snapshot_type, snapshot_id, timeout=1200, sleep_interval=10):
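        """Poll the snapshot state (local or cloud volume) until it is available; raise NonRecoverableError after `timeout` seconds."""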
        deadline = time.time() + timeout
        snapshot_state = None
        while time.time() < deadline:
            if snapshot_type == 'local_volume':
                snapshot_state = self.get_local_volume_snapshot_state(snapshot_id)
                ctx.logger.info('Waiting for snapshot "{0}" to be ACTIVE. Current state: {1}'
                                .format(snapshot_id, snapshot_state))
            else:
                snapshot_state = self.get_cloud_volume_snapshot_state(snapshot_id)
                ctx.logger.info('Waiting for snapshot "{0}" to be available. Current state: {1}'
                                .format(snapshot_id, snapshot_state))
            if snapshot_state in ksyun_constants.KS_SNAPSHOT_STATE_AVAILABLE:
                return
            time.sleep(sleep_interval)
        # Report the configured timeout, not the absolute deadline timestamp.
        raise NonRecoverableError("Waiting for snapshot to become available timed out ({0} seconds)! The current "
                                  "state is {1}.".format(timeout, snapshot_state))
@client_error_handler
def create_local_volume_snapshot(self, volume_id, name, desc):
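        """Create a snapshot of a local volume once the instance is active or stopped, then wait for it to become available."""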
instance_id = ctx.instance.runtime_properties['external_id']
params = {
'LocalVolumeId': volume_id,
'LocalVolumeSnapshotName': name,
'LocalVolumeSnapshotDesc': desc
}
self.wait_for_target_state(
instance_id,
(ksyun_constants.KS_INSTANCE_STATE_ACTIVE, ksyun_constants.KS_INSTANCE_STATE_STOPPED))
resp = Helper().execute_request('kec', 'create_local_volume_snapshot', params)
self.wait_for_snapshot_available('local_volume', snapshot_id=resp['LocalVolumeSnapshotId'])
@client_error_handler
def create_cloud_volume_snapshot(self, volume_id, name, desc):
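        """Create a snapshot of a cloud (EBS) volume once the instance is active or stopped, then wait for it to become available."""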
instance_id = ctx.instance.runtime_properties['external_id']
params = {
'VolumeId': volume_id,
'SnapshotName': name,
'SnapshotDesc': desc
}
self.wait_for_target_state(
instance_id,
(ksyun_constants.KS_INSTANCE_STATE_ACTIVE, ksyun_constants.KS_INSTANCE_STATE_STOPPED))
resp = Helper().execute_request('ebs', 'create_snapshot', params)
self.wait_for_snapshot_available('cloud_volume', snapshot_id=resp['SnapshotId'])
def create_snapshot(self, **kwargs):
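        """Snapshot every cloud and local volume attached to this instance, using `snapshotName` (and optional `snapshotDesc`) from kwargs."""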
snapshot_name = kwargs.get('snapshotName')
snapshot_description = kwargs.get('snapshotDesc') or snapshot_name
cloud_volumes = self.describe_instance_cloud_volumes()
for volume in cloud_volumes:
self.create_cloud_volume_snapshot(volume['VolumeId'], snapshot_name, snapshot_description)
local_volumes = self.describe_instance_local_volumes()
for volume in local_volumes:
self.create_local_volume_snapshot(volume['LocalVolumeId'], snapshot_name, snapshot_description)
@client_error_handler
def restore_local_volume_snapshot(self, volume_id, snapshot_id):
params = {
"LocalVolumeId": volume_id,
"LocalVolumeSnapshotId": snapshot_id
}
Helper().execute_request('kec', 'rollback_local_volume', params)
@client_error_handler
def restore_cloud_volume_snapshot(self, volume_id, snapshot_id):
params = {
"VolumeId": volume_id,
"SnapshotId": snapshot_id
}
Helper().execute_request('ebs', 'rollback_snapshot', params)
def restore_snapshot_by_name(self, snapshot_name):
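        """Roll back every cloud and local volume that has a snapshot matching `snapshot_name`; raise if no snapshot matches."""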
found_snapshot = False
cloud_volume_snapshots = self.describe_cloud_volume_snapshots()
for snapshot in cloud_volume_snapshots:
if snapshot['SnapshotName'] == snapshot_name:
found_snapshot = True
self.restore_cloud_volume_snapshot(
snapshot['VolumeId'],
snapshot['SnapshotId']
)
ctx.logger.info('Rollback volume {0} with snapshot {1}'.format(
snapshot['VolumeId'], snapshot['SnapshotId']))
local_volume_snapshots = self.describe_local_volume_snapshots()
for snapshot in local_volume_snapshots:
if snapshot['LocalVolumeSnapshotName'] == snapshot_name:
found_snapshot = True
self.restore_local_volume_snapshot(
snapshot['SourceLocalVolumeId'],
snapshot['LocalVolumeSnapshotId']
)
ctx.logger.info('Rollback volume {0} with snapshot {1}'.format(
snapshot['SourceLocalVolumeId'], snapshot['LocalVolumeSnapshotId']))
if not found_snapshot:
raise NonRecoverableError('Snapshot {0} not found'.format(snapshot_name))
def restore_snapshot(self, **kwargs):
snapshot_name = validate_parameter('snapshotName', kwargs)
self.restore_snapshot_by_name(snapshot_name)
def describe_local_volume_snapshots(self):
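        """Return all snapshots of the local volumes attached to this instance."""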
local_volumes = self.describe_instance_local_volumes()
snapshots = []
for volume in local_volumes:
params = {'SourceLocalVolumeId': volume['LocalVolumeId']}
volume_snapshots = Helper().execute_request('kec', 'describe_local_volume_snapshots', params).get(
'LocalVolumeSnapshotSet') or []
snapshots.extend(volume_snapshots)
return snapshots
def describe_cloud_volume_snapshots(self):
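        """Return all snapshots of the cloud (EBS) volumes attached to this instance."""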
cloud_volumes = self.describe_instance_cloud_volumes()
snapshots = []
for volume in cloud_volumes:
params = {'VolumeId': volume['VolumeId']}
volume_snapshots = Helper().execute_request('ebs', 'describe_snapshots', params).get('Snapshots') or []
snapshots.extend(volume_snapshots)
return snapshots
@client_error_handler
def delete_local_volume_snapshot(self, snapshot_id):
params = {"LocalVolumeSnapshotId.1": snapshot_id}
Helper().execute_request('kec', 'delete_local_volume_snapshot', params)
@client_error_handler
def delete_cloud_volume_snapshot(self, snapshot_id):
params = {"SnapshotId": snapshot_id}
Helper().execute_request('ebs', 'delete_snapshot', params)
def delete_snapshot_by_name(self, snapshot_name):
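        """Delete every cloud and local volume snapshot matching `snapshot_name`; raise if no snapshot matches."""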
found_snapshot = False
cloud_volume_snapshots = self.describe_cloud_volume_snapshots()
for snapshot in cloud_volume_snapshots:
if snapshot['SnapshotName'] == snapshot_name:
found_snapshot = True
                self.delete_cloud_volume_snapshot(snapshot['SnapshotId'])
                ctx.logger.info('Delete cloud volume snapshot {0} of volume {1}'.format(
                    snapshot['SnapshotId'], snapshot['VolumeId']))
        local_volume_snapshots = self.describe_local_volume_snapshots()
        for snapshot in local_volume_snapshots:
            if snapshot['LocalVolumeSnapshotName'] == snapshot_name:
                found_snapshot = True
                self.delete_local_volume_snapshot(snapshot['LocalVolumeSnapshotId'])
                ctx.logger.info('Delete local volume snapshot {0} of volume {1}'.format(
                    snapshot['LocalVolumeSnapshotId'], snapshot['SourceLocalVolumeId']))
        if not found_snapshot:
            raise NonRecoverableError('Snapshot {0} not found'.format(snapshot_name))