the-stack_106_28635 | from __future__ import absolute_import
import os
import xarray as xr
import pandas as pd
import numpy as np
from impax.mins import minimize_polynomial
def construct_weather(**weather):
'''
Helper function to build the combined weather DataArray
Parameters
----------
weather: dict
dictionary of prednames and weather (either ``str`` file paths
or :py:class:`xarray.DataArray` objects) for each predname
Returns
-------
combined: DataArray
Combined :py:class:`~xarray.DataArray` of weather
variables, with variables concatenated along the
new `prednames` dimension
'''
prednames = []
weather_data = []
for pred, path in weather.items():
if hasattr(path, 'dims'):
weather_data.append(path)
else:
with xr.open_dataset(path) as ds:
weather_data.append(ds[pred].load())
prednames.append(pred)
return xr.concat(weather_data, pd.Index(prednames, name='prednames'))
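# Illustrative usage sketch (not part of the original module); the predictor
# names and arrays below are hypothetical:
#
#     tas = xr.DataArray(np.random.rand(365, 10), dims=('time', 'hierid'))
#     weather = construct_weather(tas=tas, tas2=tas ** 2)
#     weather.sel(prednames='tas')
#
# String values are treated as paths to NetCDF files containing a variable
# named after the predictor, e.g. construct_weather(tas='tas_daily.nc').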
def construct_covars(add_constant=True, **covars):
'''
Helper function to construct the covariates DataArray
Parameters
----------
add_constant : bool
flag indicating whether a constant term should be added. The constant
term will have the same shape as the other covariate DataArrays
covars: dict
dictionary of covariate name, covariate (``str`` path or
:py:class:`xarray.DataArray`) pairs
Returns
-------
combined: DataArray
Combined :py:class:`~xarray.DataArray` of covariate
variables, with variables concatenated along the
new `covarnames` dimension
'''
covarnames = []
covar_data = []
for covar, path in covars.items():
if hasattr(path, 'dims'):
covar_data.append(path)
else:
with xr.open_dataset(path) as ds:
covar_data.append(ds[covar].load())
covarnames.append(covar)
if add_constant:
ones = xr.DataArray(
np.ones(shape=covar_data[0].shape),
coords=covar_data[0].coords,
dims=covar_data[0].dims)
covarnames.append('1')
covar_data.append(ones)
return xr.concat(covar_data, pd.Index(covarnames, name='covarnames'))
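# Illustrative usage sketch (hypothetical covariate names and arrays):
#
#     loggdppc = xr.DataArray(np.random.rand(10), dims=('hierid',))
#     climtas = xr.DataArray(np.random.rand(10), dims=('hierid',))
#     covars = construct_covars(loggdppc=loggdppc, climtas=climtas)
#
# With the default add_constant=True, covars gains a 'covarnames' dimension
# containing 'loggdppc', 'climtas' and the constant term '1'.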
class Impact(object):
'''
Base class for computing an impact as specified by the Climate Impact Lab
'''
min_function = NotImplementedError
def impact_function(self, betas, weather):
'''
computes the dot product of betas and annual weather by outcome group
Parameters
----------
betas: DataArray
:py:class:`~xarray.DataArray` of hierid by predname by outcome
weather: DataArray
:py:class:`~xarray.DataArray` of hierid by predname by outcome
Returns
-------
DataArray
:py:class:`~xarray.DataArray` of impact by outcome by hierid
.. note::
this is the default implementation on the Impact base class; subclasses may override it
'''
return (betas*weather).sum(dim='prednames')
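# Sketch of the reduction above (shapes are hypothetical): with betas and
# weather both carrying a 'prednames' dimension such as ['tas', 'tas2'],
#
#     (betas * weather).sum(dim='prednames')
#
# evaluates beta_tas * tas + beta_tas2 * tas**2 for every remaining
# coordinate (hierid, outcome, time), with broadcasting handled by xarray.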
def compute(
self,
weather,
betas,
clip_flat_curve=True,
t_star=None):
'''
Computes an impact for a unique set of GDP, climate, weather, and gamma
coefficient inputs. For each set of inputs, we take the analytic minimum
value between two points (t_star), save t_star to disk, and compute the
analytic minimum of the response function for the given covariate set.
This operation is called for every adaptation scenario specified in the
run script.
Parameters
----------
weather: DataArray
weather :py:class:`~xarray.DataArray`
betas: DataArray
covarname by outcome :py:class:`~xarray.DataArray`
clip_flat_curve: bool
flag indicating that flat-curve clipping should be performed
on the result
t_star: DataArray
:py:class:`xarray.DataArray` with minimum temperatures used for
clipping
Returns
-------
:py:class:`~xarray.Dataset` of impacts by hierid by outcome group
'''
# Compute Raw Impact
impact = self.impact_function(betas, weather)
if clip_flat_curve:
# Compute the min for flat curve adaptation
impact_flatcurve = self.impact_function(betas, t_star)
# Compare values and evaluate a max
impact = xr.ufuncs.maximum((impact - impact_flatcurve), 0)
impact = self.postprocess_daily(impact)
# Sum to annual
impact = impact.sum(dim='time')
impact_annual = self.postprocess_annual(impact)
return impact_annual
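# Flat-curve clipping above, written out in scalar terms (illustrative):
#
#     m_star = sum_k beta_k * t_star_k      # response at the analytic minimum
#     impact = max(sum_k beta_k * weather_k - m_star, 0)
#
# i.e. each daily impact is measured relative to the response at t_star and
# floored at zero before being summed over 'time' into an annual impact.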
def get_t_star(self, betas, bounds, t_star_path=None):
'''
Load precomputed t_star from disk if available; otherwise compute it and cache the result to ``t_star_path``
Parameters
----------
betas: DataArray
:py:class:`~xarray.DataArray` of betas as prednames by hierid
bounds: list
values between which to evaluate function
t_star_path: str
path from which to load t_star; a newly computed t_star is also written here
'''
try:
with xr.open_dataarray(t_star_path) as t_star:
return t_star.load()
except OSError:
pass
except (IOError, ValueError):
try:
os.remove(t_star_path)
except (IOError, OSError):
pass
# Compute t_star according to min function
t_star = self.compute_t_star(betas, bounds=bounds)
# write to disk
if t_star_path is not None:
if not os.path.isdir(os.path.dirname(t_star_path)):
os.makedirs(os.path.dirname(t_star_path))
t_star.to_netcdf(t_star_path)
return t_star
def compute_t_star(self, betas, bounds=None):
return self.min_function(betas, bounds=bounds)
def postprocess_daily(self, impact):
return impact
def postprocess_annual(self, impact):
return impact
class PolynomialImpact(Impact):
'''
Polynomial-specific Impact spec, with ln(gdppc) and climtas for covariates
'''
@staticmethod
def min_function(*args, **kwargs):
'''
Helper function to call the minimization routine for the polynomial
spec. Implements ``findpolymin`` through `np.apply_along_axis`; all
arguments are forwarded to :py:func:`impax.mins.minimize_polynomial`.
Parameters
----------
betas: DataArray
:py:class:`~xarray.DataArray` of hierid by predname by outcome
dim: str
dimension to apply minimization to
bounds: list
values between which to search for t_star
Returns
-------
:py:class:`~xarray.DataArray` of hierid by predname by outcome
.. note:: overrides `min_function` in Impact base class
'''
return minimize_polynomial(*args, **kwargs)
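# Hedged end-to-end sketch (not from the original source); file paths, bounds
# and the betas DataArray are hypothetical, and betas must carry a 'prednames'
# dimension matching the weather:
#
#     spec = PolynomialImpact()
#     weather = construct_weather(tas='tas.nc', tas2='tas2.nc')
#     t_star = spec.get_t_star(betas, bounds=[10, 25], t_star_path='t_star.nc')
#     annual = spec.compute(weather, betas, clip_flat_curve=True, t_star=t_star)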
|
the-stack_106_28637 | """Show the profile in a donut."""
import plotly.graph_objects as go
from src.profile.colors import profile_colors
def make_donut(labels, values, title, colors):
"""Show the values in a donut."""
fig = go.Figure(
data=[
go.Pie(
title=dict(text=title),
labels=labels,
values=values,
hole=0.5,
marker_colors=colors,
marker_line=dict(color="white", width=2),
)
]
)
fig.update_layout(legend=dict(orientation="h", yanchor="bottom", xanchor="center", x=0.5, y=-0.2))
return fig
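# Example call (illustrative labels, values and colors):
#
#     fig = make_donut(["core", "tests", "docs"], [1200, 300, 150],
#                      "Lines of code", ["#1f77b4", "#ff7f0e", "#2ca02c"])
#     fig.show()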
def make_profile(profile):
"""Make the profile."""
labels = []
values = []
for region in profile.regions():
labels.append(region.label())
values.append(region.loc())
fig = make_donut(labels, values, profile.name(), profile_colors)
return fig
def show_profile(profile):
"""Show the profile in a donut."""
fig = make_profile(profile)
fig.show()
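# make_profile/show_profile rely only on a small duck-typed interface: the
# profile must provide name() and regions(), and each region must provide
# label() and loc(). A minimal hypothetical stand-in (not part of src.profile):
#
#     class FakeRegion:
#         def __init__(self, name, count):
#             self._name, self._count = name, count
#         def label(self):
#             return self._name
#         def loc(self):
#             return self._count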
|
the-stack_106_28639 | #!/usr/bin/env python
import logging
import tornado.ioloop
import tornado.options
import tornado.web
import json
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
try:
self.write(json.dumps(MessageMixin.cache))
except KeyError:
raise tornado.web.HTTPError(404)
class MessageMixin(object):
waiters = {}
cache = {}
cache_size = 200
def wait_for_messages(self, callback, cursor=None):
t = self.thread_id
cache = self.cache.setdefault(t, [])
waiters = self.waiters.setdefault(t, [])
if cursor:
index = 0
for i in range(len(cache)):
index = len(cache) - i - 1
if cache[index]["id"] == cursor:
break
recent = cache[index + 1:]
if recent:
callback(recent)
return None
waiters.append(callback)
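# How the cursor catch-up above behaves (illustrative; this assumes each cached
# post is a dict carrying an "id" field, which is what cache[index]["id"]
# expects):
#
#     cache = [{"id": "a"}, {"id": "b"}, {"id": "c"}]
#     cursor = "b"  ->  index 1 matches, recent = cache[2:] = [{"id": "c"}]
#
# so the callback fires immediately with the messages missed since the cursor;
# without a cursor (or with nothing newer) the callback is parked in
# self.waiters[thread_id] until new_messages() is called.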
def new_messages(self, posts):
t = self.thread_id
cache = self.cache.setdefault(t, [])
waiters = self.waiters.setdefault(t, [])
for callback in waiters:
try:
callback(posts)
except Exception:
logging.error("Error in waiter callback", exc_info=True)
self.waiters[t] = []  # clear the stored waiter list, not just the local reference
cache.extend(posts)
if len(cache) > self.cache_size:
self.cache[t] = cache[-self.cache_size:]  # trim the stored cache, not just the local copy
class MessageNewHandler(MainHandler, MessageMixin):
def post(self, thread_id):
self.thread_id = thread_id
post = self.get_argument("html")
redirect_to = self.get_argument("next", None)
if redirect_to:
self.redirect(redirect_to)
else:
self.write(post)
self.new_messages([post])
class MessageUpdatesHandler(MainHandler, MessageMixin):
@tornado.web.asynchronous
def post(self, thread_id):
self.thread_id = thread_id
try:
self.wait_for_messages(self.on_new_messages,
cursor=self.get_argument("cursor", None))
except KeyError:
raise tornado.web.HTTPError(404)
def on_new_messages(self, posts):
# Closed client connection
if self.request.connection.stream.closed():
return None
self.finish({"posts": posts})
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/api/1\.0/stream/(\d+)", MessageUpdatesHandler),
(r"/api/1\.0/streamp/(\d+)", MessageNewHandler),
]
tornado.web.Application.__init__(self, handlers)
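# Hedged usage sketch (routes taken from the handler table above; host, port
# and payloads are illustrative):
#
#     # publish a message to thread 42
#     curl -d 'html=hello' http://localhost:8888/api/1.0/streamp/42
#     # long-poll for new messages on thread 42, optionally resuming at a cursor
#     curl -d 'cursor=<last-id>' http://localhost:8888/api/1.0/stream/42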
def main():
tornado.options.parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
the-stack_106_28640 | # Limites
from limite.telaGenerica2 import TelaGenerica
# Controles
# Utils
from PySimpleGUI import PySimpleGUI as sg
class TelaArmaAltera(TelaGenerica):
def __init__(self, controlador):
super().__init__(controlador)
self.__dados_da_arma = {
"ID": None,
"NOME": None,
"DADOS": None,
"FACES": None,
}
self.init_components()
def init_components(self):
sg.ChangeLookAndFeel('Reddit')
id = self.__dados_da_arma["ID"]
nome = self.__dados_da_arma["NOME"]
dados = self.__dados_da_arma["DADOS"]
faces = self.__dados_da_arma["FACES"]
mensagem = f"Modificar arma: id: {id}, nome: {nome}"
layout = [
[sg.Text(mensagem, justification='center', size=(30, 2))],
[sg.Text("Novo nome da arma:"), sg.InputText(nome, key="NOME")],
[sg.Text("Nova quantidade de dados:"), sg.Slider(range=(1, 10), orientation='h', size=(10, 20), default_value=dados, key="DADOS")],
[sg.Text("Novo numero de faces:"), sg.Slider(range=(1, 20), orientation='h', size=(10, 20), default_value=faces, key="FACES")],
[sg.Submit("Confirmar", key="CONFIRMA"), sg.Cancel("Cancelar", key="CANCELA")],
]
janela = sg.Window("Alterar arma", default_element_size=(40, 10)).Layout(layout)
super(TelaArmaAltera, self).cria_janela(janela)
def mostra_tela(self, dados: dict):
self.__dados_da_arma = dados
return super().mostra_tela()
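# Hedged usage sketch (the controller object and the return value of the
# generic TelaGenerica.mostra_tela are assumptions; values are illustrative):
#
#     tela = TelaArmaAltera(controlador)
#     evento, valores = tela.mostra_tela(
#         {"ID": 1, "NOME": "Espada", "DADOS": 2, "FACES": 6})
#     # valores is expected to carry the edited "NOME", "DADOS" and "FACES".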
|
the-stack_106_28641 |
# ----------------------------------------------------------------------------------------------
# Import dependencies
# ----------------------------------------------------------------------------------------------
from settings import *
from keras.utils import to_categorical
from random import shuffle
import progressbar
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import numpy as np
import _pickle as pickle
import time
import vae_definition
from vae_definition import VAE
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
import pretty_midi as pm
import sys
from import_midi import import_midi_from_folder
import data_class
from matplotlib2tikz import save as tikz_save
# ----------------------------------------------------------------------------------------------
# Set parameters for training session (not for VAE)
# ----------------------------------------------------------------------------------------------
# Path where the polyphonic models are saved:
model_path = 'models/autoencode/vae/'
model_filetype = '.pickle'
assert(output_length > 0)
assert(input_length > 0)
# ----------------------------------------------------------------------------------------------
# Build VAE model
# ----------------------------------------------------------------------------------------------
print('creating model...')
model = VAE()
model.create( input_dim=input_dim,
output_dim=output_dim,
use_embedding=use_embedding,
embedding_dim=embedding_dim,
input_length=input_length,
output_length=output_length,
latent_rep_size=latent_dim,
vae_loss=vae_loss,
optimizer=optimizer,
activation=activation,
lstm_activation=lstm_activation,
lstm_state_activation=lstm_state_activation,
epsilon_std=epsilon_std,
epsilon_factor=epsilon_factor,
include_composer_decoder=include_composer_decoder,
num_composers=num_composers,
composer_weight=composer_weight,
lstm_size=lstm_size,
cell_type=cell_type,
num_layers_encoder=num_layers_encoder,
num_layers_decoder=num_layers_decoder,
bidirectional=bidirectional,
decode=decode,
teacher_force=teacher_force,
learning_rate=learning_rate,
split_lstm_vector=split_lstm_vector,
history=history,
beta=beta,
prior_mean=prior_mean,
prior_std=prior_std,
decoder_additional_input=decoder_additional_input,
decoder_additional_input_dim=decoder_additional_input_dim,
extra_layer=extra_layer,
meta_instrument= meta_instrument,
meta_instrument_dim= meta_instrument_dim,
meta_instrument_length=meta_instrument_length,
meta_instrument_activation=meta_instrument_activation,
meta_instrument_weight = meta_instrument_weight,
signature_decoder = signature_decoder,
signature_dim = signature_dim,
signature_activation = signature_activation,
signature_weight = signature_weight,
composer_decoder_at_notes_output=composer_decoder_at_notes_output,
composer_decoder_at_notes_weight=composer_decoder_at_notes_weight,
composer_decoder_at_notes_activation=composer_decoder_at_notes_activation,
composer_decoder_at_instrument_output=composer_decoder_at_instrument_output,
composer_decoder_at_instrument_weight=composer_decoder_at_instrument_weight,
composer_decoder_at_instrument_activation=composer_decoder_at_instrument_activation,
meta_velocity=meta_velocity,
meta_velocity_length=meta_velocity_length,
meta_velocity_activation=meta_velocity_activation,
meta_velocity_weight=meta_velocity_weight,
meta_held_notes=meta_held_notes,
meta_held_notes_length=meta_held_notes_length,
meta_held_notes_activation=meta_held_notes_activation,
meta_held_notes_weight=meta_held_notes_weight,
meta_next_notes=meta_next_notes,
meta_next_notes_output_length=meta_next_notes_output_length,
meta_next_notes_weight=meta_next_notes_weight,
meta_next_notes_teacher_force=meta_next_notes_teacher_force,
activation_before_splitting=activation_before_splitting
)
encoder = model.encoder
decoder = model.decoder
autoencoder = model.autoencoder
print(encoder.summary())
print(decoder.summary())
print(autoencoder.summary())
if load_previous_checkpoint:
autoencoder.load_weights(previous_checkpoint_path +'autoencoder'+'Epoch'+str(previous_epoch)+'.pickle', by_name=False)
encoder.load_weights(previous_checkpoint_path+'encoder'+'Epoch'+str(previous_epoch)+'.pickle', by_name=False)
decoder.load_weights(previous_checkpoint_path+'decoder'+'Epoch'+str(previous_epoch)+'.pickle', by_name=False)
print("Successfully loaded previous epochs")
if reset_states:
autoencoder.reset_states()
encoder.reset_states()
decoder.reset_states()
# ----------------------------------------------------------------------------------------------
# Import and preprocess data
# ----------------------------------------------------------------------------------------------
print('loading data...')
# Get Train and test sets
folder = source_folder
V_train, V_test, D_train, D_test, T_train, T_test, I_train, I_test, Y_train, Y_test, X_train, X_test, C_train, C_test, train_paths, test_paths = import_midi_from_folder(folder)
train_set_size = len(X_train)
test_set_size = len(X_test)
print(len(train_paths))
print(len(test_paths))
print(C_test)
# ----------------------------------------------------------------------------------------------
# Prepare model path
# ----------------------------------------------------------------------------------------------
fd = {'include_composer_feature': include_composer_feature, 'highcrop': high_crop, 'lowcrop':low_crop, 'lr': learning_rate, 'opt': optimizer,
'bi': bidirectional, 'lstm_size': lstm_size, 'latent': latent_dim, 'trainsize': train_set_size, 'testsize': test_set_size, 'input_length': input_length,
'output_length': output_length, 'reset_states': reset_states, 'compdec': include_composer_decoder, 'num_layers_encoder': num_layers_encoder, 'num_layers_decoder': num_layers_decoder,
'beta': beta, 'epsstd': epsilon_std}
model_name = t+'-_ls_inlen_%(input_length)s_outlen_%(output_length)s_beta_%(beta)s_lr_%(lr)s_lstmsize_%(lstm_size)s_latent_%(latent)s_trainsize_%(trainsize)s_testsize_%(testsize)s_epsstd_%(epsstd)s' % fd
model_path = model_path + model_name + '/'
if not os.path.exists(model_path):
os.makedirs(model_path)
# ----------------------------------------------------------------------------------------------
# Test function
# ----------------------------------------------------------------------------------------------
enumerated_metric_names = []
metric_names_total_dict = dict()
metric_names_count_dict = dict()
for name in autoencoder.metrics_names:
if name in metric_names_count_dict.keys():
metric_names_total_dict[name] += 1
else:
metric_names_total_dict[name] = 1
#initialize count dict
metric_names_count_dict[name] = 0
for name in autoencoder.metrics_names:
if metric_names_total_dict[name] > 1:
metric_names_count_dict[name] += 1
enumerated_metric_names.append(name + "_" + str(metric_names_count_dict[name]))
else:
enumerated_metric_names.append(name)
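# Example of what the enumeration above produces (this metrics_names list is
# hypothetical; the real one depends on the model's output heads):
#
#     autoencoder.metrics_names = ['loss', 'decoder_loss', 'decoder_loss',
#                                  'decoder_acc', 'decoder_acc']
#     enumerated_metric_names   = ['loss', 'decoder_loss_1', 'decoder_loss_2',
#                                  'decoder_acc_1', 'decoder_acc_2']
#
# Names that occur only once (e.g. 'composer_decoder_loss') are kept unchanged.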
# initialize loss arrays
total_test_notes_loss_array = []
total_train_notes_loss_array = []
total_test_loss_array = []
total_train_loss_array = []
total_train_accuracy_array = []
total_test_accuracy_array = []
max_test_accuracy = 0
total_train_meta_instrument_accuracy_array = []
total_test_meta_instrument_accuracy_array = []
total_train_meta_instrument_loss_array = []
total_test_meta_instrument_loss_array = []
total_train_meta_velocity_accuracy_array = []
total_test_meta_velocity_accuracy_array = []
total_train_meta_velocity_loss_array = []
total_test_meta_velocity_loss_array = []
total_train_meta_held_notes_accuracy_array = []
total_test_meta_held_notes_accuracy_array = []
total_train_meta_held_notes_loss_array = []
total_test_meta_held_notes_loss_array = []
total_train_meta_next_notes_accuracy_array = []
total_test_meta_next_notes_accuracy_array = []
total_train_meta_next_notes_loss_array = []
total_test_meta_next_notes_loss_array = []
total_train_composer_accuracy_array = []
total_train_composer_loss_array = []
total_test_composer_accuracy_array = []
total_test_composer_loss_array = []
total_train_signature_accuracy_array = []
total_train_signature_loss_array = []
total_test_signature_accuracy_array = []
total_test_signature_loss_array = []
total_test_kl_loss_array = []
total_train_kl_loss_array = []
total_train_composer_instrument_accuracy_array = []
total_train_composer_instrument_loss_array = []
total_test_composer_instrument_accuracy_array = []
total_test_composer_instrument_loss_array = []
total_train_composer_notes_accuracy_array = []
total_train_composer_notes_loss_array = []
total_test_composer_notes_accuracy_array = []
total_test_composer_notes_loss_array = []
# Test function
def test():
global max_test_accuracy
print('\nTesting:')
total_test_loss = 0
total_test_accuracy = 0
total_test_notes_loss = 0
total_test_meta_instrument_loss = 0
total_test_meta_instrument_accuracy = 0
total_test_meta_velocity_loss = 0
total_test_meta_velocity_accuracy = 0
total_test_meta_held_notes_loss = 0
total_test_meta_held_notes_accuracy = 0
total_test_meta_next_notes_loss = 0
total_test_meta_next_notes_accuracy = 0
total_test_loss_composer = 0
total_test_accuracy_composer = 0
total_test_loss_signature = 0
total_test_signature_accuracy = 0
total_test_loss_composer_notes = 0
total_test_composer_notes_accuracy = 0
total_test_loss_composer_instrument = 0
total_test_composer_instrument_accuracy = 0
bar = progressbar.ProgressBar(maxval=test_set_size)
bar.start()
for test_song_num in range(len(X_test)):
X = X_test[test_song_num]
Y = Y_test[test_song_num]
C = C_test[test_song_num]
I = I_test[test_song_num]
V = V_test[test_song_num]
D = D_test[test_song_num]
S = normalized_S_test[test_song_num]
T = T_test[test_song_num] #not yet used
#calculate history if desired
if history:
#get the representation by feeding the inputs into the encoder
encoder_input_list = vae_definition.prepare_encoder_input_list(X,I,V,D)
representation_list = encoder.predict(encoder_input_list, batch_size=batch_size, verbose=False)
#roll the list by one to save the representation of the last sample for each input
H = np.zeros(representation_list.shape)
H[1:] = representation_list[:-1]
else:
H = np.zeros((X.shape[0], latent_dim))
input_list, output_list = vae_definition.prepare_autoencoder_input_and_output_list(X,Y,C,I,V,D,S,H, return_sample_weight=False)
loss = autoencoder.evaluate(input_list, output_list, batch_size=batch_size, verbose=False)
total_test_loss += loss[0]
if meta_instrument or meta_velocity or meta_held_notes or meta_next_notes:
count = 1
total_test_notes_loss += loss[enumerated_metric_names.index('decoder_loss_' + str(count))]
total_test_accuracy += loss[enumerated_metric_names.index('decoder_acc_1')]
if meta_instrument:
count += 1
try:
# accumulate the test metrics for this output head from the evaluate() results
total_test_meta_instrument_accuracy += loss[enumerated_metric_names.index('decoder_acc_' + str(count))]
total_test_meta_instrument_loss += loss[enumerated_metric_names.index('decoder_loss_' + str(count))]
except ValueError:
count -= 1
if meta_velocity:
count += 1
try:
total_test_meta_velocity_accuracy += loss[enumerated_metric_names.index('decoder_acc_' + str(count))]
total_test_meta_velocity_loss += loss[enumerated_metric_names.index('decoder_loss_' + str(count))]
except ValueError:
count -= 1
if meta_held_notes:
count += 1
try:
total_test_meta_held_notes_accuracy += loss[enumerated_metric_names.index('decoder_acc_' + str(count))]
total_test_meta_held_notes_loss += loss[enumerated_metric_names.index('decoder_loss_' + str(count))]
except ValueError:
count -= 1
if meta_next_notes:
count += 1
try:
total_test_meta_next_notes_accuracy += loss[enumerated_metric_names.index('decoder_acc_' + str(count))]
total_test_meta_next_notes_loss += loss[enumerated_metric_names.index('decoder_loss_' + str(count))]
except ValueError:
count -= 1
else:
if len(enumerated_metric_names) > 2:
total_test_accuracy += loss[enumerated_metric_names.index('decoder_acc')]
total_test_notes_loss += loss[enumerated_metric_names.index('decoder_loss')]
else:
total_test_notes_loss += loss[0]
total_test_accuracy += loss[1]
if include_composer_decoder:
total_test_loss_composer += loss[enumerated_metric_names.index('composer_decoder_loss')]
total_test_accuracy_composer += loss[enumerated_metric_names.index('composer_decoder_acc')]
if signature_decoder:
total_test_loss_signature += loss[enumerated_metric_names.index('signature_decoder_loss')]
total_test_signature_accuracy += loss[enumerated_metric_names.index('signature_decoder_acc')]
if composer_decoder_at_notes_output:
total_test_loss_composer_notes += loss[enumerated_metric_names.index('composer_decoder_at_notes_loss')]
total_test_composer_notes_accuracy += loss[enumerated_metric_names.index('composer_decoder_at_notes_acc')]
if composer_decoder_at_instrument_output:
total_test_loss_composer_instrument += loss[enumerated_metric_names.index('composer_decoder_at_instruments_loss')]
total_test_composer_instrument_accuracy += loss[enumerated_metric_names.index('composer_decoder_at_instruments_acc')]
if reset_states:
autoencoder.reset_states()
bar.update(test_song_num+1)
plt.close('all')
f, axarr = plt.subplots(3,2, sharex=True, figsize=(15.0, 20.0))
f.suptitle(t)
if include_composer_decoder:
composer_accuracy = total_test_accuracy_composer/test_set_size
composer_loss = total_test_loss_composer/test_set_size
total_test_composer_loss_array.append(composer_loss)
total_test_composer_accuracy_array.append(composer_accuracy)
print('\nTest composer accuracy: ', composer_accuracy)
print('Test composer loss: ', composer_loss)
axarr[1,1].plot(total_test_composer_accuracy_array, label='Test composer accuracy')
axarr[1,0].plot(total_train_composer_accuracy_array, label='Train composer accuracy')
axarr[0,1].plot(total_test_composer_loss_array, label='Test composer loss')
axarr[0,0].plot(total_train_composer_loss_array, label='Train composer loss')
pickle.dump(total_test_composer_loss_array,open(model_path+'total_test_composer_loss_array.pickle', 'wb'))
pickle.dump(total_train_composer_loss_array,open(model_path+'total_train_composer_loss_array.pickle', 'wb'))
pickle.dump(total_test_composer_accuracy_array,open(model_path+'total_test_composer_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_composer_accuracy_array,open(model_path+'total_train_composer_accuracy_array.pickle', 'wb'))
if meta_instrument:
meta_instrument_accuracy = total_test_meta_instrument_accuracy/test_set_size
meta_instrument_loss = total_test_meta_instrument_loss/test_set_size
total_test_meta_instrument_loss_array.append(meta_instrument_loss)
total_test_meta_instrument_accuracy_array.append(meta_instrument_accuracy)
print('Test meta instrument accuracy: ', meta_instrument_accuracy)
print('Test meta instrument loss: ', meta_instrument_loss)
axarr[1,1].plot(total_test_meta_instrument_accuracy_array, label='Test instrument accuracy')
axarr[1,0].plot(total_train_meta_instrument_accuracy_array, label='Train instrument accuracy')
axarr[0,1].plot(total_test_meta_instrument_loss_array, label='Test instrument loss')
axarr[0,0].plot(total_train_meta_instrument_loss_array, label='Train instrument loss')
pickle.dump(total_test_meta_instrument_loss_array,open(model_path+'total_test_meta_instrument_loss_array.pickle', 'wb'))
pickle.dump(total_test_meta_instrument_accuracy_array,open(model_path+'total_test_meta_instrument_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_meta_instrument_loss_array,open(model_path+'total_train_meta_instrument_loss_array.pickle', 'wb'))
pickle.dump(total_train_meta_instrument_accuracy_array,open(model_path+'total_train_meta_instrument_accuracy_array.pickle', 'wb'))
if meta_held_notes:
meta_held_notes_accuracy = total_test_meta_held_notes_accuracy/test_set_size
meta_held_notes_loss = total_test_meta_held_notes_loss/test_set_size
total_test_meta_held_notes_loss_array.append(meta_held_notes_loss)
total_test_meta_held_notes_accuracy_array.append(meta_held_notes_accuracy)
print('Test meta held_notes accuracy: ', meta_held_notes_accuracy)
print('Test meta held_notes loss: ', meta_held_notes_loss)
axarr[1,1].plot(total_test_meta_held_notes_accuracy_array, label='Test held_notes accuracy')
axarr[1,0].plot(total_train_meta_held_notes_accuracy_array, label='Train held_notes accuracy')
axarr[0,1].plot(total_test_meta_held_notes_loss_array, label='Test held_notes loss')
axarr[0,0].plot(total_train_meta_held_notes_loss_array, label='Train held_notes loss')
pickle.dump(total_test_meta_held_notes_loss_array,open(model_path+'total_test_meta_held_notes_loss_array.pickle', 'wb'))
pickle.dump(total_test_meta_held_notes_accuracy_array,open(model_path+'total_test_meta_held_notes_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_meta_held_notes_loss_array,open(model_path+'total_train_meta_held_notes_loss_array.pickle', 'wb'))
pickle.dump(total_train_meta_held_notes_accuracy_array,open(model_path+'total_train_meta_held_notes_accuracy_array.pickle', 'wb'))
if meta_next_notes:
meta_next_notes_accuracy = total_test_meta_next_notes_accuracy/test_set_size
meta_next_notes_loss = total_test_meta_next_notes_loss/test_set_size
total_test_meta_next_notes_loss_array.append(meta_next_notes_loss)
total_test_meta_next_notes_accuracy_array.append(meta_next_notes_accuracy)
print('Test meta next_notes accuracy: ', meta_next_notes_accuracy)
print('Test meta next_notes loss: ', meta_next_notes_loss)
axarr[1,1].plot(total_test_meta_next_notes_accuracy_array, label='Test next_notes accuracy')
axarr[1,0].plot(total_train_meta_next_notes_accuracy_array, label='Train next_notes accuracy')
axarr[0,1].plot(total_test_meta_next_notes_loss_array, label='Test next_notes loss')
axarr[0,0].plot(total_train_meta_next_notes_loss_array, label='Train next_notes loss')
pickle.dump(total_test_meta_next_notes_loss_array,open(model_path+'total_test_meta_next_notes_loss_array.pickle', 'wb'))
pickle.dump(total_test_meta_next_notes_accuracy_array,open(model_path+'total_test_meta_next_notes_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_meta_next_notes_loss_array,open(model_path+'total_train_meta_next_notes_loss_array.pickle', 'wb'))
pickle.dump(total_train_meta_next_notes_accuracy_array,open(model_path+'total_train_meta_next_notes_accuracy_array.pickle', 'wb'))
if composer_decoder_at_notes_output:
composer_notes_accuracy = total_test_composer_notes_accuracy/test_set_size
composer_notes_loss = total_test_loss_composer_notes/test_set_size
total_test_composer_notes_loss_array.append(composer_notes_loss)
total_test_composer_notes_accuracy_array.append(composer_notes_accuracy)
print('Test composer_notes accuracy: ', composer_notes_accuracy)
print('Test composer_notes loss: ', composer_notes_loss)
axarr[1,1].plot(total_test_composer_notes_accuracy_array, label='Test composer_notes accuracy')
axarr[1,0].plot(total_train_composer_notes_accuracy_array, label='Train composer_notes accuracy')
axarr[0,1].plot(total_test_composer_notes_loss_array, label='Test composer_notes loss')
axarr[0,0].plot(total_train_composer_notes_loss_array, label='Train composer_notes loss')
pickle.dump(total_test_composer_notes_loss_array,open(model_path+'total_test_composer_notes_loss_array.pickle', 'wb'))
pickle.dump(total_test_composer_notes_accuracy_array,open(model_path+'total_test_composer_notes_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_composer_notes_loss_array,open(model_path+'total_train_composer_notes_loss_array.pickle', 'wb'))
pickle.dump(total_train_composer_notes_accuracy_array,open(model_path+'total_train_composer_notes_accuracy_array.pickle', 'wb'))
if composer_decoder_at_instrument_output:
composer_instrument_accuracy = total_test_composer_instrument_accuracy/test_set_size
composer_instrument_loss = total_test_loss_composer_instrument/test_set_size
total_test_composer_instrument_loss_array.append(composer_instrument_loss)
total_test_composer_instrument_accuracy_array.append(composer_instrument_accuracy)
print('Test composer_instrument accuracy: ', composer_instrument_accuracy)
print('Test composer_instrument loss: ', composer_instrument_loss)
axarr[1,1].plot(total_test_composer_instrument_accuracy_array, label='Test composer_instrument accuracy')
axarr[1,0].plot(total_train_composer_instrument_accuracy_array, label='Train composer_instrument accuracy')
axarr[0,1].plot(total_test_composer_instrument_loss_array, label='Test composer_instrument loss')
axarr[0,0].plot(total_train_composer_instrument_loss_array, label='Train composer_instrument loss')
pickle.dump(total_test_composer_instrument_loss_array,open(model_path+'total_test_composer_instrument_loss_array.pickle', 'wb'))
pickle.dump(total_test_composer_instrument_accuracy_array,open(model_path+'total_test_composer_instrument_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_composer_instrument_loss_array,open(model_path+'total_train_composer_instrument_loss_array.pickle', 'wb'))
pickle.dump(total_train_composer_instrument_accuracy_array,open(model_path+'total_train_composer_instrument_accuracy_array.pickle', 'wb'))
accuracy = total_test_accuracy/test_set_size
if max_test_accuracy < accuracy:
max_test_accuracy = accuracy
total_test_accuracy_array.append(accuracy)
notes_loss = total_test_notes_loss / test_set_size
total_test_notes_loss_array.append(notes_loss)
print('Test notes accuracy: ', accuracy)
print('Test notes loss: ', notes_loss)
axarr[1,1].plot(total_test_accuracy_array, label='Test notes accuracy')
axarr[1,0].plot(total_train_accuracy_array, label='Train notes accuracy')
axarr[0,1].plot(total_test_notes_loss_array, label='Test notes loss')
axarr[0,0].plot(total_train_notes_loss_array, label='Train notes loss')
pickle.dump(total_train_accuracy_array,open(model_path+'total_train_accuracy_array.pickle', 'wb'))
pickle.dump(total_test_accuracy_array,open(model_path+'total_test_accuracy_array.pickle', 'wb'))
pickle.dump(total_test_notes_loss_array,open(model_path+'total_test_notes_loss_array.pickle', 'wb'))
pickle.dump(total_train_notes_loss_array,open(model_path+'total_train_notes_loss_array.pickle', 'wb'))
if meta_velocity:
meta_velocity_accuracy = total_test_meta_velocity_accuracy/test_set_size
meta_velocity_loss = total_test_meta_velocity_loss/test_set_size
total_test_meta_velocity_loss_array.append(meta_velocity_loss)
total_test_meta_velocity_accuracy_array.append(meta_velocity_accuracy)
#Accuracy is logged for meta_velocity (it outputs accuracy metric for all losses) but it does not make sense, so don't show it or save it
#only plot and save it if it is combined with the held notes (which have accuracy)
if combine_velocity_and_held_notes or velocity_threshold_such_that_it_is_a_played_note >= 0.5:
print('Test meta velocity accuracy: ', meta_velocity_accuracy)
print('Test meta velocity loss: ', meta_velocity_loss)
if combine_velocity_and_held_notes:
axarr[1,1].plot(total_test_meta_velocity_accuracy_array, label='Test velocity accuracy')
axarr[1,0].plot(total_train_meta_velocity_accuracy_array, label='Train velocity accuracy')
axarr[0,1].plot(total_test_meta_velocity_loss_array, label='Test velocity loss')
axarr[0,0].plot(total_train_meta_velocity_loss_array, label='Train velocity loss')
pickle.dump(total_test_meta_velocity_loss_array,open(model_path+'total_test_meta_velocity_loss_array.pickle', 'wb'))
if combine_velocity_and_held_notes or velocity_threshold_such_that_it_is_a_played_note >= 0.5:
pickle.dump(total_test_meta_velocity_accuracy_array,open(model_path+'total_test_meta_velocity_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_meta_velocity_accuracy_array,open(model_path+'total_train_meta_velocity_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_meta_velocity_loss_array,open(model_path+'total_train_meta_velocity_loss_array.pickle', 'wb'))
if signature_decoder:
signature_accuracy = total_test_signature_accuracy/test_set_size
signature_loss = total_test_loss_signature/test_set_size
total_test_signature_loss_array.append(signature_loss)
total_test_signature_accuracy_array.append(signature_accuracy)
#Don't plot signature accuracy since it makes no sense in regression problem
#print('Test signature accuracy: ', signature_accuracy)
print('Test signature loss: ', signature_loss)
#axarr[1,1].plot(total_test_signature_accuracy_array, label='Test signature accuracy')
#axarr[1,0].plot(total_train_signature_accuracy_array, label='Train signature accuracy')
axarr[0,1].plot(total_test_signature_loss_array, label='Test signature loss')
axarr[0,0].plot(total_train_signature_loss_array, label='Train signature loss')
pickle.dump(total_test_signature_loss_array,open(model_path+'total_test_signature_loss_array.pickle', 'wb'))
#pickle.dump(total_test_signature_accuracy_array,open(model_path+'total_test_signature_accuracy_array.pickle', 'wb'))
pickle.dump(total_train_signature_loss_array,open(model_path+'total_train_signature_loss_array.pickle', 'wb'))
#pickle.dump(total_train_signature_accuracy_array,open(model_path+'total_train_signature_accuracy_array.pickle', 'wb'))
test_loss = total_test_loss/test_set_size
total_test_loss_array.append(test_loss)
if beta > 0:
#TODO. adjust by weights?
kl_loss = test_loss - notes_loss * 1.0
if include_composer_decoder: kl_loss -= composer_loss * composer_weight
if meta_instrument: kl_loss -= meta_instrument_loss * meta_instrument_weight
if meta_velocity: kl_loss -= meta_velocity_loss * meta_velocity_weight
if meta_held_notes: kl_loss -= meta_held_notes_loss * meta_held_notes_weight
if meta_next_notes: kl_loss -= meta_next_notes_loss * meta_next_notes_weight
if signature_decoder: kl_loss -= signature_loss * signature_weight
if composer_decoder_at_notes_output: kl_loss -= composer_notes_loss * composer_decoder_at_notes_weight
if composer_decoder_at_instrument_output: kl_loss -= composer_instrument_loss * composer_decoder_at_instrument_weight
#since you get the value back weighted, scale back by dividing by beta
kl_loss = kl_loss / beta
total_test_kl_loss_array.append(kl_loss)
axarr[2,1].plot(total_test_kl_loss_array, label='Test KL loss')
axarr[2,0].plot(total_train_kl_loss_array, label='Train KL loss')
print('Test KL loss: ', kl_loss)
pickle.dump(total_test_kl_loss_array,open(model_path+'total_test_kl_loss_array.pickle', 'wb'))
pickle.dump(total_train_kl_loss_array,open(model_path+'total_train_kl_loss_array.pickle', 'wb'))
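# The KL term is backed out of the total weighted loss, i.e.
#     total = notes + sum(head_loss * head_weight) + beta * KL
#     KL    = (total - notes - sum(head_loss * head_weight)) / beta
# For example (illustrative numbers): total 2.00, notes 1.20, a composer loss
# of 0.50 with weight 1.0 and beta 0.1 give KL = (2.00 - 1.20 - 0.50) / 0.1 = 3.0.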
print('Total test loss: ', test_loss)
axarr[0,1].plot(total_test_loss_array, label='Total test loss')
axarr[0,0].plot(total_train_loss_array, label='Total train loss')
pickle.dump(total_test_loss_array,open(model_path+'total_test_loss_array.pickle', 'wb'))
pickle.dump(total_train_loss_array,open(model_path+'total_train_loss_array.pickle', 'wb'))
axarr[2,1].set_title("Test KL loss",fontsize=10)
axarr[2,0].set_title("Train KL loss", fontsize=10)
axarr[1,1].set_title("Test accuracies - Max notes acc: %4.2f" % max_test_accuracy, fontsize=10)
axarr[1,0].set_title("Train accuracies", fontsize=10)
axarr[0,1].set_title("Test losses",fontsize=10)
axarr[0,0].set_title("Train losses", fontsize=10)
axarr[2,1].legend(loc='upper right', prop={'size': 8})
axarr[2,0].legend(loc='upper right', prop={'size': 8})
axarr[1,1].legend(loc='lower right', prop={'size': 8})
axarr[1,0].legend(loc='lower right', prop={'size': 8})
axarr[0,1].legend(loc='upper right', prop={'size': 8})
axarr[0,0].legend(loc='upper right', prop={'size': 8})
if show_plot: f.show()
if save_plot: f.savefig(model_path+'plot.png')
print('-'*50)
# ----------------------------------------------------------------------------------------------
# Save parameters file
# ----------------------------------------------------------------------------------------------
# Save Parameters to text file
with open(model_path + 'params.txt', "w", encoding='utf-8') as text_file:
text_file.write("load_from_pickle_instead_of_midi: %s" % load_from_pickle_instead_of_midi + '\n')
text_file.write("pickle_load_path: %s" % pickle_load_path + '\n')
text_file.write("epochs: %s" % epochs + '\n')
text_file.write("input_dim: %s" % input_dim + '\n')
text_file.write("output_dim: %s" % output_dim + '\n')
text_file.write("attach_instruments: %s" % attach_instruments + '\n')
text_file.write("instrument_dim: %s" % instrument_dim + '\n')
text_file.write("include_only_monophonic_instruments: %s" % include_only_monophonic_instruments + '\n')
text_file.write("instrument_attach_method: %s" % instrument_attach_method + '\n')
text_file.write("equal_mini_songs: %s" % equal_mini_songs + '\n')
text_file.write("train_set_size: %s" % train_set_size + '\n')
text_file.write("test_set_size: %s" % test_set_size + '\n')
text_file.write("batch_size: %s" % batch_size + '\n')
text_file.write("learning_rate: %s" % learning_rate + '\n')
text_file.write("beta: %s" % beta + '\n')
text_file.write("prior_mean: %s" % prior_mean + '\n')
text_file.write("prior_std: %s" % prior_std + '\n')
text_file.write("save_step: %s" % save_step + '\n')
text_file.write("shuffle_train_set: %s" % shuffle_train_set + '\n')
text_file.write("test_step: %s" % test_step + '\n')
text_file.write("bidirectional: %s" % bidirectional + '\n')
text_file.write("teacher_force: %s" % teacher_force + '\n')
text_file.write("include_composer_decoder: %s" % include_composer_decoder + '\n')
text_file.write("composer_weight: %s" % composer_weight + '\n')
text_file.write("include_composer_feature: %s" % include_composer_feature + '\n')
text_file.write("max_voices: %s" % max_voices + '\n')
text_file.write("num_layers_encoder: %s" % num_layers_encoder + '\n')
text_file.write("num_layers_decoder: %s" % num_layers_decoder + '\n')
text_file.write("optimizer: %s" % optimizer + '\n')
text_file.write("cell_type: %s" % cell_type + '\n')
text_file.write("lstm_size: %s" % lstm_size + '\n')
text_file.write("latent_dim: %s" % latent_dim + '\n')
text_file.write("split_lstm_vector: %s" % split_lstm_vector + '\n')
text_file.write("extra_layer: %s" % extra_layer + '\n')
text_file.write("history: %s" % history + '\n')
text_file.write("include_silent_note: %s" % include_silent_note + '\n')
text_file.write("silent_weight: %s" % silent_weight + '\n')
text_file.write("activation: %s" % activation + '\n')
text_file.write("lstm_activation: %s" % lstm_activation + '\n')
text_file.write("lstm_state_activation: %s" % lstm_state_activation + '\n')
text_file.write("decoder_additional_input: %s" % decoder_additional_input + '\n')
text_file.write("decoder_additional_input_dim: %s" % decoder_additional_input_dim + '\n')
text_file.write("decoder_input_composer: %s" % decoder_input_composer + '\n')
text_file.write("epsilon_std: %s" % epsilon_std + '\n')
text_file.write("epsilon_factor: %s" % epsilon_factor + '\n')
text_file.write("append_signature_vector_to_latent: %s" % append_signature_vector_to_latent + '\n')
text_file.write("song_completion: %s" % song_completion + '\n')
text_file.write("meta_instrument: %s" % meta_instrument + '\n')
text_file.write("meta_instrument_dim: %s" % meta_instrument_dim + '\n')
text_file.write("meta_instrument_length: %s" % meta_instrument_length + '\n')
text_file.write("meta_instrument_activation: %s" % meta_instrument_activation + '\n')
text_file.write("meta_instrument_weight: %s" % meta_instrument_weight + '\n')
text_file.write("signature_decoder: %s" % signature_decoder + '\n')
text_file.write("signature_dim: %s" % signature_dim + '\n')
text_file.write("signature_activation: %s" % signature_activation + '\n')
text_file.write("signature_weight: %s" % signature_weight + '\n')
text_file.write("composer_decoder_at_notes_output: %s" % composer_decoder_at_notes_output + '\n')
text_file.write("composer_decoder_at_notes_weight: %s" % composer_decoder_at_notes_weight + '\n')
text_file.write("composer_decoder_at_notes_activation: %s" % composer_decoder_at_notes_activation + '\n')
text_file.write("composer_decoder_at_instrument_output: %s" % composer_decoder_at_instrument_output + '\n')
text_file.write("composer_decoder_at_instrument_weight: %s" % composer_decoder_at_instrument_weight + '\n')
text_file.write("composer_decoder_at_instrument_activation: %s" % composer_decoder_at_instrument_activation+ '\n')
text_file.write("meta_velocity: %s" % meta_velocity +"\n")
text_file.write("meta_velocity_activation: %s" % meta_velocity_activation +"\n")
text_file.write("meta_velocity_weight: %s" % meta_velocity_weight +"\n")
text_file.write("meta_held_notes: %s" % meta_held_notes +"\n")
text_file.write("meta_held_notes_length: %s" % meta_held_notes_length +"\n")
text_file.write("meta_held_notes_activation: %s" % meta_held_notes_activation +"\n")
text_file.write("meta_held_notes_weight: %s" % meta_held_notes_weight +"\n")
text_file.write("meta_next_notes: %s" % meta_next_notes +"\n")
text_file.write("meta_next_notes_output_length: %s" % meta_next_notes_output_length +"\n")
text_file.write("meta_next_notes_weight: %s" % meta_next_notes_weight +"\n")
text_file.write("meta_next_notes_teacher_force: %s" % meta_next_notes_teacher_force +"\n")
text_file.write("activation_before_splitting: %s" % activation_before_splitting+"\n")
text_file.write("train_paths: %s" % train_paths + '\n')
text_file.write("test_paths: %s" % test_paths + '\n')
# ----------------------------------------------------------------------------------------------
# Final preprocessing / Calculate signature vectors for set
# ----------------------------------------------------------------------------------------------
total_notes = 0
for train_song_num in range(len(X_train)):
x = X_train[train_song_num]
total_notes += input_length * x.shape[0]
print("Total steps (notes + silent): ", total_notes)
print("Total samples: ", total_notes // input_length)
all_S = []
S_train = []
for train_song_num in range(len(Y_train)):
Y = Y_train[train_song_num]
num_samples = Y.shape[0]
signature_vectors = np.zeros((num_samples, signature_vector_length))
for sample in range(num_samples):
poly_sample = data_class.monophonic_to_khot_pianoroll(Y[sample], max_voices)
if include_silent_note:
poly_sample = poly_sample[:,:-1]
signature = data_class.signature_from_pianoroll(poly_sample)
signature_vectors[sample] = signature
S_train.append(signature_vectors)
all_S.extend(signature_vectors)
all_S = np.asarray(all_S)
mean_signature = np.mean(all_S, axis=0)
print(mean_signature)
std_signature = np.std(all_S, axis=0)
#make sure you don't divide by zero if std is 0
for i, val in enumerate(std_signature):
if val == 0:
std_signature[i] = 1.0e-10
print(std_signature)
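# Each signature dimension is z-scored with the training-set statistics:
#     normalized = (signature - mean_signature) / std_signature
# with zero standard deviations replaced by 1e-10 so constant dimensions do
# not divide by zero. For example (illustrative numbers): a value of 0.6 with
# mean 0.4 and std 0.1 normalizes to (0.6 - 0.4) / 0.1 = 2.0.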
normalized_S_train = []
for signature_vectors in S_train:
normalized_signature_vectors = (signature_vectors - mean_signature) / std_signature
normalized_S_train.append(normalized_signature_vectors)
normalized_S_test = []
for test_song_num in range(len(Y_test)):
Y = Y_test[test_song_num]
num_samples = Y.shape[0]
signature_vectors = np.zeros((num_samples, signature_vector_length))
for sample in range(num_samples):
poly_sample = data_class.monophonic_to_khot_pianoroll(Y[sample], max_voices)
if include_silent_note:
poly_sample = poly_sample[:,:-1]
signature = data_class.signature_from_pianoroll(poly_sample)
signature = (signature - mean_signature) / std_signature
signature_vectors[sample] = signature
normalized_S_test.append(signature_vectors)
# ----------------------------------------------------------------------------------------------
# Train and test
# ----------------------------------------------------------------------------------------------
# Train model
print('Training model...')
start_epoch = 0
if load_previous_checkpoint:
start_epoch = previous_epoch
for e in range(start_epoch, epochs):
#total_switched_notes = 0
total_train_loss = 0.0
total_train_accuracy = 0.0
total_train_meta_instrument_accuracy = 0.0
total_train_meta_instrument_loss = 0.0
total_train_meta_velocity_accuracy = 0.0
total_train_meta_velocity_loss = 0.0
total_train_meta_held_notes_accuracy = 0.0
total_train_meta_held_notes_loss = 0.0
total_train_meta_next_notes_accuracy = 0.0
total_train_meta_next_notes_loss = 0.0
total_train_composer_accuracy = 0.0
total_train_composer_loss = 0.0
total_train_signature_accuracy = 0.0
total_train_signature_loss = 0.0
total_train_notes_loss = 0.0
total_train_kl_loss = 0.0
total_train_composer_notes_accuracy = 0.0
total_train_composer_notes_loss = 0.0
total_train_composer_instrument_accuracy = 0.0
total_train_composer_instrument_loss = 0.0
print('Epoch ', e, 'of ', epochs, 'Epochs\nTraining:')
print("Beta: ", beta)
print("Epsilon std: ", epsilon_std)
if shuffle_train_set:
permutation = np.random.permutation(len(X_train))
train_paths = [train_paths[i] for i in permutation]
X_train = [X_train[i] for i in permutation]
Y_train = [Y_train[i] for i in permutation]
C_train = [C_train[i] for i in permutation]
I_train = [I_train[i] for i in permutation]
V_train = [V_train[i] for i in permutation]
D_train = [D_train[i] for i in permutation]
S_train = [S_train[i] for i in permutation]
normalized_S_train = [normalized_S_train[i] for i in permutation]
T_train = [T_train[i] for i in permutation]
bar = progressbar.ProgressBar(maxval=train_set_size)
bar.start()
for train_song_num in range(len(X_train)):
X = X_train[train_song_num]
Y = Y_train[train_song_num]
C = C_train[train_song_num]
I = I_train[train_song_num]
V = V_train[train_song_num]
D = D_train[train_song_num]
S = normalized_S_train[train_song_num]
T = T_train[train_song_num] #not yet used
#calculate history if desired
if history:
#don't use the history on the 0'th epoch since the encoder is not trained yet
if e == 0:
H = np.zeros((X.shape[0], latent_dim))
else:
#get the representation by feeding the inputs into the encoder
encoder_input_list = vae_definition.prepare_encoder_input_list(X,I,V,D)
representation_list = encoder.predict(encoder_input_list, batch_size=batch_size, verbose=False)
#roll the list by one to save the representation of the last sample for each input
H = np.zeros(representation_list.shape)
H[1:] = representation_list[:-1]
else:
H = np.zeros((X.shape[0], latent_dim))
input_list, output_list, sample_weight = vae_definition.prepare_autoencoder_input_and_output_list(X,Y,C,I,V,D,S,H, return_sample_weight=True)
hist = autoencoder.fit(input_list, output_list, epochs=1, batch_size=batch_size, shuffle=False, sample_weight=sample_weight, verbose=False)
if reset_states:
autoencoder.reset_states()
bar.update(train_song_num+1)
total_train_loss += np.mean(hist.history['loss'])
#make sure you have installed keras=2.0.8 if you receive only one loss instead of decoder_loss_0,1,2... for each output
#did not work for keras=2.1.4
if meta_instrument or meta_velocity or meta_held_notes or meta_next_notes:
count = 1
total_train_accuracy += np.mean(hist.history['decoder_acc_' + str(count)])
#originally decoder_loss_ + str(count)
total_train_notes_loss += np.mean(hist.history['decoder_loss'])
if meta_instrument:
count += 1
try:
total_train_meta_instrument_accuracy += np.mean(hist.history['decoder_acc_' + str(count)])
total_train_meta_instrument_loss += np.mean(hist.history['decoder_loss_' + str(count)])
except:
count -= 1
if meta_velocity:
count += 1
try:
total_train_meta_velocity_accuracy += np.mean(hist.history['decoder_acc_' + str(count)])
total_train_meta_velocity_loss += np.mean(hist.history['decoder_loss_' + str(count)])
except:
count -= 1
if meta_held_notes:
count += 1
try:
total_train_meta_held_notes_accuracy += np.mean(hist.history['decoder_acc_' + str(count)])
total_train_meta_held_notes_loss += np.mean(hist.history['decoder_loss_' + str(count)])
except:
count -= 1
if meta_next_notes:
count += 1
try:
total_train_meta_next_notes_accuracy += np.mean(hist.history['decoder_acc_' + str(count)])
total_train_meta_next_notes_loss += np.mean(hist.history['decoder_loss_' + str(count)])
except:
count -= 1
else:
if len(hist.history.keys()) > 2:
total_train_accuracy += np.mean(hist.history['decoder_acc'])
total_train_notes_loss += np.mean(hist.history['decoder_loss'])
else:
total_train_accuracy += np.mean(hist.history['acc'])
total_train_notes_loss += np.mean(hist.history['loss'])
if include_composer_decoder:
total_train_composer_accuracy += np.mean(hist.history['composer_decoder_acc'])
total_train_composer_loss += np.mean(hist.history['composer_decoder_loss'])
if signature_decoder:
total_train_signature_accuracy += np.mean(hist.history['signature_decoder_acc'])
total_train_signature_loss += np.mean(hist.history['signature_decoder_loss'])
if composer_decoder_at_notes_output:
total_train_composer_notes_accuracy += np.mean(hist.history['composer_decoder_at_notes_acc'])
total_train_composer_notes_loss += np.mean(hist.history['composer_decoder_at_notes_loss'])
if composer_decoder_at_instrument_output:
total_train_composer_instrument_accuracy += np.mean(hist.history['composer_decoder_at_instruments_acc'])
total_train_composer_instrument_loss += np.mean(hist.history['composer_decoder_at_instruments_loss'])
total_train_loss = total_train_loss/train_set_size
total_train_accuracy = total_train_accuracy/train_set_size
total_train_notes_loss = total_train_notes_loss/train_set_size
total_train_notes_loss_array.append(total_train_notes_loss)
total_train_loss_array.append(total_train_loss)
total_train_accuracy_array.append(total_train_accuracy)
if meta_instrument:
train_meta_instrument_accuracy = total_train_meta_instrument_accuracy/train_set_size
train_meta_instrument_loss = total_train_meta_instrument_loss/train_set_size
total_train_meta_instrument_accuracy_array.append(train_meta_instrument_accuracy)
total_train_meta_instrument_loss_array.append(train_meta_instrument_loss)
print("Train instrument meta accuracy: ", train_meta_instrument_accuracy)
print("Train instrument meta loss: ", train_meta_instrument_loss)
if meta_velocity:
train_meta_velocity_accuracy = total_train_meta_velocity_accuracy/train_set_size
train_meta_velocity_loss = total_train_meta_velocity_loss/train_set_size
total_train_meta_velocity_accuracy_array.append(train_meta_velocity_accuracy)
total_train_meta_velocity_loss_array.append(train_meta_velocity_loss)
if combine_velocity_and_held_notes:
print("Train velocity meta accuracy: ", train_meta_velocity_accuracy)
print("Train velocity meta loss: ", train_meta_velocity_loss)
if meta_held_notes:
train_meta_held_notes_accuracy = total_train_meta_held_notes_accuracy/train_set_size
train_meta_held_notes_loss = total_train_meta_held_notes_loss/train_set_size
total_train_meta_held_notes_accuracy_array.append(train_meta_held_notes_accuracy)
total_train_meta_held_notes_loss_array.append(train_meta_held_notes_loss)
print("Train held_notes meta accuracy: ", train_meta_held_notes_accuracy)
print("Train held_notes meta loss: ", train_meta_held_notes_loss)
if meta_next_notes:
train_meta_next_notes_accuracy = total_train_meta_next_notes_accuracy/train_set_size
train_meta_next_notes_loss = total_train_meta_next_notes_loss/train_set_size
total_train_meta_next_notes_accuracy_array.append(train_meta_next_notes_accuracy)
total_train_meta_next_notes_loss_array.append(train_meta_next_notes_loss)
print("Train next_notes meta accuracy: ", train_meta_next_notes_accuracy)
print("Train next_notes meta loss: ", train_meta_next_notes_loss)
if include_composer_decoder:
train_composer_accuracy = total_train_composer_accuracy/train_set_size
train_composer_loss = total_train_composer_loss/train_set_size
total_train_composer_accuracy_array.append(train_composer_accuracy)
total_train_composer_loss_array.append(train_composer_loss)
print("Train composer accuracy: ", train_composer_accuracy)
print("Train composer loss: ", train_composer_loss)
if signature_decoder:
train_signature_accuracy = total_train_signature_accuracy/train_set_size
train_signature_loss = total_train_signature_loss/train_set_size
total_train_signature_accuracy_array.append(train_signature_accuracy)
total_train_signature_loss_array.append(train_signature_loss)
#print("Train signature accuracy: ", train_signature_accuracy)
print("Train signature loss: ", train_signature_loss)
if composer_decoder_at_notes_output:
train_composer_notes_accuracy = total_train_composer_notes_accuracy/train_set_size
train_composer_notes_loss = total_train_composer_notes_loss/train_set_size
total_train_composer_notes_accuracy_array.append(train_composer_notes_accuracy)
total_train_composer_notes_loss_array.append(train_composer_notes_loss)
print("Train composer_notes accuracy: ", train_composer_notes_accuracy)
print("Train composer_notes loss: ", train_composer_notes_loss)
if composer_decoder_at_instrument_output:
train_composer_instrument_accuracy = total_train_composer_instrument_accuracy/train_set_size
train_composer_instrument_loss = total_train_composer_instrument_loss/train_set_size
total_train_composer_instrument_accuracy_array.append(train_composer_instrument_accuracy)
total_train_composer_instrument_loss_array.append(train_composer_instrument_loss)
print("Train composer_instrument accuracy: ", train_composer_instrument_accuracy)
print("Train composer_instrument loss: ", train_composer_instrument_loss)
print("Train notes accuracy: ", total_train_accuracy)
print("Train notes loss: ", total_train_notes_loss)
if beta>0:
kl_loss = total_train_loss - total_train_notes_loss * 1.0
if include_composer_decoder: kl_loss -= train_composer_loss * composer_weight
if meta_instrument: kl_loss -= train_meta_instrument_loss * meta_instrument_weight
if meta_velocity: kl_loss -= train_meta_velocity_loss * meta_velocity_weight
if meta_held_notes: kl_loss -= train_meta_held_notes_loss * meta_held_notes_weight
if meta_next_notes: kl_loss -= train_meta_next_notes_loss * meta_next_notes_weight
if signature_decoder: kl_loss -= train_signature_loss * signature_weight
if composer_decoder_at_notes_output: kl_loss -= train_composer_notes_loss * composer_decoder_at_notes_weight
if composer_decoder_at_instrument_output: kl_loss -= train_composer_instrument_loss * composer_decoder_at_instrument_weight
#since you get the value back weighted, scale back by dividing by beta
kl_loss = kl_loss / beta
total_train_kl_loss_array.append(kl_loss)
print('Train KL loss: ', kl_loss)
print("Total train loss: ", total_train_loss)
if e % test_step == 0:
test()
if e % save_step == 0:
print('saving model')
autoencoder_save_path = model_path + 'autoencoder' + 'Epoch' + str(e) + model_filetype
#autoencoder.save(autoencoder_save_path)
autoencoder.save_weights(autoencoder_save_path)
encoder_save_path = model_path + 'encoder' + 'Epoch' + str(e) + model_filetype
#encoder.save(encoder_save_path)
encoder.save_weights(encoder_save_path)
decoder_save_path = model_path + 'decoder' + 'Epoch' + str(e) + model_filetype
#decoder.save(decoder_save_path)
decoder.save_weights(decoder_save_path)
|
the-stack_106_28642 | from bingads.v13.bulk.entities import QualityScoreData
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping, _ComplexBulkMapping
from bingads.v13.internal.extensions import *
def coop_setting_to_csv(bulk_ad_group, row_values):
if not bulk_ad_group.ad_group.Settings or not bulk_ad_group.ad_group.Settings.Setting:
return
settings = [setting for setting in bulk_ad_group.ad_group.Settings.Setting if isinstance(setting, CoOpSetting_Type)]
if len(settings) == 0:
return
if len(settings) != 1:
raise ValueError('Can only have 1 CoOpSetting in AdGroup Settings.')
row_values[_StringTable.MaximumBid] = settings[0].BidMaxValue
row_values[_StringTable.BidBoostValue] = settings[0].BidBoostValue
row_values[_StringTable.BidOption] = settings[0].BidOption
pass
def csv_to_coop_setting(row_values, bulk_ad_group):
maximum_bid_success, maximum_bid = row_values.try_get_value(_StringTable.MaximumBid)
bid_boost_value_success, bid_boost_value = row_values.try_get_value(_StringTable.BidBoostValue)
bid_option_success, bid_option = row_values.try_get_value(_StringTable.BidOption)
if maximum_bid_success or bid_boost_value_success or bid_option_success:
coop_setting = _CAMPAIGN_OBJECT_FACTORY_V13.create('CoOpSetting')
coop_setting.Type = 'CoOpSetting'
coop_setting.BidOption = bid_option if bid_option else None
coop_setting.BidBoostValue = float(bid_boost_value) if bid_boost_value else None
coop_setting.BidMaxValue = float(maximum_bid) if maximum_bid else None
bulk_ad_group.ad_group.Settings.Setting.append(coop_setting)
pass
def bidding_scheme_to_csv(bulk_ad_group, row_values):
bid_strategy_type = field_to_csv_BidStrategyType(bulk_ad_group.ad_group)
if not bid_strategy_type:
return
row_values[_StringTable.BidStrategyType] = bid_strategy_type
if bid_strategy_type == 'InheritFromParent' \
and hasattr(bulk_ad_group.ad_group.BiddingScheme, 'InheritedBidStrategyType'):
row_values[_StringTable.InheritedBidStrategyType] = bulk_ad_group.ad_group.BiddingScheme.InheritedBidStrategyType
def csv_to_bidding_scheme(row_values, bulk_ad_group):
success, bid_strategy_type = row_values.try_get_value(_StringTable.BidStrategyType)
if not success or not bid_strategy_type:
return
csv_to_field_BidStrategyType(bulk_ad_group.ad_group, bid_strategy_type)
if bid_strategy_type == 'InheritFromParent':
bulk_ad_group.ad_group.BiddingScheme.Type = "InheritFromParent"
success, inherited_bid_strategy_type = row_values.try_get_value(_StringTable.InheritedBidStrategyType)
if success and inherited_bid_strategy_type != '':
bulk_ad_group.ad_group.BiddingScheme.InheritedBidStrategyType = inherited_bid_strategy_type
elif hasattr(bulk_ad_group.ad_group.BiddingScheme, 'InheritedBidStrategyType'):
del bulk_ad_group.ad_group.BiddingScheme.InheritedBidStrategyType
else:
bulk_ad_group.ad_group.BiddingScheme.Type = bid_strategy_type
class BulkAdGroup(_SingleRecordBulkEntity):
""" Represents an ad group.
This class exposes the property :attr:`ad_group` that can be read and written as fields of the Ad Group record
in a bulk file.
For more information, see Ad Group at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, campaign_id=None, campaign_name=None, ad_group=None):
super(BulkAdGroup, self).__init__()
self._campaign_id = campaign_id
self._campaign_name = campaign_name
self._ad_group = ad_group
self._quality_score_data = None
self._performance_data = None
@property
def campaign_id(self):
""" The identifier of the campaign that contains the ad group.
Corresponds to the 'Parent Id' field in the bulk file.
:rtype: int
"""
return self._campaign_id
@campaign_id.setter
def campaign_id(self, campaign_id):
self._campaign_id = campaign_id
@property
def campaign_name(self):
""" The name of the campaign that contains the ad group.
Corresponds to the 'Campaign' field in the bulk file.
:rtype: str
"""
return self._campaign_name
@campaign_name.setter
def campaign_name(self, campaign_name):
self._campaign_name = campaign_name
@property
def ad_group(self):
""" The AdGroup Data Object of the Campaign Management Service.
A subset of AdGroup properties are available in the Ad Group record.
For more information, see Ad Group at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad_group
@ad_group.setter
def ad_group(self, ad_group):
self._ad_group = ad_group
@property
def quality_score_data(self):
""" The quality score data for the ad group.
:rtype: QualityScoreData
"""
return self._quality_score_data
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.ad_group.Id),
csv_to_field=lambda c, v: setattr(c.ad_group, 'Id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Status,
field_to_csv=lambda c: bulk_str(c.ad_group.Status),
csv_to_field=csv_to_status
),
_SimpleBulkMapping(
header=_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c.campaign_id),
csv_to_field=lambda c, v: setattr(c, 'campaign_id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Campaign,
field_to_csv=lambda c: c.campaign_name,
csv_to_field=lambda c, v: setattr(c, 'campaign_name', v)
),
_SimpleBulkMapping(
header=_StringTable.AdGroup,
field_to_csv=lambda c: c.ad_group.Name,
csv_to_field=lambda c, v: setattr(c.ad_group, 'Name', v)
),
_SimpleBulkMapping(
header=_StringTable.StartDate,
field_to_csv=lambda c: bulk_date_str(c.ad_group.StartDate),
csv_to_field=lambda c, v: setattr(c.ad_group, 'StartDate', parse_date(v))
),
_SimpleBulkMapping(
header=_StringTable.EndDate,
field_to_csv=lambda c: bulk_date_str(c.ad_group.EndDate),
csv_to_field=lambda c, v: setattr(c.ad_group, 'EndDate', parse_date(v))
),
_SimpleBulkMapping(
header=_StringTable.NetworkDistribution,
field_to_csv=lambda c: bulk_str(c.ad_group.Network),
csv_to_field=lambda c, v: setattr(c.ad_group, 'Network', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.AdRotation,
field_to_csv=lambda c: ad_rotation_bulk_str(c.ad_group.AdRotation, c.ad_group.Id),
csv_to_field=lambda c, v: setattr(c.ad_group, 'AdRotation', parse_ad_rotation(v))
),
_SimpleBulkMapping(
header=_StringTable.CpcBid,
field_to_csv=lambda c: ad_group_bid_bulk_str(c.ad_group.CpcBid),
csv_to_field=lambda c, v: setattr(c.ad_group, 'CpcBid', parse_ad_group_bid(v))
),
_SimpleBulkMapping(
header=_StringTable.Language,
field_to_csv=lambda c: bulk_str(c.ad_group.Language),
csv_to_field=lambda c, v: setattr(c.ad_group, 'Language', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.BidAdjustment,
field_to_csv=lambda c: bulk_str(c.ad_group.AudienceAdsBidAdjustment),
csv_to_field=lambda c, v: setattr(
c.ad_group,
'AudienceAdsBidAdjustment',
int(v) if v else None
)
),
_SimpleBulkMapping(
header=_StringTable.TrackingTemplate,
field_to_csv=lambda c: bulk_str(c.ad_group.TrackingUrlTemplate),
csv_to_field=lambda c, v: setattr(c.ad_group, 'TrackingUrlTemplate', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.CustomParameter,
field_to_csv=lambda c: field_to_csv_UrlCustomParameters(c.ad_group),
csv_to_field=lambda c, v: csv_to_field_UrlCustomParameters(c.ad_group, v)
),
_ComplexBulkMapping(bidding_scheme_to_csv, csv_to_bidding_scheme),
_SimpleBulkMapping(
header=_StringTable.TargetSetting,
field_to_csv=lambda c: target_setting_to_csv(c.ad_group),
csv_to_field=lambda c, v: csv_to_target_setting(c.ad_group, v)
),
_SimpleBulkMapping(
header=_StringTable.PrivacyStatus,
field_to_csv=lambda c: bulk_str(c.ad_group.PrivacyStatus),
csv_to_field=lambda c, v: setattr(c.ad_group, 'PrivacyStatus', v if v else None)
),
_ComplexBulkMapping(coop_setting_to_csv, csv_to_coop_setting),
_SimpleBulkMapping(
header=_StringTable.FinalUrlSuffix,
field_to_csv=lambda c: bulk_optional_str(c.ad_group.FinalUrlSuffix, c.ad_group.Id),
csv_to_field=lambda c, v: setattr(c.ad_group, 'FinalUrlSuffix', v)
),
_SimpleBulkMapping(
header=_StringTable.AdScheduleUseSearcherTimeZone,
field_to_csv=lambda c: field_to_csv_UseSearcherTimeZone(c.ad_group.AdScheduleUseSearcherTimeZone, None),
csv_to_field=lambda c, v: setattr(c.ad_group, 'AdScheduleUseSearcherTimeZone', parse_bool(v))
),
_SimpleBulkMapping(
header=_StringTable.AdGroupType,
field_to_csv=lambda c: c.ad_group.AdGroupType,
csv_to_field=lambda c, v: setattr(c.ad_group, 'AdGroupType', v)
),
]
def process_mappings_from_row_values(self, row_values):
self.ad_group = _CAMPAIGN_OBJECT_FACTORY_V13.create('AdGroup')
row_values.convert_to_entity(self, BulkAdGroup._MAPPINGS)
self._quality_score_data = QualityScoreData.read_from_row_values_or_null(row_values)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self._ad_group, 'AdGroup')
self.convert_to_values(row_values, BulkAdGroup._MAPPINGS)
if not exclude_readonly_data:
QualityScoreData.write_to_row_values_if_not_null(self.quality_score_data, row_values)
def read_additional_data(self, stream_reader):
super(BulkAdGroup, self).read_additional_data(stream_reader)
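# Hedged usage sketch (not part of the original module): one plausible way to
# assemble a BulkAdGroup for a bulk upload, using only names defined or imported
# above. The literal values below are illustrative assumptions, not real data.
def _example_bulk_ad_group():
    ad_group = _CAMPAIGN_OBJECT_FACTORY_V13.create('AdGroup')
    ad_group.Name = 'Example Ad Group'
    return BulkAdGroup(
        campaign_id=123456789,
        campaign_name='Example Campaign',
        ad_group=ad_group,
    )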
|
the-stack_106_28644 | # variavel , = variavel = troca os valores
def do_something():
primes = {2, 3, 5, 7, 11}
evens = {2, 4, 6, 8, 10}
x, = primes.intersection(evens)
print(x)
if __name__ == '__main__':
do_something()
x = 1
a = [2]
y = [9]
x, = a
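# Illustrative addition (not in the original snippet): single-element unpacking
# raises ValueError when the iterable does not hold exactly one item.
try:
    x, = [1, 2]  # too many values to unpack
except ValueError as err:
    print(err)
try:
    x, = []  # not enough values to unpack
except ValueError as err:
    print(err)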
|
the-stack_106_28646 | import re, fileinput, tempfile
from optparse import OptionParser
IGNOREDPREFIXES = [
'PRAGMA',
'BEGIN TRANSACTION;',
'COMMIT;',
'DELETE FROM sqlite_sequence;',
'INSERT INTO "sqlite_sequence"',
]
REPLACEMAP = {"INTEGER PRIMARY KEY": "INTEGER AUTO_INCREMENT PRIMARY KEY",
"AUTOINCREMENT": "AUTO_INCREMENT",
"DEFAULT 't'": "DEFAULT '1'",
"DEFAULT 'f'": "DEFAULT '0'",
",'t'": ",'1'",
",'f'": ",'0'",
"CREATE TABLE": "CREATE TABLE IF NOT EXISTS"
}
def _replace_match_allcase(line, src, dst):
line = line.replace(src,dst)
line = line.replace(src.lower(),dst)
return line
def _replace(line):
if any(line.startswith(prefix.encode('utf8')) for prefix in IGNOREDPREFIXES):
return
for (src,dst) in REPLACEMAP.items():
line = _replace_match_allcase(line, src.encode('utf8'), dst.encode('utf8'))
return line
def _backticks(line, in_string):
"""Replace double quotes by backticks outside (multiline) strings
>>> _backticks('''INSERT INTO "table" VALUES ('"string"');''', False)
('INSERT INTO `table` VALUES (\\'"string"\\');', False)
>>> _backticks('''INSERT INTO "table" VALUES ('"Heading''', False)
('INSERT INTO `table` VALUES (\\'"Heading', True)
>>> _backticks('''* "text":http://link.com''', True)
('* "text":http://link.com', True)
>>> _backticks(" ');", True)
(" ');", False)
"""
new = ''
for c in line:
if not in_string:
if c == "'":
in_string = True
elif c == '"':
new = new + '`'
continue
elif c == "'":
in_string = False
new = new + c
return new, in_string
def _process(opts, lines):
if opts.database:
yield '''\
create database if not exists {d} character set utf8;
use {d};\n'''.format(d=opts.database, u=opts.username, p=opts.password)
yield "SET sql_mode='NO_BACKSLASH_ESCAPES';\n"
in_string = False
for line in lines:
if not in_string:
line = _replace(line)
if line is None:
continue
line, in_string = _backticks(line.decode('utf8'), in_string)
yield line
def _removeNewline(line, in_string):
new = ''
for c in line:
if not in_string:
if c == "'":
in_string = True
elif c == "'":
in_string = False
elif in_string:
if c == "\n":
new = new + 'Newline333'
continue
if c == "\r":
new = new + 'carriagereturn333'
continue
new = new + c
return new, in_string
def _replaceNewline(lines):
for line in lines:
line = line.replace("Newline333", "\n")
line = line.replace("carriagereturn333", "\r")
yield line
def _Newline(lines):
in_string = False
for line in lines:
if line is None:
continue
line, in_string = _removeNewline(line, in_string)
yield line
def main():
op = OptionParser()
op.add_option('-d', '--database')
op.add_option('-u', '--username')
op.add_option('-p', '--password')
opts, args = op.parse_args()
lines = (l for l in fileinput.input(args))
lines = (l for l in _Newline(lines))
f = tempfile.TemporaryFile()
for line in lines:
f.write(line.encode("utf8"))
f.seek(0)
lines = (l for l in f.readlines())
f.close()
lines = (l for l in _process(opts, lines))
for line in _replaceNewline(lines):
print(line)
if __name__ == "__main__":
main()
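# Example invocation (the script name is an assumption; the converted SQL is
# piped straight into mysql):
#   python sqlite3_to_mysql.py -d mydb sqlite_dump.sql | mysql -u root -p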
|
the-stack_106_28650 | import os
import json
from os.path import join, basename
def parse(logpath):
DtoH = ""
HtoD = ""
with open(logpath) as ifile:
for line in ifile:
if "[CUDA memcpy DtoH]" in line:
DtoH = line
if "[CUDA memcpy HtoD]" in line:
HtoD = line
def _parse_line(line_str):
segs = line_str.split()
if len(segs) < 6:
print(segs)
avg_time = line_str.split()[-6]
return avg_time
if DtoH == "" or HtoD == "":
print(logpath)
return (None, None)
return _parse_line(DtoH), _parse_line(HtoD)
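# Assumed profiler summary layout (illustrative, e.g. nvprof output):
#   Time(%)  Time  Calls  Avg  Min  Max  Name
# "[CUDA memcpy DtoH]" splits into three whitespace-separated tokens, so index -6
# of the split line lands on the "Avg" column extracted above.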
def parse_logs(path, collector):
if os.path.isdir(path):
files = os.listdir(path)
for f in files:
parse_logs(join(path, f), collector)
return
size = basename(path)
avgD2H, avgH2D = parse(path)
collector[int(size)] = {"DtoH(avg)": avgD2H, "HtoD(avg)": avgH2D}
return
def main():
""""""
# assume single file first
log_folder = "./profile_logs"
avg_time_collector = {}
parse_logs(log_folder, avg_time_collector)
print(avg_time_collector)
with open("./summary.json", "w") as ofile:
json.dump(avg_time_collector, ofile, indent=2)
if __name__ == "__main__":
main() |
the-stack_106_28652 | from robotframework_ls.impl.protocols import ICompletionContext, IKeywordFound
from typing import List, Optional, Union
def signature_help(completion_context: ICompletionContext) -> Optional[dict]:
from robocorp_ls_core.lsp import MarkupContent
from robocorp_ls_core.lsp import MarkupKind
keyword_definition = completion_context.get_current_keyword_definition()
if keyword_definition is not None:
from robocorp_ls_core.lsp import SignatureHelp
from robocorp_ls_core.lsp import SignatureInformation
from robocorp_ls_core.lsp import ParameterInformation
keyword_found: IKeywordFound = keyword_definition.keyword_found
keyword_args = keyword_found.keyword_args
lst = [arg.original_arg for arg in keyword_args]
label = "%s(%s)" % (keyword_found.keyword_name, ", ".join(lst))
docs_format = keyword_found.docs_format
documentation: Union[str, MarkupContent]
if docs_format == "markdown":
documentation = MarkupContent(MarkupKind.Markdown, keyword_found.docs)
else:
documentation = keyword_found.docs
parameters: List[ParameterInformation] = [
# Note: the label here is to highlight a part of the main signature label!
# (let's leave this out for now)
# ParameterInformation("param1", None),
]
signatures: List[SignatureInformation] = [
SignatureInformation(label, documentation, parameters)
]
return SignatureHelp(
signatures, active_signature=0, active_parameter=0
).to_dict()
return None
|
the-stack_106_28654 | """This module deals with UML diagrams, especially PlantUML formats, for a legal paragraph.
"""
import os
def make_uml(result):
uml = ['@startuml', '', '!include conf.txt', '']
N3s = []
for tag in reversed(result.tag_list()):
N3 = {'subject': 'A', 'object': 'B', 'predicate': 'None'}
if tag.pas != None:
N3['predicate'] = ''.join([mrph.midasi for mrph in tag.mrph_list()])
if 'ヲ' in tag.pas.arguments:
arg = tag.pas.arguments['ヲ'][0]
N3['object'] = arg.midasi
if 'ガ' in tag.pas.arguments:
arg = tag.pas.arguments['ガ'][0]
N3['subject'] = arg.midasi
N3s.append(' '.join([N3['subject'], '->', N3['object'], ':',
N3['predicate']]))
else:
for mrph in tag.mrph_list():
if mrph.hinsi == '動詞':
N3['predicate'] = mrph.midasi
N3s.append(' '.join([N3['subject'], '->', N3['object'], ':',
N3['predicate']]))
break
# participants
uml.append('actor Aさん as A <<当事者>>')
uml.append('actor Bさん as B <<相手方>>')
# actions
for N3 in reversed(N3s):
uml.append(N3)
uml.extend(['', '@enduml'])
return uml
def write_uml(uml, filename):
with open(filename, 'w') as f:
for line in uml:
f.write(line)
f.write('\n')
def show_uml(filename, plantuml='/usr/local/bin/plantuml'):
import subprocess
umldir = os.path.dirname(filename)
proc = subprocess.run([plantuml, filename], cwd=umldir,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
print('done!')
imgfile = filename.replace('.uml', '.png')
from PIL import Image
from matplotlib import pyplot as plt
img = Image.open(imgfile)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
ax.imshow(img)
plt.draw()
plt.waitforbuttonpress(0)
plt.close('all')
if __name__ == '__main__':
from tokens import JNLP
nlp = JNLP()
text = '詐欺又は強迫による意思表示は、取り消すことができる。'
result = nlp.parse(text)
uml = make_uml(result)
indir = os.path.join(os.path.dirname(__file__),
'../../data/UML')
filename = os.path.join(indir, 'a.uml')
write_uml(uml, filename)
show_uml(filename)
# print(tag.tag_id, tag.dpndtype, tag.parent_id, tag.fstring)
#
|
the-stack_106_28655 | # -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
from __future__ import absolute_import
# Import python libs
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils
import salt.utils.itertools
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
'''
Only work on POSIX-like systems
'''
if salt.utils.is_windows():
return (False, 'The timezone execution module failed to load: '
'win_timezone.py should replace this module on Windows.'
'There was a problem loading win_timezone.py.')
if salt.utils.is_darwin():
return (False, 'The timezone execution module failed to load: '
'mac_timezone.py should replace this module on OS X.'
'There was a problem loading mac_timezone.py.')
return __virtualname__
def _timedatectl():
'''
get the output of timedatectl
'''
ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
if ret['retcode'] != 0:
msg = 'timedatectl failed: {0}'.format(ret['stderr'])
raise CommandExecutionError(msg)
return ret
def _get_zone_solaris():
tzfile = '/etc/TIMEZONE'
with salt.utils.fopen(tzfile, 'r') as fp_:
for line in fp_:
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_sysconfig():
tzfile = '/etc/sysconfig/clock'
with salt.utils.fopen(tzfile, 'r') as fp_:
for line in fp_:
if re.match(r'^\s*#', line):
continue
if 'ZONE' in line and '=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
tzfile = '/etc/localtime'
tzdir = '/usr/share/zoneinfo/'
tzdir_len = len(tzdir)
try:
olson_name = os.path.normpath(
os.path.join('/etc', os.readlink(tzfile))
)
if olson_name.startswith(tzdir):
return olson_name[tzdir_len:]
except OSError as exc:
if exc.errno == errno.ENOENT:
raise CommandExecutionError(tzfile + ' does not exist')
elif exc.errno == errno.EINVAL:
log.warning(
tzfile + ' is not a symbolic link, attempting to match ' +
tzfile + ' to zoneinfo files'
)
# Regular file. Try to match the hash.
hash_type = __opts__.get('hash_type', 'md5')
tzfile_hash = salt.utils.get_hash(tzfile, hash_type)
# Not a link, just a copy of the tzdata file
for root, dirs, files in os.walk(tzdir):
for filename in files:
full_path = os.path.join(root, filename)
olson_name = full_path[tzdir_len:]
if olson_name[0] in string.ascii_lowercase:
continue
if tzfile_hash == \
salt.utils.get_hash(full_path, hash_type):
return olson_name
raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
with salt.utils.fopen('/etc/timezone', 'r') as fp_:
return fp_.read().strip()
def get_zone():
'''
Get current timezone (i.e. America/Denver)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
if salt.utils.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
try:
return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
except AttributeError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
if __grains__['os'].lower() == 'centos':
return _get_zone_etc_localtime()
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse'):
if family in os_family:
return _get_zone_sysconfig()
for family in ('Debian', 'Gentoo'):
if family in os_family:
return _get_zone_etc_timezone()
if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD'):
return _get_zone_etc_localtime()
elif 'Solaris' in os_family:
return _get_zone_solaris()
raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
'''
Get current timezone (i.e. PST, MDT, etc)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zonecode
'''
return __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
def get_offset():
'''
Get current numeric timezone offset from UCT (i.e. -0700)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
'''
return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
def set_zone(timezone):
'''
Unlinks, then symlinks /etc/localtime to the set timezone.
The timezone is crucial to several system processes, each of which SHOULD
be restarted (for instance, whatever your system uses as its cron and
syslog daemons). This will not be automagically done and must be done
manually!
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
'''
if salt.utils.which('timedatectl'):
try:
__salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
except CommandExecutionError:
pass
if 'Solaris' in __grains__['os_family']:
zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
else:
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
return 'Zone does not exist: {0}'.format(zonepath)
if os.path.exists('/etc/localtime'):
os.unlink('/etc/localtime')
if 'Solaris' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
else:
os.symlink(zonepath, '/etc/localtime')
if 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
with salt.utils.fopen('/etc/timezone', 'w') as ofh:
ofh.write(timezone.strip())
ofh.write('\n')
elif 'Gentoo' in __grains__['os_family']:
with salt.utils.fopen('/etc/timezone', 'w') as ofh:
ofh.write(timezone)
return True
def zone_compare(timezone):
'''
Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-like operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
'''
if 'Solaris' in __grains__['os_family']:
return timezone == get_zone()
curtzstring = get_zone()
if curtzstring != timezone:
return False
tzfile = '/etc/localtime'
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(tzfile):
return 'Error: {0} does not exist.'.format(tzfile)
hash_type = __opts__.get('hash_type', 'md5')
try:
usrzone = salt.utils.get_hash(zonepath, hash_type)
except IOError as exc:
raise SaltInvocationError('Invalid timezone \'{0}\''.format(timezone))
try:
etczone = salt.utils.get_hash(tzfile, hash_type)
except IOError as exc:
raise CommandExecutionError(
'Problem reading timezone file {0}: {1}'
.format(tzfile, exc.strerror)
)
if usrzone == etczone:
return True
return False
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse'):
if family in os_family:
cmd = ['tail', '-n', '1', '/etc/adjtime']
return __salt__['cmd.run'](cmd, python_shell=False)
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
cmd = ['tail', '-n', '1', '/etc/adjtime']
return __salt__['cmd.run'](cmd, python_shell=False)
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
return 'UTC'
cmd = ['tail', '-n', '1', '/etc/adjtime']
return __salt__['cmd.run'](cmd, python_shell=False)
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.fopen(offset_file, 'r') as fp_:
for line in fp_:
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
'''
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timedatectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
return True
|
the-stack_106_28660 | __version__ = '0.7.2'
from typing import List, Optional
import torch
import torch.nn as nn
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
if num_tags <= 0:
raise ValueError(f'invalid number of tags: {num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(
self,
emissions: torch.Tensor,
tags: torch.LongTensor,
mask: Optional[torch.ByteTensor] = None,
reduction: str = 'sum',
) -> torch.Tensor:
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
self._validate(emissions, tags=tags, mask=mask)
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise ValueError(f'invalid reduction: {reduction}')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.type_as(emissions).sum()
def decode(self, emissions: torch.Tensor,
mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
Returns:
List of list containing the best tag sequence for each batch.
"""
self._validate(emissions, mask=mask)
if mask is None:
mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
return self._viterbi_decode(emissions, mask)
def _validate(
self,
emissions: torch.Tensor,
tags: Optional[torch.LongTensor] = None,
mask: Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise ValueError(
f'expected last dimension of emissions is {self.num_tags}, '
f'got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise ValueError(
'the first two dimensions of emissions and tags must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def _compute_score(
self, emissions: torch.Tensor, tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and tags.dim() == 2
assert emissions.shape[:2] == tags.shape
assert emissions.size(2) == self.num_tags
assert mask.shape == tags.shape
assert mask[0].all()
seq_length, batch_size = tags.shape
mask = mask.type_as(emissions)
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score
def _compute_normalizer(
self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length = emissions.size(0)
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)
def _viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor) -> List[List[int]]:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history = []
# score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# history saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
# Find the maximum score over all possible current tag
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
history.append(indices)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Now, compute the best path for each sample
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
best_tags_list = []
for idx in range(batch_size):
# Find the tag which maximizes the score at the last timestep; this is our best tag
# for the last timestep
_, best_last_tag = score[idx].max(dim=0)
best_tags = [best_last_tag.item()]
# We trace back where the best last tag comes from, append that to our best tag
# sequence, and trace it back again, and so on
for hist in reversed(history[:seq_ends[idx]]):
best_last_tag = hist[idx][best_tags[-1]]
best_tags.append(best_last_tag.item())
# Reverse the order because we start from the last timestep
best_tags.reverse()
best_tags_list.append(best_tags)
return best_tags_list
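# Hedged usage sketch (not part of the original module): a minimal end-to-end run
# with random emissions, following the batch_first=True convention documented above.
if __name__ == '__main__':
    num_tags, batch_size, seq_length = 5, 2, 3
    crf = CRF(num_tags, batch_first=True)
    emissions = torch.randn(batch_size, seq_length, num_tags)
    tags = torch.randint(0, num_tags, (batch_size, seq_length), dtype=torch.long)
    log_likelihood = crf(emissions, tags)  # scalar, reduction='sum' by default
    best_paths = crf.decode(emissions)     # list of best tag sequences per batch
    print(log_likelihood, best_paths)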
|
the-stack_106_28663 | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import dlab.fab
import dlab.actions_lib
import dlab.meta_lib
import os
import logging
import sys
import json
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
print('Generating infrastructure names and tags')
AzureMeta = dlab.meta_lib.AzureMeta()
AzureActions = dlab.actions_lib.AzureActions()
edge_conf = dict()
edge_conf['service_base_name'] = os.environ['conf_service_base_name']
edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
edge_conf['project_name'] = os.environ['project_name']
edge_conf['endpoint_name'] = os.environ['endpoint_name']
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
os.environ['azure_region'])
logging.info('[START EDGE]')
print('[START EDGE]')
try:
AzureActions.start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
except Exception as err:
dlab.fab.append_result("Failed to start edge.", str(err))
sys.exit(1)
try:
public_ip_address = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
edge_conf['instance_name'])
private_ip_address = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
edge_conf['instance_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(edge_conf['instance_name']))
print("Hostname: {}".format(edge_conf['instance_dns_name']))
print("Public IP: {}".format(public_ip_address))
print("Private IP: {}".format(private_ip_address))
with open("/root/result.json", 'w') as result:
res = {"instance_name": edge_conf['instance_name'],
"hostname": edge_conf['instance_dns_name'],
"public_ip": public_ip_address,
"ip": private_ip_address,
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except Exception as err:
dlab.fab.append_result("Error with writing results", str(err))
sys.exit(1)
|
the-stack_106_28665 | """
.. module: security_monkey.watcher
:platform: Unix
:synopsis: Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from common.utils.PolicyDiff import PolicyDiff
from common.utils.utils import sub_dict
from security_monkey import app
from security_monkey.datastore import Account
from security_monkey.datastore import IgnoreListEntry, Technology
from security_monkey.common.jinja import get_jinja_env
from boto.exception import BotoServerError
import time
import datastore
from sets import Set
class Watcher(object):
"""Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes."""
index = 'abstract'
i_am_singular = 'Abstract'
i_am_plural = 'Abstracts'
rate_limit_delay = 0
ignore_list = []
def __init__(self, accounts=None, debug=False):
"""Initializes the Watcher"""
self.datastore = datastore.Datastore()
if not accounts:
accounts = Account.query.filter(Account.third_party==False).filter(Account.active==True).all()
self.accounts = [account.name for account in accounts]
else:
self.accounts = accounts
self.debug = debug
self.created_items = []
self.deleted_items = []
self.changed_items = []
self.rate_limit_delay = 0
def prep_for_slurp(self):
"""
Should be run before slurp is run to grab the IgnoreList.
"""
query = IgnoreListEntry.query
query = query.join((Technology, Technology.id == IgnoreListEntry.tech_id))
self.ignore_list = query.filter(Technology.name==self.index).all()
def check_ignore_list(self, name):
"""
See if the given item has a name flagging it to be ignored by security_monkey.
"""
for result in self.ignore_list:
if name.lower().startswith(result.prefix.lower()):
app.logger.warn("Ignoring {}/{} because of IGNORELIST prefix {}".format(self.index, name, result.prefix))
return True
return False
def wrap_aws_rate_limited_call(self, awsfunc, *args, **nargs):
attempts = 0
while True:
attempts = attempts + 1
try:
if self.rate_limit_delay > 0:
time.sleep(self.rate_limit_delay)
retval = awsfunc(*args, **nargs)
if self.rate_limit_delay > 0:
app.logger.warn(("Successfully Executed Rate-Limited Function. " +
"Tech: {} Account: {}. "
"Reducing sleep period from {} to {}")
.format(self.index, self.accounts, self.rate_limit_delay, self.rate_limit_delay / 2))
self.rate_limit_delay = self.rate_limit_delay / 2
return retval
except BotoServerError as e:
if e.error_code == 'Throttling':
if self.rate_limit_delay == 0:
self.rate_limit_delay = 1
app.logger.warn(('Being rate-limited by AWS. Increasing delay on tech {} ' +
'in account {} from 0 to 1 second. Attempt {}')
.format(self.index, self.accounts, attempts))
elif self.rate_limit_delay < 16:
self.rate_limit_delay = self.rate_limit_delay * 2
app.logger.warn(('Still being rate-limited by AWS. Increasing delay on tech {} ' +
'in account {} to {} seconds. Attempt {}')
.format(self.index, self.accounts, self.rate_limit_delay, attempts))
else:
raise e
else:
raise e
def created(self):
"""
Used by the Jinja templates
:returns: True if created_items is not empty
:returns: False otherwise.
"""
return len(self.created_items) > 0
def deleted(self):
"""
Used by the Jinja templates
:returns: True if deleted_items is not empty
:returns: False otherwise.
"""
return len(self.deleted_items) > 0
def changed(self):
"""
Used by the Jinja templates
:returns: True if changed_items is not empty
:returns: False otherwise.
"""
return len(self.changed_items) > 0
def slurp(self):
"""
method to slurp configuration from AWS for whatever it is that I'm
interested in. This will be overriden for each technology.
"""
raise NotImplementedError()
def slurp_exception(self, location=None, exception=None, exception_map={}):
"""
Logs any exceptions that happen in slurp and adds them to the exception_map
using their location as the key. The location is a tuple in the form:
(technology, account, region, item_name) that describes the object where the exception occurred.
Location can also exclude an item_name if the exception is region-wide.
"""
if location in exception_map:
app.logger.debug("Exception map already has location {}. This should not happen.".format(location))
exception_map[location] = exception
app.logger.debug("Adding {} to the exceptions list. Exception was: {}".format(location, str(exception)))
def locationInExceptionMap(self, item_location, exception_map={}):
"""
Determines whether a given location is covered by an exception already in the
exception map.
Item location: (self.index, self.account, self.region, self.name)
exception Maps: (index, account, region, name)
(index, account, region)
(index, account)
:returns: True if location is covered by an entry in the exception map.
:returns: False if location is not covered by an entry in the exception map.
"""
# Exact Match
if item_location in exception_map:
app.logger.debug("Skipping {} due to an item-level exception {}.".format(item_location, exception_map[item_location]))
return True
# (index, account, region)
if item_location[0:3] in exception_map:
app.logger.debug("Skipping {} due to an region-level exception {}.".format(item_location, exception_map[item_location[0:3]]))
return True
# (index, account)
if item_location[0:2] in exception_map:
app.logger.debug("Skipping {} due to an account-level exception {}.".format(item_location, exception_map[item_location[0:2]]))
return True
# (index)
if item_location[0:1] in exception_map:
app.logger.debug("Skipping {} due to an technology-level exception {}.".format(item_location, exception_map[item_location[0:1]]))
return True
return False
def find_deleted(self, previous=[], current=[], exception_map={}):
"""
Find any items that have been deleted since the last run of the watcher.
Add these items to the deleted_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(Set(prev_map).difference(Set(curr_map)))
item_locations = [item_location for item_location in item_locations if not self.locationInExceptionMap(item_location, exception_map)]
list_deleted_items = [prev_map[item] for item in item_locations]
for item in list_deleted_items:
deleted_change_item = ChangeItem.from_items(old_item=item, new_item=None)
app.logger.debug("%s %s/%s/%s deleted" % (self.i_am_singular, item.account, item.region, item.name))
self.deleted_items.append(deleted_change_item)
def find_new(self, previous=[], current=[]):
"""
Find any new objects that have been created since the last run of the watcher.
Add these items to the created_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(Set(curr_map).difference(Set(prev_map)))
list_new_items = [curr_map[item] for item in item_locations]
for item in list_new_items:
new_change_item = ChangeItem.from_items(old_item=None, new_item=item)
self.created_items.append(new_change_item)
app.logger.debug("%s %s/%s/%s created" % (self.i_am_singular, item.account, item.region, item.name))
def find_modified(self, previous=[], current=[], exception_map={}):
"""
Find any objects that have been changed since the last run of the watcher.
Add these items to the changed_items list.
"""
prev_map = {item.location(): item for item in previous}
curr_map = {item.location(): item for item in current}
item_locations = list(Set(curr_map).intersection(Set(prev_map)))
item_locations = [item_location for item_location in item_locations if not self.locationInExceptionMap(item_location, exception_map)]
for location in item_locations:
prev_item = prev_map[location]
curr_item = curr_map[location]
if not sub_dict(prev_item.config) == sub_dict(curr_item.config):
change_item = ChangeItem.from_items(old_item=prev_item, new_item=curr_item)
self.changed_items.append(change_item)
app.logger.debug("%s %s/%s/%s changed" % (self.i_am_singular, change_item.account, change_item.region, change_item.name))
def find_changes(self, current=[], exception_map={}):
"""
Identify changes between the configuration I have and what I had
last time the watcher ran.
This ignores any account/region which caused an exception during slurp.
"""
prev = self.read_previous_items()
self.find_deleted(previous=prev, current=current, exception_map=exception_map)
self.find_new(previous=prev, current=current)
self.find_modified(previous=prev, current=current, exception_map=exception_map)
def read_previous_items(self):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
for account in self.accounts:
prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
new_config=item_revision.config)
prev_list.append(new_item)
return prev_list
def get_latest_config(self, config_dict):
"""
config_dict is a dict indexed by timestamp, with configuration as the value;
:return: the latest configuration (based on the timestamp)
"""
timestamps = config_dict.keys()
timestamps.sort()
latest = timestamps[-1]
return config_dict[latest]
def is_changed(self):
"""
:return: boolean whether or not we've found any changes
"""
return self.deleted_items or self.created_items or self.changed_items
def issues_found(self):
"""
Runs through any changed items to see if any have issues.
:return: boolean whether any changed items have issues
"""
has_issues = False
has_new_issue = False
has_unjustified_issue = False
for item in self.created_items + self.changed_items:
if item.audit_issues:
has_issues = True
if item.found_new_issue:
has_new_issue = True
has_unjustified_issue = True
break
for issue in item.confirmed_existing_issues:
if not issue.justified:
has_unjustified_issue = True
break
return has_issues, has_new_issue, has_unjustified_issue
def save(self):
"""
save new configs, if necessary
"""
app.logger.info("{} deleted {} in {}".format(len(self.deleted_items), self.i_am_plural, self.accounts))
app.logger.info("{} created {} in {}".format(len(self.created_items), self.i_am_plural, self.accounts))
app.logger.info("{} changed {} in {}".format(len(self.changed_items), self.i_am_plural, self.accounts))
for item in self.created_items + self.changed_items + self.deleted_items:
item.save(self.datastore)
def plural_name(self):
"""
Used for Jinja Template
:return: i_am_plural
"""
return self.i_am_plural
def singular_name(self):
"""
Used for Jinja Template
:return: i_am_singular
"""
return self.i_am_singular
class ChangeItem(object):
"""
Object tracks two different revisions of a given item.
"""
def __init__(self, index=None, region=None, account=None, name=None, old_config={}, new_config={}, active=False, audit_issues=None):
self.index = index
self.region = region
self.account = account
self.name = name
self.old_config = old_config
self.new_config = new_config
self.active = active
self.audit_issues = audit_issues or []
self.confirmed_new_issues = []
self.confirmed_fixed_issues = []
self.confirmed_existing_issues = []
self.found_new_issue = False
@classmethod
def from_items(cls, old_item=None, new_item=None):
"""
Create ChangeItem from two separate items.
:return: An instance of ChangeItem
"""
if not old_item and not new_item:
return
valid_item = new_item if new_item else old_item
active = True if new_item else False
old_config = old_item.config if old_item else {}
new_config = new_item.config if new_item else {}
return cls(index=valid_item.index,
region=valid_item.region,
account=valid_item.account,
name=valid_item.name,
old_config=old_config,
new_config=new_config,
active=active,
audit_issues=valid_item.audit_issues)
@property
def config(self):
return self.new_config
def location(self):
"""
Construct a location from the object.
:return: tuple containing index, account, region, and name.
"""
return (self.index, self.account, self.region, self.name)
def get_pdiff_html(self):
pdiff = PolicyDiff(self.new_config, self.old_config)
return pdiff.produceDiffHTML()
def _dict_for_template(self):
return {
'account': self.account,
'region': self.region,
'name': self.name,
'confirmed_new_issues': self.confirmed_new_issues,
'confirmed_fixed_issues': self.confirmed_fixed_issues,
'confirmed_existing_issues': self.confirmed_existing_issues,
'pdiff_html': self.get_pdiff_html()
}
def description(self):
"""
Provide an HTML description of the object for change emails and the Jinja templates.
:return: string of HTML desribing the object.
"""
jenv = get_jinja_env()
template = jenv.get_template('jinja_change_item.html')
body = template.render(self._dict_for_template())
# app.logger.info(body)
return body
def save(self, datastore):
"""
Save the item
"""
app.logger.debug("Saving {}/{}/{}/{}\n\t{}".format(self.index, self.account, self.region, self.name, self.new_config))
datastore.store(self.index, self.region, self.account, self.name, self.active, self.new_config, new_issues=self.audit_issues)
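class ExampleWatcher(Watcher):
    """Hedged sketch (not part of the original module): the minimal shape of a
    concrete watcher. The technology name and the empty slurp body are
    illustrative assumptions, not a real implementation."""
    index = 'example'
    i_am_singular = 'Example'
    i_am_plural = 'Examples'
    def slurp(self):
        self.prep_for_slurp()
        item_list, exception_map = [], {}
        # A real watcher would call AWS here via self.wrap_aws_rate_limited_call(...)
        # and append item objects, skipping names caught by self.check_ignore_list(name).
        return item_list, exception_map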
|
the-stack_106_28666 | # -*- coding: utf-8 -*-
"""
=====================================================================
Spectro-temporal receptive field (STRF) estimation on continuous data
=====================================================================
This demonstrates how an encoding model can be fit with multiple continuous
inputs. In this case, we simulate the model behind a spectro-temporal receptive
field (or STRF). First, we create a linear filter that maps patterns in
spectro-temporal space onto an output, representing neural activity. We fit
a receptive field model that attempts to recover the original linear filter
that was used to create this data.
"""
# Authors: Chris Holdgraf <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD-3-Clause
# %%
# sphinx_gallery_thumbnail_number = 7
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337) # To make this example reproducible
# %%
# Load audio data
# ---------------
#
# We'll read in the audio data from :footcite:`CrosseEtAl2016` in order to
# simulate a response.
#
# In addition, we'll downsample the data along the time dimension in order to
# speed up computation. Note that depending on the input values, this may
# not be desired. For example, downsampling is not appropriate if your input
# stimulus varies more quickly than half the sampling rate we downsample to.
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim
# %%
# Create a receptive field
# ------------------------
#
# We'll simulate a linear receptive field for a theoretical neural signal. This
# defines how the signal will respond to power in this receptive field space.
n_freqs = 20
tmin, tmax = -0.1, 0.4
# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))
# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)
# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]
means_low = [.2, 2500]
cov = [[.001, 0], [0, 500000]]
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low # Combine to create the "true" STRF
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
cmap='RdBu_r', shading='gouraud')
fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# %%
# Simulate a neural response
# --------------------------
#
# Using this receptive field, we'll create an artificial neural response to
# a stimulus.
#
# To do this, we'll create time-delayed copies of the stimulus, and
# then calculate the dot product between these and the receptive field. Note that this
# is effectively doing a convolution between the stimulus and the receptive
# field. See `here <https://en.wikipedia.org/wiki/Convolution>`_ for more
# information.
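# As a quick illustrative check (using throwaway variables that are not part
# of the analysis), delaying a toy 1-D signal and taking dot products with a
# small filter gives the same result as ``np.convolve``:
_toy_sig = np.sin(np.arange(20) / 3.)
_toy_filt = np.array([.5, -.25, .1])
_manual = np.array([sum(_toy_filt[d] * (_toy_sig[t - d] if t - d >= 0 else 0.)
                        for d in range(len(_toy_filt)))
                    for t in range(len(_toy_sig))])
assert np.allclose(_manual, np.convolve(_toy_sig, _toy_filt)[:len(_toy_sig)])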
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]
# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
# These arrays will take/put particular indices in the data
take = [slice(None)] * X.ndim
put = [slice(None)] * X.ndim
if ix_delay > 0:
take[-1] = slice(None, -ix_delay)
put[-1] = slice(ix_delay, None)
elif ix_delay < 0:
take[-1] = slice(-ix_delay, None)
put[-1] = slice(None, ix_delay)
X_del[ii][tuple(put)] = X[tuple(take)]
# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()
# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
# Simulate this epoch and add random noise
noise_amp = .002
y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds',
shading='gouraud')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
# %%
# Fit a model to recover this receptive field
# -------------------------------------------
#
# Finally, we'll use the :class:`mne.decoding.ReceptiveField` class to recover
# the linear receptive field of this signal. Note that properties of the
# receptive field (e.g. smoothness) will depend on the autocorrelation in the
# inputs and outputs.
# Create training and testing data
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
(X_train, X_test, y_train, y_test)]
# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores[ii] = rf.score(X_test, y_test)
models.append(rf)
times = rf.delays_ / float(rf.sfreq)
# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
# %%
# Visualize the effects of regularization
# ---------------------------------------
#
# Above we fit a :class:`mne.decoding.ReceptiveField` model for each of several
# values of the ridge regularization parameter. Here we will plot the model
# score as well as the model coefficients for each value, in order to
# visualize how coefficients change with different levels of regularization.
# These issues as well as the STRF pipeline are described in detail
# in :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,HoldgrafEtAl2016`.
# Plot model score for each ridge parameter
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
plt.xticks([], [])
plt.yticks([], [])
plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
# %%
# Using different regularization types
# ------------------------------------
# In addition to the standard ridge regularization, the
# :class:`mne.decoding.TimeDelayingRidge` class also exposes
# a `Laplacian <https://en.wikipedia.org/wiki/Laplacian_matrix>`_ regularization
# term, defined as:
#
# .. math::
# \left[\begin{matrix}
# 1 & -1 & & & & \\
# -1 & 2 & -1 & & & \\
# & -1 & 2 & -1 & & \\
# & & \ddots & \ddots & \ddots & \\
# & & & -1 & 2 & -1 \\
# & & & & -1 & 1\end{matrix}\right]
#
# This imposes a smoothness constraint on nearby time samples and/or features.
# Quoting :footcite:`CrosseEtAl2016` :
#
# Tikhonov [identity] regularization (Equation 5) reduces overfitting by
# smoothing the TRF estimate in a way that is insensitive to
# the amplitude of the signal of interest. However, the Laplacian
# approach (Equation 6) reduces off-sample error whilst preserving
# signal amplitude (Lalor et al., 2006). As a result, this approach
# usually leads to an improved estimate of the system’s response (as
# indexed by MSE) compared to Tikhonov regularization.
#
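# As a rough illustration (the ``_demo`` variables below are only for
# demonstration), the matrix shown above can be built explicitly with NumPy;
# ``TimeDelayingRidge`` constructs an equivalent penalty internally.
_n_demo = 6
_laplacian_demo = (np.diag(np.r_[1., np.full(_n_demo - 2, 2.), 1.])
                   - np.diag(np.ones(_n_demo - 1), k=1)
                   - np.diag(np.ones(_n_demo - 1), k=-1))
print(_laplacian_demo)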
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
alpha=alpha)
rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
rf.fit(X_train, y_train)
# Now make predictions about the model output, given input stimuli.
scores_lap[ii] = rf.score(X_test, y_test)
models_lap.append(rf)
ix_best_alpha_lap = np.argmax(scores_lap)
# %%
# Compare model performance
# -------------------------
# Below we visualize the model performance of each regularization method
# (ridge vs. Laplacian) for different levels of alpha. As you can see, the
# Laplacian method performs better in general, because it imposes a smoothness
# constraint along the time and feature dimensions of the coefficients.
# This matches the "true" receptive field structure and results in a better
# model fit.
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
scores_lap[ix_best_alpha_lap]),
(ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRFs for each regularization value (Laplacian on top, ridge below)
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Laplacian')
ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax.set(xticks=[], yticks=[], xlim=xlim)
if ii == 0:
ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()
# %%
# Plot the original STRF, and the one that we recovered with modeling.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# %%
# References
# ==========
# .. footbibliography::
|
the-stack_106_28668 | from pathlib import Path
from typing import Optional, Dict, List
from pandas import DataFrame, read_excel, Series, isnull
from aws_managers.utils.dtype_mappings import FS_NAME_TO_ATHENA_NAME
class FeaturesMetadata(object):
def __init__(self, metadata_fn: Path, dataset_name: str):
"""
        Class holding feature metadata, used to rename columns and derive
        schema information for one dataset.
        :param metadata_fn: Path to the metadata spreadsheet (Excel workbook).
        :param dataset_name: Name of the dataset in the metadata spreadsheet.
"""
# read metadata
self.columns: DataFrame = read_excel(
metadata_fn,
sheet_name=dataset_name, engine='openpyxl'
).set_index('original_name')
attributes = read_excel(
metadata_fn,
sheet_name='attributes', engine='openpyxl'
)
self.attributes: Series = attributes.loc[
attributes['dataset'] == dataset_name
].set_index('attribute_name')['attribute_type']
self._name_mapping: Optional[Dict[str, str]] = None
def check_d_types(self, data: DataFrame):
"""
Check that the data can be converted to the d-types in the metadata.
:param data: The data whose d-types to check.
"""
for old_name, attribute_values in self.columns.iterrows():
print(f'\rChecking d-type for column {old_name}' + ' ' * 256,
end='')
if attribute_values['data_type'] == 'Integral':
_ = data[old_name].dropna().astype(int)
elif attribute_values['data_type'] == 'Fractional':
_ = data[old_name].dropna().astype(float)
elif attribute_values['data_type'] == 'String':
_ = data[old_name].dropna().astype('string')
print('\nAll checks passed.')
@property
def name_mapping(self) -> Dict[str, str]:
"""
Return a dictionary that maps old feature names to new ones.
"""
# return mapping if it already exists
if self._name_mapping is not None:
return self._name_mapping
# build mapping
old_to_new_name = {}
old_name: str
for old_name, attr_values in self.columns.iterrows():
new_name = f"{attr_values['feature']}___{attr_values['metric']}"
for attr_name, attr_type in self.attributes.items():
attribute_value = self.columns.loc[old_name, attr_name]
if isnull(attribute_value):
continue
if attr_type == 'string':
new_name += f'___{attr_name}__{attribute_value}'
elif attr_type == 'bool':
if attribute_value == True:
new_name += f'___{attr_name}'
elif attribute_value == False:
new_name += f'___not_{attr_name}'
else:
raise ValueError(
f'{attr_name} should be equal to True or False '
f'but is {attribute_value}'
)
elif attr_type == 'int_range':
new_name += f'___{attr_name}__{attribute_value}'
else:
raise ValueError(
f'Invalid attribute type for attribute '
f'{attr_name} ({attr_type})'
)
# set mapping
old_to_new_name[old_name] = new_name
# return created mapping
self._name_mapping = old_to_new_name
return self._name_mapping
@property
def old_names(self) -> List[str]:
"""
Return the old names of the dataset, as listed in the metadata.
"""
return self.columns.index.to_list()
@property
def new_names(self) -> List[str]:
"""
        Return the new names of the dataset features, derived from the name mapping.
"""
mapping = self.name_mapping
return [mapping[old_name] for old_name in self.old_names]
@property
def feature_types(self) -> Dict[str, str]:
"""
Return a dictionary that maps new feature names to their types.
"""
mapping = self.name_mapping
return {
mapping[old_name]: data_type
for old_name, data_type in self.columns['data_type'].items()
}
def athena_schema(self, identifier_name: str, identifier_type: str) -> str:
"""
Return a string of pairs of new column name and Athena data type.
:param identifier_name: Name of the FeatureStore record identifier.
:param identifier_type: Data type of the FeatureStore record identifier.
One of {'String', 'Integral', 'Fractional'}
"""
str_out = (
f'{identifier_name} {FS_NAME_TO_ATHENA_NAME[identifier_type]},\n'
)
mapping = self.name_mapping
str_out += ',\n'.join([
f'{mapping[old_name]} {FS_NAME_TO_ATHENA_NAME[data_type]}'
for old_name, data_type in self.columns['data_type'].items()
]) + '\n'
return str_out
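# A minimal usage sketch (the file name, sheet name and identifier below are
# hypothetical):
#
#     metadata = FeaturesMetadata(Path('features_metadata.xlsx'), 'my_dataset')
#     metadata.check_d_types(raw_df)
#     renamed = raw_df.rename(columns=metadata.name_mapping)[metadata.new_names]
#     print(metadata.athena_schema('record_id', 'String'))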
|
the-stack_106_28669 | # coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for Anderson acceleration."""
from typing import Any
import jax
import jax.numpy as jnp
from ott.core import dataclasses
SinkhornState = Any
@dataclasses.register_pytree_node
class AndersonAcceleration:
"""Implements Anderson acceleration for Sinkhorn."""
memory: int = 2 # Number of iterates considered to form interpolation.
refresh_every: int = 1 # Recompute interpolation periodically.
ridge_identity: float = 1e-2 # Ridge used in the linear system.
def extrapolation(self, xs, fxs):
"""Computes Anderson extrapolation from past observations."""
    # Replace -inf values with 0 to instantiate the quadratic problem. Other
    # non-finite values remain, since they might point to a genuine issue.
fxs_clean = jnp.nan_to_num(fxs, nan=jnp.nan, posinf=jnp.inf, neginf=0.0)
xs_clean = jnp.nan_to_num(xs, nan=jnp.nan, posinf=jnp.inf, neginf=0.0)
residuals = fxs_clean - xs_clean
gram_matrix = jnp.matmul(residuals.T, residuals)
gram_matrix /= jnp.linalg.norm(gram_matrix)
# Solve linear system to obtain weights
weights = jax.scipy.sparse.linalg.cg(
gram_matrix + self.ridge_identity * jnp.eye(xs.shape[1]),
jnp.ones(xs.shape[1]))[0]
weights /= jnp.sum(weights)
    # Recover the linear combination and return it, with NaN values (caused
    # by 0 weights leading to -jnp.inf potentials, mixed with weight
    # coefficients of different signs) disambiguated to -inf.
combination = jnp.sum(fxs * weights[None, :], axis=1)
return jnp.where(jnp.isfinite(combination), combination, -jnp.inf)
def update(self,
state: SinkhornState,
iteration: int,
pb, lse_mode: bool):
"""Anderson acceleration update.
When using Anderson acceleration, first update the dual variable f_u with
previous updates (if iteration count sufficiently large), then record
new iterations in array.
Anderson acceleration always happens in potentials (not scalings) space,
regardless of the lse_mode setting. If the iteration count is large
enough the update below will output a potential variable.
Args:
state: A sinkhorn.SinkhornState
iteration: int, the current iteration.
pb: a problem.LinearProblem defining the OT problem.
lse_mode: whether to compute in log-sum-exp or in scalings.
Returns:
A potential variable.
"""
geom = pb.geom
trigger_update = jnp.logical_and(iteration > self.memory,
iteration % self.refresh_every == 0)
fu = jnp.where(trigger_update,
self.extrapolation(state.old_fus, state.old_mapped_fus),
state.fu)
# If the interpolation was triggered, we store it in memory
# Otherwise we add the previous value (converting it to potential form if
# it was initially stored in scaling form).
old_fus = jnp.where(
trigger_update,
jnp.concatenate((state.old_fus[:, 1:], fu[:, None]), axis=1),
jnp.concatenate(
(state.old_fus[:, 1:],
(fu if lse_mode else geom.potential_from_scaling(fu))[:, None]),
axis=1))
# If update was triggered, ensure a scaling is returned, since the result
# from the extrapolation was outputted in potential form.
fu = jnp.where(
trigger_update,
fu if lse_mode else geom.scaling_from_potential(fu),
fu)
return state.set(fu=fu, old_fus=old_fus)
def init_maps(self, pb, state):
"""Initializes log matrix used in Anderson acceleration with nan values."""
fus = jnp.ones((pb.geom.shape[0], self.memory)) * jnp.nan
return state.set(old_fus=fus, old_mapped_fus=fus)
def update_history(self, state, pb, lse_mode: bool):
f = state.fu if lse_mode else pb.geom.potential_from_scaling(state.fu)
mapped = jnp.concatenate((state.old_mapped_fus[:, 1:], f[:, None]), axis=1)
return state.set(old_mapped_fus=mapped)
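# A small toy sketch of the extrapolation step (the shapes and values below are
# arbitrary, and it is assumed the decorator exposes the usual dataclass
# keyword constructor):
if __name__ == '__main__':
  toy_xs = jnp.stack([jnp.zeros(3), jnp.ones(3)], axis=1)       # (n, memory)
  toy_fxs = jnp.stack([jnp.ones(3), 2. * jnp.ones(3)], axis=1)  # mapped iterates
  anderson = AndersonAcceleration(memory=2)
  print(anderson.extrapolation(toy_xs, toy_fxs))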
|
the-stack_106_28670 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.lib import local_path
from tinctest.models.scenario import ScenarioTestCase
from tinctest.main import TINCException
from mpp.lib.PSQL import PSQL
from mpp.lib.gprecoverseg import GpRecover
class FilerepResyncException(TINCException): pass
'''
Filerep Resync scenario
'''
class FilerepResync(ScenarioTestCase):
"""
@description test cases for MPP-11167
@created 2013-03-15 10:10:10
@modified 2013-05-07 17:10:15
@tags persistent tables schedule_filerep
@product_version gpdb:
"""
@classmethod
def setUpClass(cls):
super(FilerepResync,cls).setUpClass()
tinctest.logger.info('Setting up the filerep resync test.')
def wait_till_insync_transition(self):
self.gpr = GpRecover()
self.gpr.wait_till_insync_transition()
def test_filerep_resysnc(self):
#Step 1: Create an append-only table
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.AOTable")
self.test_case_scenario.append(test_case_list1)
#Step 2:1 Begin a transaction & insert values into created table
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.runsql.TransactionTest.Transaction")
#Step 2:2 Start a concurrent process to kill all the mirror processes.
# It should start only after the begin & insert are performed
test_case_list2.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.ProcessKill")
self.test_case_scenario.append(test_case_list2)
#Step 3: Check the persistent table for duplicate entries
test_case_list3 = []
test_case_list3.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.DuplicateEntries.test_duplicate_entries_after_hitting_fault")
self.test_case_scenario.append(test_case_list3)
#Step 4: Perform incremental recovery
test_case_list4 = []
test_case_list4.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.Recovery")
self.test_case_scenario.append(test_case_list4)
#Step 5: Check if the mirror segments are up or not
test_case_list5 = []
test_case_list5.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.Health")
self.test_case_scenario.append(test_case_list5)
#Step 6: Re-check the persistent table for duplicate entries
test_case_list6 = []
test_case_list6.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.DuplicateEntries.test_duplicate_entries_after_recovery")
self.test_case_scenario.append(test_case_list6)
#Step 7: Check the Sate of DB and Cluster
test_case_list7 = []
test_case_list7.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_catalog")
self.test_case_scenario.append(test_case_list7)
test_case_list8 = []
test_case_list8.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.test_filerep_resync.FilerepResync.wait_till_insync_transition")
self.test_case_scenario.append(test_case_list8)
test_case_list9 = []
test_case_list9.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_mirrorintegrity")
self.test_case_scenario.append(test_case_list9)
|
the-stack_106_28673 | import os
import pandas as pd
from shutil import copyfile
def save_results(jordan, jordan_gw, jordan_ww, jordan_desal,
folder, template=None):
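    """Write each model component dataframe to a compressed csv in ``folder``,
    optionally copying the template result files alongside them.
    """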
os.makedirs(folder, exist_ok=True)
if template:
copyfile(os.path.join(template, 'crop_production.gz'), os.path.join(folder, 'crop_production.gz'))
copyfile(os.path.join(template, 'water_delivered.gz'), os.path.join(folder, 'water_delivered.gz'))
copyfile(os.path.join(template, 'water_requirements.gz'), os.path.join(folder, 'water_requirements.gz'))
jordan.df.to_csv(os.path.join(folder, 'pipelines_data.gz'), index=False)
jordan_gw.df.to_csv(os.path.join(folder, 'groundwater_pumping.gz'), index=False)
jordan_ww.df.to_csv(os.path.join(folder, 'wwtp_data.gz'), index=False)
jordan_desal.df.to_csv(os.path.join(folder, 'desal_data.gz'), index=False)
def merge_scenario_data(path, scenarios):
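    """Load the per-scenario result files found under ``path`` and concatenate
    them into combined dataframes, tagging every row with its scenario name.
    """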
all_results = [pd.DataFrame()] * 7
for scenario in scenarios:
results = load_scenarios(os.path.join(path, scenario))
for dff in results:
dff['Scenario'] = scenario
for i, dff in enumerate(results):
all_results[i] = all_results[i].append(dff, ignore_index=True)
df_delivered = all_results[0]
df_required = all_results[1]
df_gw = all_results[2]
df_pipelines = all_results[3]
df_wwtp = all_results[4]
df_desal = all_results[5]
df_crop = all_results[6]
return df_delivered, df_required, df_gw, df_pipelines, df_wwtp, df_desal, df_crop
def load_scenarios(path):
files = ['water_delivered.gz', 'water_requirements.gz',
'groundwater_pumping.gz', 'pipelines_data.gz',
'wwtp_data.gz', 'desal_data.gz', 'crop_production.gz']
output = []
for file in files:
output.append(pd.read_csv(os.path.join(path, file)))
return output |
the-stack_106_28674 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import tempfile
import time
import os
from collections import OrderedDict
import torch
from tqdm import tqdm
from ..structures.bounding_box import BoxList
from ..utils.comm import is_main_process
from ..utils.comm import scatter_gather
from ..utils.comm import synchronize
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
def compute_on_dataset(model, data_loader, device):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for i, batch in tqdm(enumerate(data_loader)):
images, targets, image_ids = batch
images = images.to(device)
with torch.no_grad():
output = model(images)
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
return results_dict
def prepare_for_coco_detection(predictions, dataset):
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert("xywh")
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(predictions, dataset, maskiou_on):
import pycocotools.mask as mask_util
import numpy as np
masker = Masker(threshold=0.5, padding=1)
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in tqdm(enumerate(predictions)):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
masks = prediction.get_field("mask")
# t = time.time()
# Masker is necessary only if masks haven't been already resized.
if list(masks.shape[-2:]) != [image_height, image_width]:
masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
masks = masks[0]
# logger.info('Time mask: {}'.format(time.time() - t))
# prediction = prediction.convert('xywh')
# boxes = prediction.bbox.tolist()
if maskiou_on:
scores = prediction.get_field("mask_scores").tolist()
else:
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
# rles = prediction.get_field('mask')
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
# inspired from Detectron
def evaluate_box_proposals(
predictions, dataset, thresholds=None, area="all", limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = prediction.get_field("objectness").sort(descending=True)[1]
prediction = prediction[inds]
ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
anno = dataset.coco.loadAnns(ann_ids)
gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
"xyxy"
)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if len(prediction) == 0:
continue
if limit is not None and len(prediction) > limit:
prediction = prediction[:limit]
overlaps = boxlist_iou(prediction, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(prediction), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = torch.cat(gt_overlaps, dim=0)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def evaluate_predictions_on_coco(
coco_gt, coco_results, json_result_file, iou_type="bbox"
):
import json
with open(json_result_file, "w") as f:
json.dump(coco_results, f)
from pycocotools.cocoeval import COCOeval
coco_dt = coco_gt.loadRes(str(json_result_file))
# coco_dt = coco_gt.loadRes(coco_results)
coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = scatter_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
# convert a dict where the key is the index in a list
image_ids = list(sorted(predictions.keys()))
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("maskrcnn_benchmark.inference")
logger.warning(
"Number of images that were gathered from multiple processes is not "
"a contiguous set. Some images might be missing from the evaluation"
)
# convert to a list
predictions = [predictions[i] for i in image_ids]
return predictions
class COCOResults(object):
METRICS = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"box_proposal": [
"AR@100",
"ARs@100",
"ARm@100",
"ARl@100",
"AR@1000",
"ARs@1000",
"ARm@1000",
"ARl@1000",
],
"keypoint": ["AP", "AP50", "AP75", "APm", "APl"],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResults.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
from pycocotools.cocoeval import COCOeval
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResults.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
# TODO make it pretty
return repr(self.results)
def check_expected_results(results, expected_results, sigma_tol):
if not expected_results:
return
logger = logging.getLogger("maskrcnn_benchmark.inference")
for task, metric, (mean, std) in expected_results:
actual_val = results.results[task][metric]
lo = mean - sigma_tol * std
hi = mean + sigma_tol * std
ok = (lo < actual_val) and (actual_val < hi)
msg = (
"{} > {} sanity check (actual vs. expected): "
"{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
).format(task, metric, actual_val, mean, std, lo, hi)
if not ok:
msg = "FAIL: " + msg
logger.error(msg)
else:
msg = "PASS: " + msg
logger.info(msg)
def inference(
model,
data_loader,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
maskiou_on=False
):
# convert to a torch.device for efficiency
device = torch.device(device)
num_devices = (
torch.distributed.get_world_size()
if torch.distributed.is_initialized()
else 1
)
logger = logging.getLogger("maskrcnn_benchmark.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} images".format(len(dataset)))
start_time = time.time()
predictions = compute_on_dataset(model, data_loader, device)
# wait for all processes to complete before measuring the time
synchronize()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=total_time))
logger.info(
"Total inference time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
predictions = _accumulate_predictions_from_multiple_gpus(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
if box_only:
logger.info("Evaluating bbox proposals")
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
res = COCOResults("box_proposal")
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = evaluate_box_proposals(
predictions, dataset, area=area, limit=limit
)
key = "AR{}@{:d}".format(suffix, limit)
res.results["box_proposal"][key] = stats["ar"].item()
logger.info(res)
check_expected_results(res, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
return
logger.info("Preparing results for COCO format")
coco_results = {}
if "bbox" in iou_types:
logger.info("Preparing bbox results")
coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
if "segm" in iou_types:
logger.info("Preparing segm results")
coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset, maskiou_on)
results = COCOResults(*iou_types)
logger.info("Evaluating predictions")
for iou_type in iou_types:
with tempfile.NamedTemporaryFile() as f:
file_path = f.name
if output_folder:
file_path = os.path.join(output_folder, iou_type + ".json")
res = evaluate_predictions_on_coco(
dataset.coco, coco_results[iou_type], file_path, iou_type
)
results.update(res)
logger.info(results)
check_expected_results(results, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(results, os.path.join(output_folder, "coco_results.pth"))
return results, coco_results, predictions
|
the-stack_106_28675 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
u""" Другая частая задача, с которой приходится сталкиваться - это получение
данных с чужих сайтом. Не у всех сайтов есть api, поэтому нужно уметь
получить html и добыть из него нужные данные:
Получение html-страницы при помощи requests
Основы библиотеки Beautiful Soup
Разбираем поисковую страницы Яндекса
"""
import requests
import pprint
from bs4 import BeautifulSoup
def get_page(url):
try:
params = {}
        response = requests.get(url, params=params)
        response.raise_for_status()
        print(response.apparent_encoding)
        return response.text
except requests.exceptions.RequestException as e:
print(e)
return None
def parse_page(data):
bs = BeautifulSoup(data, 'html.parser')
sites_list = []
# print(bs.prettify)
for item in bs.find_all('li', class_='serp-item'):
title = item.find('div', class_='organic__url-text').text
link = item.find('a', class_='path__item').get('href')
sites_list.append({'link': link, 'title': title})
return sites_list
if __name__ == '__main__':
url = "https://yandex.ru/search/?lr=213&text=python"
# url = 'https://learn.python.ru/lessons/5_db.html?full#0'
data = get_page(url)
if data:
sites_list = parse_page(data)
pprint.pprint(sites_list)
|
the-stack_106_28676 | """Set up some common test helper things."""
import functools
import logging
from unittest.mock import patch
import pytest
import requests_mock as _requests_mock
from homeassistant import util
from homeassistant.auth.const import GROUP_ID_ADMIN, GROUP_ID_READ_ONLY
from homeassistant.auth.providers import homeassistant, legacy_api_password
from homeassistant.components.websocket_api.auth import (
TYPE_AUTH,
TYPE_AUTH_OK,
TYPE_AUTH_REQUIRED,
)
from homeassistant.components.websocket_api.http import URL
from homeassistant.setup import async_setup_component
from homeassistant.util import location
pytest.register_assert_rewrite("tests.common")
from tests.common import ( # noqa: E402, isort:skip
CLIENT_ID,
INSTANCES,
MockUser,
async_test_home_assistant,
mock_coro,
mock_storage as mock_storage,
)
from tests.test_util.aiohttp import mock_aiohttp_client # noqa: E402, isort:skip
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
def check_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
async def guard_func(*args, **kwargs):
real = kwargs.pop("_test_real", None)
if not real:
            raise Exception(
                f'Forgot to mock or pass "_test_real=True" to {func.__name__}'
            )
return await func(*args, **kwargs)
return guard_func
# Guard a few functions that would make network connections
location.async_detect_location_info = check_real(location.async_detect_location_info)
util.get_local_ip = lambda: "127.0.0.1"
@pytest.fixture(autouse=True)
def verify_cleanup():
"""Verify that the test has cleaned up resources correctly."""
yield
if len(INSTANCES) >= 2:
count = len(INSTANCES)
for inst in INSTANCES:
inst.stop()
pytest.exit(f"Detected non stopped instances ({count}), aborting test run")
@pytest.fixture
def hass_storage():
"""Fixture to mock storage."""
with mock_storage() as stored_data:
yield stored_data
@pytest.fixture
def hass(loop, hass_storage):
"""Fixture to provide a test instance of Home Assistant."""
hass = loop.run_until_complete(async_test_home_assistant(loop))
yield hass
loop.run_until_complete(hass.async_stop(force=True))
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
@pytest.fixture
def mock_device_tracker_conf():
"""Prevent device tracker from reading/writing data."""
devices = []
async def mock_update_config(path, id, entity):
devices.append(entity)
with patch(
"homeassistant.components.device_tracker.legacy"
".DeviceTracker.async_update_config",
side_effect=mock_update_config,
), patch(
"homeassistant.components.device_tracker.legacy.async_load_config",
side_effect=lambda *args: mock_coro(devices),
):
yield devices
@pytest.fixture
def hass_access_token(hass, hass_admin_user):
"""Return an access token to access Home Assistant."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_admin_user, CLIENT_ID)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def hass_owner_user(hass, local_auth):
"""Return a Home Assistant admin user."""
return MockUser(is_owner=True).add_to_hass(hass)
@pytest.fixture
def hass_admin_user(hass, local_auth):
"""Return a Home Assistant admin user."""
admin_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_ADMIN)
)
return MockUser(groups=[admin_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_user(hass, local_auth):
"""Return a Home Assistant read only user."""
read_only_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_READ_ONLY)
)
return MockUser(groups=[read_only_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_access_token(hass, hass_read_only_user):
"""Return a Home Assistant read only user."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_read_only_user, CLIENT_ID)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def legacy_auth(hass):
"""Load legacy API password provider."""
prv = legacy_api_password.LegacyApiPasswordAuthProvider(
hass,
hass.auth._store,
{"type": "legacy_api_password", "api_password": "test-password"},
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def local_auth(hass):
"""Load local auth provider."""
prv = homeassistant.HassAuthProvider(
hass, hass.auth._store, {"type": "homeassistant"}
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def hass_client(hass, aiohttp_client, hass_access_token):
"""Return an authenticated HTTP client."""
async def auth_client():
"""Return an authenticated client."""
return await aiohttp_client(
hass.http.app, headers={"Authorization": f"Bearer {hass_access_token}"},
)
return auth_client
@pytest.fixture
def hass_ws_client(aiohttp_client, hass_access_token):
"""Websocket client fixture connected to websocket server."""
async def create_client(hass, access_token=hass_access_token):
"""Create a websocket client."""
assert await async_setup_component(hass, "websocket_api", {})
client = await aiohttp_client(hass.http.app)
with patch("homeassistant.components.http.auth.setup_auth"):
websocket = await client.ws_connect(URL)
auth_resp = await websocket.receive_json()
assert auth_resp["type"] == TYPE_AUTH_REQUIRED
if access_token is None:
await websocket.send_json(
{"type": TYPE_AUTH, "access_token": "incorrect"}
)
else:
await websocket.send_json(
{"type": TYPE_AUTH, "access_token": access_token}
)
auth_ok = await websocket.receive_json()
assert auth_ok["type"] == TYPE_AUTH_OK
# wrap in client
websocket.client = client
return websocket
return create_client
|
the-stack_106_28680 | import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import re
from paddleocr import PaddleOCR, draw_ocr
import cv2 as cv
import numpy as np
import time
from collections import defaultdict
import json
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
keyword1 = "goal"
class PlayerTime:
def __init__(self, playername=''):
self.goaltime = defaultdict(str)
self.playername = playername
self.location = 0
self.use = 1
class OneLine:
def __init__(self, y1, y2):
self.y1 = y1
self.y2 = y2
self.strr = ""
self.times = []
self.location = ""
def makedir(new_dir):
if not os.path.exists(new_dir):
os.makedirs(new_dir)
EVENT_DIR = "D:/dataset/event"
VIDEO_DIR = "D:/dataset/video_6"
ocr = PaddleOCR(lang="en", gpu_mem=5000, det=False,
                rec_model_dir="./inference/en_ppocr_mobile_v2.0_rec_infer/")  # model files are downloaded automatically on first run
# Load the list of team names from file
team_name = []
f = open("team_name.txt", "r")
for line in f:
line = line[:-1]
team_name.append(line)
# result = process.extract(a, team_name, scorer=fuzz.token_set_ratio, limit=5)
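# Illustration only (hypothetical OCR output): fuzzy matching later resolves a
# noisy scoreboard string to the closest known club, e.g.
#     process.extractOne('Manchestor Unlted', team_name,
#                        scorer=fuzz.ratio, score_cutoff=70)
# returns the best match above the cutoff, or None.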
def get_ocr_result(videoCap, i, h_index):
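    """Read frame ``i`` from ``videoCap``, crop the caption region (the top
    strip when ``h_index`` < 200, otherwise everything below row ``h_index``),
    run PaddleOCR on it and return the concatenated text plus the raw result.
    """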
result = ''
ocr_list = []
temp_result = []
videoCap.set(cv.CAP_PROP_POS_FRAMES, i)
# i += 100;
boolFrame, matFrame = videoCap.read()
if boolFrame:
temp_jpgframe = np.asarray(matFrame)
        # Crop the top strip (or the lower caption region when h_index is large) for OCR detection
if h_index < 200:
jpgframe = temp_jpgframe[0:h_index]
else:
jpgframe = temp_jpgframe[h_index:]
        # Run OCR and collect the recognition results
temp_result = ocr(jpgframe)
for mystr, ration in temp_result[1]:
result += mystr
ocr_list.append(mystr)
result += ' '
return result, temp_result
def check_line(result_line, LorR, player_name_dic, big_candidate, score_time_dic, i, Player_Time_list, big_flag):
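    """Parse one side of the big on-screen caption.
    For every OCR'd line, extract the player name and goal minutes, handling
    penalties ('P'), own goals ('OG') and 45'/90' stoppage time; update the
    name and score-time counters and append a PlayerTime record. ``LorR`` is
    1 for the left-hand team and -1 for the right-hand team.
    """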
for lline in result_line:
P = 0
OG = 0
score_time = '0'
string_list = re.findall("[A-Za-z]+", lline.strr)
number_list = re.findall("\d+", lline.strr)
if (not string_list) or (not number_list):
continue
player_name = ''
# 这里目前只处理了普通的P、90加时的P以及普通的OG
# 还可能有45加时的P,以及45/90加时的OG
for string_one in string_list:
if string_one.upper() == 'P':
P = 1
P_add90_score_time_list = re.findall("90\D+\d\'? ?\(?P|90\D+\d\'? ?\[?P", lline.strr)
if P_add90_score_time_list:
add_number = re.findall("\d", P_add90_score_time_list[0])[0]
score_time = '90+' + add_number
score_time_list = re.findall("\d+\'? ?\(?P|\d+\'? ?\[?P", lline.strr)
if score_time_list:
score_time = re.findall("\d+", score_time_list[0])[0]
elif string_one.upper() == 'OG':
OG = 1
score_time_list = re.findall("\d+\'? ?\(?OG|\d+\'? ?\[?OG", lline.strr)
if score_time_list:
score_time = re.findall("\d+", score_time_list[0])[0]
else:
player_name += (string_one + ' ')
if not player_name or len(player_name) < 3:
continue
player_name_dic[player_name] += 1
one_playertime = PlayerTime(player_name)
one_playertime.location = LorR
if P and score_time != '0':
one_playertime.goaltime[score_time] = 'P'
elif OG and score_time != '0':
one_playertime.goaltime[score_time] = 'OG'
one_playertime.location = -LorR
add_number = ''
for number_one in number_list:
            # Skip this number if it is the added time that follows a 90
if number_one == add_number:
continue
            # A value of 90 (or 45) means there is stoppage time
if number_one == '90' or number_one == '45':
add_time = 1
# add_score_time_list = re.findall("\+\d\'?", lline.strr)
add90_score_time_list = re.findall("90\D+\d", lline.strr)
add45_score_time_list = re.findall("45\D+\d", lline.strr)
if add90_score_time_list:
add_number = add90_score_time_list[0][-1]
score_time = '90+' + add_number
                    # If this goal is not already marked as P or OG
if not one_playertime.goaltime[score_time]:
one_playertime.goaltime[score_time] = "add"
elif add45_score_time_list:
add_number = add45_score_time_list[0][-1]
score_time = '45+' + add_number
                    # If this goal is not already marked as P or OG
if not one_playertime.goaltime[score_time]:
one_playertime.goaltime[score_time] = "add"
else:
score_time = number_one
                    # If this goal is not already marked as P or OG
if not one_playertime.goaltime[score_time]:
one_playertime.goaltime[score_time] = "N"
big_candidate[player_name].add(score_time)
score_time_dic[score_time] += 1
big_flag = True
            # Ordinary goal case
            # The time should in theory have fewer than 3 digits, but match 1302 is a
            # special case with goal times of 115 and 101
elif len(number_one) < 4:
score_time = number_one
big_candidate[player_name].add(score_time)
                # If this goal is not already marked as P or OG
if not one_playertime.goaltime[score_time]:
one_playertime.goaltime[score_time] = "N"
score_time_dic[score_time] += 1
big_flag = True
Player_Time_list.append(one_playertime)
# print("check players name: {} goal_time: {} frame:{} goaltime:{}".format(
# player_name, big_candidate[player_name], i, one_playertime.goaltime))
del one_playertime
continue
return player_name_dic, big_candidate, score_time_dic, Player_Time_list, big_flag
def get_frames(video_dir):
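    """Walk ``video_dir``, OCR the caption region of each match video and
    accumulate candidate scorer names with their goal times, printing the
    surviving candidates for every processed video.
    """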
get = 0
miss = 0
total_num = 0
more = 0
# with open('video_6_score_change.json', 'r') as fd:
# Video_Score_dic = json.load(fd)
SCORES = {}
for root, _, files in os.walk(video_dir):
for video_index in files:
time1 = time.time()
video_index1 = video_index.split('_')
if video_index1[1] not in ['0059', '0060', '0061', '0062',
'0063', '0064', '0065', '0066', '0067', '0068', '0069', '0070',
'0071', '0072', '0073', '1040', '1041', '1045', '1046', '1047', '1048',
'1049', '1050', '1051', '1052', '1054',
'1055', '1056', '1057', '1058', '1059', '1171',
'1212', '1216', '1218', '1221', '1223', '1224', '1225',
'1226', '1228', '1230', '1231', '1233', '1236', '1237', '1238', '1239', '1242',
'1243', '1244', '1245']:
score_dic = {}
score_record = []
player_score_dic = {}
player_score_record = []
videoCap = cv.VideoCapture(video_dir + "\\" + video_index)
frame_height = videoCap.get(cv.CAP_PROP_FRAME_HEIGHT)
frame_width = videoCap.get(cv.CAP_PROP_FRAME_WIDTH)
big_h_index = int(frame_height * 7 / 10)
big_w_index = int(frame_width / 2)
x1_big_center = big_w_index - 50
x2_big_center = big_w_index + 50
frame_count = videoCap.get(cv.CAP_PROP_FRAME_COUNT)
print("-----------------------------")
print("video:{}".format(video_index1[1]))
i = frame_count - 20000
# i = 0
init_candidate = defaultdict(int)
player_name_dic = defaultdict(int)
score_time_dic = defaultdict(int)
big_candidate = defaultdict(set)
                location_player_score = 0  # scorer position: 0 means below the team name, 1 means above it
tempResult_index = 0
mode = 0
modify_candidate = defaultdict(int)
temp_result = []
gt_y2 = 0
left_team = ''
right_team = ''
left_team_x = 0
right_team_x = 0
team_y1 = 10000
team_y2 = 0
big_flag = False
big_nums = 6
Player_Time_list = []
while i < frame_count:
# if i > 78800:
# print("a")
                    # Evaluate the candidate scores
                    # Get the raw OCR output temp_result for frame i, plus the concatenated string result
result, temp_result = get_ocr_result(videoCap, i, 125)
big_result, big_temp_result = get_ocr_result(videoCap, i, big_h_index)
goal_flag = False
big_flag = False
team_nums = 0
left_line = []
right_line = []
left_big_temp_result = []
right_big_temp_result = []
if big_result:
                        # First check whether two team names are present
team_candidates_list = []
two_teams = False
for ii, [strr, ration] in enumerate(big_temp_result[1]):
x1_big = big_temp_result[0][ii][0][0]
x2_big = big_temp_result[0][ii][2][0]
y1_big = big_temp_result[0][ii][0][1]
y2_big = big_temp_result[0][ii][3][1]
                            # Skip strings located in the middle of the caption
if abs((x1_big + x2_big)/2 - big_w_index) < 4:
continue
team1 = process.extractOne(strr, team_name, scorer=fuzz.ratio,
score_cutoff=70)
if team1:
if team_candidates_list:
for team_candidate in team_candidates_list:
                                        # The two detected team names must have similar y coordinates
                                        # and x coordinates symmetric about the centre of the frame
if abs(team_candidate[2] - (y1_big + y2_big)/2) < 3 and\
abs((team_candidate[1] + (x1_big + x2_big)/2)/2 - big_w_index) < 10:
two_teams = True
if (x1_big + x2_big)/2 > big_w_index:
right_team = team1[0]
left_team = team_candidate[0]
                                                # Record the team-name x coordinate (centre), used later
                                                # to drop strings that sit too far towards the edge
left_team_x = team_candidate[1]
right_team_x = (x1_big + x2_big)/2
if x2_big < big_w_index:
left_team = team1[0]
right_team = team_candidate[0]
left_team_x = (x1_big + x2_big) / 2
right_team_x = team_candidate[1]
team_y1 = y1_big
team_y2 = y2_big
break
team_candidates_list.append([team1[0], (x1_big + x2_big)/2, (y1_big + y2_big)/2])
continue
if (x1_big + x2_big)/2 < big_w_index:
left_big_temp_result.append([strr, x1_big, x2_big, y1_big, y2_big])
else:
right_big_temp_result.append([strr, x1_big, x2_big, y1_big, y2_big])
                        # Two team names were detected, so this is the big caption; team_y1 and team_y2 are now known
if two_teams:
if left_big_temp_result:
sorted_left_big_result = sorted(left_big_temp_result, key=lambda student: (student[3], student[2]))
yy1 = sorted_left_big_result[0][3]
yy2 = sorted_left_big_result[0][4]
left_oneline_0 = OneLine(sorted_left_big_result[0][3], sorted_left_big_result[0][4])
line_index = 0
for ii, [strr, x1_big, x2_big, y1_big, y2_big] in enumerate(sorted_left_big_result):
if abs((y1_big + y2_big)/2 - (team_y1 + team_y2)/2) < 4 or left_team_x - (x1_big + x2_big)/2 > 100:
continue
                                    # Build a OneLine object per text row in a loop; locals() is used
                                    # here to turn a string into a variable name (a fragile trick).
if abs((yy1 + yy2)/2 - (y1_big + y2_big)/2) > 3:
left_line.append(locals()['left_oneline_' + str(line_index)])
del locals()['left_oneline_' + str(line_index)]
line_index += 1
locals()['left_oneline_' + str(line_index)] = OneLine(sorted_left_big_result[ii][3],
sorted_left_big_result[ii][4])
yy1 = y1_big
yy2 = y2_big
locals()['left_oneline_' + str(line_index)].strr += (strr + ' ')
left_line.append(locals()['left_oneline_' + str(line_index)])
del locals()['left_oneline_' + str(line_index)]
if right_big_temp_result:
sorted_right_big_result = sorted(right_big_temp_result, key=lambda student: (student[3], student[2]))
yy1 = sorted_right_big_result[0][3]
yy2 = sorted_right_big_result[0][4]
right_oneline_0 = OneLine(sorted_right_big_result[0][3], sorted_right_big_result[0][4])
line_index = 0
for ii, [strr, x1_big, x2_big, y1_big, y2_big] in enumerate(sorted_right_big_result):
if abs((y1_big + y2_big)/2 - (team_y1 + team_y2)/2) < 4 or (x1_big + x2_big)/2 - right_team_x > 100:
continue
                                    # Build a OneLine object per text row in a loop; locals() is used
                                    # here to turn a string into a variable name (a fragile trick).
if abs((yy1 + yy2)/2 - (y1_big + y2_big)/2) > 3:
right_line.append(locals()['right_oneline_' + str(line_index)])
del locals()['right_oneline_' + str(line_index)]
line_index += 1
locals()['right_oneline_' + str(line_index)] = OneLine(sorted_right_big_result[ii][3],
sorted_right_big_result[ii][4])
yy1 = y1_big
yy2 = y2_big
locals()['right_oneline_' + str(line_index)].strr += (strr + ' ')
right_line.append(locals()['right_oneline_' + str(line_index)])
del locals()['right_oneline_' + str(line_index)]
player_name_dic, big_candidate, score_time_dic, Player_Time_list, big_flag = check_line(
left_line, 1, player_name_dic, big_candidate, score_time_dic, i, Player_Time_list,
big_flag)
player_name_dic, big_candidate, score_time_dic, Player_Time_list, big_flag = check_line(
right_line, -1, player_name_dic, big_candidate, score_time_dic, i,
Player_Time_list,
big_flag)
                    # If a score was detected, step one frame and check the next one; otherwise skip ahead by a larger step
if goal_flag:
i += 1
mode = 2
continue
elif big_flag:
i += 2
big_nums += 5
continue
                    # No big caption for several consecutive checks
elif big_nums < 4 and not big_flag:
big_nums += 1
i += 2
continue
                    # The big caption just disappeared after having been present
elif big_nums > 6 and not big_flag:
big_nums = 1
i += 5
continue
                    # After several checks without a big caption, jump ahead and check again
elif big_nums == 4:
# if i+10000 < frame_count-5000:
# i += 10000
# else:
# i = max(i+1000, frame_count-5000)
if i + 5000 < frame_count - 4000:
i += 5000
elif i + 3000 < frame_count - 2000:
i += 3000
elif i > frame_count - 100:
i += 5
continue
else:
i = max(i + 2000, frame_count - 2000)
big_nums = 6
continue
elif i > frame_count - 100:
i += 5
continue
else:
i += 50
# print("can not get frames!")
continue
                # Intersect the goal-time sets in big_candidate; if the intersection is not empty,
                # keep the key with the higher count in player_name_dic and drop the other one
nn = len(Player_Time_list)
# names = list(Player_Time_list.keys())
score_times = list(score_time_dic.keys())
for i in range(nn):
if Player_Time_list[i].use == 1:
i_score_times = list(Player_Time_list[i].goaltime.keys())
i_playername = Player_Time_list[i].playername
                        # Compare against the other players
for j in range(i + 1, nn):
if Player_Time_list[j].use == 1:
j_score_times = list(Player_Time_list[j].goaltime.keys())
j_playername = Player_Time_list[j].playername
set_temp = set(i_score_times) & set(j_score_times)
if set_temp:
if player_name_dic[i_playername] >= player_name_dic[j_playername]:
# Player_Time_list[i].goaltime.update(Player_Time_list[j].goaltime)
Player_Time_list[j].use = 0
else:
# Player_Time_list[j].goaltime.update(Player_Time_list[i].goaltime)
Player_Time_list[i].use = 0
break
elif fuzz.token_set_ratio(i_playername, j_playername) >= 70:
                                    # Handle small goal-time adjustments (+1 or -1): the earlier entry
                                    # must be removed instead of simply taking the union
i_times = list(Player_Time_list[i].goaltime.keys())
j_times = list(Player_Time_list[j].goaltime.keys())
for j_time in j_times:
if "+" in j_time:
sj_index = j_time.find('+')
jj_time = int(j_time[0:sj_index]) + int(j_time[sj_index:])
else:
jj_time = int(j_time)
for i_time in i_times:
if Player_Time_list[i].goaltime[i_time]:
if j_time == i_time:
continue
else:
if "+" in i_time:
                                                    # Case where the time has the "90+xx" format
si_index = i_time.find('+')
ii_time = int(i_time[0:si_index]) + int(i_time[si_index:])
else:
ii_time = int(i_time)
if abs(jj_time - ii_time) == 1:
Player_Time_list[i].goaltime.pop(i_time)
                                    # Keep the record of whichever player name was seen more often
if player_name_dic[i_playername] >= player_name_dic[j_playername]:
Player_Time_list[i].goaltime.update(Player_Time_list[j].goaltime)
Player_Time_list[j].use = 0
else:
Player_Time_list[j].goaltime.update(Player_Time_list[i].goaltime)
Player_Time_list[i].use = 0
break
                        # After comparing with all the others:
if Player_Time_list[i].use == 1:
                            # Drop a goal time that was seen fewer than 5 times for this player
for score_time in list(Player_Time_list[i].goaltime.keys()):
if score_time_dic[score_time] < 5:
Player_Time_list[i].goaltime.pop(score_time)
                            # If the player has no goal time left, drop this player's record
if not Player_Time_list[i].goaltime:
Player_Time_list[i].use = 0
if player_name_dic[i_playername] < 5:
Player_Time_list[i].use = 0
for i in range(nn):
if Player_Time_list[i].use:
name = Player_Time_list[i].playername
if Player_Time_list[i].location == 1:
team = left_team
else:
team = right_team
print("name:{} team:{}".format(name, team))
print(Player_Time_list[i].goaltime)
    # Intersect the value sets in big_candidate; if the intersection is not empty, see whose key occurs more often in player_name_dic
    # and remove the less frequent key from big_candidate
nn = len(big_candidate)
names = list(big_candidate.keys())
score_times = list(score_time_dic.keys())
for i in range(nn):
if big_candidate[names[i]] != set('0'):
            # If one of this player's goal times occurs fewer than 5 times, drop that goal time
for score_time in list(big_candidate[names[i]]):
if score_time_dic[score_time] < 5:
big_candidate[names[i]].remove(score_time)
            # If the player has no goal times left, drop the player's record
if big_candidate[names[i]] == set():
big_candidate[names[i]] = set('0')
            # For the remaining players
for j in range(i + 1, nn):
if big_candidate[names[j]] != set('0'):
set_temp = big_candidate[names[i]] & big_candidate[names[j]]
if set_temp:
if player_name_dic[names[i]] >= player_name_dic[names[j]]:
big_candidate[names[j]] = set('0')
else:
big_candidate[names[i]] = set('0')
elif fuzz.token_set_ratio(names[i], names[j]) >= 70:
big_candidate[names[i]] = big_candidate[names[i]].union(big_candidate[names[j]])
big_candidate[names[i]] = set('0')
    # Keys in big_candidate whose value set is '0' should also be removed
for key in names:
if '0' in big_candidate[key]:
del big_candidate[key]
elif key.isdigit():
del big_candidate[key]
elif re.findall("\d\d", key):
del big_candidate[key]
print(big_candidate)
time2 = time.time()
print("time for this video:{}".format(time2 - time1))
# SCORES[video_index] = score_record
# with open('scores_3' + '.json', 'w') as fd:
# json.dump(SCORES, fd)
print("total_num:{} get:{} miss:{} more:{}".format(total_num, get, miss, more))
get_frames(video_dir=VIDEO_DIR)
|
the-stack_106_28682 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for Suggestion-related one-off jobs"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import exp_domain
from core.domain import question_domain
from core.domain import suggestion_jobs_one_off
from core.domain import suggestion_services
from core.platform import models
from core.tests import test_utils
import feconf
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class QuestionSuggestionMigrationJobManagerTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
QUESTION_ID = 'question_id'
def setUp(self):
super(QuestionSuggestionMigrationJobManagerTests, self).setUp()
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.process_and_flush_pending_mapreduce_tasks()
self.skill_id = 'skill_id'
self.save_new_skill(
self.skill_id, self.albert_id, description='Skill Description')
def _run_job_and_verify_output(self, expected_output):
"""Runs the QuestionSuggestionMigrationJobManager and
verifies that the output matches the expected output.
Args:
expected_output: list(str). The expected output from the one off
job.
"""
job_id = (
suggestion_jobs_one_off
.QuestionSuggestionMigrationJobManager.create_new())
(
suggestion_jobs_one_off
.QuestionSuggestionMigrationJobManager.enqueue(job_id)
)
self.process_and_flush_pending_mapreduce_tasks()
actual_output = (
suggestion_jobs_one_off
.QuestionSuggestionMigrationJobManager.get_output(job_id))
self.assertItemsEqual(actual_output, expected_output)
def test_migration_job_does_not_convert_up_to_date_suggestion(self):
suggestion_change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': [self.skill_id],
'inapplicable_skill_misconception_ids': []
},
'skill_id': self.skill_id,
'skill_difficulty': 0.3
}
suggestion = suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, self.skill_id, 1,
self.albert_id, suggestion_change, 'test description')
self.assertEqual(
suggestion.change.question_dict[
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION)
expected_output = [u'[u\'SUCCESS\', 1]']
self._run_job_and_verify_output(expected_output)
updated_suggestion = suggestion_services.get_suggestion_by_id(
suggestion.suggestion_id)
self.assertEqual(
updated_suggestion.change.question_dict[
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION)
def test_migration_job_after_deleting_question_suggestion(self):
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.albert_id, self.skill_id))
suggestion_models.GeneralSuggestionModel.delete_by_id(suggestion_id)
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion, None)
expected_output = []
self._run_job_and_verify_output(expected_output)
def test_migration_job_converts_old_question_suggestion(self):
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.albert_id, self.skill_id))
old_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get_by_id(suggestion_id))
self.assertEqual(
old_suggestion_model.change_cmd['question_dict'][
'question_state_data_schema_version'], 27)
expected_output = [u'[u\'SUCCESS\', 1]']
self._run_job_and_verify_output(expected_output)
updated_suggestion_model = (
suggestion_models.GeneralSuggestionModel.get_by_id(suggestion_id))
self.assertEqual(
updated_suggestion_model.change_cmd['question_dict'][
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION)
def test_migration_job_output_with_invalid_question_suggestion(self):
suggestion_id = (
self.save_new_question_suggestion_with_state_data_schema_v27(
self.albert_id, self.skill_id, suggestion_id='suggestion456'))
suggestion_model = (
suggestion_models.GeneralSuggestionModel.get_by_id(suggestion_id))
# Adding some invalid values in suggestion.
suggestion_model.language_code = None
suggestion_model.update_timestamps(update_last_updated_time=False)
suggestion_model.put()
expected_output = [
u'[u\'POST_MIGRATION_VALIDATION_FALIURE\', '
'[u"(\'suggestion456\', '
'ValidationError(u\'Expected language_code '
'to be en, received None\',))"]]'
]
self._run_job_and_verify_output(expected_output)
def test_migration_job_yields_no_output_for_non_question_suggestion(self):
exp_id = 'expId1'
exploration = (
self.save_new_linear_exp_with_state_names_and_interactions(
exp_id, self.albert_id, ['State 1', 'State 2'],
['TextInput'], category='Algebra'))
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': exploration.states['State 1'].content.html,
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
}
suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
feconf.ENTITY_TYPE_EXPLORATION,
exp_id, 1, self.albert_id, add_translation_change_dict,
'test description')
expected_output = []
self._run_job_and_verify_output(expected_output)
def test_migration_job_yields_exception_message(self):
        def mock_raise_exception_method(item):
raise Exception(item.id)
suggestion_id = 'suggestion456'
self.save_new_question_suggestion_with_state_data_schema_v27(
self.albert_id, self.skill_id, suggestion_id=suggestion_id)
with self.swap(
suggestion_services, 'get_suggestion_from_model',
            mock_raise_exception_method):
expected_output = [
u'[u\'MIGRATION_FAILURE\', [u"(\'suggestion456\', '
'Exception(\'suggestion456\',))"]]']
self._run_job_and_verify_output(expected_output)
|
the-stack_106_28683 | from pretalx.event.models import Organiser, Team
def create_organiser_with_user(*, name, slug, user):
organiser = Organiser.objects.create(name=name, slug=slug)
team = Team.objects.create(
organiser=organiser, name='Team {}'.format(name),
can_create_events=True, can_change_teams=True,
can_change_organiser_settings=True,
)
team.members.add(user)
return organiser, team
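# Example (illustrative sketch, commented out): a typical call inside a Django
# context where pretalx is installed and `user` is an existing User instance;
# the name and slug below are arbitrary placeholders.
#
#     organiser, team = create_organiser_with_user(
#         name='DemoCon', slug='democon', user=user)
#     assert team.can_create_events
#     assert user in team.members.all()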
|
the-stack_106_28684 | import torch
class SquashedMultivariateNormalDiag:
def __init__(self, loc, scale):
self._distribution = torch.distributions.normal.Normal(loc, scale)
def rsample_with_log_prob(self, shape=()):
samples = self._distribution.rsample(shape)
squashed_samples = torch.tanh(samples)
log_probs = self._distribution.log_prob(samples)
log_probs -= torch.log(1 - squashed_samples ** 2 + 1e-6)
return squashed_samples, log_probs
def rsample(self, shape=()):
samples = self._distribution.rsample(shape)
return torch.tanh(samples)
def sample(self, shape=()):
samples = self._distribution.sample(shape)
return torch.tanh(samples)
def log_prob(self, samples):
        '''The unsquashed samples needed for an exact log-probability cannot be recovered from their squashed values.'''
raise NotImplementedError(
'Not implemented to avoid approximation errors. '
            'Use rsample_with_log_prob directly.')
@property
def loc(self):
return torch.tanh(self._distribution.mean)
class DetachedScaleGaussianPolicyHead(torch.nn.Module):
def __init__(
self, loc_activation=torch.nn.Tanh, loc_fn=None,
scale_init=0.6, scale_min=1e-4, scale_max=1.,
distribution=torch.distributions.normal.Normal
):
super().__init__()
self.loc_activation = loc_activation
self.loc_fn = loc_fn
self.scale_init = scale_init
self.scale_min = scale_min
self.scale_max = scale_max
self.distribution = distribution
def initialize(self, input_size, action_size):
self.loc_layer = torch.nn.Sequential(
torch.nn.Linear(input_size, action_size), self.loc_activation())
if self.loc_fn:
self.loc_layer.apply(self.loc_fn)
scale = [[self.scale_init] * action_size]
self.log_scale = torch.nn.Parameter(
torch.log(torch.as_tensor(scale, dtype=torch.float32)))
def forward(self, inputs):
loc = self.loc_layer(inputs)
batch_size = inputs.shape[0]
scale = torch.exp(self.log_scale)
scale = torch.clamp(scale, self.scale_min, self.scale_max)
scale = scale.repeat(batch_size, 1)
return self.distribution(loc, scale)
class GaussianPolicyHead(torch.nn.Module):
def __init__(
self, loc_activation=torch.nn.Tanh, loc_fn=None,
scale_activation=torch.nn.Softplus, scale_min=1e-4, scale_max=1,
scale_fn=None, distribution=torch.distributions.normal.Normal
):
super().__init__()
self.loc_activation = loc_activation
self.loc_fn = loc_fn
self.scale_activation = scale_activation
self.scale_min = scale_min
self.scale_max = scale_max
self.scale_fn = scale_fn
self.distribution = distribution
def initialize(self, input_size, action_size):
self.loc_layer = torch.nn.Sequential(
torch.nn.Linear(input_size, action_size), self.loc_activation())
if self.loc_fn:
self.loc_layer.apply(self.loc_fn)
self.scale_layer = torch.nn.Sequential(
torch.nn.Linear(input_size, action_size), self.scale_activation())
if self.scale_fn:
self.scale_layer.apply(self.scale_fn)
def forward(self, inputs):
loc = self.loc_layer(inputs)
scale = self.scale_layer(inputs)
scale = torch.clamp(scale, self.scale_min, self.scale_max)
return self.distribution(loc, scale)
class DeterministicPolicyHead(torch.nn.Module):
def __init__(self, activation=torch.nn.Tanh, fn=None):
super().__init__()
self.activation = activation
self.fn = fn
def initialize(self, input_size, action_size):
self.action_layer = torch.nn.Sequential(
torch.nn.Linear(input_size, action_size),
self.activation())
if self.fn is not None:
self.action_layer.apply(self.fn)
def forward(self, inputs):
return self.action_layer(inputs)
class Actor(torch.nn.Module):
def __init__(self, encoder, torso, head):
super().__init__()
self.encoder = encoder
self.torso = torso
self.head = head
def initialize(
self, observation_space, action_space, observation_normalizer=None
):
size = self.encoder.initialize(
observation_space, observation_normalizer)
size = self.torso.initialize(size)
action_size = action_space.shape[0]
self.head.initialize(size, action_size)
def forward(self, *inputs):
out = self.encoder(*inputs)
out = self.torso(out)
return self.head(out)
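# Minimal smoke-test sketch for the pieces defined above. The shapes (a batch
# of 4 observations of size 8 and 2-dimensional actions) are arbitrary choices
# used only for illustration.
if __name__ == '__main__':
    torch.manual_seed(0)

    head = GaussianPolicyHead()
    head.initialize(input_size=8, action_size=2)
    dist = head(torch.randn(4, 8))
    assert dist.rsample().shape == (4, 2)

    squashed = SquashedMultivariateNormalDiag(
        loc=torch.zeros(4, 2), scale=torch.full((4, 2), 0.5))
    actions, log_probs = squashed.rsample_with_log_prob()
    # tanh squashing keeps every action strictly inside (-1, 1).
    assert actions.abs().max().item() < 1.0
    assert log_probs.shape == (4, 2)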
|
the-stack_106_28686 | import urllib.request
import urllib.error
import urllib.parse
import json
import sys
from arbitrage.public_markets.market import Market
class Bitstamp(Market):
def __init__(self, currency, code):
super().__init__(currency)
self.code = code
self.update_rate = 20
def update_depth(self):
url = "https://www.bitstamp.net/api/v2/order_book/" + self.code
req = urllib.request.Request(
url,
None,
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"User-Agent": "curl/7.24.0 (x86_64-apple-darwin12.0)",
},
)
res = urllib.request.urlopen(req)
depth = json.loads(res.read().decode("utf8"))
self.depth = self.format_depth(depth)
def sort_and_format(self, l, reverse):
r = []
for i in l:
r.append({"price": float(i[0]), "amount": float(i[1])})
r.sort(key=lambda x: float(x["price"]), reverse=reverse)
return r
def format_depth(self, depth):
bids = self.sort_and_format(depth["bids"], True)
asks = self.sort_and_format(depth["asks"], False)
return {"asks": asks, "bids": bids}
|
the-stack_106_28688 | import pygame as pg
class SpriteSheetExtractor:
def __init__(self, image, colorkey=None):
self.colorkey = colorkey
self.sheet = image
def image_at(self, rectangle, colorkey=None):
"""Load a specific image from a specific rect."""
rect = pg.Rect(rectangle)
if colorkey:
image = pg.Surface(rect.size).convert()
image.set_colorkey(colorkey)
else:
image = pg.Surface(rect.size).convert_alpha()
image.blit(self.sheet, (0, 0), rect)
return image
def images_at(self, rects, colorkey=None):
"""Load multiple images from a list of rects."""
ck = colorkey or self.colorkey
return [self.image_at(rect, ck) for rect in rects]
def load_strip(self, rect, image_count, offset=0, colorkey=None):
"""Load a strip of images and return them as a list."""
tups = [
(rect[0] + rect[2] * x + offset, rect[1], rect[2], rect[3])
for x in range(image_count)
]
ck = colorkey or self.colorkey
return self.images_at(tups, ck)
class SpriteStrip:
"""Splits a spritesheet strip into separate images."""
def __init__(self, img, start_frame=0):
self.img = img
total_frames = int(img.get_width() / img.get_height())
frame_width = img.get_width() / total_frames
self.frames = total_frames - start_frame
start_x = start_frame * frame_width
self.images = SpriteSheetExtractor(img).load_strip(
(0, 0, frame_width, img.get_height()),
image_count=self.frames,
offset=start_x,
)
def get_frame(self, index=0):
return self.images[index]
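# Small self-contained demo of the helpers above. It builds a 4-frame
# horizontal strip in memory rather than loading a file; the frame size and
# colors are arbitrary illustrative values.
if __name__ == '__main__':
    pg.init()
    pg.display.set_mode((1, 1))  # convert()/convert_alpha() require a display
    frame_size = 16
    strip = pg.Surface((4 * frame_size, frame_size), pg.SRCALPHA)
    for idx, color in enumerate([(255, 0, 0), (0, 255, 0),
                                 (0, 0, 255), (255, 255, 0)]):
        strip.fill(color, (idx * frame_size, 0, frame_size, frame_size))
    sprite_strip = SpriteStrip(strip)
    assert sprite_strip.frames == 4
    assert sprite_strip.get_frame(2).get_size() == (frame_size, frame_size)
    pg.quit()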
|
the-stack_106_28690 | # Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from unittest.mock import Mock, patch, mock_open
import os
import pytest
from io import BytesIO
from wca import metrics
from wca import perf
from wca import perf_const as pc
from wca.metrics import MetricName
from wca.perf import _parse_raw_event_name, _get_event_config, PerfCgroupDerivedMetricsGenerator, \
filter_out_event_names_for_cpu, check_perf_event_count_limit
from wca.platforms import CPUCodeName, Platform
@pytest.mark.parametrize("raw_value,time_enabled,time_running,expected_value,expected_factor", [
(300, 200, 100, 600, 2.0),
(0, 0, 0, 0, 0),
(200, 0, 100, 0, 0),
(200, 100, 0, 0, 0),
(300, 200, 200, 300, 1.0),
])
def test_scale_counter_value(raw_value, time_running, time_enabled, expected_value,
expected_factor):
assert perf._scale_counter_value(raw_value, time_enabled, time_running) == (
expected_value, expected_factor)
@patch('os.open', return_value=10)
def test_get_cgroup_fd(os_open):
assert perf._get_cgroup_fd('a_cgroup') == 10
os_open.assert_called_once_with('/sys/fs/cgroup/perf_event/a_cgroup',
os.O_RDONLY)
@patch('os.fdopen', return_value=object)
def test_create_file_from_valid_fd(_):
file = perf._create_file_from_fd(1)
assert file is not None
def test_create_file_from_invalid_fd():
with pytest.raises(perf.UnableToOpenPerfEvents):
perf._create_file_from_fd(-1)
@pytest.mark.parametrize("disabled_flag,expected", [
(True, pc.AttrFlags.exclude_guest | pc.AttrFlags.disabled | pc.AttrFlags.inherit),
(False, pc.AttrFlags.exclude_guest | pc.AttrFlags.inherit)
])
def test_create_event_attributes_disabled_flag(disabled_flag, expected):
assert perf._create_event_attributes(
metrics.MetricName.TASK_CYCLES,
disabled_flag,
cpu_code_name=CPUCodeName.UNKNOWN).flags == expected
@pytest.mark.parametrize("raw_string,expected", [
('0-31', list(range(32))),
('0', [0]),
('0-3,6,10-12', [0, 1, 2, 3, 6, 10, 11, 12])
])
def test_parse_online_cpus_string(raw_string, expected):
assert perf._parse_online_cpus_string(raw_string) == expected
@pytest.mark.parametrize("file,event_names,expected", [
(BytesIO(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x4a\x16\x00\x00\x00\x00\x00\x00"),
[metrics.MetricName.TASK_CYCLES],
{metrics.MetricName.TASK_CYCLES: 0,
metrics.MetricName.TASK_SCALING_FACTOR_AVG: 1.0,
metrics.MetricName.TASK_SCALING_FACTOR_MAX: 1.0}
),
# case with no scaling
(BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00\x26\xe7\xea\x29\x01\x00\x00\x00\x26\xe7\xea\x29"
b"\x01\x00\x00\x00\xa6\x6e\x1a\x9d\x08\x00\x00\x00\x1d\x17\x00\x00\x00\x00\x00\x00"
b"\xc8\xfd\x08\x88\x04\x00\x00\x00\x1e\x17\x00\x00\x00\x00\x00\x00\x18\xc8\x43\x00"
b"\x00\x00\x00\x00\x1f\x17\x00\x00\x00\x00\x00\x00"),
[metrics.MetricName.TASK_INSTRUCTIONS, metrics.MetricName.TASK_CYCLES,
metrics.MetricName.TASK_CACHE_MISSES],
{metrics.MetricName.TASK_INSTRUCTIONS: 36995493542,
metrics.MetricName.TASK_CYCLES: 19462159816,
metrics.MetricName.TASK_CACHE_MISSES: 4442136,
metrics.MetricName.TASK_SCALING_FACTOR_AVG: 1.0,
metrics.MetricName.TASK_SCALING_FACTOR_MAX: 1.0}
),
# case with 50% scaling factor
(BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00\xb2\xef\xff\x29\x01\x00\x00\x00\x07\x13\x08\x95"
b"\x00\x00\x00\x00\x86\xb2\xf1\x4b\x04\x00\x00\x00\x5d\x19\x00\x00\x00\x00\x00\x00"
b"\xbe\x74\x5f\x43\x02\x00\x00\x00\x5e\x19\x00\x00\x00\x00\x00\x00\xf0\xa5\x15\x00"
b"\x00\x00\x00\x00\x5f\x19\x00\x00\x00\x00\x00\x00"),
[metrics.MetricName.TASK_INSTRUCTIONS, metrics.MetricName.TASK_CYCLES,
metrics.MetricName.TASK_CACHE_MISSES],
{metrics.MetricName.TASK_INSTRUCTIONS: 36900158682,
metrics.MetricName.TASK_CYCLES: 19436397211,
metrics.MetricName.TASK_CACHE_MISSES: 2836869,
# TODO: assert for 2.0 with some margin
metrics.MetricName.TASK_SCALING_FACTOR_AVG: 1.9995750600302817,
metrics.MetricName.TASK_SCALING_FACTOR_MAX: 1.9995750600302817}
)
])
def test_parse_event_groups(file, event_names, expected):
assert perf._parse_event_groups(file, event_names, include_scaling_info=True) == expected
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_perf_counters_init(_open_mock, _get_cgroup_fd_mock):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
assert prf._group_event_leader_files == {}
_get_cgroup_fd_mock.assert_called_once()
_open_mock.assert_called_once()
@patch('builtins.open')
@patch('wca.perf._parse_online_cpus_string', return_value=[0])
def test_get_online_cpus(_parse_online_cpu_string_mock, open_mock):
assert perf._get_online_cpus() == [0]
open_mock.assert_called_with('/sys/devices/system/cpu/online', 'r')
@patch('wca.perf._parse_event_groups', return_value={
metrics.MetricName.TASK_CYCLES: 2,
metrics.MetricName.TASK_INSTRUCTIONS: 4,
metrics.MetricName.TASK_SCALING_FACTOR_MAX: 3,
metrics.MetricName.TASK_SCALING_FACTOR_AVG: 2})
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_read_metrics_non_aggregated(*args):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES],
platform_mock, aggregate_for_all_cpus_with_sum=False)
prf._group_event_leader_files[0] = {0: mock_open()}
assert prf.get_measurements() == {metrics.MetricName.TASK_CYCLES: {0: 2},
metrics.MetricName.TASK_INSTRUCTIONS: {0: 4},
metrics.MetricName.TASK_SCALING_FACTOR_AVG: {0: 2},
metrics.MetricName.TASK_SCALING_FACTOR_MAX: {0: 3}}
@patch('wca.perf._parse_event_groups', return_value={
metrics.MetricName.TASK_CYCLES: 2,
metrics.MetricName.TASK_INSTRUCTIONS: 4,
metrics.MetricName.TASK_SCALING_FACTOR_MAX: 3,
metrics.MetricName.TASK_SCALING_FACTOR_AVG: 2})
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_read_metrics_aggregated(*args):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES],
platform_mock, aggregate_for_all_cpus_with_sum=True)
prf._group_event_leader_files[0] = {0: mock_open()}
assert prf.get_measurements() == {metrics.MetricName.TASK_CYCLES: 2,
metrics.MetricName.TASK_INSTRUCTIONS: 4,
metrics.MetricName.TASK_SCALING_FACTOR_AVG: 2,
metrics.MetricName.TASK_SCALING_FACTOR_MAX: 3}
@patch('wca.perf.LIBC.ioctl', return_value=1)
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_reset_and_enable_group_event_leaders(_open_mock, _get_cgroup_fd_mock, ioctl_mock):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
# cpu0 group event leader mock
prf._group_event_leader_files = {0: Mock()}
prf._reset_and_enable_group_event_leaders()
ioctl_mock.assert_has_calls([mock.ANY] * 2)
@patch('wca.perf.LIBC.ioctl', return_value=-1)
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_reset_and_enable_group_event_leaders_reset_fail(
_open_mock, _get_cgroup_fd_mock, ioctl_mock
):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
# cpu0 group event leader mock
prf._group_event_leader_files = {0: Mock()}
with pytest.raises(OSError, match="Cannot reset perf counts"):
prf._reset_and_enable_group_event_leaders()
@patch('wca.perf.LIBC.ioctl', side_effect=[1, -1])
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_reset_and_enable_group_event_leaders_enable_fail(
_open_mock, _get_cgroup_fd_mock, ioctl_mock
):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
# cpu0 group event leader mock
prf._group_event_leader_files = {0: Mock()}
with pytest.raises(OSError, match="Cannot enable perf counts"):
prf._reset_and_enable_group_event_leaders()
@patch('os.close')
@patch('wca.perf._get_cgroup_fd', return_value=10)
@patch('wca.perf.PerfCounters._open')
def test_cleanup(_open_mock, _get_cgroup_fd_mock, os_close_mock):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
file_descriptor_mock = Mock()
file_descriptor_mock.close = Mock()
prf._group_event_leader_files = {'mock1': file_descriptor_mock, 'mock2': file_descriptor_mock}
prf._event_files = [file_descriptor_mock] * 3
prf.cleanup()
os_close_mock.assert_called_once_with(10)
file_descriptor_mock.close.assert_has_calls(
[mock.call()] * (len(prf._event_files)
+ len(prf._group_event_leader_files)))
@patch('wca.perf._get_cgroup_fd', return_value=10)
@patch('wca.perf.PerfCounters._open')
def test_open_for_cpu_wrong_arg(_open_mock, _get_cgroup_fd_mock):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [], platform_mock)
# let's check non-existent type of measurement
with pytest.raises(Exception, match='Unknown event name'):
prf._open_for_cpu(0, 'invalid_event_name')
@patch('os.fdopen')
@patch('wca.perf._perf_event_open', return_value=5)
@patch('wca.perf._get_cgroup_fd', return_value=10)
@patch('wca.perf.PerfCounters._open')
def test_open_for_cpu(_open_mock, _get_cgroup_fd_mock,
_perf_event_open_mock, fdopen_mock):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
prf._open_for_cpu(0, metrics.MetricName.TASK_CYCLES)
assert prf._group_event_leader_files == {0: mock.ANY}
assert prf._event_files == []
# perf_event_open call for the event group leader
_perf_event_open_mock.assert_called_once_with(
perf_event_attr=mock.ANY,
pid=10,
cpu=0,
group_fd=-1,
flags=pc.PERF_FLAG_PID_CGROUP | pc.PERF_FLAG_FD_CLOEXEC
)
fdopen_mock.assert_called_once_with(5, 'rb')
@patch('os.fdopen', side_effect=[Mock(fileno=Mock(return_value=5)),
Mock(fileno=Mock(return_value=6))])
@patch('wca.perf._perf_event_open', return_value=5)
@patch('wca.perf._get_cgroup_fd', return_value=10)
@patch('wca.perf.PerfCounters._open')
def test_open_for_cpu_with_existing_event_group_leader(_open_mock,
_get_cgroup_fd_mock,
_perf_event_open_mock, fdopen_mock):
platform_mock = Mock(Spec=Platform, cpu_model='intel xeon', cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [metrics.MetricName.TASK_CYCLES], platform_mock)
# Create event group leader
prf._open_for_cpu(0, metrics.MetricName.TASK_CYCLES)
# Create non leading event
prf._open_for_cpu(0, metrics.MetricName.TASK_INSTRUCTIONS)
assert prf._group_event_leader_files[0].fileno() == 5
assert prf._event_files[0].fileno() == 6
# perf_event_open call for non leading event
_perf_event_open_mock.assert_called_with(perf_event_attr=mock.ANY,
pid=-1,
cpu=0,
group_fd=5,
flags=pc.PERF_FLAG_FD_CLOEXEC)
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_get_measurements_zero_values_zero_cpus(_open_mock, _get_cgroup_fd_mock):
platform_mock = Mock(Spec=Platform, cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [], platform_mock)
prf._group_event_leaders = {}
assert prf.get_measurements() == {}
@patch('wca.perf._get_cgroup_fd')
@patch('wca.perf.PerfCounters._open')
def test_get_measurements_zero_values_one_cpu(_open_mock, _get_cgroup_fd_mock):
platform_mock = Mock(Spec=Platform, cpu_codename=CPUCodeName.SKYLAKE)
prf = perf.PerfCounters('/mycgroup', [], platform_mock)
# File descriptor mock for single cpu
prf._group_event_leaders = {0: Mock()}
assert prf.get_measurements() == {}
@pytest.mark.parametrize('event_name, expected_attr_config', [
('some__r000000', 0),
('some__r000001', 0x01000000),
('some__r0000ff', 0xff000000),
('some__r0302', 0x00000203),
('some__r0302ff', 0xff000203),
('some__rc000', 0x000000c0), # example of Instruction Retired
])
def test_parse_raw_event_name(event_name, expected_attr_config):
got_attr_config, _ = _parse_raw_event_name(event_name)
assert got_attr_config == expected_attr_config
@pytest.mark.parametrize('event_name, expected_match', [
('som', 'contain'),
('some__r00000100', 'length'),
('some__r0000xx', 'invalid literal'),
('some__rxx02', 'invalid literal'),
])
def test_parse_raw_event_name_invalid(event_name, expected_match):
with pytest.raises(Exception, match=expected_match):
_parse_raw_event_name(event_name)
@pytest.mark.parametrize('cpu, event_name, expected_config', [
(CPUCodeName.SKYLAKE, MetricName.TASK_STALLED_MEM_LOADS, 0x140014A3),
(CPUCodeName.BROADWELL, MetricName.TASK_STALLED_MEM_LOADS, 0x60006A3),
(CPUCodeName.SKYLAKE, MetricName.TASK_OFFCORE_REQUESTS_L3_MISS_DEMAND_DATA_RD, 0x000010B0),
(CPUCodeName.SKYLAKE,
MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 0x00001060),
])
def test_get_event_config(cpu, event_name, expected_config):
assert expected_config == _get_event_config(cpu, event_name)
def test_derived_metrics_flat():
def gm_func():
return {
MetricName.TASK_INSTRUCTIONS: 1000,
MetricName.TASK_CYCLES: 5,
MetricName.TASK_CACHE_MISSES: 10000,
MetricName.TASK_CACHE_REFERENCES: 50000,
}
derived_metrics_generator = PerfCgroupDerivedMetricsGenerator(
get_measurements_func=gm_func)
# First run, does not have enough information to generate those metrics.
with patch('time.time', return_value=1):
measurements = derived_metrics_generator.get_measurements()
assert MetricName.TASK_IPC not in measurements
assert MetricName.TASK_IPS not in measurements
assert MetricName.TASK_CACHE_HIT_RATIO not in measurements
assert MetricName.TASK_CACHE_MISSES_PER_KILO_INSTRUCTIONS not in measurements
# 5 seconds later
def gm_func_2():
return {
MetricName.TASK_INSTRUCTIONS: 11000, # 10k more
MetricName.TASK_CYCLES: 15, # 10 more
MetricName.TASK_CACHE_MISSES: 20000, # 10k more
MetricName.TASK_CACHE_REFERENCES: 100000, # 50k more
}
derived_metrics_generator.get_measurements_func = gm_func_2
with patch('time.time', return_value=6):
measurements = derived_metrics_generator.get_measurements()
assert MetricName.TASK_IPC in measurements
assert MetricName.TASK_IPS in measurements
assert MetricName.TASK_CACHE_HIT_RATIO in measurements
assert MetricName.TASK_CACHE_MISSES_PER_KILO_INSTRUCTIONS in measurements
assert measurements[MetricName.TASK_IPC] == 10000 / 10
assert measurements[MetricName.TASK_IPS] == 10000 / 5
# Assuming cache misses increase is 10k over all 50k cache references
# Cache hit ratio should be 40k / 50k = 80%
assert measurements[MetricName.TASK_CACHE_HIT_RATIO] == 0.8
# 10000 / (10k/1000) = 10000 / 10
assert measurements[MetricName.TASK_CACHE_MISSES_PER_KILO_INSTRUCTIONS] == 1000
@pytest.mark.parametrize('event_names, cpu_codename, expected', [
(['task_cycles', 'task_instructions', 'task_cache_misses', 'task_cache_references'],
CPUCodeName.SKYLAKE,
['task_cache_misses', 'task_cache_references', 'task_cycles', 'task_instructions']),
(['__r1234', 'task_instructions', 'task_cycles', 'task_cache_references'],
CPUCodeName.SKYLAKE,
['task_instructions', 'task_cache_references', 'task_cycles', '__r1234']),
([MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 'task_instructions',
'task_cache_misses', 'task_cache_references'],
CPUCodeName.SKYLAKE,
['task_cache_misses', 'task_cache_references',
MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 'task_instructions']),
([MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 'task_instructions',
'task_cache_misses', MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD],
CPUCodeName.SKYLAKE,
['task_cache_misses', MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD,
MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 'task_instructions']),
])
def test_parse_event_names(event_names, cpu_codename, expected):
parsed_event_names = filter_out_event_names_for_cpu(event_names, cpu_codename)
assert set(parsed_event_names) == set(expected)
@pytest.mark.parametrize('event_names, cpu_codename', [
(
['task_cycles', 'task_instructions', 'task_cache_misses', 'false_metric'],
CPUCodeName.SKYLAKE),
(
['__r1234', 'task_instructions', 'false_metric', 'task_cache_references'],
CPUCodeName.SKYLAKE),
([MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 'task_instructions',
'false_metric', 'task_cache_references'], CPUCodeName.SKYLAKE),
([MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD, 'false_metric',
'task_cache_misses', MetricName.TASK_OFFCORE_REQUESTS_OUTSTANDING_L3_MISS_DEMAND_DATA_RD],
CPUCodeName.SKYLAKE)])
def test_exception_parse_event_names(event_names, cpu_codename):
with pytest.raises(Exception):
filter_out_event_names_for_cpu(event_names, cpu_codename)
@pytest.mark.parametrize('event_names, cpus, cores, expected', [
# for HT enabled 5 is too much
(['e1', 'e2', 'e3', 'e4', 'e5'], 8, 4, False),
# for HT disabled 8 is ok
(['e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8'], 16, 16, True),
# for HT disabled 9 is too much
(['e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9'], 16, 16, False),
# fixed counters are not taken into consideration
(['task_cycles', 'task_instructions', 'e1', 'e2', 'e3', 'e4'], 4, 8, True),
# fixed counters are not taken into consideration
(['task_cycles', 'task_instructions', 'e1', 'e2', 'e3', 'e4',
'e5', 'e6', 'e7', 'e8'], 8, 8, True),
# HD=disabled fixed counters are not taken into consideration
(['task_cycles', 'task_instructions', 'e1', 'e2', 'e3', 'e4', 'e5'], 8, 4, False),
])
def test_check_out_perf_event_names(event_names, cpus, cores, expected):
got = check_perf_event_count_limit(event_names, cpus, cores)
assert expected == got
|
the-stack_106_28691 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from transitleastsquares import transit_mask
import numpy as np
from lightkurve import LightCurve
import transit_tools.constants as c
##function to save all diagnostic plots as combined png
##function to generate vetting sheet
def tls_vetsheet(lc, results=0, show=True, save=False, savename='vetsheet.png'):
#!!Add functionality to plot without having to run search?!!
#!!Add vertical lines for expected transit times on unprocessed LC!!
#!!Change x-axis phase values for odd-even plot!!
#!!Make processed and unprocessed x-axes line up, especially when LC is
# masked. Maybe include grayed-out parts that were masked!!
"""
Function to plot a vetting sheet after running the
transit_tools.signal_search function. Output is formatted to fit onto a
standard 8"x11" sheet of paper.
Parameters
----------
lc : `transit_tools.lightcurve` object
Input transit_tools `transit_tools.lightcurve` object that has had a
TLS signal search performed on it.
results : int
Index of results attribute array of provided 'lightcurve' object.
Indicates which set of results to plot if signal_search produced more
than one set of output results. Can be set to -1 to display the most
recent signal run that did not meet the significance threshold.
show : bool or str
Flag to determine whether plots will be displayed or not. Must be set to
False or 'both' for output matplotlib object to be expected.
save : bool
Flag to determine whether the plots will be saved as a PNG.
savename : str or None
File name for plots to be saved as if save is set to True.
Returns
-------
plots : matplotlib object
Output matplotlib plot object. Optional if show is set to False or
'both'.
"""
if results == -1 and len(lc.results) > 0:
time = lc.cleanlc[-1].time.value
flux = lc.cleanlc[-1].flux.value
flux_err = lc.cleanlc[-1].flux_err.value
elif len(lc.results) > 0:
res = lc.results[results]
if results == -1:
res = lc.bad_search[0]
if results == 0 or len(lc.results) == 0:
time = lc.time.value
flux = lc.flux.value
flux_err = lc.flux_err.value
else:
time = lc.cleanlc[results].time.value
flux = lc.cleanlc[results].flux.value
flux_err = lc.cleanlc[results].flux_err.value
#setting up figure
fig = plt.figure(figsize=(8, 9.2))
gs = fig.add_gridspec(5, 2)
#phase-folded light curve with transit model
ax = fig.add_subplot(gs[0, 1])
fold = LightCurve(res.folded_phase, res.folded_y).bin(20)
ax.plot(res.model_folded_phase, res.model_folded_model, color='red',
alpha=0.7)
ax.scatter(res.folded_phase, res.folded_y, color='blue', s=0.2, alpha=0.5,
zorder=2)
ax.scatter(fold.time.value, fold.flux.value, s=2., c='k')
ax.set_xlim(0.45, 0.55)
ax.set_xlabel('Phase')
ax.set_ylabel('Relative Flux')
#raw light curve
ax = fig.add_subplot(gs[1, :])
ax.scatter(lc.raw_lc.time.value, lc.raw_lc.flux.value, s=0.2, c='b',
alpha=0.5)
ax.set_xlabel('Time [BTJD]')
ax.set_ylabel('Raw Relative Flux')
ax.set_xlim(lc.raw_lc.time.value.min(), lc.raw_lc.time.value.max())
if hasattr(lc, 'trend'):
plt.plot(lc.trend.time.value, lc.trend.flux.value, c='g')
#processed light curve with transit model
ax = fig.add_subplot(gs[2, :])
transit_time = transit_mask(time, res.period, res.duration, res.T0)
time_notrans = time[~transit_time]
flux_notrans = flux[~transit_time]
flux_err_notrans = flux_err[~transit_time]
ax.scatter(time[transit_time], flux[transit_time], color='red', s=0.2,
zorder=0)
ax.scatter(time[~transit_time], flux[~transit_time], color='blue',
alpha=0.5, s=0.2, zorder=0)
ax.plot(res.model_lightcurve_time, res.model_lightcurve_model, alpha=0.5,
color='red', zorder=1)
ax.set_xlim(time.min(), time.max())
ax.set_ylim(flux.min()-0.001, flux.max()+0.001)
ax.set_xlabel('Time [BTJD]')
ax.set_ylabel('Relative Flux')
#TLS periodogram
ax = fig.add_subplot(gs[3, :])
ax.axvline(res.period, alpha=0.4, lw=3)
ax.set_xlim(np.min(res.periods), np.max(res.periods))
for n in range(2, 10):
ax.axvline(n * res.period, alpha=0.4, lw=1, linestyle='dashed')
ax.axvline(res.period / n, alpha=0.4, lw=1, linestyle='dashed')
ax.set_ylabel('SDE')
ax.set_xlabel('Period [days]')
ax.plot(res.periods, res.power, color='k', lw=0.5)
ax.set_xlim(0, max(res.periods))
#secondary eclipse
ax = fig.add_subplot(gs[4, 0])
ax.plot(res.model_folded_phase-0.5,
np.roll(res.model_folded_model, len(res.model_folded_model)//2),
color='red')
ax.scatter(res.folded_phase-0.5,
np.roll(res.folded_y, len(res.folded_y)//2), color='blue',
s=0.2, alpha=0.5, zorder=2)
ax.set_xlim(-0.05, 0.05)
ax.set_xlabel('Phase')
ax.set_ylabel('Relative Flux')
ax.axvspan(-(res.duration/2/res.period), (res.duration/2/res.period),
alpha=0.3, color='orange', label='Transit Duration')
ax.legend(loc=2, fontsize='x-small')
#Odd-Even comparison
ax = fig.add_subplot(gs[4, 1])
oe = LightCurve(time, flux, flux_err)
oe_odd = oe.fold(2*res.period, res.T0)
oe_even = oe.fold(2*res.period, res.T0+res.period)
ax.scatter(oe_odd.time.value+0.5, oe_odd.flux.value, s=0.2, c='b', label='Odd')
ax.scatter(oe_even.time.value+0.5, oe_even.flux.value, s=0.2, c='r', label='Even')
ax.set_xlim(0.475, 0.525)
ax.set_xlabel('Phase')
ax.set_ylabel('Relative Flux')
ax.set_xticks([0.48, 0.49, 0.50, 0.51, 0.52])
ax.set_xticklabels(['0.46', '0.48', '0.50', '0.52', '0.54'])
ax.legend(loc=2, fontsize='x-small')
#plot summary text
fig.text(0.12, 0.97, s=(str(lc.name) + ' (TIC ' + str(lc.tic) + ')'),
fontweight='bold')
fig.text(0.04, 0.95,
s=(r'P = %.5f +/- %.5f d, $t_{0}$ = %.5f BTJD' %
(res.period, res.period_uncertainty, res.T0)))
fig.text(0.04, 0.93,
s=(r'$T_{dur}$ = %.5f d, %s/%s transits with data' %
(res.duration, res.distinct_transit_count, res.transit_count)))
fig.text(0.04, 0.91,
s=('SDE = %.2f, SNR = %.2f, FAP = %.3e' %
(res.SDE, res.snr, res.FAP)))
fig.text(0.04, 0.89,
s=(r'$R_{P}$/$R_{*}$ = %.4f, $R_{P}$ = %.3f $R_{\bigoplus}$ = %.3f $R_{Jup}$' %
(res.rp_rs,
(lc.star_params_tls['rstar']*res.rp_rs*c.Rsolar_m)/c.Rearth_m,
(lc.star_params_tls['rstar']*res.rp_rs*c.Rsolar_m)/c.Rjup_m)))
fig.text(0.04, 0.87,
s=(r'odd/even mismatch = %.2f $\sigma$, $\delta$ = %.4f' %
(res.odd_even_mismatch, 1-res.depth)))
fig.text(0.04, 0.85,
s=(r'$R_{*}$ = %.2f (+%.2f, -%.2f) $R_{\bigodot}$'
% (lc.star_params_tls['rstar'], lc.star_params_tls['rhigh'],
lc.star_params_tls['rlow'])))
fig.text(0.04, 0.83, s=(r'$M_{*}$ = %.2f (+%.2f, -%.2f) $M_{\bigodot}$' %
(lc.star_params_tls['mstar'],
lc.star_params_tls['mhigh'],
lc.star_params_tls['mlow'])))
if lc.star_params is not None and lc.star_params['Tmag'] is not None:
fig.text(0.04, 0.81, s=('Tmag = %.2f' % (lc.star_params['Tmag'])))
plt.tight_layout()
if show:
plt.show()
if save:
plt.savefig(savename)
def bls_vetsheet(lc, results=0, show=True, save=False, savename='vetsheet.png'):
"""
Function to plot a vetting sheet after running the
transit_tools.signal_search function. Output is formatted to fit onto a
standard 8"x11" sheet of paper.
Parameters
----------
lc : `transit_tools.lightcurve` object
Input transit_tools `transit_tools.lightcurve` object that has had a
BLS signal search performed on it.
results : int
Index of results attribute array of provided 'lightcurve' object.
Indicates which set of results to plot if signal_search produced more
than one set of output results. Can be set to -1 to display the most
recent signal run that did not meet the significance threshold.
show : bool or str
Flag to determine whether plots will be displayed or not. Must be set to
False or 'both' for output matplotlib object to be expected.
save : bool
Flag to determine whether the plots will be saved as a PNG.
savename : str or None
File name for plots to be saved as if save is set to True.
Returns
-------
plots : matplotlib object
Output matplotlib plot object. Optional if show is set to False or
'both'.
"""
if not hasattr(lc, 'results') or len(lc.results) == 0:
raise ValueError('lightcurve object has no results.')
elif len(lc.results) > 0:
res = lc.results[results]
bls = lc.blsobjs[results] #rerun on cleanlc[results]?
model = bls.get_transit_model(period=res['period'],
transit_time=res['t0'],
duration=res['duration'])
if results == 0 or len(lc.results) == 0:
time = lc.time
flux = lc.flux
flux_err = lc.flux_err
else:
time = lc.cleanlc[results].time
flux = lc.cleanlc[results].flux
flux_err = lc.cleanlc[results].flux_err
lc_tmp = LightCurve(time, flux, flux_err)
#setting up figure
fig = plt.figure(figsize=(8, 9.2))
gs = fig.add_gridspec(5, 2)
#phase-folded light curve with transit model
ax = fig.add_subplot(gs[0, 1])
fold = lc_tmp.fold(res['period'], t0=res['t0'])
binfold = fold.bin(20)
ax.scatter(fold.time, fold.flux, color='blue', s=0.2,
alpha=0.5, zorder=2)
model.fold(res['period'], t0=res['t0']).plot(ax=ax, color='r', lw=2,
alpha=0.7)
ax.scatter(binfold.time, binfold.flux, c='k', s=2.)
ax.set_xlim(-0.05, 0.05)
ax.set_xlabel('Phase')
ax.set_ylabel('Relative Flux')
plt.tight_layout()
if show:
plt.show()
if save:
plt.savefig(savename)
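# Example (illustrative sketch, commented out): both vetting sheets expect a
# transit_tools `lightcurve` object on which a signal search has already been
# run; the variable and file names below are placeholders.
#
#     tls_vetsheet(lc, results=0, show=False, save=True,
#                  savename='tic12345_tls_vetsheet.png')
#     bls_vetsheet(lc, results=0, show=False, save=True,
#                  savename='tic12345_bls_vetsheet.png')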
##search-specific plots
|
the-stack_106_28692 | #
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import unittest
from gafferpy import gaffer as g
from gafferpy import gaffer_connector
class GafferPredicatesIntegrationTest(unittest.TestCase):
def test_all_predicates_have_classes(self):
gc = gaffer_connector.GafferConnector(
'http://localhost:8080/rest/latest')
predicates = gc.execute_get(
g.GetFilterFunctions()
)
predicates = json.loads(predicates)
ignore_predicates = [
'uk.gov.gchq.koryphe.predicate.AdaptedPredicate',
'uk.gov.gchq.koryphe.predicate.AdaptedPredicate',
'uk.gov.gchq.koryphe.predicate.PredicateComposite',
'uk.gov.gchq.gaffer.rest.example.ExampleFilterFunction',
'uk.gov.gchq.koryphe.tuple.predicate.TupleAdaptedPredicate',
'uk.gov.gchq.gaffer.data.element.function.ElementFilter',
'uk.gov.gchq.koryphe.tuple.predicate.TupleAdaptedPredicateComposite',
'uk.gov.gchq.gaffer.store.util.AggregatorUtil$IsElementAggregated',
'uk.gov.gchq.gaffer.graph.hook.migrate.predicate.TransformAndFilter'
]
for i in ignore_predicates:
            if i in predicates:
                predicates.remove(i)
for op in predicates:
self.assertTrue(op in g.JsonConverter.GENERIC_JSON_CONVERTERS,
'Missing predicate class: ' + op)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_28695 | #
# Code under the MIT license by Alexander Pruss
#
from mcturtle import *
t = Turtle()
t.penblock(GOLD_BLOCK)
#t.turtle(GIANT)
t.pendelay(0.01)
for i in range(7):
t.go(50)
t.left(180.-180./7)
|
the-stack_106_28697 | from collections import defaultdict
class Vocabulary:
def __init__(self):
pass
def __len__(self):
return self.__size
def stoi(self, s):
return self.__stoi[s]
def itos(self, i):
return self.__itos[i]
@staticmethod
def new(list_generator, size):
self = Vocabulary()
self.__size = size
word_freq = defaultdict(lambda: 0)
for words in list_generator:
for word in words:
word_freq[word] += 1
self.__stoi = defaultdict(lambda: 0)
self.__stoi['<unk>'] = 0
self.__stoi['<s>'] = 1
self.__stoi['</s>'] = 2
self.__itos = [''] * self.__size
self.__itos[0] = '<unk>'
self.__itos[1] = '<s>'
self.__itos[2] = '</s>'
for i, (k, v) in zip(range(self.__size - 3), sorted(word_freq.items(), key=lambda x: -x[1])):
self.__stoi[k] = i + 3
self.__itos[i + 3] = k
return self
def save(self, filename):
with open(filename, 'w') as fp:
            print(self.__size, file=fp)
            for i in range(self.__size):
                print(self.__itos[i], file=fp)
@staticmethod
def load(filename):
with open(filename) as fp:
self = Vocabulary()
self.__size = int(next(fp))
self.__stoi = defaultdict(lambda: 0)
self.__itos = [''] * self.__size
for i in range(self.__size):
s = next(fp).strip()
if s:
self.__stoi[s] = i
self.__itos[i] = s
return self
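# Minimal usage sketch with a toy corpus; the token lists and vocabulary size
# are arbitrary illustrative values.
if __name__ == '__main__':
    corpus = [['the', 'cat', 'sat'], ['the', 'dog', 'sat'], ['the', 'end']]
    vocab = Vocabulary.new(corpus, size=8)
    assert len(vocab) == 8
    assert vocab.itos(vocab.stoi('the')) == 'the'
    # Words never seen during construction map to the reserved <unk> index 0.
    assert vocab.stoi('unseen-word') == 0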
|
the-stack_106_28699 | from contextlib import contextmanager
import dbt.exceptions
from dbt.adapters.base import Credentials
from dbt.adapters.sql import SQLConnectionManager
from dbt.logger import GLOBAL_LOGGER as logger
from dataclasses import dataclass
from typing import Optional
from dbt.helper_types import Port
from datetime import datetime
import decimal
import re
import prestodb
from prestodb.transaction import IsolationLevel
import sqlparse
@dataclass
class PrestoCredentials(Credentials):
host: str
port: Port
user: str
password: Optional[str] = None
method: Optional[str] = None
_ALIASES = {
'catalog': 'database'
}
@property
def type(self):
return 'presto'
def _connection_keys(self):
return ('host', 'port', 'user', 'database', 'schema')
class ConnectionWrapper(object):
"""Wrap a Presto connection in a way that accomplishes two tasks:
- prefetch results from execute() calls so that presto calls actually
persist to the db but then present the usual cursor interface
- provide `cancel()` on the same object as `commit()`/`rollback()`/...
"""
def __init__(self, handle):
self.handle = handle
self._cursor = None
self._fetch_result = None
def cursor(self):
self._cursor = self.handle.cursor()
return self
def cancel(self):
if self._cursor is not None:
self._cursor.cancel()
def close(self):
# this is a noop on presto, but pass it through anyway
self.handle.close()
def commit(self):
pass
def rollback(self):
pass
def start_transaction(self):
pass
def fetchall(self):
if self._cursor is None:
return None
if self._fetch_result is not None:
ret = self._fetch_result
self._fetch_result = None
return ret
return None
def execute(self, sql, bindings=None):
if bindings is not None:
# presto doesn't actually pass bindings along so we have to do the
# escaping and formatting ourselves
bindings = tuple(self._escape_value(b) for b in bindings)
sql = sql % bindings
result = self._cursor.execute(sql)
self._fetch_result = self._cursor.fetchall()
return result
@property
def description(self):
return self._cursor.description
@classmethod
def _escape_value(cls, value):
"""A not very comprehensive system for escaping bindings.
I think "'" (a single quote) is the only character that matters.
"""
numbers = (decimal.Decimal, int, float)
if value is None:
return 'NULL'
elif isinstance(value, str):
return "'{}'".format(value.replace("'", "''"))
elif isinstance(value, numbers):
return value
elif isinstance(value, datetime):
time_formatted = value.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return "TIMESTAMP '{}'".format(time_formatted)
else:
raise ValueError('Cannot escape {}'.format(type(value)))
class PrestoConnectionManager(SQLConnectionManager):
TYPE = 'presto'
@contextmanager
def exception_handler(self, sql):
try:
yield
# TODO: introspect into `DatabaseError`s and expose `errorName`,
# `errorType`, etc instead of stack traces full of garbage!
except Exception as exc:
logger.debug("Error while running:\n{}".format(sql))
logger.debug(exc)
raise dbt.exceptions.RuntimeException(str(exc))
def add_begin_query(self):
connection = self.get_thread_connection()
with self.exception_handler('handle.start_transaction()'):
connection.handle.start_transaction()
def add_commit_query(self):
connection = self.get_thread_connection()
with self.exception_handler('handle.commit()'):
connection.handle.commit()
@classmethod
def open(cls, connection):
if connection.state == 'open':
logger.debug('Connection is already open, skipping open.')
return connection
credentials = connection.credentials
if credentials.method == 'ldap':
auth = prestodb.auth.BasicAuthentication(
credentials.user,
credentials.password,
)
http_scheme = "https"
elif credentials.method == 'kerberos':
auth = prestodb.auth.KerberosAuthentication()
http_scheme = "https"
else:
auth = prestodb.constants.DEFAULT_AUTH
http_scheme = "http"
# it's impossible for presto to fail here as 'connections' are actually
# just cursor factories.
presto_conn = prestodb.dbapi.connect(
host=credentials.host,
port=credentials.port,
user=credentials.user,
catalog=credentials.database,
schema=credentials.schema,
http_scheme=http_scheme,
auth=auth,
isolation_level=IsolationLevel.AUTOCOMMIT
)
connection.state = 'open'
connection.handle = ConnectionWrapper(presto_conn)
return connection
@classmethod
def get_response(cls, cursor):
# this is lame, but the cursor doesn't give us anything useful.
return 'OK'
def cancel(self, connection):
connection.handle.cancel()
def add_query(self, sql, auto_begin=True,
bindings=None, abridge_sql_log=False):
connection = None
cursor = None
# TODO: is this sufficient? Largely copy+pasted from snowflake, so
# there's some common behavior here we can maybe factor out into the
# SQLAdapter?
queries = [q.rstrip(';') for q in sqlparse.split(sql)]
for individual_query in queries:
# hack -- after the last ';', remove comments and don't run
# empty queries. this avoids using exceptions as flow control,
# and also allows us to return the status of the last cursor
without_comments = re.sub(
re.compile('^.*(--.*)$', re.MULTILINE),
'', individual_query).strip()
if without_comments == "":
continue
parent = super(PrestoConnectionManager, self)
connection, cursor = parent.add_query(
individual_query, auto_begin, bindings,
abridge_sql_log
)
if cursor is None:
raise dbt.exceptions.RuntimeException(
"Tried to run an empty query on model '{}'. If you are "
"conditionally running\nsql, eg. in a model hook, make "
"sure your `else` clause contains valid sql!\n\n"
"Provided SQL:\n{}".format(connection.name, sql)
)
return connection, cursor
def execute(self, sql, auto_begin=False, fetch=False):
_, cursor = self.add_query(sql, auto_begin)
status = self.get_response(cursor)
table = self.get_result_from_cursor(cursor)
return status, table
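# Illustration of the manual binding escape defined above (the values are
# arbitrary examples; running this requires the same dependencies as the
# adapter itself).
if __name__ == '__main__':
    assert ConnectionWrapper._escape_value(None) == 'NULL'
    assert ConnectionWrapper._escape_value("O'Brien") == "'O''Brien'"
    assert ConnectionWrapper._escape_value(42) == 42
    assert ConnectionWrapper._escape_value(
        datetime(2020, 1, 2, 3, 4, 5, 678000)
    ) == "TIMESTAMP '2020-01-02 03:04:05.678'"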
|
the-stack_106_28700 | import numpy as np
from mmocr.datasets.pipelines import LoadTextAnnotations
def _create_dummy_ann():
results = {}
results['img_info'] = {}
results['img_info']['height'] = 1000
results['img_info']['width'] = 1000
results['ann_info'] = {}
results['ann_info']['masks'] = []
results['mask_fields'] = []
results['ann_info']['masks_ignore'] = [
[[499, 94, 531, 94, 531, 124, 499, 124]],
[[3, 156, 81, 155, 78, 181, 0, 182]],
[[11, 223, 59, 221, 59, 234, 11, 236]],
[[500, 156, 551, 156, 550, 165, 499, 165]]
]
return results
def test_loadtextannotation():
results = _create_dummy_ann()
with_bbox = True
with_label = True
with_mask = True
with_seg = False
poly2mask = False
loader = LoadTextAnnotations(with_bbox, with_label, with_mask, with_seg,
poly2mask)
output = loader._load_masks(results)
assert len(output['gt_masks_ignore']) == 4
assert np.allclose(output['gt_masks_ignore'].masks[0],
[[499, 94, 531, 94, 531, 124, 499, 124]])
|
the-stack_106_28703 | import tensorflow as tf
import numpy as np
from crf_rnn_layer import crf_rnn_layer
def get_spatial_rank(x):
"""
:param x: an input tensor with shape [batch_size, ..., num_channels]
:return: the spatial rank of the tensor i.e. the number of spatial dimensions between batch_size and num_channels
"""
return len(x.get_shape()) - 2
def get_num_channels(x):
"""
:param x: an input tensor with shape [batch_size, ..., num_channels]
:return: the number of channels of x
"""
return int(x.get_shape()[-1])
def get_spatial_size(x):
"""
:param x: an input tensor with shape [batch_size, ..., num_channels]
:return: The spatial shape of x, excluding batch_size and num_channels.
"""
return x.get_shape()[1:-1]
def constant_initializer(value, shape, lambda_initializer=True):
if lambda_initializer:
return np.full(shape, value).astype(np.float32)
else:
return tf.constant(value, tf.float32, shape)
def xavier_initializer_convolution(shape, dist='uniform', lambda_initializer=True):
"""
Xavier initializer for N-D convolution patches. input_activations = patch_volume * in_channels;
    output_activations = patch_volume * out_channels; Uniform: lim = sqrt(6/(input_activations + output_activations))
    Normal: stddev = sqrt(3/(input_activations + output_activations))
:param shape: The shape of the convolution patch i.e. spatial_shape + [input_channels, output_channels]. The order of
input_channels and output_channels is irrelevant, hence this can be used to initialize deconvolution parameters.
:param dist: A string either 'uniform' or 'normal' determining the type of distribution
:param lambda_initializer: Whether to return the initial actual values of the parameters (True) or placeholders that
are initialized when the session is initiated
:return: A numpy araray with the initial values for the parameters in the patch
"""
s = len(shape) - 2
num_activations = np.prod(shape[:s]) * np.sum(shape[s:]) # input_activations + output_activations
if dist == 'uniform':
lim = np.sqrt(6. / num_activations)
if lambda_initializer:
return np.random.uniform(-lim, lim, shape).astype(np.float32)
else:
return tf.random_uniform(shape, minval=-lim, maxval=lim)
if dist == 'normal':
stddev = np.sqrt(3. / num_activations)
if lambda_initializer:
return np.random.normal(0, stddev, shape).astype(np.float32)
else:
            return tf.truncated_normal(shape, mean=0, stddev=stddev)
raise ValueError('Distribution must be either "uniform" or "normal".')
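# Shape convention example: a 5x5x5 convolution mapping 16 -> 32 channels uses
# shape = [5, 5, 5, 16, 32], so num_activations = 125 * (16 + 32) = 6000 and
# the uniform limit is sqrt(6 / 6000) ~= 0.0316.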
def convolution(x, filter, padding='SAME', strides=None, dilation_rate=None):
w = tf.get_variable(name='weights', initializer=xavier_initializer_convolution(shape=filter))
b = tf.get_variable(name='biases', initializer=constant_initializer(0, shape=filter[-1]))
return tf.nn.convolution(x, w, padding, strides, dilation_rate) + b
def deconvolution(x, filter, output_shape, strides, padding='SAME'):
w = tf.get_variable(name='weights', initializer=xavier_initializer_convolution(shape=filter))
b = tf.get_variable(name='biases', initializer=constant_initializer(0, shape=filter[-2]))
spatial_rank = get_spatial_rank(x)
if spatial_rank == 2:
return tf.nn.conv2d_transpose(x, w, output_shape, strides, padding) + b
if spatial_rank == 3:
return tf.nn.conv3d_transpose(x, w, output_shape, strides, padding) + b
raise ValueError('Only 2D and 3D images supported.')
# down convolution
def down_convolution(x, factor, kernel_size):
num_channels = get_num_channels(x)
spatial_rank = get_spatial_rank(x)
strides = spatial_rank * [factor]
filter = spatial_rank * [kernel_size] + [num_channels, num_channels * factor]
x = convolution(x, filter, strides=strides)
return x
# up convolution
def up_convolution(x, output_shape, factor, kernel_size):
num_channels = get_num_channels(x)
spatial_rank = get_spatial_rank(x)
strides = [1] + spatial_rank * [factor] + [1]
filter = spatial_rank * [kernel_size] + [num_channels // factor, num_channels]
x = deconvolution(x, filter, output_shape, strides=strides)
return x
def convolution_block(layer_input, num_convolutions, keep_prob, activation_fn):
n_channels = get_num_channels(layer_input)
spatial_rank = get_spatial_rank(layer_input)
x = layer_input
kernel = spatial_rank * [5] + [n_channels, n_channels]
for i in range(num_convolutions):
with tf.variable_scope('conv_' + str(i + 1)):
x = convolution(x, kernel)
if i == num_convolutions - 1:
x = x + layer_input
x = activation_fn(x)
x = tf.nn.dropout(x, keep_prob)
return x
def convolution_block_2(layer_input, fine_grained_features, num_convolutions, keep_prob, activation_fn):
n_channels = get_num_channels(layer_input)
spatial_rank = get_spatial_rank(layer_input)
x = tf.concat((layer_input, fine_grained_features), axis=-1)
for i in range(0, num_convolutions):
with tf.variable_scope('conv_' + str(i + 1)):
kernel = spatial_rank * [5]
kernel = kernel + [n_channels * 2, n_channels] if i == 0 else kernel + [n_channels, n_channels]
x = convolution(x, kernel)
if i == num_convolutions - 1:
x = x + layer_input
x = activation_fn(x)
x = tf.nn.dropout(x, keep_prob)
return x
class VNetCRF(object):
def __init__(self,
num_classes,
keep_prob=1.0,
num_channels=16,
num_levels=4,
num_convolutions=(1, 2, 3, 3),
bottom_convolutions=3,
activation_fn=tf.nn.relu,
theta_alpha=50,
theta_beta=25,
theta_gamma=50,
num_iterations=5):
"""
Implements VNet architecture https://arxiv.org/abs/1606.04797
:param num_classes: Number of output classes.
:param keep_prob: Dropout keep probability, set to 1.0 if not training or if no dropout is desired.
:param num_channels: The number of output channels in the first level, this will be doubled every level.
:param num_levels: The number of levels in the network. Default is 4 as in the paper.
:param num_convolutions: An array with the number of convolutions at each level.
:param bottom_convolutions: The number of convolutions at the bottom level of the network.
:param activation_fn: The activation function.
:param theta_alpha: Spatial standard deviation for bilateral filter
:param theta_beta: Color standard deviation for bilateral filter
:param theta_gamma: Spatial standard deviation for Gaussian filter
:param num_iterations: Number of iterations for mean field approximation of the CRF
"""
self.num_classes = num_classes
self.keep_prob = keep_prob
self.num_channels = num_channels
assert num_levels == len(num_convolutions)
self.num_levels = num_levels
self.num_convolutions = num_convolutions
self.bottom_convolutions = bottom_convolutions
self.activation_fn = activation_fn
self.theta_alpha = theta_alpha
self.theta_beta = theta_beta
self.theta_gamma = theta_gamma
self.num_iterations = num_iterations
def network_fn(self, x, is_training):
input_image = x
input_channels = get_num_channels(x)
spatial_rank = get_spatial_rank(x)
keep_prob = self.keep_prob if is_training else 1.0
# if the input has more than 1 channel it has to be expanded because broadcasting only works for 1 input
# channel
with tf.variable_scope('vnet/input_layer'):
if input_channels == 1:
x = tf.tile(x, (spatial_rank + 1) * [1] + [self.num_channels])
else:
x = self.activation_fn(convolution(x, spatial_rank * [5] + [input_channels, self.num_channels]))
features = list()
for l in range(self.num_levels):
with tf.variable_scope('vnet/encoder/level_' + str(l + 1)):
x = convolution_block(x, self.num_convolutions[l], keep_prob, activation_fn=self.activation_fn)
features.append(x)
with tf.variable_scope('down_convolution'):
x = self.activation_fn(down_convolution(x, factor=2, kernel_size=2))
with tf.variable_scope('vnet/bottom_level'):
x = convolution_block(x, self.bottom_convolutions, keep_prob, activation_fn=self.activation_fn)
for l in reversed(range(self.num_levels)):
with tf.variable_scope('vnet/decoder/level_' + str(l + 1)):
f = features[l]
with tf.variable_scope('up_convolution'):
x = self.activation_fn(up_convolution(x, tf.shape(f), factor=2, kernel_size=2))
x = convolution_block_2(x, f, self.num_convolutions[l], keep_prob, activation_fn=self.activation_fn)
with tf.variable_scope('vnet/output_layer'):
logits = convolution(x, spatial_rank * [1] + [self.num_channels, self.num_classes])
with tf.variable_scope('crf_as_rnn'):
logits = crf_rnn_layer(unaries=logits,
reference_image=input_image,
num_classes=self.num_classes,
theta_alpha=self.theta_alpha,
theta_beta=self.theta_beta,
theta_gamma=self.theta_gamma,
num_iterations=self.num_iterations)
return logits
def input_function(batch_size, reference_channels, num_classes):
# dummy inputs (feed your own images by using TFRecordDataset: tf.data.TFRecordDataset(filenames))
input_image = tf.constant(1.0, shape=(batch_size, 100, 100, 50, reference_channels), dtype=tf.float32)
ground_truth = tf.constant(1.0, shape=(batch_size, 100, 100, 50, num_classes), dtype=tf.float32)
dataset = tf.data.Dataset.from_tensors((input_image, ground_truth))
dataset = dataset.repeat(10) # 10 epochs
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
if __name__ == "__main__":
# Compile with SPATIAL_DIMENSIONS=3, REFERENCE_CHANNELS=4, INPUT_CHANNELS=2 (num_classes)
BATCH_SIZE = 1
REFERENCE_CHANNELS = 4
INPUT_CHANNELS = 2
num_classes = INPUT_CHANNELS
with tf.Graph().as_default():
input_image, ground_truth = input_function(BATCH_SIZE, REFERENCE_CHANNELS, num_classes)
net = VNetCRF(num_classes=num_classes)
logits = net.network_fn(input_image, is_training=True)
logits = tf.reshape(logits, (-1, num_classes))
labels = tf.reshape(ground_truth, (-1, num_classes))
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
probability = tf.nn.softmax(logits)
prediction = tf.round(probability)
# calculate dice coefficient and/or other metrics that are useful to you
        # MonitoredTrainingSession initialises variables and exposes should_stop(),
        # which a plain tf.Session does not provide.
        with tf.train.MonitoredTrainingSession() as sess:
            while not sess.should_stop():
                _, l, p = sess.run([train_op, loss, prediction])
                print('loss: {:.3f}'.format(l))
|
the-stack_106_28706 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 14:10:11 2018
@author: spalazzo
"""
# import modules
# skeleton.py file to use as a template when porting CodeSkulptor
# projects over to PyGame. It should provide the basic structure
# to allow moving your code into PyGame. It will not make your
# code work automatically, but does provide the *new* pieces of code
# that are required to make your Codeskulptor projects run in
# PyGame. You then need to replace calls to simplegui functions
# with their equivalent functions in PyGame. Good Luck!
#
# As it is setup it will run a simple text animation illustrating
# the draw handler and timers.
# import modules
import os
import pygame
import random
import math
# pygame specific locals/constants
from pygame.locals import *
# some resource related warnings
if not pygame.font: print('Warning, fonts disabled')
if not pygame.mixer: print('Warning, sound disabled')
# import simplegui/codeskulptor if available (CodeSkulptor environment); otherwise fall
# back to the SimpleGUICS2Pygame port, which provides both modules
try:
    import simplegui
    import codeskulptor
except ImportError:
    import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
    import SimpleGUICS2Pygame.codeskulptor as codeskulptor
# initializations
pygame.init()
# a bit similar to CodeSkulptor frame creation -- we'll call the window the canvas
canvas = pygame.display.set_mode((640, 480))
pygame.display.set_caption("My_Project")
# Pygame Wrapper functions -- resource loading sanity checks
# Taken from the "Monkey tutorial" and updated for 3.3 by me
#
# load Image:
# A colorkey is used in graphics to represent a color of the image
# that is transparent (r, g, b). -1 = top left pixel colour is used.
def load_image(name, colorkey=None):
    fullname = os.path.join('data', 'images', name)
try:
image = pygame.image.load(fullname)
except pygame.error as message:
print('Cannot load image:', name)
raise SystemExit(message)
if colorkey is not None:
image = image.convert()
        if colorkey == -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
else:
image = image.convert_alpha()
return image, image.get_rect()
# Load Sound
def load_sound(name):
class NoneSound:
def play(self): pass
if not pygame.mixer:
return NoneSound()
    fullname = os.path.join('data', 'sounds', name)
try:
sound = pygame.mixer.Sound(fullname)
except pygame.error as message:
print('Cannot load sound:', name)
raise SystemExit(message)
return sound
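
# Illustrative usage (not part of the original skeleton): the loaders above expect the
# assets under data/images and data/sounds; the file names below are placeholders.
def _load_example_assets():
    ball_img, ball_rect = load_image('ball.png', colorkey=-1)  # hypothetical sprite
    bounce_snd = load_sound('bounce.wav')                      # hypothetical sound
    return ball_img, ball_rect, bounce_snd
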
# need to create fonts and colour objects in PyGame
#fontObj = pygame.font.Font('ARBERKLEY.ttf', 32)
#fontObj2 = pygame.font.Font('ARBERKLEY.ttf', 24)
fontObj3 = pygame.font.Font(pygame.font.match_font('timesnewroman'), 32)
gold_color = pygame.Color(255, 215, 0)
white_color = pygame.Color(255, 255, 255)
# ------------------------Begin Your CodeSkulptor Port-------------------------
# Tile Images
IMAGENAME = "assets_2048.png"
TILE_SIZE = 100
HALF_TILE_SIZE = TILE_SIZE / 2
BORDER_SIZE = 45
# Directions
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
# Offsets for computing tile indices in each direction.
OFFSETS = { UP: (1, 0),
DOWN: (-1, 0),
LEFT: (0, 1),
RIGHT: (0, -1)
}
class GUI:
"""
Class to run game GUI.
"""
def __init__(self, game):
self._rows = game.get_grid_height()
self._cols = game.get_grid_width()
self._frame = simplegui.create_frame('2048',
self._cols * TILE_SIZE + 2 * BORDER_SIZE,
self._rows * TILE_SIZE + 2 * BORDER_SIZE)
self._frame.add_button('New Game', self.start)
self._frame.set_keydown_handler(self.keydown)
self._frame.set_draw_handler(self.draw)
self._frame.set_canvas_background("#BCADA1")
self._frame.start()
self._game = game
url = codeskulptor.file2url(IMAGENAME)
        self._tiles = simplegui.load_image(url)
self._directions = {"up": UP, "down": DOWN,
"left": LEFT, "right": RIGHT}
def keydown(self, key):
"""
Keydown handler
"""
for dirstr, dirval in self._directions.items():
            if key == simplegui.KEY_MAP[dirstr]:
self._game.move(dirval)
break
def draw(self, canvas):
"""
Draw handler
"""
for row in range(self._rows):
for col in range(self._cols):
                tile = self._game.get_tile(row, col)
if tile == 0:
val = 0
else:
val = int(math.log(tile, 2))
canvas.draw_image(self._tiles,
[HALF_TILE_SIZE + val * TILE_SIZE, HALF_TILE_SIZE],
[TILE_SIZE, TILE_SIZE],
[col * TILE_SIZE + HALF_TILE_SIZE + BORDER_SIZE,
row * TILE_SIZE + HALF_TILE_SIZE + BORDER_SIZE],
[TILE_SIZE, TILE_SIZE])
def start(self):
"""
Start the game.
"""
self._game.reset()
def run_gui(game):
"""
Instantiate and run the GUI.
"""
gui = GUI(game)
gui.start()
def merge(line):
"""
Function that merges a single row or column in 2048.
"""
merged_list = [i for i in line if i]
merged_list += [0 for num in range(len(line) - len(merged_list))]
for indx in range(0,len(line)-1):
if merged_list[indx] == merged_list[indx +1]:
merged_list[indx] *= 2
merged_list[indx +1] = 0
# remove zeros after merge and return the list
merged = [i for i in merged_list if i]
merged += [0 for num in range(len(merged_list) - len(merged))]
return merged
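
# Quick sanity check of the merge rule above (illustrative, not part of the original
# file): equal neighbours collapse once per move and zeros are pushed to the end.
assert merge([2, 0, 2, 4]) == [4, 4, 0, 0]
assert merge([2, 2, 2, 2]) == [4, 4, 0, 0]
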
class TwentyFortyEight:
"""
Class to run the game logic.
"""
def __init__(self, grid_height, grid_width):
self._board = []
self._grid_height = grid_height
self._grid_width = grid_width
self.reset()
self._slide = {UP : [[0,element] for element in range(self.get_grid_width())],
DOWN : [[self.get_grid_height() - 1, element] for element in range(self.get_grid_width())],
LEFT : [[element, 0] for element in range(self.get_grid_height())],
RIGHT : [[element, self.get_grid_width() - 1] for element in range (self.get_grid_height())]}
def reset(self):
"""
Reset the game so the grid is empty except for two
initial tiles.
"""
#create the board
self._board = [[0 for dummycol in range(self.get_grid_width())]
for dummyrow in range(self.get_grid_height())]
        # place two starting tiles at the beginning of the game
for dummy_num in range(2):
self.new_tile()
def get_grid_height(self):
"""
Get the height of the board.
"""
return self._grid_height
def get_grid_width(self):
"""
Get the width of the board.
"""
return self._grid_width
def __str__(self):
"""
Return a string representation of the grid for debugging.
"""
msg = " "
for dummy_row in range(self.get_grid_height()):
msg += str(self._board[dummy_row]) + "\n "
return msg
def move(self, direction):
"""
Move all tiles in the given direction and add
a new tile if any tiles moved.
"""
if(direction == UP):
self.move_helper(direction, self.get_grid_height())
elif(direction == DOWN):
self.move_helper(direction, self.get_grid_height())
elif(direction == LEFT):
self.move_helper(direction, self.get_grid_width())
elif(direction == RIGHT):
self.move_helper(direction, self.get_grid_width())
def move_helper(self, direction, border):
"""
Move all columns and merge
"""
slides_list = list(self._slide[direction])
tmp_list = []
        # get a snapshot of the board before the move
before_move = str(self._board)
# rotate the grid and call merge function
for element in slides_list:
tmp_list.append(element)
for indx in range(1, border):
tmp_list.append([dummy_row + dummy_col for dummy_row,
dummy_col in zip(tmp_list[-1],
OFFSETS[direction])])
index= []
for indx in tmp_list:
index.append(self.get_tile(indx[0], indx[1]))
merged_list = merge(index)
for indx_x, indx_y in zip(merged_list, tmp_list):
self.set_tile(indx_y[0], indx_y[1], indx_x)
tmp_list = []
        # get a new snapshot of the board after the move
        after_move = str(self._board)
        # if something changed, add a new tile
if before_move != after_move:
self.new_tile()
def set_tile(self, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
self._board[row][col] = value
def new_tile(self):
"""
Create a new tile in a randomly selected empty
square. The tile should be 2 90% of the time and
4 10% of the time.
"""
tile = None
num = float('%.1f' %(random.random()))
if num < 0.9 :
tile = 2
else:
tile = 4
blank_tiles = []
# scan the grid for available position where to place the new tile
for dummy_row in range(self.get_grid_height()):
for dummy_col in range(self.get_grid_width()):
# check if there is a winner
if self._board[dummy_row][dummy_col] == 2048:
return "Congratulations you win!"
if self._board[dummy_row][dummy_col] == 0:
blank_tiles.append([dummy_row, dummy_col])
#place the new tile to a random available location
tile_to_place = random.choice(blank_tiles)
self.set_tile(tile_to_place[0], tile_to_place[1], tile)
def get_tile(self, row, col):
"""
Return the value of the tile at position row, col.
"""
return self._board[row][col]
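
# Minimal headless usage sketch (illustrative, not part of the original port): the game
# logic above can be exercised without any GUI, which is handy for quick manual testing.
def _demo_headless_game():
    demo = TwentyFortyEight(4, 4)
    demo.move(UP)
    demo.move(LEFT)
    print(demo)  # __str__ prints the 4x4 board row by row
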
# ------------------------End Your CodeSkulptor Port-------------------------
count = 0
draw_colour = white_color
def draw_handler(canvas):
# clear canvas -- fill canvas with uniform colour, then draw everything below.
# this removes everything previously drawn and refreshes
canvas.fill((0, 0, 0))
# draw example
global count
count += 1
text_draw = fontObj3.render("CodeSkulptor Port", True, draw_colour)
text_draw2 = fontObj3.render("Tutorial", True, draw_colour)
if count % 90 < 45:
canvas.blit(text_draw, (190, 220))
else:
canvas.blit(text_draw2, (250, 220))
# update the display
pygame.display.update()
def t_example():
global draw_colour
if draw_colour == white_color:
draw_colour = gold_color
else:
draw_colour = white_color
# pygame timers have no start() or stop() methods -- an interval of 0 is off, any other value is on
# set some on/off constants for readability with each timer
TIMER_OFF = 0
# timer for example -- 1500 milliseconds when on
TIMER_EXAMPLE_ON = 1500
# set the timer name to its user event for readability
timer_example = USEREVENT + 1
pygame.time.set_timer(timer_example, TIMER_EXAMPLE_ON)
# call this function to start everything
# could be thought of as the implementation of the CodeSkulptor frame .start() method.
def main():
# initialize loop until quit variable
running = True
# create our FPS timer clock
clock = pygame.time.Clock()
# start the GUI and call the game
run_gui(TwentyFortyEight(4, 4))
#---------------------------Frame is now Running-----------------------------------------
# doing the infinte loop until quit -- the game is running
while running:
# event queue iteration
for event in pygame.event.get():
# window GUI ('x' the window)
if event.type == pygame.QUIT:
running = False
# input - key and mouse event handlers
elif event.type == pygame.MOUSEBUTTONDOWN:
pass
# just respond to left mouse clicks
#if pygame.mouse.get_pressed()[0]:
#mc_handler(pygame.mouse.get_pos())
elif event.type == pygame.KEYDOWN:
pass
#kd_handler(event.key)
# timers
elif event.type == timer_example:
t_example()
# the call to the draw handler
draw_handler(canvas)
# FPS limit to 60 -- essentially, setting the draw handler timing
# it micro pauses so while loop only runs 60 times a second max.
clock.tick(60)
#-----------------------------Frame Stops------------------------------------------
# quit game -- we're now allowed to hit the quit call
    pygame.quit()
# this calls the 'main' function when this script is executed
# could be thought of as a call to frame.start() of sorts
if __name__ == '__main__': main()
|
the-stack_106_28711 | # > \brief \b ZTRMM
#
# =========== DOCUMENTATION ===========
#
# Online html documentation available at
# http://www.netlib.org/lapack/explore-html/
#
# Definition:
# ===========
#
# def ZTRMM(SIDE,UPLO,TRANSA,DIAG,M,N,ALPHA,A,LDA,B,LDB)
#
# .. Scalar Arguments ..
# COMPLEX*16 ALPHA
# INTEGER LDA,LDB,M,N
# CHARACTER DIAG,SIDE,TRANSA,UPLO
# ..
# .. Array Arguments ..
# COMPLEX*16 A(LDA,*),B(LDB,*)
# ..
#
#
# > \par Purpose:
# =============
# >
# > \verbatim
# >
# > ZTRMM performs one of the matrix-matrix operations
# >
# > B := alpha*op( A )*B, or B := alpha*B*op( A )
# >
# > where alpha is a scalar, B is an m by n matrix, A is a unit, or
# > non-unit, upper or lower triangular matrix and op( A ) is one of
# >
# > op( A ) = A or op( A ) = A**T or op( A ) = A**H.
# > \endverbatim
#
# Arguments:
# ==========
#
# > \param[in] SIDE
# > \verbatim
# > SIDE is CHARACTER*1
# > On entry, SIDE specifies whether op( A ) multiplies B from
# > the left or right as follows:
# >
# > SIDE = 'L' or 'l' B := alpha*op( A )*B.
# >
# > SIDE = 'R' or 'r' B := alpha*B*op( A ).
# > \endverbatim
# >
# > \param[in] UPLO
# > \verbatim
# > UPLO is CHARACTER*1
# > On entry, UPLO specifies whether the matrix A is an upper or
# > lower triangular matrix as follows:
# >
# > UPLO = 'U' or 'u' A is an upper triangular matrix.
# >
# > UPLO = 'L' or 'l' A is a lower triangular matrix.
# > \endverbatim
# >
# > \param[in] TRANSA
# > \verbatim
# > TRANSA is CHARACTER*1
# > On entry, TRANSA specifies the form of op( A ) to be used in
# > the matrix multiplication as follows:
# >
# > TRANSA = 'N' or 'n' op( A ) = A.
# >
# > TRANSA = 'T' or 't' op( A ) = A**T.
# >
# > TRANSA = 'C' or 'c' op( A ) = A**H.
# > \endverbatim
# >
# > \param[in] DIAG
# > \verbatim
# > DIAG is CHARACTER*1
# > On entry, DIAG specifies whether or not A is unit triangular
# > as follows:
# >
# > DIAG = 'U' or 'u' A is assumed to be unit triangular.
# >
# > DIAG = 'N' or 'n' A is not assumed to be unit
# > triangular.
# > \endverbatim
# >
# > \param[in] M
# > \verbatim
# > M is INTEGER
# > On entry, M specifies the number of rows of B. M must be at
# > least zero.
# > \endverbatim
# >
# > \param[in] N
# > \verbatim
# > N is INTEGER
# > On entry, N specifies the number of columns of B. N must be
# > at least zero.
# > \endverbatim
# >
# > \param[in] ALPHA
# > \verbatim
# > ALPHA is COMPLEX*16
# > On entry, ALPHA specifies the scalar alpha. When alpha is
# > zero then A is not referenced and B need not be set before
# > entry.
# > \endverbatim
# >
# > \param[in] A
# > \verbatim
# > A is COMPLEX*16 array, dimension ( LDA, k ), where k is m
# > when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
# > Before entry with UPLO = 'U' or 'u', the leading k by k
# > upper triangular part of the array A must contain the upper
# > triangular matrix and the strictly lower triangular part of
# > A is not referenced.
# > Before entry with UPLO = 'L' or 'l', the leading k by k
# > lower triangular part of the array A must contain the lower
# > triangular matrix and the strictly upper triangular part of
# > A is not referenced.
# > Note that when DIAG = 'U' or 'u', the diagonal elements of
# > A are not referenced either, but are assumed to be unity.
# > \endverbatim
# >
# > \param[in] LDA
# > \verbatim
# > LDA is INTEGER
# > On entry, LDA specifies the first dimension of A as declared
# > in the calling (sub) program. When SIDE = 'L' or 'l' then
# > LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
# > then LDA must be at least max( 1, n ).
# > \endverbatim
# >
# > \param[in,out] B
# > \verbatim
# > B is COMPLEX*16 array, dimension ( LDB, N ).
# > Before entry, the leading m by n part of the array B must
# > contain the matrix B, and on exit is overwritten by the
# > transformed matrix.
# > \endverbatim
# >
# > \param[in] LDB
# > \verbatim
# > LDB is INTEGER
# > On entry, LDB specifies the first dimension of B as declared
# > in the calling (sub) program. LDB must be at least
# > max( 1, m ).
# > \endverbatim
#
# Authors:
# ========
#
# > \author Univ. of Tennessee
# > \author Univ. of California Berkeley
# > \author Univ. of Colorado Denver
# > \author NAG Ltd.
#
# > \date December 2016
#
# > \ingroup complex16_blas_level3
#
# > \par Further Details:
# =====================
# >
# > \verbatim
# >
# > Level 3 Blas routine.
# >
# > -- Written on 8-February-1989.
# > Jack Dongarra, Argonne National Laboratory.
# > Iain Duff, AERE Harwell.
# > Jeremy Du Croz, Numerical Algorithms Group Ltd.
# > Sven Hammarling, Numerical Algorithms Group Ltd.
# > \endverbatim
# >
# =====================================================================
from util import lsame
from xerbla import xerbla
def ZTRMM(SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, A, LDA, B, LDB):
#
# -- Reference BLAS level3 routine (version 3.7.0) --
# -- Reference BLAS is a software package provided by Univ. of Tennessee, --
# -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
# December 2016
#
# .. Scalar Arguments ..
# COMPLEX*16 ALPHA
# INTEGER LDA,LDB,M,N
# CHARACTER DIAG,SIDE,TRANSA,UPLO
# ..
# .. Array Arguments ..
# COMPLEX*16 A(LDA,*),B(LDB,*)
# ..
#
# =====================================================================
# Test the input parameters.
LSIDE = lsame(SIDE, "L")
if LSIDE:
NROWA = M
else:
NROWA = N
NOCONJ = lsame(TRANSA, "T")
NOUNIT = lsame(DIAG, "N")
UPPER = lsame(UPLO, "U")
#
INFO = 0
if (not LSIDE) and (not lsame(SIDE, "R")):
INFO = 1
elif (not UPPER) and (not lsame(UPLO, "L")):
INFO = 2
elif (
(not lsame(TRANSA, "N"))
and (not lsame(TRANSA, "T"))
and (not lsame(TRANSA, "C"))
):
INFO = 3
elif (not lsame(DIAG, "U")) and (not lsame(DIAG, "N")):
INFO = 4
elif M < 0:
INFO = 5
elif N < 0:
INFO = 6
elif LDA < max(1, NROWA):
INFO = 9
elif LDB < max(1, M):
INFO = 11
    if INFO != 0:
        xerbla("ZTRMM ", INFO)
        return
# Quick return if possible.
if M == 0 or N == 0:
return
# And when alpha==zero.
if ALPHA == 0:
for J in range(N):
for I in range(M):
B[I, J] = 0
return
# Start the operations.
if LSIDE:
if lsame(TRANSA, "N"):
# Form B := alpha*A*B.
if UPPER:
for J in range(N):
for K in range(M):
if B[K, J] != 0:
TEMP = ALPHA * B[K, J]
                            for I in range(K):
B[I, J] = B[I, J] + TEMP * A[I, K]
if NOUNIT:
TEMP = TEMP * A[K, K]
B[K, J] = TEMP
else:
for J in range(N):
for K in range(M - 1, -1, -1):
if B[K, J] != 0:
TEMP = ALPHA * B[K, J]
B[K, J] = TEMP
if NOUNIT:
B[K, J] = B[K, J] * A[K, K]
                            for I in range(K + 1, M):
B[I, J] = B[I, J] + TEMP * A[I, K]
else:
# Form B := alpha*A**T*B or B := alpha*A**H*B.
if UPPER:
for J in range(N):
for I in range(M - 1, -1, -1):
TEMP = B[I, J]
if NOCONJ:
if NOUNIT:
TEMP = TEMP * A[I, I]
                            for K in range(I):
TEMP += A[K, I] * B[K, J]
else:
if NOUNIT:
TEMP = TEMP * A[I, I].conjugate()
                            for K in range(I):
TEMP += A[K, I].conjugate() * B[K, J]
B[I, J] = ALPHA * TEMP
else:
for J in range(N):
for I in range(M):
TEMP = B[I, J]
if NOCONJ:
if NOUNIT:
TEMP = TEMP * A[I, I]
                            for K in range(I + 1, M):
TEMP += A[K, I] * B[K, J]
else:
if NOUNIT:
TEMP = TEMP * A[I, I].conjugate()
                            for K in range(I + 1, M):
TEMP += A[K, I].conjugate() * B[K, J]
B[I, J] = ALPHA * TEMP
else:
if lsame(TRANSA, "N"):
# Form B := alpha*B*A.
if UPPER:
for J in range(N - 1, -1, -1):
TEMP = ALPHA
if NOUNIT:
TEMP *= A[J, J]
for I in range(M):
B[I, J] = TEMP * B[I, J]
                    for K in range(J):
if A[K, J] != 0:
TEMP = ALPHA * A[K, J]
for I in range(M):
B[I, J] = B[I, J] + TEMP * B[I, K]
else:
for J in range(N):
TEMP = ALPHA
if NOUNIT:
TEMP *= A[J, J]
for I in range(M):
B[I, J] = TEMP * B[I, J]
                    for K in range(J + 1, N):
if A[K, J] != 0:
TEMP = ALPHA * A[K, J]
for I in range(M):
B[I, J] = B[I, J] + TEMP * B[I, K]
else:
# Form B := alpha*B*A**T or B := alpha*B*A**H.
if UPPER:
for K in range(N):
                    for J in range(K):
if A[J, K] != 0:
if NOCONJ:
TEMP = ALPHA * A[J, K]
else:
TEMP = ALPHA * A[J, K].conjugate()
for I in range(M):
B[I, J] = B[I, J] + TEMP * B[I, K]
TEMP = ALPHA
if NOUNIT:
if NOCONJ:
TEMP = TEMP * A[K, K]
else:
TEMP = TEMP * A[K, K].conjugate()
if TEMP != 1:
for I in range(M):
B[I, K] = TEMP * B[I, K]
else:
for K in range(N - 1, -1, -1):
                    for J in range(K + 1, N):
if A[J, K] != 0:
if NOCONJ:
TEMP = ALPHA * A[J, K]
else:
TEMP = ALPHA * A[J, K].conjugate()
for I in range(M):
B[I, J] = B[I, J] + TEMP * B[I, K]
TEMP = ALPHA
if NOUNIT:
if NOCONJ:
TEMP = TEMP * A[K, K]
else:
TEMP = TEMP * A[K, K].conjugate()
if TEMP != 1:
for I in range(M):
B[I, K] = TEMP * B[I, K]
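
# Illustrative usage sketch (not part of the reference translation): ZTRMM updates B in
# place, so A and B should be index-assignable 2-D arrays (e.g. numpy arrays). The
# shapes and values below are arbitrary and chosen only for demonstration.
if __name__ == "__main__":
    import numpy as np
    M, N = 3, 2
    A = np.triu(np.arange(1, 10, dtype=complex).reshape(3, 3))  # non-unit upper triangular
    B = np.ones((M, N), dtype=complex)
    # B := 2 * A * B  (left side, upper triangular, no transpose, non-unit diagonal)
    ZTRMM("L", "U", "N", "N", M, N, 2.0, A, M, B, M)
    print(B)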
|
the-stack_106_28714 | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for visualize."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import PIL.Image as Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from visualize import standard_fields as fields
from visualize import vis_utils
_TESTDATA_PATH = 'testdata'
class VisualizationUtilsTest(tf.test.TestCase):
def test_get_prime_multiplier_for_color_randomness(self):
    # Show that the default multiplier is not 1 and does not divide the total number
# of standard colors.
multiplier = vis_utils._get_multiplier_for_color_randomness()
self.assertNotEqual(0, multiplier % len(vis_utils.STANDARD_COLORS))
self.assertNotEqual(1, multiplier)
# Show that with 34 colors, the closest prime number to 34/10 that
# satisfies the constraints is 5.
default_standard_colors = vis_utils.STANDARD_COLORS
vis_utils.STANDARD_COLORS = ['color_{}'.format(str(i)) for i in range(34)]
multiplier = vis_utils._get_multiplier_for_color_randomness()
self.assertEqual(5, multiplier)
# Show that with 110 colors, the closest prime number to 110/10 that
# satisfies the constraints is 13 (since 11 equally divides 110).
vis_utils.STANDARD_COLORS = ['color_{}'.format(str(i)) for i in range(110)]
multiplier = vis_utils._get_multiplier_for_color_randomness()
self.assertEqual(13, multiplier)
vis_utils.STANDARD_COLORS = default_standard_colors
def create_colorful_test_image(self):
"""This function creates an image that can be used to test vis functions.
It makes an image composed of four colored rectangles.
Returns:
colorful test numpy array image.
"""
ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), axis=2)
imw = np.concatenate((ch128, ch128, ch128), axis=2)
imu = np.concatenate((imr, img), axis=1)
imd = np.concatenate((imb, imw), axis=1)
image = np.concatenate((imu, imd), axis=0)
return image
def create_test_image_with_five_channels(self):
return np.full([100, 200, 5], 255, dtype=np.uint8)
def create_test_grayscale_image(self):
return np.full([100, 200, 1], 255, dtype=np.uint8)
def test_draw_bounding_box_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
vis_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax, xmax)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_box_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
vis_utils.draw_bounding_box_on_image_array(test_image, ymin, xmin, ymax,
xmax)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
boxes = np.array([[0.25, 0.75, 0.4, 0.6], [0.1, 0.1, 0.9, 0.9]])
vis_utils.draw_bounding_boxes_on_image(test_image, boxes)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
boxes = np.array([[0.25, 0.75, 0.4, 0.6], [0.1, 0.1, 0.9, 0.9]])
vis_utils.draw_bounding_boxes_on_image_array(test_image, boxes)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_tensors(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'img1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
[[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
images_with_boxes = (
vis_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
min_score_thresh=0.2,
keypoint_edges=keypoint_edges))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Write output images for visualization.
images_with_boxes_np = sess.run(images_with_boxes)
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_track_ids(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'img1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.7, 0.9],
[0.7, 0.5, 0.8, 0.9]],
[[0.41, 0.25, 0.75, 0.75], [0.51, 0.3, 0.7, 0.9],
[0.75, 0.5, 0.8, 0.9]]])
classes = tf.constant([[1, 1, 2], [1, 1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.5, 0.7], [0.6, 0.5, 0.8]])
track_ids = tf.constant([[3, 9, 7], [3, 9, 144]], dtype=tf.int32)
images_with_boxes = (
vis_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
track_ids=track_ids,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Write output images for visualization.
images_with_boxes_np = sess.run(images_with_boxes)
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_with_track_ids_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_additional_channels(self):
"""Tests the case where input image tensor has more than 3 channels."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_image_with_five_channels()
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
vis_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
final_images_np = sess.run(images_with_boxes)
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_bounding_boxes_on_image_tensors_grayscale(self):
"""Tests the case where input image tensor has one channel."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_grayscale_image()
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant([[100, 200], [100, 200]], dtype=tf.int32)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
vis_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
final_images_np = sess.run(images_with_boxes)
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_keypoints_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
vis_utils.draw_keypoints_on_image(
test_image,
keypoints,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
keypoint_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
vis_utils.draw_keypoints_on_image_array(
test_image,
keypoints,
keypoint_edges=keypoint_edges,
keypoint_edge_width=1,
keypoint_edge_color='green')
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.uint8)
mask = np.asarray([[0, 1], [1, 1]], dtype=np.uint8)
expected_result = np.asarray(
[[[0, 0, 0], [0, 0, 127]], [[0, 0, 127], [0, 0, 127]]], dtype=np.uint8)
vis_utils.draw_mask_on_image_array(test_image, mask, color='Blue', alpha=.5)
self.assertAllEqual(test_image, expected_result)
def test_add_cdf_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
vis_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
cdf_image_summary.eval()
def test_add_hist_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
bins = [0.01 * i for i in range(101)]
vis_utils.add_hist_image_summary(values, bins, 'ScoresDistribution')
hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
hist_image_summary.eval()
def test_eval_metric_ops(self):
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
max_examples_to_draw = 4
metric_op_base = 'Detections_Left_Groundtruth_Right'
eval_metric_ops = vis_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=max_examples_to_draw,
summary_name_prefix=metric_op_base)
original_image = tf.placeholder(tf.uint8, [4, None, None, 3])
original_image_spatial_shape = tf.placeholder(tf.int32, [4, 2])
true_image_shape = tf.placeholder(tf.int32, [4, 3])
detection_boxes = tf.random_uniform([4, 20, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
detection_classes = tf.random_uniform([4, 20],
minval=1,
maxval=3,
dtype=tf.int64)
detection_scores = tf.random_uniform([4, 20],
minval=0.,
maxval=1.,
dtype=tf.float32)
groundtruth_boxes = tf.random_uniform([4, 8, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
groundtruth_classes = tf.random_uniform([4, 8],
minval=1,
maxval=3,
dtype=tf.int64)
eval_dict = {
fields.DetectionResultFields.detection_boxes: detection_boxes,
fields.DetectionResultFields.detection_classes: detection_classes,
fields.DetectionResultFields.detection_scores: detection_scores,
fields.InputDataFields.original_image: original_image,
fields.InputDataFields.original_image_spatial_shape:
(original_image_spatial_shape),
fields.InputDataFields.true_image_shape: (true_image_shape),
fields.InputDataFields.groundtruth_boxes: groundtruth_boxes,
fields.InputDataFields.groundtruth_classes: groundtruth_classes
}
metric_ops = eval_metric_ops.get_estimator_eval_metric_ops(eval_dict)
_, update_op = metric_ops[next(six.iterkeys(metric_ops))]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
value_ops = {}
for key, (value_op, _) in six.iteritems(metric_ops):
value_ops[key] = value_op
# First run enough update steps to surpass `max_examples_to_draw`.
for i in range(max_examples_to_draw):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
for key, value_op in six.iteritems(value_ops_out):
self.assertNotEqual('', value_op)
# Now run fewer update steps than `max_examples_to_draw`. A single value
# op will be the empty string, since not enough image summaries can be
# produced.
for i in range(max_examples_to_draw - 1):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
self.assertEqual(
six.b(''),
value_ops_out[metric_op_base + '/' + str(max_examples_to_draw - 1)])
def test_visualize_boxes_and_labels_on_image_array(self):
ori_image = np.ones([360, 480, 3], dtype=np.int32) * 255
test_image = np.ones([360, 480, 3], dtype=np.int32) * 255
detections = np.array([[0.8, 0.1, 0.9, 0.1, 1., 0.1],
[0.1, 0.3, 0.8, 0.7, 1., 0.6]])
labelmap = {1: {'id': 1, 'name': 'cat'}, 2: {'id': 2, 'name': 'dog'}}
vis_utils.visualize_boxes_and_labels_on_image_array(
test_image,
detections[:, :4],
detections[:, 4].astype(np.int32),
detections[:, 5],
labelmap,
track_ids=None,
use_normalized_coordinates=True,
max_boxes_to_draw=1,
min_score_thresh=0.2,
agnostic_mode=False,
line_thickness=8)
self.assertGreater(np.abs(np.sum(test_image - ori_image)), 0)
if __name__ == '__main__':
tf.disable_eager_execution()
tf.test.main()
|
the-stack_106_28715 | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test for the distributed agent."""
import acme
from acme.agents.tf import impala
from acme.testing import fakes
from acme.tf import networks
import launchpad as lp
from absl.testing import absltest
class DistributedAgentTest(absltest.TestCase):
"""Simple integration/smoke test for the distributed agent."""
def test_atari(self):
"""Tests that the agent can run for some steps without crashing."""
env_factory = lambda x: fakes.fake_atari_wrapped(oar_wrapper=True)
net_factory = lambda spec: networks.IMPALAAtariNetwork(spec.num_values)
agent = impala.DistributedIMPALA(
environment_factory=env_factory,
network_factory=net_factory,
num_actors=2,
batch_size=32,
sequence_length=5,
sequence_period=1,
)
program = agent.build()
(learner_node,) = program.groups['learner']
learner_node.disable_run()
lp.launch(program, launch_type='test_mt')
learner: acme.Learner = learner_node.create_handle().dereference()
for _ in range(5):
learner.step()
if __name__ == '__main__':
absltest.main()
|
the-stack_106_28716 | """awstest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path("api/ec2/", include("ec2.urls")),
path("api/host/", include("host.urls")),
path("api/traceroute/", include("traceroute.urls")),
path("api/httpreq/", include("httpreq.urls")),
path("api/upload/", include("upload.urls"))
]
|
the-stack_106_28720 | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from nomad.datamodel import EntryArchive
from electronicparsers.molcas import MolcasParser
def approx(value, abs=0, rel=1e-6):
return pytest.approx(value, abs=abs, rel=rel)
@pytest.fixture(scope='module')
def parser():
return MolcasParser()
def test_basic(parser):
archive = EntryArchive()
parser.parse('tests/data/molcas/test000.input.out', archive, None)
sec_run = archive.run[0]
assert sec_run.program.version == '7.8 patchlevel 047'
sec_system = archive.run[0].system[0]
assert sec_system.atoms.positions[1][0].magnitude == approx(-3.5e-11)
assert sec_system.atoms.labels == ['H', 'H']
sec_sccs = sec_run.calculation[0]
assert sec_sccs.energy.total.value.magnitude == approx(-4.89497079e-18)
def test_1(parser):
archive = EntryArchive()
parser.parse('tests/data/molcas/test003.input.out', archive, None)
sec_sccs = archive.run[0].calculation
assert len(sec_sccs) == 61
assert sec_sccs[4].energy.total.value.magnitude == approx(-9.14252116e-15)
|
the-stack_106_28721 | import pytest
from unittest.mock import patch
from cartoframes.auth import Credentials
from cartoframes.data.observatory.catalog.dataset import Dataset
from cartoframes.data.observatory.catalog.geography import Geography
from cartoframes.data.observatory.catalog.country import Country
from cartoframes.data.observatory.catalog.category import Category
from cartoframes.data.observatory.catalog.provider import Provider
from cartoframes.data.observatory.catalog.catalog import Catalog
from cartoframes.data.observatory.catalog.subscriptions import Subscriptions
from cartoframes.data.observatory.catalog.repository.geography_repo import GeographyRepository
from cartoframes.data.observatory.catalog.repository.constants import (
CATEGORY_FILTER, COUNTRY_FILTER, GEOGRAPHY_FILTER, PUBLIC_FILTER
)
from .examples import (
test_country2, test_country1, test_category1, test_category2, test_dataset1, test_dataset2,
test_geographies, test_datasets, test_categories, test_countries, test_geography1, test_geography2,
test_provider1, test_provider2
)
class TestCatalog(object):
@patch.object(Country, 'get_all')
def test_countries(self, mocked_countries):
# Given
expected_countries = [test_country1, test_country2]
mocked_countries.return_value = expected_countries
catalog = Catalog()
# When
countries = catalog.countries
# Then
assert countries == expected_countries
@patch.object(Category, 'get_all')
def test_categories(self, mocked_categories):
# Given
expected_categories = [test_category1, test_category2]
mocked_categories.return_value = expected_categories
catalog = Catalog()
# When
categories = catalog.categories
# Then
assert categories == expected_categories
@patch.object(Provider, 'get_all')
def test_providers(self, mocked_providers):
# Given
expected_providers = [test_provider1, test_provider2]
mocked_providers.return_value = expected_providers
catalog = Catalog()
# When
providers = catalog.providers
# Then
assert providers == expected_providers
@patch.object(Dataset, 'get_all')
def test_datasets(self, mocked_datasets):
# Given
expected_datasets = [test_dataset1, test_dataset2]
mocked_datasets.return_value = expected_datasets
catalog = Catalog()
# When
datasets = catalog.datasets
# Then
assert datasets == expected_datasets
@patch.object(Country, 'get_all')
def test_filters_on_countries(self, mocked_countries):
# Given
mocked_countries.return_value = test_countries
catalog = Catalog()
# When
countries = catalog.category('demographics').countries
# Then
mocked_countries.assert_called_once_with({CATEGORY_FILTER: 'demographics'})
assert countries == test_countries
@patch.object(Category, 'get_all')
def test_filters_on_categories(self, mocked_categories):
# Given
mocked_categories.return_value = test_categories
catalog = Catalog()
# When
categories = catalog.country('usa').categories
# Then
mocked_categories.assert_called_once_with({COUNTRY_FILTER: 'usa'})
assert categories == test_categories
@patch.object(Dataset, 'get_all')
def test_filters_on_datasets(self, mocked_datasets):
# Given
mocked_datasets.return_value = test_datasets
catalog = Catalog()
# When
datasets = catalog.country('usa').category('demographics').datasets
# Then
mocked_datasets.assert_called_once_with({COUNTRY_FILTER: 'usa', CATEGORY_FILTER: 'demographics'})
assert datasets == test_datasets
@patch.object(Geography, 'get_all')
def test_filters_on_geographies(self, mocked_geographies):
# Given
mocked_geographies.return_value = test_geographies
catalog = Catalog()
# When
geographies = catalog.country('usa').category('demographics').geographies
# Then
mocked_geographies.assert_called_once_with({COUNTRY_FILTER: 'usa', CATEGORY_FILTER: 'demographics'})
assert geographies == test_geographies
@patch.object(Dataset, 'get_all')
def test_filters_public_datasets(self, mocked_datasets):
# Given
expected_datasets = [test_dataset1, test_dataset2]
mocked_datasets.return_value = expected_datasets
catalog = Catalog()
# When
datasets = catalog.public().datasets
# Then
mocked_datasets.assert_called_once_with({PUBLIC_FILTER: 'true'})
assert datasets == expected_datasets
@patch.object(Geography, 'get_all')
def test_filters_public_geographies(self, mocked_geographies):
# Given
expected_geographies = [test_geography1, test_geography2]
mocked_geographies.return_value = expected_geographies
catalog = Catalog()
# When
geographies = catalog.public().geographies
# Then
mocked_geographies.assert_called_once_with({PUBLIC_FILTER: 'true'})
assert geographies == expected_geographies
@patch.object(Dataset, 'get_all')
def test_filters_private_datasets(self, mocked_datasets):
# Given
expected_datasets = [test_dataset1, test_dataset2]
mocked_datasets.return_value = expected_datasets
catalog = Catalog()
# When
datasets = catalog.public(False).datasets
# Then
mocked_datasets.assert_called_once_with({PUBLIC_FILTER: 'false'})
assert datasets == expected_datasets
@patch.object(Geography, 'get_all')
def test_filters_private_geographies(self, mocked_geographies):
# Given
expected_geographies = [test_geography1, test_geography2]
mocked_geographies.return_value = expected_geographies
catalog = Catalog()
# When
geographies = catalog.public(False).geographies
# Then
mocked_geographies.assert_called_once_with({PUBLIC_FILTER: 'false'})
assert geographies == expected_geographies
@patch.object(Dataset, 'get_all')
def test_all_filters(self, mocked_datasets):
# Given
mocked_datasets.return_value = test_datasets
catalog = Catalog()
# When
datasets = catalog.country('usa').category('demographics').public() \
.geography('carto-do-public-data.tiger.geography_esp_census_2019').datasets
# Then
mocked_datasets.assert_called_once_with({
COUNTRY_FILTER: 'usa',
CATEGORY_FILTER: 'demographics',
PUBLIC_FILTER: 'true',
GEOGRAPHY_FILTER: 'carto-do-public-data.tiger.geography_esp_census_2019'
})
assert datasets == test_datasets
@patch.object(Dataset, 'get_all')
@patch.object(GeographyRepository, 'get_by_id')
def test_geography_filter_by_slug(self, mocked_repo, mocked_datasets):
# Given
mocked_repo.return_value = test_geography1
mocked_datasets.return_value = test_datasets
slug = 'esp_census_2019_4567890d'
catalog = Catalog()
# When
datasets = catalog.geography(slug).datasets
# Then
mocked_repo.assert_called_once_with(slug)
mocked_datasets.assert_called_once_with({GEOGRAPHY_FILTER: test_geography1.id})
assert datasets == test_datasets
@patch.object(Dataset, 'get_all')
@patch.object(Geography, 'get_all')
def test_subscriptions(self, mocked_geographies, mocked_datasets):
# Given
expected_datasets = [test_dataset1, test_dataset2]
expected_geographies = [test_geography1, test_geography2]
mocked_datasets.return_value = expected_datasets
mocked_geographies.return_value = expected_geographies
credentials = Credentials('user', '1234')
catalog = Catalog()
# When
subscriptions = catalog.subscriptions(credentials)
# Then
assert isinstance(subscriptions, Subscriptions)
assert subscriptions.datasets == expected_datasets
assert subscriptions.geographies == expected_geographies
mocked_datasets.assert_called_once_with({'only_products': True}, credentials)
mocked_geographies.assert_called_once_with({'only_products': True}, credentials)
@patch.object(Dataset, 'get_all')
@patch.object(Geography, 'get_all')
@patch('cartoframes.auth.defaults.get_default_credentials')
def test_subscriptions_default_credentials(self, mocked_credentials, mocked_geographies, mocked_datasets):
# Given
expected_datasets = [test_dataset1, test_dataset2]
expected_geographies = [test_geography1, test_geography2]
expected_credentials = Credentials('user', '1234')
mocked_datasets.return_value = expected_datasets
mocked_geographies.return_value = expected_geographies
mocked_credentials.return_value = expected_credentials
catalog = Catalog()
# When
subscriptions = catalog.subscriptions()
# Then
assert isinstance(subscriptions, Subscriptions)
assert subscriptions.datasets == expected_datasets
assert subscriptions.geographies == expected_geographies
mocked_datasets.assert_called_once_with({'only_products': True}, expected_credentials)
mocked_geographies.assert_called_once_with({'only_products': True}, expected_credentials)
@patch.object(Dataset, 'get_all')
@patch.object(Geography, 'get_all')
def test_subscriptions_wrong_credentials(self, mocked_geographies, mocked_datasets):
# Given
wrong_credentials = 1234
catalog = Catalog()
# When
with pytest.raises(ValueError) as e:
catalog.subscriptions(wrong_credentials)
# Then
assert str(e.value) == ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
|
the-stack_106_28723 | import os
import traceback
import hashlib
import glob
import operator
from functools import reduce
import cv2
import numpy as np
import requests
def imread(filename):
return cv2.imdecode(np.fromfile(file=filename, dtype=np.uint8), cv2.IMREAD_COLOR)
def walk_dir_recursively(root_dir,ext_list = None):
if ext_list is None:
return glob.glob(os.path.join(root_dir,"**","*"),recursive=True)
elif isinstance(ext_list,(tuple,list)):
lazy_paths = (glob.glob(os.path.join(root_dir, "**", "*"+ext),recursive=True) for ext in ext_list)
return reduce(operator.add, lazy_paths, [])
def get_img_pathes_recursively(root_dir):
return walk_dir_recursively(root_dir,ext_list=(".jpg",".png",".tiff"))
def get_video_pathes_recursively(root_dir):
return walk_dir_recursively(root_dir,ext_list=(".mp4",".mkv",".mov",".avi"))
def download_file(url, out_path, md5=None, overwrite=False):
try:
        if not os.path.exists(out_path) or overwrite or (md5 is not None and checksum_md5(out_path) != md5):
print("Downloading file to {}".format(out_path))
            res = requests.get(url)
            data = res.content
            print('Received data successfully, saving to: {}'.format(out_path))
with open(out_path,'wb') as fp:
fp.write(data)
            if md5 is not None:
                md5_local = checksum_md5(out_path)
                checked = (md5_local == md5)
                assert checked, "File {} MD5 check failed!".format(out_path)
print('Save file completed')
else:
print("File {} already exists".format(out_path))
except FileNotFoundError:
traceback.print_exc()
print("File {} path not exists".format(out_path))
except AssertionError:
traceback.print_exc()
os.remove(out_path)
def checksum_md5(path):
with open(path,'rb') as fp:
hasher = hashlib.md5()
for chunk in _read_chunks(fp):
hasher.update(chunk)
return hasher.hexdigest()
def _read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
yield data
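
# Illustrative usage sketch (not part of the original module). The directory, URL and
# output path below are placeholders -- substitute real values before running.
if __name__ == "__main__":
    images = get_img_pathes_recursively("./data")  # collect *.jpg/*.png/*.tiff recursively
    print("found {} images".format(len(images)))
    download_file("https://example.com/model.bin",  # placeholder URL
                  "model.bin", md5=None)            # pass an md5 string to verify the download
    print(checksum_md5("model.bin"))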
|
the-stack_106_28724 | import os
import random
import errno
import subprocess
from shutil import copytree
source_dir = 'omniglot'
target_dir = 'data/omniglot'
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# change folder structure :
alphabet_folders = [family \
for family in os.listdir(source_dir) \
if os.path.isdir(os.path.join(source_dir, family))]
for folder in alphabet_folders:
alphabet_path = os.path.join(source_dir, folder)
char_folders = [character \
for character in os.listdir(alphabet_path) \
if os.path.isdir(os.path.join(alphabet_path, character))]
for character in os.listdir(alphabet_path):
if os.path.isdir(os.path.join(alphabet_path, character)):
new_char_path = os.path.join(target_dir, folder + '_' + character)
try:
copytree(os.path.join(alphabet_path, character), new_char_path)
            except:
                # target character folder already exists (e.g. from a previous run) -- skip it
                pass
# train-test split :
character_folders = [os.path.join(target_dir, family, character) \
for family in os.listdir(target_dir) \
if os.path.isdir(os.path.join(target_dir, family)) \
for character in os.listdir(os.path.join(target_dir, family))]
print('Total number of character folders:', len(character_folders))
random.seed(123)
random.shuffle(character_folders)
num_train = 1200
train_folders = character_folders[:num_train]
test_folders = character_folders[num_train:]
if not os.path.exists(os.path.join(target_dir, 'train')):
os.makedirs(os.path.join(target_dir, 'train'))
for folder in train_folders:
root, char_folder = os.path.split(folder)
root, alphabet_folder = os.path.split(root)
try:
copytree(folder, os.path.join(root, 'train', alphabet_folder, char_folder))
except OSError as e:
        # If the error occurred because the source wasn't a directory, simply ignore it
        if e.errno == errno.ENOTDIR:
pass
else:
print('Could not copy directory!')
if not os.path.exists(os.path.join(target_dir, 'test')):
os.makedirs(os.path.join(target_dir, 'test'))
for folder in test_folders:
root, char_folder = os.path.split(folder)
root, alphabet_folder = os.path.split(root)
try:
copytree(folder, os.path.join(root, 'test', alphabet_folder, char_folder))
except OSError as e:
        # If the error occurred because the source wasn't a directory, simply ignore it
if e.errno == errno.ENOTDIR:
pass
else:
print('Could not copy directory!')
# resize images
cmd = ['python', 'utils/get_dataset_script/resize_dataset.py', 'data/omniglot', 'omniglot']
subprocess.call(cmd)
|
the-stack_106_28726 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the Find flow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from absl import app
from grr_response_client.client_actions import searching
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server.flows.general import find
from grr.test_lib import action_mocks
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
@db_test_lib.DualDBTest
class TestFindFlow(flow_test_lib.FlowTestsBaseclass):
"""Test the interrogate flow."""
def setUp(self):
super(TestFindFlow, self).setUp()
vfs_overrider = vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, vfs_test_lib.ClientVFSHandlerFixture)
vfs_overrider.Start()
self.addCleanup(vfs_overrider.Stop)
self.client_id = self.SetupClient(0)
def testInvalidFindSpec(self):
"""Test that its impossible to produce an invalid findspec."""
# The regular expression is not valid.
with self.assertRaises(re.error):
rdf_client_fs.FindSpec(path_regex="[")
def testFindFiles(self):
"""Test that the Find flow works with files."""
client_mock = action_mocks.ActionMock(searching.Find)
# Prepare a findspec.
findspec = rdf_client_fs.FindSpec(
path_regex="bash",
pathspec=rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS))
session_id = flow_test_lib.TestFlowHelper(
find.FindFiles.__name__,
client_mock,
client_id=self.client_id,
token=self.token,
findspec=findspec)
# Check the results.
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
# Should match ["bash" and "rbash"].
matches = set([x.AFF4Path(self.client_id).Basename() for x in results])
self.assertCountEqual(matches, ["bash", "rbash"])
self.assertLen(results, 4)
for child in results:
path = utils.SmartStr(child.AFF4Path(self.client_id))
self.assertEndsWith(path, "bash")
self.assertEqual(child.__class__.__name__, "StatEntry")
def testFindFilesWithGlob(self):
"""Test that the Find flow works with glob."""
client_mock = action_mocks.ActionMock(searching.Find)
# Prepare a findspec.
findspec = rdf_client_fs.FindSpec(
path_glob="bash*",
pathspec=rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS))
session_id = flow_test_lib.TestFlowHelper(
find.FindFiles.__name__,
client_mock,
client_id=self.client_id,
token=self.token,
findspec=findspec)
# Check the results.
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
# Make sure that bash is a file.
matches = set([x.AFF4Path(self.client_id).Basename() for x in results])
self.assertEqual(matches, set(["bash"]))
self.assertLen(results, 2)
for child in results:
path = utils.SmartStr(child.AFF4Path(self.client_id))
self.assertEndsWith(path, "bash")
self.assertEqual(child.__class__.__name__, "StatEntry")
def testFindDirectories(self):
"""Test that the Find flow works with directories."""
client_mock = action_mocks.ActionMock(searching.Find)
# Prepare a findspec.
findspec = rdf_client_fs.FindSpec(
path_regex="bin",
pathspec=rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS))
session_id = flow_test_lib.TestFlowHelper(
find.FindFiles.__name__,
client_mock,
client_id=self.client_id,
token=self.token,
findspec=findspec)
# Check the results.
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
# Make sure that bin is a directory
self.assertLen(results, 2)
for child in results:
self.assertEqual(child.__class__.__name__, "StatEntry")
self.assertIn("bin", child.pathspec.CollapsePath())
def testCollectionOverwriting(self):
"""Test we overwrite the collection every time the flow is executed."""
client_mock = action_mocks.ActionMock(searching.Find)
# Prepare a findspec.
findspec = rdf_client_fs.FindSpec()
findspec.path_regex = "bin"
findspec.pathspec.path = "/"
findspec.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
session_id = flow_test_lib.TestFlowHelper(
find.FindFiles.__name__,
client_mock,
client_id=self.client_id,
token=self.token,
findspec=findspec)
# Check the results.
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(results, 2)
# Now find a new result, should overwrite the collection
findspec.path_regex = "dd"
session_id = flow_test_lib.TestFlowHelper(
find.FindFiles.__name__,
client_mock,
client_id=self.client_id,
token=self.token,
findspec=findspec)
# Check the results.
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(results, 1)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
the-stack_106_28729 | import argparse
import os
import random
import re
import string
import threading
import time
from typing import List
import keras
import nltk
import numpy as np
import spacy
import tensorflow as tf
import unidecode
from gensim.models.wrappers import FastText as FastTextWrapper
from keras import backend as K
from tensorflow import keras
nltk.download('punkt')
class Diacritics:
def __init__(self):
self.model_embeddings = None
self.dict_lock = threading.Lock()
self.dict_avg_words = {}
        # the weights of the model will be saved in a folder for each epoch
self.fast_text = "fastText/wiki.ro"
self.window_sentence = 15
        self.window_character = 6  # the actual character window size is 2 * 6 + 1
self.character_embedding_size = 28
self.tag_embedding_size = 5
self.dep_embedding_size = 5
self.word_embedding_size = 300
self.characters_cell_size = 64
self.sentence_cell_size = 300
self.neurons_dense_layer_after_merge = 512
self.batch_size = 128
self.limit_backtracking_characters = 10
self.CPUS = 1
self.size_prefetch_buffer = 10
self.max_unicode_allowed = 770
self.replace_character = 255
self.padding_character = 0
self.train_files = "corpus/train/"
self.test_files = "corpus/test/"
self.valid_files = "corpus/validation/"
self.samples_number = {
'full_train': 380968863,
'full_test': 131424533,
'full_valid': 131861863,
'par_train': 84321880,
'par_test': 29407143,
'par_valid': 28882058,
}
self.map_no_diac = {
'ă': 'a',
'â': 'a',
'Â': 'A',
'Ă': 'A',
'ț': 't',
'Ț': 'T',
'ș': 's',
'Ș': 'S',
'î': 'i',
'Î': 'I',
"ş": "ș",
"Ş": "Ș",
"ţ": "ț",
"Ţ": "Ț",
}
self.all_in = {
'ă','â','a',
'Ă','Â','A',
'ț',
'Ț',
'ș',
'Ș',
'î',
'Î',
'i',
't',
's',
"ş",
"Ş",
"ţ",
"Ţ",
'S',
'T',
'I'
}
self.map_correct_diac = {
"ş": "ș",
"Ş": "Ș",
"ţ": "ț",
"Ţ": "Ț",
}
self.map_char_to_possible_chars = {
'a': ['ă', 'â', 'a'],
'i': ['î', 'i'],
's': ['ș', 's'],
't': ['ț', 't']
}
self.map_substitute_chars = {'"': '`'}
self.characters_in_interest = {'a', 'i', 's', 't'}
self.to_lower = {}
self.rom_spacy = spacy.load('models/model3')
self.create_lower_mapping()
self.parse_args()
# get case of the highest probability (returned by softmax)
def get_case(self, p):
case = 0
for i in range(len(p)):
if p[i] > p[case]:
case = i
return case
def create_lower_mapping(self):
for c in string.ascii_uppercase:
self.to_lower[c] = c.lower()
self.to_lower['Ș'] = 'ș'
self.to_lower['Â'] = 'â'
self.to_lower['Ă'] = 'ă'
self.to_lower['Ț'] = 'ț'
self.to_lower['Î'] = 'î'
# case:
# 0 -> ă
# 1 -> â, î
# 2 -> unmodified
# 3 -> ș, ț
def get_label(self, i, clean_text_utf, original_text_utf):
case = 2
if clean_text_utf[i] == 'a':
if original_text_utf[i] == 'ă':
case = 0
elif original_text_utf[i] == 'â':
case = 1
elif original_text_utf[i] == 'a':
case = 2
elif clean_text_utf[i] == 'i':
if original_text_utf[i] == 'î':
case = 1
elif original_text_utf[i] == 'i':
case = 2
elif clean_text_utf[i] == 't':
if original_text_utf[i] == 'ț':
case = 3
elif original_text_utf[i] == 't':
case = 2
elif clean_text_utf[i] == 's':
if original_text_utf[i] == 'ș':
if self.args.nr_classes == 5:
case = 4
else:
case = 3
elif original_text_utf[i] == 's':
case = 2
label = np.float32([0] * self.args.nr_classes)
label[case] = np.float32(1.0)
return label
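    # Illustrative examples (assuming nr_classes == 4): if the clean character is 'a'
    # and the original character was 'ă', get_label returns [1, 0, 0, 0]; if the
    # original was an unmodified 'a', it returns [0, 0, 1, 0].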
def bkt_all_words(self, index: int, clean_word: str, current_word: List) -> List:
if index == len(clean_word):
word = "".join(current_word)
if self.args.use_dummy_word_embeddings == True:
return [word]
else:
if word in self.model_embeddings.wv.vocab:
return [word]
else:
return []
else:
L = []
c = clean_word[index]
if c in self.map_char_to_possible_chars:
for ch in self.map_char_to_possible_chars[c]:
current_word[index] = ch
L += self.bkt_all_words(index + 1, clean_word, current_word)
else:
current_word[index] = c
L += self.bkt_all_words(index + 1, clean_word, current_word)
return L
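    # Illustrative sketch (the exact result depends on the loaded fastText vocabulary):
    # bkt_all_words(0, "tanar", ['a'] * 5) enumerates diacritic variants such as
    # "tanar", "tănar", "tânăr", ... and keeps only the variants present in the
    # embedding vocabulary (all of them when dummy word embeddings are enabled).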
def get_avg_possible_word(self, clean_word):
with self.dict_lock:
if clean_word in self.dict_avg_words:
return self.dict_avg_words[clean_word]
count_diacritics_chars = 0
for c in clean_word:
if c in self.map_char_to_possible_chars:
count_diacritics_chars += 1
if count_diacritics_chars > self.limit_backtracking_characters:
if self.args.use_dummy_word_embeddings == False:
return np.float32(self.model_embeddings.wv[clean_word])
else:
return np.float32([0] * self.word_embedding_size)
all_words = self.bkt_all_words(0, clean_word, ['a'] * len(clean_word))
if len(all_words) > 0:
if self.args.use_dummy_word_embeddings == False:
return np.mean([np.float32(self.model_embeddings.wv[word]) for word in all_words], axis=0)
else:
return np.float32([0] * self.word_embedding_size)
else:
try:
if self.args.use_dummy_word_embeddings == False:
return np.float32(self.model_embeddings.wv[clean_word])
else:
return np.float32([0] * self.word_embedding_size)
except:
return np.float32([0] * self.word_embedding_size)
def get_embeddings_sentence(self, clean_tokens_sentence, index_token):
embeddings_sentence = []
for i in range(index_token - self.window_sentence, index_token + self.window_sentence + 1):
if i >= 0 and i < len(clean_tokens_sentence):
token = clean_tokens_sentence[i]
token_embedding = self.get_avg_possible_word(token)
embeddings_sentence.append(token_embedding)
with self.dict_lock:
self.dict_avg_words[token] = token_embedding
else:
embeddings_sentence.append(np.float32([0] * self.word_embedding_size))
return np.array(embeddings_sentence)
    # return an input tuple (window_char, embedding_token, embedding_sentence)
def get_input_example(self, clean_text_utf, index_text, clean_tokens, index_sent, \
index_last_sent, index_token):
# window with characters
w = []
for j in range(index_text - self.window_character, index_text + self.window_character + 1):
if j < 0 or j >= len(clean_text_utf):
v1 = self.padding_character
elif ord(clean_text_utf[j]) > self.max_unicode_allowed:
v1 = self.replace_character
else:
v1 = ord(clean_text_utf[j])
w.append(v1)
# token
token = clean_tokens[index_sent][index_token]
token_embedding = self.get_avg_possible_word(token)
with self.dict_lock:
self.dict_avg_words[token] = token_embedding
# sentence
        # if it is the same sentence, don't recompute it
if index_last_sent is None or index_sent != index_last_sent:
sentence_embedding = self.get_embeddings_sentence(clean_tokens[index_sent], index_token)
else:
sentence_embedding = None
return (np.int32(w), token_embedding, sentence_embedding)
def replace_char(self, c):
if c in self.map_correct_diac:
c = self.map_correct_diac[c]
if c in self.to_lower:
c = self.to_lower[c]
if c in self.map_no_diac:
c = self.map_no_diac[c]
if ord(c) > 255:
return chr(self.replace_character)
elif c in self.map_substitute_chars:
return self.map_substitute_chars[c]
else:
return c
def replace_char_original(self, c):
if c in self.map_correct_diac:
c = self.map_correct_diac[c]
if c in self.to_lower:
c = self.to_lower[c]
return c
def count_chars_in_interest(self, s):
cnt_chars = 0
chars_in_int = []
for c in s:
character_c = chr(c)
if character_c in self.characters_in_interest:
cnt_chars += 1
chars_in_int.append(character_c)
return cnt_chars, chars_in_int
def get_word_around_index(self, original_text_utf, index_text):
s = []
i = index_text
while i >= 0 and original_text_utf[i].isalpha():
i -= 1
i += 1
while i < len(original_text_utf) and original_text_utf[i].isalpha():
s.append(original_text_utf[i])
i += 1
return "".join(s)
def create_examples(self, original_text, is_test_dataset):
drop_example = False
try:
original_text_utf = original_text.decode('utf-8')
first_clean_text_utf = "".join([self.replace_char_original(c) for c in original_text_utf])
# replace some strange characters which are modified by tokenization
clean_text_utf = "".join([self.replace_char(c) for c in first_clean_text_utf])
clean_sentences = nltk.sent_tokenize(clean_text_utf)
clean_tokens = []
clean_tokens_tag = []
clean_tokens_dep = []
# construct tokens
for i in range(len(clean_sentences)):
tokens = self.rom_spacy(clean_sentences[i])
clean_tokens_sent = [token.text for token in tokens]
tag_tokens_sent = [int(token.tag) for token in tokens]
dep_tokens_sent = [int(token.dep) for token in tokens]
#clean_tokens_sent = nltk.word_tokenize(clean_sentences[i])
clean_tokens.append(clean_tokens_sent)
clean_tokens_tag.append(tag_tokens_sent)
clean_tokens_dep.append(dep_tokens_sent)
index_text = 0 # current position in text
index_sent = 0 # current sentence
index_token = 0 # current token
index_last_sent = None # last sentence computed
# input and output lists
clean_words = []
original_words = []
self.window_characters = []
word_embeddings = []
sentence_embeddings = []
tag_tokens = []
dep_tokens = []
labels = []
while index_sent < len(clean_tokens):
clean_token = clean_tokens[index_sent][index_token]
tag_token = clean_tokens_tag[index_sent][index_token]
dep_token = clean_tokens_dep[index_sent][index_token]
original_word = self.get_word_around_index(original_text_utf, index_text)
i = 0
# construct all inputs from the current token (letter(a, i, t, s) -> input)
while i < len(clean_token):
if clean_text_utf[index_text] in self.characters_in_interest:
label = self.get_label(index_text, clean_text_utf, original_text_utf)
#print(original_text_utf[index_text], label)
win_char, word_emb, sent_emb = self.get_input_example(clean_text_utf, \
index_text, clean_tokens, index_sent, index_last_sent, index_token)
index_last_sent = index_sent
if is_test_dataset == True:
clean_words.append(clean_token)
original_words.append(original_word)
self.window_characters.append(win_char)
word_embeddings.append(word_emb)
tag_tokens.append(np.float32([tag_token]))
dep_tokens.append(np.float32([dep_token]))
# sentence already computed
if sent_emb is None:
sentence_embeddings.append(sentence_embeddings[-1])
else:
sentence_embeddings.append(sent_emb)
labels.append(label)
#print(clean_text_utf[index_text], original_text_utf[index_text], label)
if clean_text_utf[index_text] == clean_token[i]:
index_text += 1
i += 1
else: # discard char in text
index_text += 1
if index_token == len(clean_tokens[index_sent]) - 1:
index_token = 0
index_sent += 1
else:
index_token += 1
        except Exception as e:
            # Exception.message does not exist in Python 3, so print the exception itself
            print(e, e.args)
drop_example = True
# dummy values for empty sentence
if len(self.window_characters) == 0 or drop_example == True:
clean_words = ['a']
self.window_characters = [np.int32([0] * (self.window_character * 2 + 1))]
word_embeddings = [np.float32([0] * self.word_embedding_size)]
sentence_embeddings = [np.array(\
[np.float32([0] * self.word_embedding_size)] * (self.window_sentence * 2 + 1))]
tag_tokens = [np.float32([0])]
dep_tokens = [np.float32([0])]
lab = np.float32([0] * self.args.nr_classes)
lab[2] = np.float32(1.0)
labels = [lab]
if is_test_dataset == False:
return (self.window_characters, word_embeddings, sentence_embeddings, tag_tokens, dep_tokens, labels)
else:
return (clean_words, self.window_characters, word_embeddings, sentence_embeddings, tag_tokens, dep_tokens, labels, original_words)
def filter_null_strings(self, s):
if len(s) == 0:
return np.array([False])
return np.array([True])
def flat_map_f(self, a, b):
return tf.data.Dataset.from_tensor_slices((a, b))
def get_dataset(self, dpath, sess, is_test_dataset=False, restore=False, batch_1=False):
if restore == False:
input_files = tf.gfile.ListDirectory(dpath)
for i in range(len(input_files)):
if self.args.corpus_rowiki == False and input_files[i].count('rowiki') > 0:
input_files.remove(input_files[i])
break
for i in range(len(input_files)):
input_files[i] = dpath + input_files[i]
else:
input_files = [dpath]
dataset = tf.data.TextLineDataset(input_files)
dataset = dataset.filter(lambda x:
(tf.py_func(self.filter_null_strings, [x], tf.bool, stateful=False))[0])
if is_test_dataset == True:
datatype_returned = (tf.string, tf.int32, tf.float32, tf.float32, tf.float32, tf.float32,\
tf.float32, tf.string)
else:
datatype_returned = (tf.int32, tf.float32, tf.float32, tf.float32, tf.float32,\
tf.float32)
dataset = dataset.map(lambda x:\
tf.py_func(self.create_examples,\
(x, is_test_dataset,),\
datatype_returned,\
stateful=False),\
num_parallel_calls=self.CPUS)
# map input - output tuple
if is_test_dataset == True:
dataset = dataset.map(lambda x1, x2, x3, x4, x5, x6, y1, y2:\
((x1, x2, x3, x4, x5, x6), (y1, y2)),\
num_parallel_calls=self.CPUS)
else:
dataset = dataset.map(lambda x1, x2, x3, x4, x5, y:\
((x1, x2, x3, x4, x5), y),\
num_parallel_calls=self.CPUS)
dataset = dataset.flat_map(self.flat_map_f)
        # do not shuffle the test dataset; it is still batched, with the restore batch size
if is_test_dataset == True:
if batch_1 == False:
dataset = dataset.batch(self.args.batch_size_restore)
else:
dataset = dataset.batch(1)
else:
dataset = dataset.shuffle(self.args.buffer_size_shuffle)
dataset = dataset.batch(self.batch_size)
dataset = dataset.prefetch(self.size_prefetch_buffer)
return dataset
def get_charr(self, simple_c, case):
# 0 : ă
# 1 : â, î
# 2 : a, i, t, s
# 3 : ț, ș
if simple_c == 'a':
if case == 0: # ă
return 'ă'
            elif case == 1:  # â
return 'â'
elif case == 2:
return 'a'
elif simple_c == 'i':
if case == 1:
return 'î'
elif case == 2:
return 'i'
elif simple_c == 't':
if case == 3:
return 'ț'
elif case == 2:
return 't'
elif simple_c == 's':
if case == 3 and self.args.nr_classes == 4:
return 'ș'
elif case == 4 and self.args.nr_classes == 5:
return 'ș'
elif case == 2:
return 's'
print('the model is not trained properly')
return 'a'
def compute_prediction(self, correct_case, predicted_case, simple_c, precision_chars, recall_chars):
correct_char = self.get_charr(simple_c, correct_case)
pred_char = self.get_charr(simple_c, predicted_case)
if pred_char == correct_char:
# correct results
recall_chars[correct_char][0] += 1
precision_chars[correct_char][0] += 1
precision_chars[pred_char][1] += 1
recall_chars[correct_char][1] += 1
def get_next_possible(self, all_txt, index_full_text):
while True:
index_full_text += 1
if all_txt[index_full_text] in self.all_in:
return index_full_text
# restore char if you have the word
def restore_char(self, pred_char, original_char, original_word):
# don't modify char if already there (has diacritics)
if original_char in self.map_no_diac:
return original_char
# don't modify the word if is only uppercase letters
elif original_word.isupper():
return original_char
elif original_char.isupper():
return pred_char.upper()
return pred_char
# restore char without word
def restore_char_simple(self, pred_char, original_char):
# don't modify char if already there
if original_char in self.map_no_diac:
return original_char
elif original_char.isupper():
return pred_char.upper()
return pred_char
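    # Illustrative behaviour: restore_char_simple('ș', 's') -> 'ș',
    # restore_char_simple('ș', 'S') -> 'Ș', while an original character that already
    # carries a diacritic (e.g. 'ă') is returned unchanged.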
# restoration expects a file
def restore_file(self, sess, model, file_path, nr_batches, batch_1=False):
all_txt = []
with open(file_path, 'r', encoding='utf-8') as f:
for line in f:
all_txt.append(list(line))
all_txt = sum(all_txt, [])
if batch_1 == True:
self.batch_sizee = 1
else:
self.batch_sizee = self.args.batch_size_restore
dt_test = self.get_dataset(file_path, sess, True, True, batch_1=batch_1)
iterator_test = dt_test.make_initializable_iterator()
sess.run(iterator_test.initializer)
test_inp_pred, _ = iterator_test.get_next()
_, test_char_window_pred, test_words_pred, test_sentence_pred, _, _ = test_inp_pred
test_char_window_pred = tf.reshape(test_char_window_pred, [self.batch_sizee, self.window_character * 2 + 1])
test_words_pred = tf.reshape(test_words_pred, [self.batch_sizee, self.word_embedding_size])
test_sentence_pred = tf.reshape(test_sentence_pred, [self.batch_sizee, self.window_sentence * 2 + 1, self.word_embedding_size])
input_list = self.get_input_list(test_char_window_pred, test_words_pred, test_sentence_pred, None, None)
predictions = model.predict(x=input_list,
verbose=1,
steps=nr_batches)
sess.run(iterator_test.initializer)
index_full_text = -1
for current_prediction in range(self.batch_sizee * nr_batches):
pred_vector = predictions[current_prediction]
predicted_case = self.get_case(pred_vector)
index_full_text = self.get_next_possible(all_txt, index_full_text)
c = self.replace_char_original(all_txt[index_full_text])
c = self.replace_char(c)
pred_char = self.get_charr(c, predicted_case)
all_txt[index_full_text] = self.restore_char_simple(pred_char, all_txt[index_full_text])
res = "".join(all_txt)
return res
# restoration is done in batches
    # the text is split into 2 files - one for the big batches and one processed with batch size 1
def restore_diacritics(self, sess, model):
print(model.summary())
path1 = "".join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)]) + '_big_batches.txt'
path2 = "".join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)]) + 'small_batches.txt'
out_file = self.args.output_file_restore
txt_file = self.args.restore_diacritics
all_txt = []
with open(txt_file, 'r', encoding='utf-8') as f:
for line in f:
all_txt.append(list(line))
all_txt = sum(all_txt, [])
all_txt_chars = 0
# count nr of predictions (all possible)
for c in all_txt:
if c in self.all_in:
all_txt_chars += 1
nr_batches = all_txt_chars // self.args.batch_size_restore
partial_txt_chars = 0
if all_txt_chars >= self.args.batch_size_restore:
for i, c in enumerate(all_txt):
if c in self.all_in:
partial_txt_chars += 1
if partial_txt_chars == nr_batches * self.args.batch_size_restore:
txt_big_batches = all_txt[:i+1]
txt_small_batches = all_txt[i+1:]
break
with open(path1, 'w', encoding='utf-8') as f:
f.write("".join(txt_big_batches))
with open(path2, 'w', encoding='utf-8') as f:
f.write("".join(txt_small_batches))
s1 = self.restore_file(sess, model, path1, nr_batches, batch_1=False)
s2 = self.restore_file(sess, model, path2, all_txt_chars - partial_txt_chars, batch_1=True)
os.remove(path1)
os.remove(path2)
with open(out_file, 'w', encoding='utf-8') as f:
f.write(s1 + s2)
else:
s = self.restore_file(sess, model, txt_file, all_txt_chars, batch_1=True)
with open(out_file, 'w', encoding='utf-8') as f:
f.write(s)
print('Successfully restored in {}'.format(out_file))
def compute_test_accuracy(self, sess, model):
dt_test = self.get_dataset(self.test_files, sess, True)
iterator_test = dt_test.make_initializable_iterator()
sess.run(iterator_test.initializer)
nr_test_batches = self.args.number_samples_test
test_inp_pred, _ = iterator_test.get_next()
test_string_word_pred, test_char_window_pred, test_words_pred, test_sentence_pred, _, _ = test_inp_pred
predictions = model.predict(x=[test_char_window_pred, test_words_pred, test_sentence_pred],
verbose=1,
steps=nr_test_batches)
current_test_batch = 0
sess.run(iterator_test.initializer)
test_next_element = iterator_test.get_next()
prediction_index = 0
total_words = 0
correct_predicted_words = 0
correct_predicted_chars = 0
wrong_predicatd_chars = 0
precision_chars = {'ă': [0, 0], 'â': [0, 0], 'a': [0, 0], 'i': [0, 0], 'î': [0, 0], 's': [0, 0],\
'ș': [0, 0], 't': [0, 0], 'ț': [0, 0]}
recall_chars = {'ă': [0, 0], 'â': [0, 0], 'a': [0, 0], 'i': [0, 0], 'î': [0, 0], 's': [0, 0],\
'ș': [0, 0], 't': [0, 0], 'ț': [0, 0]}
wrong_restoration_words = {}
correct_restoration_words = {}
acc_restoration_word = {}
all_words= set()
while True:
try:
test_inp, test_out = sess.run(test_next_element)
test_string_word, _, _, _ = test_inp
current_test_batch += 1
index_batch = 0
if current_test_batch % 100 == 0:
print('batch {} out of {}'.format(current_test_batch, nr_test_batches))
while index_batch < len(test_string_word):
# skip last word no matter what
word = test_string_word[index_batch]
all_words.add(word)
nr_chars_in_word, chars_in_int = self.count_chars_in_interest(word)
if nr_chars_in_word > len(test_string_word) - index_batch:
prediction_index += len(test_string_word) - index_batch
break
correct_prediction_word = True
for i in range(nr_chars_in_word):
pred_vector = predictions[prediction_index]
predicted_case = self.get_case(pred_vector)
correct_case = self.get_case(test_out[index_batch][0])
index_batch += 1
prediction_index += 1
if predicted_case != correct_case:
correct_prediction_word = False
wrong_predicatd_chars += 1
else:
correct_predicted_chars += 1
self.compute_prediction(correct_case, predicted_case, chars_in_int[i], precision_chars, recall_chars)
total_words += 1
if correct_prediction_word == True:
correct_predicted_words += 1
if word in correct_restoration_words:
correct_restoration_words[word] += 1
else:
correct_restoration_words[word] = 1
else:
if word in wrong_restoration_words:
wrong_restoration_words[word] += 1
else:
wrong_restoration_words[word] = 1
if current_test_batch == nr_test_batches:
break
except tf.errors.OutOfRangeError:
break
index_word = 0
print('highest missed words: ')
for key, value in sorted(wrong_restoration_words.items(), key=lambda x: x[1], reverse=True):
correct = 0
if key in correct_restoration_words:
correct = correct_restoration_words[key]
print("word '" + key.decode('utf-8') + "' wrong: " + str(value) + \
' correct: ' + str(correct) + ' accuracy: ' + str(1.0 * correct / (value + correct)))
index_word += 1
if index_word == self.args.top_wrong_words_restoration:
break
print('precision per character: ')
for key, values in precision_chars.items():
if values[1] != 0:
p = values[0] / values[1]
print(key + ': ' + str(p))
precision_chars[key] = p
print('recall per character: ')
for key, values in recall_chars.items():
if values[1] != 0:
r = values[0] / values[1]
print(key + ': ' + str(r))
recall_chars[key] = r
print('F1 measure per character: ')
for key, r in recall_chars.items():
p = precision_chars[key]
if p != 0 and r != 0:
f1 = 2 * p * r / (p + r)
print(key + ': ' + str(f1))
(char_acc, word_acc) = (correct_predicted_chars / (correct_predicted_chars + wrong_predicatd_chars), correct_predicted_words / total_words)
print("char acc: " + str(char_acc) + ", word accuracy: " + str(word_acc) + ' ')
return char_acc, word_acc
def set_up_folders_saved_models(self):
full_path_dir = self.args.folder_saved_model_per_epoch
if self.args.save_model == True:
if os.path.exists(full_path_dir) == False:
os.makedirs(full_path_dir)
elif self.args.load_model_name is None:
                print('a folder with the same name (' + self.args.folder_saved_model_per_epoch +\
                    ') for saving the model already exists, delete it to continue or give another name to the folder of the saved model')
exit(0)
def parse_args(self):
        # when specifying -load, also specify the number of classes and whether the model uses chars, words, sentences
# to restore diacritics run:
# python model_diacritice.py -buff 1000 -no_fast -load saved_models_diacritice/chars16-32-4classes -no_word -no_sent -classes 4 -no_dep -no_tag -restore raw_text.txt
parser = argparse.ArgumentParser(description='Run diacritics model')
parser.add_argument('-s', dest="save_model", action='store_false', default=True,\
help="save the model (and weights), default=true")
parser.add_argument('-f', dest="folder_saved_model_per_epoch",\
action='store', default="char_word_sentence",\
help="name of the folder to store the weights, default: char_word_sentence")
parser.add_argument('-c', dest="corpus_rowiki",\
action='store_true', default=False,\
help="if you want to use rowiki corpus, beside parliament corpus, default=false")
parser.add_argument('-test', dest="do_test",\
action='store_true', default=False,\
help="if you want to run test dataset, default=false")
parser.add_argument('-n_test', dest="number_samples_test",\
action='store', default=self.samples_number['par_test'] // self.batch_size, type=int,\
help="number of samples for test accuracy, if -test is not set \
this does not have any effect, default=100000")
parser.add_argument('-e', dest="epochs",\
action='store', default=20, type=int,\
help="number of epochs, default=20")
parser.add_argument('-r', dest="reset_iterators_every_epochs",\
action='store', default=10, type=int,\
help="reset the iterators for the dataset every nr epochs, default=10")
parser.add_argument('-buff', dest="buffer_size_shuffle",\
action='store', default=100000, type=int,\
help="size of the buffer for shuffle, default=100000")
parser.add_argument('-no_fast', dest="use_dummy_word_embeddings",\
action='store_true', default=False,\
help="use dummy word embeddings instead of fasttext, default=false")
parser.add_argument('-load', dest="load_model_name",\
action='store', default=None,\
help="load presaved model and weights\
, specify just the folder name, it will take the last epoch file,\
default=None")
parser.add_argument('-no_train_valid', dest="run_train_validation",\
action='store_false', default=True,\
help="run train and validation, if false you should set -load model param\
, default=True")
parser.add_argument('-mgpu', dest="percent_memory_gpu",\
action='store', default=0.2, type=float,\
help="percentage of the gpu memory to use, default=0.2")
parser.add_argument('-wrong', dest="top_wrong_words_restoration",\
action='store', default=30, type=int,\
help="hardest words to restore, default=30")
parser.add_argument('-no_char', dest="use_window_characters",\
action='store_false', default=True,\
help="if model should use window of characters, default=True")
parser.add_argument('-no_word', dest="use_word_embedding",\
action='store_false', default=True,\
help="if model should use word embeddings, default=True")
parser.add_argument('-no_sent', dest="use_sentence_embedding",\
action='store_false', default=True,\
help="if model should use sentence embedding, default=True")
parser.add_argument('-no_tags', dest="use_tags",\
action='store_false', default=True,\
help="if model should use tags of the words, default=True")
parser.add_argument('-no_deps', dest="use_deps",\
action='store_false', default=True,\
help="if model should use dependencies of the words, default=True")
parser.add_argument('-hidden', dest="hidden_neurons",\
action='append', type=int,\
help="number of neurons on the hidden layer, no default")
parser.add_argument('-classes', dest="nr_classes", default=4,\
action='store', type=int,\
help="number of classes to be used (4 or 5), default=4")
parser.add_argument('-restore', dest="restore_diacritics",
action='store', default=None,\
help="name of the file to restore diacritics")
parser.add_argument('-batch_size_restore', dest="batch_size_restore",
action='store', type=int, default=64,\
help="batch size ised for restoration")
parser.add_argument('-output_file_restore', dest="output_file_restore",
action='store', default='tmp_res.txt',\
help="output of the file with diacritics")
self.args = parser.parse_args()
self.args.folder_saved_model_per_epoch += '/'
if self.args.restore_diacritics is not None:
# by default use the best model
self.args.save_model = False
self.args.do_test = False
self.args.buffer_size_shuffle = 100
self.args.run_train_validation = False
if self.args.nr_classes != 4 and self.args.nr_classes != 5:
print('classes has to be either 4 or 5, exit')
exit(0)
for k in self.args.__dict__:
if self.args.__dict__[k] is not None:
print(k, '->', self.args.__dict__[k])
def get_number_samples(self):
if self.args.corpus_rowiki == False:
inp_batches_train = self.samples_number['par_train'] // self.batch_size
inp_batches_test = self.samples_number['par_test'] // self.batch_size
inp_batches_valid = self.samples_number['par_valid'] // self.batch_size
else:
inp_batches_train = self.samples_number['full_train'] // self.batch_size
inp_batches_test = self.samples_number['full_test'] // self.batch_size
inp_batches_valid = self.samples_number['full_valid'] // self.batch_size
return inp_batches_train, inp_batches_test, inp_batches_valid
def get_input_list(self, characters_bi_lstm_layer, word_embeddings_layer, sentence_bi_lstm_layer, tags, deps):
input_list = []
if self.args.use_window_characters == True:
input_list.append(characters_bi_lstm_layer)
if self.args.use_word_embedding == True:
input_list.append(word_embeddings_layer)
if self.args.use_sentence_embedding == True:
input_list.append(sentence_bi_lstm_layer)
if self.args.use_tags == True:
input_list.append(tags)
if self.args.use_deps == True:
input_list.append(deps)
if len(input_list) == 1:
return input_list[0]
return input_list
# class AttentionLayer(keras.layers.Layer):
# def __init__(self, **kwargs):
# super(AttentionLayer, self).__init__(**kwargs)
# # multiple inputs represented as a list (of tuples?)
# def build(self, input_shape):
# assert isinstance(input_shape, list)
# # Create a trainable weight variable for this layer.
# #
# self.kernel = self.add_weight(name='kernel',
# shape=(self.character_embedding_size, self.characters_cell_size * 2),
# initializer='uniform',
# trainable=True)
# super(AttentionLayer, self).build(input_shape) # Be sure to call this at the end
# def call(self, x):
# assert isinstance(x, list)
# # W = windows size
# # C = cell size
# # E = embedding size
# # inputt has shape (self.batch_size, W, E)
# # hidden has shape (self.batch_size, W, C * 2)
# # kernel has shape (E, C * 2)
# inputt, hidden = x
# # inputt * kernel * hidden_states.T
# return K.batch_dot(K.batch_dot(inputt, K.repeat_elements(K.expand_dims(self.kernel, axis=0), self.batch_size, axis=0)),\
# K.permute_dimensions(hidden, (0, 2, 1)))
# # res i j = correlation between input i and hidden state j
# def compute_output_shape(self, input_shape):
# assert isinstance(input_shape, list)
# return (self.batch_size, self.window_character * 2 + 1, self.window_character * 2 + 1)
# construct the model
def construct_model(self, sess):
last_epoch = 0
if self.args.load_model_name is not None:
folder_path_with_epochs = self.args.load_model_name
epochs_files = os.listdir(folder_path_with_epochs)
sorted_epochs_files = sorted(epochs_files)
self.args.load_model_name = os.path.join(self.args.load_model_name, sorted_epochs_files[-1])
print('load model name: {}'.format(self.args.load_model_name))
model = keras.models.load_model(self.args.load_model_name)
last_epoch = len(sorted_epochs_files)
else:
vocabulary_size = self.max_unicode_allowed + 1
# character window
input_character_window = keras.layers.Input(shape=(self.window_character * 2 + 1,))
character_embeddings_layer = keras.layers.Embedding(\
input_dim=vocabulary_size,\
output_dim=self.character_embedding_size)(input_character_window)
character_lstm_layer = keras.layers.LSTM(
units=self.characters_cell_size,\
input_shape=(self.window_character * 2 + 1, self.character_embedding_size,),
return_sequences=False)
characters_bi_lstm_layer = keras.layers.Bidirectional(
layer=character_lstm_layer,\
merge_mode="concat")(character_embeddings_layer)
# word token
word_embeddings_layer = keras.layers.Input(shape=(self.word_embedding_size,))
# sentence token
sentence_embeddings_layer = keras.layers.Input(shape=((self.window_sentence * 2 + 1, self.word_embedding_size,)))
sentence_lstm_layer = keras.layers.LSTM(units=self.sentence_cell_size,\
input_shape=(self.window_sentence * 2 + 1, self.word_embedding_size,))
sentence_bi_lstm_layer = keras.layers.Bidirectional(layer=sentence_lstm_layer,\
merge_mode="concat")(sentence_embeddings_layer)
# tags
tags = keras.layers.Input(shape=(1,),\
dtype='float32')
# deps
deps = keras.layers.Input(shape=(1,),\
dtype='float32')
# merged
input_list = self.get_input_list(characters_bi_lstm_layer,\
word_embeddings_layer, sentence_bi_lstm_layer, tags, deps)
if len(input_list) > 1:
merged_layer = keras.layers.concatenate(input_list, axis=-1)
else:
merged_layer = input_list[0]
prev_layer = merged_layer
#prev_layer = keras.layers.Flatten()(prev_layer)
#attention = AttentionLayer()([character_embeddings_layer, prev_layer])
#attention = keras.layers.Flatten()(attention)
attention = prev_layer
# hidden layers
for h_neurons in self.args.hidden_neurons:
attention = keras.layers.Dense(h_neurons, activation='tanh')(attention)
output = keras.layers.Dense(self.args.nr_classes, activation='softmax')(attention)
# [input_character_window, word_embeddings_layer, sentence_embeddings_layer]
model_input = self.get_input_list(input_character_window, word_embeddings_layer, sentence_embeddings_layer,\
tags, deps)
model = keras.models.Model(inputs=model_input,\
outputs=output)
model.compile(optimizer='adam',\
loss='categorical_crossentropy',\
metrics=['accuracy'])
print(model.summary())
return model, last_epoch
def run_task(self):
inp_batches_train, inp_batches_test, inp_batches_valid = self.get_number_samples()
self.set_up_folders_saved_models()
if self.args.use_dummy_word_embeddings == False:
self.model_embeddings = FastTextWrapper.load_fasttext_format(self.fast_text)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = self.args.percent_memory_gpu
with tf.Session(config=config) as sess:
if inp_batches_test < self.args.number_samples_test:
print("cannot start, too many test samples given, has to be lower than "\
+ str(inp_batches_test))
print("char, word, sentence - char cell:{}, word cell: {}, hidden: {}"\
.format(self.characters_cell_size, self.sentence_cell_size, self.neurons_dense_layer_after_merge))
model, last_epoch = self.construct_model(sess)
# run test and validation
if self.args.run_train_validation == True:
dt_train = self.get_dataset(self.train_files, sess)
dt_valid = self.get_dataset(self.valid_files, sess)
iterator_train = dt_train.make_initializable_iterator()
iterator_valid = dt_valid.make_initializable_iterator()
for i in range(last_epoch, last_epoch + self.args.epochs):
print('epoch: ' + str(i + 1))
# reset iterators
if (i - last_epoch) % self.args.reset_iterators_every_epochs == 0:
                        print('resetting iterators')
sess.run(iterator_valid.initializer)
valid_inp, valid_out = iterator_valid.get_next()
valid_char_window, valid_words, valid_sentence, valid_tags, valid_deps = valid_inp
sess.run(iterator_train.initializer)
train_inp, train_out = iterator_train.get_next()
train_char_window, train_words, train_sentence, train_tags, train_deps = train_inp
# train an epoch
train_input = self.get_input_list(train_char_window, train_words, train_sentence,\
train_tags, train_deps)
model.fit(\
train_input,\
[train_out],\
steps_per_epoch=inp_batches_train//self.args.reset_iterators_every_epochs,\
epochs=1,\
verbose=1)
# save weights
if self.args.save_model == True:
print('saving model (and weights)')
full_path_epoch_weights = os.path.join(self.args.folder_saved_model_per_epoch, 'epoch_' + str(i) + '.h5')
model.save(full_path_epoch_weights)
# validate
valid_input = self.get_input_list(valid_char_window, valid_words, valid_sentence,
valid_tags, valid_deps)
[valid_loss, valid_acc] = model.evaluate(valid_input,\
valid_out,\
verbose=1,\
steps=inp_batches_valid//self.args.reset_iterators_every_epochs)
print("validation - loss: " + str(valid_loss) + " acc: " + str(valid_acc))
# test
if self.args.do_test:
self.compute_test_accuracy(sess, model)
if self.args.restore_diacritics:
self.restore_diacritics(sess, model)
if __name__ == "__main__":
diacritics = Diacritics()
diacritics.args.use_window_characters = True
diacritics.args.use_dummy_word_embeddings = True
diacritics.args.use_sentence_embedding = False
diacritics.args.use_word_embedding = False
diacritics.args.buffer_size_shuffle = 1000
diacritics.args.load_model_name = './saved_models_diacritice/chars16-32-4classes/'
diacritics.args.nr_classes = 4
diacritics.args.use_tags = False
diacritics.args.use_deps = False
diacritics.args.restore_diacritics = 'raw_text.txt'
diacritics.args.batch_size_restore = 1024
diacritics.args.save_model = False
diacritics.args.do_test = False
diacritics.args.run_train_validation = False
diacritics.args.output_file_restore = 'tmp_res2.txt'
diacritics.run_task()
|
the-stack_106_28732 | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import concurrent.futures as futures
import os
import socket
import tempfile
from abc import ABCMeta
from asyncio import StreamReader, StreamWriter, AbstractServer
from functools import lru_cache
from hashlib import md5
from typing import Any, Dict, Callable, Coroutine, Type
from urllib.parse import urlparse
from ....serialization import AioSerializer, AioDeserializer, deserialize
from ....utils import implements, to_binary, classproperty
from .base import Channel, ChannelType, Server, Client
from .core import register_client, register_server
from .utils import read_buffers, write_buffers
class SocketChannel(Channel):
__slots__ = 'reader', 'writer', '_channel_type'
name = 'socket'
def __init__(self,
reader: StreamReader,
writer: StreamWriter,
local_address: str = None,
dest_address: str = None,
compression: int = None,
channel_type: ChannelType = None):
super().__init__(local_address=local_address,
dest_address=dest_address,
compression=compression)
self.reader = reader
self.writer = writer
self._channel_type = channel_type
@property
@implements(Channel.type)
def type(self) -> ChannelType:
return self._channel_type
@implements(Channel.send)
async def send(self, message: Any):
# get buffers
compress = self.compression or 0
serializer = AioSerializer(message, compress=compress)
buffers = await serializer.run()
# write buffers
write_buffers(self.writer, buffers)
await self.writer.drain()
@implements(Channel.recv)
async def recv(self):
deserializer = AioDeserializer(self.reader)
header = await deserializer.get_header()
buffers = await read_buffers(header, self.reader)
return deserialize(header, buffers)
@implements(Channel.close)
async def close(self):
self.writer.close()
await self.writer.wait_closed()
@property
@implements(Channel.closed)
def closed(self):
return self.writer.is_closing()
class _BaseSocketServer(Server, metaclass=ABCMeta):
__slots__ = '_aio_server',
def __init__(self,
address: str,
aio_server: AbstractServer,
channel_handler: Callable[[Channel], Coroutine] = None):
super().__init__(address, channel_handler)
# asyncio.Server
self._aio_server = aio_server
@implements(Server.start)
async def start(self):
await self._aio_server.start_serving()
@implements(Server.join)
async def join(self, timeout=None):
if timeout is None:
await self._aio_server.serve_forever()
else:
future = asyncio.create_task(self._aio_server.serve_forever())
try:
await asyncio.wait_for(future, timeout=timeout)
except (futures.TimeoutError, asyncio.TimeoutError):
future.cancel()
@implements(Server.on_connected)
async def on_connected(self, *args, **kwargs):
reader, writer = args
local_address = kwargs.pop('local_address', None)
dest_address = kwargs.pop('dest_address', None)
if kwargs: # pragma: no cover
raise TypeError(f'{type(self).__name__} got unexpected '
f'arguments: {",".join(kwargs)}')
channel = SocketChannel(reader, writer,
local_address=local_address,
dest_address=dest_address,
channel_type=self.channel_type)
        # hand the channel over to the registered channel handler
await self.channel_handler(channel)
@implements(Server.stop)
async def stop(self):
self._aio_server.close()
await self._aio_server.wait_closed()
@property
@implements(Server.stopped)
def stopped(self) -> bool:
return not self._aio_server.is_serving()
@register_server
class SocketServer(_BaseSocketServer):
__slots__ = 'host', 'port'
scheme = None
def __init__(self,
host: str,
port: int,
aio_server: AbstractServer,
channel_handler: Callable[[Channel], Coroutine] = None):
address = f'{host}:{port}'
super().__init__(address, aio_server,
channel_handler=channel_handler)
self.host = host
self.port = port
@classproperty
@implements(Server.client_type)
def client_type(self) -> Type["Client"]:
return SocketClient
@property
@implements(Server.channel_type)
def channel_type(self) -> ChannelType:
return ChannelType.remote
@staticmethod
@implements(Server.create)
async def create(config: Dict) -> "Server":
config = config.copy()
if 'address' in config:
address = config.pop('address')
host, port = address.split(':', 1)
port = int(port)
else:
host = config.pop('host')
port = int(config.pop('port'))
handle_channel = config.pop('handle_channel')
if 'start_serving' not in config:
config['start_serving'] = False
async def handle_connection(reader, writer):
# create a channel when client connected
return await server.on_connected(reader, writer,
local_address=server.address)
aio_server = await asyncio.start_server(
handle_connection, host=host, port=port, **config)
server = SocketServer(host, port, aio_server,
channel_handler=handle_channel)
return server
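# A minimal usage sketch (the address and handler below are illustrative only, and the
# calls must run inside an asyncio event loop):
#
#   async def handle_channel(channel):
#       message = await channel.recv()
#       await channel.send(message)  # echo the message back
#
#   server = await SocketServer.create(
#       {'address': '127.0.0.1:7777', 'handle_channel': handle_channel})
#   await server.start()
#   client = await SocketClient.connect('127.0.0.1:7777')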
@register_client
class SocketClient(Client):
__slots__ = ()
scheme = SocketServer.scheme
@staticmethod
@implements(Client.connect)
async def connect(dest_address: str,
local_address: str = None,
**kwargs) -> "Client":
host, port = dest_address.split(':', 1)
port = int(port)
(reader, writer) = await asyncio.open_connection(
host=host, port=port, **kwargs)
channel = SocketChannel(reader, writer,
local_address=local_address,
dest_address=dest_address)
return SocketClient(local_address, dest_address, channel)
TEMPDIR = tempfile.gettempdir()
@lru_cache(100)
def _gen_unix_socket_default_path(process_index):
return f'{TEMPDIR}/mars/' \
f'{md5(to_binary(str(process_index))).hexdigest()}' # nosec
@register_server
class UnixSocketServer(_BaseSocketServer):
__slots__ = 'process_index', 'path'
scheme = 'unixsocket'
def __init__(self,
process_index: int,
aio_server: AbstractServer,
path: str,
channel_handler: Callable[[Channel], Coroutine] = None):
address = f'{self.scheme}:///{process_index}'
super().__init__(address, aio_server,
channel_handler=channel_handler)
self.process_index = process_index
self.path = path
@classproperty
@implements(Server.client_type)
def client_type(self) -> Type["Client"]:
return UnixSocketClient
@property
@implements(Server.channel_type)
def channel_type(self) -> ChannelType:
return ChannelType.ipc
@staticmethod
@implements(Server.create)
async def create(config: Dict) -> "Server":
config = config.copy()
if 'address' in config:
process_index = int(urlparse(config.pop('address')).path.lstrip('/'))
else:
process_index = config.pop('process_index')
handle_channel = config.pop('handle_channel')
path = config.pop('path', _gen_unix_socket_default_path(process_index))
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if 'start_serving' not in config:
config['start_serving'] = False
async def handle_connection(reader, writer):
# create a channel when client connected
return await server.on_connected(reader, writer,
local_address=server.address)
aio_server = await asyncio.start_unix_server(
handle_connection, path=path, **config)
for sock in aio_server.sockets:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
server = UnixSocketServer(process_index, aio_server, path,
channel_handler=handle_channel)
return server
@implements(Server.stop)
async def stop(self):
await super().stop()
os.remove(self.path)
@register_client
class UnixSocketClient(Client):
__slots__ = ()
scheme = UnixSocketServer.scheme
@staticmethod
@lru_cache(100)
def _get_process_index(addr):
return int(urlparse(addr).path.lstrip('/'))
@staticmethod
@implements(Client.connect)
async def connect(dest_address: str,
local_address: str = None,
**kwargs) -> "Client":
process_index = UnixSocketClient._get_process_index(dest_address)
path = kwargs.pop('path',
_gen_unix_socket_default_path(process_index))
try:
(reader, writer) = await asyncio.open_unix_connection(path, **kwargs)
except FileNotFoundError:
raise ConnectionRefusedError('Cannot connect unix socket '
'due to file not exists')
channel = SocketChannel(reader, writer,
local_address=local_address,
dest_address=dest_address)
return UnixSocketClient(local_address, dest_address, channel)
|
the-stack_106_28734 | from __future__ import absolute_import
from typing import Any, Dict
from django.views.debug import SafeExceptionReporterFilter
from django.http import HttpRequest, build_request_repr
class ZulipExceptionReporterFilter(SafeExceptionReporterFilter):
def get_post_parameters(self, request):
# type: (HttpRequest) -> Dict[str, Any]
filtered_post = SafeExceptionReporterFilter.get_post_parameters(self, request).copy()
filtered_vars = ['content', 'secret', 'password', 'key', 'api-key', 'subject', 'stream',
'subscriptions', 'to', 'csrfmiddlewaretoken', 'api_key']
for var in filtered_vars:
if var in filtered_post:
filtered_post[var] = '**********'
return filtered_post
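    # Example: a POST body such as {'api_key': 'abc', 'stream': 'test', 'foo': 'bar'}
    # is reported as {'api_key': '**********', 'stream': '**********', 'foo': 'bar'}.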
def get_request_repr(self, request):
# type: (HttpRequest) -> str
if request is None:
return repr(None)
else:
return build_request_repr(request,
POST_override=self.get_post_parameters(request),
COOKIES_override="**********",
META_override="**********")
|
the-stack_106_28737 | import tensorflow as tf
from gcn_layer import *
class GCN_graph_cls(object):
def __init__(self, feature_dim_size, hidden_size, num_GNN_layers, num_sampled, vocab_size):
# Placeholders for input, output
self.Adj_block = tf.compat.v1.sparse_placeholder(tf.float32, [None, None], name="Adj_block")
self.X_concat = tf.compat.v1.sparse_placeholder(tf.float32, [None, feature_dim_size], name="X_concat")
self.num_features_nonzero = tf.compat.v1.placeholder(tf.int32, name="num_features_nonzero")
self.dropout = tf.compat.v1.placeholder(tf.float32, name="dropout")
self.input_y = tf.compat.v1.placeholder(tf.int32, [None, 1], name="input_y")
self.placeholders = {
'adj': self.Adj_block,
'dropout': self.dropout,
'num_features_nonzero': self.num_features_nonzero
}
self.input = self.X_concat # set hidden_size = feature_dim_size if not tuning sizes of hidden stacked layers
in_hidden_size = feature_dim_size
self.output_vectors = []
        # Construct k GNN layers
for idx_layer in range(num_GNN_layers):
sparse_inputs = False
if idx_layer == 0:
sparse_inputs = True
gcn_gnn = GraphConvolution(input_dim=in_hidden_size,
output_dim=hidden_size,
placeholders=self.placeholders,
act=tf.nn.relu,
dropout=True,
sparse_inputs=sparse_inputs)
in_hidden_size = hidden_size
# run --> output --> input for next layer
self.input = gcn_gnn(self.input)
#
self.output_vectors.append(self.input)
self.output_vectors = tf.concat(self.output_vectors, axis=1)
self.output_vectors = tf.nn.dropout(self.output_vectors, 1-self.dropout)
with tf.name_scope("embedding"):
self.embedding_matrix = glorot([vocab_size, hidden_size*num_GNN_layers], name='node_embeddings')
self.softmax_biases = tf.Variable(tf.zeros([vocab_size]))
self.total_loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=self.embedding_matrix, biases=self.softmax_biases,
inputs=self.output_vectors, labels=self.input_y, num_sampled=num_sampled, num_classes=vocab_size))
self.saver = tf.compat.v1.train.Saver(tf.global_variables(), max_to_keep=500)
        tf.logging.info('Setting up the main structure') |
the-stack_106_28738 | import os
import sys
import pip.backwardcompat
from pip.backwardcompat import urllib, string_types, b, u, emailmessage
urlopen_original = pip.backwardcompat.urllib2.urlopen
class CachedResponse(object):
"""
    CachedResponse always caches url accesses and returns the cached response.
    It returns an object compatible with ``urllib.addinfourl``,
    meaning the object behaves like the result of a call such as::
>>> response = urllib2.urlopen('http://example.com')
"""
def __init__(self, url, folder):
self.headers = emailmessage.Message()
# patch due to setuptools>=0.7 header processing
# easy_install fails w/o this on windows/py2
# https://github.com/pypa/pip/issues/946#issuecomment-20860320
if sys.version_info < (3,):
def getheaders(key):
return self.headers.get_all(key)
self.headers.getheaders = getheaders
self.code = 500
self.msg = 'Internal Server Error'
# url can be a simple string, or a urllib2.Request object
if isinstance(url, string_types):
self.url = url
else:
self.url = url.get_full_url()
for key, value in url.headers.items():
self.headers[key] = value
self._body = b('')
self._set_all_fields(folder)
def _set_all_fields(self, folder):
filename = os.path.join(folder, urllib.quote(self.url, ''))
if not os.path.exists(filename):
self._cache_url(filename)
fp = open(filename, 'rb')
try:
line = fp.readline().strip()
self.code, self.msg = line.split(None, 1)
except ValueError:
raise ValueError('Bad field line: %r' % line)
self.code = int(self.code)
self.msg = u(self.msg)
for line in fp:
if line == b('\n'):
break
key, value = line.split(b(': '), 1)
self.headers[u(key)] = u(value.strip())
for line in fp:
self._body += line
fp.close()
def getcode(self):
return self.code
def geturl(self):
return self.url
def info(self):
return self.headers
def read(self, bytes=None):
"""
it can read a chunk of bytes or everything
"""
if bytes:
result = self._body[:bytes]
self._body = self._body[bytes:]
return result
return self._body
def close(self):
pass
def _cache_url(self, filepath):
response = urlopen_original(self.url)
fp = open(filepath, 'wb')
# when it uses file:// scheme, code is None and there is no msg attr
# but it has been successfully opened
status = b('%s %s' % (getattr(response, 'code', 200) or 200, getattr(response, 'msg', 'OK')))
headers = [b('%s: %s' % (key, value)) for key, value in list(response.headers.items())]
body = response.read()
fp.write(b('\n').join([status] + headers + [b(''), body]))
fp.close()
class PyPIProxy(object):
CACHE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tests_cache')
@classmethod
def setup(cls):
instance = cls()
instance._create_cache_folder()
instance._monkey_patch_urllib2_to_cache_everything()
def _monkey_patch_urllib2_to_cache_everything(self):
def urlopen(url):
return CachedResponse(url, self.CACHE_PATH)
pip.backwardcompat.urllib2.urlopen = urlopen
def _create_cache_folder(self):
if not os.path.exists(self.CACHE_PATH):
os.mkdir(self.CACHE_PATH)
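# Typical use from a test harness (a sketch; the cache folder is created one level
# above this module, named tests_cache):
#
#   PyPIProxy.setup()
#   # from here on, urllib2.urlopen() responses are read from / written to the cache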
def assert_equal(a, b):
assert a == b, "\nexpected:\n%r\ngot:\n%r" % (b, a)
def test_cache_proxy():
url = 'http://example.com'
here = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(here, urllib.quote(url, ''))
if os.path.exists(filepath):
os.remove(filepath)
response = pip.backwardcompat.urllib2.urlopen(url)
r = CachedResponse(url, here)
try:
assert_equal(r.code, response.code)
assert_equal(r.msg, response.msg)
assert_equal(r.read(), response.read())
assert_equal(r.url, response.url)
assert_equal(r.geturl(), response.geturl())
assert_equal(set(r.headers.keys()), set(response.headers.keys()))
assert_equal(set(r.info().keys()), set(response.info().keys()))
assert_equal(r.headers['content-length'], response.headers['content-length'])
finally:
os.remove(filepath)
|
the-stack_106_28739 | """
Functionality for reading NISAR data into a SICD model.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
from collections import OrderedDict
from typing import Tuple, Dict
import numpy
from numpy.polynomial import polynomial
from scipy.constants import speed_of_light
try:
import h5py
except ImportError:
h5py = None
from sarpy.compliance import string_types, int_func, bytes_to_string
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.blocks import Poly2DType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, RadarModeType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
TxFrequencyType, ChanParametersType, TxStepType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType
from sarpy.io.complex.sicd_elements.Position import PositionType, XYZPolyType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, TxFrequencyProcType, RcvChanProcType
from sarpy.io.complex.sicd_elements.RMA import RMAType, INCAType
from sarpy.io.complex.sicd_elements.Radiometric import RadiometricType, NoiseLevelType_
from sarpy.geometry import point_projection
from sarpy.io.general.base import BaseReader
from sarpy.io.general.utils import get_seconds, parse_timestring, is_file_like
from sarpy.io.complex.csk import H5Chipper
from sarpy.io.complex.utils import fit_position_xvalidation, two_dim_poly_fit
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a NISAR file. Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
NISARReader|None
`NISARReader` instance if NISAR file, `None` otherwise
"""
if h5py is None:
return None
if is_file_like(file_name):
return None
try:
nisar_details = NISARDetails(file_name)
logging.info('File {} is determined to be a NISAR file.'.format(file_name))
return NISARReader(nisar_details)
except (ImportError, IOError):
return None
###########
# parser and interpreter for hdf5 attributes
def _stringify(val):
"""
Decode the value as necessary, for hdf5 string support issues.
Parameters
----------
val : str|bytes
Returns
-------
str
"""
return bytes_to_string(val).strip()
def _get_ref_time(str_in):
"""
Extract the given reference time.
Parameters
----------
str_in : str|bytes
Returns
-------
numpy.datetime64
"""
str_in = bytes_to_string(str_in)
prefix = 'seconds since '
if not str_in.startswith(prefix):
raise ValueError('Got unexpected reference time string - {}'.format(str_in))
return parse_timestring(str_in[len(prefix):], precision='ns')
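# For illustration, a typical attribute value here looks like
# 'seconds since 2021-03-05T12:00:00.000000'; stripping the prefix and parsing
# the remainder yields a numpy.datetime64 with nanosecond precision.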
def _get_string_list(array):
return [bytes_to_string(el) for el in array]
class NISARDetails(object):
"""
    Parses and converts the NISAR metadata
"""
__slots__ = ('_file_name', )
def __init__(self, file_name):
"""
Parameters
----------
file_name : str
"""
if h5py is None:
raise ImportError("Can't read NISAR files, because the h5py dependency is missing.")
if not os.path.isfile(file_name):
raise IOError('Path {} is not a file'.format(file_name))
with h5py.File(file_name, 'r') as hf:
# noinspection PyBroadException
try:
# noinspection PyUnusedLocal
gp = hf['/science/LSAR/SLC']
except:
raise IOError('The hdf5 file does not have required path /science/LSAR/SLC')
self._file_name = file_name
@property
def file_name(self):
"""
str: the file name
"""
return self._file_name
@staticmethod
def _get_frequency_list(hf):
"""
Gets the list of frequencies.
Parameters
----------
hf : h5py.File
Returns
-------
numpy.ndarray
"""
return _get_string_list(hf['/science/LSAR/identification/listOfFrequencies'][:])
@staticmethod
def _get_collection_times(hf):
"""
Gets the collection start and end times, and inferred duration.
Parameters
----------
hf : h5py.File
The h5py File object.
Returns
-------
(numpy.datetime64, numpy.datetime64, float)
Start and end times and duration
"""
start = parse_timestring(_stringify(hf['/science/LSAR/identification/zeroDopplerStartTime'][()]), precision='ns')
end = parse_timestring(_stringify(hf['/science/LSAR/identification/zeroDopplerEndTime'][()]), precision='ns')
duration = get_seconds(end, start, precision='ns')
return start, end, duration
@staticmethod
def _get_zero_doppler_data(hf, base_sicd):
"""
Gets zero-doppler parameters.
Parameters
----------
hf : h5py.File
base_sicd : SICDType
Returns
-------
(numpy.ndarray, float, numpy.ndarray, numpy.ndarray)
The azimuth zero-doppler time array, azimuth zero-doppler time spacing,
grid range array, range zero doppler time array.
"""
gp = hf['/science/LSAR/SLC/swaths']
ds = gp['zeroDopplerTime']
ref_time = _get_ref_time(ds.attrs['units'])
zd_time = ds[:] + get_seconds(ref_time, base_sicd.Timeline.CollectStart, precision='ns')
ss_az_s = gp['zeroDopplerTimeSpacing'][()]
if base_sicd.SCPCOA.SideOfTrack == 'L':
zd_time = zd_time[::-1]
ss_az_s *= -1
gp = hf['/science/LSAR/SLC/metadata/processingInformation/parameters']
grid_r = gp['slantRange'][:]
ds = gp['zeroDopplerTime']
ref_time = _get_ref_time(ds.attrs['units'])
grid_zd_time = ds[:] + get_seconds(ref_time, base_sicd.Timeline.CollectStart, precision='ns')
return zd_time, ss_az_s, grid_r, grid_zd_time
def _get_base_sicd(self, hf):
"""
Defines the base SICD object, to be refined with further details.
Returns
-------
SICDType
"""
def get_collection_info():
# type: () -> CollectionInfoType
gp = hf['/science/LSAR/identification']
return CollectionInfoType(
CollectorName=_stringify(hf.attrs['mission_name']),
CoreName='{0:07d}_{1:s}'.format(gp['absoluteOrbitNumber'][()],
_stringify(gp['trackNumber'][()])),
CollectType='MONOSTATIC',
Classification='UNCLASSIFIED',
RadarMode=RadarModeType(ModeType='STRIPMAP'))
def get_image_creation():
# type: () -> ImageCreationType
application = 'ISCE'
# noinspection PyBroadException
try:
application = '{} {}'.format(
application,
_stringify(hf['/science/LSAR/SLC/metadata/processingInformation/algorithms/ISCEVersion'][()]))
except:
pass
from sarpy.__about__ import __version__
# TODO: DateTime?
return ImageCreationType(
Application=application,
Site='Unknown',
Profile='sarpy {}'.format(__version__))
def get_geo_data():
# type: () -> GeoDataType
# seeds a rough SCP for projection usage
poly_str = _stringify(hf['/science/LSAR/identification/boundingPolygon'][()])
beg_str = 'POLYGON (('
if not poly_str.startswith(beg_str):
raise ValueError('Unexpected polygon string {}'.format(poly_str))
parts = poly_str[len(beg_str):-2].strip().split(',')
if len(parts) != 5:
raise ValueError('Unexpected polygon string parts {}'.format(parts))
lats_lons = numpy.zeros((4, 2), dtype=numpy.float64)
for i, part in enumerate(parts[:-1]):
spart = part.strip().split()
if len(spart) != 2:
raise ValueError('Unexpected polygon string parts {}'.format(parts))
lats_lons[i, :] = float(spart[1]), float(spart[0])
llh = numpy.zeros((3, ), dtype=numpy.float64)
llh[0:2] = numpy.mean(lats_lons, axis=0)
llh[2] = numpy.mean(hf['/science/LSAR/SLC/metadata/processingInformation/parameters/referenceTerrainHeight'][:])
return GeoDataType(SCP=SCPType(LLH=llh))
def get_grid():
# type: () -> GridType
# TODO: Future Change Required - JPL states that uniform weighting in data simulated
# from UAVSAR is a placeholder, not an accurate description of the data.
# At this point, it is not clear what the final weighting description for NISAR
# will be.
gp = hf['/science/LSAR/SLC/metadata/processingInformation/parameters']
row_wgt = gp['rangeChirpWeighting'][:]
win_name = 'UNIFORM' if numpy.all(row_wgt == row_wgt[0]) else 'UNKNOWN'
row = DirParamType(
Sgn=-1,
DeltaKCOAPoly=[[0,]],
WgtFunct=numpy.cast[numpy.float64](row_wgt),
WgtType=WgtTypeType(WindowName=win_name))
col_wgt = gp['azimuthChirpWeighting'][:]
win_name = 'UNIFORM' if numpy.all(col_wgt == col_wgt[0]) else 'UNKNOWN'
col = DirParamType(
Sgn=-1,
KCtr=0,
WgtFunct=numpy.cast[numpy.float64](col_wgt),
WgtType=WgtTypeType(WindowName=win_name))
return GridType(ImagePlane='SLANT', Type='RGZERO', Row=row, Col=col)
def get_timeline():
# type: () -> TimelineType
# NB: IPPEnd must be set, but will be replaced
return TimelineType(
CollectStart=collect_start,
CollectDuration=duration,
IPP=[IPPSetType(index=0, TStart=0, TEnd=duration, IPPStart=0, IPPEnd=0), ])
def get_position():
# type: () -> PositionType
gp = hf['/science/LSAR/SLC/metadata/orbit']
ref_time = _get_ref_time(gp['time'].attrs['units'])
T = gp['time'][:] + get_seconds(ref_time, collect_start, precision='ns')
Pos = gp['position'][:]
Vel = gp['velocity'][:]
P_x, P_y, P_z = fit_position_xvalidation(T, Pos, Vel, max_degree=8)
return PositionType(ARPPoly=XYZPolyType(X=P_x, Y=P_y, Z=P_z))
def get_scpcoa():
# type: () -> SCPCOAType
# remaining fields set later
sot = _stringify(hf['/science/LSAR/identification/lookDirection'][()])[0].upper()
return SCPCOAType(SideOfTrack=sot)
def get_image_formation():
# type: () -> ImageFormationType
return ImageFormationType(
ImageFormAlgo='RMA',
TStartProc=0,
TEndProc=duration,
STBeamComp='NO',
ImageBeamComp='SV',
AzAutofocus='NO',
RgAutofocus='NO',
RcvChanProc=RcvChanProcType(NumChanProc=1, PRFScaleFactor=1))
def get_rma():
# type: () -> RMAType
return RMAType(RMAlgoType='OMEGA_K', INCA=INCAType(DopCentroidCOA=True))
collect_start, collect_end, duration = self._get_collection_times(hf)
collection_info = get_collection_info()
image_creation = get_image_creation()
geo_data = get_geo_data()
grid = get_grid()
timeline = get_timeline()
position = get_position()
scpcoa = get_scpcoa()
image_formation = get_image_formation()
rma = get_rma()
return SICDType(
CollectionInfo=collection_info,
ImageCreation=image_creation,
GeoData=geo_data,
Grid=grid,
Timeline=timeline,
Position=position,
SCPCOA=scpcoa,
ImageFormation=image_formation,
RMA=rma)
@staticmethod
def _get_freq_specific_sicd(gp, base_sicd):
"""
Gets the frequency specific sicd.
Parameters
----------
gp : h5py.Group
base_sicd : SICDType
Returns
-------
        (SICDType, list, list, float)
            frequency dependent sicd, list of polarization names, list of formatted polarizations for sicd,
the processed center frequency
"""
def update_grid():
row_imp_resp_bw = 2*gp['processedRangeBandwidth'][()]/speed_of_light
t_sicd.Grid.Row.SS = gp['slantRangeSpacing'][()]
t_sicd.Grid.Row.ImpRespBW = row_imp_resp_bw
t_sicd.Grid.Row.DeltaK1 = -0.5*row_imp_resp_bw
t_sicd.Grid.Row.DeltaK2 = -t_sicd.Grid.Row.DeltaK1
def update_timeline():
prf = gp['nominalAcquisitionPRF'][()]
t_sicd.Timeline.IPP[0].IPPEnd = prf*t_sicd.Timeline.CollectDuration
t_sicd.Timeline.IPP[0].IPPPoly = [0, prf]
def define_radar_collection():
tx_rcv_pol_t = []
tx_pol = []
for entry in pols:
tx_rcv_pol_t.append('{}:{}'.format(entry[0], entry[1]))
if entry[0] not in tx_pol:
tx_pol.append(entry[0])
center_freq_t = gp['acquiredCenterFrequency'][()]
bw = gp['acquiredRangeBandwidth'][()]
tx_freq = TxFrequencyType(Min=center_freq_t - 0.5*bw, Max=center_freq_t + 0.5*bw)
rcv_chans = [ChanParametersType(TxRcvPolarization=pol) for pol in tx_rcv_pol_t]
if len(tx_pol) == 1:
tx_sequence = None
tx_pol = tx_pol[0]
else:
tx_sequence = [TxStepType(WFIndex=j+1, TxPolarization=pol) for j, pol in enumerate(tx_pol)]
tx_pol = 'SEQUENCE'
t_sicd.RadarCollection = RadarCollectionType(
TxFrequency=tx_freq,
RcvChannels=rcv_chans,
TxPolarization=tx_pol,
TxSequence=tx_sequence)
return tx_rcv_pol_t
def update_image_formation():
center_freq_t = gp['processedCenterFrequency'][()]
bw = gp['processedRangeBandwidth'][()]
t_sicd.ImageFormation.TxFrequencyProc = TxFrequencyProcType(
MinProc=center_freq_t - 0.5*bw,
MaxProc=center_freq_t + 0.5*bw, )
return center_freq_t
pols = _get_string_list(gp['listOfPolarizations'][:])
t_sicd = base_sicd.copy()
update_grid()
update_timeline()
tx_rcv_pol = define_radar_collection()
center_freq = update_image_formation()
return t_sicd, pols, tx_rcv_pol, center_freq
@staticmethod
def _get_pol_specific_sicd(hf, ds, base_sicd, pol_name, freq_name, j, pol,
r_ca_sampled, zd_time, grid_zd_time, grid_r, doprate_sampled,
dopcentroid_sampled, center_freq, ss_az_s, dop_bw, beta0, gamma0, sigma0):
"""
Gets the frequency/polarization specific sicd.
Parameters
----------
hf : h5py.File
ds : h5py.Dataset
base_sicd : SICDType
pol_name : str
freq_name : str
j : int
pol : str
r_ca_sampled : numpy.ndarray
zd_time : numpy.ndarray
grid_zd_time : numpy.ndarray
grid_r : numpy.ndarray
doprate_sampled : numpy.ndarray
dopcentroid_sampled : numpy.ndarray
center_freq : float
ss_az_s : float
dop_bw : float
Returns
-------
(SICDType, Tuple[int])
"""
def define_image_data():
dtype = ds.dtype.name
if dtype in ('float32', 'complex64'):
pixel_type = 'RE32F_IM32F'
elif dtype == 'int16':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled dtype {}'.format(dtype))
t_sicd.ImageData = ImageDataType(
PixelType=pixel_type,
NumRows=shape[0],
NumCols=shape[1],
FirstRow=0,
FirstCol=0,
SCPPixel=[0.5*shape[0], 0.5*shape[1]],
FullImage=[shape[0], shape[1]])
def update_image_formation():
t_sicd.ImageFormation.RcvChanProc.ChanIndices = [j, ]
t_sicd.ImageFormation.TxRcvPolarizationProc = pol
def update_inca_and_grid():
t_sicd.RMA.INCA.R_CA_SCP = r_ca_sampled[t_sicd.ImageData.SCPPixel.Row]
scp_ca_time = zd_time[t_sicd.ImageData.SCPPixel.Col]
# compute DRateSFPoly
# velocity at scp ca time
vel_ca = t_sicd.Position.ARPPoly.derivative_eval(scp_ca_time, der_order=1)
# squared magnitude
vm_ca_sq = numpy.sum(vel_ca*vel_ca)
# polynomial coefficient for function representing range as a function of range distance from SCP
r_ca_poly = numpy.array([t_sicd.RMA.INCA.R_CA_SCP, 1], dtype=numpy.float64)
# closest Doppler rate polynomial to SCP
min_ind = numpy.argmin(numpy.absolute(grid_zd_time - scp_ca_time))
# define range coordinate grid
coords_rg_m = grid_r - t_sicd.RMA.INCA.R_CA_SCP
# determine dop_rate_poly coordinates
dop_rate_poly = polynomial.polyfit(coords_rg_m, -doprate_sampled[min_ind, :], 4) # why fourth order?
t_sicd.RMA.INCA.FreqZero = center_freq
t_sicd.RMA.INCA.DRateSFPoly = Poly2DType(Coefs=numpy.reshape(
-numpy.convolve(dop_rate_poly, r_ca_poly)*speed_of_light/(2*center_freq*vm_ca_sq), (-1, 1)))
# update Grid.Col parameters
t_sicd.Grid.Col.SS = numpy.sqrt(vm_ca_sq)*abs(ss_az_s)*t_sicd.RMA.INCA.DRateSFPoly.Coefs[0, 0]
t_sicd.Grid.Col.ImpRespBW = min(abs(dop_bw*ss_az_s), 1)/t_sicd.Grid.Col.SS
t_sicd.RMA.INCA.TimeCAPoly = [scp_ca_time, ss_az_s/t_sicd.Grid.Col.SS]
#TimeCOAPoly/DopCentroidPoly/DeltaKCOAPoly
coords_az_m = (grid_zd_time - scp_ca_time)*t_sicd.Grid.Col.SS/ss_az_s
            # create the 2d grids
coords_rg_2d_t, coords_az_2d_t = numpy.meshgrid(coords_rg_m, coords_az_m, indexing='xy')
coefs, residuals, rank, sing_values = two_dim_poly_fit(
coords_rg_2d_t, coords_az_2d_t, dopcentroid_sampled,
x_order=3, y_order=3, x_scale=1e-3, y_scale=1e-3, rcond=1e-40)
logging.info(
'The dop_centroid_poly fit details:\nroot mean square residuals = {}\nrank = {}\nsingular values = {}'.format(
residuals, rank, sing_values))
t_sicd.RMA.INCA.DopCentroidPoly = Poly2DType(Coefs=coefs)
t_sicd.Grid.Col.DeltaKCOAPoly = Poly2DType(Coefs=coefs*ss_az_s/t_sicd.Grid.Col.SS)
timeca_sampled = numpy.outer(grid_zd_time, numpy.ones((grid_r.size, )))
time_coa_sampled = timeca_sampled + (dopcentroid_sampled/doprate_sampled)
coefs, residuals, rank, sing_values = two_dim_poly_fit(
coords_rg_2d_t, coords_az_2d_t, time_coa_sampled,
x_order=3, y_order=3, x_scale=1e-3, y_scale=1e-3, rcond=1e-40)
logging.info(
'The time_coa_poly fit details:\nroot mean square residuals = {}\nrank = {}\nsingular values = {}'.format(
residuals, rank, sing_values))
t_sicd.Grid.TimeCOAPoly = Poly2DType(Coefs=coefs)
return coords_rg_2d_t, coords_az_2d_t
def define_radiometric():
def get_poly(ds, name):
array = ds[:]
fill = ds.attrs['_FillValue']
boolc = (array != fill)
if numpy.any(boolc):
array = array[boolc]
if numpy.any(array != array[0]):
coefs, residuals, rank, sing_values = two_dim_poly_fit(
coords_rg_2d[boolc], coords_az_2d[boolc], array,
x_order=3, y_order=3, x_scale=1e-3, y_scale=1e-3, rcond=1e-40)
logging.info(
'The {} fit details:\nroot mean square residuals = {}\nrank = {}\nsingular values = {}'.format(
name, residuals, rank, sing_values))
else:
# it's constant, so just use a constant polynomial
coefs = [[array[0], ], ]
logging.info('The {} values are constant'.format(name))
return Poly2DType(Coefs=coefs)
else:
logging.warning('No non-trivial values for {} provided.'.format(name))
return None
beta0_poly = get_poly(beta0, 'beta0')
gamma0_poly = get_poly(gamma0, 'gamma0')
sigma0_poly = get_poly(sigma0, 'sigma0')
nesz = hf['/science/LSAR/SLC/metadata/calibrationInformation/frequency{}/{}/nes0'.format(freq_name,
pol_name)][:]
noise_samples = nesz - (10 * numpy.log10(sigma0_poly.Coefs[0, 0]))
coefs, residuals, rank, sing_values = two_dim_poly_fit(
coords_rg_2d, coords_az_2d, noise_samples,
x_order=3, y_order=3, x_scale=1e-3, y_scale=1e-3, rcond=1e-40)
logging.info(
'The noise_poly fit details:\nroot mean square residuals = {}\nrank = {}\nsingular values = {}'.format(
residuals, rank, sing_values))
t_sicd.Radiometric = RadiometricType(
BetaZeroSFPoly=beta0_poly,
GammaZeroSFPoly=gamma0_poly,
SigmaZeroSFPoly=sigma0_poly,
NoiseLevel=NoiseLevelType_(
NoiseLevelType='ABSOLUTE', NoisePoly=Poly2DType(Coefs=coefs)))
def update_geodata():
ecf = point_projection.image_to_ground(
[t_sicd.ImageData.SCPPixel.Row, t_sicd.ImageData.SCPPixel.Col], t_sicd)
t_sicd.GeoData.SCP = SCPType(ECF=ecf) # LLH will be populated
t_sicd = base_sicd.copy()
shape = (int_func(ds.shape[1]), int_func(ds.shape[0]))
define_image_data()
update_image_formation()
coords_rg_2d, coords_az_2d = update_inca_and_grid()
define_radiometric()
update_geodata()
t_sicd.derive()
t_sicd.populate_rniirs(override=False)
return t_sicd, shape
def get_sicd_collection(self):
"""
Get the sicd collection for the bands.
Returns
-------
        Tuple[Dict[str, SICDType], Dict[str, Tuple[int, int]], Tuple[bool, bool, bool]]
the first entry is a dictionary of the form {band_name: sicd}
the second entry is of the form {band_name: shape}
the third entry is the symmetry tuple
"""
# TODO: check if the hdf already has the sicds defined, and fish them out if so.
with h5py.File(self.file_name, 'r') as hf:
# fetch the base shared sicd
base_sicd = self._get_base_sicd(hf)
# prepare our output workspace
out_sicds = OrderedDict()
shapes = OrderedDict()
symmetry = (base_sicd.SCPCOA.SideOfTrack == 'L', False, True)
# fetch the common use data for frequency issues
collect_start, collect_end, duration = self._get_collection_times(hf)
zd_time, ss_az_s, grid_r, grid_zd_time = self._get_zero_doppler_data(hf, base_sicd)
gp = hf['/science/LSAR/SLC/metadata/calibrationInformation/geometry']
beta0 = gp['beta0']
gamma0 = gp['gamma0']
sigma0 = gp['sigma0']
# formulate the frequency specific sicd information
freqs = self._get_frequency_list(hf)
for i, freq in enumerate(freqs):
gp_name = '/science/LSAR/SLC/swaths/frequency{}'.format(freq)
gp = hf[gp_name]
freq_sicd, pols, tx_rcv_pol, center_freq = self._get_freq_specific_sicd(gp, base_sicd)
# formulate the frequency dependent doppler grid
# TODO: Future Change Required - processedAzimuthBandwidth acknowledged
# by JPL to be wrong in simulated datasets.
dop_bw = gp['processedAzimuthBandwidth'][()]
gp2 = hf['/science/LSAR/SLC/metadata/processingInformation/parameters/frequency{}'.format(freq)]
dopcentroid_sampled = gp2['dopplerCentroid'][:]
doprate_sampled = gp2['azimuthFMRate'][:]
r_ca_sampled = gp['slantRange'][:]
# formulate the frequency/polarization specific sicd information
for j, pol in enumerate(pols):
ds_name = '{}/{}'.format(gp_name, pol)
ds = gp[pol]
pol_sicd, shape = self._get_pol_specific_sicd(
hf, ds, freq_sicd, pol, freq, j, tx_rcv_pol[j],
r_ca_sampled, zd_time, grid_zd_time, grid_r,
doprate_sampled, dopcentroid_sampled, center_freq,
ss_az_s, dop_bw, beta0, gamma0, sigma0)
out_sicds[ds_name] = pol_sicd
shapes[ds_name] = ds.shape[:2]
return out_sicds, shapes, symmetry
################
# The NISAR reader
class NISARReader(BaseReader, SICDTypeReader):
"""
Gets a reader type object for NISAR files
"""
__slots__ = ('_nisar_details', )
def __init__(self, nisar_details):
"""
Parameters
----------
nisar_details : str|NISARDetails
file name or NISARDetails object
"""
if isinstance(nisar_details, string_types):
nisar_details = NISARDetails(nisar_details)
if not isinstance(nisar_details, NISARDetails):
raise TypeError('The input argument for NISARReader must be a '
'filename or NISARDetails object')
self._nisar_details = nisar_details
sicd_data, shape_dict, symmetry = nisar_details.get_sicd_collection()
chippers = []
sicds = []
for band_name in sicd_data:
sicds.append(sicd_data[band_name])
chippers.append(H5Chipper(nisar_details.file_name, band_name, shape_dict[band_name], symmetry))
SICDTypeReader.__init__(self, tuple(sicds))
BaseReader.__init__(self, tuple(chippers), reader_type="SICD")
@property
def nisar_details(self):
# type: () -> NISARDetails
"""
NISARDetails: The nisar details object.
"""
return self._nisar_details
@property
def file_name(self):
return self.nisar_details.file_name
|
the-stack_106_28740 | import os
import threading
import copy
from PackageBuildDataGenerator import PackageBuildDataGenerator
from Logger import Logger
from constants import constants
from CommandUtils import CommandUtils
from PackageUtils import PackageUtils
from ToolChainUtils import ToolChainUtils
from Scheduler import Scheduler
from ThreadPool import ThreadPool
from SpecData import SPECS
from StringUtils import StringUtils
from Sandbox import Chroot, Container
class PackageManager(object):
def __init__(self, logName=None, logPath=None, pkgBuildType="chroot"):
if logName is None:
logName = "PackageManager"
if logPath is None:
logPath = constants.logPath
self.logName = logName
self.logPath = logPath
self.logLevel = constants.logLevel
self.logger = Logger.getLogger(logName, logPath, constants.logLevel)
self.mapCyclesToPackageList = {}
self.mapPackageToCycle = {}
self.sortedPackageList = []
self.listOfPackagesAlreadyBuilt = set()
self.pkgBuildType = pkgBuildType
if self.pkgBuildType == "container":
import docker
self.dockerClient = docker.from_env(version="auto")
def buildToolChain(self):
self.logger.info("Step 1 : Building the core toolchain packages for " + constants.currentArch)
self.logger.info(constants.listCoreToolChainPackages)
self.logger.info("")
pkgCount = 0
pkgUtils = PackageUtils(self.logName, self.logPath)
coreToolChainYetToBuild = []
doneList = []
for package in constants.listCoreToolChainPackages:
version = SPECS.getData().getHighestVersion(package)
rpmPkg = pkgUtils.findRPMFile(package, version)
self.sortedPackageList.append(package+"-"+version)
if rpmPkg is not None:
doneList.append(package+'-'+version)
continue
else:
coreToolChainYetToBuild.append(package)
self.listOfPackagesAlreadyBuilt = set(doneList)
pkgCount = len(coreToolChainYetToBuild)
if coreToolChainYetToBuild:
self.logger.info("The following core toolchain packages need to be built :")
self.logger.info(coreToolChainYetToBuild)
else:
self.logger.info("Core toolchain packages are already available")
self.logger.info("")
return pkgCount
Scheduler.coreToolChainBuild = True
self._buildPackages(1)
Scheduler.coreToolChainBuild = False
self.logger.debug("Successfully built core toolchain")
self.logger.info("-" * 45 + "\n")
return pkgCount
def buildToolChainPackages(self, buildThreads):
pkgCount = self.buildToolChain()
        # Stage 2 makes sense only for native tools
if not constants.crossCompiling:
if self.pkgBuildType == "container":
# Stage 1 build container
#TODO image name constants.buildContainerImageName
if pkgCount > 0 or not self.dockerClient.images.list(constants.buildContainerImage):
self._createBuildContainer(True)
self.logger.info("Step 2 : Building stage 2 of the toolchain...")
self.logger.info(constants.listToolChainPackages)
self.logger.info("")
self._buildGivenPackages(constants.listToolChainPackages, buildThreads)
self.logger.info("The entire toolchain is now available")
self.logger.info(45 * '-')
self.logger.info("")
if self.pkgBuildType == "container":
# Stage 2 build container
#TODO: rebuild container only if anything in listToolChainPackages was built
self._createBuildContainer(False)
def buildPackages(self, listPackages, buildThreads):
if constants.rpmCheck:
constants.rpmCheck = False
constants.addMacro("with_check", "0")
self.buildToolChainPackages(buildThreads)
self._buildTestPackages(buildThreads)
constants.rpmCheck = True
constants.addMacro("with_check", "1")
self._buildGivenPackages(listPackages, buildThreads)
else:
self.buildToolChainPackages(buildThreads)
self.logger.info("Step 3 : Building the following package(s) and dependencies...")
self.logger.info(listPackages)
self.logger.info("")
self._buildGivenPackages(listPackages, buildThreads)
self.logger.info("Package build has been completed")
self.logger.info("")
def _readPackageBuildData(self, listPackages):
try:
pkgBuildDataGen = PackageBuildDataGenerator(self.logName, self.logPath)
self.mapCyclesToPackageList, self.mapPackageToCycle, self.sortedPackageList = (
pkgBuildDataGen.getPackageBuildData(listPackages))
except Exception as e:
self.logger.exception(e)
self.logger.error("unable to get sorted list")
return False
return True
    # Returns the set of packages whose spec file has all subpackages already
    # built, as "name-version" strings like
    # {"name1-vers1", "name2-vers2", ...}
def _readAlreadyAvailablePackages(self):
listAvailablePackages = set()
pkgUtils = PackageUtils(self.logName, self.logPath)
listPackages = SPECS.getData().getListPackages()
for package in listPackages:
for version in SPECS.getData().getVersions(package):
# Mark package available only if all subpackages are available
packageIsAlreadyBuilt=True
listRPMPackages = SPECS.getData().getRPMPackages(package, version)
for rpmPkg in listRPMPackages:
if pkgUtils.findRPMFile(rpmPkg, version) is None:
packageIsAlreadyBuilt=False
                        break
if packageIsAlreadyBuilt:
listAvailablePackages.add(package+"-"+version)
return listAvailablePackages
def _calculateParams(self, listPackages):
self.mapCyclesToPackageList.clear()
self.mapPackageToCycle.clear()
self.sortedPackageList = []
self.listOfPackagesAlreadyBuilt = self._readAlreadyAvailablePackages()
if self.listOfPackagesAlreadyBuilt:
self.logger.debug("List of already available packages:")
self.logger.debug(self.listOfPackagesAlreadyBuilt)
listPackagesToBuild = copy.copy(listPackages)
for pkg in listPackages:
if (pkg in self.listOfPackagesAlreadyBuilt and
not constants.rpmCheck):
listPackagesToBuild.remove(pkg)
if constants.rpmCheck:
self.sortedPackageList = listPackagesToBuild
else:
if not self._readPackageBuildData(listPackagesToBuild):
return False
if self.sortedPackageList:
self.logger.info("List of packages yet to be built...")
self.logger.info(str(set(self.sortedPackageList) - set(self.listOfPackagesAlreadyBuilt)))
self.logger.info("")
return True
def _buildTestPackages(self, buildThreads):
self.buildToolChain()
self._buildGivenPackages(constants.listMakeCheckRPMPkgtoInstall, buildThreads)
def _initializeThreadPool(self, statusEvent):
ThreadPool.clear()
ThreadPool.mapPackageToCycle = self.mapPackageToCycle
ThreadPool.logger = self.logger
ThreadPool.statusEvent = statusEvent
ThreadPool.pkgBuildType = self.pkgBuildType
def _initializeScheduler(self, statusEvent):
Scheduler.setLog(self.logName, self.logPath, self.logLevel)
Scheduler.setParams(self.sortedPackageList, self.listOfPackagesAlreadyBuilt)
Scheduler.setEvent(statusEvent)
Scheduler.stopScheduling = False
def _buildGivenPackages(self, listPackages, buildThreads):
# Extend listPackages from ["name1", "name2",..] to ["name1-vers1", "name2-vers2",..]
listPackageNamesAndVersions=set()
for pkg in listPackages:
base = SPECS.getData().getSpecName(pkg)
for version in SPECS.getData().getVersions(base):
listPackageNamesAndVersions.add(base+"-"+version)
returnVal = self._calculateParams(listPackageNamesAndVersions)
if not returnVal:
self.logger.error("Unable to set parameters. Terminating the package manager.")
raise Exception("Unable to set parameters")
self._buildPackages(buildThreads)
def _buildPackages(self, buildThreads):
statusEvent = threading.Event()
self._initializeScheduler(statusEvent)
self._initializeThreadPool(statusEvent)
for i in range(0, buildThreads):
workerName = "WorkerThread" + str(i)
ThreadPool.addWorkerThread(workerName)
ThreadPool.startWorkerThread(workerName)
statusEvent.wait()
Scheduler.stopScheduling = True
self.logger.debug("Waiting for all remaining worker threads")
ThreadPool.join_all()
setFailFlag = False
allPackagesBuilt = False
if Scheduler.isAnyPackagesFailedToBuild():
setFailFlag = True
if Scheduler.isAllPackagesBuilt():
allPackagesBuilt = True
if setFailFlag:
self.logger.error("Some of the packages failed:")
self.logger.error(Scheduler.listOfFailedPackages)
raise Exception("Failed during building package")
if not setFailFlag:
if allPackagesBuilt:
self.logger.debug("All packages built successfully")
else:
                self.logger.error("Build stopped unexpectedly. Unknown error.")
raise Exception("Unknown error")
def _createBuildContainer(self, usePublishedRPMs):
self.logger.debug("Generating photon build container..")
try:
#TODO image name constants.buildContainerImageName
self.dockerClient.images.remove(constants.buildContainerImage, force=True)
except Exception as e:
#TODO - better handling
self.logger.debug("Photon build container image not found.")
# Create toolchain chroot and install toolchain RPMs
chroot = None
try:
#TODO: constants.tcrootname
chroot = Chroot(self.logger)
chroot.create("toolchain-chroot")
tcUtils = ToolChainUtils("toolchain-chroot", self.logPath)
tcUtils.installToolchainRPMS(chroot, usePublishedRPMS=usePublishedRPMs)
except Exception as e:
if chroot:
chroot.destroy()
raise e
self.logger.debug("createBuildContainer: " + chroot.getID())
# Create photon build container using toolchain chroot
chroot.unmountAll()
#TODO: Coalesce logging
cmdUtils = CommandUtils()
cmd = "cd " + chroot.getID() + " && tar -czf ../tcroot.tar.gz ."
cmdUtils.runCommandInShell(cmd, logfn=self.logger.debug)
cmd = "mv " + chroot.getID() + "/../tcroot.tar.gz ."
cmdUtils.runCommandInShell(cmd, logfn=self.logger.debug)
#TODO: Container name, docker file name from constants.
self.dockerClient.images.build(tag=constants.buildContainerImage,
path=".",
rm=True,
dockerfile="Dockerfile.photon_build_container")
# Cleanup
cmd = "rm -f ./tcroot.tar.gz"
cmdUtils.runCommandInShell(cmd, logfn=self.logger.debug)
chroot.destroy()
self.logger.debug("Photon build container successfully created.")
|
the-stack_106_28741 | from xml.dom.minidom import parse, parseString
dom = parse("eat100.xml")
stimuli = dom.getElementsByTagName('stimulus')
words = dict()
for i in range(100):
stim_word = str(stimuli[i].attributes['word'].value)
responses = stimuli[i].getElementsByTagName('response')
response_words = []
for response in responses:
response_word = str(response.attributes['word'].value)
percent = str(response.attributes['r'].value)
response_words.append((response_word, percent))
words[stim_word] = response_words
for word in words:
for response in words[word]:
line = "<eat:" + word + ">" + "cn:relates-to<eat:" + response[0] + ">" + "r=" + response[1] + '\n'
print(line) |
the-stack_106_28742 | """Metadata generation logic for source distributions.
"""
import atexit
import logging
import os
from pip._internal.exceptions import InstallationError
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.setuptools_build import make_setuptools_egg_info_args
from pip._internal.utils.subprocess import (
call_subprocess,
runner_with_spinner_message,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import Callable, List, Optional
from pip._internal.req.req_install import InstallRequirement
logger = logging.getLogger(__name__)
def get_metadata_generator(install_req):
# type: (InstallRequirement) -> Callable[[InstallRequirement], str]
"""Return a callable metadata generator for this InstallRequirement.
A metadata generator takes an InstallRequirement (install_req) as an input,
generates metadata via the appropriate process for that install_req and
returns the generated metadata directory.
"""
if not install_req.use_pep517:
return _generate_metadata_legacy
return _generate_metadata
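# A hedged usage sketch (``install_req`` stands for an InstallRequirement built
# elsewhere by pip; illustrative only):
#
#   generate = get_metadata_generator(install_req)
#   metadata_directory = generate(install_req)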
def _find_egg_info(source_directory, is_editable):
# type: (str, bool) -> str
"""Find an .egg-info in `source_directory`, based on `is_editable`.
"""
def looks_like_virtual_env(path):
# type: (str) -> bool
return (
os.path.lexists(os.path.join(path, 'bin', 'python')) or
os.path.exists(os.path.join(path, 'Scripts', 'Python.exe'))
)
def locate_editable_egg_info(base):
# type: (str) -> List[str]
candidates = [] # type: List[str]
for root, dirs, files in os.walk(base):
for dir_ in vcs.dirnames:
if dir_ in dirs:
dirs.remove(dir_)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir_ in list(dirs):
if looks_like_virtual_env(os.path.join(root, dir_)):
dirs.remove(dir_)
# Also don't search through tests
elif dir_ == 'test' or dir_ == 'tests':
dirs.remove(dir_)
candidates.extend(os.path.join(root, dir_) for dir_ in dirs)
return [f for f in candidates if f.endswith('.egg-info')]
def depth_of_directory(dir_):
# type: (str) -> int
return (
dir_.count(os.path.sep) +
(os.path.altsep and dir_.count(os.path.altsep) or 0)
)
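    # e.g. depth_of_directory('pkg/sub/x.egg-info') counts two separators, so
    # sorting by depth below puts the toplevel .egg-info candidate first.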
base = source_directory
if is_editable:
filenames = locate_editable_egg_info(base)
else:
base = os.path.join(base, 'pip-egg-info')
filenames = os.listdir(base)
if not filenames:
raise InstallationError(
"Files/directories not found in {}".format(base)
)
# If we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(key=depth_of_directory)
return os.path.join(base, filenames[0])
def _generate_metadata_legacy(install_req):
# type: (InstallRequirement) -> str
req_details_str = install_req.name or "from {}".format(install_req.link)
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
install_req.setup_py_path, req_details_str,
)
egg_info_dir = None # type: Optional[str]
# For non-editable installs, don't put the .egg-info files at the root,
# to avoid confusion due to the source code being considered an installed
# egg.
if not install_req.editable:
egg_info_dir = os.path.join(
install_req.unpacked_source_directory, 'pip-egg-info',
)
# setuptools complains if the target directory does not exist.
ensure_dir(egg_info_dir)
args = make_setuptools_egg_info_args(
install_req.setup_py_path,
egg_info_dir=egg_info_dir,
no_user_config=install_req.isolated,
)
with install_req.build_env:
call_subprocess(
args,
cwd=install_req.unpacked_source_directory,
command_desc='python setup.py egg_info',
)
# Return the .egg-info directory.
return _find_egg_info(
install_req.unpacked_source_directory,
install_req.editable,
)
def _generate_metadata(install_req):
# type: (InstallRequirement) -> str
assert install_req.pep517_backend is not None
build_env = install_req.build_env
backend = install_req.pep517_backend
# NOTE: This needs to be refactored to stop using atexit
metadata_tmpdir = TempDirectory(kind="modern-metadata")
atexit.register(metadata_tmpdir.cleanup)
metadata_dir = metadata_tmpdir.path
with build_env:
# Note that Pep517HookCaller implements a fallback for
# prepare_metadata_for_build_wheel, so we don't have to
# consider the possibility that this hook doesn't exist.
runner = runner_with_spinner_message("Preparing wheel metadata")
with backend.subprocess_runner(runner):
distinfo_dir = backend.prepare_metadata_for_build_wheel(
metadata_dir
)
return os.path.join(metadata_dir, distinfo_dir)
|
the-stack_106_28743 | import os
import time
import numpy as np
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
"""
Checks Amazon Prime Now and posts delivery times
"""
class AmazonPrimeNow(webdriver.Chrome):
def __init__(self, browser):
self.site = "https://primenow.amazon.com/home"
if browser.lower() == "chrome":
            # initialize the underlying Chrome driver in-place (Chrome.__init__ returns None)
            webdriver.Chrome.__init__(self)
else:
raise ValueError("Only Chrome is installed at this time")
def open_site(self):
# Navigate to Amazon Prime Now
self.get(self.site)
# Enter Postal Code
self.find_element_by_name("lsPostalCode").send_keys("10009")
# sleep
time.sleep(np.random.randint(3, 8))
self.find_element_by_class_name("a-button-input").click()
# sleep
time.sleep(np.random.randint(5, 8))
self.fullscreen_window()
def sign_in(self, amazon_un, amazon_pw):
# Navigate to Log-on screen
self.find_element_by_css_selector(
"""div[class="show-for-large page_header_drop_menu_icon__root__19BcV"]"""
).click()
# sleep
time.sleep(np.random.randint(3, 6))
email = self.find_element_by_name("email")
password = self.find_element_by_name("password")
email.send_keys(amazon_un)
password.send_keys(amazon_pw)
# sleep
time.sleep(np.random.randint(7, 8))
self.find_element_by_id("signInSubmit").click()
def check_out(self):
# Go-to cart
self.find_element_by_css_selector(
"""
[aria-label="Cart"]
"""
).click()
# sleep
time.sleep(np.random.randint(4, 8))
# Proceed to check out
self.find_element_by_id("a-autoid-1").click()
def _strip_time(self, times, categories):
"""strip unwanted categories"""
stripped_times = [x for x in times.splitlines() if x not in categories]
return stripped_times
def _enumerate_time(self, strip_times):
enum_times = [(x, y) for x, y in enumerate(strip_times)]
return enum_times
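    # Illustrative example of the two helpers above: for stripped window text
    # such as ['8 AM - 10 AM', 'Not available', '10 AM - 12 PM', 'Available'],
    # _enumerate_time pairs each line with its index; check_availability() then
    # treats even indices as the time slots and odd indices as the availability
    # column of the resulting DataFrame.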
def check_availability(self):
# Delivery times
# sleep
time.sleep(np.random.randint(4, 8))
try:
window = self.find_element_by_id("two-hour-window")
times = window.text
categories = [
"Collapse all 2-Hour windows",
"Tomorrow",
"See all 2-hour windows",
]
stripped_times = self._strip_time(times, categories)
enum_times = self._enumerate_time(stripped_times)
time_slot = [x[1] for x in enum_times if x[0] % 2 == 0]
availability = [x[1] for x in enum_times if x[0] % 2 != 0]
df = pd.DataFrame({"times": time_slot, "avail": availability})
return df
except Exception as e:
print("No times available.\n\n", e)
def email_alert(self, message=None, recipients=None, avail_df=None):
sender_email = "[email protected]"
all_recipients = []
for i in recipients:
all_recipients.append(i)
recipients = ", ".join(all_recipients)
msg = MIMEMultipart()
msg["Subject"] = "Food Delivery Update"
msg["From"] = f"Food Delivery Monitor <{sender_email}>"
msg["To"] = recipients
msg.attach(MIMEText(message, "plain"))
msg.attach(MIMEText(avail_df.to_string(), "plain"))
session = smtplib.SMTP("mail.xxxxx.com")
session.sendmail(sender_email, recipients, msg.as_string())
session.quit()
# Kick off script
# ---------------
amazon_un = "xxxx"
amazon_pw = "xxxx"
message = "Your food is ready"
recipients = ["xxxx", "xxxx"]
counter = 0
num_min = 30 # How often we'd like to check Amazon for available times
run_time_hrs = 5 # Total run time in hours
if __name__ == "__main__":
amzn = AmazonPrimeNow(browser="Chrome")
amzn.open_site()
amzn.sign_in(amazon_un=amazon_un, amazon_pw=amazon_pw)
amzn.check_out()
while True:
# Refresh page
# ------------
amzn.refresh()
# Store delivery times to df
# --------------------------
df = amzn.check_availability()
# Execute email trigger
# ---------------------
if df is not None:
amzn.email_alert(message=message, recipients=recipients, avail_df=df)
else:
print("No times available - no email sent.")
# Keeping Track & Ending Routine
# ------------------------------
print(f"Executing loop {counter}...")
sleep_time = num_min * 60
time.sleep(sleep_time)
counter += 1
if counter > run_time_hrs:
break |
the-stack_106_28744 | import django
from django.db.models import Q, FieldDoesNotExist
if django.VERSION >= (1, 8):
from django.db.models.expressions import Expression
else:
from django.db.models.expressions import ExpressionNode as Expression
from django.db.models.sql.where import WhereNode
from collections import namedtuple
#===============================================================================
# Generators abstracting walking through internal django structures
QueryTerm = namedtuple('QueryTerm', 'depth term model field translated target many')
def query_terms(model, path):
""" Yields QueryTerms of given path starting from given model.
- model can be either a regular model or a translatable model
"""
bits = path.split('__')
field = None
for depth, bit in enumerate(bits):
# STEP 1 -- Resolve the field
if bit == 'pk': # handle 'pk' alias
bit = model._meta.pk.name
try:
if django.VERSION >= (1, 8):
try: # is field on the shared model?
field = model._meta.get_field.real(bit)
translated = False
except FieldDoesNotExist: # nope, get field from translations model
field = model._meta.translations_model._meta.get_field(bit)
translated = True
except AttributeError: # current model is a standard model
field = model._meta.get_field(bit)
translated = False
direct = (
not field.auto_created or
getattr(field, 'db_column', None) or
getattr(field, 'attname', None)
)
else:
# older versions do not retrieve reverse/m2m with get_field, we must use the obsolete api
try:
field, _, direct, _ = model._meta.get_field_by_name.real(bit)
translated = False
except FieldDoesNotExist:
field, _, direct, _ = model._meta.translations_model._meta.get_field_by_name(bit)
translated = True
except AttributeError:
field, _, direct, _ = model._meta.get_field_by_name(bit)
translated = False
except FieldDoesNotExist:
break
# STEP 2 -- Find out the target of the relation, if it is one
if direct: # field is on model
if field.rel: # field is a foreign key, follow it
target = field.rel.to._meta.concrete_model
else: # field is a regular field
target = None
else: # field is a m2m or reverse fk, follow it
target = (field.related_model._meta.concrete_model if django.VERSION >= (1, 8) else
field.model._meta.concrete_model)
yield QueryTerm(
depth=depth,
term=bit,
model=model,
field=field,
translated=translated,
target=target,
many=not direct
)
# Onto next iteration
if target is None:
depth += 1 # we hit a regular field, mark it as yielded then break
break # through to lookup/transform flushing
model = target
else:
return # all bits were recognized as fields, job done
# STEP 3 -- Flush lookup/transform bits - do not handle invalid stuff, Django will do it
for depth, bit in enumerate(bits[depth:], depth):
yield QueryTerm(
depth=depth,
term=bit,
model=model,
field=None,
translated=None,
target=None,
many=False
)
def q_children(q):
''' Recursively visit a Q object, yielding each (key, value) pair found.
- q: the Q object to visit
- Yields a 3-tuple ((key, value), containing_list, index_in_list) so
as to allow updating the tuple in the list
'''
todo = [q]
while todo:
q = todo.pop()
for index, child in enumerate(q.children):
if isinstance(child, Q):
todo.append(child)
else:
yield child, q.children, index
if django.VERSION >= (1, 8):
def expression_nodes(expression):
''' Recursively visit an expression object, yielding each node in turn.
- expression: the expression object to visit
'''
todo = [expression]
while todo:
expression = todo.pop()
if expression is not None:
yield expression
if isinstance(expression, Expression):
todo.extend(expression.get_source_expressions())
else:
def expression_nodes(expression):
''' Recursively visit an expression object, yielding each node in turn.
- expression: the expression object to visit
'''
todo = [expression]
while todo:
expression = todo.pop()
if expression is not None:
yield expression
if isinstance(expression, Expression):
todo.extend(expression.children or ())
def where_node_children(node):
''' Recursively visit all children of a where node, yielding each field in turn.
- node: the node to visit
'''
todo = [node]
get_field_name = ((lambda n: n.lhs.target.name) if django.VERSION >= (1, 7) else
(lambda n: n[0].field.name))
while todo:
node = todo.pop()
for child in node.children:
try:
field_name = get_field_name(child)
except (TypeError, AttributeError):
pass
else:
yield child, field_name
if isinstance(child, WhereNode):
todo.append(child)
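# The three generators above share the same non-recursive, stack-based
# traversal. A minimal standalone sketch of that pattern (plain dicts instead
# of Django query nodes; illustrative only):
def _tree_walk_sketch(root):
    todo = [root]
    while todo:
        node = todo.pop()
        for child in node.get('children', ()):
            if isinstance(child, dict):
                # nested node: keep walking
                todo.append(child)
            else:
                # leaf value: hand it to the caller
                yield child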
|
the-stack_106_28745 | #!/usr/bin/python
# gui.py
import sys
from PyQt4 import QtGui,QtCore
class SigSlot(QtGui.QWidget):
def __init__(self,parent=None):
QtGui.QWidget.__init__(self,parent)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
self.setWindowTitle("signal and slot")
self.setMouseTracking(True)
self.setObjectName("main_window")
style = '''
QWidget#main_window{
background-color: #F2EABC;
}
QPushButton#button{
background-color: #EEEEEE;
border: 3px solid #242424;
color: #242424;
width: 120px;
height: 30px;
font-size: 15px;
font-weight: bold;
line-height: 30px;
}
QPushButton#button:hover{
background-color: #242424;
border: 2px solid #EEEEEE;
color: #EEEEEE;
}
'''
start = QtGui.QPushButton('START')
exit = QtGui.QPushButton('EXIT')
start.setObjectName("button")
exit.setObjectName("button")
main = QtGui.QVBoxLayout()
panel_buttons = QtGui.QHBoxLayout()
panel_upper = QtGui.QHBoxLayout()
panel_buttons.addStretch(1)
panel_buttons.addWidget(start)
panel_buttons.addWidget(exit)
main.addLayout(panel_upper)
main.addStretch(1)
main.addLayout(panel_buttons)
self.connect(exit,QtCore.SIGNAL('clicked()'),QtGui.qApp,QtCore.SLOT('quit()'))
self.setLayout(main)
self.resize(500,300)
self.move(500,300)
self.setStyleSheet(style)
def mousePressEvent(self, event):
if event.button()==QtCore.Qt.LeftButton:
self.m_drag=True
self.m_DragPosition=event.globalPos()-self.pos()
event.accept()
def mouseMoveEvent(self, QMouseEvent):
if QMouseEvent.buttons() and QtCore.Qt.LeftButton:
self.move(QMouseEvent.globalPos()-self.m_DragPosition)
QMouseEvent.accept()
def mouseReleaseEvent(self, QMouseEvent):
self.m_drag=False
app = QtGui.QApplication(sys.argv)
qb = SigSlot()
qb.show()
sys.exit(app.exec_())
|
the-stack_106_28747 | # Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" SYNCHRONISATION CKAN """
import logging
from django.core.management.base import BaseCommand
from idgo_admin.ckan_module import CkanHandler
from idgo_admin.models import Category
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Synchroniser les catégories IDGO avec CKAN."
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
        self.ckan = CkanHandler
def handle(self, *args, **options):
queryset = Category.objects.all().order_by('id')
total = queryset.count()
count = 0
for instance in queryset:
count += 1
logger.info("[%d/%d] - Save Resource %d." % (count, total, instance.pk))
if CkanHandler.is_group_exists(instance.slug):
CkanHandler.update_group(instance)
logger.info("'%s' is udpated." % instance.slug)
else:
CkanHandler.add_group(instance)
logger.info("'%s' is created." % instance.slug)
|
the-stack_106_28748 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Base class for array feature extractor
"""
import torch
from ._physical_operator import PhysicalOperator
class ArrayFeatureExtractor(PhysicalOperator, torch.nn.Module):
"""
Class implementing ArrayFeatureExtractor in PyTorch
This is used by SelectKBest, VarianceThreshold operators in scikit-learn
"""
def __init__(self, logical_operator, column_indices, device):
super(ArrayFeatureExtractor, self).__init__(logical_operator, transformer=True)
is_contiguous = False
if max(column_indices) - min(column_indices) + 1 == len(column_indices):
is_contiguous = True
self.min = min(column_indices)
self.max = max(column_indices) + 1
self.column_indices = torch.nn.Parameter(torch.LongTensor(column_indices), requires_grad=False)
self.is_contiguous = is_contiguous
def forward(self, x):
if type(x) == tuple:
return x[self.column_indices]
if len(x.shape) == 1:
x = x.view(1, -1)
if self.is_contiguous:
return x[:, self.min : self.max]
else:
return torch.index_select(x, 1, self.column_indices)
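def _column_selection_sketch():
    # A minimal sketch of the two selection paths used in ``forward`` above, on
    # plain tensors (illustrative only; it does not construct the operator,
    # which requires a logical_operator from the compilation pipeline).
    x = torch.arange(12.0).view(3, 4)
    contiguous = x[:, 1:3]  # fast path when the requested columns are contiguous
    scattered = torch.index_select(x, 1, torch.LongTensor([0, 3]))  # general path
    return contiguous, scattered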
|
the-stack_106_28749 | import pytest
from dvc.repo.plots.diff import _revisions
@pytest.mark.parametrize(
"arg_revisions,is_dirty,expected_revisions",
[
([], False, ["workspace"]),
([], True, ["HEAD", "workspace"]),
(["v1", "v2", "workspace"], False, ["v1", "v2", "workspace"]),
(["v1", "v2", "workspace"], True, ["v1", "v2", "workspace"]),
],
)
def test_revisions(mocker, arg_revisions, is_dirty, expected_revisions):
mock_scm = mocker.Mock()
mock_scm.configure_mock(
**{"is_dirty.return_value": is_dirty, "get_ref.return_value": None}
)
mock_repo = mocker.Mock(scm=mock_scm)
assert _revisions(mock_repo, arg_revisions, False) == expected_revisions
@pytest.mark.parametrize(
"arg_revisions,baseline,expected_revisions",
[
(["v1"], "v0", ["v1", "v0"]),
(["v1"], None, ["v1", "workspace"]),
(["v1", "v2"], "v0", ["v1", "v2"]),
(["v1", "v2"], None, ["v1", "v2"]),
],
)
def test_revisions_experiment(
mocker, arg_revisions, baseline, expected_revisions
):
mock_scm = mocker.Mock()
mock_scm.configure_mock(
**{"is_dirty.return_value": False, "get_ref.return_value": None}
)
mock_experiments = mocker.Mock()
mock_experiments.configure_mock(**{"get_baseline.return_value": baseline})
mock_repo = mocker.Mock(scm=mock_scm, experiments=mock_experiments)
assert _revisions(mock_repo, arg_revisions, True) == expected_revisions
|
the-stack_106_28750 | """
Predict age from connectivity.
The aim of this script is age prediction from CamCan connectivity features
extracted with different connectivity matrices and different atlases.
"""
import os
import pandas as pd
from collections import OrderedDict
import numpy as np
from camcan.datasets import load_camcan_connectivity_rest
from sklearn.metrics import mean_squared_error, mean_absolute_error, \
explained_variance_score, r2_score
from sklearn import linear_model, svm, tree, ensemble, neighbors
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
def camcan_prediction_uni_out(x, y, regr_uni_list, results_path,
name_csv_prediction):
"""Unioutput prediction.
:param x: Training vector, array-like, shape (n_samples, n_features)
:param y: Target vector, array-like, shape (n_samples)
    :param regr_uni_list: list of single-output regressors
    :param results_path: path to the results folder
    :param name_csv_prediction: name of the CSV file to save
    :return: metrics as pandas.DataFrame, plus the per-fold lists of target
        arrays and predicted arrays
"""
name_regr_list = []
mse_list = []
mae_list = []
evs_list = []
r2s_list = []
y_test_array = []
y_predict_array = []
for regr in regr_uni_list:
name_regr = str(regr)[0:str(regr).find('(')]
name_regr_list.append(name_regr)
print(name_regr)
mse_list_cv = []
mae_list_cv = []
evs_list_cv = []
r2s_list_cv = []
# Train the model using the training sets
for index, (train_index, test_index) in enumerate(cv.split(x)):
print(index)
x_train, x_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
regr.fit(x_train, y_train)
mse = mean_squared_error(y_test, regr.predict(x_test))
mse_list_cv.append(mse)
mae = mean_absolute_error(y_test, regr.predict(x_test))
mae_list_cv.append(mae)
evs = explained_variance_score(y_test, regr.predict(x_test))
evs_list_cv.append(evs)
r2s = r2_score(y_test, regr.predict(x_test))
r2s_list_cv.append(r2s)
y_test_array.append(y_test)
y_predict_array.append(regr.predict(x_test))
mse_list.append(np.mean(mse_list_cv))
mae_list.append(np.mean(mae_list_cv))
evs_list.append(np.mean(evs_list_cv))
r2s_list.append(np.mean(r2s_list_cv))
df_prediction_uni = pd.DataFrame(
OrderedDict((('Model', pd.Series(name_regr_list)),
('MSE', pd.Series(mse_list)),
('MAE', pd.Series(mae_list)),
('EVS', pd.Series(evs_list)),
('R2S', pd.Series(r2s_list)))))
df_prediction_uni.to_csv(os.path.join(results_path, name_csv_prediction))
print('The result csv file %s' % os.path.join(results_path,
name_csv_prediction))
return df_prediction_uni, y_test_array, y_predict_array
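# A hedged usage sketch on synthetic data (illustrative only; the real calls
# further below rely on the module-level ``cv`` splitter defined later and on
# the CamCan connectivity files):
#
#   x_demo = np.random.rand(50, 10)
#   y_demo = np.random.rand(50)
#   df_demo, y_true_folds, y_pred_folds = camcan_prediction_uni_out(
#       x_demo, y_demo, [linear_model.BayesianRidge()], '/tmp', 'demo.csv')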
def plot_regression(y_target, y_predict, fig_name, fig_path):
"""Plot regression results."""
plt.scatter(y_target, y_predict)
fig, ax = plt.subplots()
ax.scatter(y_target, y_predict)
ax.plot([y_target.min(), y_target.max()], [y_target.min(), y_target.max()],
'r-', lw=4)
ax.set_xlabel('Target')
ax.set_ylabel('Predicted')
plt.savefig(os.path.join(fig_path, fig_name))
plt.close()
###############################################################################
# CamCan data
myhost = os.uname()[1]
if myhost == 'darya':
print(myhost)
raw_path = '/home/darya/Documents/Inria/data/camcan'
csv_path = '/home/darya/Documents/Inria/data/camcan/cc700-scored'
cache_path = '/home/darya/Documents/Inria/experiments/cache'
results_path = '/home/darya/Documents/Inria/experiments/CamCan/prediction'
elif myhost == 'drago':
raw_path = 'drago:/storage/data/camcan'
csv_path = 'drago:/storage/data/camcan/cc700-scored'
cache_path = '/storage/tompouce/dchyzhyk/data/cache'
results_path = '/storage/tompouce/dchyzhyk/data/experiments/camcan'
# connectivity folder
connectivity_folder = 'camcan_connectivity'
atlases = ['basc064', 'basc122', 'basc197', 'msdl']
kind_connectivity = ['tangent', 'correlation', 'partial correlation']
# phenotype
csv_name = 'participant_data.csv'
csv_file = os.path.join(csv_path, csv_name)
csv_behav = os.path.join(csv_path, '_Summary/csv',
'AllExpts_AllButOneRaw_DataTable.csv')
dataname = 'CamCan'
###############################################################################
# Cross validation
n_iter = 10 # Number of splits
cv = KFold(n_splits=n_iter, random_state=0, shuffle=True)
###############################################################################
# Prediction: Unilabel
list_models = []
# Create regression object
regr_multi_list = [linear_model.LinearRegression(),
linear_model.RidgeCV(),
tree.DecisionTreeRegressor(),
ensemble.ExtraTreesRegressor(),
neighbors.KNeighborsRegressor()]
regr_uni_list = [linear_model.BayesianRidge(),
linear_model.ElasticNet(),
linear_model.HuberRegressor(),
linear_model.Lasso(),
linear_model.LassoLars(),
linear_model.PassiveAggressiveRegressor(),
svm.LinearSVR(),
ensemble.RandomForestRegressor(),
ensemble.AdaBoostRegressor(),
ensemble.BaggingRegressor(),
SVR(kernel='rbf'),
SVR(kernel='linear')]
###############################################################################
# Prediction age
# select number of subject
# n_subj_list = [100, 200, 400, 626]
n_subj_list = [626]
y_keys = ['age']
y_keys_save_name = 'age'
for atlas in atlases:
for kind_con in kind_connectivity:
conn_files = load_camcan_connectivity_rest(data_dir=os.path.join(
raw_path, connectivity_folder), patients_info_csv=csv_file,
atlas=atlas, kind=kind_con, patients_excluded=None)
x = np.array(conn_files.connectivity)
y = np.array(conn_files.scores.age)
for n_subjects in n_subj_list:
name_csv_prediction = (dataname + '_' + y_keys_save_name +
'_prediction_' + str(n_subjects) + 'subj_' +
atlas + '_atlas_' + kind_con + '.csv')
df_prediction, y_test_array, y_predict_array = \
camcan_prediction_uni_out(x[0:n_subjects, :], y[0:n_subjects],
regr_uni_list, results_path,
name_csv_prediction)
# plotting
y_test_array_plot = np.concatenate(y_test_array)
y_predict_array_plot = np.concatenate(y_predict_array)
name_fig = (dataname + '_' + y_keys_save_name + '_prediction_' +
str(n_subjects) + 'subj_' + atlas + '_atlas_' +
kind_con + '.png')
plot_regression(y_test_array_plot, y_predict_array_plot, name_fig,
results_path)
###############################################################################
# Prediction behavioural data
n_subj_list = [626]
y_keys = ['behavioural']
y_keys_save_name = 'behavioural'
|
the-stack_106_28751 | import argparse
import logging
import os
from dvc.command import completion
from dvc.command.base import CmdBase, append_doc_link, fix_subparsers
from dvc.exceptions import DvcException
from dvc.schema import PLOT_PROPS
from dvc.utils import format_link
logger = logging.getLogger(__name__)
PAGE_HTML = """<!DOCTYPE html>
<html>
<head>
<title>DVC Plot</title>
<script src="https://cdn.jsdelivr.net/npm/[email protected]"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]"></script>
</head>
<body>
{divs}
</body>
</html>"""
DIV_HTML = """<div id = "{id}"></div>
<script type = "text/javascript">
var spec = {vega_json};
vegaEmbed('#{id}', spec);
</script>"""
class CmdPlots(CmdBase):
def _func(self, *args, **kwargs):
raise NotImplementedError
def _props(self):
# Pass only props specified by user, to not shadow ones from plot def
props = {p: getattr(self.args, p) for p in PLOT_PROPS}
return {k: v for k, v in props.items() if v is not None}
def run(self):
if self.args.show_vega:
if not self.args.targets:
logger.error("please specify a target for `--show-vega`")
return 1
if len(self.args.targets) > 1:
logger.error(
"you can only specify one target for `--show-vega`"
)
return 1
try:
plots = self._func(targets=self.args.targets, props=self._props())
if self.args.show_vega:
target = self.args.targets[0]
logger.info(plots[target])
return 0
divs = [
DIV_HTML.format(id=f"plot{i}", vega_json=plot)
for i, plot in enumerate(plots.values())
]
html = PAGE_HTML.format(divs="\n".join(divs))
path = self.args.out or "plots.html"
with open(path, "w") as fobj:
fobj.write(html)
logger.info(
"file://{}".format(os.path.join(self.repo.root_dir, path))
)
except DvcException:
logger.exception("")
return 1
return 0
class CmdPlotsShow(CmdPlots):
UNINITIALIZED = True
def _func(self, *args, **kwargs):
return self.repo.plots.show(*args, **kwargs)
class CmdPlotsDiff(CmdPlots):
UNINITIALIZED = True
def _func(self, *args, **kwargs):
return self.repo.plots.diff(
*args,
revs=self.args.revisions,
experiment=self.args.experiment,
**kwargs,
)
class CmdPlotsModify(CmdPlots):
def run(self):
self.repo.plots.modify(
self.args.target, props=self._props(), unset=self.args.unset,
)
return 0
def add_parser(subparsers, parent_parser):
PLOTS_HELP = (
"Commands to visualize and compare plot metrics in structured files "
"(JSON, YAML, CSV, TSV)"
)
plots_parser = subparsers.add_parser(
"plots",
parents=[parent_parser],
description=append_doc_link(PLOTS_HELP, "plots"),
help=PLOTS_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
plots_subparsers = plots_parser.add_subparsers(
dest="cmd",
help="Use `dvc plots CMD --help` to display command-specific help.",
)
fix_subparsers(plots_subparsers)
SHOW_HELP = "Generate plots from metrics files."
plots_show_parser = plots_subparsers.add_parser(
"show",
parents=[parent_parser],
description=append_doc_link(SHOW_HELP, "plots/show"),
help=SHOW_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
plots_show_parser.add_argument(
"targets",
nargs="*",
help="Files to visualize (supports any file, "
"even when not found as `plots` in `dvc.yaml`). "
"Shows all plots by default.",
).complete = completion.FILE
_add_props_arguments(plots_show_parser)
_add_output_arguments(plots_show_parser)
plots_show_parser.set_defaults(func=CmdPlotsShow)
PLOTS_DIFF_HELP = (
"Show multiple versions of plot metrics "
"by plotting them in a single image."
)
plots_diff_parser = plots_subparsers.add_parser(
"diff",
parents=[parent_parser],
description=append_doc_link(PLOTS_DIFF_HELP, "plots/diff"),
help=PLOTS_DIFF_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
plots_diff_parser.add_argument(
"--targets",
nargs="*",
help="Files to visualize (supports any file, "
"even when not found as `plots` in `dvc.yaml`). "
"Shows all plots by default.",
metavar="<path>",
).complete = completion.FILE
plots_diff_parser.add_argument(
"-e",
"--experiment",
action="store_true",
default=False,
help=argparse.SUPPRESS,
)
plots_diff_parser.add_argument(
"revisions", nargs="*", default=None, help="Git commits to plot from",
)
_add_props_arguments(plots_diff_parser)
_add_output_arguments(plots_diff_parser)
plots_diff_parser.set_defaults(func=CmdPlotsDiff)
PLOTS_MODIFY_HELP = "Modify display properties of plot metrics files."
plots_modify_parser = plots_subparsers.add_parser(
"modify",
parents=[parent_parser],
description=append_doc_link(PLOTS_MODIFY_HELP, "plots/modify"),
help=PLOTS_MODIFY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
plots_modify_parser.add_argument(
"target", help="Metric file to set properties to",
).complete = completion.FILE
_add_props_arguments(plots_modify_parser)
plots_modify_parser.add_argument(
"--unset",
nargs="*",
metavar="<property>",
help="Unset one or more display properties.",
)
plots_modify_parser.set_defaults(func=CmdPlotsModify)
def _add_props_arguments(parser):
parser.add_argument(
"-t",
"--template",
nargs="?",
default=None,
help=(
"Special JSON or HTML schema file to inject with the data. "
"See {}".format(
format_link("https://man.dvc.org/plots#plot-templates")
)
),
metavar="<path>",
).complete = completion.FILE
parser.add_argument(
"-x", default=None, help="Field name for X axis.", metavar="<field>"
)
parser.add_argument(
"-y", default=None, help="Field name for Y axis.", metavar="<field>"
)
parser.add_argument(
"--no-header",
action="store_false",
dest="header",
default=None, # Use default None to distinguish when it's not used
help="Provided CSV or TSV datafile does not have a header.",
)
parser.add_argument(
"--title", default=None, metavar="<text>", help="Plot title."
)
parser.add_argument(
"--x-label", default=None, help="X axis label", metavar="<text>"
)
parser.add_argument(
"--y-label", default=None, help="Y axis label", metavar="<text>"
)
def _add_output_arguments(parser):
parser.add_argument(
"-o",
"--out",
default=None,
help="Destination path to save plots to",
metavar="<path>",
).complete = completion.DIR
parser.add_argument(
"--show-vega",
action="store_true",
default=False,
help="Show output in Vega format.",
)
|
the-stack_106_28752 | import fnmatch
from collections import OrderedDict
from conans.paths import SimplePaths
from conans.client.output import Color
from conans.model.ref import ConanFileReference
from conans.model.ref import PackageReference
from conans.client.installer import build_id
class Printer(object):
""" Print some specific information """
INDENT_COLOR = {0: Color.BRIGHT_CYAN,
1: Color.BRIGHT_RED,
2: Color.BRIGHT_GREEN,
3: Color.BRIGHT_YELLOW,
4: Color.BRIGHT_MAGENTA}
INDENT_SPACES = 4
def __init__(self, out):
self._out = out
def print_inspect(self, inspect):
for k, v in inspect.items():
if isinstance(v, dict):
self._out.writeln("%s" % k)
for sk, sv in sorted(v.items()):
self._out.writeln(" %s: %s" % (sk, str(sv)))
else:
self._out.writeln("%s: %s" % (k, str(v)))
def _print_paths(self, ref, conan, path_resolver, show):
if isinstance(ref, ConanFileReference):
if show("export_folder"):
path = path_resolver.export(ref)
self._out.writeln(" export_folder: %s" % path, Color.BRIGHT_GREEN)
if show("source_folder"):
path = path_resolver.source(ref, conan.short_paths)
self._out.writeln(" source_folder: %s" % path, Color.BRIGHT_GREEN)
if show("build_folder") and isinstance(path_resolver, SimplePaths):
# @todo: check if this is correct or if it must always be package_id()
bid = build_id(conan)
if not bid:
bid = conan.info.package_id()
path = path_resolver.build(PackageReference(ref, bid), conan.short_paths)
self._out.writeln(" build_folder: %s" % path, Color.BRIGHT_GREEN)
if show("package_folder") and isinstance(path_resolver, SimplePaths):
id_ = conan.info.package_id()
path = path_resolver.package(PackageReference(ref, id_), conan.short_paths)
self._out.writeln(" package_folder: %s" % path, Color.BRIGHT_GREEN)
def print_info(self, deps_graph, _info, registry, node_times=None, path_resolver=None, package_filter=None,
show_paths=False):
""" Print the dependency information for a conan file
Attributes:
deps_graph: the dependency graph of conan file references to print
placeholder_reference: the conan file reference that represents the conan
file for a project on the path. This may be None,
in which case the project itself will not be part
of the printed dependencies.
"""
if _info is None: # No filter
def show(_):
return True
else:
_info_lower = [s.lower() for s in _info]
def show(field):
return field in _info_lower
compact_nodes = OrderedDict()
for node in sorted(deps_graph.nodes):
compact_nodes.setdefault((node.conan_ref, node.conanfile.info.package_id()), []).append(node)
for (ref, package_id), list_nodes in compact_nodes.items():
node = list_nodes[0]
conan = node.conanfile
if not ref:
# ref is only None iff info is being printed for a project directory, and
# not a passed in reference
if conan.output is None: # Identification of "virtual" node
continue
ref = str(conan)
if package_filter and not fnmatch.fnmatch(str(ref), package_filter):
continue
self._out.writeln("%s" % str(ref), Color.BRIGHT_CYAN)
try:
# Excludes PROJECT fake reference
reg_remote = registry.get_recipe_remote(ref)
except:
reg_remote = None
if show("id"):
self._out.writeln(" ID: %s" % package_id, Color.BRIGHT_GREEN)
if show("build_id"):
bid = build_id(conan)
self._out.writeln(" BuildID: %s" % bid, Color.BRIGHT_GREEN)
if show_paths:
self._print_paths(ref, conan, path_resolver, show)
if isinstance(ref, ConanFileReference) and show("remote"):
if reg_remote:
self._out.writeln(" Remote: %s=%s" % (reg_remote.name, reg_remote.url),
Color.BRIGHT_GREEN)
else:
self._out.writeln(" Remote: None", Color.BRIGHT_GREEN)
url = getattr(conan, "url", None)
license_ = getattr(conan, "license", None)
author = getattr(conan, "author", None)
if url and show("url"):
self._out.writeln(" URL: %s" % url, Color.BRIGHT_GREEN)
if license_ and show("license"):
if isinstance(license_, (list, tuple, set)):
self._out.writeln(" Licenses: %s" % ", ".join(license_), Color.BRIGHT_GREEN)
else:
self._out.writeln(" License: %s" % license_, Color.BRIGHT_GREEN)
if author and show("author"):
self._out.writeln(" Author: %s" % author, Color.BRIGHT_GREEN)
if isinstance(ref, ConanFileReference) and show("recipe"): # Excludes PROJECT
self._out.writeln(" Recipe: %s" % node.recipe)
if isinstance(ref, ConanFileReference) and show("binary"): # Excludes PROJECT
self._out.writeln(" Binary: %s" % node.binary)
if isinstance(ref, ConanFileReference) and show("binary_remote"): # Excludes PROJECT
self._out.writeln(" Binary remote: %s" % (node.binary_remote.name if node.binary_remote else "None"))
if node_times and node_times.get(ref, None) and show("date"):
self._out.writeln(" Creation date: %s" % node_times.get(ref, None),
Color.BRIGHT_GREEN)
dependants = [n for node in list_nodes for n in node.inverse_neighbors()]
if isinstance(ref, ConanFileReference) and show("required"): # Excludes
self._out.writeln(" Required by:", Color.BRIGHT_GREEN)
for d in dependants:
ref = d.conan_ref if d.conan_ref else str(d.conanfile)
self._out.writeln(" %s" % str(ref), Color.BRIGHT_YELLOW)
if show("requires"):
depends = node.neighbors()
requires = [d for d in depends if not d.build_require]
build_requires = [d for d in depends if d.build_require]
if requires:
self._out.writeln(" Requires:", Color.BRIGHT_GREEN)
for d in requires:
self._out.writeln(" %s" % repr(d.conan_ref), Color.BRIGHT_YELLOW)
if build_requires:
self._out.writeln(" Build Requires:", Color.BRIGHT_GREEN)
for d in build_requires:
self._out.writeln(" %s" % repr(d.conan_ref), Color.BRIGHT_YELLOW)
def print_search_recipes(self, search_info, pattern, raw, all_remotes_search):
""" Print all the exported conans information
param pattern: wildcards, e.g., "opencv/*"
"""
if not search_info and not raw:
warn_msg = "There are no packages"
pattern_msg = " matching the '%s' pattern" % pattern
self._out.info(warn_msg + pattern_msg if pattern else warn_msg)
return
if not raw:
self._out.info("Existing package recipes:\n")
for remote_info in search_info:
if all_remotes_search:
self._out.highlight("Remote '%s':" % str(remote_info["remote"]))
for conan_item in remote_info["items"]:
self._print_colored_line(str(conan_item["recipe"]["id"]), indent=0)
else:
for remote_info in search_info:
if all_remotes_search:
self._out.writeln("Remote '%s':" % str(remote_info["remote"]))
for conan_item in remote_info["items"]:
self._out.writeln(conan_item["recipe"]["id"])
def print_search_packages(self, search_info, reference, packages_query,
outdated=False):
assert(isinstance(reference, ConanFileReference))
self._out.info("Existing packages for recipe %s:\n" % str(reference))
for remote_info in search_info:
if remote_info["remote"]:
self._out.info("Existing recipe in remote '%s':\n" % remote_info["remote"])
if not remote_info["items"][0]["packages"]:
if packages_query:
warn_msg = "There are no %spackages for reference '%s' matching the query '%s'" % \
("outdated " if outdated else "", str(reference), packages_query)
elif remote_info["items"][0]["recipe"]:
warn_msg = "There are no %spackages for reference '%s', but package recipe found." % \
("outdated " if outdated else "", str(reference))
self._out.info(warn_msg)
continue
reference = remote_info["items"][0]["recipe"]["id"]
packages = remote_info["items"][0]["packages"]
# Each package
for package in packages:
package_id = package["id"]
self._print_colored_line("Package_ID", package_id, 1)
for section in ("options", "settings", "requires"):
attr = package[section]
if attr:
self._print_colored_line("[%s]" % section, indent=2)
if isinstance(attr, dict): # options, settings
attr = OrderedDict(sorted(attr.items()))
for key, value in attr.items():
self._print_colored_line(key, value=value, indent=3)
elif isinstance(attr, list): # full requires
for key in sorted(attr):
self._print_colored_line(key, indent=3)
# Always compare outdated with local recipe, simplification,
# if a remote check is needed install recipe first
if "outdated" in package:
self._print_colored_line("Outdated from recipe: %s" % package["outdated"], indent=2)
self._out.writeln("")
def print_profile(self, name, profile):
self._out.info("Configuration for profile %s:\n" % name)
self._print_profile_section("settings", profile.settings.items(), separator="=")
self._print_profile_section("options", profile.options.as_list(), separator="=")
self._print_profile_section("build_requires", [(key, ", ".join(str(val) for val in values))
for key, values in
profile.build_requires.items()])
envs = []
for package, env_vars in profile.env_values.data.items():
for name, value in env_vars.items():
key = "%s:%s" % (package, name) if package else name
envs.append((key, value))
self._print_profile_section("env", envs, separator='=')
def _print_profile_section(self, name, items, indent=0, separator=": "):
self._print_colored_line("[%s]" % name, indent=indent, color=Color.BRIGHT_RED)
for key, value in items:
self._print_colored_line(key, value=str(value), indent=0, separator=separator)
def _print_colored_line(self, text, value=None, indent=0, separator=": ", color=None):
""" Print a colored line depending on its indentation level
            Attributes:
                text: string line to print
                value: optional value printed after the separator
                indent: indentation level, also used to select the line color
                separator: string printed between text and value
                color: optional color override for the text
        """
text = text.strip()
if not text:
return
text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE) if not color else color
indent_text = ' ' * Printer.INDENT_SPACES * indent
if value is not None:
value_color = Color.BRIGHT_WHITE
self._out.write('%s%s%s' % (indent_text, text, separator), text_color)
self._out.writeln(value, value_color)
else:
self._out.writeln('%s%s' % (indent_text, text), text_color)
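# Usage sketch (illustrative, not part of the original module; assumes the ConanOutput
# stream wrapper from conans.client.output):
#   import sys
#   from conans.client.output import ConanOutput
#   printer = Printer(ConanOutput(sys.stdout))
#   printer.print_inspect({"name": "zlib", "settings": {"os": "Linux", "arch": "x86_64"}})
# print_inspect() writes each top-level key and, for nested dicts, their sorted
# key/value pairs indented below it.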
|
the-stack_106_28753 | #!/usr/bin/env python3
# Copyright (c) 2019 The PIVX developers
# Copyright (c) 2020 The YEP developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Tests v2, v3 and v4 Zerocoin Spends
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import YEPTestFramework
from test_framework.util import (
sync_blocks,
assert_equal,
assert_raises_rpc_error,
set_node_times,
DecimalAmt
)
class ZerocoinSpendTest(YEPTestFramework):
def set_test_params(self):
self.num_nodes = 3
# node 0 and node 1 move the chain (node 0 also sets the sporks)
# node 2 does the spends
self.extra_args = [['-staking=0']]*self.num_nodes
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoS cache: 330 blocks
self._initialize_chain(toPosPhase=True)
self.enable_mocktime()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests v2, v3 and v4 Zerocoin Spends."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def setV4SpendEnforcement(self, fEnable=True):
sporkName = "SPORK_18_ZEROCOIN_PUBLICSPEND_V4"
# update spork 18 with node[0]
if fEnable:
self.log.info("Enabling v4 PublicSpend version with SPORK 18...")
res = self.activate_spork(0, sporkName)
else:
self.log.info("Enabling v3 PublicSpend version with SPORK 18...")
res = self.deactivate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, self.is_spork_active(1, sporkName))
self.log.info("done")
def run_test(self):
def get_zerocoin_data(coin):
return coin["s"], coin["r"], coin["k"], coin["id"], coin["d"], coin["t"]
def check_balances(denom, zyep_bal, yep_bal):
zyep_bal -= denom
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], zyep_bal)
yep_bal += denom
wi = self.nodes[2].getwalletinfo()
assert_equal(wi['balance'] + wi['immature_balance'], yep_bal)
return zyep_bal, yep_bal
def stake_4_blocks(block_time):
for peer in range(2):
for i in range(2):
block_time = self.generate_pos(peer, block_time)
sync_blocks(self.nodes)
return block_time
self.log_title()
block_time = self.mocktime
set_node_times(self.nodes, block_time)
# Start with cache balances
wi = self.nodes[2].getwalletinfo()
balance = wi['balance'] + wi['immature_balance']
zyep_balance = self.nodes[2].getzerocoinbalance()['Total']
assert_equal(balance, DecimalAmt(13833.92))
assert_equal(zyep_balance, 6666)
# Export zerocoin data
listmints = self.nodes[2].listmintedzerocoins(True, True)
serial_ids = [mint["serial hash"] for mint in listmints]
exported_zerocoins = [x for x in self.nodes[2].exportzerocoins(False) if x["id"] in serial_ids]
exported_zerocoins.sort(key=lambda x: x["d"], reverse=False)
assert_equal(8, len(exported_zerocoins))
# 1) Try to do a v3 spend before activation
self.log.info("Trying to make a public spend...")
serial_0, randomness_0, privkey_0, id_0, denom_0, tx_0 = get_zerocoin_data(exported_zerocoins[0])
assert_raises_rpc_error(-4, "The transaction was rejected!",
self.nodes[2].spendrawzerocoin, serial_0, randomness_0, denom_0, privkey_0, "", tx_0)
self.log.info("GOOD: v3 spend is not possible yet.")
# 2) Spend one minted coin - spend v2 (serial_0)
self.log.info("Spending the minted coin with serial %s..." % serial_0[:16])
txid = self.nodes[2].spendzerocoin(denom_0, False, False, "", False)['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zyep_balance, balance = check_balances(denom_0, zyep_balance, balance)
self.log.info("--> VALID COIN SPEND (v2) PASSED")
# 3) stake more blocks - save a v3 spend for later (serial_1)
serial_1, randomness_1, privkey_1, id_1, denom_1, tx_1 = get_zerocoin_data(exported_zerocoins[1])
self.log.info("Staking 70 blocks to get to public spend activation")
for j in range(5):
for peer in range(2):
for i in range(7):
block_time = self.generate_pos(peer, block_time)
sync_blocks(self.nodes)
old_spend_v3 = self.nodes[2].createrawzerocoinspend(id_1)
# 4) Check spend v2 disabled
serial_2, randomness_2, privkey_2, id_2, denom_2, tx_2 = get_zerocoin_data(exported_zerocoins[2])
self.log.info("Trying to spend using the old coin spend method..")
try:
self.nodes[2].spendzerocoin(denom_2, False, False, "", False)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if e.error["code"] != -4:
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if ([x for x in ["Couldn't generate the accumulator witness",
"The transaction was rejected!"] if x in e.error['message']] == []):
raise e
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
self.log.info("GOOD: v2 spend was not possible.")
# 5) Spend one minted coin - spend v3 (serial_2)
self.log.info("Spending the minted coin with serial %s..." % serial_2[:16])
txid = self.nodes[2].spendzerocoinmints([id_2])['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zyep_balance, balance = check_balances(denom_2, zyep_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
# 6) Check double spends - spend v3
self.log.info("Trying to spend the serial twice now...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
# 7) Activate v4 spends with SPORK_18
self.setV4SpendEnforcement()
# 8) Spend one minted coin - spend v4 (serial_3)
serial_3, randomness_3, privkey_3, id_3, denom_3, tx_3 = get_zerocoin_data(exported_zerocoins[3])
self.log.info("Spending the minted coin with serial %s..." % serial_3[:16])
txid = self.nodes[2].spendzerocoinmints([id_3])['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zyep_balance, balance = check_balances(denom_3, zyep_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v4) PASSED")
# 9) Check double spends - spend v4
self.log.info("Trying to spend the serial twice now...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_3, randomness_3, denom_3, privkey_3, "", tx_3)
# 10) Try to relay old v3 spend now (serial_1)
self.log.info("Trying to send old v3 spend now...")
assert_raises_rpc_error(-26, "bad-txns-invalid-zyep",
self.nodes[2].sendrawtransaction, old_spend_v3)
self.log.info("GOOD: Old transaction not sent.")
# 11) Try to double spend with v4 a mint already spent with v3 (serial_2)
self.log.info("Trying to double spend v4 against v3...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
self.log.info("GOOD: Double-spending transaction did not verify.")
# 12) Reactivate v3 spends and try to spend the old saved one (serial_1) again
self.setV4SpendEnforcement(False)
self.log.info("Trying to send old v3 spend now (serial: %s...)" % serial_1[:16])
txid = self.nodes[2].sendrawtransaction(old_spend_v3)
# stake 4 blocks - check it gets included on chain and check balances
_ = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
# need to reset spent mints since this was a raw broadcast
self.nodes[2].resetmintzerocoin()
_, _ = check_balances(denom_1, zyep_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
if __name__ == '__main__':
ZerocoinSpendTest().main()
|
the-stack_106_28754 | import discord, requests, json
from discord.ext import commands
import difflib
import config
class Fortnite:
def __init__(self, bot, config):
self.bot = bot
self.data = dict()
self.keys = []
self.config = config
with open(config["weapon_data_loc"], 'r') as f:
self.data = json.load(f)
self.keys = list(self.data.keys())
def send_request(self, platform, username):
r = requests.get(self.config["url"] + platform + '/' + username)
response = r.text
try:
player_data = json.loads(self.find_between(response, 'var playerData = ', ';</script>'))
account_info = json.loads(self.find_between(response, 'var accountInfo = ', ';</script>'))
lifetime_stats = json.loads(self.find_between(response, 'var LifeTimeStats = ', ';</script>'))
except Exception:
return ''
return [player_data, account_info, lifetime_stats]
def find_between(self, s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
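    # find_between() extracts the substring between two markers; it is what pulls the
    # embedded JSON blobs out of the tracker page above, e.g. (illustrative values):
    #   find_between('var playerData = {"a": 1};</script>', 'var playerData = ', ';</script>') -> '{"a": 1}'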
@commands.command(pass_context = True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def flookup(self, ctx, platform:str, player:str):
r = self.send_request(platform.lower(), player.lower())
        try:
            solo = r[0]['p2']
        except (IndexError, KeyError):
            await self.bot.send_message(ctx.message.channel, "Player not found")
            return  # bail out here, otherwise 'solo' would be undefined below
embed = discord.Embed(title="Fortnite Stats", description="~TRN Network", color=0x00ff00)
embed.add_field(name="K/D", value=solo[9]['displayValue'], inline=False)
embed.add_field(name="Score", value=solo[1]['displayValue'], inline=True)
embed.add_field(name="Top 25", value=solo[8]['displayValue'], inline=True)
embed.add_field(name="Time Played", value=solo[12]['displayValue'], inline=True)
await self.bot.send_message(ctx.message.channel, embed=embed)
@commands.command(pass_context = True)
async def fstats(self, ctx):
args = (ctx.message.content).split()[1:]
query = (" ").join(args)
matches = (difflib.get_close_matches(query, self.keys))
if(len(matches) == 0):
await self.bot.send_message(ctx.message.channel, "No weapon found")
else:
match = self.data[matches[0]]
msg = matches[0] + ":\n"
msg += " Damage: " + match["damage"] + "\n"
msg += " DPS: " + match["dps"] + "\n"
msg += " Mag Size: " + match["mag_size"] + "\n"
msg += " Rarity: " + match["rarity"] + "\n"
msg += " Type: " + match["type"] + "\n"
embed = discord.Embed(title="Fortnite Weapon Stats", description="~Courtesy of Redux", color=0x00ff00)
embed.add_field(name=matches[0], value=match['type'], inline=False)
embed.add_field(name="Damage", value=match['damage'])
embed.add_field(name="DPS", value=match['dps'])
embed.add_field(name="Mag Size", value=match['mag_size'])
embed.add_field(name="Rarity", value=match['rarity'])
await self.bot.send_message(ctx.message.channel, embed=embed)
def setup(bot):
try:
bot.add_cog(Fortnite(bot, config.fortnite))
print("[Fortnite Module Loaded]")
except Exception as e:
print(" >> Fortnite Module: {0}".format(e)) |
the-stack_106_28758 | # -*- coding:utf-8 -*-
import pika
import sys
username = "faith"
pwd = "qq2921481"
user_pwd = pika.PlainCredentials(username, pwd)
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='172.16.54.130',
credentials=user_pwd)
)
channel = connection.channel()
# Still no queue declared here; the consumer declares and binds its own queue
# channel.queue_declare(queue='hello', durable=True)
# Declare the exchange
channel.exchange_declare(exchange='direct_logs', type='direct')
severity = sys.argv[1] if len(sys.argv) > 1 else 'info'  # defaults to the 'info' level
# message = 'hello direct_logs [%s]' % sys.argv[1:]
message = ','.join(sys.argv[1:]) or 'hello world'  # send 'hello world' if nothing is given on the command line
channel.basic_publish(exchange='direct_logs',
                      routing_key=severity,  # the severity given on the command line selects the routing key
body=message,
properties=pika.BasicProperties(delivery_mode=2)
)
print(' sent level【%s】 message ->【%s】' % (severity, message))
connection.close()
"""
(a3) catdeMacBook-Pro:rbmq cat$ python 05_direct_proceduer.py warning info # send a warning-level message
sent level【warning】message ->【warning,info】
(a3) catdeMacBook-Pro:rbmq cat$ python 05_direct_proceduer.py # send the default info-level message "hello world"
sent level【info】 message ->【hello world】
"""
"""
python 06_direct_consumer.py info warning --> receives warning-type messages
python 06_direct_consumer.py info --> receives info-type messages
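
A minimal matching consumer sketch (illustrative only; the real 06_direct_consumer.py is
not shown here). It assumes the same host, credentials, 'direct_logs' exchange and the
same pre-1.0 pika API that the producer above uses:

import pika, sys

credentials = pika.PlainCredentials("faith", "qq2921481")
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='172.16.54.130', credentials=credentials))
channel = connection.channel()
channel.exchange_declare(exchange='direct_logs', type='direct')

result = channel.queue_declare(exclusive=True)   # let the broker name a private queue
queue_name = result.method.queue

severities = sys.argv[1:] or ['info']
for severity in severities:                      # bind the queue once per requested severity
    channel.queue_bind(exchange='direct_logs', queue=queue_name, routing_key=severity)

def callback(ch, method, properties, body):
    print(' received [%s] %s' % (method.routing_key, body))

channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()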
"""
|
the-stack_106_28759 | from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.log import setLogLevel
import time
import sys
import os
from gdb_log_utils import *
class GDPSimulationTopo(Topo):
def build(self, n, loss_rate=None):
switch = self.addSwitch('s1')
for i in range(n):
host = self.addHost('h' + str(i + 1), cpu=.5 / n)
self.addLink(host, switch, cls=TCLink, bw=10, delay='20ms', loss=loss_rate)
# topos = {'simple': (lambda: GDPSimulationTopo(3, None)), 'lossy': (lambda: GDPSimulationTopo(3, 0.01))}
if len(sys.argv) != 7:
    print('NUM_LOG_SERVER, WRITE_INTERVAL, FANOUT, ALGO, FAULT_RATE, churn/no_churn')
sys.exit(2)
NUM_LOG_SERVER = int(sys.argv[1])
WRITE_INTERVAL = float(sys.argv[2])
FANOUT = int(sys.argv[3])
ALGO = sys.argv[4]
PORT = 10262
FAULT_RATE = float(sys.argv[5])
CHURN = sys.argv[6]
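# Example invocation (illustrative; the script name and values are hypothetical, and the
# interpretation of the arguments follows the variable names above):
#   sudo python gdp_simulation.py 4 0.5 2 naive 0.1 no_churn
# -> 4 log-server hosts plus one writer host, a write interval of 0.5, a replication
#    fanout of 2, the 'naive' algorithm, a fault rate of 0.1 and no churn.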
if __name__ == '__main__':
setLogLevel('info')
topo = GDPSimulationTopo(n=(NUM_LOG_SERVER + 1))
net = Mininet(topo=topo)
net.start()
# net.pingAll()
path = ','.join(sys.argv[1:])
os.system("mkdir " + path)
os.system('rm -f {0}/*.db'.format(path))
os.system('rm -f {0}/*.log'.format(path))
for i in range(NUM_LOG_SERVER):
create_fresh_logdb("{0}/{1}.db".format(path, i))
log_servers = net.hosts[:NUM_LOG_SERVER]
writer = net.hosts[-1]
for i, server in enumerate(log_servers):
# server.cmdPrint('tcpdump port {0} -i h{1}-eth0 -w {2}/{3}.pcap &'.format(PORT, i + 1, path, i))
peers_addr = ['{0}:{1}'.format(h.IP(), PORT) for h in log_servers
if h != server]
peers_addr_str = ",".join(peers_addr)
db_file = str(i) + ".db"
server_cmd = ['sudo ../gdp-replicate',
"{0}/{1}.db".format(path, i),
'{0}:{1}'.format(server.IP(), PORT),
",".join(peers_addr),
FANOUT,
'naive',
'2>', "{0}/{1}.log".format(path, i),
'&']
if ALGO != 'naive':
server_cmd.remove('naive')
server.cmdPrint(server_cmd)
writer.cmdPrint('sudo python3 writer.py',
NUM_LOG_SERVER, WRITE_INTERVAL, path, FAULT_RATE, CHURN,
'&')
time.sleep(1000)
net.stop()
|
the-stack_106_28760 | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import mle
import thread_cert
LEADER = 1
REED = 2
ROUTER2 = 3
SED = 4
class Cert_6_1_6_REEDAttachLinkQuality_SED(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'mode': 'rdn',
'panid': 0xface,
'allowlist': [REED, ROUTER2]
},
REED: {
'mode': 'rdn',
'panid': 0xface,
'router_upgrade_threshold': 0,
'allowlist': [LEADER, SED]
},
ROUTER2: {
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER, (SED, -85)]
},
SED: {
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [REED, ROUTER2]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[REED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[REED].get_state(), 'child')
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[SED].start()
self.simulator.go(10)
self.assertEqual(self.nodes[SED].get_state(), 'child')
self.assertEqual(self.nodes[REED].get_state(), 'router')
leader_messages = self.simulator.get_messages_sent_by(LEADER)
reed_messages = self.simulator.get_messages_sent_by(REED)
sed_messages = self.simulator.get_messages_sent_by(SED)
router2_messages = self.simulator.get_messages_sent_by(ROUTER2)
# 1 - Leader. REED1, Router2
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
reed_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
reed_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
router2_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router2_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router2_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router2_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
# 3 - SED
msg = sed_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::2")
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Challenge)
msg.assertMleMessageContainsTlv(mle.ScanMask)
msg.assertMleMessageContainsTlv(mle.Version)
scan_mask_tlv = msg.get_mle_message_tlv(mle.ScanMask)
self.assertEqual(1, scan_mask_tlv.router)
self.assertEqual(0, scan_mask_tlv.end_device)
# 4 - Router2
msg = router2_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
msg.assertSentToNode(self.nodes[SED])
# 5 - SED
msg = sed_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::2")
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Challenge)
msg.assertMleMessageContainsTlv(mle.ScanMask)
msg.assertMleMessageContainsTlv(mle.Version)
scan_mask_tlv = msg.get_mle_message_tlv(mle.ScanMask)
self.assertEqual(1, scan_mask_tlv.router)
self.assertEqual(1, scan_mask_tlv.end_device)
# 6 - REED
msg = router2_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
msg.assertSentToNode(self.nodes[SED])
msg = reed_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
msg.assertSentToNode(self.nodes[SED])
# 7 - SED
msg = sed_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
msg.assertMleMessageContainsTlv(mle.AddressRegistration)
msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Response)
msg.assertMleMessageContainsTlv(mle.Timeout)
msg.assertMleMessageContainsTlv(mle.TlvRequest)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsOptionalTlv(mle.MleFrameCounter)
msg.assertSentToNode(self.nodes[REED])
msg = reed_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg.assertSentToNode(self.nodes[SED])
if __name__ == '__main__':
unittest.main()
|
the-stack_106_28761 | # Copyright 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
from version import Version
class TwoWayDict(dict):
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
dict.__setitem__(self, value, key)
def __delitem__(self, key):
dict.__delitem__(self, self[key])
dict.__delitem__(self, key)
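# Usage sketch (illustrative): every assignment stores both directions, e.g.
#   d = TwoWayDict()
#   d['Apache License 2.0'] = 'Apache-2.0'
#   d['Apache-2.0']          -> 'Apache License 2.0'
#   d['Apache License 2.0']  -> 'Apache-2.0'
# len(d) therefore grows by 2 per pair, and deleting either key removes both entries.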
def load_license_list():
FILE_NAME = os.path.join(os.path.dirname(__file__), 'spdx_licenselist.csv')
    with open(FILE_NAME, 'rb') as csv_file:
        reader = csv.DictReader(csv_file)
        licenses = TwoWayDict()
        for entry in reader:
            licenses[entry['Full name of License']] = entry['License Identifier']
        return licenses
LICENSE_MAP = load_license_list()
LICENSE_LIST_VERSION = Version(major=1, minor=19)
|
the-stack_106_28763 | #< @file DevAdf5901.m
#< @author Haderer Andreas (HaAn)
#< @date 2013-06-13
#< @brief Class for configuration of Adf5901 transmitter
#< @version 1.0.1
import src.cmd_modules.DevDriver as DevDriver
from numpy import *
class DevAdf5901(DevDriver.DevDriver):
# DOXYGEN ------------------------------------------------------
#> @brief Class constructor
#>
    #> Construct a class object to configure the transmitter with an existing Frontend class object.
#>
    #> @param[in] Brd: Radarbook or Frontend class object
#>
#> @param[in] USpiCfg: Configuration of USpi interface: access of device from the baseboard
#> - <span style="color: #ff9900;"> 'Mask': </span>: Bitmask to select the device
#> - <span style="color: #ff9900;"> 'Chn': </span>: Channel of USPI interface; TX is connected to this channel
#> - <span style="color: #ff9900;"> 'Type': </span>: In the actual version only 'USpi' is supported for the type
#>
    #> @return Returns an object of the class with the desired USpi interface configuration
#>
#> e.g. with PNet TCP/IP functions
#> @code
#> Brd = Radarbook('PNet','192.168.1.1')
#> USpiCfg.Mask = 1
#> USpiCfg.Chn = 1
#> Adf5901 = DevAdf5901(Brd,USpiCfg)
#> @endcode
def __init__(self, Brd, dUSpiCfg):
super(DevAdf5901, self).__init__()
self.stVers = '1.0.1'
self.USpiCfg_Mask = 1
self.USpiCfg_Chn = 1
self.USpiCfg_Type = 'USpi'
self.RfFreqStrt = 24.125e9
self.RfRefDiv = 1
self.RfSysClk = 100e6
self.RDiv2 = 2
self.TxPwr = 100
self.RegR0Final = 0
self.Brd = Brd;
if self.Brd.DebugInf > 10:
print('ADF5901 Initialize')
if not ('Mask' in dUSpiCfg):
print('DevAdf5901: Mask not specified')
self.USpiCfg_Mask = 1
else:
self.USpiCfg_Mask = dUSpiCfg["Mask"]
if not ('Chn' in dUSpiCfg):
print('DevAdf5901: Chn not specified')
self.USpiCfg_Chn = 0
else:
self.USpiCfg_Chn = dUSpiCfg["Chn"]
if not ('Type' in dUSpiCfg):
self.USpiCfg_Type = 'USpi'
else:
self.USpiCfg_Type = dUSpiCfg["Type"]
self.DefineConst()
self.RegR0Final = self.GenRegFlag('R0',0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupVco', 1, 'PupLo', 1, 'PupAdc', 1, 'PupTx1', 0, 'PupTx2', 0);
# DOXYGEN ------------------------------------------------------
#> @brief Get version information of Adf5901 class
#>
#> Get version of class
#> - Version String is returned as string
#>
#> @return Returns the version string of the class (e.g. 0.5.0)
def GetVers(self):
return self.stVers
# DOXYGEN -------------------------------------------------
#> @brief Displays version information in Matlab command window
#>
#> Display version of class in Matlab command window
def DispVers(self):
print("ADF5901 Class Version: ", self.stVers)
# DOXYGEN ------------------------------------------------------
#> @brief Set device configuration
#>
#> This method is used to set the configuration of the device. A configuration structure is used to set the desired parameters.
#> If a field is not present in the structure, then the parameter is not changed. The function only changes the local parameters of the class.
    #> The Ini method must be called so that the configuration takes effect.
#>
#> @param[in] Cfg: structure with the desired configuration
#> - <span style="color: #ff9900;"> 'TxPwr': </span>: Desired transmit power; register setting 0 - 255
#> - <span style="color: #ff9900;"> 'TxChn': </span>: Transmit channel to be enabled <br>
#> If set to 0, then only the LO generation is enabled <br>
#> If set to 1, then the first transmit antenna is activated
#> If set to 2, then the second transmit antenna is enabled
#>
    #> @return None; the method only updates the local configuration of the class
#>
#> e.g. Enable the first TX antenna and set the power to 100
#> @code
#> Brd = Radarbook('PNet','192.168.1.1')
#> USpiCfg.Mask = 1
#> USpiCfg.Chn = 1
#> Adf5901 = DevAdf5901(Brd,USpiCfg)
#> Cfg.TxPwr = 100
#> Cfg.TxChn = 1
#> Adf5901.SetCfg(Cfg)
#> @endcode
def SetCfg(self, dCfg):
if 'TxPwr' in dCfg:
self.TxPwr = (dCfg["TxPwr"] % 256)
if 'TxChn' in dCfg:
if dCfg["TxChn"] == 0:
self.RegR0Final = self.GenRegFlag('R0',self.RegR0Final, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupVco', 1, 'PupLo', 1, 'PupAdc', 1, 'PupTx1', 0, 'PupTx2', 0);
elif dCfg["TxChn"] == 1:
self.RegR0Final = self.GenRegFlag('R0',self.RegR0Final, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupVco', 1, 'PupLo', 1, 'PupAdc', 1, 'PupTx1', 1, 'PupTx2', 0);
elif dCfg["TxChn"] == 2:
self.RegR0Final = self.GenRegFlag('R0',self.RegR0Final, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupVco', 1, 'PupLo', 1, 'PupAdc', 1, 'PupTx1', 0, 'PupTx2', 1);
else:
self.RegR0Final = self.GenRegFlag('R0',self.RegR0Final, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupVco', 1, 'PupLo', 1, 'PupAdc', 1, 'PupTx1', 0, 'PupTx2', 0);
# DOXYGEN ------------------------------------------------------
#> @brief Set device register
#>
#> This method is used to set the configuration of the device. In this method the register value is altered directly.
#> The user has to take care, that a valid register configuration is programmed.
#> The method only alters the variable of the class.
#>
    #> @param[in] Cfg: structure with the desired register configuration
#> - <span style="color: #ff9900;"> 'RegR0': </span>: Desired value for register R0
#>
def SetRegCfg(self, dCfg):
if 'RegR0' in dCfg:
self.RegR0Final = dCfg["RegR0"]
# DOXYGEN ------------------------------------------------------
#> @brief Set device basic hardware and class configuration
#>
#> This method is used to set the configuration of the class
#>
#> @param[in] Cfg: structure with the desired configuration
#> - <span style="color: #ff9900;"> 'Mask': </span>: Mask of USPI interface
#> - <span style="color: #ff9900;"> 'Chn': </span>: Channel of USPI interface <br>
#> - <span style="color: #ff9900;"> 'Type': </span>: Type of configuration interface; currently only 'USpi' is supported <br>
#> - <span style="color: #ff9900;"> 'RfFreqStrt': </span>: RF start frequency of transmitter <br>
    #> - <span style="color: #ff9900;"> 'RfRefDiv': </span>: RF reference divider for PLL <br>
#> - <span style="color: #ff9900;"> 'RfSysClk': </span>: Input clock frequency <br>
#>
def DevSetCfg(self, dCfg):
if 'Mask' in dCfg:
self.USpiCfg_Mask = dCfg["Mask"]
if 'Chn' in dCfg:
self.USpiCfg_Chn = dCfg["Chn"]
if 'Type' in dCfg:
self.USpiCfg_Type = dCfg["Type"]
if 'RfFreqStrt' in dCfg:
self.RfFreqStrt = dCfg["RfFreqStrt"]
if 'RfRefDiv' in dCfg:
self.RfRefDiv = dCfg["RfRefDiv"]
if 'RfSysClk' in dCfg:
self.RfSysClk = dCfg["RfSysClk"]
# DOXYGEN ------------------------------------------------------
#> @brief Reset device
#>
#> Not yet implemented; Function call has no effect;
#> Standard device driver function
def DevRst(self):
# reset currently not implemented
pass
# DOXYGEN ------------------------------------------------------
#> @brief Enable device
#>
#> Not yet implemented; Function call has no effect;
#> Standard device driver function
def DevEna(self):
# enable currently not implemented
pass
# DOXYGEN ------------------------------------------------------
#> @brief Disable device
#>
#> Not yet implemented; Function call has no effect;
#> Standard device driver function
def DevDi(self):
# disable currently not implemented
pass
# DOXYGEN ------------------------------------------------------
    #> @brief Program device registers to the transmitter
    #>
    #> This function programs the registers to the device. The function expects an array with 19 register values according to the
    #> device data sheet. In addition, a valid Radarbook object must be passed to the class constructor.
#>
#> @param[in] Regs array with register values <br>
#> The register values are programmed to the device over the selected SPI interface. The device registers are programmed according
#> to the following sequence. This ensures the timing constraints of the device.
#> - With the first command the registers 1-13 are programmed
#> - With the second command the registers 14-15 are programmed
#> - With the third command the registers 16 -17 are programmed
#> - With the fourth command the residual registers are set
#>
#> @return Return code of the command
def DevSetReg(self, Regs):
Ret = -1
if self.USpiCfg_Type == 'USpi':
dUSpiCfg = dict()
dUSpiCfg["Mask"] = self.USpiCfg_Mask
dUSpiCfg["Chn"] = self.USpiCfg_Chn
self.Brd.Brd.Dsp_SendSpiData(dUSpiCfg, Regs[0:13])
self.Brd.Brd.Dsp_SendSpiData(dUSpiCfg, Regs[13:15])
self.Brd.Brd.Dsp_SendSpiData(dUSpiCfg, Regs[15:17])
Ret = self.Brd.Brd.Dsp_SendSpiData(dUSpiCfg, Regs[17:19])
return Ret
# DOXYGEN ------------------------------------------------------
    #> @brief Program device registers to the transmitter directly
    #>
    #> This function programs the registers to the device without caring for timing constraints.
#>
    #> @param[in] Regs array with register values; the number of entries must not exceed 28 <br>
#> The register values are programmed to the device over the selected SPI interface.
#>
#> @return Return code of the command
def DevSetRegDirect(self, Regs):
Ret = -1
if self.USpiCfg_Type == 'USpi':
dUSpiCfg = dict()
dUSpiCfg["Mask"] = self.USpiCfg_Mask
dUSpiCfg["Chn"] = self.USpiCfg_Chn
if len(Regs) > 28:
Regs = Regs[0:28]
Ret = self.Brd.Brd.Dsp_SendSpiData(dUSpiCfg, Regs)
return Ret
# DOXYGEN ------------------------------------------------------
#> @brief Get device registers
#>
#> Not yet implemented; Function call has no effect;
#> Standard device driver function
def DevGetReg(self, Regs):
pass
# DOXYGEN ------------------------------------------------------
#> @brief Initialize device
#>
    #> This function generates the configuration from the settings programmed to the class object.
    #> First the registers are generated with GenRegs() and thereafter the DevSetReg() method is called
#>
#> @return Return code of the DevSetReg method
#>
    #> e.g. Enable the first TX antenna and set the power to 100 and call the Ini function. In this case the transmitter is
#> configured
#> @code
#> Brd = Radarbook('PNet','192.168.1.1')
#> USpiCfg.Mask = 1
#> USpiCfg.Chn = 1
#> Adf5901 = DevAdf5901(Brd,USpiCfg)
#> Cfg.TxPwr = 100
#> Cfg.TxChn = 1
#> Adf5901.SetCfg(Cfg)
#> Adf5901.Ini()
#> @endcode
def Ini(self):
Data = self.GenRegs()
        Ret = self.DevSetReg(Data)
        return Ret
# DOXYGEN ------------------------------------------------------
#> @brief This function generates the register values for the device
#>
    #> This function generates the register values for the device according to the sequence stated in the datasheet.
#> The class settings are automatically included in the generated registers.
#>
#> @return Array with device register values.
#>
    #> @note If the standard configuration is used, the Ini method should be called to configure the device.
def GenRegs(self):
Data = zeros(19, dtype = uint32)
#--------------------------------------------------------------
# Initialize Register 7:
# Master Reset
#--------------------------------------------------------------
Data[0] = self.GenRegFlag('R7', 0 , 'MsRst', 1)
#--------------------------------------------------------------
# Initialize Register 10:
# Reserved
#--------------------------------------------------------------
Data[1] = self.GenRegFlag('R10', 0 )
#--------------------------------------------------------------
# Initialize Register 9:
# Reserved
#--------------------------------------------------------------
Data[2] = self.GenRegFlag('R9', 0 )
#--------------------------------------------------------------
# Initialize Register 8:
# Requency divider for calibration
#--------------------------------------------------------------
Data[3] = self.GenRegFlag('R8', 0, 'FreqCalDiv', 500) #
#--------------------------------------------------------------
# Initialize Register 0:
# Reserved
#--------------------------------------------------------------
Data[4] = self.GenRegFlag('R0', 0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupNCntr', 1, 'PupRCntr', 1, 'PupAdc', 1, 'PupVco', 1, 'PupLo', 1) #0x809FE520
#--------------------------------------------------------------
# Initialize Register 7:
# Mater reset
#--------------------------------------------------------------
if self.RDiv2 > 1:
Flag = 1
else:
Flag = 0
Data[5] = self.GenRegFlag('R7', 0, 'RDiv', self.RfRefDiv, 'ClkDiv', 500, 'RDiv2', Flag) #0x011F4827,
RefClk = self.RfSysClk/self.RfRefDiv/self.RDiv2
Div = self.RfFreqStrt/(2*RefClk)
        DivInt = int(floor(Div))
        DivFrac = int(round((Div - DivInt) * 2**25))
        DivFracMsb = DivFrac // 2**13
DivFracLsb = DivFrac % 2**13
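        # Worked example with the class defaults (illustrative):
        #   RefClk = 100 MHz / 1 / 2 = 50 MHz
        #   Div    = 24.125 GHz / (2 * 50 MHz) = 241.25
        #   DivInt = 241, DivFrac = round(0.25 * 2**25) = 8388608
        #   DivFracMsb = 8388608 // 2**13 = 1024, DivFracLsb = 8388608 % 2**13 = 0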
#--------------------------------------------------------------
# Initialize Register 6:
# Reserved: Frac LSB:
#--------------------------------------------------------------
Data[6] = self.GenRegFlag('R6', 0, 'FracLsb', DivFracLsb)
#--------------------------------------------------------------
# Initialize Register 5:
# Reserved: Frac MSB:
#--------------------------------------------------------------
Data[7] = self.GenRegFlag('R5', 0, 'FracMsb', DivFracMsb, 'Int', DivInt) #0x01E28005
#--------------------------------------------------------------
# Initialize Register 4:
# Analog Test Bus Configuration
#--------------------------------------------------------------
Data[8] = self.GenRegFlag('R4', 0) #0x00200004
#--------------------------------------------------------------
# Initialize Register 3:
# Io Configuration
#--------------------------------------------------------------
Data[9] = self.GenRegFlag('R3', 0, 'ReadBackCtrl', 0, 'IoLev', 1, 'MuxOut', 0)
#--------------------------------------------------------------
# Initialize Register 2:
# Adc configuration
#--------------------------------------------------------------
Data[10] = int("0x00020642",0)#self.GenRegFlag('R2', 0, 'AdcClkDiv', 100, 'AdcAv', 0)
#--------------------------------------------------------------
# Initialize Register 1:
# Tx Amplitude Calibration
#--------------------------------------------------------------
Data[11] = self.GenRegFlag('R1', 0, 'TxAmpCalRefCode', self.TxPwr)
#--------------------------------------------------------------
# Initialize Register 0:
# Enable and Calibration: Calibrate VCO
#--------------------------------------------------------------
Data[12] = self.GenRegFlag('R0', 0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupNCntr', 1, 'PupRCntr', 1, 'PupVco', 1, 'VcoCal', 1, 'PupAdc', 1)
#--------------------------------------------------------------
# Initialize Register 0:
# Enable and Calibration: Tx1 On, Lo On, VCO On
#--------------------------------------------------------------
Data[13] = self.GenRegFlag('R0', 0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupNCntr', 1, 'PupRCntr', 1, 'PupVco', 1, 'PupTx1', 1, 'PupLo', 1, 'PupAdc', 1)
#--------------------------------------------------------------
# Initialize Register 0:
# Enable and Calibration: Tx1 Amplitude Calibration
#--------------------------------------------------------------
Data[14] = self.GenRegFlag('R0', 0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupNCntr', 1, 'PupRCntr', 1, 'PupVco', 1, 'PupTx1', 1, 'PupLo', 1, 'Tx1AmpCal', 1, 'PupAdc', 1)
#--------------------------------------------------------------
# Initialize Register 0:
# Enable and Calibration: Tx2 On, Lo On, VCO on
#--------------------------------------------------------------
Data[15] = self.GenRegFlag('R0', 0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupNCntr', 1, 'PupRCntr', 1, 'PupVco', 1, 'PupTx2', 1, 'PupLo', 1, 'PupAdc', 1)
#--------------------------------------------------------------
# Initialize Register 0:
# Enable and Calibration: Tx2 Amplitude Calibration
#--------------------------------------------------------------
Data[16] = self.GenRegFlag('R0', 0, 'AuxBufGain', 4, 'AuxDiv', 1, 'PupNCntr', 1, 'PupRCntr', 1, 'PupVco', 1, 'PupTx2', 1, 'PupLo', 1, 'Tx2AmpCal', 1, 'PupAdc', 1)
#--------------------------------------------------------------
# Initialize Register 9:
# ??? R9 ENABLES VTUNE INPUT!!!!!!!!!!!!
#--------------------------------------------------------------
#Data = [Data; self.GenRegFlag('R9', 0)];
Data[17] = int ('0x2800B929',0)
#--------------------------------------------------------------
# Initialize Register 0:
# R0
#--------------------------------------------------------------
Data[18] = self.RegR0Final
return Data
def DefineConst(self):
# ----------------------------------------------------
# Define Register 1
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R0"
dReg["Adr"] = 0
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 0
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "PupLo"
dField["Strt"] = 5
dField["Stop"] = 5
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "PupTx1"
dField["Strt"] = 6
dField["Stop"] = 6
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "PupTx2"
dField["Strt"] = 7
dField["Stop"] = 7
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "PupAdc"
dField["Strt"] = 8
dField["Stop"] = 8
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "VcoCal"
dField["Strt"] = 9
dField["Stop"] = 9
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "PupVco"
dField["Strt"] = 10
dField["Stop"] = 10
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Tx1AmpCal"
dField["Strt"] = 11
dField["Stop"] = 11
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Tx2AmpCal"
dField["Strt"] = 12
dField["Stop"] = 12
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 13
dField["Stop"] = 13
dField["Val"] = 1
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "PupNCntr"
dField["Strt"] = 14
dField["Stop"] = 14
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "PupRCntr"
dField["Strt"] = 15
dField["Stop"] = 15
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 16
dField["Stop"] = 19
dField["Val"] = 15
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "AuxDiv"
dField["Strt"] = 20
dField["Stop"] = 20
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "AuxBufGain"
dField["Strt"] = 21
dField["Stop"] = 23
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 24
dField["Stop"] = 31
dField["Val"] = 128
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 2
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R1"
dReg["Adr"] = 1
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 1
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "TxAmpCalRefCode"
dField["Strt"] = 5
dField["Stop"] = 12
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 13
dField["Stop"] = 31
dField["Val"] = 524207
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 3
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R2"
dReg["Adr"] = 2
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 2
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "AdcClkDiv"
dField["Strt"] = 5
dField["Stop"] = 12
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "AdcAv"
dField["Strt"] = 13
dField["Stop"] = 14
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "AdcStrt"
dField["Strt"] = 15
dField["Stop"] = 15
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 16
dField["Stop"] = 31
dField["Val"] = 2
dField["Res"] = 0
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 4
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R3"
dReg["Adr"] = 3
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 3
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "ReadBackCtrl"
dField["Strt"] = 5
dField["Stop"] = 10
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "IoLev"
dField["Strt"] = 11
dField["Stop"] = 11
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "MuxOut"
dField["Strt"] = 12
dField["Stop"] = 15
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 16
dField["Stop"] = 31
dField["Val"] = 393
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 5
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R4"
dReg["Adr"] = 4
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 4
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "AnaTstBus"
dField["Strt"] = 5
dField["Stop"] = 14
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "TstBusToPin"
dField["Strt"] = 15
dField["Stop"] = 15
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "TstBusToAdc"
dField["Strt"] = 16
dField["Stop"] = 16
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 17
dField["Stop"] = 31
dField["Val"] = 16
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 6
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R5"
dReg["Adr"] = 5
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 5
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "FracMsb"
dField["Strt"] = 5
dField["Stop"] = 16
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Int"
dField["Strt"] = 17
dField["Stop"] = 28
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 29
dField["Stop"] = 31
dField["Val"] = 0
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 7
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R6"
dReg["Adr"] = 6
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 6
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "FracLsb"
dField["Strt"] = 5
dField["Stop"] = 17
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 18
dField["Stop"] = 31
dField["Val"] = 0
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 8
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R7"
dReg["Adr"] = 7
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 7
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "RDiv"
dField["Strt"] = 5
dField["Stop"] = 9
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "RefDoub"
dField["Strt"] = 10
dField["Stop"] = 10
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "RDiv2"
dField["Strt"] = 11
dField["Stop"] = 11
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "ClkDiv"
dField["Strt"] = 12
dField["Stop"] = 23
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 24
dField["Stop"] = 24
dField["Val"] = 1
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "MsRst"
dField["Strt"] = 25
dField["Stop"] = 25
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 26
dField["Stop"] = 31
dField["Val"] = 0
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 9
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R8"
dReg["Adr"] = 8
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 8
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "FreqCalDiv"
dField["Strt"] = 5
dField["Stop"] = 14
dField["Val"] = 0
dField["Res"] = 0
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 15
dField["Stop"] = 31
dField["Val"] = 32768
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 10
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R9"
dReg["Adr"] = 9
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 9
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 5
dField["Stop"] = 31
dField["Val"] = 22087113
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
# ----------------------------------------------------
# Define Register 11
# ----------------------------------------------------
dReg = dict()
dReg["Name"] = "R10"
dReg["Adr"] = 10
dReg["Val"] = 0
lFields = list()
dField = dict()
dField["Name"] = "Ctrl"
dField["Strt"] = 0
dField["Stop"] = 4
dField["Val"] = 10
dField["Res"] = 1
lFields.append(dField)
dField = dict()
dField["Name"] = "Res"
dField["Strt"] = 5
dField["Stop"] = 31
dField["Val"] = 16777215
dField["Res"] = 1
lFields.append(dField)
dReg["lFields"] = lFields
self.lRegs.append(dReg)
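    # --------------------------------------------------------------------
    # Sketch (not part of the original source): a helper showing how the
    # field descriptors above could be packed into a 32-bit register word,
    # assuming each field's "Val" occupies bits "Strt".."Stop" inclusive.
    # The method name is hypothetical; only the dict keys ("lFields",
    # "Strt", "Stop", "Val") come from the definitions built above.
    # --------------------------------------------------------------------
    def _pack_register_value(self, dReg):
        dVal = 0
        for dField in dReg["lFields"]:
            iWidth = dField["Stop"] - dField["Strt"] + 1
            iMask = (1 << iWidth) - 1
            dVal |= (dField["Val"] & iMask) << dField["Strt"]
        return dVal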
|
the-stack_106_28764 | '''Comparing a simple CNN with a convolutional MoE model on the CIFAR10 dataset. Based on the cifar10_cnn.py file in the
keras/examples folder.
'''
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, MaxPooling2D, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from ConvolutionalMoE import Conv2DMoE
from DenseMoE import DenseMoE
#from scipy.io import savemat
import os
batch_size = 32
num_classes = 10
epochs = 1
data_augmentation = True
num_predictions = 20
#which_model = 'cnn' # 'moe' or 'cnn'
which_model = 'moe' # 'moe' or 'cnn'
job_idx = 3
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if which_model == 'moe':
# MoE model
num_experts_per_filter = 2
model = Sequential()
model.add(Conv2DMoE(32, num_experts_per_filter, (3, 3), expert_activation='relu', gating_activation='softmax', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2DMoE(32, num_experts_per_filter, (3, 3), expert_activation='relu', gating_activation='softmax'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2DMoE(64, num_experts_per_filter, (3, 3), expert_activation='relu', gating_activation='softmax', padding='same'))
model.add(Conv2DMoE(64, num_experts_per_filter, (3, 3), expert_activation='relu', gating_activation='softmax'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(DenseMoE(512, num_experts_per_filter, expert_activation='relu', gating_activation='softmax'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
elif which_model == 'cnn':
# plain Conv model
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
# In TF2 the optimizer class lives under tf.keras.optimizers and the argument
# is `learning_rate` (the old `lr` spelling is deprecated).
#opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
opt = tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
hist = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
zca_epsilon=1e-06, # epsilon for ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
shear_range=0., # set range for random shear
zoom_range=0., # set range for random zoom
channel_shift_range=0., # set range for random channel shifts
fill_mode='nearest', # set mode for filling points outside the input boundaries
cval=0., # value used for fill_mode = "constant"
horizontal_flip=True, # randomly flip images
vertical_flip=False, # randomly flip images
rescale=None, # set rescaling factor (applied before any other transformation)
preprocessing_function=None, # set function that will be applied on each input
data_format=None # image data format, either "channels_first" or "channels_last"
)
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs,
#steps_per_epoch=len(x_train) / batch_size,
steps_per_epoch=5,
validation_data=(x_test, y_test),
workers=4)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
|
the-stack_106_28766 | # coding: utf-8
from __future__ import unicode_literals
import os
from os import path
import random
import datetime
from pathlib import Path
from bin.wiki_entity_linking import wikipedia_processor as wp
from bin.wiki_entity_linking import training_set_creator, kb_creator
from bin.wiki_entity_linking.kb_creator import DESC_WIDTH
import spacy
from spacy.kb import KnowledgeBase
from spacy.util import minibatch, compounding
"""
Demonstrate how to build a knowledge base from WikiData and run an Entity Linking algorithm.
"""
ROOT_DIR = Path("C:/Users/Sofie/Documents/data/")
OUTPUT_DIR = ROOT_DIR / "wikipedia"
TRAINING_DIR = OUTPUT_DIR / "training_data_nel"
PRIOR_PROB = OUTPUT_DIR / "prior_prob.csv"
ENTITY_COUNTS = OUTPUT_DIR / "entity_freq.csv"
ENTITY_DEFS = OUTPUT_DIR / "entity_defs.csv"
ENTITY_DESCR = OUTPUT_DIR / "entity_descriptions.csv"
KB_DIR = OUTPUT_DIR / "kb_1"
KB_FILE = "kb"
NLP_1_DIR = OUTPUT_DIR / "nlp_1"
NLP_2_DIR = OUTPUT_DIR / "nlp_2"
# get latest-all.json.bz2 from https://dumps.wikimedia.org/wikidatawiki/entities/
WIKIDATA_JSON = ROOT_DIR / "wikidata" / "wikidata-20190304-all.json.bz2"
# get enwiki-latest-pages-articles-multistream.xml.bz2 from https://dumps.wikimedia.org/enwiki/latest/
ENWIKI_DUMP = (
ROOT_DIR / "wikipedia" / "enwiki-20190320-pages-articles-multistream.xml.bz2"
)
# KB construction parameters
MAX_CANDIDATES = 10
MIN_ENTITY_FREQ = 20
MIN_PAIR_OCC = 5
# model training parameters
EPOCHS = 10
DROPOUT = 0.5
LEARN_RATE = 0.005
L2 = 1e-6
CONTEXT_WIDTH = 128
def now():
return datetime.datetime.now()
def run_pipeline():
# set the appropriate booleans to define which parts of the pipeline should be re(run)
print("START", now())
print()
nlp_1 = spacy.load("en_core_web_lg")
nlp_2 = None
kb_2 = None
# one-time methods to create KB and write to file
to_create_prior_probs = False
to_create_entity_counts = False
to_create_kb = False
# read KB back in from file
to_read_kb = True
to_test_kb = False
# create training dataset
create_wp_training = False
# train the EL pipe
train_pipe = True
measure_performance = True
# test the EL pipe on a simple example
to_test_pipeline = True
# write the NLP object, read back in and test again
to_write_nlp = True
to_read_nlp = True
test_from_file = False
# STEP 1 : create prior probabilities from WP (run only once)
if to_create_prior_probs:
print("STEP 1: to_create_prior_probs", now())
wp.read_prior_probs(ENWIKI_DUMP, PRIOR_PROB)
print()
# STEP 2 : deduce entity frequencies from WP (run only once)
if to_create_entity_counts:
print("STEP 2: to_create_entity_counts", now())
wp.write_entity_counts(PRIOR_PROB, ENTITY_COUNTS, to_print=False)
print()
# STEP 3 : create KB and write to file (run only once)
if to_create_kb:
print("STEP 3a: to_create_kb", now())
kb_1 = kb_creator.create_kb(
nlp=nlp_1,
max_entities_per_alias=MAX_CANDIDATES,
min_entity_freq=MIN_ENTITY_FREQ,
min_occ=MIN_PAIR_OCC,
entity_def_output=ENTITY_DEFS,
entity_descr_output=ENTITY_DESCR,
count_input=ENTITY_COUNTS,
prior_prob_input=PRIOR_PROB,
wikidata_input=WIKIDATA_JSON,
)
print("kb entities:", kb_1.get_size_entities())
print("kb aliases:", kb_1.get_size_aliases())
print()
print("STEP 3b: write KB and NLP", now())
if not path.exists(KB_DIR):
os.makedirs(KB_DIR)
kb_1.dump(KB_DIR / KB_FILE)
nlp_1.to_disk(NLP_1_DIR)
print()
# STEP 4 : read KB back in from file
if to_read_kb:
print("STEP 4: to_read_kb", now())
nlp_2 = spacy.load(NLP_1_DIR)
kb_2 = KnowledgeBase(vocab=nlp_2.vocab, entity_vector_length=DESC_WIDTH)
kb_2.load_bulk(KB_DIR / KB_FILE)
print("kb entities:", kb_2.get_size_entities())
print("kb aliases:", kb_2.get_size_aliases())
print()
# test KB
if to_test_kb:
check_kb(kb_2)
print()
# STEP 5: create a training dataset from WP
if create_wp_training:
print("STEP 5: create training dataset", now())
training_set_creator.create_training(
wikipedia_input=ENWIKI_DUMP,
entity_def_input=ENTITY_DEFS,
training_output=TRAINING_DIR,
)
# STEP 6: create and train the entity linking pipe
if train_pipe:
print("STEP 6: training Entity Linking pipe", now())
type_to_int = {label: i for i, label in enumerate(nlp_2.entity.labels)}
print(" -analysing", len(type_to_int), "different entity types")
el_pipe = nlp_2.create_pipe(
name="entity_linker",
config={
"context_width": CONTEXT_WIDTH,
"pretrained_vectors": nlp_2.vocab.vectors.name,
"type_to_int": type_to_int,
},
)
el_pipe.set_kb(kb_2)
nlp_2.add_pipe(el_pipe, last=True)
other_pipes = [pipe for pipe in nlp_2.pipe_names if pipe != "entity_linker"]
with nlp_2.disable_pipes(*other_pipes): # only train Entity Linking
optimizer = nlp_2.begin_training()
optimizer.learn_rate = LEARN_RATE
optimizer.L2 = L2
# define the size (nr of entities) of training and dev set
train_limit = 5000
dev_limit = 5000
# for training, get pos & neg instances that correspond to entries in the kb
train_data = training_set_creator.read_training(
nlp=nlp_2,
training_dir=TRAINING_DIR,
dev=False,
limit=train_limit,
kb=el_pipe.kb,
)
print("Training on", len(train_data), "articles")
print()
# for testing, get all pos instances, whether or not they are in the kb
dev_data = training_set_creator.read_training(
nlp=nlp_2, training_dir=TRAINING_DIR, dev=True, limit=dev_limit, kb=None
)
print("Dev testing on", len(dev_data), "articles")
print()
if not train_data:
print("Did not find any training data")
else:
for itn in range(EPOCHS):
random.shuffle(train_data)
losses = {}
batches = minibatch(train_data, size=compounding(4.0, 128.0, 1.001))
batchnr = 0
with nlp_2.disable_pipes(*other_pipes):
for batch in batches:
try:
docs, golds = zip(*batch)
nlp_2.update(
docs=docs,
golds=golds,
sgd=optimizer,
drop=DROPOUT,
losses=losses,
)
batchnr += 1
except Exception as e:
print("Error updating batch:", e)
if batchnr > 0:
el_pipe.cfg["context_weight"] = 1
el_pipe.cfg["prior_weight"] = 1
dev_acc_context, _ = _measure_acc(dev_data, el_pipe)
losses["entity_linker"] = losses["entity_linker"] / batchnr
print(
"Epoch, train loss",
itn,
round(losses["entity_linker"], 2),
" / dev acc avg",
round(dev_acc_context, 3),
)
# STEP 7: measure the performance of our trained pipe on an independent dev set
if len(dev_data) and measure_performance:
print()
print("STEP 7: performance measurement of Entity Linking pipe", now())
print()
counts, acc_r, acc_r_d, acc_p, acc_p_d, acc_o, acc_o_d = _measure_baselines(
dev_data, kb_2
)
print("dev counts:", sorted(counts.items(), key=lambda x: x[0]))
oracle_by_label = [(x, round(y, 3)) for x, y in acc_o_d.items()]
print("dev acc oracle:", round(acc_o, 3), oracle_by_label)
random_by_label = [(x, round(y, 3)) for x, y in acc_r_d.items()]
print("dev acc random:", round(acc_r, 3), random_by_label)
prior_by_label = [(x, round(y, 3)) for x, y in acc_p_d.items()]
print("dev acc prior:", round(acc_p, 3), prior_by_label)
# using only context
el_pipe.cfg["context_weight"] = 1
el_pipe.cfg["prior_weight"] = 0
dev_acc_context, dev_acc_cont_d = _measure_acc(dev_data, el_pipe)
context_by_label = [(x, round(y, 3)) for x, y in dev_acc_cont_d.items()]
print("dev acc context avg:", round(dev_acc_context, 3), context_by_label)
# measuring combined accuracy (prior + context)
el_pipe.cfg["context_weight"] = 1
el_pipe.cfg["prior_weight"] = 1
dev_acc_combo, dev_acc_combo_d = _measure_acc(dev_data, el_pipe)
combo_by_label = [(x, round(y, 3)) for x, y in dev_acc_combo_d.items()]
print("dev acc combo avg:", round(dev_acc_combo, 3), combo_by_label)
# STEP 8: apply the EL pipe on a toy example
if to_test_pipeline:
print()
print("STEP 8: applying Entity Linking to toy example", now())
print()
run_el_toy_example(nlp=nlp_2)
# STEP 9: write the NLP pipeline (including entity linker) to file
if to_write_nlp:
print()
print("STEP 9: testing NLP IO", now())
print()
print("writing to", NLP_2_DIR)
nlp_2.to_disk(NLP_2_DIR)
print()
# verify that the IO has gone correctly
if to_read_nlp:
print("reading from", NLP_2_DIR)
nlp_3 = spacy.load(NLP_2_DIR)
print("running toy example with NLP 3")
run_el_toy_example(nlp=nlp_3)
# testing performance with an NLP model from file
if test_from_file:
nlp_2 = spacy.load(NLP_1_DIR)
nlp_3 = spacy.load(NLP_2_DIR)
el_pipe = nlp_3.get_pipe("entity_linker")
dev_limit = 5000
dev_data = training_set_creator.read_training(
nlp=nlp_2, training_dir=TRAINING_DIR, dev=True, limit=dev_limit, kb=None
)
print("Dev testing from file on", len(dev_data), "articles")
print()
dev_acc_combo, dev_acc_combo_dict = _measure_acc(dev_data, el_pipe)
combo_by_label = [(x, round(y, 3)) for x, y in dev_acc_combo_dict.items()]
print("dev acc combo avg:", round(dev_acc_combo, 3), combo_by_label)
print()
print("STOP", now())
def _measure_acc(data, el_pipe=None, error_analysis=False):
# If the docs in the data require further processing with an entity linker, set el_pipe
correct_by_label = dict()
incorrect_by_label = dict()
docs = [d for d, g in data if len(d) > 0]
if el_pipe is not None:
docs = list(el_pipe.pipe(docs))
golds = [g for d, g in data if len(d) > 0]
for doc, gold in zip(docs, golds):
try:
correct_entries_per_article = dict()
for entity, kb_dict in gold.links.items():
start, end = entity
# only evaluating on positive examples
for gold_kb, value in kb_dict.items():
if value:
offset = _offset(start, end)
correct_entries_per_article[offset] = gold_kb
for ent in doc.ents:
ent_label = ent.label_
pred_entity = ent.kb_id_
start = ent.start_char
end = ent.end_char
offset = _offset(start, end)
gold_entity = correct_entries_per_article.get(offset, None)
# the gold annotations are not complete so we can't evaluate missing annotations as 'wrong'
if gold_entity is not None:
if gold_entity == pred_entity:
correct = correct_by_label.get(ent_label, 0)
correct_by_label[ent_label] = correct + 1
else:
incorrect = incorrect_by_label.get(ent_label, 0)
incorrect_by_label[ent_label] = incorrect + 1
if error_analysis:
print(ent.text, "in", doc)
print(
"Predicted",
pred_entity,
"should have been",
gold_entity,
)
print()
except Exception as e:
print("Error assessing accuracy", e)
acc, acc_by_label = calculate_acc(correct_by_label, incorrect_by_label)
return acc, acc_by_label
def _measure_baselines(data, kb):
# Measure 3 performance baselines: random selection, prior probabilities, and 'oracle' prediction for upper bound
counts_d = dict()
random_correct_d = dict()
random_incorrect_d = dict()
oracle_correct_d = dict()
oracle_incorrect_d = dict()
prior_correct_d = dict()
prior_incorrect_d = dict()
docs = [d for d, g in data if len(d) > 0]
golds = [g for d, g in data if len(d) > 0]
for doc, gold in zip(docs, golds):
try:
correct_entries_per_article = dict()
for entity, kb_dict in gold.links.items():
start, end = entity
for gold_kb, value in kb_dict.items():
# only evaluating on positive examples
if value:
offset = _offset(start, end)
correct_entries_per_article[offset] = gold_kb
for ent in doc.ents:
label = ent.label_
start = ent.start_char
end = ent.end_char
offset = _offset(start, end)
gold_entity = correct_entries_per_article.get(offset, None)
# the gold annotations are not complete so we can't evaluate missing annotations as 'wrong'
if gold_entity is not None:
counts_d[label] = counts_d.get(label, 0) + 1
candidates = kb.get_candidates(ent.text)
oracle_candidate = ""
best_candidate = ""
random_candidate = ""
if candidates:
scores = []
for c in candidates:
scores.append(c.prior_prob)
if c.entity_ == gold_entity:
oracle_candidate = c.entity_
best_index = scores.index(max(scores))
best_candidate = candidates[best_index].entity_
random_candidate = random.choice(candidates).entity_
if gold_entity == best_candidate:
prior_correct_d[label] = prior_correct_d.get(label, 0) + 1
else:
prior_incorrect_d[label] = prior_incorrect_d.get(label, 0) + 1
if gold_entity == random_candidate:
random_correct_d[label] = random_correct_d.get(label, 0) + 1
else:
random_incorrect_d[label] = random_incorrect_d.get(label, 0) + 1
if gold_entity == oracle_candidate:
oracle_correct_d[label] = oracle_correct_d.get(label, 0) + 1
else:
oracle_incorrect_d[label] = oracle_incorrect_d.get(label, 0) + 1
except Exception as e:
print("Error assessing accuracy", e)
acc_prior, acc_prior_d = calculate_acc(prior_correct_d, prior_incorrect_d)
acc_rand, acc_rand_d = calculate_acc(random_correct_d, random_incorrect_d)
acc_oracle, acc_oracle_d = calculate_acc(oracle_correct_d, oracle_incorrect_d)
return (
counts_d,
acc_rand,
acc_rand_d,
acc_prior,
acc_prior_d,
acc_oracle,
acc_oracle_d,
)
def _offset(start, end):
return "{}_{}".format(start, end)
def calculate_acc(correct_by_label, incorrect_by_label):
acc_by_label = dict()
total_correct = 0
total_incorrect = 0
all_keys = set()
all_keys.update(correct_by_label.keys())
all_keys.update(incorrect_by_label.keys())
for label in sorted(all_keys):
correct = correct_by_label.get(label, 0)
incorrect = incorrect_by_label.get(label, 0)
total_correct += correct
total_incorrect += incorrect
if correct == incorrect == 0:
acc_by_label[label] = 0
else:
acc_by_label[label] = correct / (correct + incorrect)
acc = 0
if not (total_correct == total_incorrect == 0):
acc = total_correct / (total_correct + total_incorrect)
return acc, acc_by_label
def check_kb(kb):
for mention in ("Bush", "Douglas Adams", "Homer", "Brazil", "China"):
candidates = kb.get_candidates(mention)
print("generating candidates for " + mention + " :")
for c in candidates:
print(
" ",
c.prior_prob,
c.alias_,
"-->",
c.entity_ + " (freq=" + str(c.entity_freq) + ")",
)
print()
def run_el_toy_example(nlp):
text = (
"In The Hitchhiker's Guide to the Galaxy, written by Douglas Adams, "
"Douglas reminds us to always bring our towel, even in China or Brazil. "
"The main character in Doug's novel is the man Arthur Dent, "
"but Dougledydoug doesn't write about George Washington or Homer Simpson."
)
doc = nlp(text)
print(text)
for ent in doc.ents:
print(" ent", ent.text, ent.label_, ent.kb_id_)
print()
if __name__ == "__main__":
run_pipeline()
|
the-stack_106_28768 | #!/usr/bin/python3
"""
AUTHOR: Matthew May - [email protected]
"""
# Imports
import json
import redis
import io
from sys import exit
from dbconst import META, PORTMAP, REDIS_IP, SYSLOG_PATH, DB_PATH, HQ_IP
from time import gmtime, localtime, sleep, strftime
import maxminddb
import itertools
from collections import defaultdict
# import logging
# import re
# from argparse import ArgumentParser, RawDescriptionHelpFormatter
# from os import getuid
# def menu():
# Instantiate parser
# parser = ArgumentParser(
# prog='DataServer.py',
# usage='%(progs)s [OPTIONS]',
# formatter_class=RawDescriptionHelpFormatter,
# description=dedent('''\
# --------------------------------------------------------------
# Data server for attack map application.
# --------------------------------------------------------------'''))
# @TODO --> Add support for command line args?
# define command line arguments
# parser.add_argument('-db', '--database', dest='DB_PATH', required=True, type=str, help='path to maxmind database')
# parser.add_argument('-m', '--readme', dest='readme', help='print readme')
# parser.add_argument('-o', '--output', dest='output', help='file to write logs to')
# parser.add_argument('-r', '--random', action='store_true', dest='randomize', help='generate random IPs/protocols for demo')
# parser.add_argument('-rs', '--redis-server-ip', dest='REDIS_IP', type=str, help='redis server ip address')
# parser.add_argument('-sp', '--syslog-path', dest='SYSLOG_PATH', type=str, help='path to syslog file')
# parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='run server in verbose mode')
# Parse arguments/options
# args = parser.parse_args()
# return args
# @TODO
# Refactor/improve parsing
# This function depends heavily on which appliances are generating logs
# For now it is only here for testing
def parse_syslog(line):
line = line.split()
data = line[-1]
data = data.split(',')
if len(data) != 6:
print('NOT A VALID LOG')
return False
else:
src_ip = data[0]
dst_ip = data[1]
src_port = data[2]
dst_port = data[3]
type_attack = data[4]
cve_attack = data[5]
data_dict = {
'src_ip':src_ip,
'dst_ip':dst_ip,
'src_port':src_port,
'dst_port':dst_port,
'type_attack':type_attack,
'cve_attack':cve_attack
}
return data_dict
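# Example (sketch, not from the original source): parse_syslog() only looks at
# the last whitespace-separated token of a line, so a line such as
#   "Jan  1 00:00:00 sensor ids: 10.0.0.1,192.168.1.5,44321,22,bruteforce,CVE-0000-0000"
# would yield
#   {'src_ip': '10.0.0.1', 'dst_ip': '192.168.1.5', 'src_port': '44321',
#    'dst_port': '22', 'type_attack': 'bruteforce', 'cve_attack': 'CVE-0000-0000'}
# The syslog prefix and the field values shown here are made-up placeholders.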
def clean_db(unclean, src_or_dst):
selected = {}
for tag in META:
if tag['tag'] in unclean:
head = unclean[tag['tag']]
for node in tag['path']:
if node in head:
head = head[node]
else:
head = None
break
selected[src_or_dst + "_" + tag['lookup']] = head
return selected
def connect_redis():
r = redis.StrictRedis(host=REDIS_IP, port=6379, db=0)
return r
def get_msg_type():
# @TODO
# Add support for more message types later
return "Traffic"
# Check to see if packet is using an interesting TCP/UDP protocol based on source or destination port
def get_tcp_udp_proto(src_port, dst_port):
src_port = int(src_port)
dst_port = int(dst_port)
if src_port in PORTMAP:
return PORTMAP[src_port]
if dst_port in PORTMAP:
return PORTMAP[dst_port]
return "OTHER"
def parse_maxminddb(ip):
try:
reader = maxminddb.open_database(DB_PATH)
response = reader.get(ip)
reader.close()
return response
except FileNotFoundError:
print('DB not found')
print('SHUTTING DOWN')
exit()
except ValueError:
return False
def merge_dicts(*args):
super_dict = {}
for arg in args:
super_dict.update(arg)
return super_dict
# Create clean dictionary using unclean db dictionary contents
server_start_time = strftime("%d-%m-%Y %H:%M:%S", localtime()) # local time
event_count = 0
unknowns = defaultdict(int)
src_continents_tracked = defaultdict(int)
src_countries_tracked = defaultdict(int)
src_ips_tracked = defaultdict(int)
dst_continents_tracked = defaultdict(int)
dst_countries_tracked = defaultdict(int)
dst_ips_tracked = defaultdict(int)
country_to_code = {}
ip_to_code = {}
def track_flags(super_dict, tracking_dict, key1, key2):
    # Record e.g. country -> iso_code the first time that value is seen.
    if key1 in super_dict and key2 in super_dict and super_dict[key1] not in tracking_dict:
        tracking_dict[super_dict[key1]] = super_dict[key2]
def track_stats(super_dict, tracking_dict, key):
node = super_dict.get(key, False)
if node is not False:
tracking_dict[node] += 1
else:
unknowns[key] += 1
def to_json(syslog_data_dict):
src_ip_db_unclean = parse_maxminddb(syslog_data_dict['src_ip'])
dst_ip_db_unclean = parse_maxminddb(syslog_data_dict['dst_ip'])
if src_ip_db_unclean and dst_ip_db_unclean:
global event_count, ip_to_code, country_to_code, unknowns, \
src_continents_tracked, src_countries_tracked, src_ips_tracked, \
dst_continents_tracked, dst_countries_tracked, dst_ips_tracked
msg_type = {'msg_type':get_msg_type()}
msg_type2 = {'msg_type2':syslog_data_dict['type_attack']}
msg_type3 = {'msg_type3':syslog_data_dict['cve_attack']}
proto = {'protocol':get_tcp_udp_proto(
syslog_data_dict['src_port'],
syslog_data_dict['dst_port']
)}
super_dict = merge_dicts(
syslog_data_dict, msg_type, msg_type2, msg_type3, proto,
clean_db(src_ip_db_unclean, src_or_dst="src"),
clean_db(dst_ip_db_unclean, src_or_dst="dst"),
)
# Track Stats
event_count += 1
event_time = strftime("%d-%m-%Y %H:%M:%S", localtime()) # local time
# event_time = strftime("%Y-%m-%d %H:%M:%S", gmtime()) # UTC time
# Append stats to super_dict
super_dict['event_count'] = event_count
super_dict['event_time'] = event_time
super_dict['unknowns'] = unknowns
track_stats(super_dict, src_continents_tracked, 'src_continent')
track_stats(super_dict, dst_continents_tracked, 'dst_continent')
track_stats(super_dict, src_countries_tracked, 'src_country')
track_stats(super_dict, dst_countries_tracked, 'dst_country')
track_stats(super_dict, src_ips_tracked, 'src_ip')
track_stats(super_dict, dst_ips_tracked, 'dst_ip')
for src_or_dst, val_type in itertools.product(["src_", "dst_"], [
"continents_tracked", "countries_tracked", "ips_tracked"]):
key = src_or_dst + val_type
super_dict[key] = globals()[key]
track_flags(super_dict, country_to_code, 'src_country', 'src_iso_code')
track_flags(super_dict, country_to_code, 'dst_country', 'dst_iso_code')
super_dict['country_to_code'] = country_to_code
track_flags(super_dict, ip_to_code, 'src_ip', 'src_iso_code')
track_flags(super_dict, ip_to_code, 'dst_ip', 'dst_iso_code')
super_dict['ip_to_code'] = ip_to_code
json_data = json.dumps(super_dict)
return json_data
else:
return
def main():
# if getuid() != 0:
# print('Please run this script as root')
# print('SHUTTING DOWN')
# exit()
# args = menu()
# Connect to Redis
redis_instance = connect_redis()
# Find HQ lat/long
# hq_dict = find_hq_lat_long()
# Follow/parse/format/publish syslog data
with io.open(SYSLOG_PATH, "r", encoding='ISO-8859-1') as syslog_file:
syslog_file.readlines()
while True:
where = syslog_file.tell()
line = syslog_file.readline()
if not line:
sleep(.1)
syslog_file.seek(where)
else:
syslog_data_dict = parse_syslog(line)
if not syslog_data_dict:
continue
json_data = to_json(syslog_data_dict)
if not json_data:
continue
redis_instance.publish('attack-map-production', json_data)
# if args.verbose:
# print(ip_db_unclean)
# print('------------------------')
# print(json_data)
# print('Event Count: {}'.format(event_count))
# print('------------------------')
print('Event Count: {}'.format(event_count))
print('------------------------')
def shutdown_and_report_stats():
print('\nSHUTTING DOWN')
# Report stats tracked
print('\nREPORTING STATS...')
print('\nEvent Count: {}'.format(event_count)) # report event count
print('\nContinent Stats...') # report continents stats
for key in src_continents_tracked:
print('{}: {}'.format(key, src_continents_tracked[key]))
print('\nCountry Stats...') # report country stats
for country in src_countries_tracked:
print('{}: {}'.format(country, src_countries_tracked[country]))
print('\nCountries to iso_codes...')
for key in country_to_code:
print('{}: {}'.format(key, country_to_code[key]))
print('\nIP Stats...') # report IP stats
for ip in src_ips_tracked:
print('{}: {}'.format(ip, src_ips_tracked[ip]))
print('\nIPs to iso_codes...')
for key in ip_to_code:
print('{}: {}'.format(key, ip_to_code[key]))
print('\nUnknowns...')
for key in unknowns:
print('{}: {}'.format(key, unknowns[key]))
exit()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
shutdown_and_report_stats()
|
the-stack_106_28769 | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import design
import utils
from tech import layer, GDS
class s8_col_end(design.design):
def __init__(self, version, name=""):
super().__init__(name)
if version == "colend":
self.name = "s8sram16x16_colend"
elif version == "colend_p_cent":
self.name = "s8sram16x16_colend_p_cent"
elif version == "colenda":
self.name = "s8sram16x16_colenda"
elif version == "colenda_p_cent":
self.name = "s8sram16x16_colenda_p_cent"
else:
debug.error("Invalid type for col_end", -1)
design.design.__init__(self, name=self.name)
(self.width, self.height) = utils.get_libcell_size(self.name,
GDS["unit"],
layer["mem"])
# pin_map = utils.get_libcell_pins(pin_names, self.name, GDS["unit"])
|
the-stack_106_28770 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
"""
# Integrating RazorPay
### 1. Validate Currency
Example:
from frappe.integration_broker.doctype.integration_service.integration_service import get_integration_controller
controller = get_integration_controller("Razorpay")
controller().validate_transaction_currency(currency)
### 2. Redirect for payment
Example:
payment_details = {
"amount": 600,
"title": "Payment for bill : 111",
"description": "payment via cart",
"reference_doctype": "Payment Request",
"reference_docname": "PR0001",
"payer_email": "[email protected]",
"payer_name": "Nuran Verkleij",
"order_id": "111",
"currency": "INR"
}
# Redirect the user to this url
url = controller().get_payment_url(**payment_details)
### 3. On Completion of Payment
Write a method for `on_payment_authorized` in the reference doctype
Example:
def on_payment_authorized(payment_status):
# this method will be called when payment is complete
##### Notes:
payment_status - payment gateway will put payment status on callback.
For razorpay payment status is Authorized
"""
from __future__ import unicode_literals
import frappe
from frappe.utils import get_url, call_hook_method, cint
from frappe import _
import urllib, json
from frappe.integration_broker.doctype.integration_service.integration_service import IntegrationService
class RazorpaySettings(IntegrationService):
service_name = "Razorpay"
supported_currencies = ["INR"]
scheduler_events = {
"all": [
"frappe.integrations.doctype.razorpay_settings.razorpay_settings.capture_payment"
]
}
def validate(self):
if not self.flags.ignore_mandatory:
self.validate_razorpay_credentails()
def on_update(self):
pass
def enable(self):
call_hook_method('payment_gateway_enabled', gateway='Razorpay')
if not self.flags.ignore_mandatory:
self.validate_razorpay_credentails()
def validate_razorpay_credentails(self):
if self.api_key and self.api_secret:
try:
self.get_request(url="https://api.razorpay.com/v1/payments",
auth=(self.api_key, self.get_password(fieldname="api_secret", raise_exception=False)))
except Exception:
frappe.throw(_("Seems API Key or API Secret is wrong !!!"))
def validate_transaction_currency(self, currency):
if currency not in self.supported_currencies:
frappe.throw(_("Please select another payment method. {0} does not support transactions in currency '{1}'").format(self.service_name, currency))
def get_payment_url(self, **kwargs):
return get_url("./integrations/razorpay_checkout?{0}".format(urllib.urlencode(kwargs)))
def create_request(self, data):
self.data = frappe._dict(data)
try:
self.integration_request = super(RazorpaySettings, self).create_request(self.data, "Host", \
"Razorpay")
return self.authorize_payment()
except Exception:
frappe.log_error(frappe.get_traceback())
return{
"redirect_to": frappe.redirect_to_message(_('Server Error'), _("Seems issue with server's razorpay config. Don't worry, in case of failure amount will get refunded to your account.")),
"status": 401
}
def authorize_payment(self):
"""
An authorization is performed when user’s payment details are successfully authenticated by the bank.
The money is deducted from the customer’s account, but will not be transferred to the merchant’s account
until it is explicitly captured by merchant.
"""
data = json.loads(self.integration_request.data)
settings = self.get_settings(data)
redirect_to = data.get('notes', {}).get('redirect_to') or None
redirect_message = data.get('notes', {}).get('redirect_message') or None
try:
resp = self.get_request("https://api.razorpay.com/v1/payments/{0}"
.format(self.data.razorpay_payment_id), auth=(settings.api_key,
settings.api_secret))
if resp.get("status") == "authorized":
self.integration_request.db_set('status', 'Authorized', update_modified=False)
self.flags.status_changed_to = "Authorized"
else:
frappe.log_error(str(resp), 'Razorpay Payment not authorized')
except:
frappe.log_error(frappe.get_traceback())
# failed
pass
status = frappe.flags.integration_request.status_code
if self.flags.status_changed_to == "Authorized":
if self.data.reference_doctype and self.data.reference_docname:
custom_redirect_to = None
try:
custom_redirect_to = frappe.get_doc(self.data.reference_doctype,
self.data.reference_docname).run_method("on_payment_authorized", self.flags.status_changed_to)
except Exception:
frappe.log_error(frappe.get_traceback())
if custom_redirect_to:
redirect_to = custom_redirect_to
redirect_url = 'payment-success'
else:
redirect_url = 'payment-failed'
if redirect_to:
redirect_url += '?' + urllib.urlencode({'redirect_to': redirect_to})
if redirect_message:
redirect_url += '&' + urllib.urlencode({'redirect_message': redirect_message})
return {
"redirect_to": redirect_url,
"status": status
}
def get_settings(self, data):
settings = frappe._dict({
"api_key": self.api_key,
"api_secret": self.get_password(fieldname="api_secret", raise_exception=False)
})
if cint(data.get('notes', {}).get('use_sandbox')):
settings.update({
"api_key": frappe.conf.sandbox_api_key,
"api_secret": frappe.conf.sandbox_api_secret,
})
return settings
def capture_payment(is_sandbox=False, sanbox_response=None):
"""
Verifies the purchase as complete by the merchant.
After capture, the amount is transferred to the merchant within T+3 days
where T is the day on which payment is captured.
Note: Attempting to capture a payment whose status is not authorized will produce an error.
"""
controller = frappe.get_doc("Razorpay Settings")
for doc in frappe.get_all("Integration Request", filters={"status": "Authorized",
"integration_request_service": "Razorpay"}, fields=["name", "data"]):
try:
if is_sandbox:
resp = sanbox_response
else:
data = json.loads(doc.data)
settings = controller.get_settings(data)
resp = controller.post_request("https://api.razorpay.com/v1/payments/{0}/capture".format(data.get("razorpay_payment_id")),
auth=(settings.api_key, settings.api_secret), data={"amount": data.get("amount")})
if resp.get("status") == "captured":
frappe.db.set_value("Integration Request", doc.name, "status", "Completed")
except Exception:
doc = frappe.get_doc("Integration Request", doc.name)
doc.status = "Failed"
doc.error = frappe.get_traceback()
frappe.log_error(doc.error, '{0} Failed'.format(doc.name))
@frappe.whitelist(allow_guest=True, xss_safe=True)
def get_checkout_url(**kwargs):
try:
return frappe.get_doc("Razorpay Settings").get_payment_url(**kwargs)
except Exception:
frappe.respond_as_web_page(_("Something went wrong"),
_("Looks like something is wrong with this site's Razorpay configuration. No payment has been made."),
indicator_color='red',
http_status_code=frappe.ValidationError.http_status_code)
@frappe.whitelist()
def get_service_details():
return """
<div>
<p> Steps to configure Service
<ol>
<li> Get Razorpay api credentials by login to:
<a href="https://razorpay.com/" target="_blank">
https://razorpay.com/
</a>
</li>
<br>
<li> Setup credentials on Razorpay Settings doctype.
Click on
<button class="btn btn-default btn-xs disabled"> Razorpay Settings </button>
top right corner
</li>
<br>
<li>
After saving settings,
<label>
<span class="input-area">
<input type="checkbox" class="input-with-feedback" checked disabled>
</span>
<span class="label-area small">Enable</span>
</label>
Razorpay Integration Service and Save a document.
</li>
<br>
<li>
To view Razorpays payment logs,
<button class="btn btn-default btn-xs disabled"> Show Log </button>
</li>
</ol>
</div>
""" |
the-stack_106_28771 | from __future__ import print_function
from collections import defaultdict
from PIL import Image
from torch.autograd import Variable
from torchvision import datasets, transforms
import argparse
import codecs
import errno
import numpy as np
import os
import os.path
import pickle
import random
import scipy as sp
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import custom_datasets
sys.path.append("..")
import util
import fp_train
import fp_eval
from fingerprint import Fingerprints
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--ckpt', type=str, default="/tmp/user/mnist/")
parser.add_argument('--log-dir', type=str)
parser.add_argument('--adv-ex-dir')
parser.add_argument('--fingerprint-dir')
parser.add_argument('--data-dir', type=str)
parser.add_argument('--eps', type=float, default=0.1)
parser.add_argument('--num-dx', type=int, default=5)
parser.add_argument('--num-class', type=int, default=10)
#parser.add_argument('--tau', type=str, default="0.1,0.2")
parser.add_argument('--name', default="dataset-name")
util.add_boolean_argument(parser, "verbose", default=False)
util.add_boolean_argument(parser, "debug", default=False)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.data_dir, train=True, download=True, transform=transform),
batch_size=args.batch_size, shuffle=False, **kwargs)
"""
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.data_dir, train=False, transform=transform),
batch_size=args.batch_size, shuffle=False, **kwargs)
"""
test_set_path = os.path.join(args.adv_ex_dir,'Random_Test_%s_.p' % ('mnist'))
test_loader = torch.utils.data.DataLoader(
custom_datasets.Adv(filename=test_set_path, transp=True),
batch_size=args.batch_size, shuffle=False, **kwargs)
random_loader = torch.utils.data.DataLoader(
custom_datasets.RandomMNIST(transform=transform),
batch_size=args.batch_size, shuffle=False, **kwargs)
list_advs = ["adapt-pgd"] #, "bim-a", "bim-b", "jsma", "cw-l2"]
# List of attacks, copy from run_search
dataset = 'mnist'
list_adv_loader=[]
for advs in list_advs:
attack_file = os.path.join(args.adv_ex_dir, 'Adv_%s_%s.p' % (dataset, advs))
adv_loader= torch.utils.data.DataLoader(
custom_datasets.Adv(filename=attack_file, transp=True),
batch_size=args.batch_size, shuffle=False, **kwargs)
list_adv_loader.append(adv_loader)
from model import CW_Net as Net
#from small_model import Very_Small_Net as Net
print("Eval using model", Net)
model = Net()
print("Loading ckpt", args.ckpt)
model.load_state_dict(torch.load(args.ckpt))
if args.cuda:
model.cuda()
model.eval()
print("Args:", args)
fixed_dxs = pickle.load(open(os.path.join(args.fingerprint_dir, "fp_inputs_dx.pkl"), "rb"))
fixed_dys = pickle.load(open(os.path.join(args.fingerprint_dir, "fp_outputs.pkl"), "rb"))
fp = Fingerprints()
fp.dxs = fixed_dxs
fp.dys = fixed_dys
loaders = [test_loader]
loaders.extend(list_adv_loader)
names = ["test"]
names.extend(list_advs)
assert (len(names) == len(loaders))
reject_thresholds = [0. + 0.001 * i for i in range(2000)]
results = {}
data_loader = test_loader
ds_name = "test"
print("Dataset", ds_name)
test_results_by_tau, test_stats_by_tau = fp_eval.eval_with_fingerprints(model, data_loader, ds_name, fp, reject_thresholds, None, args)
results["test"] = test_results_by_tau
for data_loader, ds_name in zip(loaders, names):
if ds_name == "test": continue
print("Dataset", ds_name)
results_by_tau, stats_by_tau = fp_eval.eval_with_fingerprints(model, data_loader, ds_name, fp, reject_thresholds, test_stats_by_tau, args)
results[ds_name] = results_by_tau
# Get precision / recall where positive examples = adversarials, negative examples = real inputs.
for item,advs in enumerate(list_advs):
print("AUC-ROC for %s",advs)
pos_names = [advs] # advs
neg_names = [names[0]] # test
fp_eval.get_pr_wrapper(results, pos_names, neg_names, reject_thresholds, args)
|
the-stack_106_28773 | from tkinter.messagebox import NO
from typing import Optional, List
from fastapi import FastAPI, Request
from pydantic import BaseModel, HttpUrl
from async_lru import alru_cache
import logging
import requests
import json
import sys
import base64
import zlib
from fastapi.templating import Jinja2Templates
from requests.auth import HTTPProxyAuth
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger("fastapi")
#https://dev.to/tomas223/logging-tracing-in-python-fastapi-with-opencensus-a-azure-2jcm
class QueryInputs(BaseModel):
template: str
endpoint: HttpUrl
content_type: str
payload: Optional[str]
username: Optional[str]
password: Optional[str]
class Endpoint(BaseModel):
def __hash__(self): # make hashable BaseModel subclass
return hash((type(self),) + tuple(self.__dict__.values()))
url: HttpUrl
key: str
payload: Optional[str]
username: Optional[str]
password: Optional[str]
class MultiQueryInputs(BaseModel):
template: str
endpoints: List[Endpoint]
content_type: str
app = FastAPI()
@alru_cache
async def _read_get_endpoint(endpoint: Endpoint) -> str:
headers = {
'Content-Type': 'application/json'
}
if endpoint.username is not None and len(endpoint.username) > 0:
return requests.get(endpoint.url, auth=(endpoint.username,endpoint.password), data=endpoint.payload, headers=headers, verify=False)
else:
return requests.get(endpoint.url, verify=False, data=endpoint.payload, headers=headers)
@app.post("/")
async def root(request: Request, input: QueryInputs):
"""Esta función se encarga de realizar la consulta a la API y devolver el resultado en formato según una plantilla
Args:
request (Request): petición HTTP
input (QueryInputs): Objeto con la petición al API y la plantilla a utilizar
Returns:
response: Respuesta formateada en formato según la plantilla
"""
endpoint = Endpoint(url=input.endpoint, key="data", username=input.username, password=input.password, payload=input.payload)
res = await _read_get_endpoint(endpoint)
logger.info(_read_get_endpoint.cache_info())
if res.status_code == 200:
data = json.loads(res.content)
templates = Jinja2Templates(directory="../templates")
response = templates.TemplateResponse(input.template, {"request" : request, "data": data})
response.headers['content-type'] = f"{input.content_type}; charset=utf-8"
return response
else:
return {"error": "Error in the endpoint request"}
@app.post("/multiple")
async def multiple(request: Request, input: MultiQueryInputs):
#import ipdb; ipdb.set_trace()
data = {"request" : request}
flag_error = False
for endpoint in input.endpoints:
res = await _read_get_endpoint(endpoint)
if res.status_code == 200:
data[endpoint.key] = json.loads(res.content)
else:
flag_error = True
#logger.info(_read_get_endpoint.cache_info())
if not flag_error:
templates = Jinja2Templates(directory="../templates")
response = templates.TemplateResponse(input.template, data)
response.headers['content-type'] = f"{input.content_type}; charset=utf-8"
return response
else:
return {"error": "Error in the endpoint request"}
@app.post("/generate")
async def generate_get(request: Request):
input_json = await request.json()
template_request_body = json.dumps(input_json).encode('utf-8')
return base64.urlsafe_b64encode(zlib.compress(template_request_body, 9)).decode('ascii')
@app.get("/{input}")
async def get_templated_data(request: Request, input: str):
template_request_body = zlib.decompress(base64.urlsafe_b64decode(input)).decode('utf-8')
if 'endpoints' in template_request_body:
return await multiple(request, MultiQueryInputs.parse_raw(template_request_body))
else:
return await root(request, QueryInputs.parse_raw(template_request_body))
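# Sketch (not part of the original file): how a client might combine the
# endpoints above, assuming the service runs locally on port 8000 (host,
# port and body contents are assumptions).
#
#   import requests
#   token = requests.post("http://localhost:8000/generate",
#                         json=EXAMPLE_QUERY_BODY).json()
#   rendered = requests.get("http://localhost:8000/" + token).text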
|
the-stack_106_28774 | # Copyright (C) 2015, Anuj Sharma ([email protected])
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Structural alignment using Quaternion Characteristic Polynomial (QCP).
QCPSuperimposer finds the best rotation and translation to put
two point sets on top of each other (minimizing the RMSD). This is
eg. useful to superimpose crystal structures. QCP stands for
Quaternion Characteristic Polynomial, which is used in the algorithm.
"""
from numpy import dot, sqrt, array, matrix, inner, zeros
from .qcprotmodule import FastCalcRMSDAndRotation
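# Example usage (sketch, not part of the original module; any two Nx3 float
# arrays work, and the import path assumes the usual Biopython layout):
#
#   from numpy.random import rand
#   from Bio.PDB.QCPSuperimposer import QCPSuperimposer
#
#   fixed, moving = rand(10, 3), rand(10, 3)
#   sup = QCPSuperimposer()
#   sup.set(fixed, moving)   # `moving` will be put on top of `fixed`
#   sup.run()
#   rot, tran = sup.get_rotran()
#   rmsd = sup.get_rms()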
class QCPSuperimposer:
"""Quaternion Characteristic Polynomial (QCP) Superimposer.
QCPSuperimposer finds the best rotation and translation to put
two point sets on top of each other (minimizing the RMSD). This is
    e.g. useful for superimposing 3D structures of proteins.
QCP stands for Quaternion Characteristic Polynomial, which is used
in the algorithm.
Reference:
Douglas L Theobald (2005), "Rapid calculation of RMSDs using a
quaternion-based characteristic polynomial.", Acta Crystallogr
A 61(4):478-480
"""
def __init__(self):
"""Initialize the class."""
self._clear()
# Private methods
def _clear(self):
self.reference_coords = None
self.coords = None
self.transformed_coords = None
self.rot = None
self.tran = None
self.rms = None
self.init_rms = None
    def _rms(self, coords1, coords2):
        """Return rms deviations between coords1 and coords2 (PRIVATE)."""
        diff = coords1 - coords2
        # Mean of the element-wise squared differences over all points.
        return sqrt((diff * diff).sum() / coords1.shape[0])
def _inner_product(self, coords1, coords2):
G1 = inner(coords1, coords1).diagonal().sum()
G2 = inner(coords2, coords2).diagonal().sum()
A = dot(coords1.T, coords2)
return ((G1 + G2) / 2, A)
def _align(self, centered_coords1, centered_coords2):
(E0, A) = self._inner_product(centered_coords1, centered_coords2)
(
rmsd,
r0,
r1,
r2,
r3,
r4,
r5,
r6,
r7,
r8,
q1,
q2,
q3,
q4,
) = FastCalcRMSDAndRotation(
A[0][0],
A[0][1],
A[0][2],
A[1][0],
A[1][1],
A[1][2],
A[2][0],
A[2][1],
A[2][2],
E0,
len(centered_coords1),
-1.0,
)
rot = array([r0, r1, r2, r3, r4, r5, r6, r7, r8]).reshape(3, 3)
return (rmsd, rot.T, [q1, q2, q3, q4])
# Public methods
def set(self, reference_coords, coords):
"""Set the coordinates to be superimposed.
coords will be put on top of reference_coords.
- reference_coords: an NxDIM array
- coords: an NxDIM array
DIM is the dimension of the points, N is the number
of points to be superimposed.
"""
# clear everything from previous runs
self._clear()
# store cordinates
self.reference_coords = reference_coords
self.coords = coords
n = reference_coords.shape
m = coords.shape
if n != m or n[1] != 3 or m[1] != 3:
raise Exception("Coordinate number/dimension mismatch.")
self.n = n[0]
def run(self):
"""Superimpose the coordinate sets."""
if self.coords is None or self.reference_coords is None:
raise Exception("No coordinates set.")
coords = self.coords
reference_coords = self.reference_coords
# center on centroid
av1 = sum(coords) / self.n
av2 = sum(reference_coords) / self.n
coords = coords - av1
reference_coords = reference_coords - av2
#
(self.rms, self.rot, self.lquart) = self._align(coords, reference_coords)
self.tran = av2 - dot(av1, self.rot)
def get_transformed(self):
"""Get the transformed coordinate set."""
if self.coords is None or self.reference_coords is None:
raise Exception("No coordinates set.")
if self.rot is None:
raise Exception("Nothing superimposed yet.")
if self.transformed_coords is None:
self.transformed_coords = dot(self.coords, self.rot) + self.tran
return self.transformed_coords
def get_rotran(self):
"""Right multiplying rotation matrix and translation."""
if self.rot is None:
raise Exception("Nothing superimposed yet.")
return self.rot, self.tran
def get_init_rms(self):
"""Root mean square deviation of untransformed coordinates."""
if self.coords is None:
raise Exception("No coordinates set yet.")
if self.init_rms is None:
self.init_rms = self._rms(self.coords, self.reference_coords)
return self.init_rms
def get_rms(self):
"""Root mean square deviation of superimposed coordinates."""
if self.rms is None:
raise Exception("Nothing superimposed yet.")
return self.rms
|
the-stack_106_28775 | """ Implementation of all available options """
from __future__ import print_function
import configargparse
from onmt.models.sru import CheckSRU
def config_opts(parser):
parser.add('-config', '--config', required=False,
is_config_file_arg=True, help='config file path')
parser.add('-save_config', '--save_config', required=False,
is_write_out_config_file_arg=True,
help='config file save path')
def general_opts(parser):
group = parser.add_argument_group('general')
group.add_argument('-levels', '--levels',
type=int,
nargs='+',
required=True,
help='list of simplification levels')
def model_opts(parser):
"""
These options are passed to the construction of the model.
Be careful with these as they will be used during translation.
"""
# Embedding Options
group = parser.add_argument_group('Model-Embeddings')
group.add('--src_word_vec_size', '-src_word_vec_size',
type=int, default=500,
help='Word embedding size for src.')
group.add('--tgt_word_vec_size', '-tgt_word_vec_size',
type=int, default=500,
help='Word embedding size for tgt.')
group.add('--word_vec_size', '-word_vec_size', type=int, default=-1,
help='Word embedding size for src and tgt.')
group.add('--share_decoder_embeddings', '-share_decoder_embeddings',
action='store_true',
help="Use a shared weight matrix for the input and "
"output word embeddings in the decoder.")
group.add('--share_embeddings', '-share_embeddings', action='store_true',
help="Share the word embeddings between encoder "
"and decoder. Need to use shared dictionary for this "
"option.")
group.add('--position_encoding', '-position_encoding', action='store_true',
help="Use a sin to mark relative words positions. "
"Necessary for non-RNN style models.")
group = parser.add_argument_group('Model-Embedding Features')
group.add('--feat_merge', '-feat_merge', type=str, default='concat',
choices=['concat', 'sum', 'mlp'],
help="Merge action for incorporating features embeddings. "
"Options [concat|sum|mlp].")
group.add('--feat_vec_size', '-feat_vec_size', type=int, default=-1,
help="If specified, feature embedding sizes "
"will be set to this. Otherwise, feat_vec_exponent "
"will be used.")
group.add('--feat_vec_exponent', '-feat_vec_exponent',
type=float, default=0.7,
help="If -feat_merge_size is not set, feature "
"embedding sizes will be set to N^feat_vec_exponent "
"where N is the number of values the feature takes.")
# Encoder-Decoder Options
group = parser.add_argument_group('Model- Encoder-Decoder')
group.add('--model_architecture', '-model_architecture', default='encoder_multi_decoders',
choices=['encoder_decoders', 'encoder_multi_decoders'],
help="Type of model architecture to use."
"Options are [encoder_decoders|encoder_multi_decoders].")
group.add('--model_type', '-model_type', default='text',
choices=['text', 'img', 'audio'],
help="Type of source model to use. Allows "
"the system to incorporate non-text inputs. "
"Options are [text|img|audio].")
group.add('--model_dtype', '-model_dtype', default='fp32',
choices=['fp32', 'fp16'],
help='Data type of the model.')
group.add('--encoder_type', '-encoder_type', type=str, default='rnn',
choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
help="Type of encoder layer to use. Non-RNN layers "
"are experimental. Options are "
"[rnn|brnn|mean|transformer|cnn].")
group.add('--decoder_type', '-decoder_type', type=str, default='rnn',
choices=['rnn', 'transformer', 'cnn'],
help="Type of decoder layer to use. Non-RNN layers "
"are experimental. Options are "
"[rnn|transformer|cnn].")
group.add('--layers', '-layers', type=int, default=-1,
help='Number of layers in enc/dec.')
group.add('--enc_layers', '-enc_layers', type=int, default=2,
help='Number of layers in the encoder')
group.add('--dec_layers', '-dec_layers', type=int, default=2,
help='Number of layers in the decoder')
group.add('--rnn_size', '-rnn_size', type=int, default=-1,
help="Size of rnn hidden states. Overwrites "
"enc_rnn_size and dec_rnn_size")
group.add('--enc_rnn_size', '-enc_rnn_size', type=int, default=500,
help="Size of encoder rnn hidden states. "
"Must be equal to dec_rnn_size except for "
"speech-to-text.")
group.add('--dec_rnn_size', '-dec_rnn_size', type=int, default=500,
help="Size of decoder rnn hidden states. "
"Must be equal to enc_rnn_size except for "
"speech-to-text.")
group.add('--audio_enc_pooling', '-audio_enc_pooling',
type=str, default='1',
help="The amount of pooling of audio encoder, "
"either the same amount of pooling across all layers "
"indicated by a single number, or different amounts of "
"pooling per layer separated by comma.")
group.add('--cnn_kernel_width', '-cnn_kernel_width', type=int, default=3,
help="Size of windows in the cnn, the kernel_size is "
"(cnn_kernel_width, 1) in conv layer")
group.add('--input_feed', '-input_feed', type=int, default=1,
help="Feed the context vector at each time step as "
"additional input (via concatenation with the word "
"embeddings) to the decoder.")
group.add('--bridge', '-bridge', action="store_true",
help="Have an additional layer between the last encoder "
"state and the first decoder state")
group.add('--rnn_type', '-rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
action=CheckSRU,
help="The gate type to use in the RNNs")
# group.add('--residual', '-residual', action="store_true",
# help="Add residual connections between RNN layers.")
group.add('--brnn', '-brnn', action=DeprecateAction,
help="Deprecated, use `encoder_type`.")
group.add('--context_gate', '-context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="Type of context gate to use. "
"Do not select for no context gate.")
# Attention options
group = parser.add_argument_group('Model- Attention')
group.add('--global_attention', '-global_attention',
type=str, default='general',
choices=['dot', 'general', 'mlp', 'none'],
help="The attention type to use: "
"dotprod or general (Luong) or MLP (Bahdanau)")
group.add('--global_attention_function', '-global_attention_function',
type=str, default="softmax", choices=["softmax", "sparsemax"])
group.add('--self_attn_type', '-self_attn_type',
type=str, default="scaled-dot",
help='Self attention type in Transformer decoder '
'layer -- currently "scaled-dot" or "average" ')
group.add('--max_relative_positions', '-max_relative_positions',
type=int, default=0,
help="Maximum distance between inputs in relative "
"positions representations. "
"For more detailed information, see: "
"https://arxiv.org/pdf/1803.02155.pdf")
group.add('--heads', '-heads', type=int, default=8,
help='Number of heads for transformer self-attention')
group.add('--transformer_ff', '-transformer_ff', type=int, default=2048,
help='Size of hidden transformer feed-forward')
# Generator and loss options.
group.add('--copy_attn', '-copy_attn', action="store_true",
help='Train copy attention layer.')
group.add('--copy_attn_type', '-copy_attn_type',
type=str, default=None,
choices=['dot', 'general', 'mlp', 'none'],
help="The copy attention type to use. Leave as None to use "
"the same as -global_attention.")
group.add('--generator_function', '-generator_function', default="softmax",
choices=["softmax", "sparsemax"],
help="Which function to use for generating "
"probabilities over the target vocabulary (choices: "
"softmax, sparsemax)")
group.add('--copy_attn_force', '-copy_attn_force', action="store_true",
help='When available, train to copy.')
group.add('--reuse_copy_attn', '-reuse_copy_attn', action="store_true",
help="Reuse standard attention for copy")
group.add('--copy_loss_by_seqlength', '-copy_loss_by_seqlength',
action="store_true",
help="Divide copy loss by length of sequence")
group.add('--coverage_attn', '-coverage_attn', action="store_true",
help='Train a coverage attention layer.')
group.add('--lambda_coverage', '-lambda_coverage', type=float, default=1,
help='Lambda value for coverage.')
group.add('--loss_scale', '-loss_scale', type=float, default=0,
help="For FP16 training, the static loss scale to use. If not "
"set, the loss scale is dynamically computed.")
def preprocess_opts(parser):
""" Pre-procesing options """
# Data options
group = parser.add_argument_group('Data')
group.add('--data_type', '-data_type', default="text",
help="Type of the source input. "
"Options are [text|img|audio].")
group.add_argument('-src', '--src',
required=True,
help='Source data file path (without .#level suffix)')
group.add_argument('-tgt', '--tgt',
required=True,
help='Target data file path (without .#level suffix)')
group.add_argument('-train_valid_test_split', '--train_valid_test_split',
default=[0.8, 0.1, 0.1],
nargs='+',
type=float,
                       help='Fractions for the train / validation / test split, each in [0.0, 1.0]')
group.add('--src_dir', '-src_dir', default="",
help="Source directory for image or audio files.")
group.add('--save_data', '-save_data', required=True,
help="Output file for the prepared data")
group.add('--max_shard_size', '-max_shard_size', type=int, default=0,
help="""Deprecated use shard_size instead""")
group.add('--shard_size', '-shard_size', type=int, default=1000000,
help="Divide src_corpus and tgt_corpus into "
"smaller multiple src_copus and tgt corpus files, then "
"build shards, each shard will have "
"opt.shard_size samples except last shard. "
"shard_size=0 means no segmentation "
"shard_size>0 means segment dataset into multiple shards, "
"each shard has shard_size samples")
# Dictionary options, for text corpus
group = parser.add_argument_group('Vocab')
group.add('--src_vocab', '-src_vocab', default="",
help="Path to an existing source vocabulary. Format: "
"one word per line.")
group.add('--tgt_vocab', '-tgt_vocab', default="",
help="Path to an existing target vocabulary. Format: "
"one word per line.")
group.add('--features_vocabs_prefix', '-features_vocabs_prefix',
type=str, default='',
help="Path prefix to existing features vocabularies")
group.add('--src_vocab_size', '-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
group.add('--tgt_vocab_size', '-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
group.add('--vocab_size_multiple', '-vocab_size_multiple',
type=int, default=1,
help="Make the vocabulary size a multiple of this value")
group.add('--src_words_min_frequency',
'-src_words_min_frequency', type=int, default=0)
group.add('--tgt_words_min_frequency',
'-tgt_words_min_frequency', type=int, default=0)
group.add('--dynamic_dict', '-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add('--share_vocab', '-share_vocab', action='store_true',
help="Share source and target vocabulary")
# Truncation options, for text corpus
group = parser.add_argument_group('Pruning')
group.add('--src_seq_length', '-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
group.add('--src_seq_length_trunc', '-src_seq_length_trunc',
type=int, default=None,
help="Truncate source sequence length.")
group.add('--tgt_seq_length', '-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
group.add('--tgt_seq_length_trunc', '-tgt_seq_length_trunc',
type=int, default=None,
help="Truncate target sequence length.")
group.add('--lower', '-lower', action='store_true', help='lowercase data')
group.add('--filter_valid', '-filter_valid', action='store_true',
help='Filter validation data by src and/or tgt length')
# Data processing options
group = parser.add_argument_group('Random')
group.add('--shuffle', '-shuffle', type=int, default=0,
help="Shuffle data")
group.add('--seed', '-seed', type=int, default=3435,
help="Random seed")
group = parser.add_argument_group('Logging')
group.add('--report_every', '-report_every', type=int, default=100000,
help="Report status every this many sentences")
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
# Options most relevant to speech
group = parser.add_argument_group('Speech')
group.add('--sample_rate', '-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', '-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
group.add('--window_stride', '-window_stride', type=float, default=.01,
help="Window stride for spectrogram in seconds.")
group.add('--window', '-window', default='hamming',
help="Window type for spectrogram generation.")
# Option most relevant to image input
group.add('--image_channel_size', '-image_channel_size',
type=int, default=3,
choices=[3, 1],
help="Using grayscale image can training "
"model faster and smaller")
def train_opts(parser):
""" Training and saving options """
group = parser.add_argument_group('General')
group.add('--data', '-data', required=True,
help='Path prefix to the ".train.pt" and '
'".valid.pt" file path from preprocess.py')
group.add('--save_model', '-save_model', default='./output/trained_models',
help="Path for save models (the model will be saved as "
"<save_model>/model_N.pt where N is the number "
"of steps")
group.add('--save_checkpoint_steps', '-save_checkpoint_steps',
type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add('--keep_checkpoint', '-keep_checkpoint', type=int, default=-1,
help="Keep X checkpoints (negative: keep all)")
# GPU
group.add('--gpuid', '-gpuid', default=[], nargs='*', type=int,
help="Deprecated see world_size and gpu_ranks.")
group.add('--gpu_ranks', '-gpu_ranks', default=[], nargs='*', type=int,
help="list of ranks of each process.")
group.add('--world_size', '-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add('--gpu_backend', '-gpu_backend',
default="nccl", type=str,
help="Type of torch distributed backend")
group.add('--gpu_verbose_level', '-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add('--master_ip', '-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add('--master_port', '-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add('--seed', '-seed', type=int, default=-1,
help="Random seed used for the experiments "
"reproducibility.")
# Init options
group = parser.add_argument_group('Initialization')
group.add('--param_init', '-param_init', type=float, default=0.1,
help="Parameters are initialized over uniform distribution "
"with support (-param_init, param_init). "
"Use 0 to not use initialization")
group.add('--param_init_glorot', '-param_init_glorot', action='store_true',
help="Init parameters with xavier_uniform. "
"Required for transfomer.")
group.add('--train_from', '-train_from', default='', type=str,
help="If training from a checkpoint then this is the "
"path to the pretrained model's state_dict.")
group.add('--reset_optim', '-reset_optim', default='none',
choices=['none', 'all', 'states', 'keep_states'],
help="Optimization resetter when train_from.")
# Pretrained word vectors
group.add('--pre_word_vecs_enc', '-pre_word_vecs_enc',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the encoder side. "
"See README for specific formatting instructions.")
group.add('--pre_word_vecs_dec', '-pre_word_vecs_dec',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the decoder side. "
"See README for specific formatting instructions.")
# Fixed word vectors
group.add('--fix_word_vecs_enc', '-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
group.add('--fix_word_vecs_dec', '-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add('--batch_size', '-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add('--fixed_shard_batches', '-fixed_shard_batches', type=int, default=10,
help='Number of batches from fixed shard, before moving to the next. Used only in DatasetLazyMixerIter.')
group.add('--batch_type', '-batch_type', default='sents',
choices=["sents", "tokens"],
help="Batch grouping for batch_size. Standard "
"is sents. Tokens will do dynamic batching")
group.add('--normalization', '-normalization', default='sents',
choices=["sents", "tokens"],
help='Normalization method of the gradient.')
group.add('--accum_count', '-accum_count', type=int, default=1,
help="Accumulate gradient this many times. "
"Approximately equivalent to updating "
"batch_size * accum_count batches at once. "
"Recommended for Transformer.")
group.add('--valid_steps', '-valid_steps', type=int, default=10000,
              help='Perform validation every X steps')
group.add('--valid_batch_size', '-valid_batch_size', type=int, default=32,
help='Maximum batch size for validation')
group.add('--max_generator_batches', '-max_generator_batches',
type=int, default=32,
help="Maximum batches of words in a sequence to run "
"the generator on in parallel. Higher is faster, but "
"uses more memory. Set to 0 to disable.")
group.add('--train_steps', '-train_steps', type=int, default=100000,
help='Number of training steps')
group.add('--single_pass', '-single_pass', action='store_true',
help="Make a single pass over the training dataset.")
group.add('--epochs', '-epochs', type=int, default=0,
              help='Deprecated, see train_steps.')
group.add('--optim', '-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam', 'adafactor', 'fusedadam'],
help="Optimization method.")
group.add('--adagrad_accumulator_init', '-adagrad_accumulator_init',
type=float, default=0,
help="Initializes the accumulator values in adagrad. "
"Mirrors the initial_accumulator_value option "
"in the tensorflow adagrad (use 0.1 for their default).")
group.add('--max_grad_norm', '-max_grad_norm', type=float, default=5,
help="If the norm of the gradient vector exceeds this, "
"renormalize it to have the norm equal to "
"max_grad_norm")
group.add('--dropout', '-dropout', type=float, default=0.3,
help="Dropout probability; applied in LSTM stacks.")
group.add('--truncated_decoder', '-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add('--adam_beta1', '-adam_beta1', type=float, default=0.9,
help="The beta1 parameter used by Adam. "
"Almost without exception a value of 0.9 is used in "
"the literature, seemingly giving good results, "
"so we would discourage changing this value from "
"the default without due consideration.")
group.add('--adam_beta2', '-adam_beta2', type=float, default=0.999,
help='The beta2 parameter used by Adam. '
'Typically a value of 0.999 is recommended, as this is '
'the value suggested by the original paper describing '
'Adam, and is also the value adopted in other frameworks '
                   'such as Tensorflow and Keras, i.e. see: '
'https://www.tensorflow.org/api_docs/python/tf/train/Adam'
'Optimizer or '
'https://keras.io/optimizers/ . '
'Whereas recently the paper "Attention is All You Need" '
'suggested a value of 0.98 for beta2, this parameter may '
'not work well for normal models / default '
'baselines.')
group.add('--label_smoothing', '-label_smoothing', type=float, default=0.0,
help="Label smoothing value epsilon. "
"Probabilities of all non-true labels "
"will be smoothed by epsilon / (vocab_size - 1). "
"Set to zero to turn off label smoothing. "
"For more detailed information, see: "
"https://arxiv.org/abs/1512.00567")
group.add('--average_decay', '-average_decay', type=float, default=0,
help="Moving average decay. "
"Set to other than 0 (e.g. 1e-4) to activate. "
"Similar to Marian NMT implementation: "
"http://www.aclweb.org/anthology/P18-4020 "
"For more detail on Exponential Moving Average: "
"https://en.wikipedia.org/wiki/Moving_average")
group.add('--average_every', '-average_every', type=int, default=1,
help="Step for moving average. "
"Default is every update, "
"if -average_decay is set.")
# learning rate
group = parser.add_argument_group('Optimization- Rate')
group.add('--learning_rate', '-learning_rate', type=float, default=1.0,
help="Starting learning rate. "
"Recommended settings: sgd = 1, adagrad = 0.1, "
"adadelta = 1, adam = 0.001")
group.add('--learning_rate_decay', '-learning_rate_decay',
type=float, default=0.5,
help="If update_learning_rate, decay learning rate by "
"this much if steps have gone past "
"start_decay_steps")
group.add('--start_decay_steps', '-start_decay_steps',
type=int, default=50000,
help="Start decaying every decay_steps after "
"start_decay_steps")
group.add('--decay_steps', '-decay_steps', type=int, default=10000,
help="Decay every decay_steps")
group.add('--decay_method', '-decay_method', type=str, default="none",
choices=['noam', 'rsqrt', 'none'],
help="Use a custom decay rate.")
group.add('--warmup_steps', '-warmup_steps', type=int, default=4000,
help="Number of warmup steps for custom decay.")
group = parser.add_argument_group('Logging')
group.add('--report_every', '-report_every', type=int, default=50,
help="Print stats at this interval.")
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
group.add('--exp_host', '-exp_host', type=str, default="",
help="Send logs to this crayon server.")
group.add('--exp', '-exp', type=str, default="",
help="Name of the experiment for logging.")
# Use TensorboardX for visualization during training
group.add('--tensorboard', '-tensorboard', action="store_true",
help="Use tensorboardX for visualization during training. "
"Must have the library tensorboardX.")
group.add("--tensorboard_log_dir", "-tensorboard_log_dir",
type=str, default="./output/runs",
help="Log directory for Tensorboard. "
"This is also the name of the run.")
group = parser.add_argument_group('Speech')
# Options most relevant to speech
group.add('--sample_rate', '-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', '-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
# Option most relevant to image input
group.add('--image_channel_size', '-image_channel_size',
type=int, default=3, choices=[3, 1],
help="Using grayscale image can training "
"model faster and smaller")
def translate_opts(parser):
""" Translation / inference options """
group = parser.add_argument_group('Model')
group.add('--model', '-model', dest='models', metavar='MODEL',
nargs='+', type=str, default=[], required=True,
help="Path to model .pt file(s). "
"Multiple models can be specified, "
"for ensemble decoding.")
group.add('--fp32', '-fp32', action='store_true',
help="Force the model to be in FP32 "
"because FP16 is very slow on GTX1080(ti).")
group.add('--avg_raw_probs', '-avg_raw_probs', action='store_true',
help="If this is set, during ensembling scores from "
"different models will be combined by averaging their "
"raw probabilities and then taking the log. Otherwise, "
"the log probabilities will be averaged directly. "
"Necessary for models whose output layers can assign "
"zero probability.")
group = parser.add_argument_group('Data')
group.add('--data_type', '-data_type', default="text",
help="Type of the source input. Options: [text|img].")
group.add('--src', '-src', required=True,
help="Source sequence to decode (one line per "
"sequence)")
group.add('--src_dir', '-src_dir', default="",
help='Source directory for image or audio files')
group.add('--tgt', '-tgt',
help='True target sequence (optional)')
group.add('--shard_size', '-shard_size', type=int, default=10000,
help="Divide src and tgt (if applicable) into "
"smaller multiple src and tgt files, then "
"build shards, each shard will have "
"opt.shard_size samples except last shard. "
"shard_size=0 means no segmentation "
"shard_size>0 means segment dataset into multiple shards, "
"each shard has shard_size samples")
group.add('--output', '-output', default='./output/translations',
help="Path to output the predictions (each line will "
"be the decoded sequence")
group.add('--report_time', '-report_time', action='store_true',
help="Report some translation time metrics")
# Options most relevant to summarization.
group.add('--dynamic_dict', '-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add('--share_vocab', '-share_vocab', action='store_true',
help="Share source and target vocabulary")
group = parser.add_argument_group('Random Sampling')
group.add('--random_sampling_topk', '-random_sampling_topk',
default=1, type=int,
help="Set this to -1 to do random sampling from full "
"distribution. Set this to value k>1 to do random "
"sampling restricted to the k most likely next tokens. "
"Set this to 1 to use argmax or for doing beam "
"search.")
group.add('--random_sampling_temp', '-random_sampling_temp',
default=1., type=float,
help="If doing random sampling, divide the logits by "
"this before computing softmax during decoding.")
group.add('--seed', '-seed', type=int, default=829,
help="Random seed")
group = parser.add_argument_group('Beam')
group.add('--beam_size', '-beam_size', type=int, default=5,
help='Beam size')
group.add('--min_length', '-min_length', type=int, default=0,
help='Minimum prediction length')
group.add('--max_length', '-max_length', type=int, default=100,
help='Maximum prediction length.')
group.add('--max_sent_length', '-max_sent_length', action=DeprecateAction,
help="Deprecated, use `-max_length` instead")
# Alpha and Beta values for Google Length + Coverage penalty
# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
group.add('--stepwise_penalty', '-stepwise_penalty', action='store_true',
help="Apply penalty at every decoding step. "
"Helpful for summary penalty.")
group.add('--length_penalty', '-length_penalty', default='none',
choices=['none', 'wu', 'avg'],
help="Length Penalty to use.")
group.add('--coverage_penalty', '-coverage_penalty', default='none',
choices=['none', 'wu', 'summary'],
help="Coverage Penalty to use.")
group.add('--alpha', '-alpha', type=float, default=0.,
help="Google NMT length penalty parameter "
"(higher = longer generation)")
group.add('--beta', '-beta', type=float, default=-0.,
help="Coverage penalty parameter")
group.add('--block_ngram_repeat', '-block_ngram_repeat',
type=int, default=0,
help='Block repetition of ngrams during decoding.')
group.add('--ignore_when_blocking', '-ignore_when_blocking',
nargs='+', type=str, default=[],
help="Ignore these strings when blocking repeats. "
"You want to block sentence delimiters.")
group.add('--replace_unk', '-replace_unk', action="store_true",
help="Replace the generated UNK tokens with the "
"source token that had highest attention weight. If "
"phrase_table is provided, it will lookup the "
"identified source token and give the corresponding "
"target token. If it is not provided(or the identified "
"source token does not exist in the table) then it "
"will copy the source token")
group = parser.add_argument_group('Logging')
group.add('--verbose', '-verbose', action="store_true",
help='Print scores and predictions for each sentence')
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
group.add('--attn_debug', '-attn_debug', action="store_true",
help='Print best attn for each word')
group.add('--dump_beam', '-dump_beam', type=str, default="",
help='File to dump beam information to.')
group.add('--n_best', '-n_best', type=int, default=1,
help="If verbose is set, will output the n_best "
"decoded sentences")
group.add('--exp', '-exp', type=str, default="",
help="Name of the experiment for logging.")
group = parser.add_argument_group('Efficiency')
group.add('--batch_size', '-batch_size', type=int, default=30,
help='Batch size')
group.add('--gpu', '-gpu', type=int, default=-1,
help="Device to run on")
# Options most relevant to speech.
group = parser.add_argument_group('Speech')
group.add('--sample_rate', '-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', '-window_size', type=float, default=.02,
help='Window size for spectrogram in seconds')
group.add('--window_stride', '-window_stride', type=float, default=.01,
help='Window stride for spectrogram in seconds')
group.add('--window', '-window', default='hamming',
help='Window type for spectrogram generation')
# Option most relevant to image input
group.add('--image_channel_size', '-image_channel_size',
type=int, default=3, choices=[3, 1],
help="Using grayscale image can training "
"model faster and smaller")
def evaluate_opts(parser):
""" Evaluation options """
group = parser.add_argument_group('Data')
group.add('--src', '-src', required=True,
help="Source sequence to decode (one line per "
"sequence)")
group.add('--tgt', '-tgt',
help='True target sequence')
group.add('--pred', '-pred',
help='Predicted sequence')
group.add('--report_rouge', '-report_rouge', action='store_true',
help="Report rouge 1/2/3/L/SU4 score after translation "
"call tools/test_rouge.py on command line")
group.add('--report_bleu', '-report_bleu', action='store_true',
help="Report bleu score after translation, "
"call tools/multi-bleu.perl on command line")
group.add('--report_sari', '-report_sari', action='store_true',
help="Report sari score after translation, "
"call tools/sari.py on command line")
group.add('--report_flesch_reading_ease', '-report_flesch_reading_ease', action='store_true',
help="Report Flesch reading ease after translation, "
"call tools/readability/readability.py on command line")
group.add('--report_flesch_kincaid_grade_level', '-report_flesch_kincaid_grade_level', action='store_true',
help="Report Flesch-Kincaid grade level after translation, "
"call tools/readability/readability.py on command line")
group.add('--output', '-output',
help="Path to output the predictions (each line will "
"be the decoded sequence")
group = parser.add_argument_group('Logging')
group.add('--verbose', '-verbose', action="store_true",
help='Print scores and predictions for each sentence')
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class StoreLoggingLevelAction(configargparse.Action):
""" Convert string to logging level """
import logging
LEVELS = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"NOTSET": logging.NOTSET
}
CHOICES = list(LEVELS.keys()) + [str(_) for _ in LEVELS.values()]
def __init__(self, option_strings, dest, help=None, **kwargs):
super(StoreLoggingLevelAction, self).__init__(
option_strings, dest, help=help, **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
# Get the key 'value' in the dict, or just use 'value'
level = StoreLoggingLevelAction.LEVELS.get(value, value)
setattr(namespace, self.dest, level)
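# Illustrative sketch (not part of the original file): how this action resolves
# a logging-level flag; the parser below is hypothetical.
#
#   parser = configargparse.ArgumentParser()
#   parser.add('--log_file_level', action=StoreLoggingLevelAction,
#              choices=StoreLoggingLevelAction.CHOICES, default="0")
#   opt = parser.parse_args(['--log_file_level', 'DEBUG'])
#   # opt.log_file_level is now logging.DEBUG (10), not the string 'DEBUG'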
class DeprecateAction(configargparse.Action):
""" Deprecate action """
def __init__(self, option_strings, dest, help=None, **kwargs):
super(DeprecateAction, self).__init__(option_strings, dest, nargs=0,
help=help, **kwargs)
def __call__(self, parser, namespace, values, flag_name):
help = self.help if self.help is not None else ""
msg = "Flag '%s' is deprecated. %s" % (flag_name, help)
raise configargparse.ArgumentTypeError(msg)
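# Illustrative sketch (not part of the original file): flags declared with
# action=DeprecateAction fail fast when used, e.g. (hypothetical parser):
#
#   parser = configargparse.ArgumentParser()
#   model_opts(parser)
#   parser.parse_args(['-brnn'])
#   # -> configargparse.ArgumentTypeError: Flag '-brnn' is deprecated. Deprecated, use `encoder_type`.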
|
the-stack_106_28776 | # Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""A small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
"""
from __future__ import print_function
import difflib
import os
import json
import subprocess
import re
import time
import logging
import shutil
import pprint
from collections import OrderedDict
from tools import shared
from tools import gen_struct_info
from tools import jsrun
from tools.response_file import substitute_response_files
from tools.shared import WINDOWS, asstr, path_from_root, exit_with_error
from tools.toolchain_profiler import ToolchainProfiler
from tools.minified_js_name_generator import MinifiedJsNameGenerator
logger = logging.getLogger('emscripten')
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
STDERR_FILE = os.path.abspath(STDERR_FILE)
logger.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
STDERR_FILE = open(STDERR_FILE, 'w')
def get_configuration():
if hasattr(get_configuration, 'configuration'):
return get_configuration.configuration
configuration = shared.Configuration(environ=os.environ)
get_configuration.configuration = configuration
return configuration
def quote(prop):
if shared.Settings.USE_CLOSURE_COMPILER == 2:
return ''.join(["'" + p + "'" for p in prop.split('.')])
else:
return prop
def access_quote(prop):
if shared.Settings.USE_CLOSURE_COMPILER == 2:
return ''.join(["['" + p + "']" for p in prop.split('.')])
else:
return '.' + prop
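# Illustrative sketch (not part of the original file): with Closure in ADVANCED
# mode (USE_CLOSURE_COMPILER == 2), property names must stay as string accesses
# so Closure cannot rename them, e.g.
#   access_quote('Module.preRun')  ->  "['Module']['preRun']"
#   quote('preRun')                ->  "'preRun'"
# whereas otherwise plain dotted access is emitted:
#   access_quote('Module.preRun')  ->  '.Module.preRun'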
def emscript_fastcomp(infile, outfile, memfile, compiler_engine,
temp_files, DEBUG):
"""Runs the emscripten LLVM-to-JS compiler.
Args:
infile: The path to the input LLVM assembly file.
outfile: An open file object where the output is written.
"""
assert shared.Settings.ASM_JS, 'fastcomp is asm.js-only (mode 1 or 2)'
success = False
try:
# Overview:
# * Run LLVM backend to emit JS. JS includes function bodies, memory initializer,
# and various metadata
# * Run compiler.js on the metadata to emit the shell js code, pre/post-ambles,
# JS library dependencies, etc.
# metadata is modified by reference in some of the below
# these functions are split up to force variables to go out of scope and allow
# memory to be reclaimed
with ToolchainProfiler.profile_block('get_and_parse_backend'):
backend_output = compile_js(infile, temp_files, DEBUG)
funcs, metadata, mem_init = parse_fastcomp_output(backend_output, DEBUG)
fixup_metadata_tables(metadata)
funcs = fixup_functions(funcs, metadata)
with ToolchainProfiler.profile_block('compiler_glue'):
glue, forwarded_data = compiler_glue(metadata, compiler_engine, temp_files, DEBUG)
with ToolchainProfiler.profile_block('function_tables_and_exports'):
(post, function_table_data, bundled_args) = (
function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG))
with ToolchainProfiler.profile_block('write_output_file'):
finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG)
success = True
finally:
outfile.close()
if not success:
shared.try_delete(outfile.name) # remove partial output
def compile_js(infile, temp_files, DEBUG):
"""Compile infile with asm.js backend, return the contents of the compiled js"""
with temp_files.get_file('.4.js') as temp_js:
backend_cmd = create_backend_cmd(infile, temp_js)
if DEBUG:
logger.debug('emscript: llvm backend: ' + ' '.join(backend_cmd))
t = time.time()
shared.print_compiler_stage(backend_cmd)
with ToolchainProfiler.profile_block('emscript_llvm_backend'):
shared.check_call(backend_cmd)
if DEBUG:
logger.debug(' emscript: llvm backend took %s seconds' % (time.time() - t))
# Split up output
backend_output = open(temp_js).read()
return backend_output
def parse_fastcomp_output(backend_output, DEBUG):
start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
metadata_split_marker = '// EMSCRIPTEN_METADATA'
start_funcs = backend_output.index(start_funcs_marker)
end_funcs = backend_output.rindex(end_funcs_marker)
metadata_split = backend_output.rindex(metadata_split_marker)
funcs = backend_output[start_funcs + len(start_funcs_marker):end_funcs]
metadata_raw = backend_output[metadata_split + len(metadata_split_marker):]
mem_init = backend_output[end_funcs + len(end_funcs_marker):metadata_split]
# we no longer use the "Runtime" object. TODO: stop emiting it in the backend
mem_init = mem_init.replace('Runtime.', '')
try:
metadata = json.loads(metadata_raw, object_pairs_hook=OrderedDict)
except ValueError:
logger.error('emscript: failure to parse metadata output from compiler backend. raw output is: \n' + metadata_raw)
raise
# This key is being added to fastcomp but doesn't exist in the current
# version.
metadata.setdefault('externFunctions', [])
if 'externUses' not in metadata:
exit_with_error('Your fastcomp compiler is out of date, please update! (need >= 1.38.26)')
# JS optimizer turns some heap accesses to others as an optimization, so make HEAP8 imply HEAPU8, HEAP16->HEAPU16, and HEAPF64->HEAPF32.
if 'Int8Array' in metadata['externUses']:
metadata['externUses'] += ['Uint8Array']
if 'Int16Array' in metadata['externUses']:
metadata['externUses'] += ['Uint16Array']
if 'Float64Array' in metadata['externUses']:
metadata['externUses'] += ['Float32Array']
# If we are generating references to Math.fround() from here in emscripten.py, declare it used as well.
if provide_fround() or metadata['simd']:
metadata['externUses'] += ['Math.fround']
# functions marked llvm.used in the code are exports requested by the user
shared.Building.user_requested_exports += metadata['exports']
# In MINIMAL_RUNTIME stackSave() and stackRestore are JS library functions. If LLVM backend generated
# calls to invoke_*() functions that save and restore the stack, we must include the stack functions
# explicitly into the build. (In traditional runtime the stack functions are always present, so this
# tracking is not needed)
if shared.Settings.MINIMAL_RUNTIME and (len(metadata['invokeFuncs']) > 0 or shared.Settings.LINKABLE):
shared.Settings.EXPORTED_FUNCTIONS += ['stackSave', 'stackRestore']
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$stackSave', '$stackRestore']
return funcs, metadata, mem_init
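# Illustrative sketch (not part of the original file): the backend output that
# parse_fastcomp_output() slices up looks roughly like
#
#   // EMSCRIPTEN_START_FUNCTIONS
#   function _main(...) { ... }                     <- 'funcs'
#   // EMSCRIPTEN_END_FUNCTIONS
#   /* memory initializer */ ...                    <- 'mem_init'
#   // EMSCRIPTEN_METADATA
#   { "declares": [...], "exports": [...], ... }    <- parsed into 'metadata'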
def fixup_metadata_tables(metadata):
# if emulating pointer casts, force all tables to the size of the largest
# (for wasm, we use binaryen's fpcast-emu pass, we don't need to do anything
# here)
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
max_size = 0
for k, v in metadata['tables'].items():
max_size = max(max_size, v.count(',') + 1)
for k, v in metadata['tables'].items():
curr = v.count(',') + 1
if curr < max_size:
if v.count('[]') == 1:
metadata['tables'][k] = v.replace(']', (','.join(['0'] * (max_size - curr)) + ']'))
else:
metadata['tables'][k] = v.replace(']', (',0' * (max_size - curr)) + ']')
if shared.Settings.SIDE_MODULE:
for k in metadata['tables'].keys():
metadata['tables'][k] = metadata['tables'][k].replace('var FUNCTION_TABLE_', 'var SIDE_FUNCTION_TABLE_')
def fixup_functions(funcs, metadata):
# function table masks
table_sizes = {}
for k, v in metadata['tables'].items():
# undercounts by one, but that is what we want
table_sizes[k] = str(v.count(','))
# if shared.Settings.ASSERTIONS >= 2 and table_sizes[k] == 0:
    #   shared.warning('no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)')
funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)
# fix +float into float.0, if not running js opts
if not shared.Settings.RUNNING_JS_OPTS:
def fix_dot_zero(m):
num = m.group(3)
# TODO: handle 0x floats?
if num.find('.') < 0:
e = num.find('e')
if e < 0:
num += '.0'
else:
num = num[:e] + '.0' + num[e:]
return m.group(1) + m.group(2) + num
funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', fix_dot_zero, funcs)
return funcs
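# Illustrative sketch (not part of the original file): when the JS optimizer is
# not run, fixup_functions() rewrites asm.js float coercions so the literal
# keeps an explicit fractional part, e.g.
#   'x = +5;'    ->  'x = 5.0;'
#   'x = +1e3;'  ->  'x = 1.0e3;'
#   'x = +2.5;'  is left as 'x = 2.5;' (already contains a '.')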
def compiler_glue(metadata, compiler_engine, temp_files, DEBUG):
if DEBUG:
logger.debug('emscript: js compiler glue')
t = time.time()
# FIXME: do these one by one as normal js lib funcs
metadata['declares'] = [i64_func for i64_func in metadata['declares'] if i64_func not in ['getHigh32', 'setHigh32']]
update_settings_glue(metadata, DEBUG)
assert not (metadata['simd'] and shared.Settings.WASM), 'SIMD is used, but not supported in WASM mode yet'
assert not (shared.Settings.SIMD and shared.Settings.WASM), 'SIMD is requested, but not supported in WASM mode yet'
glue, forwarded_data = compile_settings(compiler_engine, temp_files)
if DEBUG:
logger.debug(' emscript: glue took %s seconds' % (time.time() - t))
return glue, forwarded_data
def analyze_table(function_table_data):
def table_size(table):
table_contents = table[table.index('[') + 1: table.index(']')]
if len(table_contents) == 0: # empty table
return 0
return table_contents.count(',') + 1
# note that this is a minimal estimate, as when asm2wasm lays out tables it adds padding
table_total_size = sum(table_size(s) for s in function_table_data.values())
shared.Settings.WASM_TABLE_SIZE = table_total_size
# Extracts from JS library code dependencies to runtime primitives.
def get_asm_extern_primitives(pre):
primitives = re.search(r'\/\/ ASM_LIBRARY EXTERN PRIMITIVES: ([^\n]*)', pre)
if primitives:
return [x.strip().replace('Math_', 'Math.') for x in primitives.group(1).split(',')]
else:
return []
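# Illustrative sketch (not part of the original file): the JS glue may contain a
# line such as
#   // ASM_LIBRARY EXTERN PRIMITIVES: Math_floor,Math_min
# which get_asm_extern_primitives() turns into ['Math.floor', 'Math.min'] so the
# names can be appended to metadata['externUses'] by the caller.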
def function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG):
if DEBUG:
logger.debug('emscript: python processing: function tables and exports')
t = time.time()
forwarded_json = json.loads(forwarded_data)
# merge in information from llvm backend
function_table_data = metadata['tables']
if shared.Settings.WASM:
analyze_table(function_table_data)
# merge forwarded data
shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
pre = apply_script_source(pre)
asm_extern_primitives = get_asm_extern_primitives(pre)
metadata['externUses'] += asm_extern_primitives
pre = memory_and_global_initializers(pre, metadata, mem_init)
pre, funcs_js = get_js_funcs(pre, funcs)
all_exported_functions = get_all_exported_functions(function_table_data)
all_implemented = get_all_implemented(forwarded_json, metadata)
report_missing_symbols(all_implemented, pre)
implemented_functions = get_implemented_functions(metadata)
pre = include_asm_consts(pre, forwarded_json, metadata)
pre = apply_table(pre)
outfile.write(pre)
pre = None
# Move preAsms to their right place
def move_preasm(m):
contents = m.groups(0)[0]
outfile.write(contents + '\n')
return ''
if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO and len(funcs_js) > 1:
funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', move_preasm, funcs_js[1])
if 'pre' in function_table_data:
pre_tables = function_table_data['pre']
del function_table_data['pre']
else:
pre_tables = ''
function_table_sigs = list(function_table_data.keys())
in_table, debug_tables, function_tables_defs = make_function_tables_defs(
implemented_functions, all_implemented, function_table_data, metadata)
exported_implemented_functions = get_exported_implemented_functions(
all_exported_functions, all_implemented, metadata)
# List of function signatures of used 'invoke_xxx()' functions in the application
# For backwards compatibility if one might be using a mismatching Emscripten compiler version, if 'invokeFuncs' is not present in metadata,
# use the full list of signatures in function table and generate invoke_() functions for all signatures in the program (producing excessive code size)
# we must also emit the full list if we are emitting code that can be linked later
if 'invokeFuncs' in metadata and not shared.Settings.LINKABLE:
invoke_function_names = metadata['invokeFuncs']
else:
invoke_function_names = ['invoke_' + x for x in function_table_sigs]
asm_setup = create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata)
basic_funcs = create_basic_funcs(function_table_sigs, invoke_function_names)
basic_vars = create_basic_vars(exported_implemented_functions, forwarded_json, metadata)
funcs_js += create_mftCall_funcs(function_table_data)
exports = create_exports(exported_implemented_functions, in_table, function_table_data, metadata)
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
if not shared.Settings.RELOCATABLE:
global_vars = metadata['externs']
else:
global_vars = [] # linkable code accesses globals through function calls
global_funcs = set(key for key, value in forwarded_json['Functions']['libraryFunctions'].items() if value != 2)
global_funcs = sorted(global_funcs.difference(set(global_vars)).difference(implemented_functions))
if shared.Settings.RELOCATABLE:
global_funcs += ['g$' + extern for extern in metadata['externs']]
global_funcs += ['fp$' + extern for extern in metadata['externFunctions']]
# Tracks the set of used (minified) function names in
# JS symbols imported to asm.js module.
minified_js_names = MinifiedJsNameGenerator()
# Converts list of imports ['foo', 'bar', ...] to a dictionary of
# name mappings in form { 'minified': 'unminified', ... }
def define_asmjs_import_names(imports):
if shared.Settings.MINIFY_ASMJS_IMPORT_NAMES:
return [(minified_js_names.generate(), i) for i in imports]
else:
return [(i, i) for i in imports]
basic_funcs = define_asmjs_import_names(basic_funcs)
global_funcs = define_asmjs_import_names(global_funcs)
basic_vars = define_asmjs_import_names(basic_vars)
global_vars = define_asmjs_import_names(global_vars)
bg_funcs = basic_funcs + global_funcs
bg_vars = basic_vars + global_vars
asm_global_funcs = create_asm_global_funcs(bg_funcs, metadata)
asm_global_vars = create_asm_global_vars(bg_vars)
the_global = create_the_global(metadata)
sending_vars = bg_funcs + bg_vars
sending = OrderedDict([(math_fix(minified), unminified) for (minified, unminified) in sending_vars])
if shared.Settings.WASM:
add_standard_wasm_imports(sending)
sorted_sending_keys = sorted(sending.keys())
sending = '{ ' + ', '.join('"' + k + '": ' + sending[k] for k in sorted_sending_keys) + ' }'
receiving = create_receiving(function_table_data, function_tables_defs,
exported_implemented_functions, metadata['initializers'])
post = apply_table(post)
post = apply_static_code_hooks(post)
if shared.Settings.MINIMAL_RUNTIME:
# Generate invocations for all global initializers directly off the asm export object, e.g. asm['__GLOBAL__INIT']();
post = post.replace('/*** RUN_GLOBAL_INITIALIZERS(); ***/', '\n'.join(["asm['" + x + "']();" for x in global_initializer_funcs(metadata['initializers'])]))
if shared.Settings.WASM:
# Declare all exports out to global JS scope so that JS library functions can access them in a way that minifies well with Closure
# e.g. var a,b,c,d,e,f;
post = post.replace('/*** ASM_MODULE_EXPORTS_DECLARES ***/', 'var ' + ','.join(shared.Settings.MODULE_EXPORTS) + ';')
# Generate assignments from all asm.js/wasm exports out to the JS variables above: e.g. a = asm['a']; b = asm['b'];
post = post.replace('/*** ASM_MODULE_EXPORTS ***/', receiving)
receiving = ''
function_tables_impls = make_function_tables_impls(function_table_data)
final_function_tables = '\n'.join(function_tables_impls) + '\n' + function_tables_defs
if shared.Settings.EMULATED_FUNCTION_POINTERS:
final_function_tables = (
final_function_tables
.replace("asm['", '')
.replace("']", '')
.replace('var SIDE_FUNCTION_TABLE_', 'var FUNCTION_TABLE_')
.replace('var dynCall_', '//')
)
if DEBUG:
logger.debug('asm text sizes' + str([
[len(s) for s in funcs_js], len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables),
len('\n'.join(function_tables_impls)), len(function_tables_defs) + (function_tables_defs.count('\n') * len(' ')),
len(exports), len(the_global), len(sending), len(receiving)]))
logger.debug(' emscript: python processing: function tables and exports took %s seconds' % (time.time() - t))
bundled_args = (funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
asm_global_funcs, pre_tables, final_function_tables, exports)
return (post, function_table_data, bundled_args)
def finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG):
function_table_sigs = function_table_data.keys()
module = create_module_asmjs(function_table_sigs, metadata, *bundled_args)
if DEBUG:
logger.debug('emscript: python processing: finalize')
t = time.time()
write_output_file(outfile, post, module)
module = None
if DEBUG:
logger.debug(' emscript: python processing: finalize took %s seconds' % (time.time() - t))
write_cyberdwarf_data(outfile, metadata)
# Given JS code that consists of nothing but a series of "var a = ...;\n var b = ...;" statements,
# this function collapses the redundant 'var ' statements at the beginning of each line to a
# single var a =..., b=..., c=...; statement.
def collapse_redundant_vars(code):
if shared.Settings.WASM:
return code # Skip if targeting Wasm, this does not matter there
old_code = ''
while code != old_code: # Repeated vars overlap, so can't run in one regex pass. Runs in O(log(N)) time
old_code = code
code = re.sub(r'(var [^;]*);\s*var ', r'\1,\n ', code)
return code
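# Illustrative sketch (not part of the original file): for asm.js output,
# collapse_redundant_vars() turns
#   var a = 1;
#   var b = 2;
#   var c = 3;
# into a single declaration list:
#   var a = 1,
#    b = 2,
#    c = 3;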
def global_initializer_funcs(initializers):
# If we have at most one global ctor, no need to group global initializers.
# Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
# do not group ctors into one.
return ['globalCtors'] if (len(initializers) > 1 and not shared.Settings.EVAL_CTORS) else initializers
# Each .cpp file with global constructors generates a __GLOBAL__init() function that needs to be
# called to construct the global objects in that compilation unit. This function groups all these
# global initializer functions together into a single globalCtors() function that lives inside the
# asm.js/wasm module, and gets exported out to JS scope to be called at the startup of the application.
def create_global_initializer(initializers):
# If we have no global ctors, don't even generate a dummy empty function to save code space
# Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
# we do not group ctors into one.
if 'globalCtors' not in global_initializer_funcs(initializers):
return ''
global_initializer = ''' function globalCtors() {
%s
}''' % '\n '.join(i + '();' for i in initializers)
return global_initializer
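# Illustrative sketch (not part of the original file): with more than one ctor,
# e.g. initializers == ['__GLOBAL__I_000101', '__GLOBAL__sub_I_main_cpp'],
# create_global_initializer() emits roughly
#   function globalCtors() {
#     __GLOBAL__I_000101();
#     __GLOBAL__sub_I_main_cpp();
#   }
# while with a single ctor (or in EVAL_CTORS mode) no grouping function is
# generated and the initializers are kept as-is.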
def create_module_asmjs(function_table_sigs, metadata,
funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
asm_global_funcs, pre_tables, final_function_tables, exports):
receiving += create_named_globals(metadata)
runtime_funcs = create_runtime_funcs_asmjs(exports, metadata)
asm_start_pre = create_asm_start_pre(asm_setup, the_global, sending, metadata)
memory_views = create_memory_views(metadata)
asm_temp_vars = create_asm_temp_vars(metadata)
asm_runtime_thread_local_vars = create_asm_runtime_thread_local_vars()
stack = ''
if not shared.Settings.RELOCATABLE and not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
if 'STACKTOP' in shared.Settings.ASM_PRIMITIVE_VARS:
stack += apply_memory(' var STACKTOP = {{{ STACK_BASE }}};\n')
if 'STACK_MAX' in shared.Settings.ASM_PRIMITIVE_VARS:
stack += apply_memory(' var STACK_MAX = {{{ STACK_MAX }}};\n')
if 'tempFloat' in shared.Settings.ASM_PRIMITIVE_VARS:
temp_float = ' var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround() else '0.0')
else:
temp_float = ''
async_state = ' var asyncState = 0;\n' if shared.Settings.EMTERPRETIFY_ASYNC else ''
f0_fround = ' const f0 = Math_fround(0);\n' if provide_fround() else ''
replace_memory = create_replace_memory(metadata)
start_funcs_marker = '\n// EMSCRIPTEN_START_FUNCS\n'
asm_end = create_asm_end(exports)
asm_variables = collapse_redundant_vars(memory_views + asm_global_vars + asm_temp_vars + asm_runtime_thread_local_vars + '\n' + asm_global_funcs + stack + temp_float + async_state + f0_fround)
asm_global_initializer = create_global_initializer(metadata['initializers'])
module = [
asm_start_pre,
asm_variables,
replace_memory,
start_funcs_marker,
asm_global_initializer
] + runtime_funcs + funcs_js + [
'\n ',
pre_tables, final_function_tables, asm_end,
'\n', receiving, ';\n'
]
if shared.Settings.SIDE_MODULE:
module.append('''
parentModule['registerFunctions'](%s, Module);
''' % str([str(f) for f in function_table_sigs]))
return module
def write_output_file(outfile, post, module):
for i in range(len(module)): # do this loop carefully to save memory
module[i] = normalize_line_endings(module[i])
outfile.write(module[i])
post = normalize_line_endings(post)
outfile.write(post)
def write_cyberdwarf_data(outfile, metadata):
if not shared.Settings.CYBERDWARF:
return
assert('cyberdwarf_data' in metadata)
cd_file_name = outfile.name + ".cd"
with open(cd_file_name, 'w') as f:
json.dump({'cyberdwarf': metadata['cyberdwarf_data']}, f)
def create_backend_cmd(infile, temp_js):
"""Create asm.js backend command from settings dict"""
args = [
shared.LLVM_COMPILER, infile, '-march=js', '-filetype=asm', '-o', temp_js,
'-emscripten-stack-size=%d' % shared.Settings.TOTAL_STACK,
'-O%s' % shared.Settings.OPT_LEVEL,
]
if shared.Settings.PRECISE_F32:
args += ['-emscripten-precise-f32']
if shared.Settings.USE_PTHREADS:
args += ['-emscripten-enable-pthreads']
if shared.Settings.WARN_UNALIGNED:
args += ['-emscripten-warn-unaligned']
if shared.Settings.RESERVED_FUNCTION_POINTERS > 0:
args += ['-emscripten-reserved-function-pointers=%d' % shared.Settings.RESERVED_FUNCTION_POINTERS]
if shared.Settings.ASSERTIONS > 0:
args += ['-emscripten-assertions=%d' % shared.Settings.ASSERTIONS]
if shared.Settings.ALIASING_FUNCTION_POINTERS == 0:
args += ['-emscripten-no-aliasing-function-pointers']
if shared.Settings.EMULATED_FUNCTION_POINTERS:
args += ['-emscripten-emulated-function-pointers']
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
args += ['-emscripten-emulate-function-pointer-casts']
if shared.Settings.RELOCATABLE:
args += ['-emscripten-relocatable']
args += ['-emscripten-global-base=0']
elif shared.Settings.GLOBAL_BASE >= 0:
args += ['-emscripten-global-base=%d' % shared.Settings.GLOBAL_BASE]
if shared.Settings.SIDE_MODULE:
args += ['-emscripten-side-module']
if shared.Settings.LEGALIZE_JS_FFI != 1:
args += ['-emscripten-legalize-javascript-ffi=0']
if shared.Settings.DISABLE_EXCEPTION_CATCHING != 1:
args += ['-enable-emscripten-cpp-exceptions']
if shared.Settings.DISABLE_EXCEPTION_CATCHING == 2:
args += ['-emscripten-cpp-exceptions-whitelist=' + ','.join(shared.Settings.EXCEPTION_CATCHING_WHITELIST or ['fake'])]
if not shared.Settings.EXIT_RUNTIME:
args += ['-emscripten-no-exit-runtime']
if shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG:
args += ['-emscripten-asmjs-work-around-ios-9-right-shift-bug']
if shared.Settings.WASM:
args += ['-emscripten-wasm']
if shared.Building.is_wasm_only():
args += ['-emscripten-only-wasm']
if shared.Settings.CYBERDWARF:
args += ['-enable-cyberdwarf']
return args
def optimize_syscalls(declares, DEBUG):
"""Disables filesystem if only a limited subset of syscalls is used.
Our syscalls are static, and so if we see a very limited set of them - in particular,
no open() syscall and just simple writing - then we don't need full filesystem support.
If FORCE_FILESYSTEM is set, we can't do this. We also don't do it if INCLUDE_FULL_LIBRARY, since
not including the filesystem would mean not including the full JS libraries, and the same for
MAIN_MODULE since a side module might need the filesystem.
"""
relevant_settings = ['FORCE_FILESYSTEM', 'INCLUDE_FULL_LIBRARY', 'MAIN_MODULE']
if any(shared.Settings[s] for s in relevant_settings):
return
if shared.Settings.FILESYSTEM == 0:
# without filesystem support, it doesn't matter what syscalls need
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
else:
syscall_prefixes = ('__syscall', 'fd_', '__wasi_fd_')
syscalls = [d for d in declares if d.startswith(syscall_prefixes)]
# check if the only filesystem syscalls are in: close, ioctl, llseek, write
# (without open, etc.. nothing substantial can be done, so we can disable
# extra filesystem support in that case)
if set(syscalls).issubset(set([
'__syscall6', '__syscall54', '__syscall140',
'fd_seek', '__wasi_fd_seek',
'fd_write', '__wasi_fd_write',
'fd_close', '__wasi_fd_close',
])):
if DEBUG:
logger.debug('very limited syscalls (%s) so disabling full filesystem support', ', '.join(str(s) for s in syscalls))
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
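# Illustrative sketch (not part of the original file): given backend metadata
# with, say,
#   declares = ['__syscall140', 'fd_write', 'fd_close', 'malloc']
# the filtered syscall list is a subset of the close/llseek/write whitelist
# above, so SYSCALLS_REQUIRE_FILESYSTEM is set to 0; any syscall outside that
# set in 'declares' (e.g. one implementing open()) keeps full filesystem
# support enabled.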
def is_int(x):
try:
int(x)
return True
except ValueError:
return False
def align_memory(addr):
return (addr + 15) & -16
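# Illustrative sketch (not part of the original file): align_memory() rounds up
# to the next multiple of 16, e.g.
#   align_memory(0)  == 0
#   align_memory(1)  == 16
#   align_memory(16) == 16
#   align_memory(23) == 32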
def align_static_bump(metadata):
metadata['staticBump'] = align_memory(metadata['staticBump'])
return metadata['staticBump']
def update_settings_glue(metadata, DEBUG):
optimize_syscalls(metadata['declares'], DEBUG)
if shared.Settings.CYBERDWARF:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.append("cyberdwarf_Debugger")
shared.Settings.EXPORTED_FUNCTIONS.append("cyberdwarf_Debugger")
# Integrate info from backend
if shared.Settings.SIDE_MODULE:
# we don't need any JS library contents in side modules
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []
if metadata.get('cantValidate') and shared.Settings.ASM_JS != 2:
shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
shared.Settings.ASM_JS = 2
all_funcs = shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE + [shared.JS.to_nice_ident(d) for d in metadata['declares']]
implemented_funcs = [x[1:] for x in metadata['implementedFunctions']]
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = sorted(set(all_funcs).difference(implemented_funcs))
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [x[1:] for x in metadata['externs']]
if metadata['simd']:
shared.Settings.SIMD = 1
if shared.Settings.ASM_JS != 2:
shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of SIMD')
shared.Settings.ASM_JS = 2
shared.Settings.MAX_GLOBAL_ALIGN = metadata['maxGlobalAlign']
shared.Settings.IMPLEMENTED_FUNCTIONS = metadata['implementedFunctions']
# Extract the list of function signatures that MAIN_THREAD_EM_ASM blocks in
# the compiled code have, each signature will need a proxy function invoker
# generated for it.
def read_proxied_function_signatures(asmConsts):
proxied_function_signatures = set()
for _, sigs, proxying_types in asmConsts.values():
for sig, proxying_type in zip(sigs, proxying_types):
if proxying_type == 'sync_on_main_thread_':
proxied_function_signatures.add(sig + '_sync')
elif proxying_type == 'async_on_main_thread_':
proxied_function_signatures.add(sig + '_async')
return list(proxied_function_signatures)
shared.Settings.PROXIED_FUNCTION_SIGNATURES = read_proxied_function_signatures(metadata['asmConsts'])
shared.Settings.STATIC_BUMP = align_static_bump(metadata)
if shared.Settings.WASM_BACKEND:
shared.Settings.BINARYEN_FEATURES = metadata['features']
shared.Settings.WASM_TABLE_SIZE = metadata['tableSize']
if shared.Settings.RELOCATABLE:
# When building relocatable output (e.g. MAIN_MODULE) the reported table
# size does not include the reserved slot at zero for the null pointer.
# Instead we use __table_base to offset the elements by 1.
shared.Settings.WASM_TABLE_SIZE += 1
shared.Settings.MAIN_READS_PARAMS = metadata['mainReadsParams']
# static code hooks
class StaticCodeHooks:
atinits = []
atmains = []
atexits = []
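# apply_static_code_hooks substitutes the {{{ ATINITS }}} / {{{ ATMAINS }}} /
# {{{ ATEXITS }}} placeholders in the JS template with the strings forwarded
# from compiler.js (filled in by apply_forwarded_data below).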
def apply_static_code_hooks(code):
code = code.replace('{{{ ATINITS }}}', StaticCodeHooks.atinits)
code = code.replace('{{{ ATMAINS }}}', StaticCodeHooks.atmains)
code = code.replace('{{{ ATEXITS }}}', StaticCodeHooks.atexits)
return code
def apply_forwarded_data(forwarded_data):
forwarded_json = json.loads(forwarded_data)
# Be aware of JS static allocations
shared.Settings.STATIC_BUMP = forwarded_json['STATIC_BUMP']
shared.Settings.DYNAMICTOP_PTR = forwarded_json['DYNAMICTOP_PTR']
# Be aware of JS static code hooks
StaticCodeHooks.atinits = str(forwarded_json['ATINITS'])
StaticCodeHooks.atmains = str(forwarded_json['ATMAINS'])
StaticCodeHooks.atexits = str(forwarded_json['ATEXITS'])
def compile_settings(compiler_engine, temp_files):
# Save settings to a file to work around v8 issue 1579
with temp_files.get_file('.txt') as settings_file:
with open(settings_file, 'w') as s:
json.dump(shared.Settings.to_dict(), s, sort_keys=True)
# Call js compiler
env = os.environ.copy()
env['EMCC_BUILD_DIR'] = os.getcwd()
out = jsrun.run_js_tool(path_from_root('src', 'compiler.js'), compiler_engine,
[settings_file], stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'), env=env)
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
glue, forwarded_data = out.split('//FORWARDED_DATA:')
apply_forwarded_data(forwarded_data)
return glue, forwarded_data
class Memory():
def __init__(self):
# Note: if RELOCATABLE, then only relative sizes can be computed, and we don't
# actually write out any absolute memory locations ({{{ STACK_BASE }}}
# does not exist, etc.)
# Memory layout:
# * first the static globals
self.global_base = shared.Settings.GLOBAL_BASE
self.static_bump = shared.Settings.STATIC_BUMP
# * then the stack (up on fastcomp, down on upstream)
self.stack_low = align_memory(self.global_base + self.static_bump)
self.stack_high = align_memory(self.stack_low + shared.Settings.TOTAL_STACK)
if shared.Settings.WASM_BACKEND:
self.stack_base = self.stack_high
self.stack_max = self.stack_low
else:
self.stack_base = self.stack_low
self.stack_max = self.stack_high
# * then dynamic memory begins
self.dynamic_base = align_memory(self.stack_high)
if self.dynamic_base >= shared.Settings.TOTAL_MEMORY:
exit_with_error('Memory is not large enough for static data (%d) plus the stack (%d), please increase TOTAL_MEMORY (%d) to at least %d' % (self.static_bump, shared.Settings.TOTAL_STACK, shared.Settings.TOTAL_MEMORY, self.dynamic_base))
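# Illustrative example (made-up numbers, fastcomp layout): with GLOBAL_BASE=1024,
# STATIC_BUMP=1000 and TOTAL_STACK=5*1024*1024, stack_low = align_memory(2024) = 2032,
# stack_high = align_memory(stack_low + TOTAL_STACK), and dynamic_base starts right
# after the stack. On the wasm backend stack_base/stack_max are swapped because the
# stack grows downward there.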
def apply_memory(js):
# Apply the statically-at-compile-time computed memory locations.
memory = Memory()
# Write it all out
js = js.replace('{{{ STATIC_BUMP }}}', str(memory.static_bump))
js = js.replace('{{{ STACK_BASE }}}', str(memory.stack_base))
js = js.replace('{{{ STACK_MAX }}}', str(memory.stack_max))
js = js.replace('{{{ DYNAMIC_BASE }}}', str(memory.dynamic_base))
logger.debug('global_base: %d stack_base: %d, stack_max: %d, dynamic_base: %d, static bump: %d', memory.global_base, memory.stack_base, memory.stack_max, memory.dynamic_base, memory.static_bump)
shared.Settings.DYNAMIC_BASE = memory.dynamic_base
return js
def apply_table(js):
js = js.replace('{{{ WASM_TABLE_SIZE }}}', str(shared.Settings.WASM_TABLE_SIZE))
return js
def apply_script_source(js):
js = js.replace('{{{ TARGET_BASENAME }}}', shared.Settings.TARGET_BASENAME)
return js
def memory_and_global_initializers(pre, metadata, mem_init):
if shared.Settings.SIMD == 1:
pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre
staticbump = shared.Settings.STATIC_BUMP
pthread = ''
if shared.Settings.USE_PTHREADS:
pthread = 'if (!ENVIRONMENT_IS_PTHREAD)'
global_initializers = ''
if not shared.Settings.MINIMAL_RUNTIME:
# In the traditional runtime, global initializers are pushed to the __ATINIT__ array to be processed when the runtime is loaded
# In MINIMAL_RUNTIME global initializers are invoked directly off of the asm[''] export object, so this does not apply.
global_initializers = global_initializer_funcs(metadata['initializers'])
if len(global_initializers) > 0:
global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in global_initializers)
global_initializers = '/* global initializers */ {pthread} __ATINIT__.push({global_initializers});'.format(pthread=pthread, global_initializers=global_initializers)
else:
global_initializers = '/* global initializers */ /*__ATINIT__.push();*/'
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''\
STATICTOP = STATIC_BASE + {staticbump};
{global_initializers}
{mem_init}'''.format(staticbump=staticbump,
global_initializers=global_initializers,
mem_init=mem_init))
if shared.Settings.SIDE_MODULE:
pre = pre.replace('GLOBAL_BASE', 'gb')
pre = apply_memory(pre)
pre = apply_static_code_hooks(pre)
return pre
def get_js_funcs(pre, funcs):
funcs_js = [funcs]
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
funcs_js.append(parts[1])
return pre, funcs_js
def get_all_exported_functions(function_table_data):
# both asm.js and otherwise
all_exported_functions = set(shared.Settings.EXPORTED_FUNCTIONS)
# additional functions to export from asm, if they are implemented
for additional_export in shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE:
all_exported_functions.add('_' + additional_export)
if shared.Settings.EXPORT_FUNCTION_TABLES:
for table in function_table_data.values():
for func in table.split('[')[1].split(']')[0].split(','):
if func[0] == '_':
all_exported_functions.add(func)
return all_exported_functions
def get_all_implemented(forwarded_json, metadata):
return set(metadata['implementedFunctions']).union(forwarded_json['Functions']['implementedFunctions'])
def report_missing_symbols(all_implemented, pre):
# we are not checking anyway, so just skip this
if not shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS and not shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
return
# the initial list of missing functions is those that the user explicitly
# exported but that were not implemented in compiled code
missing = list(set(shared.Settings.USER_EXPORTED_FUNCTIONS) - all_implemented)
for requested in missing:
if ('function ' + asstr(requested)) in pre:
continue
# special-case malloc, EXPORTED by default for internal use, but we bake in a
# trivial allocator and warn at runtime if used in ASSERTIONS
if requested == '_malloc':
continue
if shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS:
exit_with_error('undefined exported function: "%s"', requested)
elif shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
shared.warning('undefined exported function: "%s"', requested)
def get_exported_implemented_functions(all_exported_functions, all_implemented, metadata):
funcs = set(metadata['exports'])
export_bindings = shared.Settings.EXPORT_BINDINGS
export_all = shared.Settings.EXPORT_ALL
for key in all_implemented:
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
funcs.add(key)
if not export_all:
for name, alias in metadata['aliases'].items():
# here we export the aliases; if we don't, the side module (which imports
# the alias) will not be able to get to the actual implementation
if alias in all_implemented and name in all_exported_functions:
funcs.add(alias)
funcs = list(funcs) + global_initializer_funcs(metadata['initializers'])
if shared.Settings.ALLOW_MEMORY_GROWTH:
funcs.append('_emscripten_replace_memory')
if not shared.Settings.SIDE_MODULE and not shared.Settings.MINIMAL_RUNTIME:
funcs += ['stackAlloc', 'stackSave', 'stackRestore', 'establishStackSpace']
if shared.Settings.EMTERPRETIFY:
funcs += ['emterpret']
if shared.Settings.EMTERPRETIFY_ASYNC:
funcs += ['setAsyncState', 'emtStackSave', 'emtStackRestore', 'getEmtStackMax', 'setEmtStackMax']
return sorted(set(funcs))
def get_implemented_functions(metadata):
return set(metadata['implementedFunctions'])
def proxy_debug_print(sync):
if shared.Settings.PTHREADS_DEBUG:
if sync:
return 'warnOnce("sync proxying function " + code);'
else:
return 'warnOnce("async proxying function " + code);'
return ''
def include_asm_consts(pre, forwarded_json, metadata):
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
if metadata['asmConsts']:
exit_with_error('EM_ASM is not yet supported in shared wasm module (it cannot be stored in the wasm itself, need some solution)')
asm_consts, all_sigs = all_asm_consts(metadata)
asm_const_funcs = []
for sig, call_type in all_sigs:
if 'j' in sig:
exit_with_error('emscript: EM_ASM should not receive i64s as inputs, they are not valid in JS')
if '_emscripten_asm_const_' + call_type + sig in forwarded_json['Functions']['libraryFunctions']:
continue # Only one invoker needs to be emitted for each ASM_CONST (signature x call_type) item
forwarded_json['Functions']['libraryFunctions']['_emscripten_asm_const_' + call_type + sig] = 1
args = ['a%d' % i for i in range(len(sig) - 1)]
all_args = ['code'] + args
pre_asm_const = ''
if shared.Settings.USE_PTHREADS:
sync_proxy = call_type == 'sync_on_main_thread_'
async_proxy = call_type == 'async_on_main_thread_'
proxied = sync_proxy or async_proxy
if proxied:
# In proxied function calls, positive integers 1, 2, 3, ... denote pointers
# to regular C compiled functions. Negative integers -1, -2, -3, ... denote
# indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
# ... over to the negative integers starting at -1.
proxy_args = ['-1 - code', str(int(sync_proxy))] + args
pre_asm_const += ' if (ENVIRONMENT_IS_PTHREAD) { ' + proxy_debug_print(sync_proxy) + 'return _emscripten_proxy_to_main_thread_js(' + ', '.join(proxy_args) + '); }\n'
if shared.Settings.EMTERPRETIFY_ASYNC and shared.Settings.ASSERTIONS:
# we cannot have an EM_ASM on the stack when saving/loading
pre_asm_const += " assert(typeof EmterpreterAsync !== 'object' || EmterpreterAsync.state !== 2, 'cannot have an EM_ASM on the stack when emterpreter pauses/resumes - the JS is not emterpreted, so we would end up running it again from the start');\n"
asm_const_funcs.append(r'''
function _emscripten_asm_const_%s(%s) {
%s return ASM_CONSTS[code](%s);
}''' % (call_type + asstr(sig), ', '.join(all_args), pre_asm_const, ', '.join(args)))
asm_consts_text = '\nvar ASM_CONSTS = [' + ',\n '.join(asm_consts) + '];\n'
asm_funcs_text = '\n'.join(asm_const_funcs) + '\n'
em_js_funcs = create_em_js(forwarded_json, metadata)
em_js_text = '\n'.join(em_js_funcs) + '\n'
body_marker = '// === Body ==='
return pre.replace(body_marker, body_marker + '\n' + asm_consts_text + asstr(asm_funcs_text) + em_js_text)
# Test if the parentheses at body[openIdx] and body[closeIdx] are a match to
# each other.
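# For example, parentheses_match('{ { } }', 0, -1) is True, while
# parentheses_match('{ } { }', 0, -1) is False since the first '{' is closed
# before the final '}'.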
def parentheses_match(body, openIdx, closeIdx):
if closeIdx < 0:
closeIdx += len(body)
count = 1
for i in range(openIdx + 1, closeIdx + 1):
if body[i] == body[openIdx]:
count += 1
elif body[i] == body[closeIdx]:
count -= 1
if count <= 0:
return i == closeIdx
return False
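# trim_asm_const_body peels redundant quoting/wrapping off an EM_ASM body,
# e.g. '"{ return $0 + 1; }"' (quoted and braced) reduces to 'return $0 + 1;'.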
def trim_asm_const_body(body):
body = body.strip()
orig = None
while orig != body:
orig = body
if len(body) > 1 and body[0] == '"' and body[-1] == '"':
body = body[1:-1].replace('\\"', '"').strip()
if len(body) > 1 and body[0] == '{' and body[-1] == '}' and parentheses_match(body, 0, -1):
body = body[1:-1].strip()
if len(body) > 1 and body[0] == '(' and body[-1] == ')' and parentheses_match(body, 0, -1):
body = body[1:-1].strip()
return body
def all_asm_consts(metadata):
asm_consts = [0] * len(metadata['asmConsts'])
all_sigs = []
for k, v in metadata['asmConsts'].items():
const, sigs, call_types = v
const = asstr(const)
const = trim_asm_const_body(const)
const = '{ ' + const + ' }'
args = []
arity = max(len(s) for s in sigs) - 1
for i in range(arity):
args.append('$' + str(i))
const = 'function(' + ', '.join(args) + ') ' + const
asm_consts[int(k)] = const
assert(len(sigs) == len(call_types))
for sig, call_type in zip(sigs, call_types):
all_sigs.append((sig, call_type))
return asm_consts, all_sigs
def unfloat(s):
"""lower float to double for ffis"""
return 'd' if s == 'f' else s
def make_function_tables_defs(implemented_functions, all_implemented, function_table_data, metadata):
class Counter(object):
next_bad_item = 0
next_item = 0
pre = []
in_table = set()
debug_tables = {}
def make_params(sig):
return ','.join('p%d' % p for p in range(len(sig) - 1))
def make_coerced_params(sig):
return ','.join(shared.JS.make_coercion('p%d', unfloat(sig[p + 1])) % p for p in range(len(sig) - 1))
def make_coercions(sig):
return ';'.join('p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p + 1])) for p in range(len(sig) - 1)) + ';'
# when emulating function pointer casts, we need to know what is the target of each pointer
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
function_pointer_targets = {}
for sig, table in function_table_data.items():
start = table.index('[')
end = table.rindex(']')
body = table[start + 1:end].split(',')
for i, parsed in enumerate(x.strip() for x in body):
if parsed != '0':
assert i not in function_pointer_targets
function_pointer_targets[i] = [sig, str(parsed)]
def make_table(sig, raw):
if '[]' in raw:
return ('', '') # empty table
params = make_params(sig)
coerced_params = make_coerced_params(sig)
coercions = make_coercions(sig)
def make_bad(target=None):
i = Counter.next_bad_item
Counter.next_bad_item += 1
if target is None:
target = i
name = 'b' + str(i)
if not shared.Settings.ASSERTIONS:
if 'abort' in shared.Settings.RUNTIME_FUNCS_TO_IMPORT:
code = 'abort(%s);' % target
else:
# Advanced use: the developer is generating code that does not include the function 'abort()'. Generate invalid
# function pointers to be no-op passthroughs that silently continue execution.
code = '\n/*execution is supposed to abort here, but you did not include "abort" in RUNTIME_FUNCS_TO_IMPORT (to save code size?). Silently trucking through, enjoy :)*/\n'
else:
code = 'nullFunc_' + sig + '(%d);' % target
if sig[0] != 'v':
code += 'return %s' % shared.JS.make_initializer(sig[0]) + ';'
return name, make_func(name, code, params, coercions)
bad, bad_func = make_bad() # the default bad func
if shared.Settings.ASSERTIONS <= 1:
Counter.pre = [bad_func]
else:
Counter.pre = []
start = raw.index('[')
end = raw.rindex(']')
body = raw[start + 1:end].split(',')
if shared.Settings.EMULATED_FUNCTION_POINTERS:
def receive(item):
if item == '0':
return item
if item not in all_implemented:
# this is not implemented; it would normally be wrapped, but with emulation, we just use it directly outside
return item
in_table.add(item)
return "asm['" + item + "']"
body = [receive(b) for b in body]
for j in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
curr = 'jsCall_%s_%s' % (sig, j)
body[1 + j] = curr
implemented_functions.add(curr)
Counter.next_item = 0
def fix_item(item):
j = Counter.next_item
Counter.next_item += 1
newline = Counter.next_item % 30 == 29
if item == '0':
# emulate all non-null pointer calls, if asked to
if j > 0 and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM and j in function_pointer_targets:
proper_sig, proper_target = function_pointer_targets[j]
if shared.Settings.EMULATED_FUNCTION_POINTERS:
if proper_target in all_implemented:
proper_target = "asm['" + proper_target + "']"
def make_emulated_param(i):
if i >= len(sig):
return shared.JS.make_initializer(proper_sig[i]) # extra param, just send a zero
return shared.JS.make_coercion('p%d' % (i - 1), proper_sig[i], convert_from=sig[i])
proper_code = proper_target + '(' + ','.join([make_emulated_param(i + 1) for i in range(len(proper_sig) - 1)]) + ')'
if proper_sig[0] != 'v':
# proper sig has a return, which the wrapper may or may not use
proper_code = shared.JS.make_coercion(proper_code, proper_sig[0])
if proper_sig[0] != sig[0]:
# first coercion ensured we call the target ok; this one ensures we return the right type in the wrapper
proper_code = shared.JS.make_coercion(proper_code, sig[0], convert_from=proper_sig[0])
if sig[0] != 'v':
proper_code = 'return ' + proper_code
else:
# proper sig has no return, we may need a fake return
if sig[0] != 'v':
proper_code = 'return ' + shared.JS.make_initializer(sig[0])
name = 'fpemu_%s_%d' % (sig, j)
wrapper = make_func(name, proper_code, params, coercions)
Counter.pre.append(wrapper)
return name if not newline else (name + '\n')
if shared.Settings.ASSERTIONS <= 1:
return bad if not newline else (bad + '\n')
specific_bad, specific_bad_func = make_bad(j)
Counter.pre.append(specific_bad_func)
return specific_bad if not newline else (specific_bad + '\n')
clean_item = item.replace("asm['", '').replace("']", '')
# when emulating function pointers we don't need wrappers, but if relocating
# we also have the in-module copies and do need them; in wasm we never need
# wrappers at all
if clean_item not in implemented_functions and not (shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.RELOCATABLE) and not shared.Settings.WASM:
# this is imported into asm, we must wrap it
call_ident = clean_item
if call_ident in metadata['redirects']:
call_ident = metadata['redirects'][call_ident]
if not call_ident.startswith('_') and not call_ident.startswith('Math_'):
call_ident = '_' + call_ident
code = call_ident + '(' + coerced_params + ')'
if sig[0] != 'v':
# ffis cannot return float
if sig[0] == 'f':
code = '+' + code
code = 'return ' + shared.JS.make_coercion(code, sig[0])
code += ';'
Counter.pre.append(make_func(clean_item + '__wrapper', code, params, coercions))
assert not sig == 'X', 'must know the signature in order to create a wrapper for "%s" (TODO for shared wasm modules)' % item
return clean_item + '__wrapper'
return item if not newline else (item + '\n')
if shared.Settings.ASSERTIONS >= 2:
debug_tables[sig] = body
body = ','.join(fix_item(b) for b in body)
return ('\n'.join(Counter.pre), ''.join([raw[:start + 1], body, raw[end:]]))
infos = [make_table(sig, raw) for sig, raw in function_table_data.items()]
Counter.pre = []
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n'
function_tables_defs += '\n// EMSCRIPTEN_END_FUNCS\n'
function_tables_defs += '\n'.join([info[1] for info in infos])
return in_table, debug_tables, function_tables_defs
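# make_func assembles a plain asm.js-style function string, e.g.
# make_func('b0', 'abort(0);', 'p0', 'p0 = p0|0;') yields
# "function b0(p0) {\n p0 = p0|0; abort(0);\n}".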
def make_func(name, code, params, coercions):
return 'function %s(%s) {\n %s %s\n}' % (name, params, coercions, code)
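# math_fix strips the Math_ prefix used in asm.js, e.g. 'Math_floor' -> 'floor';
# other identifiers pass through unchanged.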
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
# asm.js function tables have one table in each linked asm.js module, so we
# can't just dynCall into them - ftCall exists for that purpose. In wasm,
# even linked modules share the table, so it's all fine.
def asm_js_emulated_function_pointers():
return shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.WASM
def make_function_tables_impls(function_table_data):
function_tables_impls = []
for sig, table in function_table_data.items():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i]) for i in range(1, len(sig))])
sig_mask = str(table.count(','))
if not (shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS):
ret = 'FUNCTION_TABLE_%s[index&%s](%s)' % (sig, sig_mask, coerced_args)
else:
# for wasm with emulated function pointers, emit an mftCall_SIG(..) call; we avoid asm.js function tables there.
ret = 'mftCall_%s(index%s%s)' % (sig, ',' if len(sig) > 1 else '', coerced_args)
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion(ret, sig[0])
if not asm_js_emulated_function_pointers():
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
else:
function_tables_impls.append('''
var dynCall_%s = ftCall_%s;
''' % (sig, sig))
ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], ffi_arg=True) for i in range(1, len(sig))])
for i in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall_%s(%d%s%s)' % (sig, i, ',' if ffi_args else '', ffi_args), sig[0], ffi_result=True)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
return function_tables_impls
def create_mftCall_funcs(function_table_data):
if not asm_js_emulated_function_pointers():
return []
if shared.Settings.WASM or not shared.Settings.RELOCATABLE:
return []
mftCall_funcs = []
# in wasm, emulated function pointers are just simple table calls
for sig, table in function_table_data.items():
return_type, sig_args = sig[0], sig[1:]
num_args = len(sig_args)
params = ','.join(['ptr'] + ['p%d' % i for i in range(num_args)])
coerced_params = ','.join([shared.JS.make_coercion('ptr', 'i')] + [shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i])) for i in range(num_args)])
coercions = ';'.join(['ptr = ptr | 0'] + ['p%d = %s' % (i, shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i]))) for i in range(num_args)]) + ';'
mini_coerced_params = ','.join([shared.JS.make_coercion('p%d' % i, sig_args[i]) for i in range(num_args)])
maybe_return = '' if return_type == 'v' else 'return'
final_return = maybe_return + ' ' + shared.JS.make_coercion('ftCall_' + sig + '(' + coerced_params + ')', unfloat(return_type)) + ';'
if shared.Settings.EMULATED_FUNCTION_POINTERS == 1:
body = final_return
else:
sig_mask = str(table.count(','))
body = ('if (((ptr|0) >= (fb|0)) & ((ptr|0) < (fb + ' + sig_mask + ' | 0))) { ' + maybe_return + ' ' +
shared.JS.make_coercion(
'FUNCTION_TABLE_' + sig + '[(ptr-fb)&' + sig_mask + '](' +
mini_coerced_params + ')', return_type, ffi_arg=True
) + '; ' + ('return;' if return_type == 'v' else '') + ' }' + final_return)
mftCall_funcs.append(make_func('mftCall_' + sig, body, params, coercions) + '\n')
return mftCall_funcs
def get_function_pointer_error(sig, function_table_sigs):
if shared.Settings.ASSERTIONS == 0:
# Release build: do the most minimal sized abort possible
return "abort();"
else:
# ASSERTIONS-enabled build, identify the pointer and the failing signature.
return "abortFnPtrError(x, '" + sig + "');"
def signature_sort_key(sig):
def closure(other):
ret = 0
minlen = min(len(other), len(sig))
maxlen = max(len(other), len(sig))
if other.startswith(sig) or sig.startswith(other):
ret -= 1000 # prioritize prefixes, could be dropped params
ret -= 133 * difflib.SequenceMatcher(a=other, b=sig).ratio() # prioritize on diff similarity
ret += 15 * abs(len(other) - len(sig)) / float(maxlen) # deprioritize the bigger the length difference is
for i in range(minlen):
if other[i] == sig[i]:
ret -= 5 / float(maxlen) # prioritize on identically-placed params
ret += 20 * len(other) # deprioritize on length
return ret
return closure
def asm_backend_uses(metadata, symbol):
# If doing dynamic linking, we should generate the full set of runtime primitives, since we cannot know ahead
# of time what the dynamically linked modules will need. Also with SAFE_HEAP and Emterpretify, generate the full set of views.
if shared.Settings.MAIN_MODULE or shared.Settings.SIDE_MODULE or shared.Settings.SAFE_HEAP or shared.Settings.EMTERPRETIFY:
return True
# Allow querying asm_backend_uses(metadata, 'Math.') to find if any of the Math objects are used
if symbol.endswith('.'):
return any(e.startswith(symbol) for e in metadata['externUses'])
else:
# Querying a single symbol
return symbol in metadata['externUses']
def create_asm_global_funcs(bg_funcs, metadata):
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'max', 'clz32']]
if provide_fround():
maths += ['Math.fround']
asm_global_funcs = ''
for math in maths:
if asm_backend_uses(metadata, math):
asm_global_funcs += ' var ' + math.replace('.', '_') + '=global' + access_quote(math) + ';\n'
asm_global_funcs += ''.join([' var ' + unminified + '=env' + access_quote(math_fix(minified)) + ';\n' for (minified, unminified) in bg_funcs])
asm_global_funcs += global_simd_funcs(access_quote, metadata)
if shared.Settings.USE_PTHREADS:
asm_global_funcs += ''.join([' var Atomics_' + ty + '=global' + access_quote('Atomics') + access_quote(ty) + ';\n' for ty in ['load', 'store', 'exchange', 'compareExchange', 'add', 'sub', 'and', 'or', 'xor']])
return asm_global_funcs
def create_asm_global_vars(bg_vars):
asm_global_vars = ''.join([' var ' + unminified + '=env' + access_quote(minified) + '|0;\n' for (minified, unminified) in bg_vars])
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
# wasm side modules internally define their stack, these are set at module startup time
asm_global_vars += '\n var STACKTOP = 0, STACK_MAX = 0;\n'
return asm_global_vars
def global_simd_funcs(access_quote, metadata):
# Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
if not (metadata['simd'] or shared.Settings.SIMD):
return ''
def string_contains_any(s, str_list):
return any(sub in s for sub in str_list)
nonexisting_simd_symbols = ['Int8x16_fromInt8x16', 'Uint8x16_fromUint8x16', 'Int16x8_fromInt16x8', 'Uint16x8_fromUint16x8', 'Int32x4_fromInt32x4', 'Uint32x4_fromUint32x4', 'Float32x4_fromFloat32x4', 'Float64x2_fromFloat64x2']
nonexisting_simd_symbols += ['Int32x4_addSaturate', 'Int32x4_subSaturate', 'Uint32x4_addSaturate', 'Uint32x4_subSaturate']
nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8', 'Float64x2'] for y in ['load2', 'store2']]
nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8'] for y in ['load1', 'store1']]
simd = make_simd_types(metadata)
simd_func_text = ''
simd_func_text += ''.join([' var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simd['types']])
def generate_symbols(types, funcs):
symbols = [' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in types for g in funcs]
symbols = [x for x in symbols if not string_contains_any(x, nonexisting_simd_symbols)]
return ''.join(symbols)
simd_func_text += generate_symbols(simd['int_types'], simd['int_funcs'])
simd_func_text += generate_symbols(simd['float_types'], simd['float_funcs'])
simd_func_text += generate_symbols(simd['bool_types'], simd['bool_funcs'])
# SIMD conversions (not bitcasts) between same lane sizes:
def add_simd_cast(dst, src):
return ' var SIMD_' + dst + '_from' + src + '=SIMD_' + dst + '.from' + src + ';\n'
def add_simd_casts(t1, t2):
return add_simd_cast(t1, t2) + add_simd_cast(t2, t1)
# Bug: Skip importing conversions for int<->uint for now, they don't validate
# as asm.js. https://bugzilla.mozilla.org/show_bug.cgi?id=1313512
# This is not an issue when building SSEx code, because it doesn't use these.
# (but it will be an issue if using SIMD.js intrinsics from vector.h to
# explicitly call these)
# if metadata['simdInt8x16'] and metadata['simdUint8x16']:
# simd_func_text += add_simd_casts('Int8x16', 'Uint8x16')
# if metadata['simdInt16x8'] and metadata['simdUint16x8']:
# simd_func_text += add_simd_casts('Int16x8', 'Uint16x8')
# if metadata['simdInt32x4'] and metadata['simdUint32x4']:
# simd_func_text += add_simd_casts('Int32x4', 'Uint32x4')
if metadata['simdInt32x4'] and metadata['simdFloat32x4']:
simd_func_text += add_simd_casts('Int32x4', 'Float32x4')
if metadata['simdUint32x4'] and metadata['simdFloat32x4']:
simd_func_text += add_simd_casts('Uint32x4', 'Float32x4')
if metadata['simdInt32x4'] and metadata['simdFloat64x2']:
simd_func_text += add_simd_cast('Int32x4', 'Float64x2') # Unofficial, needed for emscripten_int32x4_fromFloat64x2
if metadata['simdUint32x4'] and metadata['simdFloat64x2']:
simd_func_text += add_simd_cast('Uint32x4', 'Float64x2') # Unofficial, needed for emscripten_uint32x4_fromFloat64x2
# Unofficial, Bool64x2 does not yet exist, but needed for Float64x2 comparisons.
if metadata['simdFloat64x2']:
simd_func_text += ' var SIMD_Int32x4_fromBool64x2Bits = global.SIMD.Int32x4.fromBool64x2Bits;\n'
return simd_func_text
def make_simd_types(metadata):
simd_float_types = []
simd_int_types = []
simd_bool_types = []
simd_funcs = ['splat', 'check', 'extractLane', 'replaceLane']
simd_intfloat_funcs = ['add', 'sub', 'neg', 'mul',
'equal', 'lessThan', 'greaterThan',
'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
'select', 'swizzle', 'shuffle',
'load', 'store', 'load1', 'store1', 'load2', 'store2']
simd_intbool_funcs = ['and', 'xor', 'or', 'not']
if metadata['simdUint8x16']:
simd_int_types += ['Uint8x16']
simd_intfloat_funcs += ['fromUint8x16Bits']
if metadata['simdInt8x16']:
simd_int_types += ['Int8x16']
simd_intfloat_funcs += ['fromInt8x16Bits']
if metadata['simdUint16x8']:
simd_int_types += ['Uint16x8']
simd_intfloat_funcs += ['fromUint16x8Bits']
if metadata['simdInt16x8']:
simd_int_types += ['Int16x8']
simd_intfloat_funcs += ['fromInt16x8Bits']
if metadata['simdUint32x4']:
simd_int_types += ['Uint32x4']
simd_intfloat_funcs += ['fromUint32x4Bits']
if metadata['simdInt32x4'] or shared.Settings.SIMD:
# Always import Int32x4 when building with -s SIMD=1, since memcpy is SIMD optimized.
simd_int_types += ['Int32x4']
simd_intfloat_funcs += ['fromInt32x4Bits']
if metadata['simdFloat32x4']:
simd_float_types += ['Float32x4']
simd_intfloat_funcs += ['fromFloat32x4Bits']
if metadata['simdFloat64x2']:
simd_float_types += ['Float64x2']
simd_intfloat_funcs += ['fromFloat64x2Bits']
if metadata['simdBool8x16']:
simd_bool_types += ['Bool8x16']
if metadata['simdBool16x8']:
simd_bool_types += ['Bool16x8']
if metadata['simdBool32x4']:
simd_bool_types += ['Bool32x4']
if metadata['simdBool64x2']:
simd_bool_types += ['Bool64x2']
simd_float_funcs = simd_funcs + simd_intfloat_funcs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
'abs', 'reciprocalApproximation', 'reciprocalSqrtApproximation']
simd_int_funcs = simd_funcs + simd_intfloat_funcs + simd_intbool_funcs + ['shiftLeftByScalar', 'shiftRightByScalar', 'addSaturate', 'subSaturate']
simd_bool_funcs = simd_funcs + simd_intbool_funcs + ['anyTrue', 'allTrue']
simd_types = simd_float_types + simd_int_types + simd_bool_types
return {
'types': simd_types,
'float_types': simd_float_types,
'int_types': simd_int_types,
'bool_types': simd_bool_types,
'funcs': simd_funcs,
'float_funcs': simd_float_funcs,
'int_funcs': simd_int_funcs,
'bool_funcs': simd_bool_funcs,
'intfloat_funcs': simd_intfloat_funcs,
'intbool_funcs': simd_intbool_funcs,
}
def asm_safe_heap():
"""optimized safe heap in asm, when we can"""
return shared.Settings.SAFE_HEAP and not shared.Settings.SAFE_HEAP_LOG and not shared.Settings.RELOCATABLE
def provide_fround():
return shared.Settings.PRECISE_F32 or shared.Settings.SIMD
def create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata):
function_table_sigs = function_table_data.keys()
asm_setup = ''
if shared.Settings.ASSERTIONS >= 2:
debug_tables_map = 'var debug_tables = {\n'
for sig in function_table_data:
# if the table is empty, debug_tables will not contain it
body = debug_tables.get(sig, [])
asm_setup += 'var debug_table_' + sig + ' = [' + ','.join(['0' if x == '0' else "'" + x.replace("'", '"') + "'" for x in body]) + '];\n'
debug_tables_map += " '" + sig + "': debug_table_" + sig + ',\n'
asm_setup += debug_tables_map + '};\n'
if shared.Settings.ASSERTIONS:
for sig in function_table_sigs:
asm_setup += 'function nullFunc_' + sig + '(x) { ' + get_function_pointer_error(sig, function_table_sigs) + ' }\n'
if shared.Settings.RELOCATABLE:
if not shared.Settings.SIDE_MODULE:
asm_setup += 'var gb = GLOBAL_BASE, fb = 0;\n'
side = 'parent' if shared.Settings.SIDE_MODULE else ''
def check(extern):
if shared.Settings.ASSERTIONS:
return ('\n assert(%sModule["%s"] || %s, "external symbol `%s` is missing. ' % (side, extern, extern, extern) +
'perhaps a side module was not linked in? if this symbol was expected to arrive '
'from a system library, try to build the MAIN_MODULE with '
'EMCC_FORCE_STDLIBS=1 in the environment");')
return ''
for extern in metadata['externs']:
asm_setup += 'var g$' + extern + ' = function() {' + check(extern) + '\n return ' + side + 'Module["' + extern + '"];\n}\n'
for extern in metadata['externFunctions']:
barename, sig = extern.split('$')
fullname = "fp$" + extern
key = '%sModule["%s"]' % (side, fullname)
asm_setup += '''\
var %s = function() {
if (!%s) { %s
var fid = addFunction(%sModule["%s"] || %s, "%s");
%s = fid;
}
return %s;
}
''' % (fullname, key, check(barename), side, barename, barename, sig, key, key)
asm_setup += create_invoke_wrappers(invoke_function_names)
asm_setup += setup_function_pointers(function_table_sigs)
if shared.Settings.EMULATED_FUNCTION_POINTERS:
function_tables_impls = make_function_tables_impls(function_table_data)
asm_setup += '\n' + '\n'.join(function_tables_impls) + '\n'
return asm_setup
def setup_function_pointers(function_table_sigs):
asm_setup = ''
for sig in function_table_sigs:
if shared.Settings.RESERVED_FUNCTION_POINTERS:
asm_setup += '\n' + shared.JS.make_jscall(sig) + '\n'
# nothing special to do here for wasm, we just use dynCalls
if not shared.Settings.WASM:
if shared.Settings.EMULATED_FUNCTION_POINTERS:
args = ['a%d' % i for i in range(len(sig) - 1)]
full_args = ['x'] + args
table_access = 'FUNCTION_TABLE_' + sig
if shared.Settings.SIDE_MODULE:
table_access = 'parentModule["' + table_access + '"]' # side module tables were merged into the parent, we need to access the global one
table_read = table_access + '[x]'
prelude = ''
if shared.Settings.ASSERTIONS:
prelude = '''
if (x < 0 || x >= %s.length) { err("Function table mask error (out of range)"); %s ; abort(x) }''' % (table_access, get_function_pointer_error(sig, function_table_sigs))
asm_setup += '''
function ftCall_%s(%s) {%s
return %s(%s);
}
''' % (sig, ', '.join(full_args), prelude, table_read, ', '.join(args))
return asm_setup
def create_basic_funcs(function_table_sigs, invoke_function_names):
basic_funcs = shared.Settings.RUNTIME_FUNCS_TO_IMPORT
if shared.Settings.STACK_OVERFLOW_CHECK:
basic_funcs += ['abortStackOverflow']
if shared.Settings.EMTERPRETIFY:
basic_funcs += ['abortStackOverflowEmterpreter']
if shared.Settings.SAFE_HEAP:
if asm_safe_heap():
basic_funcs += ['segfault', 'alignfault', 'ftfault']
else:
# Binaryen generates calls to these two so they are always needed with wasm
if shared.Settings.WASM:
basic_funcs += ['segfault', 'alignfault']
basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_LOAD_D', 'SAFE_HEAP_STORE', 'SAFE_HEAP_STORE_D', 'SAFE_FT_MASK']
if shared.Settings.ASSERTIONS:
for sig in function_table_sigs:
basic_funcs += ['nullFunc_' + sig]
basic_funcs += invoke_function_names
for sig in function_table_sigs:
if shared.Settings.RESERVED_FUNCTION_POINTERS:
basic_funcs.append('jsCall_%s' % sig)
if asm_js_emulated_function_pointers():
basic_funcs.append('ftCall_%s' % sig)
return basic_funcs
def create_basic_vars(exported_implemented_functions, forwarded_json, metadata):
basic_vars = []
if 'tempDoublePtr' in shared.Settings.ASM_PRIMITIVE_VARS:
basic_vars += ['tempDoublePtr']
if shared.Settings.RELOCATABLE:
if not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
basic_vars += ['gb', 'fb', 'STACKTOP', 'STACK_MAX']
else:
# wasm side modules have a specific convention for these
basic_vars += ['__memory_base', '__table_base']
if shared.Settings.EMTERPRETIFY:
basic_vars += ['EMTSTACKTOP', 'EMT_STACK_MAX', 'eb']
return basic_vars
def create_exports(exported_implemented_functions, in_table, function_table_data, metadata):
asm_runtime_funcs = create_asm_runtime_funcs()
all_exported = exported_implemented_functions + asm_runtime_funcs + function_tables(function_table_data)
# In asm.js + emulated function pointers, export all the table because we use
# JS to add the asm.js module's functions to the table (which is external
# in this mode). In wasm, we don't need that since wasm modules can
# directly add functions to the imported Table.
if not shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS:
all_exported += in_table
exports = []
for export in sorted(set(all_exported)):
exports.append(quote(export) + ": " + export)
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
# named globals in side wasm modules are exported globals from asm/wasm
for k, v in metadata['namedGlobals'].items():
exports.append(quote('_' + str(k)) + ': ' + str(v))
# aliases become additional exports
for k, v in metadata['aliases'].items():
exports.append(quote(str(k)) + ': ' + str(v))
# shared wasm emulated function pointer mode requires us to know the function pointer for
# each function. export fp$func => function pointer for func
if shared.Settings.WASM and shared.Settings.RELOCATABLE and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
for k, v in metadata['functionPointers'].items():
exports.append(quote('fp$' + str(k)) + ': ' + str(v))
return '{ ' + ', '.join(exports) + ' }'
def create_asm_runtime_funcs():
funcs = []
if not (shared.Settings.WASM and shared.Settings.SIDE_MODULE) and not shared.Settings.MINIMAL_RUNTIME:
funcs += ['stackAlloc', 'stackSave', 'stackRestore', 'establishStackSpace']
return funcs
def function_tables(function_table_data):
if not asm_js_emulated_function_pointers():
return ['dynCall_' + table for table in function_table_data]
else:
return []
def create_the_global(metadata):
# the global is only needed for asm.js
if shared.Settings.WASM:
return '{}'
fundamentals = []
if asm_backend_uses(metadata, 'Math.'):
fundamentals += ['Math']
for f in ['Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'NaN', 'Infinity']:
if asm_backend_uses(metadata, f):
fundamentals += [f]
if metadata['simd'] or shared.Settings.SIMD:
# Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
fundamentals += ['SIMD']
return '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
RUNTIME_ASSERTIONS = '''
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');'''
def create_receiving(function_table_data, function_tables_defs, exported_implemented_functions, initializers):
receiving = ''
if not shared.Settings.ASSERTIONS or shared.Settings.MINIMAL_RUNTIME:
runtime_assertions = ''
else:
runtime_assertions = RUNTIME_ASSERTIONS
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are some support code.
# WASM=1 already inserts runtime assertions, so no need to do it again here (see create_receiving_wasm)
if not shared.Settings.WASM:
receiving_functions = [f for f in exported_implemented_functions if f not in ('_memcpy', '_memset', '_emscripten_replace_memory', '__start_module')]
wrappers = []
for name in receiving_functions:
wrappers.append('''\
var real_%(name)s = asm["%(name)s"];
asm["%(name)s"] = function() {%(runtime_assertions)s
return real_%(name)s.apply(null, arguments);
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
receiving = '\n'.join(wrappers)
shared.Settings.MODULE_EXPORTS = module_exports = exported_implemented_functions + function_tables(function_table_data)
if not shared.Settings.SWAPPABLE_ASM_MODULE:
if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
imported_exports = [s for s in module_exports if s not in initializers]
if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
# In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
# var _main;
# WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
# var asm = output.instance.exports;
# _main = asm["_main"];
receiving += '\n'.join([s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
else:
if shared.Settings.MINIMAL_RUNTIME:
# In asm.js exports can be directly processed at top level, i.e.
# var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
# var _main = asm["_main"];
receiving += '\n'.join(['var ' + s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
else:
receiving += '\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"];' for s in module_exports]) + '\n'
else:
if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
global_object = '(typeof process !== "undefined" ? global : this)'
elif shared.Settings.target_environment_may_be('node'):
global_object = 'global'
else:
global_object = 'this'
if shared.Settings.MINIMAL_RUNTIME:
module_assign = ''
else:
module_assign = 'Module[__exportedFunc] = '
receiving += 'for(var __exportedFunc in asm) ' + global_object + '[__exportedFunc] = ' + module_assign + 'asm[__exportedFunc];\n'
else:
receiving += 'Module["asm"] = asm;\n'
wrappers = []
for name in module_exports:
wrappers.append('''\
var %(name)s = Module["%(name)s"] = function() {%(runtime_assertions)s
return Module["asm"]["%(name)s"].apply(null, arguments)
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
receiving += '\n'.join(wrappers)
if shared.Settings.EXPORT_FUNCTION_TABLES and not shared.Settings.WASM:
for table in function_table_data.values():
tableName = table.split()[1]
table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
receiving += table + '\n'
if shared.Settings.EMULATED_FUNCTION_POINTERS:
# in asm.js emulated function tables, emit the table on the outside, where
# JS can manage it (for wasm, a native wasm Table is used directly, and we
# don't need this)
if not shared.Settings.WASM:
receiving += '\n' + function_tables_defs.replace('// EMSCRIPTEN_END_FUNCS\n', '')
# wasm still needs definitions for dyncalls on the outside, for JS
receiving += '\n' + ''.join(['Module["dynCall_%s"] = dynCall_%s\n' % (sig, sig) for sig in function_table_data])
if not shared.Settings.WASM:
for sig in function_table_data.keys():
name = 'FUNCTION_TABLE_' + sig
fullname = name if not shared.Settings.SIDE_MODULE else ('SIDE_' + name)
receiving += 'Module["' + name + '"] = ' + fullname + ';\n'
return receiving
def create_fp_accessors(metadata):
if not shared.Settings.RELOCATABLE:
return ''
# Create `fp$XXX` handlers for determining function pointers (table addresses)
# at runtime.
# For SIDE_MODULEs these are generated by the proxyHandler at runtime.
accessors = []
for fullname in metadata['declares']:
if not fullname.startswith('fp$'):
continue
_, name, sig = fullname.split('$')
mangled = asmjs_mangle(name)
side = 'parent' if shared.Settings.SIDE_MODULE else ''
assertion = ('\n assert(%sModule["%s"] || typeof %s !== "undefined", "external function `%s` is missing. ' % (side, mangled, mangled, name) +
'perhaps a side module was not linked in? if this symbol was expected to arrive '
'from a system library, try to build the MAIN_MODULE with '
'EMCC_FORCE_STDLIBS=XX in the environment");')
accessors.append('''
Module['%(full)s'] = function() {
%(assert)s
var func = Module['%(mangled)s'];
if (!func)
func = %(mangled)s;
var fp = addFunction(func, '%(sig)s');
Module['%(full)s'] = function() { return fp };
return fp;
}
''' % {'full': asmjs_mangle(fullname), 'mangled': mangled, 'assert': assertion, 'sig': sig})
return '\n'.join(accessors)
def create_named_globals(metadata):
if not shared.Settings.RELOCATABLE:
return ''
named_globals = '''
var NAMED_GLOBALS = {
%s
};
for (var named in NAMED_GLOBALS) {
Module['_' + named] = gb + NAMED_GLOBALS[named];
}
Module['NAMED_GLOBALS'] = NAMED_GLOBALS;
''' % ',\n '.join('"' + k + '": ' + str(v) for k, v in metadata['namedGlobals'].items())
if shared.Settings.WASM:
# wasm side modules are pure wasm, and cannot create their g$..() methods, so we help them out
# TODO: this works if we are the main module, but if the supplying module is later, it won't, so
# we'll need another solution for that. one option is to scan the module imports, if/when
# wasm supports that, then the loader can do this.
named_globals += '''
for (var named in NAMED_GLOBALS) {
(function(named) {
var addr = Module['_' + named];
Module['g$_' + named] = function() { return addr };
})(named);
}
'''
named_globals += ''.join(["Module['%s'] = Module['%s']\n" % (k, v) for k, v in metadata['aliases'].items()])
return named_globals
def create_runtime_funcs_asmjs(exports, metadata):
if shared.Settings.ASSERTIONS or shared.Settings.STACK_OVERFLOW_CHECK >= 2:
stack_check = ' if ((STACKTOP|0) >= (STACK_MAX|0)) abortStackOverflow(size|0);\n'
else:
stack_check = ''
funcs = ['''
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
STACKTOP = (STACKTOP + 15)&-16;
%s
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
function establishStackSpace(stackBase, stackMax) {
stackBase = stackBase|0;
stackMax = stackMax|0;
STACKTOP = stackBase;
STACK_MAX = stackMax;
}
''' % stack_check]
if shared.Settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME moves stack functions to library.
funcs = []
if shared.Settings.EMTERPRETIFY:
funcs.append('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
pc = pc | 0;
assert(0);
}''')
if shared.Settings.EMTERPRETIFY_ASYNC:
funcs.append('''
function setAsyncState(x) {
x = x | 0;
asyncState = x;
}
function emtStackSave() {
return EMTSTACKTOP|0;
}
function emtStackRestore(x) {
x = x | 0;
EMTSTACKTOP = x;
}
function getEmtStackMax() {
return EMT_STACK_MAX | 0;
}
function setEmtStackMax(x) {
x = x | 0;
EMT_STACK_MAX = x;
}
''')
if asm_safe_heap():
if '_sbrk' in metadata['implementedFunctions']:
brk_check = 'if ((dest + bytes|0) > (HEAP32[(_emscripten_get_sbrk_ptr()|0)>>2]|0)) segfault();'
else:
# sbrk and malloc were not linked in, but SAFE_HEAP is used - so safe heap
# can ignore the sbrk location.
brk_check = ''
funcs.append('''
function SAFE_HEAP_STORE(dest, value, bytes) {
dest = dest | 0;
value = value | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
HEAP32[dest>>2] = value;
} else if ((bytes|0) == 1) {
HEAP8[dest>>0] = value;
} else {
if ((dest&1)) alignfault();
HEAP16[dest>>1] = value;
}
}
function SAFE_HEAP_STORE_D(dest, value, bytes) {
dest = dest | 0;
value = +value;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
HEAPF64[dest>>3] = value;
} else {
if ((dest&3)) alignfault();
HEAPF32[dest>>2] = value;
}
}
function SAFE_HEAP_LOAD(dest, bytes, unsigned) {
dest = dest | 0;
bytes = bytes | 0;
unsigned = unsigned | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
return HEAP32[dest>>2] | 0;
} else if ((bytes|0) == 1) {
if (unsigned) {
return HEAPU8[dest>>0] | 0;
} else {
return HEAP8[dest>>0] | 0;
}
}
if ((dest&1)) alignfault();
if (unsigned) return HEAPU16[dest>>1] | 0;
return HEAP16[dest>>1] | 0;
}
function SAFE_HEAP_LOAD_D(dest, bytes) {
dest = dest | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
return +HEAPF64[dest>>3];
}
if ((dest&3)) alignfault();
return +HEAPF32[dest>>2];
}
function SAFE_FT_MASK(value, mask) {
value = value | 0;
mask = mask | 0;
var ret = 0;
ret = value & mask;
if ((ret|0) != (value|0)) ftfault();
return ret | 0;
}
''' % {'brk_check': brk_check})
return funcs
def create_asm_start_pre(asm_setup, the_global, sending, metadata):
shared_array_buffer = ''
if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
shared_array_buffer = "asmGlobalArg['Atomics'] = Atomics;"
module_global = 'var asmGlobalArg = ' + the_global + ';'
module_library = 'var asmLibraryArg = ' + sending + ';'
asm_function_top = ('// EMSCRIPTEN_START_ASM\n'
'var asm = (/** @suppress {uselessCode} */ function(global, env, buffer) {')
use_asm = "'almost asm';"
if shared.Settings.ASM_JS == 1:
use_asm = "'use asm';"
lines = [
asm_setup,
module_global,
shared_array_buffer,
module_library,
asm_function_top,
use_asm,
create_first_in_asm(),
]
return '\n'.join(lines)
def create_asm_temp_vars(metadata):
temp_ints = ['__THREW__', 'threwValue', 'setjmpId', 'tempInt', 'tempBigInt', 'tempBigIntS', 'tempValue']
temp_doubles = ['tempDouble']
rtn = ''
for i in temp_ints:
if i in shared.Settings.ASM_PRIMITIVE_VARS:
rtn += 'var ' + i + ' = 0;\n'
for i in temp_doubles:
if i in shared.Settings.ASM_PRIMITIVE_VARS:
rtn += 'var ' + i + ' = 0.0;\n'
if asm_backend_uses(metadata, 'NaN'):
rtn += 'var nan = global%s;\n' % (access_quote('NaN'))
if asm_backend_uses(metadata, 'Infinity'):
rtn += 'var inf = global%s;\n' % (access_quote('Infinity'))
return rtn
def create_asm_runtime_thread_local_vars():
if not shared.Settings.USE_PTHREADS:
return ''
return '''
var __pthread_ptr = 0;
var __pthread_is_main_runtime_thread = 0;
var __pthread_is_main_browser_thread = 0;
'''
def create_replace_memory(metadata):
if not shared.Settings.ALLOW_MEMORY_GROWTH:
return ''
emscripten_replace_memory = '''
function _emscripten_replace_memory(newBuffer) {
'''
for heap, view in [
('HEAP8', 'Int8Array'),
('HEAPU8', 'Uint8Array'),
('HEAP16', 'Int16Array'),
('HEAPU16', 'Uint16Array'),
('HEAP32', 'Int32Array'),
('HEAPU32', 'Uint32Array'),
('HEAPF32', 'Float32Array'),
('HEAPF64', 'Float64Array')]:
if asm_backend_uses(metadata, view):
emscripten_replace_memory += ' %s = new %s(newBuffer);\n' % (heap, view)
emscripten_replace_memory += '''
buffer = newBuffer;
return true;
}
'''
return emscripten_replace_memory
def create_asm_end(exports):
if shared.Settings.MINIMAL_RUNTIME and shared.Settings.WASM:
return '''
return %s;
})
// EMSCRIPTEN_END_ASM
''' % (exports)
return '''
return %s;
})
// EMSCRIPTEN_END_ASM
(asmGlobalArg, asmLibraryArg, buffer);
''' % (exports)
def create_first_in_asm():
return ''
def create_memory_views(metadata):
"""Generates memory views for the different heap types.
Generated symbols:
Int8View Int16View Int32View
Uint8View Uint16View Uint32View
Float32View Float64View
"""
ret = '\n'
for info in HEAP_TYPE_INFOS:
heap_name = '{}Array'.format(info.long_name)
access = access_quote(heap_name)
if asm_backend_uses(metadata, heap_name):
format_args = {
'heap': info.heap_name,
'long': info.long_name,
'access': access,
}
ret += ' var {heap} = new global{access}(buffer);\n'.format(**format_args)
return ret
class HeapTypeInfo(object):
"""Struct that holds data for a type of HEAP* views."""
def __init__(self, heap_name, long_name, shift_amount):
assert heap_name.startswith('HEAP')
self.heap_name = heap_name
self.long_name = long_name
self.shift_amount = shift_amount
def short_name(self):
"""The unique part of the heap name for this type.
Derive this from heap_name instead of the other way around so that searching,
e.g. for HEAP8, from the generated JS code leads back here.
"""
return self.heap_name[len('HEAP'):]
def is_int(self):
"""Whether this heap type is an integer type or not."""
return self.short_name()[0] != 'F'
def coerce(self, expression):
"""Adds asm.js type coercion to a string expression."""
if self.is_int():
return expression + '| 0'
else:
return '+' + expression
HEAP_TYPE_INFOS = [
HeapTypeInfo(heap_name='HEAP8', long_name='Int8', shift_amount=0),
HeapTypeInfo(heap_name='HEAP16', long_name='Int16', shift_amount=1),
HeapTypeInfo(heap_name='HEAP32', long_name='Int32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPU8', long_name='Uint8', shift_amount=0),
HeapTypeInfo(heap_name='HEAPU16', long_name='Uint16', shift_amount=1),
HeapTypeInfo(heap_name='HEAPU32', long_name='Uint32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPF32', long_name='Float32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPF64', long_name='Float64', shift_amount=3),
]
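# Note: shift_amount is log2 of the element size in bytes, matching how asm.js
# indexes the views, e.g. HEAP32[addr >> 2] and HEAPF64[addr >> 3] address byte
# offset addr.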
def emscript_wasm_backend(infile, outfile, memfile, compiler_engine,
temp_files, DEBUG):
# Overview:
# * Run wasm-emscripten-finalize to extract metadata and modify the binary
# to use emscripten's wasm<->JS ABI
# * Use the metadata to generate the JS glue that goes with the wasm
metadata = finalize_wasm(temp_files, infile, outfile, memfile, DEBUG)
update_settings_glue(metadata, DEBUG)
if shared.Settings.SIDE_MODULE:
return
if DEBUG:
logger.debug('emscript: js compiler glue')
if DEBUG:
t = time.time()
glue, forwarded_data = compile_settings(compiler_engine, temp_files)
if DEBUG:
logger.debug(' emscript: glue took %s seconds' % (time.time() - t))
t = time.time()
forwarded_json = json.loads(forwarded_data)
# For the wasm backend the implementedFunctions from compiler.js should
# always be empty. This only gets populated for __asm functions when using
# the JS backend.
assert not forwarded_json['Functions']['implementedFunctions']
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
# memory and global initializers
global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in metadata['initializers'])
staticbump = shared.Settings.STATIC_BUMP
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
/* global initializers */ %s __ATINIT__.push(%s);
''' % (staticbump,
'if (!ENVIRONMENT_IS_PTHREAD)' if shared.Settings.USE_PTHREADS else '',
global_initializers))
pre = apply_memory(pre)
pre = apply_static_code_hooks(pre)
if shared.Settings.RELOCATABLE and not shared.Settings.SIDE_MODULE:
pre += 'var gb = GLOBAL_BASE, fb = 0;\n'
# merge forwarded data
shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
exports = metadata['exports']
if shared.Settings.ASYNCIFY:
exports += ['asyncify_start_unwind', 'asyncify_stop_unwind', 'asyncify_start_rewind', 'asyncify_stop_rewind']
report_missing_symbols(set([asmjs_mangle(f) for f in exports]), pre)
asm_consts, asm_const_funcs = create_asm_consts_wasm(forwarded_json, metadata)
em_js_funcs = create_em_js(forwarded_json, metadata)
asm_const_pairs = ['%s: %s' % (key, value) for key, value in asm_consts]
asm_const_map = 'var ASM_CONSTS = {\n ' + ', \n '.join(asm_const_pairs) + '\n};\n'
pre = pre.replace(
'// === Body ===',
('// === Body ===\n\n' + asm_const_map +
asstr('\n'.join(asm_const_funcs)) +
'\n'.join(em_js_funcs) + '\n'))
pre = apply_table(pre)
outfile.write(pre)
pre = None
invoke_funcs = metadata['invokeFuncs']
if shared.Settings.RELOCATABLE:
invoke_funcs.append('invoke_X')
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
sending = create_sending_wasm(invoke_funcs, forwarded_json, metadata)
receiving = create_receiving_wasm(exports)
module = create_module_wasm(sending, receiving, invoke_funcs, metadata)
write_output_file(outfile, post, module)
module = None
outfile.close()
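# remove_trailing_zeros truncates trailing zero bytes from the .mem file, e.g. a
# file containing b'abc\x00\x00' is rewritten as b'abc'; the runtime zeroes that
# memory anyway (see the comment at the call site in finalize_wasm).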
def remove_trailing_zeros(memfile):
with open(memfile, 'rb') as f:
mem_data = f.read()
end = len(mem_data)
while end > 0 and (mem_data[end - 1] == b'\0' or mem_data[end - 1] == 0):
end -= 1
with open(memfile, 'wb') as f:
f.write(mem_data[:end])
def finalize_wasm(temp_files, infile, outfile, memfile, DEBUG):
wasm_emscripten_finalize = os.path.join(shared.Building.get_binaryen_bin(), 'wasm-emscripten-finalize')
wasm_dis = os.path.join(shared.Building.get_binaryen_bin(), 'wasm-dis')
def debug_copy(src, dst):
if DEBUG:
shutil.copyfile(src, os.path.join(shared.CANONICAL_TEMP_DIR, dst))
if src[-2:] == '.o' or src[-5:] == '.wasm':
tmp = dst + '.wast'
shared.check_call([wasm_dis, src, '-o', os.path.join(shared.CANONICAL_TEMP_DIR, tmp)])
basename = shared.unsuffixed(outfile.name)
wasm = basename + '.wasm'
base_wasm = infile
debug_copy(infile, 'base.wasm')
write_source_map = shared.Settings.DEBUG_LEVEL >= 4
if write_source_map:
base_source_map = base_wasm + '.map'
sourcemap_cmd = [shared.PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
base_wasm,
'--dwarfdump=' + shared.LLVM_DWARFDUMP,
'-o', base_source_map]
if not shared.Settings.SOURCE_MAP_BASE:
logger.warning("Wasm source map won't be usable in a browser without --source-map-base")
shared.check_call(sourcemap_cmd)
debug_copy(base_source_map, 'base_wasm.map')
cmd = [wasm_emscripten_finalize, base_wasm, '-o', wasm]
# tell binaryen to look at the features section, and if there isn't one, to use MVP
# (which matches what llvm+lld has given us)
cmd += ['--detect-features']
if shared.Settings.DEBUG_LEVEL >= 2 or shared.Settings.PROFILING_FUNCS or shared.Settings.EMIT_SYMBOL_MAP or shared.Settings.ASYNCIFY_WHITELIST or shared.Settings.ASYNCIFY_BLACKLIST:
cmd.append('-g')
if shared.Settings.LEGALIZE_JS_FFI != 1:
cmd.append('--no-legalize-javascript-ffi')
if write_source_map:
cmd.append('--input-source-map=' + base_source_map)
cmd.append('--output-source-map=' + wasm + '.map')
cmd.append('--output-source-map-url=' + shared.Settings.SOURCE_MAP_BASE + os.path.basename(shared.Settings.WASM_BINARY_FILE) + '.map')
if not shared.Settings.MEM_INIT_IN_WASM:
cmd.append('--separate-data-segments=' + memfile)
if shared.Settings.SIDE_MODULE:
cmd.append('--side-module')
else:
# --global-base is used by wasm-emscripten-finalize to calculate the size
# of the static data used. The argument we supply here needs to match the
# global base used by lld (see Building.link_lld). For relocatable output this is
# zero for the global base although at runtime __memory_base is used.
# For non-relocatable output we use shared.Settings.GLOBAL_BASE.
# TODO(sbc): Can we remove this argument and infer it from the segment
# initializer?
if shared.Settings.RELOCATABLE:
cmd.append('--global-base=0')
else:
cmd.append('--global-base=%s' % shared.Settings.GLOBAL_BASE)
if shared.Settings.SAFE_STACK:
cmd.append('--check-stack-overflow')
if shared.Settings.STANDALONE_WASM:
cmd.append('--standalone-wasm')
shared.print_compiler_stage(cmd)
stdout = shared.check_call(cmd, stdout=subprocess.PIPE).stdout
if write_source_map:
debug_copy(wasm + '.map', 'post_finalize.map')
debug_copy(wasm, 'post_finalize.wasm')
if not shared.Settings.MEM_INIT_IN_WASM:
# we have a separate .mem file. binaryen did not strip any trailing zeros,
# because it's an ABI question as to whether it is valid to do so or not.
# we can do so here, since we make sure to zero out that memory (even in
# the dynamic linking case, our loader zeros it out)
remove_trailing_zeros(memfile)
return load_metadata_wasm(stdout, DEBUG)
def create_asm_consts_wasm(forwarded_json, metadata):
asm_consts = {}
all_sigs = []
for k, v in metadata['asmConsts'].items():
const, sigs, call_types = v
const = asstr(const)
const = trim_asm_const_body(const)
args = []
max_arity = 16
arity = 0
for i in range(max_arity):
if ('$' + str(i)) in const:
arity = i + 1
for i in range(arity):
args.append('$' + str(i))
const = 'function(' + ', '.join(args) + ') {' + const + '}'
asm_consts[int(k)] = const
for sig, call_type in zip(sigs, call_types):
all_sigs.append((sig, call_type))
asm_const_funcs = []
if all_sigs:
# emit the signature-reading helper function only if we have any EM_ASM
# functions in the module
check = ''
if shared.Settings.ASSERTIONS:
check = ' else abort("unexpected char in asm const signature " + ch);'
asm_const_funcs.append(r'''
// Avoid creating a new array
var _readAsmConstArgsArray = [];
function readAsmConstArgs(sigPtr, buf) {
var args = _readAsmConstArgsArray;
args.length = 0;
while (1) {
var ch = HEAPU8[sigPtr++];
if (!ch) return args;
if (ch === 'd'.charCodeAt(0) || ch === 'f'.charCodeAt(0)) {
buf = alignMemory(buf, 8);
args.push(HEAPF64[(buf >> 3)]);
buf += 8;
} else if (ch === 'i'.charCodeAt(0)) {
buf = alignMemory(buf, 4);
args.push(HEAP32[(buf >> 2)]);
buf += 4;
}%s
}
}
''' % check)
for sig, call_type in set(all_sigs):
const_name = '_emscripten_asm_const_' + call_type + sig
forwarded_json['Functions']['libraryFunctions'][const_name] = 1
preamble = ''
if shared.Settings.USE_PTHREADS:
sync_proxy = call_type == 'sync_on_main_thread_'
async_proxy = call_type == 'async_on_main_thread_'
proxied = sync_proxy or async_proxy
if proxied:
# In proxied function calls, positive integers 1, 2, 3, ... denote pointers
# to regular C compiled functions. Negative integers -1, -2, -3, ... denote
# indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
# ... over to the negative integers starting at -1.
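# For example, with this remapping EM_ASM block 0 is forwarded to the proxy
# as -1 and block 2 as -3 (i.e. -1 - code below).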
preamble += ('\n if (ENVIRONMENT_IS_PTHREAD) { ' +
proxy_debug_print(sync_proxy) +
'return _emscripten_proxy_to_main_thread_js(-1 - code, ' +
str(int(sync_proxy)) +
', code, sigPtr, argbuf); }')
if shared.Settings.RELOCATABLE:
preamble += '\n code -= %s;\n' % shared.Settings.GLOBAL_BASE
asm_const_funcs.append(r'''
function %s(code, sigPtr, argbuf) {%s
var args = readAsmConstArgs(sigPtr, argbuf);
return ASM_CONSTS[code].apply(null, args);
}''' % (const_name, preamble))
asm_consts = [(key, value) for key, value in asm_consts.items()]
asm_consts.sort()
return asm_consts, asm_const_funcs
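# Illustrative sketch of the transformation above (the metadata value shown
# here is an assumed, simplified example, not real compiler output): given
#   metadata['asmConsts'] == {"0": ["return $0 + $1;", ["iii"], [""]]}
# the arity scan finds $0 and $1, so ASM_CONSTS[0] becomes
#   function($0, $1) {return $0 + $1;}
# and one reader stub named _emscripten_asm_const_iii is registered.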
def create_em_js(forwarded_json, metadata):
em_js_funcs = []
separator = '<::>'
for name, raw in metadata.get('emJsFuncs', {}).items():
assert separator in raw
args, body = raw.split(separator, 1)
args = args[1:-1]
if args == 'void':
args = []
else:
args = args.split(',')
arg_names = [arg.split()[-1].replace("*", "") for arg in args if arg]
func = 'function {}({}){}'.format(name, ','.join(arg_names), asstr(body))
em_js_funcs.append(func)
forwarded_json['Functions']['libraryFunctions'][name] = 1
return em_js_funcs
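# Rough example of what create_em_js produces (assumed metadata entry, for
# illustration only): an emJsFuncs item such as
#   "my_func": "(int x, char* name)<::>{ return x; }"
# splits on '<::>' and is emitted as
#   function my_func(x,name){ return x; }
# which is then registered as a JS library function.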
def add_standard_wasm_imports(send_items_map):
# Normally we import these into the wasm (so that JS could use them even
# before the wasm loads), while in standalone mode we do not depend
# on JS to create them, but create them in the wasm and export them.
if not shared.Settings.STANDALONE_WASM:
memory_import = 'wasmMemory'
if shared.Settings.MODULARIZE and shared.Settings.USE_PTHREADS:
# Pthreads assign wasmMemory in their worker startup. In MODULARIZE mode, they cannot assign inside the
# Module scope, so lookup via Module as well.
memory_import += " || Module['wasmMemory']"
send_items_map['memory'] = memory_import
send_items_map['table'] = 'wasmTable'
# With the wasm backend __memory_base and __table_base are only needed for
# relocatable output.
if shared.Settings.RELOCATABLE or not shared.Settings.WASM_BACKEND: # FIXME
send_items_map['__memory_base'] = str(shared.Settings.GLOBAL_BASE) # tell the memory segments where to place themselves
# the wasm backend reserves slot 0 for the NULL function pointer
table_base = '1' if shared.Settings.WASM_BACKEND else '0'
send_items_map['__table_base'] = table_base
if shared.Settings.RELOCATABLE and shared.Settings.WASM_BACKEND: # FIXME
send_items_map['__stack_pointer'] = 'STACK_BASE'
if shared.Settings.MAYBE_WASM2JS or shared.Settings.AUTODEBUG or shared.Settings.LINKABLE:
# legalization of i64 support code may require these in some modes
send_items_map['setTempRet0'] = 'setTempRet0'
send_items_map['getTempRet0'] = 'getTempRet0'
if shared.Settings.AUTODEBUG:
send_items_map['log_execution'] = '''function(loc) {
console.log('log_execution ' + loc);
}'''
send_items_map['get_i32'] = '''function(loc, index, value) {
console.log('get_i32 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_i64'] = '''function(loc, index, low, high) {
console.log('get_i64 ' + [loc, index, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['get_f32'] = '''function(loc, index, value) {
console.log('get_f32 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_f64'] = '''function(loc, index, value) {
console.log('get_f64 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_anyref'] = '''function(loc, index, value) {
console.log('get_anyref ' + [loc, index, value]);
return value;
}'''
send_items_map['get_exnref'] = '''function(loc, index, value) {
console.log('get_exnref ' + [loc, index, value]);
return value;
}'''
send_items_map['set_i32'] = '''function(loc, index, value) {
console.log('set_i32 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_i64'] = '''function(loc, index, low, high) {
console.log('set_i64 ' + [loc, index, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['set_f32'] = '''function(loc, index, value) {
console.log('set_f32 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_f64'] = '''function(loc, index, value) {
console.log('set_f64 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_anyref'] = '''function(loc, index, value) {
console.log('set_anyref ' + [loc, index, value]);
return value;
}'''
send_items_map['set_exnref'] = '''function(loc, index, value) {
console.log('set_exnref ' + [loc, index, value]);
return value;
}'''
send_items_map['load_ptr'] = '''function(loc, bytes, offset, ptr) {
console.log('load_ptr ' + [loc, bytes, offset, ptr]);
return ptr;
}'''
send_items_map['load_val_i32'] = '''function(loc, value) {
console.log('load_val_i32 ' + [loc, value]);
return value;
}'''
send_items_map['load_val_i64'] = '''function(loc, low, high) {
console.log('load_val_i64 ' + [loc, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['load_val_f32'] = '''function(loc, value) {
console.log('load_val_f32 ' + [loc, value]);
return value;
}'''
send_items_map['load_val_f64'] = '''function(loc, value) {
console.log('load_val_f64 ' + [loc, value]);
return value;
}'''
send_items_map['store_ptr'] = '''function(loc, bytes, offset, ptr) {
console.log('store_ptr ' + [loc, bytes, offset, ptr]);
return ptr;
}'''
send_items_map['store_val_i32'] = '''function(loc, value) {
console.log('store_val_i32 ' + [loc, value]);
return value;
}'''
send_items_map['store_val_i64'] = '''function(loc, low, high) {
console.log('store_val_i64 ' + [loc, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['store_val_f32'] = '''function(loc, value) {
console.log('store_val_f32 ' + [loc, value]);
return value;
}'''
send_items_map['store_val_f64'] = '''function(loc, value) {
console.log('store_val_f64 ' + [loc, value]);
return value;
}'''
def create_sending_wasm(invoke_funcs, forwarded_json, metadata):
basic_funcs = []
if shared.Settings.SAFE_HEAP:
basic_funcs += ['segfault', 'alignfault']
em_asm_sigs = [zip(sigs, call_types) for _, sigs, call_types in metadata['asmConsts'].values()]
# flatten em_asm_sigs
em_asm_sigs = [sig for sigs in em_asm_sigs for sig in sigs]
em_asm_funcs = ['_emscripten_asm_const_' + call_type + sig for sig, call_type in em_asm_sigs]
em_js_funcs = list(metadata['emJsFuncs'].keys())
declared_items = ['_' + item for item in metadata['declares']]
send_items = set(basic_funcs + invoke_funcs + em_asm_funcs + em_js_funcs + declared_items)
def fix_import_name(g):
if g.startswith('Math_'):
return g.split('_')[1]
# Unlike fastcomp the wasm backend doesn't use the '_' prefix for native
# symbols. Emscripten currently expects symbols to start with '_' so we
# artificially add them to the output of emscripten-wasm-finalize and then
# strip them again here.
# note that we don't do this for EM_JS functions (which, rarely, may have
# a '_' prefix)
if g.startswith('_') and g not in metadata['emJsFuncs']:
return g[1:]
return g
send_items_map = OrderedDict()
for name in send_items:
internal_name = fix_import_name(name)
if internal_name in send_items_map:
exit_with_error('duplicate symbol in exports to wasm: %s', name)
send_items_map[internal_name] = name
add_standard_wasm_imports(send_items_map)
sorted_keys = sorted(send_items_map.keys())
return '{ ' + ', '.join('"' + k + '": ' + send_items_map[k] for k in sorted_keys) + ' }'
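# The returned value is the text of a JS object literal mapping wasm import
# names to the JS symbols that implement them, roughly (names illustrative):
#   { "__memory_base": 1024, "invoke_vi": invoke_vi, "memory": wasmMemory, ... }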
def create_receiving_wasm(exports):
receiving = []
if not shared.Settings.ASSERTIONS:
runtime_assertions = ''
else:
runtime_assertions = RUNTIME_ASSERTIONS
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
# some support code
for e in exports:
receiving.append('''\
var real_%(mangled)s = asm["%(e)s"];
asm["%(e)s"] = function() {%(assertions)s
return real_%(mangled)s.apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
if not shared.Settings.SWAPPABLE_ASM_MODULE:
for e in exports:
receiving.append('var %(mangled)s = Module["%(mangled)s"] = asm["%(e)s"];' % {'mangled': asmjs_mangle(e), 'e': e})
else:
receiving.append('Module["asm"] = asm;')
for e in exports:
receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
return Module["asm"]["%(e)s"].apply(null, arguments)
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
return '\n'.join(receiving) + '\n'
def create_module_wasm(sending, receiving, invoke_funcs, metadata):
invoke_wrappers = create_invoke_wrappers(invoke_funcs)
receiving += create_named_globals(metadata)
receiving += create_fp_accessors(metadata)
module = []
module.append('var asmGlobalArg = {};\n')
if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
module.append("if (typeof SharedArrayBuffer !== 'undefined') asmGlobalArg['Atomics'] = Atomics;\n")
module.append('var asmLibraryArg = %s;\n' % (sending))
if shared.Settings.ASYNCIFY and shared.Settings.ASSERTIONS:
module.append('Asyncify.instrumentWasmImports(asmLibraryArg);\n')
module.append("var asm = createWasm();\n")
module.append(receiving)
module.append(invoke_wrappers)
return module
def load_metadata_wasm(metadata_raw, DEBUG):
try:
metadata_json = json.loads(metadata_raw)
except Exception:
logger.error('emscript: failure to parse metadata output from wasm-emscripten-finalize. raw output is: \n' + metadata_raw)
raise
metadata = {
'aliases': {},
'declares': [],
'implementedFunctions': [],
'externs': [],
'simd': False,
'maxGlobalAlign': 0,
'staticBump': 0,
'tableSize': 0,
'initializers': [],
'exports': [],
'namedGlobals': {},
'emJsFuncs': {},
'asmConsts': {},
'invokeFuncs': [],
'features': [],
'mainReadsParams': 1,
}
assert 'tableSize' in metadata_json.keys()
for key, value in metadata_json.items():
# json.loads returns `unicode` for strings but other code in this file
# generally works with utf8 encoded `str` objects, and they don't always
# mix well. e.g. s.replace(x, y) will blow up if `s` is a utf8 str containing
# non-ascii and either x or y are unicode objects.
# TODO(sbc): Remove this encoding if we switch to unicode elsewhere
# (specifically the glue returned from compile_settings)
if type(value) == list:
value = [asstr(v) for v in value]
if key not in metadata:
exit_with_error('unexpected metadata key received from wasm-emscripten-finalize: %s', key)
metadata[key] = value
# Initializers call the global var version of the export, so they get the mangled name.
metadata['initializers'] = [asmjs_mangle(i) for i in metadata['initializers']]
if DEBUG:
logger.debug("Metadata parsed: " + pprint.pformat(metadata))
# Calculate the subset of exports that were explicitly marked with llvm.used.
# These are any exports that were not requested on the command line and are
# not known auto-generated system functions.
unexpected_exports = [e for e in metadata['exports'] if treat_as_user_function(e)]
unexpected_exports = [asmjs_mangle(e) for e in unexpected_exports]
unexpected_exports = [e for e in unexpected_exports if e not in shared.Settings.EXPORTED_FUNCTIONS]
shared.Building.user_requested_exports += unexpected_exports
return metadata
def create_invoke_wrappers(invoke_funcs):
"""Asm.js-style exception handling: invoke wrapper generation."""
invoke_wrappers = ''
for invoke in invoke_funcs:
sig = invoke[len('invoke_'):]
invoke_wrappers += '\n' + shared.JS.make_invoke(sig) + '\n'
return invoke_wrappers
def treat_as_user_function(name):
library_functions_in_module = ('setTempRet0', 'getTempRet0', 'stackAlloc',
'stackSave', 'stackRestore',
'establishStackSpace', '__growWasmMemory',
'__heap_base', '__data_end')
if name.startswith('dynCall_'):
return False
if name in library_functions_in_module:
return False
return True
def asmjs_mangle(name):
"""Mangle a name the way asm.js/JSBackend globals are mangled.
Prepends '_' and replaces non-alphanumerics with '_'.
Used by wasm backend for JS library consistency with asm.js.
"""
if treat_as_user_function(name):
return '_' + name
else:
return name
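# Example: asmjs_mangle('main') returns '_main', while runtime helpers such as
# 'stackSave' or any 'dynCall_*' name are returned unchanged.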
def normalize_line_endings(text):
"""Normalize to UNIX line endings.
On Windows, writing to a text file will duplicate \r\n to \r\r\n otherwise.
"""
if WINDOWS:
return text.replace('\r\n', '\n')
return text
def run(infile, outfile, memfile):
temp_files = get_configuration().get_temp_files()
infile, outfile = substitute_response_files([infile, outfile])
if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO:
generated_struct_info_name = 'generated_struct_info.json'
def generate_struct_info():
with ToolchainProfiler.profile_block('gen_struct_info'):
out = shared.Cache.get_path(generated_struct_info_name)
gen_struct_info.main(['-q', '-c', '-o', out])
return out
shared.Settings.STRUCT_INFO = shared.Cache.get(generated_struct_info_name, generate_struct_info)
# do we need an else, to define it for the bootstrap case?
outfile_obj = open(outfile, 'w')
emscripter = emscript_wasm_backend if shared.Settings.WASM_BACKEND else emscript_fastcomp
return temp_files.run_and_clean(lambda: emscripter(
infile, outfile_obj, memfile, shared.NODE_JS, temp_files, shared.DEBUG)
)
|
the-stack_106_28777 | from .dependencies.interfaceFAAL import interfaceFAAL
from gensim.test.utils import datapath
from gensim.models import KeyedVectors
from gensim.similarities import WmdSimilarity
import json
import os
from subprocess import *
import copy
from py4j.java_gateway import JavaGateway, GatewayParameters
from .progbar import progbar
def initializeFAAL():
jarFAAL = os.path.join(os.path.dirname(__file__), 'dependencies', 'FAAL_jar', 'FAAL_jar_Global_ALeA.jar')
jarFolder = os.path.join(os.path.dirname(__file__), 'dependencies', 'FAAL_jar')
#process = call(['java', '-jar', jarFAAL], stdout=PIPE, #stderr=PIPE,
# cwd=jarFolder)
process = Popen(['java', '-jar', jarFAAL], #stdout=PIPE, #stderr=PIPE,
cwd=jarFolder)
#process = os.system("java -jar " + jarFAAL)
#while True:
# output = process.stdout.readline()
# if not output == '':
# print("FAAL jar is running...")
# break
return process
def terminateFAAL(process):
process.terminate()
def ALeA(json_semanticSelection_One, json_semanticSelection_Two, pathModel, pathOutput, scoreAlignPhon = "09_Aver_Score_Sem-Phon_Corr", verbose = False, semanticLevel = "Level_01", dividers = [","], selectBest = "07_Sim_Score_Phon_Corr_Match", selectBestThreshold = 0.65, parseVow = True):
"""
:param json_semanticSelection_One: first semantically tagged lexical list
-- format: json string - output of the ASeT algorithm
:param json_semanticSelection_Two: second semantically tagged lexical list
-- format: json string - output of the ASeT algorithm
:param pathModel: path to saved semantic model (string)
:param pathOutput: path to save the results (string - no extension; e.g. /my/folder/name_file_with_my_results)
:param scoreAlignPhon: select type of score according to which the phonetic alignments are organized (string)
-- default: "09_Aver_Score_Sem-Phon_Corr"
-- options: "07_Sim_Score_Phon_Corr_Match", "08_Sim_Score_Phon_Glob_Match", "09_Aver_Score_Sem-Phon_Corr", or "10_Aver_Score_Sem-Phon_Glob"
-- "07_Sim_Score_Phon_Corr_Match" uses the function "(((SumFeat) / (NrFeat * 7.71)) / (LenAlign * 4.77117)"
-- "09_Aver_Score_Sem-Phon_Corr" is the average between the semantic score and the "07_Sim_Score_Phon_Corr_Match"
-- "10_Aver_Score_Sem-Phon_Glob" is the average between the semantic score and the "08_Sim_Score_Phon_Glob_Match"
-- see FAAL documentation for details ( https://github.com/MKilani/FAAL )
:param verbose: print data during execution (boolean)
-- default: False
:param semanticLevel: level of the semantic tags according to which the comparison is performed. The options, for now, are: "Level_01", "Level_02", "Level_03" (see ASeT algorithm for details)
:param dividers: dividers used to split meanings (array of strings [string, string])
-- default: [","]
:param selectBest: parameter according to which the algorithm selects the best matches among those identified by the ALeA on the basis of the other parameters
-- default: "07_Sim_Score_Phon_Corr_Match"
-- options: "07_Sim_Score_Phon_Corr_Match", "08_Sim_Score_Phon_Glob_Match", "09_Aver_Score_Sem-Phon_Corr", or "10_Aver_Score_Sem-Phon_Glob"
-- "07_Sim_Score_Phon_Corr_Match" uses the function "(((SumFeat) / (NrFeat * 7.71)) / (LenAlign * 4.77117)"
-- "09_Aver_Score_Sem-Phon_Corr" is the average between the semantic score and the "07_Sim_Score_Phon_Corr_Match"
-- "10_Aver_Score_Sem-Phon_Glob" is the average between the semantic score and the "08_Sim_Score_Phon_Glob_Match"
-- see FAAL documentation for details ( https://github.com/MKilani/FAAL )
:param selectBestThreshold: threshold for the parameter selectBest
-- default: 0.65
:param parseVow: this allows to decide if the phonetic comparison should take into consideration vowels or not. Ignoring vowels can be useful when dealing with unrelated or relatively distant languages, or with languages in which vowels are rather unstable and semantically secondary (e.g. Semitic languages)
-- default: True
"""
gateway = JavaGateway()
addition_app = gateway.entry_point
semanticSelectionDict_One = json.loads(json_semanticSelection_One)
semanticSelectionDict_Two = json.loads(json_semanticSelection_Two)
semanticSelectionDict = {}
SemanticIndex_ListTwo = {}
for key_Two in semanticSelectionDict_Two:
entryTwo = semanticSelectionDict_Two[key_Two]
ID_Token = entryTwo["00_ID_token"]
for match_ID in entryTwo["03_Matches"][semanticLevel]:
semantic_item_temp = entryTwo["03_Matches"][semanticLevel][match_ID]["11_Semantic_Field"]
ID_Cluster = entryTwo["03_Matches"][semanticLevel][match_ID]["05_ID_Cluster"]
if semantic_item_temp in SemanticIndex_ListTwo:
SemanticIndex_ListTwo[semantic_item_temp].append({"Key" : key_Two, "ID_token" : ID_Token, "ID_match" : match_ID, "ID_Cluster" : ID_Cluster})
else:
SemanticIndex_ListTwo[semantic_item_temp] = [{"Key" : key_Two, "ID_token" : ID_Token, "ID_match" : match_ID, "ID_Cluster" : ID_Cluster}]
#Combine lists
counterNewPairs = 0
print("*- Phonetic comparison -*")
print("-> Start")
# set up progress bar
indexBar = -1
print("Progress:")
for key_One in semanticSelectionDict_One:
indexBar = indexBar + 1
entry = semanticSelectionDict_One[key_One]
ID_Token_00 = entry["00_ID_token"]
Meaning_token_01 = entry["01_Meaning_token"]
Form_token_02 = entry["02_Form_token"]
last_match = list(entry["03_Matches"][semanticLevel].keys())[-1]
max_cluster_ID = entry["03_Matches"][semanticLevel][last_match]["05_ID_Cluster"]
for new_ID_cluster in range(0, max_cluster_ID+1):
new_entry = {}
new_entry["00_ID_token"] = ID_Token_00
new_entry["01_Meaning_token"] = Meaning_token_01
new_entry["02_Form_token"] = Form_token_02
new_match_count = 0
new_matches = {}
for match_ID in entry["03_Matches"][semanticLevel]:
if entry["03_Matches"][semanticLevel][match_ID]["05_ID_Cluster"] > new_ID_cluster:
continue
if entry["03_Matches"][semanticLevel][match_ID]["05_ID_Cluster"] <= new_ID_cluster:
semanticToMatch = entry["03_Matches"][semanticLevel][match_ID]["11_Semantic_Field"]
#new_match_count = 0
if semanticToMatch in SemanticIndex_ListTwo:
for matchTwo in SemanticIndex_ListTwo[semanticToMatch]:
progbar(indexBar, len(semanticSelectionDict_One) - 1, 20)
new_match = {}
if matchTwo["ID_Cluster"] <= new_ID_cluster:
entry_Two = semanticSelectionDict_Two[matchTwo["Key"]]
new_match["00_ID_Match"] = entry_Two["00_ID_token"]
new_match["01_Meaning_Match"] = entry_Two["01_Meaning_token"]
new_match["02_Form_Match"] = entry_Two["02_Form_token"]
new_match["03_Best_Match_Sem"] = [semanticToMatch, semanticToMatch]
new_match["05_ID_Cluster"] = new_ID_cluster
new_match["06_Sim_Score_Sem_Match"] = 1.0
new_match["11_Semantic_Field"] = semanticToMatch
new_matches[new_match_count] = new_match.copy()
new_match_count = new_match_count + 1
new_entry["03_Matches"] = {}
new_entry["03_Matches"][semanticLevel] = new_matches
semanticSelectionDict[counterNewPairs] = {}
semanticSelectionDict[counterNewPairs][new_ID_cluster] = new_entry
counterNewPairs = counterNewPairs + 1
print ()
print("-> Load Model")
# load the google word2vec model
temp_file = datapath(pathModel)
model = KeyedVectors.load(temp_file)
print("-> Model loaded")
counter = 0
for key_A in semanticSelectionDict:
for sem_Cluster in semanticSelectionDict[key_A]:
meaningRaw = semanticSelectionDict[key_A][sem_Cluster]['01_Meaning_token']
for divider in dividers:
meaningRaw = meaningRaw.replace(divider, "£")
meaningRaw = meaningRaw.replace(" ", " ")
meaningRaw = meaningRaw.replace(" ", " ")
meaningRaw = meaningRaw.replace(" ", " ")
meaningRaw = meaningRaw.replace("£ ", "£")
meaningRaw = meaningRaw.replace(" £", "£")
listMeaningsSplit = meaningRaw.split("£")
listMeanings =[]
for ID in range(0, len(listMeaningsSplit)):
listMeanings.append(listMeaningsSplit[ID].split(" "))
numberMatchesOutput = len(listMeanings)
print("-> Compile semantic index")
print (str(counter+1) + " of " + str(len(semanticSelectionDict)))
counter = counter + 1
index = WmdSimilarity(listMeanings, model, numberMatchesOutput)
print("-> Semantic index compiled")
for key_B in semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel]:
meaningToCheckRaw = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["01_Meaning_Match"]
for divider in dividers:
meaningToCheckRaw = meaningToCheckRaw.replace(divider, "£")
meaningToCheckRaw = meaningToCheckRaw.replace(" ", " ")
meaningToCheckRaw = meaningToCheckRaw.replace(" ", " ")
meaningToCheckRaw = meaningToCheckRaw.replace(" ", " ")
meaningToCheckRaw = meaningToCheckRaw.replace("£ ", "£")
meaningToCheckRaw = meaningToCheckRaw.replace(" £", "£")
meaningToCheck = meaningToCheckRaw.split("£")
bestResult = 0.0
bestMatch = ["", ""]
for meaning in meaningToCheck:
query = [meaning]
resultsQuery = index[query]
resultsQueryWithIndexes = list(enumerate(resultsQuery))
if len(resultsQueryWithIndexes) > 0:
if resultsQueryWithIndexes[0][1][1] > bestResult:
bestResult = resultsQueryWithIndexes[0][1][1]
bestMatch = []
bestMatch.append(" ".join(listMeanings[resultsQueryWithIndexes[0][1][0]]))
bestMatch.append(meaning)
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["06_Sim_Score_Sem_Match"] = bestResult
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["03_Best_Match_Sem"] = bestMatch
#semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['09_Aver_Score_Sem-Phon_Corr'] = (semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["07_Sim_Score_Phon_Corr_Match"] + semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["06_Sim_Score_Sem_Match"]) / 2
print("*- Phonetic comparison -*")
print("-> Start")
# set up progress bar
indexBar = -1
print ("Progress:")
for key_A in semanticSelectionDict:
for sem_Cluster in semanticSelectionDict[key_A]:
indexBar = indexBar + 1
progbar(indexBar, len(semanticSelectionDict) - 1, 20)
if semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel] == {}:
continue
ID_word_A = semanticSelectionDict[key_A][sem_Cluster]['00_ID_token']
meaning_word_A = semanticSelectionDict[key_A][sem_Cluster]['01_Meaning_token']
word_A_list = semanticSelectionDict[key_A][sem_Cluster]['02_Form_token']
#print (word_A)
previous_Key = ""
for key_B in semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel]:
if key_B == previous_Key:
continue
previous_Key = key_B
ID_word_B = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["00_ID_Match"]
meaning_word_B = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["01_Meaning_Match"]
word_B_list = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["02_Form_Match"]
resultsComparison = {}
IDBestMatch = []
# Compare phonetically with FAAL - when there is more than one variant, select the one providing the best alignment according to the selected score "score"
index_WordA = -1
for word_A in word_A_list:
index_WordA = index_WordA + 1
index_WordB = -1
for word_B in word_B_list:
index_WordB = index_WordB + 1
if parseVow == False:
noVowWord_A = removeVow(word_A)
noVowWord_B = removeVow(word_B)
resultsComparisonTemp = interfaceFAAL(noVowWord_A, noVowWord_B, addition_app)
else:
resultsComparisonTemp = interfaceFAAL(word_A, word_B, addition_app)
#indexBar = indexBar + 1
#progbar(indexBar, (len(semanticSelectionDict)*len(semanticSelectionDict[key_A])* len(semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel])*len(word_A_list)* len(word_B_list)) - 1, 20)
#print (resultsComparisonTemp)
if resultsComparison == {}:
resultsComparison = resultsComparisonTemp
IDBestMatch = []
IDBestMatch.append(index_WordA)
IDBestMatch.append(word_A)
IDBestMatch.append(index_WordB)
IDBestMatch.append(word_B)
else:
if resultsComparisonTemp[scoreAlignPhon] > resultsComparison[scoreAlignPhon]:
resultsComparison = resultsComparisonTemp
IDBestMatch = []
IDBestMatch.append(index_WordA)
IDBestMatch.append(word_A)
IDBestMatch.append(index_WordB)
IDBestMatch.append(word_B)
#phoneticSelectionFile = open("/Users/iome/Desktop/dataTLA/lemmata/phonetics.txt", "a+")
#phoneticSelectionFile.write(key_A + "||" + key_B + "||" + resultsComparison + "||" + IDBestMatch + "\n")
#phoneticSelectionFile.close()
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['12_ResultsComp'] = resultsComparison
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['04_Best_Match_Phon'] = IDBestMatch
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['07_Sim_Score_Phon_Corr_Match'] = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["12_ResultsComp"]["bestAlignCorrected"]
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['08_Sim_Score_Phon_Glob_Match'] = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["12_ResultsComp"]["bestAlignGlobal"]
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['09_Aver_Score_Sem-Phon_Corr'] = (semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["07_Sim_Score_Phon_Corr_Match"] + semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["06_Sim_Score_Sem_Match"])/2
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]['10_Aver_Score_Sem-Phon_Glob'] = (semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["08_Sim_Score_Phon_Glob_Match"] + semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][key_B]["06_Sim_Score_Sem_Match"]) / 2
print ()
# set up progress bar
indexBar = -1
print("Progress:")
semanticSelectionDict_ordered = {}
for key_A in semanticSelectionDict:
indexBar = indexBar + 1
progbar(indexBar, len(semanticSelectionDict) - 1, 20)
if key_A not in semanticSelectionDict_ordered:
semanticSelectionDict_ordered[key_A] = {}
temporaryEntries = []
for sem_Cluster in semanticSelectionDict[key_A]:
if sem_Cluster not in semanticSelectionDict_ordered[key_A]:
semanticSelectionDict_ordered[key_A][sem_Cluster] = {}
semanticSelectionDict_ordered[key_A][sem_Cluster]["00_ID_token"] = semanticSelectionDict[key_A][sem_Cluster]["00_ID_token"]
semanticSelectionDict_ordered[key_A][sem_Cluster]["01_Meaning_token"] = semanticSelectionDict[key_A][sem_Cluster]["01_Meaning_token"]
semanticSelectionDict_ordered[key_A][sem_Cluster]["02_Form_token"] = semanticSelectionDict[key_A][sem_Cluster]["02_Form_token"]
if semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel] == {}:
semanticSelectionDict_ordered[key_A][sem_Cluster]["03_Matches"] = {}
semanticSelectionDict_ordered[key_A][sem_Cluster]["03_Matches"][semanticLevel] = semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel]
continue
for n in range(0, len(semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel])):
if len(temporaryEntries) == 0:
temporaryEntries.append(semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][0])
else:
if semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n][scoreAlignPhon] >= temporaryEntries[0][scoreAlignPhon]:
temporaryEntries.insert(0, semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n])
elif semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n][scoreAlignPhon] < \
temporaryEntries[-1][scoreAlignPhon]:
temporaryEntries.append(semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n])
else:
for z in range(1, len(temporaryEntries)):
if semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n]\
[scoreAlignPhon] < temporaryEntries[z-1][scoreAlignPhon] and \
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n] \
[scoreAlignPhon] >= temporaryEntries[z][scoreAlignPhon]:
#if not semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n]\
# [scoreAlignPhon] < temporaryEntries[z-1][scoreAlignPhon] and \
# semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n] \
# ["00_ID_Match"] == temporaryEntries[z]["00_ID_Match"]:
temporaryEntries.insert(z,semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][n])
break
semanticSelectionDict_ordered[key_A][sem_Cluster]["03_Matches"] = {}
semanticSelectionDict_ordered[key_A][sem_Cluster]["03_Matches"][semanticLevel] = {}
temporaryEntriesCleaned = []
#remove doubles from temporary entry
doubleEntry = False
for temporaryEntry in temporaryEntries:
for temporaryEntryCleaned in temporaryEntriesCleaned:
if temporaryEntry["00_ID_Match"] == temporaryEntryCleaned["00_ID_Match"]:
doubleEntry = True
if doubleEntry == False:
temporaryEntriesCleaned.append(copy.deepcopy(temporaryEntry))
doubleEntry = False
for ID in range (0, len(temporaryEntriesCleaned)):
semanticSelectionDict_ordered[key_A][sem_Cluster]["03_Matches"][semanticLevel][ID] = temporaryEntriesCleaned[ID]
json_semanticSelectionDict = json.dumps(semanticSelectionDict_ordered, sort_keys=True, indent=3, ensure_ascii=False)
#print(json_semanticSelectionDict)
print()
print("-> End")
print()
# set up progress bar
indexBar = -1
print("Select top matches - Progress:")
semanticSelectionDict = json.loads(json_semanticSelectionDict)
semanticSelectionDict_ordered_best = {}
resultsSimplified = []
resultsSimplifiedString = ""
for key_A in semanticSelectionDict:
indexBar = indexBar + 1
progbar(indexBar, len(semanticSelectionDict) - 1, 20)
if key_A not in semanticSelectionDict_ordered_best:
semanticSelectionDict_ordered_best[key_A] = {}
temporaryEntries = []
counter = 0
for sem_Cluster in semanticSelectionDict[key_A]:
if sem_Cluster not in semanticSelectionDict_ordered_best[key_A]:
semanticSelectionDict_ordered_best[key_A][sem_Cluster] = {}
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["00_ID_token"] = \
semanticSelectionDict[key_A][sem_Cluster]["00_ID_token"]
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["01_Meaning_token"] = \
semanticSelectionDict[key_A][sem_Cluster]["01_Meaning_token"]
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["02_Form_token"] = \
semanticSelectionDict[key_A][sem_Cluster]["02_Form_token"]
if semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel] == {}:
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"] = {}
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel] = \
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel]
continue
for n in range(0, len(semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel])):
if len(temporaryEntries) == 0:
temporaryEntries.append(
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(0)])
else:
if semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(n)][
selectBest] > selectBestThreshold:
temporaryEntries.append(
semanticSelectionDict[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(n)])
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"] = {}
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel] = {}
for ID in range(0, len(temporaryEntries)):
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(ID)] = copy.deepcopy(temporaryEntries[ID])
resultsSimplifiedString = resultsSimplifiedString + "Cluster: " + str(sem_Cluster) + " :: " + str(semanticSelectionDict_ordered_best[key_A][sem_Cluster]["00_ID_token"]) + " - '" + ", ".join(semanticSelectionDict[key_A][sem_Cluster]["02_Form_token"]) + "' - " + \
semanticSelectionDict[key_A][sem_Cluster]["01_Meaning_token"] + " :: " + str(semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(ID)]["00_ID_Match"]) + " - '" + ", ".join(semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(ID)]["02_Form_Match"]) + "' - " + \
semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(ID)]["01_Meaning_Match"] + " :: " + str(semanticSelectionDict_ordered_best[key_A][sem_Cluster]["03_Matches"][semanticLevel][str(ID)][selectBest]) + "\n"
resultsSimplifiedString = resultsSimplifiedString + "---------\n"
if verbose == True:
print()
print()
print(resultsSimplifiedString)
json_semanticSelectionDict_best = json.dumps(semanticSelectionDict_ordered_best, sort_keys=True, indent=3, ensure_ascii=False)
Results = open(
pathOutput + ".json",
"w") #
Results.write(json_semanticSelectionDict)
Results.close()
ResultsBest = open(
pathOutput + "_best_" + str(selectBestThreshold) + ".json",
"w") #
ResultsBest.write(json_semanticSelectionDict_best)
ResultsBest.close()
ResultsBestSimplified = open(
pathOutput + "_bestSimplified_" + str(selectBestThreshold) + ".txt",
"w") #
ResultsBestSimplified.write(resultsSimplifiedString)
ResultsBestSimplified.close()
return json_semanticSelectionDict, json_semanticSelectionDict_best, resultsSimplifiedString
def removeVow(wordToParse):
wordToParse = "$" + wordToParse + "$"
wordToParse = wordToParse.replace("̯", "")
wordToParse = wordToParse.replace("͜", "")
wordToParse = wordToParse.replace("i:", "i")
wordToParse = wordToParse.replace("y:", "y")
wordToParse = wordToParse.replace("ɨ:", "ɨ")
wordToParse = wordToParse.replace("ʉ:", "ʉ")
wordToParse = wordToParse.replace("ɯ:", "ɯ")
wordToParse = wordToParse.replace("u:", "u")
wordToParse = wordToParse.replace("ɪ:", "ɪ")
wordToParse = wordToParse.replace("ʏ:", "ʏ")
wordToParse = wordToParse.replace("ʊ:", "ʊ")
wordToParse = wordToParse.replace("e:", "e")
wordToParse = wordToParse.replace("ø:", "ø")
wordToParse = wordToParse.replace("ɘ:", "ɘ")
wordToParse = wordToParse.replace("ɵ:", "ɵ")
wordToParse = wordToParse.replace("ɤ:", "ɤ")
wordToParse = wordToParse.replace("o:", "o")
wordToParse = wordToParse.replace("ɛ:", "ɛ")
wordToParse = wordToParse.replace("œ:", "œ")
wordToParse = wordToParse.replace("ə:", "ə")
wordToParse = wordToParse.replace("ɞ:", "ɞ")
wordToParse = wordToParse.replace("ʌ:", "ʌ")
wordToParse = wordToParse.replace("ɔ:", "ɔ")
wordToParse = wordToParse.replace("æ:", "æ")
wordToParse = wordToParse.replace("ɶ:", "ɶ")
wordToParse = wordToParse.replace("a:", "a")
wordToParse = wordToParse.replace("ɑ:", "ɑ")
wordToParse = wordToParse.replace("ɒ:", "ɒ")
wordToParse = wordToParse.replace("ɐ:", "ɐ")
wordToParse = wordToParse.replace("ɜ:", "ɜ")
wordToParse = wordToParse.replace("$i", "ʔ")
wordToParse = wordToParse.replace("$y", "ʔ")
wordToParse = wordToParse.replace("$ɨ", "ʔ")
wordToParse = wordToParse.replace("$ʉ", "ʔ")
wordToParse = wordToParse.replace("$ɯ", "ʔ")
wordToParse = wordToParse.replace("$u", "ʔ")
wordToParse = wordToParse.replace("$ɪ", "ʔ")
wordToParse = wordToParse.replace("$ʏ", "ʔ")
wordToParse = wordToParse.replace("$ʊ", "ʔ")
wordToParse = wordToParse.replace("$e", "ʔ")
wordToParse = wordToParse.replace("$ø", "ʔ")
wordToParse = wordToParse.replace("$ɘ", "ʔ")
wordToParse = wordToParse.replace("$ɵ", "ʔ")
wordToParse = wordToParse.replace("$ɤ", "ʔ")
wordToParse = wordToParse.replace("$o", "ʔ")
wordToParse = wordToParse.replace("$ɛ", "ʔ")
wordToParse = wordToParse.replace("$œ", "ʔ")
wordToParse = wordToParse.replace("$ə", "ʔ")
wordToParse = wordToParse.replace("$ɞ", "ʔ")
wordToParse = wordToParse.replace("$ʌ", "ʔ")
wordToParse = wordToParse.replace("$ɔ", "ʔ")
wordToParse = wordToParse.replace("$æ", "ʔ")
wordToParse = wordToParse.replace("$ɶ", "ʔ")
wordToParse = wordToParse.replace("$a", "ʔ")
wordToParse = wordToParse.replace("$ɑ", "ʔ")
wordToParse = wordToParse.replace("$ɒ", "ʔ")
wordToParse = wordToParse.replace("$ɐ", "ʔ")
wordToParse = wordToParse.replace("$ɜ", "ʔ")
wordToParse = wordToParse.replace("i$", "ʔ")
wordToParse = wordToParse.replace("y$", "ʔ")
wordToParse = wordToParse.replace("ɨ$", "ʔ")
wordToParse = wordToParse.replace("ʉ$", "ʔ")
wordToParse = wordToParse.replace("ɯ$", "ʔ")
wordToParse = wordToParse.replace("u$", "ʔ")
wordToParse = wordToParse.replace("ɪ$", "ʔ")
wordToParse = wordToParse.replace("ʏ$", "ʔ")
wordToParse = wordToParse.replace("ʊ$", "ʔ")
wordToParse = wordToParse.replace("e$", "ʔ")
wordToParse = wordToParse.replace("ø$", "ʔ")
wordToParse = wordToParse.replace("ɘ$", "ʔ")
wordToParse = wordToParse.replace("ɵ$", "ʔ")
wordToParse = wordToParse.replace("ɤ$", "ʔ")
wordToParse = wordToParse.replace("o$", "ʔ")
wordToParse = wordToParse.replace("ɛ$", "ʔ")
wordToParse = wordToParse.replace("œ$", "ʔ")
wordToParse = wordToParse.replace("ə$", "ʔ")
wordToParse = wordToParse.replace("ɞ$", "ʔ")
wordToParse = wordToParse.replace("ʌ$", "ʔ")
wordToParse = wordToParse.replace("ɔ$", "ʔ")
wordToParse = wordToParse.replace("æ$", "ʔ")
wordToParse = wordToParse.replace("ɶ$", "ʔ")
wordToParse = wordToParse.replace("a$", "ʔ")
wordToParse = wordToParse.replace("ɑ$", "ʔ")
wordToParse = wordToParse.replace("ɒ$", "ʔ")
wordToParse = wordToParse.replace("ɐ$", "ʔ")
wordToParse = wordToParse.replace("ɜ$", "ʔ")
wordToParse = wordToParse.replace("$", "")
wordToParse = wordToParse.replace("i", "")
wordToParse = wordToParse.replace("y", "")
wordToParse = wordToParse.replace("ɨ", "")
wordToParse = wordToParse.replace("ʉ", "")
wordToParse = wordToParse.replace("ɯ", "")
wordToParse = wordToParse.replace("u", "")
wordToParse = wordToParse.replace("ɪ", "")
wordToParse = wordToParse.replace("ʏ", "")
wordToParse = wordToParse.replace("ʊ", "")
wordToParse = wordToParse.replace("e", "")
wordToParse = wordToParse.replace("ø", "")
wordToParse = wordToParse.replace("ɘ", "")
wordToParse = wordToParse.replace("ɵ", "")
wordToParse = wordToParse.replace("ɤ", "")
wordToParse = wordToParse.replace("o", "")
wordToParse = wordToParse.replace("ɛ", "")
wordToParse = wordToParse.replace("œ", "")
wordToParse = wordToParse.replace("ə", "")
wordToParse = wordToParse.replace("ɞ", "")
wordToParse = wordToParse.replace("ʌ", "")
wordToParse = wordToParse.replace("ɔ", "")
wordToParse = wordToParse.replace("æ", "")
wordToParse = wordToParse.replace("ɶ", "")
wordToParse = wordToParse.replace("a", "")
wordToParse = wordToParse.replace("ɑ", "")
wordToParse = wordToParse.replace("ɒ", "")
wordToParse = wordToParse.replace("ɐ", "")
wordToParse = wordToParse.replace("ɜ", "")
return wordToParse |
the-stack_106_28778 | from collections import deque
from itertools import permutations
def calc(lhs: str, rhs: str) -> str:
current_op = lhs[-1]
tail_op = rhs[-1]
lhs = lhs[:-1]
rhs = rhs[:-1]
ret = 0
if current_op == '*':
ret = int(lhs) * int(rhs)
elif current_op == '-':
ret = int(lhs) - int(rhs)
elif current_op == '+':
ret = int(lhs) + int(rhs)
return '{}{}'.format(ret, tail_op)
def solution(expression: str) -> int:
answer = 0
exp_args = []
s = 0
for idx, ch in enumerate(expression):
if '0' <= ch <= '9':
continue
else:
exp_args.append(expression[s:idx + 1])
s = idx + 1
else:
exp_args.append('{}.'.format(expression[s:]))
ops = ['-', '+', '*']
for op in ops:
if op not in expression:
ops.remove(op)
pri = permutations(ops)
for op_orders in pri:
exp_dq = deque(exp_args)
for op in op_orders:
is_new_arg = True
while is_new_arg:
is_new_arg = False
args_num = len(exp_dq)
cnt = 0
while cnt < args_num:
lhs = exp_dq.popleft()
cnt += 1
if op == lhs[-1]:
rhs = exp_dq.popleft()
cnt += 1
exp_dq.appendleft(calc(lhs, rhs))
is_new_arg = True
else:
exp_dq.append(lhs)
r = exp_dq.popleft()
answer = max(answer, abs(int(r[:-1])))
return answer
if __name__ == '__main__':
result = solution(expression='100-200*300-500+20')
print(result)
|
the-stack_106_28779 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from federatedml.model_base import ModelBase
from federatedml.param.scorecard_param import ScorecardParam
from federatedml.util.consts import FLOAT_ZERO
from federatedml.util import LOGGER
from federatedml.feature.instance import Instance
class Scorecard(ModelBase):
def __init__(self):
super().__init__()
self.model_param = ScorecardParam()
self.metric_name = "scorecard"
self.metric_namespace = "train"
self.metric_type = "SCORECARD"
self.use_match_id = False
def _init_model(self, params):
self.model_param = params
self.method = params.method
self.offset = params.offset
self.factor = params.factor
self.factor_base = params.factor_base
self.upper_limit_ratio = params.upper_limit_ratio
self.lower_limit_value = params.lower_limit_value
self.need_run = params.need_run
@staticmethod
def compute_credit_score(result, offset, factor, factor_base, upper_limit_value, lower_limit_value,
use_match_id=False):
predict_result = result
if use_match_id:
predict_result = result.features
predict_score = predict_result[2]
# deal with special predict score values
if abs(predict_score - 0) <= FLOAT_ZERO and predict_score >= 0:
credit_score = upper_limit_value
elif abs(predict_score - 1) <= FLOAT_ZERO and predict_score > 0:
credit_score = lower_limit_value
elif predict_score > 1 or predict_score < 0:
credit_score = -1
else:
odds = (1 - predict_score) / predict_score
credit_score = offset + factor / np.log(factor_base) * np.log(odds)
# credit score should be within range
if credit_score > upper_limit_value:
credit_score = upper_limit_value
if credit_score < lower_limit_value:
credit_score = lower_limit_value
credit_score = round(credit_score, 2)
if use_match_id:
credit_result = copy.deepcopy(result)
credit_result.features = [predict_result[0], predict_result[1], predict_score, credit_score]
else:
credit_result = [predict_result[0], predict_result[1], predict_score, credit_score]
return credit_result
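# Worked example with hypothetical parameters: offset=500, factor=20,
# factor_base=2 and predict_score=0.2 give odds = (1 - 0.2) / 0.2 = 4 and
# credit_score = 500 + 20 / ln(2) * ln(4) = 540, before the upper/lower
# limit clamping applied above.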
def _set_summary(self):
formula = f"Score = {self.offset} + {self.factor} / ln({self.factor_base}) * ln(Odds)"
self.set_summary({"scorecard_compute_formula": formula})
LOGGER.info(f"Scorecard Computation Formula: {formula}")
def fit(self, prediction_result):
LOGGER.info(f"Start Scorecard Transform, method: {self.method}")
offset, factor, factor_base = self.offset, self.factor, self.factor_base
if factor_base != 2:
LOGGER.warning(f"scorecard param 'factor_base' given is {factor_base}, which is not equal to 2.")
upper_limit_value, lower_limit_value = self.upper_limit_ratio * offset, self.lower_limit_value
if isinstance(prediction_result.first()[1], Instance):
self.use_match_id = True
score_result = prediction_result.mapValues(lambda v: Scorecard.compute_credit_score(v, offset, factor,
factor_base,
upper_limit_value,
lower_limit_value,
self.use_match_id))
result_schema = copy.deepcopy(prediction_result.schema)
result_schema["header"] = ["label", "predict_result", "predict_score", "credit_score"]
"""
result_schema = {"header": ["label", "predict_result", "predict_score", "credit_score"],
"sid_name": schema.get('sid_name')}
"""
score_result.schema = result_schema
self._set_summary()
LOGGER.info(f"Finish Scorecard Transform!")
return score_result
|
the-stack_106_28780 | import torch
from e3nn.math import normalize2mom
from e3nn.util.jit import compile_mode
from e3nn.o3 import SO3Grid
@compile_mode('script')
class SO3Activation(torch.nn.Module):
r'''Apply a non-linearity to the signal on SO(3)
Parameters
----------
lmax_in : int
input lmax
lmax_out : int
output lmax
act : function
activation function :math:`\phi`
resolution : int
SO(3) grid resolution
normalization : {'norm', 'component'}
'''
def __init__(self, lmax_in, lmax_out, act, resolution, *, normalization='component', aspect_ratio=2):
super().__init__()
self.grid_in = SO3Grid(lmax_in, resolution, normalization=normalization, aspect_ratio=aspect_ratio)
self.grid_out = SO3Grid(lmax_out, resolution, normalization=normalization, aspect_ratio=aspect_ratio)
self.act = normalize2mom(act)
self.lmax_in = lmax_in
self.lmax_out = lmax_out
def __repr__(self):
return f"{self.__class__.__name__} ({self.lmax_in} -> {self.lmax_out})"
def forward(self, features):
r'''evaluate
Parameters
----------
features : `torch.Tensor`
tensor of shape ``(..., self.irreps_in.dim)``
Returns
-------
`torch.Tensor`
tensor of shape ``(..., self.irreps_out.dim)``
'''
features = self.grid_in.to_grid(features)
features = self.act(features)
features = self.grid_out.from_grid(features)
return features
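# Minimal usage sketch (shapes are illustrative and assume the flattened
# (2l+1)^2-per-degree layout used by SO3Grid):
#
#   import torch
#   act = SO3Activation(lmax_in=3, lmax_out=3, act=torch.tanh, resolution=20)
#   x = torch.randn(10, sum((2 * l + 1) ** 2 for l in range(3 + 1)))  # dim 84
#   y = act(x)  # same trailing dimension, since lmax_out == lmax_in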
|
the-stack_106_28782 | # Loading dependencies
import airflow
from airflow import DAG
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from datetime import date, timedelta
from datetime import datetime as dt
# Import script files which are going be executed as Tasks by the DAG
import folder_watch
# DAG unique identifier
DAG_ID = 'file_add_watcher'
# Datetime object to indicate at which time the DAG should start
DAG_START_DATE = airflow.utils.dates.days_ago(1)
# Scheduled interval at which the DAG will run
# here it will run once a day
DAG_SCHEDULE_INTERVAL = '@daily'
# Default arguments applied to the DAG
DAG_DEFAULT_ARGS = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': DAG_START_DATE,
'retries': 1,
'retry_delay': timedelta(minutes=1)
}
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
# Creating the DAG
with DAG(
DAG_ID,
default_args=DAG_DEFAULT_ARGS,
schedule_interval=DAG_SCHEDULE_INTERVAL
) as dag:
# Initialise a TriggerDagRunOperator that waits for a python callable
# to return true so it triggers the entry_point dag
trigger_entry_point = TriggerDagRunOperator(
task_id='trigger_entry_point_dag', python_callable=folder_watch.main, trigger_dag_id='entry_point', dag=dag, params={'loc': '/usr/local/airflow/PDFs'})
trigger_entry_point
|
the-stack_106_28783 | #!/usr/bin/env python
#from meter.features.context import packet_direction
#from features.context import packet_direction
from features.context.packet_direction import PacketDirection
def get_packet_flow_key(packet, direction) -> tuple:
"""Creates a key signature for a packet.
Summary:
Creates a key signature for a packet so it can be
assigned to a flow.
Args:
packet: A network packet
direction: The direction of a packet
Returns:
A tuple of the destination IPv4 address and source IPv4 address as strings,
the source port as an int, and
the destination port as an int.
"""
if 'TCP' in packet:
protocol = 'TCP'
elif 'UDP' in packet:
protocol = 'UDP'
else:
raise Exception('Only TCP and UDP protocols are supported.')
#if direction == packet_direction.FORWARD:
if direction == PacketDirection.FORWARD:
dest_ip = packet['IP'].dst
src_ip = packet['IP'].src
src_port = packet[protocol].sport
dest_port = packet[protocol].dport
else:
dest_ip = packet['IP'].src
src_ip = packet['IP'].dst
src_port = packet[protocol].dport
dest_port = packet[protocol].sport
return dest_ip, src_ip, src_port, dest_port
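# Usage sketch (assumes packets are read with scapy, which provides the
# 'IP'/'TCP'/'UDP' layer access used above):
#
#   from scapy.all import rdpcap
#   pkt = rdpcap("capture.pcap")[0]
#   key = get_packet_flow_key(pkt, PacketDirection.FORWARD)
#
# Note that a packet and its reply map to the same key (the fields are
# swapped symmetrically), so both directions of a conversation fall into
# the same flow.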
|
the-stack_106_28784 | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.11.1-SNAPSHOT
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class RegistryClientEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'revision': 'RevisionDTO',
'id': 'str',
'uri': 'str',
'position': 'PositionDTO',
'permissions': 'PermissionsDTO',
'bulletins': 'list[BulletinEntity]',
'disconnected_node_acknowledged': 'bool',
'component': 'RegistryDTO'
}
attribute_map = {
'revision': 'revision',
'id': 'id',
'uri': 'uri',
'position': 'position',
'permissions': 'permissions',
'bulletins': 'bulletins',
'disconnected_node_acknowledged': 'disconnectedNodeAcknowledged',
'component': 'component'
}
def __init__(self, revision=None, id=None, uri=None, position=None, permissions=None, bulletins=None, disconnected_node_acknowledged=None, component=None):
"""
RegistryClientEntity - a model defined in Swagger
"""
self._revision = None
self._id = None
self._uri = None
self._position = None
self._permissions = None
self._bulletins = None
self._disconnected_node_acknowledged = None
self._component = None
if revision is not None:
self.revision = revision
if id is not None:
self.id = id
if uri is not None:
self.uri = uri
if position is not None:
self.position = position
if permissions is not None:
self.permissions = permissions
if bulletins is not None:
self.bulletins = bulletins
if disconnected_node_acknowledged is not None:
self.disconnected_node_acknowledged = disconnected_node_acknowledged
if component is not None:
self.component = component
@property
def revision(self):
"""
Gets the revision of this RegistryClientEntity.
The revision for this request/response. The revision is required for any mutable flow requests and is included in all responses.
:return: The revision of this RegistryClientEntity.
:rtype: RevisionDTO
"""
return self._revision
@revision.setter
def revision(self, revision):
"""
Sets the revision of this RegistryClientEntity.
The revision for this request/response. The revision is required for any mutable flow requests and is included in all responses.
:param revision: The revision of this RegistryClientEntity.
:type: RevisionDTO
"""
self._revision = revision
@property
def id(self):
"""
Gets the id of this RegistryClientEntity.
The id of the component.
:return: The id of this RegistryClientEntity.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this RegistryClientEntity.
The id of the component.
:param id: The id of this RegistryClientEntity.
:type: str
"""
self._id = id
@property
def uri(self):
"""
Gets the uri of this RegistryClientEntity.
The URI for future requests to the component.
:return: The uri of this RegistryClientEntity.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this RegistryClientEntity.
The URI for future requests to the component.
:param uri: The uri of this RegistryClientEntity.
:type: str
"""
self._uri = uri
@property
def position(self):
"""
Gets the position of this RegistryClientEntity.
The position of this component in the UI if applicable.
:return: The position of this RegistryClientEntity.
:rtype: PositionDTO
"""
return self._position
@position.setter
def position(self, position):
"""
Sets the position of this RegistryClientEntity.
The position of this component in the UI if applicable.
:param position: The position of this RegistryClientEntity.
:type: PositionDTO
"""
self._position = position
@property
def permissions(self):
"""
Gets the permissions of this RegistryClientEntity.
The permissions for this component.
:return: The permissions of this RegistryClientEntity.
:rtype: PermissionsDTO
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""
Sets the permissions of this RegistryClientEntity.
The permissions for this component.
:param permissions: The permissions of this RegistryClientEntity.
:type: PermissionsDTO
"""
self._permissions = permissions
@property
def bulletins(self):
"""
Gets the bulletins of this RegistryClientEntity.
The bulletins for this component.
:return: The bulletins of this RegistryClientEntity.
:rtype: list[BulletinEntity]
"""
return self._bulletins
@bulletins.setter
def bulletins(self, bulletins):
"""
Sets the bulletins of this RegistryClientEntity.
The bulletins for this component.
:param bulletins: The bulletins of this RegistryClientEntity.
:type: list[BulletinEntity]
"""
self._bulletins = bulletins
@property
def disconnected_node_acknowledged(self):
"""
Gets the disconnected_node_acknowledged of this RegistryClientEntity.
Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: The disconnected_node_acknowledged of this RegistryClientEntity.
:rtype: bool
"""
return self._disconnected_node_acknowledged
@disconnected_node_acknowledged.setter
def disconnected_node_acknowledged(self, disconnected_node_acknowledged):
"""
Sets the disconnected_node_acknowledged of this RegistryClientEntity.
Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:param disconnected_node_acknowledged: The disconnected_node_acknowledged of this RegistryClientEntity.
:type: bool
"""
self._disconnected_node_acknowledged = disconnected_node_acknowledged
@property
def component(self):
"""
Gets the component of this RegistryClientEntity.
:return: The component of this RegistryClientEntity.
:rtype: RegistryDTO
"""
return self._component
@component.setter
def component(self, component):
"""
Sets the component of this RegistryClientEntity.
:param component: The component of this RegistryClientEntity.
:type: RegistryDTO
"""
self._component = component
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RegistryClientEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
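# Usage sketch (hypothetical values; assumes the swagger-generated keyword-argument constructor):
#     entity = RegistryClientEntity(disconnected_node_acknowledged=False)
#     payload = entity.to_dict()  # nested DTOs and lists are serialized via their own to_dict()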
|
the-stack_106_28787 | #!/usr/bin/env python3
import itertools
import logging
from reagent.core import aggregators as agg
from reagent.core.observers import IntervalAggregatingObserver, ValueListObserver
from reagent.reporting.reporter_base import ReporterBase
from reagent.workflow.training_reports import SlateQTrainingReport
logger = logging.getLogger(__name__)
class SlateQReporter(ReporterBase):
def __init__(self, report_interval: int = 100):
self.report_interval = report_interval
super().__init__(self.value_list_observers, self.aggregating_observers)
@property
def value_list_observers(self):
return {"cpe_results": ValueListObserver("cpe_details")}
@property
def aggregating_observers(self):
return {
name: IntervalAggregatingObserver(self.report_interval, aggregator)
for name, aggregator in itertools.chain(
[
("td_loss", agg.MeanAggregator("td_loss")),
("recent_rewards", agg.RecentValuesAggregator("logged_rewards")),
(
"logged_action_q_value",
agg.MeanAggregator("model_values_on_logged_actions"),
),
],
[
(
f"{key}_tb",
agg.TensorBoardHistogramAndMeanAggregator(key, log_key),
)
for key, log_key in [
("td_loss", "td_loss"),
("reward_loss", "reward_loss"),
("logged_rewards", "reward/logged"),
]
],
)
}
def generate_training_report(self) -> SlateQTrainingReport:
return SlateQTrainingReport()
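# Minimal usage sketch (assumption: the surrounding ReAgent training loop feeds the observers;
# the interval below is illustrative only):
#     reporter = SlateQReporter(report_interval=50)
#     # each IntervalAggregatingObserver then flushes its aggregator every 50 observed values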
|
the-stack_106_28789 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import logging
import argparse
import platform
import subprocess
os.environ["PYTHONUNBUFFERED"] = "y"
PY2 = sys.version_info[0] == 2
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib.zulip_tools import run, subprocess_text_output, OKBLUE, ENDC, WARNING
from scripts.lib.setup_venv import setup_virtualenv, VENV_DEPENDENCIES
from scripts.lib.node_cache import setup_node_modules, NPM_CACHE_PATH
from version import PROVISION_VERSION
if False:
from typing import Any
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
"xenial",
],
}
PY2_VENV_PATH = "/srv/zulip-venv"
PY3_VENV_PATH = "/srv/zulip-py3-venv"
VAR_DIR_PATH = os.path.join(ZULIP_PATH, 'var')
LOG_DIR_PATH = os.path.join(VAR_DIR_PATH, 'log')
UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'uploads')
TEST_UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'test_uploads')
COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'coverage')
LINECOVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'linecoverage-report')
NODE_TEST_COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'node-coverage')
# TODO: De-duplicate this with emoji_dump.py
EMOJI_CACHE_PATH = "/srv/zulip-emoji-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
EMOJI_CACHE_PATH = "/home/travis/zulip-emoji-cache"
if PY2:
VENV_PATH = PY2_VENV_PATH
else:
VENV_PATH = PY3_VENV_PATH
if not os.path.exists(os.path.join(ZULIP_PATH, ".git")):
print("Error: No Zulip git repository present!")
print("To setup the Zulip development environment, you should clone the code")
print("from GitHub, rather than using a Zulip production release tarball.")
sys.exit(1)
# Check the RAM on the user's system, and throw an error if <1.5GB.
# This avoids users getting segfaults running `pip install` that are
# generally more annoying to debug.
with open("/proc/meminfo") as meminfo:
ram_size = meminfo.readlines()[0].strip().split(" ")[-2]
ram_gb = float(ram_size) / 1024.0 / 1024.0
if ram_gb < 1.5:
print("You have insufficient RAM (%s GB) to run the Zulip development environment." % (
round(ram_gb, 2),))
print("We recommend at least 2 GB of RAM, and require at least 1.5 GB.")
sys.exit(1)
try:
run(["mkdir", "-p", VAR_DIR_PATH])
if os.path.exists(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')):
os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
os.symlink(
os.path.join(ZULIP_PATH, 'README.md'),
os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')
)
os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
except OSError as err:
print("Error: Unable to create symlinks. Make sure you have permission to create symbolic links.")
print("See this page for more information:")
print(" http://zulip.readthedocs.io/en/latest/dev-env-first-time-contributors.html#os-symlink-error")
sys.exit(1)
if platform.architecture()[0] == '64bit':
arch = 'amd64'
elif platform.architecture()[0] == '32bit':
arch = "i386"
else:
logging.critical("Only x86 is supported; ping [email protected] if you want another architecture.")
sys.exit(1)
# Ideally we wouldn't need to install a dependency here, before we
# know the codename.
subprocess.check_call(["sudo", "apt-get", "install", "-y", "lsb-release"])
vendor = subprocess_text_output(["lsb_release", "-is"])
codename = subprocess_text_output(["lsb_release", "-cs"])
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
logging.critical("Unsupported platform: {} {}".format(vendor, codename))
sys.exit(1)
POSTGRES_VERSION_MAP = {
"trusty": "9.3",
"xenial": "9.5",
}
POSTGRES_VERSION = POSTGRES_VERSION_MAP[codename]
UBUNTU_COMMON_APT_DEPENDENCIES = [
"closure-compiler",
"memcached",
"rabbitmq-server",
"redis-server",
"hunspell-en-us",
"supervisor",
"git",
"libssl-dev",
"yui-compressor",
"wget",
"ca-certificates", # Explicit dependency in case e.g. wget is already installed
"puppet", # Used by lint-all
"gettext", # Used by makemessages i18n
"curl", # Used for fetching PhantomJS as wget occasionally fails on redirects
"netcat", # Used for flushing memcached
] + VENV_DEPENDENCIES
APT_DEPENDENCIES = {
"trusty": UBUNTU_COMMON_APT_DEPENDENCIES + [
"postgresql-9.3",
"postgresql-9.3-tsearch-extras",
"postgresql-9.3-pgroonga",
],
"xenial": UBUNTU_COMMON_APT_DEPENDENCIES + [
"postgresql-9.5",
"postgresql-9.5-tsearch-extras",
"postgresql-9.5-pgroonga",
],
}
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/%s/tsearch_data/" % (POSTGRES_VERSION,)
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
LOUD = dict(_out=sys.stdout, _err=sys.stderr)
user_id = os.getuid()
def setup_shell_profile(shell_profile):
# type: (str) -> None
source_activate_command = "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),)
shell_profile_path = os.path.expanduser(shell_profile)
if os.path.exists(shell_profile_path):
with open(shell_profile_path, 'a+') as shell_profile_file:
if source_activate_command not in shell_profile_file.read():
shell_profile_file.writelines(source_activate_command)
else:
with open(shell_profile_path, 'w') as shell_profile_file:
shell_profile_file.writelines(source_activate_command)
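# For example, with the default Python 3 virtualenv this appends the line
# "source /srv/zulip-py3-venv/bin/activate" to the given profile if it is not already present.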
def main(options):
# type: (Any) -> int
# npm install and management commands expect to be run from the root of the
# project.
os.chdir(ZULIP_PATH)
# setup-apt-repo does an `apt-get update`
run(["sudo", "./scripts/lib/setup-apt-repo"])
run(["sudo", "apt-get", "-y", "install", "--no-install-recommends"] + APT_DEPENDENCIES[codename])
if options.is_travis:
if PY2:
MYPY_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "mypy.txt")
setup_virtualenv(PY3_VENV_PATH, MYPY_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', 'python3'])
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py2_dev.txt")
setup_virtualenv(PY2_VENV_PATH, DEV_REQS_FILE, patch_activate_script=True)
else:
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py3_dev.txt")
setup_virtualenv(VENV_PATH, DEV_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', 'python3'])
else:
# Import tools/setup_venv.py instead of running it so that we get an
# activated virtualenv for the rest of the provisioning process.
from tools.setup import setup_venvs
setup_venvs.main()
# Put Python2 virtualenv activation in .bash_profile.
setup_shell_profile('~/.bash_profile')
# Put Python2 virtualenv activation in .zprofile (for Zsh users).
setup_shell_profile('~/.zprofile')
run(["sudo", "cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])
# create log directory `zulip/var/log`
run(["mkdir", "-p", LOG_DIR_PATH])
# create upload directory `var/uploads`
run(["mkdir", "-p", UPLOAD_DIR_PATH])
# create test upload directory `var/test_uploads`
run(["mkdir", "-p", TEST_UPLOAD_DIR_PATH])
# create coverage directory `var/coverage`
run(["mkdir", "-p", COVERAGE_DIR_PATH])
# create linecoverage directory `var/linecoverage-report`
run(["mkdir", "-p", LINECOVERAGE_DIR_PATH])
# create node coverage directory `var/node-coverage`
run(["mkdir", "-p", NODE_TEST_COVERAGE_DIR_PATH])
if not os.path.isdir(EMOJI_CACHE_PATH):
run(["sudo", "mkdir", EMOJI_CACHE_PATH])
run(["sudo", "chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
run(["tools/setup/emoji/build_emoji"])
run(["scripts/setup/generate_secrets.py", "--development"])
run(["tools/update-authors-json", "--use-fixture"])
if options.is_travis and not options.is_production_travis:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
elif options.is_docker:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
run(["sudo", "pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
if not options.is_production_travis:
# These won't be used anyway
run(["scripts/setup/configure-rabbitmq"])
run(["tools/setup/postgres-init-dev-db"])
run(["tools/do-destroy-rebuild-database"])
# Need to set up Django before using is_template_database_current.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
import django
django.setup()
from zerver.lib.test_fixtures import is_template_database_current
if options.is_force or not is_template_database_current():
run(["tools/setup/postgres-init-test-db"])
run(["tools/do-destroy-rebuild-test-database"])
else:
print("No need to regenerate the test DB.")
run(["./manage.py", "compilemessages"])
# Here we install nvm, node, and npm.
run(["sudo", "scripts/lib/install-node"])
# This is a wrapper around `npm install`, which we run last since
# it can often fail due to network issues beyond our control.
try:
# Hack: We remove `node_modules` as root to work around an
# issue with the symlinks being improperly owned by root.
if os.path.islink("node_modules"):
run(["sudo", "rm", "-f", "node_modules"])
if not os.path.isdir(NPM_CACHE_PATH):
run(["sudo", "mkdir", NPM_CACHE_PATH])
run(["sudo", "chown", "%s:%s" % (user_id, user_id), NPM_CACHE_PATH])
setup_node_modules()
except subprocess.CalledProcessError:
print(WARNING + "`npm install` failed; retrying..." + ENDC)
setup_node_modules()
version_file = os.path.join(ZULIP_PATH, 'var/provision_version')
print('writing to %s\n' % (version_file,))
open(version_file, 'w').write(PROVISION_VERSION + '\n')
print()
print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
return 0
if __name__ == "__main__":
description = ("Provision script to install Zulip")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--force', action='store_true', dest='is_force',
default=False,
help="Ignore all provisioning optimizations.")
parser.add_argument('--travis', action='store_true', dest='is_travis',
default=False,
help="Provision for Travis but without production settings.")
parser.add_argument('--production-travis', action='store_true',
dest='is_production_travis',
default=False,
help="Provision for Travis but with production settings.")
parser.add_argument('--docker', action='store_true',
dest='is_docker',
default=False,
help="Provision for Docker.")
options = parser.parse_args()
sys.exit(main(options))
|
the-stack_106_28790 | import os
import pygame
import pygame.locals
from button import *
import globs
width = 864
height = 480
images = {
'player': pygame.image.load(os.path.join("data","sprites","player.png")),
'sky': pygame.image.load(os.path.join("data","tiles","sky.png")),
'border': pygame.image.load(os.path.join("data","tiles","border.png"))
}
globs.init()
def grid(x,y):
return (x*32,y*32)
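# e.g. grid(2, 3) -> (64, 96): converts a (column, row) tile position to pixel coordinates on the 32px grid.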
# Temporary class for testing purposes.
class drawHelper:
@staticmethod
def frame(screen):
#N
for x in range(27):
screen.blit(images['border'], grid(x,0))
#S
for x in range(27):
screen.blit(images['border'], grid(x,14))
#W
for y in range(1,14):
screen.blit(images['border'], grid(0,y))
#E
for y in range(1,14):
screen.blit(images['border'], grid(26,y))
@staticmethod
def sky(screen):
#sky
for x in range(27):
for y in range(15):
screen.blit(images['sky'], (grid(x,y)))
class Player(pygame.sprite.Sprite):
def __init__(self, pos = grid(1,1)):
self.speed = 8
self.xspeed = 0
self.yspeed = 0
self.rect = images['player'].get_rect(topleft=pos)
def event(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
self.yspeed = 0 - self.speed
if keys[pygame.K_DOWN]:
self.yspeed = self.speed
if keys[pygame.K_LEFT]:
self.xspeed = 0 - self.speed
if keys[pygame.K_RIGHT]:
self.xspeed = self.speed
def move(self):
self.rect.centerx += self.xspeed
self.rect.centery += self.yspeed
if not self.xspeed == 0:
if self.xspeed > 0:
self.xspeed -= 1
else:
self.xspeed += 1
if not self.yspeed == 0:
if self.yspeed > 0:
self.yspeed -= 1
else:
self.yspeed += 1
def draw(self, screen):
screen.blit(images['player'], self.rect)
def main():
print("game init")
pygame.init()
icon = pygame.image.load("icon_32x32.png")
pygame.display.set_icon(icon)
pygame.display.set_caption("pygame window")
screen = pygame.display.set_mode((width, height))
screen.fill((30,30,30))
fontFps = pygame.font.Font(None, 30)
font = pygame.font.Font(globs.fntSavate, 48)
button1 = startBtn(grid(11,5), "Start")
button2 = exitBtn(grid(11,8), "Exit")
running = True
clock = pygame.time.Clock()
player = Player(grid(2,2))
pygame.mixer.init()
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.load(os.path.join('data','music','titlescreen_final.ogg'))
pygame.mixer.music.play(-1)
# main loop
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
button1.event_handler(event)
button2.event_handler(event)
if globs.inGame:
#sky
drawHelper.sky(screen)
#player
player.event()
player.move()
player.draw(screen)
#ground
drawHelper.frame(screen)
else:
#sky
drawHelper.sky(screen)
#player
screen.blit(images['player'], grid(2,11))
#ground
drawHelper.frame(screen)
#title
title_shadow = font.render("DONT TOUCH THE BOMB", True, pygame.Color(50,50,50))
title_size_x = 578
title_size_y = 49
title_shadow = pygame.transform.smoothscale(title_shadow, (title_size_x // 5,title_size_y // 5))
title_shadow = pygame.transform.smoothscale(title_shadow, (title_size_x, title_size_y))
screen.blit(title_shadow, (155,60))
title = font.render("DONT TOUCH THE BOMB", True, pygame.Color('black'))
screen.blit(title, (145,50))
button1.draw(screen)
button2.draw(screen)
fps = fontFps.render(str(int(clock.get_fps())), True, pygame.Color('white'))
screen.blit(fps, (5, 5))
pygame.display.flip()
clock.tick(60)
#end
if __name__ == "__main__":
main()
|
the-stack_106_28791 | import pandas as pd;
import numpy as np;
datas = np.random.randint(10,100,(6,4));
print(datas);
df = pd.DataFrame(datas);
df.columns = ['score1','score2','score3','score4'];
df.columns = ['s1','s2','s3','s4'];
#df.index = pd.date_range('20210114', periods=6);
print(df); |
the-stack_106_28792 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from .layer_function_generator import templatedoc
from ..framework import Variable, in_dygraph_mode
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..core import VarDesc
__all__ = [
'sequence_conv',
'sequence_softmax',
'sequence_pool',
'sequence_concat',
'sequence_first_step',
'sequence_last_step',
'sequence_slice',
'sequence_expand',
'sequence_expand_as',
'sequence_pad',
'sequence_unpad',
'sequence_reshape',
'sequence_scatter',
'sequence_enumerate',
'sequence_mask',
'sequence_reverse',
]
@templatedoc()
def sequence_conv(input,
num_filters,
filter_size=3,
filter_stride=1,
padding=True,
padding_start=None,
bias_attr=None,
param_attr=None,
act=None,
name=None):
"""
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ).
This operator receives input sequences with variable length and other convolutional
configuration parameters (num_filters, filter_size) to apply the convolution operation.
It fills all-zero padding data on both sides of the sequence by default to ensure that
the output is the same length as the input. You can customize the padding behavior by
configuring the parameter :attr:`padding\_start` .
**Warning:** the parameter :attr:`padding` take no effect and will be deprecated in the future.
.. code-block:: text
Here we will illustrate the details of the padding operation:
For a mini-batch of 2 variable lengths sentences, containing 3, and 1 time-steps:
Assumed input (X) is a [4, N] float LoDTensor, and for the sake of simplicity, we assume N=2.
input.data = [[1, 1],
[2, 2],
[3, 3],
[4, 4]]
This is to say that input (X) has 4 words and the dimension of each word
representation is 2.
* Case1:
If padding_start is -1 and filter_size is 3.
The length of padding data is calculated as follows:
up_pad_len = max(0, -padding_start) = 1
down_pad_len = max(0, filter_size + padding_start - 1) = 1
The output of the input sequence after padding is:
data_aftet_padding = [[0, 0, 1, 1, 2, 2],
[1, 1, 2, 2, 3, 3],
[2, 2, 3, 3, 0, 0],
[0, 0, 4, 4, 0, 0]]
It will be multiplied by the filter weight to get the final output.
Assume num_filters = 3
output.data = [[ 0.3234, -0.2334, 0.7433],
[ 0.5646, 0.9464, -0.1223],
[-0.1343, 0.5653, 0.4555],
[ 0.9954, -0.1234, -0.1234]]
output.shape = [4, 3] # 3 = num_filters
output.lod = [[0, 3, 4]] # Remain the same
Args:
input (Variable): LoDTensor with shape :math:`(M, K)`, where M is the total time-step of mini-batch
and K is hidden_size of input. Only lod_level of 1 is supported. The data type should be float32 or
float64.
num_filters (int): the number of filters.
filter_size (int): the height of filter. Specified filter width is not supported, the width is
hidden_size by default. Default: 3.
filter_stride (int): stride of the filter. Currently only supports :attr:`stride` = 1.
padding (bool): the parameter :attr:`padding` take no effect and will be discarded in the
future. Currently, it will always pad input to make sure the length of the output is
the same as input whether :attr:`padding` is set true or false. Because the length of
input sequence may be shorter than :attr:`filter\_size`, which will cause the convolution
result to not be computed correctly. These padding data will not be trainable or updated
while training. Default: True.
padding_start (int): It is used to indicate the start index for padding the input
sequence, which can be negative. The negative number means to pad
:attr:`|padding_start|` time-steps of all-zero data at the beginning of each instance.
The positive number means to skip :attr:`padding_start` time-steps of each instance,
and it will pad :math:`filter\_size + padding\_start - 1` time-steps of all-zero data
at the end of the sequence to ensure that the output is the same length as the input.
If set None, the same length :math:`\\frac{filter\_size}{2}` of data will be filled
on both sides of the sequence. If set 0, the length of :math:`filter\_size - 1` data
is padded at the end of each input sequence. Default: None.
bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor with the same length as input. The data type is float32 or float64, which is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[-1, 10], dtype='float32', lod_level=1)
x_conved = fluid.layers.sequence_conv(input=x, num_filters=2, filter_size=3, padding_start=-1)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_conv', **locals())
dtype = helper.input_dtype()
filter_shape = [filter_size * input.shape[1], num_filters]
filter_param = helper.create_parameter(
attr=helper.param_attr, shape=filter_shape, dtype=dtype)
pre_bias = helper.create_variable_for_type_inference(dtype)
if padding_start is None:
padding_start = -int(filter_size // 2)
helper.append_op(
type='sequence_conv',
inputs={
'X': [input],
'Filter': [filter_param],
},
outputs={"Out": pre_bias},
attrs={
'contextStride': filter_stride,
'contextStart': padding_start,
'contextLength': filter_size,
})
pre_act = helper.append_bias_op(pre_bias)
return helper.append_activation(pre_act)
def sequence_softmax(input, use_cudnn=False, name=None):
"""
**Note**:
**The input type of the OP must be LoDTensor. For Tensor, use:** :ref:`api_fluid_layers_softmax`
A LoD-tensor can be regarded as several sequences, and this op apply softmax algo on each sequence.
The shape of input Tensor can be :math:`[N, 1]` or :math:`[N]`, where :math:`N`
is the sum of the length of all sequences. Recommended usage: :math:`[N]`.
For i-th sequence in a mini-batch:
.. math::
Out(X[lod[i]:lod[i+1]], :) = \\frac{\exp(X[lod[i]:lod[i+1], :])}{\sum(\exp(X[lod[i]:lod[i+1], :]))}
For example, for a LoD-Tensor with 6 sequences ([3, 2, 4, 1, 2, 3] - sequence length list in order),
the lod in the runtime is [[0, 3, 5, 9, 10, 12, 15]],
then softmax will be computed among :math:`X[0:3,:],X[3:5,:],X[5:9,:],X[9:10,:],X[10:12,:],X[12:15,:]`,
and :math:`N` turns out to be 15.
.. code-block:: text
*Case 1:
Given:
input.data = [0.7, 1, 0.6,
1.5, 1.1,
1.2, 0.2, 0.6, 1.9,
3.1,
2.5, 0.8,
0.1, 2.4, 1.3]
input.lod = [[0, 3, 5, 9, 10, 12, 15]]
then:
output.data = [0.30724832, 0.41474187, 0.2780098,
0.59868765, 0.40131235,
0.2544242, 0.09359743, 0.13963096, 0.5123474,
1.,
0.84553474, 0.15446526,
0.06995796, 0.69777346, 0.23226859]
output.lod = [[0, 3, 5, 9, 10, 12, 15]]
Args:
input (Variable):A LoDTensor with shape of :math:`[N, 1]` or :math:`[N]`, Recommended usage: :math:`[N]`.
Supported data types: float32, float64.
use_cudnn (bool, optional): Use cudnn kernel or not. Effective only when the cudnn version of the paddle
library is installed and GPU is used for training or reasoning. Default: False.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoD-Tensor which has the same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[7, 1],
dtype='float32', lod_level=1)
x_sequence_softmax_1 = fluid.layers.sequence_softmax(input=x)
y = fluid.data(name='y', shape=[7],
dtype='float32', lod_level=1)
x_sequence_softmax_2 = fluid.layers.sequence_softmax(input=y)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_softmax', **locals())
dtype = helper.input_dtype()
softmax_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="sequence_softmax",
inputs={"X": input},
outputs={"Out": softmax_out},
attrs={"use_cudnn": use_cudnn})
return softmax_out
def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
"""
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use pool2d Op.(fluid.layers.** :ref:`api_fluid_layers_pool2d` ).
This operator only supports LoDTensor as input. It will apply specified pooling
operation on the input LoDTensor. It pools features of all time-steps of each
sequence at the last lod_level using :attr:`pool_type` mentioned in the parameters,
such as sum, average, sqrt, etc.
It supports six pool_type:
- average: :math:`Out[i] = \\frac{\sum_i X_i}{N}`
- sum: :math:`Out[i] = \sum_jX_{ij}`
- sqrt: :math:`Out[i] = \\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}`
- max: :math:`Out[i] = max(X_i)`
- last: :math:`Out[i] = X_{N_i}`
- first: :math:`Out[i] = X_0`
where :math:`N_i` is the length of i-th input sequence.
.. code-block:: text
Case 1:
input is a 1-level LoDTensor and pad_value = 0.0:
input.lod = [[0, 2, 5, 7, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is LoDTensor:
out.shape = [4, 1]
with condition out.shape[0] == len(x.lod[-1]) - 1 == 4
for different pool_type:
average: out.data = [[2.], [4.], [3.], [0.0]], where 2.=(1. + 3.)/2, 4.=(2. + 4. + 6.)/3, 3.=(5. + 1.)/2
sum : out.data = [[4.], [12.], [6.], [0.0]], where 4.=1. + 3., 12.=2. + 4. + 6., 6.=5. + 1.
sqrt : out.data = [[2.82], [6.93], [4.24], [0.0]], where 2.82=(1. + 3.)/sqrt(2), 6.93=(2. + 4. + 6.)/sqrt(3), 4.24=(5. + 1.)/sqrt(2)
max : out.data = [[3.], [6.], [5.], [0.0]], where 3.=max(1., 3.), 6.=max(2., 4., 6.), 5.=max(5., 1.)
last : out.data = [[3.], [6.], [1.], [0.0]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.)
first : out.data = [[1.], [2.], [5.], [0.0]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.)
and all above [0.0] at last of out.data is padding data.
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequences with length info [1, 2];
The last sequence contains 3 subsequences with length info [1, 0, 3].
input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
If pool_typ = sum, it will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is LoDTensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
where out.shape[0] == len(x.lod[-1]) - 1 == 5
sum: out.data = [[1.], [5.], [4.], [0.0], [12.]]
where 1.=1., 5.=3. + 2., 4.=4., 0.0=pad_value, 12.=6. + 5. + 1.
Args:
input (variable): LoDTensor with lod_level no more than 2. The data type should be float32.
pool_type (str): The pooling type that supports average, sum, sqrt, max, last or first.
is_test (bool): Only works when :attr:`pool_type` is max. If set False, a temporary Tensor maxIndex is
created to record the index information corresponding to the maximum value, which is used for backward
gradient calculation in the training phase. Default: False.
pad_value (float): Used to pad the pooling result for empty input sequence. Default: 0.0
Returns:
Variable: LoDTensor after pooling with data type float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
avg_x = fluid.layers.sequence_pool(input=x, pool_type='average')
sum_x = fluid.layers.sequence_pool(input=x, pool_type='sum')
sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt')
max_x = fluid.layers.sequence_pool(input=x, pool_type='max')
last_x = fluid.layers.sequence_pool(input=x, pool_type='last')
first_x = fluid.layers.sequence_pool(input=x, pool_type='first')
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
max_index = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="sequence_pool",
inputs={"X": input},
outputs={"Out": pool_out,
"MaxIndex": max_index},
attrs={
"pooltype": pool_type.upper(),
"is_test": is_test,
"pad_value": pad_value
})
# when pool_type is max, variable max_index is initialized,
# so we stop the gradient explicitly here
if pool_type == 'max':
max_index.stop_gradient = True
return pool_out
@templatedoc()
def sequence_concat(input, name=None):
"""
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use concat Op.(fluid.layers.** :ref:`api_fluid_layers_concat` ).
This operator only supports LoDTensor as input. It concatenates the multiple LoDTensor from input by the LoD information,
and outputs the concatenated LoDTensor.
.. code-block:: text
input is a list of LoDTensor:
input = [x1, x2]
where:
x1.lod = [[0, 3, 5]]
x1.data = [[1], [2], [3], [4], [5]]
x1.shape = [5, 1]
x2.lod = [[0, 2, 4]]
x2.data = [[6], [7], [8], [9]]
x2.shape = [4, 1]
and should satisfy: len(x1.lod[0]) == len(x2.lod[0])
output is LoDTensor:
out.lod = [[0, 3+2, 5+4]]
out.data = [[1], [2], [3], [6], [7], [4], [5], [8], [9]]
out.shape = [9, 1]
Args:
input(list of Variable): List of LoDTensor to be concatenated. The length of each LoDTensor should be same.
The data type can be float32, float64 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Output the concatenated LoDTensor. The data type is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[-1, 10], dtype='float32', lod_level=1)
y = fluid.data(name='y', shape=[-1, 10], dtype='float32', lod_level=1)
out = fluid.layers.sequence_concat(input=[x, y])
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_concat', **locals())
check_type(input, 'input', list, 'fluid.layers.sequence_concat')
for i, input_x in enumerate(input):
check_variable_and_dtype(input_x, 'input[' + str(i) + ']',
['int64', 'float32', 'float64'],
'fluid.layers.sequence_concat')
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]})
return out
def sequence_first_step(input):
"""
This operator only supports LoDTensor as input. Given the input LoDTensor, it will
select first time-step feature of each sequence as output.
.. code-block:: text
Case 1:
input is 1-level LoDTensor:
input.lod = [[0, 2, 5, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is a LoDTensor:
out.shape = [3, 1]
out.shape[0] == len(x.lod[-1]) - 1 == 3
out.data = [[1.], [2.], [5.]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.)
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequences with length info [1, 2];
The last sequence contains 3 subsequences with length info [1, 0, 3].
input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
It will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is a LoDTensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
out.shape[0] == len(x.lod[-1]) - 1 == 5
out.data = [[1.], [3.], [4.], [0.0], [6.]]
where 1.=first(1.), 3.=first(3., 2.), 4.=first(4.), 0.0 = pad_value, 6.=first(6., 5., 1.)
Args:
input(Variable): LoDTensor with lod_level no more than 2. The data type should be float32.
Returns:
Variable: LoDTensor consist of the sequence's first step vector. The data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
x_first_step = fluid.layers.sequence_first_step(input=x)
"""
check_variable_and_dtype(input, 'input', ['float32'], 'sequence_first_step')
return sequence_pool(input=input, pool_type="first")
def sequence_last_step(input):
"""
This operator only supports LoDTensor as input. Given the input LoDTensor, it will
select last time-step feature of each sequence as output.
.. code-block:: text
Case 1:
input is 1-level LoDTensor:
input.lod = [[0, 2, 5, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is a LoDTensor:
out.shape = [3, 1]
out.shape[0] == len(x.lod[-1]) - 1 == 3
out.data = [[3.], [6.], [1.]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.)
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequence with length info [1, 2];
The last sequence contains 3 subsequence with length info [1, 0, 3].
input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
It will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is a LoDTensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
out.shape[0] == len(x.lod[-1]) - 1 == 5
out.data = [[1.], [2.], [4.], [0.0], [1.]]
where 1.=last(1.), 2.=last(3., 2.), 4.=last(4.), 0.0 = pad_value, 1=last(6., 5., 1.)
Args:
input(Variable): LoDTensor with lod_level no more than 2. The data type should be float32.
Returns:
Variable: LoDTensor consist of the sequence's last step vector. The data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
x_last_step = fluid.layers.sequence_last_step(input=x)
"""
check_variable_and_dtype(input, 'input', ['float32'], 'sequence_last_step')
return sequence_pool(input=input, pool_type="last")
def sequence_slice(input, offset, length, name=None):
"""
**Sequence Slice Layer**
The layer crops a subsequence from given sequence with given start
offset and subsequence length.
It only supports sequence data (LoDTensor with lod_level equal to 1).
.. code-block:: text
- Case:
Given the input Variable **input**:
input.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]],
input.lod = [[3, 2]],
input.dims = (5, 2),
with offset.data = [[0], [1]] and length.data = [[2], [1]],
the output Variable will be
out.data = [[a1, a2], [b1, b2], [e1, e2]],
out.lod = [[2, 1]],
out.dims = (3, 2).
Note:
The first dimension size of **input**, **offset** and **length**
should be equal. The **offset** should start from 0.
Args:
input(Variable): LoDTensor, The input Variable which consists of the complete
sequences.The data type is float32 or float64.
offset(Variable): LoDTensor, The offset to slice each sequence.The data
type is int32 or int64.
length(Variable): LoDTensor, The length of each subsequence.The data
type is int32 or int64.
name(str|None): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: The output subsequences.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
seqs = fluid.data(name='x', shape=[10, 5],
dtype='float32', lod_level=1)
offset = fluid.layers.assign(input=np.array([[0, 1]]).astype("int32"))
length = fluid.layers.assign(input=np.array([[2, 1]]).astype("int32"))
subseqs = fluid.layers.sequence_slice(input=seqs, offset=offset,
length=length)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper("sequence_slice", **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
offset.stop_gradient = True
length.stop_gradient = True
helper.append_op(
type="sequence_slice",
inputs={"X": input,
"Offset": offset,
"Length": length},
outputs={"Out": out})
return out
def sequence_expand(x, y, ref_level=-1, name=None):
"""Sequence Expand Layer. This layer will expand the input variable ``x`` \
according to specified level ``ref_level`` lod of ``y``. Please note that \
the lod level of ``x`` is at most 1. If the lod level of ``x`` is 1, then \
the size of lod of ``x`` must be equal to the length of ``ref_level`` lod \
of ``y``. If the lod level of ``x`` is 0, then the first dim of ``x`` should \
be equal to the size of ``ref_level`` of ``y``. The rank of **x** is at least 2. \
When rank of ``x`` is greater than 2, then it would be viewed as a 2-D tensor.
Please note that the input ``x`` should be LodTensor or Tensor, \
and input ``y`` must be LodTensor.
Following examples will explain how sequence_expand works:
.. code-block:: text
Case 1
Consider 2 sequences [a][b] and [c][d], now we want to expand them to [a][b], [a][b], [c][d] and [c][d].
Sequence [a][b] expand twice and [c][d] expands twice, so the lod which according to is [2, 2].
Input x is a 1-level LoDTensor:
x.lod = [[2, 2]] #lod based on length may be easier to understand
x.data = [[a], [b], [c], [d]]
x.dims = [4, 1]
input y is a LoDTensor:
y.lod = [[2, 2], #the 0th level lod, according to this level
[3, 3, 1, 1]] #the 1st level lod, it has nothing to do with this level
ref_level: 0
then output is a 1-level LoDTensor out:
out.lod = [[2, 2, 2, 2]] #lod based on offset
out.data = [[a], [b], [a], [b], [c], [d], [c], [d]]
out.dims = [8, 1]
Case 2
Consider 3 sequences [a], [b], [c], now we want to expand them to [a][a], [c][c][c].
It's obvious that the lod info of expanded sequences is [2, 0, 3].
x is a Tensor:
x.data = [[a], [b], [c]]
x.dims = [3, 1]
y is a LoDTensor:
y.lod = [[2, 0, 3]]
ref_level: -1
then output is a 1-level LodTensor:
out.data = [[a], [a], [c], [c], [c]]
out.dims = [5, 1]
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
dims ``[M, K]``. The lod level is at most 1. The data type should be \
float32, float64, int8, int32 or int64.
y (Variable): The input variable which is a LoDTensor, the lod level is \
at least 1.
ref_level (int): Lod level of ``y`` to be referred by ``x``. If set to -1, \
refer the last level of lod.
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The expanded variable which is a LoDTensor, with dims ``[N, K]``. \
``N`` depends on the lod info of ``x`` and ``y``. \
The data type is same as input.
Return Type: Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
x = fluid.data(name='x', shape=[4, 1], dtype='float32')
y = fluid.data(name='y', shape=[8, 1],
dtype='float32', lod_level=1)
out = layers.sequence_expand(x=x, y=y, ref_level=0)
exe = fluid.Executor(fluid.CPUPlace())
place = fluid.CPUPlace()
np_data = np.array([[1], [2], [3], [4]]).astype('float32')
x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place)
print(x_lod_tensor)
#lod: [[0, 2, 4]]
# dim: 4, 1
# layout: NCHW
# dtype: float
# data: [1 2 3 4]
np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32')
y_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2], [3,3,1,1]], place)
print(y_lod_tensor)
#lod: [[0, 2, 4][0, 3, 6, 7, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: int64_t
# data: [0 0 1 1 1 1 1 0]
out_main = exe.run(fluid.default_main_program(),
feed={'x': x_lod_tensor, 'y': y_lod_tensor},
fetch_list=[out], return_numpy=False)
print(out_main[0])
#lod: [[0, 2, 4, 6, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: float
# data: [1 2 1 2 3 4 3 4]
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_expand', input=x, **locals())
dtype = helper.input_dtype()
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='sequence_expand',
inputs={'X': x,
'Y': y},
outputs={'Out': tmp},
attrs={'ref_level': ref_level})
return tmp
def sequence_expand_as(x, y, name=None):
"""Sequence Expand As Layer. This OP will expand the input variable ``x`` \
according to the zeroth level lod of ``y``. Current implementation requires \
the level number of ``y``'s lod must be 1, and the first dimension of \
``x`` should be equal to the size of ``y``'s zeroth level lod, thus \
the expanded LodTensor has the same lod info as ``y``. The expanded result \
has nothing to do with ``x``'s lod, so the lod of Input(X) is not considered.
Please note that the input ``x`` should be LodTensor or Tensor, \
and input ``y`` must be LodTensor.
Following examples will explain how sequence_expand_as works:
.. code-block:: text
Case 1:
Consider 4 sequences [a], [b], [c], [d], now we want to expand them to [a][a][a], [b][b][b], [c] and [d].
It's obvious that the lod info of expanded sequences is [0, 3, 6, 7, 8].
Given a 1-level LodTensor ``x``:
x.data = [[a], [b], [c], [d]]
x.dims = [4, 1]
and input ``y``
y.lod = [[3, 3, 1, 1]] #lod based on length may be easier to understand
then we get 1-level LoDTensor out:
Out.lod = [[0, 3, 6, 7, 8]] #based on offset
Out.data = [[a], [a], [a], [b], [b], [b], [c], [d]]
Out.dims = [8, 1]
Case 2:
Given a common Tensor ``x``:
x.data = [[a, b], [c, d], [e, f]]
x.dims = [3, 2]
and input ``y``:
y.lod = [[0, 2, 3, 6]]
then we get a 1-level LoDTensor:
out.lod = [[0, 2, 3, 6]]
out.data = [[a, b], [a, b] [c, d], [e, f], [e, f], [e, f]]
out.dims = [6, 2]
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
dims ``[M, K]``. The data type should be float32, float64, int8, int32 \
or int64.
y (Variable): The input variable which is a LoDTensor with 1-level lod.
name (str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The expanded variable which is a LoDTensor with the dims ``[N, K]``. \
``N`` depends on the lod of ``y``, and the lod level must be 1. \
The data type is same as input.
Return Type: Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
x = fluid.data(name='x', shape=[4, 1], dtype='float32')
y = fluid.data(name='y', shape=[8, 1], dtype='float32', lod_level=1)
out = layers.sequence_expand_as(x=x, y=y)
exe = fluid.Executor(fluid.CPUPlace())
place = fluid.CPUPlace()
np_data = np.array([[1], [2], [3], [4]]).astype('float32')
x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place)
print(x_lod_tensor)
#lod: [[0, 2, 4]]
# dim: 4, 1
# layout: NCHW
# dtype: float
# data: [1 2 3 4]
np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32')
y_lod_tensor = fluid.create_lod_tensor(np_data, [[3,3,1,1]], place)
print(y_lod_tensor)
#lod: [[0, 3, 6, 7, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: int64_t
# data: [0 0 1 0 1 1 1 0]
out_main = exe.run(fluid.default_main_program(),
feed={'x': x_lod_tensor, 'y': y_lod_tensor},
fetch_list=[out], return_numpy=False)
print(out_main[0])
#lod: [[0, 3, 6, 7, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: float
# data: [1 1 1 2 2 2 3 4]
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_expand_as', input=x, **locals())
dtype = helper.input_dtype()
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='sequence_expand_as',
inputs={'X': x,
'Y': y},
outputs={'Out': tmp})
return tmp
def sequence_pad(x, pad_value, maxlen=None, name=None):
"""
This layer pads the sequences in the same batch to a common length (according \
to ``maxlen``). The padding value is defined by ``pad_value``, and will be \
appended to the tail of sequences. The result is a Python tuple ``(Out, Length)``: \
the LodTensor ``Out`` is the padded sequences, and LodTensor ``Length`` is \
the length information of input sequences. For removing padding data (unpadding \
operation), See :ref:`api_fluid_layers_sequence_unpad` .
Please note that the input ``x`` should be LodTensor.
.. code-block:: text
Case 1:
Given input 1-level LoDTensor x:
x.lod = [[0, 2, 5]]
x.data = [[a],[b],[c],[d],[e]]
pad_value:
pad_value.data = [0]
maxlen = 4
the output tuple (Out, Length):
Out.data = [[[a],[b],[0],[0]],[[c],[d],[e],[0]]]
Length.data = [2, 3] #Original sequences length
Case 2:
Given input 1-level LoDTensor x:
x.lod = [[0, 2, 5]]
x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]]
pad_value:
pad_value.data = [0]
default maxlen = None, (the virtual value is 3, according to the shape of x)
the output tuple (Out, Length):
Out.data = [[[a1,a2],[b1,b2],[0,0]],[[c1,c2],[d1,d2],[e1,e2]]]
Length.data = [2, 3]
Case 3:
Given input 1-level LoDTensor x:
x.lod = [[0, 2, 5]]
x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]]
pad_value:
pad_value.data = [p1,p2]
default maxlen = None, (the virtual value is 3)
get tuple (Out, Length):
Out.data = [[[a1,a2],[b1,b2],[p1,p2]],[[c1,c2],[d1,d2],[e1,e2]]]
Length.data = [2, 3]
Args:
x (Variable): Input 1-level LodTensor with dims ``[M, K]``. The batch \
size is described by the lod info (the number of sequences). \
The data type should be float32, float64, int8, int32 or int64.
pad_value (Variable): Padding value. It can be a scalar or a 1D tensor \
with length ``K``. If it's a scalar, it will be automatically broadcasted \
to a Tensor. The data type should be as same as ``x``.
maxlen (int, optional): The length of padded sequences, None by default. \
When it is None, all sequences will be padded up to the length of the \
longest one among them; when it a certain positive value, it must be \
greater than the length of the longest original sequence.
name (str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: A Python tuple (Out, Length): the 1st is a 0 level LodTensor \
``Out``, with the shape ``[batch_size, maxlen, K]``; the second is the original \
sequences' length info ``Length``, which should be a 0-level 1D LodTensor. \
The size of ``Length`` is equal to batch size, and the data type is int64.
Return Type: tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(
input=numpy.array([0.0], dtype=numpy.float32))
out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_pad', input=x, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_pad')
check_variable_and_dtype(pad_value, 'pad_value',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_pad')
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
length = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
pad_value.stop_gradient = True
length.stop_gradient = True
if maxlen is None:
maxlen = -1
helper.append_op(
type='sequence_pad',
inputs={'X': x,
'PadValue': pad_value},
outputs={'Out': out,
'Length': length},
attrs={'padded_length': maxlen})
return out, length
def sequence_unpad(x, length, name=None):
"""
**Note**:
**The input of the OP is Tensor and the output is LoDTensor. For padding operation, See:** :ref:`api_fluid_layers_sequence_pad`
The OP removes the padding data from the input based on the length information and returns a LoDTensor.
.. code-block:: text
Case 1:
Given input Variable **x**:
x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0]],
in which there are 3 sequences padded to length 5, and the actual length
specified by input Variable **length**:
length.data = [2, 3, 4],
after unpadding, the output Variable will be:
out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
out.lod = [[0, 2, 5, 9]]
Args:
x(Variable): A Tensor which contains padding data, and its shape size can not be less than 2.
Supported data types: float32, float64, int32, int64.
length(Variable): A 1D Tensor that stores the actual length of each sample, and the Tensor
has the same shape with the 0th dimension of the X . Supported data types: int64.
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoDTensor whose recursive sequence length is consistent with the information of the length parameter and it has the same data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# pad data
x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(input=numpy.array([0.0], dtype=numpy.float32))
pad_data, len = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
# unpad data
unpad_data = fluid.layers.sequence_unpad(x=pad_data, length=len)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_unpad', input=x, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_unpad')
check_variable_and_dtype(length, 'length', ['int64'],
'fluid.layers.sequence_unpad')
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
length.stop_gradient = True
helper.append_op(
type='sequence_unpad',
inputs={'X': x,
'Length': length},
outputs={'Out': out})
return out
def sequence_reshape(input, new_dim):
"""
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use reshape Op.(fluid.layers.** :ref:`api_fluid_layers_reshape` ).
This operator only supports LoDTensor as input. Given :attr:`new_dim` ,
it will compute new shape according to original length of each sequence,
original dimensions and :attr:`new_dim` . Then it will output a new LoDTensor
containing :attr:`new_dim` . Currently it only supports 1-level LoDTensor.
Please make sure that (original length * original dimensions) can be divided
by the :attr:`new_dim` with no remainder for each sequence.
.. code-block:: text
input is a LoDTensor:
input.lod = [[0, 2, 6]]
input.data = [[1, 2], [3, 4],
[5, 6], [7, 8],
[9, 10], [11, 12]]
input.shape = [6, 2]
set new_dim = 4
out is a LoDTensor:
out.lod = [[0, 1, 3]]
out.data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out.shape = [3, 4]
Args:
input (Variable): 1-level LoDTensor with shape :math:`[M, K]` . The data type should
be int32, int64, float32 or float64.
new_dim (int): New dimension that the input LoDTensor is reshaped to.
Returns:
Variable: Reshaped LoDTensor according to new dimension. The data type is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 16], dtype='float32', lod_level=1)
x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=4)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_reshape', **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_reshape')
out = helper.create_variable_for_type_inference(helper.input_dtype())
helper.append_op(
type='sequence_reshape',
inputs={'X': [input]},
outputs={'Out': [out]},
attrs={'new_dim': new_dim})
return out
def sequence_scatter(input, index, updates, name=None):
"""
**Note**:
**The index and updates parameters of the OP must be LoDTensor.**
Adds the updates data to the corresponding positions of the input according to the index.
The update rule is: output[instance_index][index[pos]] = input[instance_index][index[pos]] + updates[pos],
where instance_index is the index of the sample in the batch that position pos belongs to.
The value of output[i][j] depends on whether j can be found in the i+1th interval of the index. If found,
out[i][j] = input[i][j] + update[m] [n], otherwise, out[i][j] = input[i][j].
For example, in the following example, the lod information for index is divided into three sequences. Among
them, because the element 0 can be found in the first interval of the index, it is updated with the value of
the corresponding position of the updates, out[0][0] = input[0][0]+updates[0][0] . Because element 1 cannot
be found in the third interval of index, out[2][1] = input[2][1].
.. code-block:: text
*Case 1:
Given:
input.data = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
input.dims = [3, 6]
index.data = [[0], [1], [2], [5], [4], [3], [2], [1], [3], [2], [5], [4]]
index.lod = [[0, 3, 8, 12]]
updates.data = [[0.3], [0.3], [0.4], [0.1], [0.2], [0.3], [0.4], [0.0], [0.2], [0.3], [0.1], [0.4]]
updates.lod = [[ 0, 3, 8, 12]]
Then:
out.data = [[1.3, 1.3, 1.4, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.4, 1.3, 1.2, 1.1],
[1.0, 1.0, 1.3, 1.2, 1.4, 1.1]]
out.dims = X.dims = [3, 6]
Args:
input (Variable): A Tensor with shape of :math:`[N, k_1... k_n]`. Supported data types: float32, float64, int32, int64.
index (Variable): A LoDTensor contains index information. Its LoD level must be 1 and its data type must be int64.
updates (Variable): A LodTensor contains updates information. It has the same LoD level with the index and has the
same data type with the input. Supported data types: float32, float64, int32, int64.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A Tensor which has been updated. It has the same shape and data type with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data( name="x", shape=[None, 3, 6], dtype='float32' )
index = fluid.data( name='index', shape=[12, 1], dtype='int64', lod_level=1)
updates = fluid.data( name='updates', shape=[12, 1], dtype='float32', lod_level=1)
output = fluid.layers.sequence_scatter(input, index, updates)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_scatter', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="sequence_scatter",
inputs={"X": input,
"Ids": index,
"Updates": updates},
outputs={"Out": out})
return out
def sequence_enumerate(input, win_size, pad_value=0, name=None):
"""
Generate a new sequence for the input index sequence with \
shape ``[d_1, win_size]``, which enumerates all the \
sub-sequences with length ``win_size`` of the input with \
shape ``[d_1, 1]``, padding with ``pad_value`` if necessary during generation.
Please note that the `input` must be LodTensor.
.. code-block:: text
Input x:
x.lod = [[0, 3, 5]]
x.data = [[1], [2], [3], [4], [5]]
x.dims = [5, 1]
Attrs:
win_size = 2
pad_value = 0
Output:
out.lod = [[0, 3, 5]]
out.data = [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]]
out.dims = [5, 2]
Args:
input (Variable): The input variable which is an index sequence, \
which should be a LodTensor with shape ``[d_1, 1]`` and 1-level lod info. \
The data type should be float32, float64, int8, int32 or int64.
win_size (int): The window size for enumerating all sub-sequences.
pad_value (int, optional): The padding value, default 0.
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The enumerated sequence variable, which is a LoDTensor with \
shape ``[d_1, win_size]`` and 1-level LoD info. \
The data type is the same as ``input``.
Return Type: Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[-1, 1], dtype='int32', lod_level=1)
out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_enumerate', **locals())
out = helper.create_variable_for_type_inference(
helper.input_dtype(), stop_gradient=True)
helper.append_op(
type='sequence_enumerate',
inputs={'X': input},
outputs={'Out': out},
attrs={'win_size': win_size,
'pad_value': pad_value})
return out
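# Illustrative sketch (not part of the original module): a plain-Python reference of
# the sequence_enumerate semantics, assuming 1-level LoD offsets. For each position
# in a sequence it emits the next `win_size` ids, padded with `pad_value` past the
# end of that sequence.
def _sequence_enumerate_reference(data, lod, win_size, pad_value=0):
    out = []
    for i in range(len(lod) - 1):
        start, end = lod[i], lod[i + 1]
        for pos in range(start, end):
            window = [data[p] if p < end else pad_value
                      for p in range(pos, pos + win_size)]
            out.append(window)
    return out

# Reproduces the text-block example:
# _sequence_enumerate_reference([1, 2, 3, 4, 5], [0, 3, 5], win_size=2)
# -> [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]]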
def sequence_mask(x, maxlen=None, dtype='int64', name=None):
"""
**SequenceMask Layer**
This layer outputs a mask according to the input :code:`x` and
:code:`maxlen` with data type of :code:`dtype`.
Supposing :code:`x` is a Tensor with shape [d_1, d_2, ..., d_n], then
:code:`y` is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
.. math::
y(i_1, i_2,..., i_n, j) = (j < x(i_1, i_2,..., i_n))
.. code-block:: text
Case:
Consider input:
x = [3, 1, 1, 0] max_len = 4
then we get out:
mask = [[1, 1, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]]
Args:
x (Variable): Input tensor of sequence_mask layer, \
whose elements are integers less than :code:`maxlen`. \
Tensor or LodTensor with shape [d_1, d_2, ..., d_n].
maxlen (int, optional): Maximum length of the sequence. If :code:`maxlen` \
is None, it will be replaced with :math:`max(x)`.
dtype (np.dtype|core.VarDesc.VarType|str, optional): Data type of the output, \
``int64`` by default.
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The output sequence mask. Tensor or LodTensor with shape [d_1, d_2, ..., d_n, maxlen] \
and data type of :code:`dtype`. The data type should be float32, float64, int8, \
int32 or int64.
Return Type: Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
x = fluid.data(name='x', shape=[10], dtype='int64', lod_level=1)
mask = layers.sequence_mask(x=x)
"""
check_variable_and_dtype(x, 'x', ['int64'], 'sequence_mask')
check_dtype(dtype, 'dtype', ['int64'], 'sequence_mask')
helper = LayerHelper('sequence_mask', **locals())
out = helper.create_variable_for_type_inference(dtype=dtype)
inputs = {'X': [x]}
attrs = {'out_dtype': out.dtype}
if maxlen is not None:
if isinstance(maxlen, Variable):
inputs['MaxLenTensor'] = maxlen
else:
attrs['maxlen'] = maxlen
helper.append_op(
type='sequence_mask', inputs=inputs, outputs={'Y': out}, attrs=attrs)
out.stop_gradient = True
return out
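# Illustrative sketch (not part of the original module): the mask formula above,
# y(i, j) = (j < x(i)), written in plain Python for the 1-D case.
def _sequence_mask_reference(lengths, maxlen=None):
    maxlen = max(lengths) if maxlen is None else maxlen
    return [[1 if j < length else 0 for j in range(maxlen)] for length in lengths]

# _sequence_mask_reference([3, 1, 1, 0], maxlen=4)
# -> [[1, 1, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]]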
@templatedoc()
def sequence_reverse(x, name=None):
"""
**Note: This Op only accepts LoDTensor as input. If your input is a Tensor, please use the reverse Op** (:ref:`api_fluid_layers_reverse`).
This operator only supports LoDTensor as input. It will reverse each sequence for input LoDTensor.
Currently it only supports 1-level LoDTensor. This operator is very useful when building a
reverse :ref:`api_fluid_layers_DynamicRNN` network.
.. code-block:: text
input(x) is a LoDTensor:
x.lod = [[0, 2, 5]]
x.data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13,14, 15, 16],
[17,18, 19, 20]]
x.shape = [5, 4]
output LoDTensor with same shape and LoD info:
out.lod = [[0, 2, 5]]
out.data = [[5, 6, 7, 8],
[1, 2, 3, 4],
[17,18, 19, 20],
[13,14, 15, 16],
[9, 10, 11, 12]]
out.shape = [5, 4]
Args:
x(Variable): LoDTensor with 1-level LoD info. Currently it only supports 1-level LoDTensor.
The data type should be float32, float64, int8, int32 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor reversed from the input. The data type is the same as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
x_reversed = fluid.layers.sequence_reverse(x)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper("sequence_reverse", **locals())
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'int8', 'int32', 'int64'],
'fluid.layers.sequence_reverse')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sequence_reverse",
inputs={"X": x},
outputs={"Y": out},
attrs=dict())
return out
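# Illustrative sketch (not part of the original module): the sequence_reverse
# semantics in plain Python. Rows are reversed within each 1-level LoD interval,
# not across the whole tensor.
def _sequence_reverse_reference(rows, lod):
    out = []
    for i in range(len(lod) - 1):
        out.extend(reversed(rows[lod[i]:lod[i + 1]]))
    return out

# _sequence_reverse_reference([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
#                              [13, 14, 15, 16], [17, 18, 19, 20]], [0, 2, 5])
# reproduces the output shown in the text block above.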
|
the-stack_106_28793 | import re # used for split
modname = 'pyparams'
def super_split(string, delim):
ret = [i.strip() for i in string.split(delim)]
while ret.count(''):
ret.remove('')
return ret
def get_params(arglist):
ret = dict()
ret[''] = []
is_key = False
look_for_val = False
key = ''
for arg in arglist:
arg = arg.strip()
#print ("arg: ", arg)
is_key = arg[0] == '-'
if is_key:
# check if full named or abbreviated
#print ('\tkey')
if arg[0:2] == '--':
#print ("\t\tnamed key")
# Named Key
arg = arg[2:]
argtoks = super_split(arg,'=')
key = argtoks[0]
if len(argtoks) == 2:
#print ('\t\twith val: ', argtoks[1])
val = argtoks[1]
if ',' in val:
val = super_split(val, ',')
ret[key] = val
key = ''
elif len(argtoks) > 2:
print (modname, ":ERROR: Could not parse argument: ", arg)
else:
ret[key] = True
else:
#print ('\t\tabbrev key')
# Abbreviated Key
arg = arg[1:]
argtoks = arg.split('=')
is_value_present = len(argtoks) == 2
#if is_value_present:
#print('\t\twith val: ', argtoks[1])
for i in argtoks[0][0:-1]:
ret[i] = True
last_abbrev_key = argtoks[0][-1]
if not is_value_present:
ret[last_abbrev_key] = True
else:
val = argtoks[1].strip()
if ',' in val:
valtoks = super_split(val, ',')
ret[last_abbrev_key] = valtoks
else:
ret[last_abbrev_key] = val
#print ('set key: ', key, ' to val: ', val)
else:
#print ('\tvalue, current key is ', key)
# not key
if arg[0] == '"' and arg[-1] == '"':
arg = arg[1:-1]
if ',' in arg:
arg = super_split(arg, ',')
if key == '':
if isinstance(arg, list):
ret[''].extend(arg)
else:
ret[''].append(arg)
else:
ret[key] = arg
# reset the key to empty
key = ''
return ret
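# Illustrative usage sketch (assumed, not part of the original module): how get_params
# groups positional values, '--name=value' pairs, abbreviated '-x' flags and
# comma-separated lists into a single dict.
def _example_usage():
    parsed = get_params(['input.txt', '--mode=fast', '-v', '-o=a,b,c', 'extra'])
    # parsed == {'': ['input.txt', 'extra'], 'mode': 'fast', 'v': True, 'o': ['a', 'b', 'c']}
    return parsed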
def get_params_strict (argstr, param_dict):
return
def get_param_usage_string (param_dict):
print ("would print param dict here:")
print (param_dict)
return |
the-stack_106_28795 | """Represent fhir entity."""
from os import stat
from anvil.transformers.fhir import make_workspace_id, make_identifier
import logging
INSTITUTES = []
class Organization:
"""Create fhir entity."""
class_name = "organization"
resource_type = "Organization"
@staticmethod
def slug(resource):
"""Make id."""
return make_workspace_id(resource)
@staticmethod
def build_entity(workspace):
"""Create fhir entity."""
study_id = workspace.id
id = Organization.slug(workspace)
institute = workspace.institute
if not institute:
logging.getLogger(__name__).warning(f'workspace {workspace.id} missing institute')
parent = f'Organization/{workspace.attributes.reconciler_name.lower()}'
entity = {
"resourceType": Organization.resource_type,
"id": f"{id}",
"meta": {
"profile": [
"http://hl7.org/fhir/StructureDefinition/Organization"
]
},
"identifier": [
{
"system": "https://anvil.terra.bio/#workspaces/anvil-datastorage/",
"value": study_id,
},
{
"system": "urn:ncpi:unique-string",
"value": f"{id}",
},
{
"system": "anvil:consortium",
"value": f"{workspace.attributes.reconciler_name.lower()}",
}
],
"name": study_id,
}
if institute:
entity['identifier'].append(
{
"system": "anvil:institute",
"value": f"{workspace.institute.lower()}",
}
)
entity['partOf'] = {
"reference": parent
}
return entity
@staticmethod
def build_practitioner_org(workspace):
"""Create fhir entity."""
institute = workspace.institute
if not institute:
    logging.getLogger(__name__).warning(f'workspace {workspace.id} missing institute')
    institute = 'Unknown'
id = make_identifier(institute)
if id in INSTITUTES:
return None
INSTITUTES.append(id)
entity = {
"resourceType": Organization.resource_type,
"id": f"{id}",
"meta": {
"profile": [
"http://hl7.org/fhir/StructureDefinition/Organization"
]
},
"identifier": [
{
"system": f"https://anvil.terra.bio",
"value": id,
}
],
"name": institute,
'partOf': {
"reference": 'Organization/anvil'
}
}
return entity
@staticmethod
def build_consortium_org(workspace):
"""Create fhir entity."""
id = workspace.attributes.reconciler_name.lower()
if id in INSTITUTES:
return None
INSTITUTES.append(id)
entity = {
"resourceType": Organization.resource_type,
"id": f"{id}",
"meta": {
"profile": [
"http://hl7.org/fhir/StructureDefinition/Organization"
]
},
"identifier": [
{
"system": f"https://anvil.terra.bio",
"value": id,
}
],
"name": workspace.attributes.reconciler_name,
'partOf': {
"reference": 'Organization/anvil'
}
}
return entity
@staticmethod
def build_anvil_org():
"""Create fhir entity."""
id = 'anvil'
if id in INSTITUTES:
return None
INSTITUTES.append(id)
entity = {
"resourceType": Organization.resource_type,
"id": f"{id}",
"meta": {
"profile": [
"http://hl7.org/fhir/StructureDefinition/Organization"
]
},
"identifier": [
{
"system": f"https://anvil.terra.bio",
"value": id,
}
],
"name": 'AnVIL',
}
return entity
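# Illustrative usage sketch (assumed, not part of the original module): the builders
# above form a hierarchy of Organizations (AnVIL -> consortium -> institute) plus one
# Organization per workspace; a builder returns None when its org was already emitted.
def _example_org_hierarchy(workspace):
    entities = [
        Organization.build_anvil_org(),
        Organization.build_consortium_org(workspace),
        Organization.build_practitioner_org(workspace),
        Organization.build_entity(workspace),
    ]
    return [entity for entity in entities if entity is not None]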
|
the-stack_106_28797 | # Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from oslo_log import log as logging
import six
from cinder import context
from cinder.i18n import _LW
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class EMCVMAXFCDriver(driver.FibreChannelDriver):
"""EMC FC Drivers for VMAX using SMI-S.
Version history:
1.0.0 - Initial driver
1.1.0 - Multiple pools and thick/thin provisioning,
performance enhancement.
2.0.0 - Add driver requirement functions
2.1.0 - Add consistency group functions
2.1.1 - Fixed issue with mismatched config (bug #1442376)
2.1.2 - Clean up failed clones (bug #1440154)
2.1.3 - Fixed a problem with FAST support (bug #1435069)
2.2.0 - Add manage/unmanage
2.2.1 - Support for SE 8.0.3
2.2.2 - Update Consistency Group
2.2.3 - Pool aware scheduler(multi-pool) support
2.2.4 - Create CG from CG snapshot
2.3.0 - Name change for MV and SG for FAST (bug #1515181)
- Fix for randomly choosing port group. (bug #1501919)
- get_short_host_name needs to be called in find_device_number
(bug #1520635)
- Proper error handling for invalid SLOs (bug #1512795)
- Extend Volume for VMAX3, SE8.1.0.3
https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume
- Incorrect SG selected on an attach (#1515176)
- Cleanup Zoning (bug #1501938) NOTE: FC only
- Last volume in SG fix
- _remove_last_vol_and_delete_sg is not being called
for VMAX3 (bug #1520549)
- necessary updates for CG changes (#1534616)
- Changing PercentSynced to CopyState (bug #1517103)
- Getting iscsi ip from port in existing masking view
- Replacement of EMCGetTargetEndpoints api (bug #1512791)
- VMAX3 snapvx improvements (bug #1522821)
2.3.1 - VMAX2/VMAX3 iscsi multipath support (iscsi only)
2.3.2 - VMAX oversubscription Support (blueprint vmax-oversubscription)
2.3.3 - VMAX Driver - Live Migration for VMAX3 (bug #1587967)
"""
VERSION = "2.3.3"
def __init__(self, *args, **kwargs):
super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
self.common = emc_vmax_common.EMCVMAXCommon(
'FC',
self.VERSION,
configuration=self.configuration)
self.zonemanager_lookup_service = fczm_utils.create_lookup_service()
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
volpath = self.common.create_volume(volume)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
volpath = self.common.create_volume_from_snapshot(volume, snapshot)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volpath = self.common.create_cloned_volume(volume, src_vref)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def delete_volume(self, volume):
"""Deletes an EMC volume."""
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
volpath = self.common.create_snapshot(snapshot, volume)
model_update = {}
snapshot['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = snapshot['provider_location']
return model_update
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
self.common.delete_snapshot(snapshot, volume)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
}
}
"""
device_info = self.common.initialize_connection(
volume, connector)
device_number = device_info['hostlunid']
storage_system = device_info['storagesystem']
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector)
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': device_number,
'target_discovered': True,
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone addition: %(data)s.",
{'data': data})
return data
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:param volume: the volume object
:param connector: the connector object
:returns: dict -- the target_wwns and initiator_target_map if the
zone is to be removed, otherwise empty
"""
data = {'driver_volume_type': 'fibre_channel',
'data': {}}
loc = volume['provider_location']
name = ast.literal_eval(loc)
storage_system = name['keybindings']['SystemName']
LOG.debug("Start FC detach process for volume: %(volume)s.",
{'volume': volume['name']})
mvInstanceName = self.common.get_masking_view_by_volume(
volume, connector)
if mvInstanceName is not None:
portGroupInstanceName = (
self.common.get_port_group_from_masking_view(
mvInstanceName))
initiatorGroupInstanceName = (
self.common.get_initiator_group_from_masking_view(
mvInstanceName))
LOG.debug("Found port group: %(portGroup)s "
"in masking view %(maskingView)s.",
{'portGroup': portGroupInstanceName,
'maskingView': mvInstanceName})
# Map must be populated before the terminate_connection
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector)
self.common.terminate_connection(volume, connector)
LOG.debug("Looking for masking views still associated with "
"Port Group %s.", portGroupInstanceName)
# check if the initiator group has been deleted
checkIgInstanceName = (
self.common.check_ig_instance_name(initiatorGroupInstanceName))
# if it has not been deleted, check for remaining masking views
if checkIgInstanceName is not None:
mvInstances = self._get_common_masking_views(
portGroupInstanceName, initiatorGroupInstanceName)
if len(mvInstances) > 0:
LOG.debug("Found %(numViews)lu MaskingViews.",
{'numViews': len(mvInstances)})
data = {'driver_volume_type': 'fibre_channel',
'data': {}}
else: # no masking views found
LOG.debug("No MaskingViews were found. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
else: # The initiator group has been deleted
LOG.debug("Initiator Group has been deleted. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
else:
LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
{'volume': volume['name']})
return data
def _get_common_masking_views(
self, portGroupInstanceName, initiatorGroupInstanceName):
"""Check to see the existence of mv in list"""
mvInstances = []
mvInstancesByPG = self.common.get_masking_views_by_port_group(
portGroupInstanceName)
mvInstancesByIG = self.common.get_masking_views_by_initiator_group(
initiatorGroupInstanceName)
for mvInstanceByPG in mvInstancesByPG:
if mvInstanceByPG in mvInstancesByIG:
mvInstances.append(mvInstanceByPG)
return mvInstances
def _build_initiator_target_map(self, storage_system, volume, connector):
"""Build the target_wwns and the initiator target map."""
target_wwns = []
init_targ_map = {}
initiator_wwns = connector['wwpns']
if self.zonemanager_lookup_service:
fc_targets = self.common.get_target_wwns_from_masking_view(
storage_system, volume, connector)
mapping = (
self.zonemanager_lookup_service.
get_device_mapping_from_network(initiator_wwns, fc_targets))
for entry in mapping:
map_d = mapping[entry]
target_wwns.extend(map_d['target_port_wwn_list'])
for initiator in map_d['initiator_port_wwn_list']:
init_targ_map[initiator] = map_d['target_port_wwn_list']
else: # No lookup service, pre-zoned case.
target_wwns = self.common.get_target_wwns(storage_system,
connector)
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns
return list(set(target_wwns)), init_targ_map
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
:param refresh: boolean -- If True, run update the stats first.
:returns: dict -- the stats dict
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
data = self.common.update_volume_stats()
data['storage_protocol'] = 'FC'
data['driver_version'] = self.VERSION
self._stats = data
def migrate_volume(self, ctxt, volume, host):
"""Migrate a volume from one Volume Backend to another.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target(destination)
information
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
return self.common.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter.
:param host: the host dict holding the relevant
target(destination) information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
self.common.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.common.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
return self.common.create_cgsnapshot(context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots)
def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
"""
return self.common.manage_existing(volume, external_ref)
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing VMAX volume to manage_existing.
:param self: reference to class
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: size of the volume in GB
"""
return self.common.manage_existing_get_size(volume, external_ref)
def unmanage(self, volume):
"""Export VMAX volume from Cinder.
Leave the volume intact on the backend array.
"""
return self.common.unmanage(volume)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
"""Updates LUNs in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the dictionary of a consistency group as source.
:param source_vols: a list of volume dictionaries in the source_cg.
"""
return self.common.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots, source_cg,
source_vols)
|
the-stack_106_28799 | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# Copyright: (c) 2020, Jordan Borean (@jborean93) <[email protected]>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os
import os.path
import shutil
import tarfile
from utils import (
argcomplete,
get_version,
OMI_REPO,
)
def main():
"""Main program body."""
args = parse_args()
version = get_version()
version_str = '%s.%s.%s' % version
if args.print_tag:
print("v%s-pwsh" % version_str)
elif args.output_dir:
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
os.makedirs(args.output_dir)
# Create a tar.gz of each distribution's libs for the GitHub release
lib_path = os.path.join(OMI_REPO, 'build', 'lib')
for distribution in os.listdir(lib_path):
artifact_dir = os.path.join(lib_path, distribution)
if distribution == "PSWSMan" or not os.path.isdir(artifact_dir):
continue
artifact_tar = os.path.join(args.output_dir, distribution) + '.tar.gz'
if distribution.startswith('.') or not os.path.isdir(artifact_dir):
continue
print("Creating '%s'" % artifact_tar)
with tarfile.open(artifact_tar, 'w:gz') as tar:
for lib_name in os.listdir(artifact_dir):
if lib_name == '.':
continue
print("\tAdding '%s' to tar" % lib_name)
tar.add(os.path.join(artifact_dir, lib_name), arcname=lib_name)
def parse_args():
"""Parse and return args."""
parser = argparse.ArgumentParser(description='Release helpers for the OMI library in PowerShell.')
run_group = parser.add_mutually_exclusive_group()
run_group.add_argument('--print-tag',
dest='print_tag',
action='store_true',
help='Print the tag number for the release.')
run_group.add_argument('--output-dir',
dest='output_dir',
action='store',
help='The directory to create the release artifacts at.')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not args.print_tag and not args.output_dir:
parser.error('argument --print-tag or --output-dir must be set')
return args
if __name__ == '__main__':
main()
|
the-stack_106_28800 | from django.contrib.auth import get_user_model
from django.contrib.postgres.search import SearchVector
from django.shortcuts import get_object_or_404
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveAPIView
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from flaam_api.utils.paginations import CustomLimitOffsetPagination
from .models import Tag
from .serializers import TagDetailSerializer
UserModel = get_user_model()
class TagListView(ListCreateAPIView):
pagination_class = CustomLimitOffsetPagination
serializer_class = TagDetailSerializer
def get_queryset(self):
tags = Tag.objects.all()
favourited_by = self.request.query_params.get("favourited_by", None)
if favourited_by:
user = get_object_or_404(UserModel, pk=favourited_by)
return user.favourite_tags.all()
tag_ids = self.request.query_params.get("ids", None)
if tag_ids:
return tags.filter(id__in=tag_ids.split(","))
tag_name = self.request.query_params.get("name", None)
if tag_name:
vector = SearchVector("name") # + SearchVector("description")
tags = tags.annotate(search=vector).filter(search__icontains=tag_name)
return tags
@swagger_auto_schema(
tags=("tags",),
operation_summary="Get tags",
manual_parameters=(
openapi.Parameter(
"favourited_by",
in_=openapi.IN_QUERY,
type=openapi.TYPE_INTEGER,
description="Get user's favourite tags",
),
openapi.Parameter(
"name",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Search tags by name",
),
openapi.Parameter(
"ids",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Get tags by id",
),
),
responses={
200: TagDetailSerializer(many=True),
401: "Unauthorized.",
404: "Not Found.",
},
)
def get(self, request: Request) -> Response:
return super().get(request)
@swagger_auto_schema(
tags=("tags",),
operation_summary="Create a new tag",
responses={
201: TagDetailSerializer,
400: "Bad request.",
401: "Unauthorized.",
},
)
def post(self, request: Request) -> Response:
return super().post(request)
class TagDetailView(RetrieveAPIView):
serializer_class = TagDetailSerializer
queryset = Tag.objects.all()
@swagger_auto_schema(
tags=("tags",),
operation_summary="Get tag details",
responses={
200: TagDetailSerializer,
401: "Unauthorized.",
404: "Not found.",
},
)
def get(self, request: Request, pk: int) -> Response:
return super().get(request, pk)
class FavouriteTagView(APIView):
"""
Add or remove a tag from the user's favourites list.
"""
@swagger_auto_schema(
tags=("tags",),
operation_id="favourite_tag_add",
operation_summary="Add a tag to the user's favourites list",
responses={
204: "Success.",
401: "Unauthorized.",
404: "Not found.",
},
)
def post(self, request: Request, pk: int) -> Response:
tag = get_object_or_404(Tag, pk=pk)
tag.favorited_by.add(request.user)
return Response(status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
tags=("tags",),
operation_id="favourite_tag_remove",
operation_summary="Remove a tag from the user's favourites list",
responses={
204: "Success.",
401: "Unauthorized.",
404: "Not found.",
},
)
def delete(self, request: Request, pk: int) -> Response:
tag = get_object_or_404(Tag, id=pk)
tag.favorited_by.remove(request.user)
return Response(status=status.HTTP_204_NO_CONTENT)
|
the-stack_106_28802 | from .errors import *
class Store:
"""
Store record
Each store record contains a postcode plus basic information like the name
and optional coordinates in the WGS84 geographic system.
"""
def __init__(self, postcode: str, name: str, lon: float = None, lat: float = None):
"""
Create new store record.
Args:
postcode (str): Postcode identification (it can be international)
name (str): Simple information about the postcode (like place name)
lon (float, optional): Longitude of the postcode in WGS84
lat (float, optional): Latitude of the postcode in WGS84
"""
# Set constructor values
self.postcode = postcode
self.name = name
self.lon = lon
self.lat = lat
@property
def postcode(self) -> str:
"""
"postcode" getter
"""
return self._postcode
@postcode.setter
def postcode(self, postcode: str):
"""
"postcode" setter
The postcode can be international, so the only requirement is that it be
at least 1 character long. If you want to validate
UK postcodes only, see: https://pypi.org/project/uk-postcode-utils/
Args:
postcode (str): Postcode identification
"""
# Validation the type of the postcode
if not isinstance(postcode, str):
raise StoreInvalidDefinitionError("code value must be a string type")
# Validation of the length
if len(postcode) < 1:
raise StoreInvalidDefinitionError("code must be at leas 1 character")
self._postcode = postcode
@property
def name(self) -> str:
"""
"name" getter
"""
return self._name
@name.setter
def name(self, name: str):
"""
"name" setter
The name can be any descriptive text; the only requirement is that it be
at least 1 character long.
Args:
name (str): Simple information about the postcode (like place name)
"""
# Validation the type of the name
if not isinstance(name, str):
raise StoreInvalidDefinitionError("name value must be a string type")
# Validation of the length
if len(name) < 1:
raise StoreInvalidDefinitionError("name must be at leas 1 character")
self._name = name
@property
def lon(self) -> float:
"""
"lon" getter
"""
return self._lon
@lon.setter
def lon(self, lon: float):
"""
"lon" setter
Set Longitude in the range -180 and +180 specifying coordinates
west and east of the Prime Meridian
Args:
lon (float, optional): Longitude of the postcode in WGS84
"""
# lon param can be null
if lon is None:
self._lon = lon
return
# Validation the type of the lon
if not isinstance(lon, float) and not isinstance(lon, int):
raise StoreInvalidDefinitionError("lon value must be a float type")
# Check for range between -180 and 180
if lon < -180 or lon > 180:
raise StoreInvalidDefinitionError("lon value must be a float type")
self._lon = lon
@property
def lat(self) -> float:
"""
"lat" getter
"""
return self._lat
@lat.setter
def lat(self, lat: float):
"""
"lat" setter
Set Latitude in the range -90 and +90 for the
southern and northern hemisphere respectively
Args:
lat (float, optional): Latitude of the postcode in WGS84
"""
# lat param can be null
if lat is None:
self._lat = lat
return
# Validation the type of the lat
if not isinstance(lat, float) and not isinstance(lat, int):
raise StoreInvalidDefinitionError("lat value must be a float type")
# Check for range between -90 and 90
if lat < -90 or lat > 90:
raise StoreInvalidDefinitionError("lat value must be a float type")
self._lat = lat
def serialize(self) -> object:
"""
Export the current object.
Serialization gives you a clean dict
which can be used with json.dump or other actions.
The main purpose is to unwrap the data from the Store class.
"""
return {
"postcode": self.postcode,
"name": self.name,
"lon": self.lon,
"lat": self.lat
}
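# Illustrative usage sketch (assumed, not part of the original module): the setters
# validate types and coordinate ranges on construction, and serialize() returns a
# plain dict ready for json.dump.
def _example_store():
    store = Store(postcode='SW1A 1AA', name='Buckingham Palace', lon=-0.1419, lat=51.5014)
    return store.serialize()
    # == {'postcode': 'SW1A 1AA', 'name': 'Buckingham Palace', 'lon': -0.1419, 'lat': 51.5014}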
|