the-stack_0_20111 | import numpy as np
import pandas as pd
import pdb
import os
import math
import argparse
def get_args():
parser = argparse.ArgumentParser(description='preprocessing args')
parser.add_argument('--tcga-dir', type=str,help='tcga directory: all .csv per samples per class')
parser.add_argument('--sample-file', type=str,help='sample file')
parser.add_argument('--epigen-file', type=str,help='epigenetic state file')
parser.add_argument('--muat-dir', type=str,help='muat project directory')
parser.add_argument('--output-dir', type=str,help='output directory for simplification of mutation (3 bp)')
args = parser.parse_args()
return args
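# Example invocation (illustrative; the script name and paths are placeholders):
#   python preprocess_tcga.py --tcga-dir /path/to/tcga/alltcga \
#       --muat-dir /path/to/muat --output-dir /path/to/tcga/tokenized
# Note that the block below currently hard-codes its directories, so the
# parsed arguments are not yet used.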
if __name__ == '__main__':
args = get_args()
#edit the directory
muat_dir = '/users/primasan/projects/muat/'
metadata = pd.read_csv(muat_dir + 'extfile/metadata_icgc_pcawg.tsv',sep='\t',index_col=0)
dictMutation = pd.read_csv(muat_dir + 'extfile/dictMutation.csv',index_col=0)
dictChpos = pd.read_csv(muat_dir + 'extfile/dictChpos.csv',index_col=0)
dictGES = pd.read_csv(muat_dir + 'extfile/dictGES.csv',index_col=0)
pcawg_dir = '/scratch/project_2001668/data/tcga/alltcga/'
simplified_data = '/scratch/project_2001668/data/tcga/simplified/'
tokenized_data = '/scratch/project_2001668/data/tcga/tokenized/'
all_class = os.listdir(pcawg_dir)
for i in all_class:
pcawg_histology = i
os.makedirs(tokenized_data + pcawg_histology, exist_ok=True)
allsamples = os.listdir(simplified_data + pcawg_histology)
for j in allsamples:
onesamples = j
read_sample = pd.read_csv(simplified_data + pcawg_histology + '/' + onesamples,index_col=0)
somatic_files_persamples = read_sample
pcawg_sample = onesamples[:-4]
pd_new = read_sample
#2) tokenization from simplified mutation files
mergetriplet = pd_new.merge(dictMutation, left_on='seq', right_on='triplet', how='left',suffixes=('', '_y'))
mergetriplet.drop(mergetriplet.filter(regex='_y$').columns.tolist(),axis=1, inplace=True)
mergeges = mergetriplet.merge(dictGES, left_on='ges', right_on='ges', how='left',suffixes=('', '_y'))
mergeges.drop(mergeges.filter(regex='_y$').columns.tolist(),axis=1, inplace=True)
mergeges = mergeges.rename(columns={"token": "gestoken"})
mergechrompos = mergeges.merge(dictChpos, left_on='chrompos', right_on='chrompos', how='left',suffixes=('', '_y'))
mergechrompos.drop(mergechrompos.filter(regex='_y$').columns.tolist(),axis=1, inplace=True)
mergechrompos = mergechrompos.rename(columns={"token": "postoken"})
token_data = mergechrompos[['triplettoken','postoken','gestoken','rt','mut_type']]
#save token data
#pdb.set_trace()
token_files = 'token_' + pcawg_sample + '.csv'
token_data.to_csv(tokenized_data + pcawg_histology + '/' + token_files)
#3) create data utilities (required)
SNVonly = token_data.loc[token_data['mut_type']=='SNV']
SNVonly = SNVonly.drop(columns=['mut_type'])
MNVonly = token_data.loc[token_data['mut_type']=='MNV']
MNVonly = MNVonly.drop(columns=['mut_type'])
indelonly = token_data.loc[token_data['mut_type']=='indel']
indelonly = indelonly.drop(columns=['mut_type'])
MEISVonly = token_data.loc[token_data['mut_type'].isin(['MEI','SV'])]
Negonly = token_data.loc[token_data['mut_type']=='Normal']
Negonly = Negonly.drop(columns=['mut_type'])
#have to be exported
SNVonly.to_csv(tokenized_data + pcawg_histology + '/' + 'SNV_' + pcawg_sample + '.csv')
MNVonly.to_csv(tokenized_data + pcawg_histology + '/' + 'MNV_' + pcawg_sample + '.csv')
indelonly.to_csv(tokenized_data + pcawg_histology + '/' + 'indel_' + pcawg_sample + '.csv')
MEISVonly.to_csv(tokenized_data + pcawg_histology + '/' + 'MEISV_' + pcawg_sample + '.csv')
Negonly.to_csv(tokenized_data + pcawg_histology + '/' + 'Normal_' + pcawg_sample + '.csv')
pd_count = pd.DataFrame([len(SNVonly),len(MNVonly),len(indelonly),len(MEISVonly),len(Negonly)])
pd_count.to_csv(tokenized_data + pcawg_histology + '/' + 'count_' + pcawg_sample + '.csv') |
the-stack_0_20112 | """PyBindingCurve - a package to simulate protein-ligand binding systems
TODO:
* Add curve tracing to analytical systems
* Check combinations of readout with/without ymin/ymax
"""
import numpy as np
import matplotlib.pyplot as plt
import lmfit
from enum import Enum, auto
from pybindingcurve.systems import *
pbc_plot_style = {
"axis_label_size": 12,
"axis_label_font": "DejaVu Sans",
"title_size": 12,
"title_font": "DejaVu Sans",
# 'figure_width': 9,
# 'figure_height': 9,
"x_tick_label_font_size": 10,
"y_tick_label_font_size": 10,
"legend_font_size": 9,
"dpi": 300,
"x_axis_labelpad": None,
"y_axis_labelpad": None,
"title_labelpad": None,
"fig_size": (5 * 1.2, 4 * 1.2),
}
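# The dictionary above holds the default styling used when plotting. A tweaked
# copy can be passed to BindingCurve.show_plot through its pbc_plot_style
# argument, for example (illustrative only):
#   custom_style = dict(pbc_plot_style, dpi=150, legend_font_size=11)
#   my_curve.show_plot(pbc_plot_style=custom_style)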
class Readout:
"""
Class to change the system simulation readouts
Readout is a container for static functions that change the system
readout when passed as an argument to add_curve. These functions
take as arguments the system parameters, and then the calculated
y-values as raw output from systems equations. Return types are either
arrays or singular values.
"""
@staticmethod
def fraction_l(system_parameters: dict, y):
"""
Transform readout to fraction ligand bound
Readout function to change y values into fraction ligand bound
Parameters
----------
system_parameters : dict
Dictionary of system parameters defining the system
y : float or array-like
System simulation or query values
Returns
-------
tuple
Tuple's first value is the y-axis label which should be used with
this transformation when plotting.
The next value is either the single, or array like object
containing the transformed values.
"""
return "Fraction l bound", y / system_parameters["l"]
@staticmethod
def fraction_possible_dimer(system_parameters: dict, y):
"""
Transform readout to fraction possible dimer
Readout function to change y values into fraction possible dimer.
As maximal achievable dimer concentration is half the total monomer
concentration, we must take this into account when calculating fraction
possible dimer
Parameters
----------
system_parameters : dict
Dictionary of system parameters defining the system
y : float or array-like
System simulation or query values
Returns
-------
tuple
Tuple's first value is the y-axis label which should be used with
this transformation when plotting.
The next value is either the single, or array like object
containing the transformed values.
"""
return "Fraction possible dimer", y / (system_parameters["p"] / 2.0)
@staticmethod
def complex_concentration(system_parameters: dict, y):
"""
Readout as complex concentration (redundant, as passing None achieves the same)
Redundant readout function that is never required, but can be useful for
understanding how Readouts work.
Parameters
----------
system_parameters : dict
Dictionary of system parameters defining the system
y : float or array-like
System simulation or query values
Returns
-------
tuple
Tuple's first value is the y-axis label which should be used with
this transformation when plotting, although in this case the value
is None to indicate no transformation has been carried out and the
default which is assigned by the system should be used.
The next value is either the single, or array like object
containing the non-transformed values.
"""
return None, y
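# Usage sketch (illustrative, not part of the original module): a Readout
# function is passed to BindingCurve.query or BindingCurve.add_curve to
# transform the raw signal, e.g.
#   label, fraction = Readout.fraction_l({"p": 1.0, "l": 10.0}, complex_conc)
# or, more typically,
#   my_curve.add_curve(parameters, readout=Readout.fraction_l)
# The parameter keys shown ("p", "l") follow the docstrings above; the exact
# keys depend on the chosen binding system.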
class _Curve:
"""
Curve class, represents a binding curve
Consists of X and Y coordinates, along with a name and boolean
flag denoting that the curve is a traced, real physical solution.
"""
def __init__(self, xcoords: np.array, ycoords: np.array, series_name: str = ""):
"""
Curve constructor
Called by PyBindingCurve
Parameters
----------
xcoords : np.array
X coordinates of the binding system to be used to present a binding curve
ycoords : np.array
Y coordinates of the binding system to be used to present a binding curve
series_name : str, Optional
Name of curve to appear in plot legends
"""
self.xcoords = xcoords
self.ycoords = ycoords
self.name = series_name
class BindingCurve:
"""
BindingCurve class, used to simulate systems
BindingCurve objects are governed by their underlying system, defining the
(usually) protein-ligand binding system being represented. It also
provides the main interface for simulation, visualisation, querying and
the fitting of system parameters.
Parameters
----------
binding_system : BindingSystem or str
Define the binding system which will govern this BindingCurve object.
Can either be a BindingSystem object or a human readable string
shortcut, such as '1:1' or 'competition', etc.
"""
system = None
_last_custom_readout = None
curves = []
fig = None
axes = None
plot_solution_colours = list("krgbycmrgbycmrgby") + list(
np.linspace(0.1, 0.9, num=20)
)
_min_x_axis = 0.0
_max_x_axis = 0.0
_min_y_axis = 0.0
_max_y_axis = 0.0
_num_added_traces = 0
_last_known_changing_parameter = "X"
def query(self, parameters, readout: Readout = None):
"""
Query a binding system
Get the readout from from a set of system parameters
Parameters
----------
parameters : dict
System parameters defining the system being queried. Will usually
contain protein, ligand etc concentrations, and KDs.
readout : func or None
Change the readout of the system, can be None for unmodified
(usually complex concentration), a static member function from
the pbc.Readout class, or a custom written function following the
same definition as those in pbc.Readout.
Returns
-------
Single floating point, or array-like
Response/signal of the system
"""
if readout is None:
return self.system.query(parameters)
else:
return readout(parameters, self.system.query(parameters))[1]
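# Query sketch (illustrative): passing an array for exactly one parameter
# returns an array of responses, e.g. for a hypothetical 1:1 system whose
# parameters include "p", "l" and a dissociation constant "kdpl":
#   response = my_binding_curve.query(
#       {"p": np.linspace(0, 20), "l": 10.0, "kdpl": 1.0})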
def _find_changing_parameters(self, params: dict):
"""
Find the changing parameter
Determine which parameter is changing with titration, typically the
concentration of the protein or ligand. A changing parameter is
supplied as an array-like or list of concentrations.
Parameters
----------
params : dict
Parameters defining the binding system to be simulated.
Returns
-------
A list containing the keys of params indicating the name of
changing parameters.
"""
changing_list = []
for p in params.keys():
if isinstance(params[p], np.ndarray) or isinstance(params[p], list):
changing_list.append(p)
if len(changing_list) == 0:
return None
else:
return changing_list
def __init__(self, binding_system: [str, BindingSystem]):
"""
Construct BindingCurve objects
BindingCurve objects are initialised with different protein-ligand
binding systems defined by the user. This method also initialises
the object as an interface through the pbc.BindingSystem class,
providing for simulation, visualisation, querying and the fitting
of system parameters.
Parameters
----------
binding_system : str or BindingSystem
Define the binding system which will govern this BindingCurve
object. Can either be a BindingSystem object or a human readable
string shortcut, such as '1:1' or 'competition', etc.
"""
if isinstance(binding_system, str):
binding_system = binding_system.lower()
# 1:1
if binding_system in ["simple", "1:1"]:
self.system = System_analytical_one_to_one_pl()
# 1:1 kinetic
if binding_system in ["simplekinetic", "simple kinetic", "1:1kinetic", "1:1 kinetic"]:
self.system = System_kinetic_one_to_one_pl()
# Homodimer formation
if binding_system in ["homodimerformation", "homodimer formation"]:
self.system = System_analytical_homodimerformation_pp()
# Homodimer formation kinetic - only used for testing purposes
if binding_system in [
"homodimerformationkinetic",
"homodimer formation kinetic",
]:
self.system = System_kinetic_homodimerformation()
# Competition
if binding_system in ["competition", "1:1:1"]:
self.system = System_analytical_competition_pl()
# Homodimer breaking
if binding_system in ["homodimerbreaking", "homodimer breaking"]:
self.system = System_kinetic_homodimerbreaking_pp()
else:
if issubclass(binding_system, BindingSystem):
self.system = binding_system()
else:
print(
"Invalid system specified, try one of: [simple, homodimer, competition, homodimer breaking], or pass a system object"
)
return None
def _initialize_plot(self):
"""
Initialise the plot so it is ready for curve plotting
Sets up everything needed for a binding plot, including the subplot
layout, grid lines, and y-axis view limits.
"""
if self.fig is None:
self.fig, self.axes = plt.subplots(
nrows=1, ncols=1, figsize=pbc_plot_style["fig_size"]
)
self.axes.grid(True, which="both")
self.axes.set_ylim(0, 1)
plt.tight_layout(rect=(0.05, 0.05, 0.95, 0.92))
def add_curve(self, parameters: dict, name: str = None, readout: Readout = None):
"""
Add a curve to the plot
Add a curve as specified by the system parameters to the
pbc.BindingSystem's internal plot using the underlying binding system
specified on initialisation.
Parameters
----------
parameters : dict
Parameters defining the system to be simulated
name : str or None, optional
Name of curve to appear in plot legends
readout : Readout.function, optional
Change the system readout to one described by a custom readout
function. Predefined standard readouts can be found in the static
pbc.Readout class.
"""
if self.system is None:
print("No system defined, could not proceed")
return None
self._initialize_plot()
changing_parameters = self._find_changing_parameters(parameters)
if not len(changing_parameters) == 1:
print("Must have 1 changing parameter, no curves added.")
return
y_values = self.system.query(parameters)
if readout is not None:
self._last_custom_readout, y_values = readout(parameters, y_values)
if y_values.ndim > 1:
for i in range(y_values.ndim):
self.curves.append(
_Curve(parameters[changing_parameters[0]], y_values[i])
)
else:
self.curves.append(
_Curve(parameters[changing_parameters[0]], y_values))
self._last_known_changing_parameter = changing_parameters[0]
for curve_it, curve in enumerate(self.curves[self._num_added_traces:]):
self._num_added_traces += 1
curve_name_with_number = None
if name is None:
curve_name_with_number = f"Curve {self._num_added_traces}"
else:
if y_values.ndim == 1:
curve_name_with_number = name
else:
curve_name_with_number = name + " " + str(curve_it + 1)
self.axes.plot(
parameters[changing_parameters[0]],
curve.ycoords,
self.plot_solution_colours[self._num_added_traces] + "-",
label=curve_name_with_number,
linewidth=2,
)
self._max_x_axis = np.nanmax(
[self._max_x_axis, parameters[changing_parameters[0]][-1]]
)
self._min_x_axis = np.nanmin(
[self._min_x_axis, parameters[changing_parameters[0]][0]]
)
self._min_y_axis = np.nanmin(
[self._min_y_axis, np.nanmin(curve.ycoords)])
self._max_y_axis = np.nanmax(
[self._max_y_axis, np.nanmax(curve.ycoords)])
def add_scatter(self, xcoords, ycoords):
"""
Add scatterpoints to a plot, useful to represent real measurement data
X and Y coordinates may be added to the internal plot, useful when
fitting to experimental data, and wanting to plot the true experimental
values alongside a curve generated with fitted parameters.
Parameters
----------
xcoords : list or array-like
x-coordinates
ycoords : list or array-like
y-coordinates
"""
self._initialize_plot()
self.axes.scatter(xcoords, ycoords)
if isinstance(xcoords, np.ndarray) and isinstance(ycoords, np.ndarray):
self._min_y_axis = min(self._min_y_axis, min(np.real(ycoords)))
self._max_y_axis = max(self._max_y_axis, max(np.real(ycoords)))
def show_plot(
self,
title: str = "System simulation",
xlabel: str = None,
ylabel: str = None,
min_x: float = None,
max_x: float = None,
min_y: float = None,
max_y: float = None,
log_x_axis: bool = False,
log_y_axis: bool = False,
pbc_plot_style: dict = pbc_plot_style,
png_filename: str = None,
svg_filename: str = None,
show_legend: bool = True,
):
"""
Show the PyBindingCurve plot
Function to display the internal state of the pbc BindingCurve objects
plot.
Parameters
----------
title : str
The title of the plot (default = "System simulation")
xlabel: str
X-axis label (default = None)
ylabel : str
Y-axis label (default = None, causing label to be "[Complex]")
min_x : float
X-axis minimum (default = None)
max_x : float
X-axis maximum (default = None)
min_y : float
Y-axis minimum (default = None)
max_y : float
Y-axis maximum (default = None)
log_x_axis : bool
Log scale on X-axis (default = False)
log_y_axis : bool
Log scale on Y-axis (default = False)
pbc_plot_style : dict
Dictionary of style options controlling the plot appearance
show_legend : bool
Display the legend (default = True)
png_filename : str
File name/location where png will be written
svg_filename : str
File name/location where svg will be written
"""
if min_x is not None:
self._min_x_axis = min_x
if max_x is not None:
self._max_x_axis = max_x
if min_y is not None:
self._min_y_axis = min_y
if max_y is not None:
self._max_y_axis = max_y
if max_y is None:
self.axes.set_ylim(self._min_y_axis, self._max_y_axis * 1.1)
else:
self.axes.set_ylim(self._min_y_axis, self._max_y_axis)
self.axes.set_xlim(self._min_x_axis, self._max_x_axis)
if log_x_axis:
self.axes.set_xscale("log", nonposx="clip")
if log_y_axis:
self.axes.set_yscale("log", nonposy="clip")
if xlabel is None:
self.axes.set_xlabel(
"[" + self._last_known_changing_parameter.upper() + "]",
fontsize=pbc_plot_style["axis_label_size"],
fontname=pbc_plot_style["axis_label_font"],
labelpad=pbc_plot_style["x_axis_labelpad"],
)
else:
self.axes.set_xlabel(
xlabel,
fontsize=pbc_plot_style["axis_label_size"],
fontname=pbc_plot_style["axis_label_font"],
labelpad=pbc_plot_style["x_axis_labelpad"],
)
if ylabel is None:
if self._last_custom_readout is None:
self.axes.set_ylabel(
"[" + self.system.default_readout.upper() + "]",
fontsize=pbc_plot_style["axis_label_size"],
fontname=pbc_plot_style["axis_label_font"],
labelpad=pbc_plot_style["y_axis_labelpad"],
)
else:
self.axes.set_ylabel(
self._last_custom_readout,
fontsize=pbc_plot_style["axis_label_size"],
fontname=pbc_plot_style["axis_label_font"],
labelpad=pbc_plot_style["y_axis_labelpad"],
)
else:
self.axes.set_ylabel(
ylabel,
fontsize=pbc_plot_style["axis_label_size"],
fontname=pbc_plot_style["axis_label_font"],
labelpad=pbc_plot_style["y_axis_labelpad"],
)
self.axes.set_title(
title,
fontsize=pbc_plot_style["title_size"],
fontname=pbc_plot_style["title_font"],
pad=pbc_plot_style["title_labelpad"],
)
if show_legend:
self.axes.legend(prop={"size": pbc_plot_style["legend_font_size"]})
for tick in self.axes.xaxis.get_major_ticks():
tick.label.set_fontsize(pbc_plot_style["x_tick_label_font_size"])
for tick in self.axes.yaxis.get_major_ticks():
tick.label.set_fontsize(pbc_plot_style["y_tick_label_font_size"])
if png_filename is not None:
plt.savefig(
png_filename,
dpi=pbc_plot_style["dpi"],
metadata={"Title": "pyBindingCurve plot"},
)
if svg_filename is not None:
plt.savefig(svg_filename, metadata={
"Title": "pyBindingCurve plot"})
plt.show()
def fit(
self,
system_parameters: dict,
to_fit: dict,
ycoords: np.array,
bounds: dict = None,
):
"""
Fit the parameters of a system to a set of data points
Fit the system to a set of (usually) experimental datapoints.
The fitted parameters are stored in the system_parameters dict
which may be accessed after running this function. It is
possible to fit multiple parameters at once and define bounds
for the parameters. The function returns the best fit system parameters
together with the standard error of each fitted parameter.
Parameters
----------
system_parameters : dict
Dictionary containing system parameters, will be used as arguments
to the systems equations.
to_fit : dict
Dictionary containing system parameters to fit.
ycoords : np.array
Y coordinates of data the system parameters should be fit to
bounds : dict
Dictionary of tuples, indexed by system parameters denoting the
lower and upper bounds of a system parameter being fit, optional,
default = None
Returns
-------
tuple (dict, dict)
Tuple containing a dictionary of best fit systems parameters,
then a dictionary containing the accuracy for fitted variables.
"""
system_parameters_copy = dict(system_parameters)
# Check we have parameters to fit, and nothing is missing
if len(to_fit.keys()) == 0:
print("Nothing to fit, insert parameters to fit into to_fit dictionary")
return None
missing = sorted(
list(
set(self.system.arguments) -
set([*system_parameters_copy] + [*to_fit])
)
)
if len(missing) > 0:
print(
"Not all system parameters included in system_parameters or to_fit dictionaries, check all variables for the used equation are included"
)
print("Missing variables are: ", missing)
return None
# Add parameters for lmfit, accounting for bounds
if bounds is None:
bounds = {}
params = lmfit.Parameters()
for varname in to_fit.keys():
bnd_min = -np.inf
bnd_max = np.inf
if varname in bounds.keys():
bnd_min = bounds[varname][0]
bnd_max = bounds[varname][1]
params.add(
varname, value=to_fit[varname], min=bnd_min, max=bnd_max)
lmmini = lmfit.Minimizer(
self._residual, params, fcn_args=(
system_parameters_copy, to_fit, ycoords)
)
result = lmmini.minimize()
for k in system_parameters_copy.keys():
if isinstance(system_parameters_copy[k], lmfit.parameter.Parameter):
system_parameters_copy[k] = system_parameters_copy[k].value
return (
system_parameters_copy,
dict((p, result.params[p].stderr) for p in result.params),
)
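# Fitting sketch (illustrative; variable names are placeholders): fit a
# dissociation constant to measured data, assuming a system whose parameters
# include "p", "l" and "kdpl":
#   fitted_params, fit_errors = my_binding_curve.fit(
#       {"p": np.linspace(0, 20), "l": 10.0},
#       to_fit={"kdpl": 1.0},
#       ycoords=measured_signal)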
def _residual(self, params, system_parameters: dict, to_fit: dict, y: np.array):
"""
Residual function for fitting parameters.
Helper function for lmfit, calculating the residual remaining for the probed
system parameters.
Parameters
----------
params : dict
A dictionary of the parameters required to be evaluated to a fit model.
system_parameters : dict
Dictionary containing system parameters, will be used as arguments to the systems equations.
to_fit : dict
Dictionary containing system parameters to fit.
y : np.array
Array-like data (the observed y values) that the system parameters should be fit to
Returns
-------
The cost between the experimental datapoints and the values derived from the model.
"""
for value in params:
system_parameters[value] = float(params[value])
return self.system.query(system_parameters) - y
|
the-stack_0_20116 | import re
from copy import deepcopy
import numpy as np
def stringNormalize(string):
return string
def betterScaleToNumeric(vote, rowName, colName, betterVal=2.0, muchBetterVal=5.0):
dom = None
numericVote = None
muchBetterRegexes = [
r'^(.+)\s+is\s+much\s+better$',
r'^(.+)\s+is\s+much\s+more',
r'^(.+)\s+is\s+much\s+preferred'
]
betterRegexes = [
r'^(.+)\s+is\s+better$',
r'^(.+)\s+is\s+more',
r'^(.+)\s+is\s+preferred$'
]
rowName = stringNormalize(rowName)
colName = stringNormalize(colName)
if firstParenRegex(muchBetterRegexes, vote) is not None:
dom = stringNormalize(firstParenRegex(muchBetterRegexes, vote))
#console.log("Much better: '"+dom+"' hope this makes sense")
numericVote = muchBetterVal
elif firstParenRegex(betterRegexes, vote) is not None:
dom = stringNormalize(firstParenRegex(betterRegexes, vote))
#console.log("Better: '"+dom+"' hope this makes sense")
numericVote = betterVal
elif vote.endswith("equal"):
#Have an equality vote
#Doesn't matter which we say is dominant, call it rowName
dom = rowName
numericVote = 1
else:
raise Exception("Vote '"+vote+"' was neither better, nor much better, nor equals, we give up")
if dom == rowName:
#The dominant node was the row node, so return the vote value
return numericVote
elif dom == colName:
#The dominant was the column, so we return the reciprocal of the vote
return 1.0/numericVote
else:
raise Exception("The dominant node from the vote was '"+dom+"' which was neither the row nor the column")
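# Example (illustrative), using the default scale of 2.0 for "better" and 5.0
# for "much better":
#   betterScaleToNumeric("NodeA is much better", "NodeA", "NodeB")  # -> 5.0
#   betterScaleToNumeric("NodeB is better", "NodeA", "NodeB")       # -> 0.5
#   betterScaleToNumeric("They are equal", "NodeA", "NodeB")        # -> 1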
def firstParenRegex(regexes, string):
for regex in regexes:
m=re.search(regex, string)
if m is not None:
return m.group(1)
return None
def islist(val):
return (not (isinstance(val,str))) and (hasattr(val, "__len__"))
def betterScaleDataToNumeric(colHeader, vote, betterVal=2.0, muchBetterVal=5.0,
extraColHeaderRegexes=[]):
if islist(vote):
# We need to do this for a bunch of items
rval = deepcopy(vote)
for i in range(len(vote)):
rval[i]=betterScaleDataToNumeric(colHeader, vote[i], betterVal, muchBetterVal, extraColHeaderRegexes)
return rval
#print("Help me "+str(vote))
if (vote is None) or np.isreal(vote) or (vote == ""):
return vote
regexes = [
r'^(.+)\s+vs\s+(.+)\s+wrt\s+.+$',
r'^(.+)\s+vs\s+(.+)$'
]
regexes.extend(extraColHeaderRegexes)
colHeader = stringNormalize(colHeader)
vote = stringNormalize(vote)
rowNode = None
colNode = None
#First we need to split the colHeader on the word, we try each regex
for regex in regexes:
matches = re.search(regex, colHeader)
#console.log(matches)
if matches is not None:
rowNode = matches[1]
colNode = matches[2]
break
if (rowNode is None) or (colNode is None):
return
numericVote = betterScaleToNumeric(vote, rowNode, colNode, betterVal, muchBetterVal)
#print("'"+rowNode+"' vs '"+colNode+"'"+" vote = "+str(numericVote))
return numericVote
|
the-stack_0_20117 | # coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stats."""
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
from rl_reliability_metrics.analysis import data_def
from rl_reliability_metrics.analysis import stats
import unittest
class StatsTest(parameterized.TestCase, unittest.TestCase):
def setUp(self):
super(StatsTest, self).setUp()
results_dir = os.path.join(
'./',
'rl_reliability_metrics/analysis/test_data')
self.dd = data_def.DataDef(
results_dir,
algorithms=['algoA', 'algoB', 'algoC'],
tasks=['taskX', 'taskY'],
n_runs_per_experiment=2)
@parameterized.named_parameters(
('2x2x1', [[[1], [2]], [[3], [4]]], 'ATR', 1),
('3x2x2', [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]
], 'ATR', 1),
('2x2x1x2', [[[[1, 2]], [[3, 4]]], [[[5, 6]], [[7, 8]]]], 'ATRP', 0),
('2x2x5x3', [[[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14]
],
[[15, 16, 17], [18, 19, 20], [21, 22, 23], [24, 25, 26],
[27, 28, 29]]],
[[[30, 31, 32], [33, 34, 35], [36, 37, 38], [39, 40, 41],
[42, 43, 44]],
[[45, 46, 47], [48, 49, 50], [51, 52, 53], [54, 55, 56],
[57, 58, 59]]]], 'ATRP', 0),
)
def test_resample_metric_results(self, metric_results, result_dims, algo_ind):
stats_runner = stats.StatsRunner(
data=self.dd, metric='IqrAcrossRuns', n_timeframes=3)
stats_runner.result_dims = result_dims
metric_results = np.array(metric_results)
resampled = stats_runner._resample_metric_results(metric_results, algo_ind)
# The resampled subarrays should be drawn from the original subarrays.
n_task = metric_results.shape[1]
for itask in range(n_task):
algo_task_results = metric_results[algo_ind, itask]
for run_value in resampled[algo_ind][itask]:
self.assertIn(run_value, algo_task_results)
# Resampled array should be the same for all other algorithms.
n_metrics = metric_results.shape[0]
other_algo_inds = list(set(range(n_metrics)) - {algo_ind})
resampled_other_algos = resampled[other_algo_inds]
original_other_algos = metric_results[other_algo_inds]
np.testing.assert_array_equal(resampled_other_algos, original_other_algos)
if metric_results.shape[2] == 1:
# Since there is only one run, the resampled array should be the same.
np.testing.assert_array_equal(resampled, metric_results)
@parameterized.parameters(
('IqrWithinRuns', 0, [0]),
('IqrAcrossRuns', 1, [1]),
('MedianPerfDuringTraining', 2, [2, 3]),
('MedianPerfDuringTraining', None, [0, 1, 2, 3]),
)
def test_get_timeframe_points(self, metric, timeframe, expected):
stats_runner = stats.StatsRunner(
data=self.dd, metric=metric, n_timeframes=3)
timeframe_points = stats_runner.get_timeframe_points(timeframe)
np.testing.assert_array_equal(list(timeframe_points), expected)
@parameterized.parameters(
('AT', True, [[2, 3], [3, 4], [1, 2], [4, 1]]),
('AT', False, [[3, 2], [2, 1], [4, 3], [1, 4]]),
('ATP', False, [[[1, 2], [1, 2]], [[3, 4], [3, 4]]]),
)
def test_rank_per_task(self, result_dims, bigger_is_better, expected_result):
results_arrays = {
'AT': [[3, 1], [2, -2], [4, 7], [0, 9]],
'ATP': [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]],
}
results_array = np.array(results_arrays[result_dims])
stats_runner = stats.StatsRunner(data=None, metric='IqrAcrossRuns')
stats_runner.result_dims = result_dims
stats_runner.bigger_is_better = bigger_is_better
ranks = stats_runner.rank_per_task(results_array)
np.testing.assert_array_equal(ranks, expected_result)
def test_algo_meanrank_diff(self):
ranks_array = np.array([[1, 2, 1, 4], [2, 4, 3, 2]])
meanrank_diff = stats.StatsRunner._algo_meanrank_diff(ranks_array)
self.assertEqual(meanrank_diff, 0.75)
def test_pval(self):
null_distribution = np.array([3, 5, 7, -2, -4, -6, 0, 0, 1, 2])
observed_value = 5
pval = stats.StatsRunner._get_pval(null_distribution, observed_value)
self.assertEqual(pval, 3 / 10)
if __name__ == '__main__':
unittest.main()
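# These tests can be run directly with `python <this file>` or through a test
# runner such as pytest; they expect the test data under
# rl_reliability_metrics/analysis/test_data to be available.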
|
the-stack_0_20118 | from fastapi import FastAPI
app = FastAPI()
@app.get("/")
async def root():
return {"message": "Hello World"}
@app.get("/items/{item_id}")
async def read_item(item_id: int):
return {"item_id": item_id}
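# To serve this app locally one would typically use an ASGI server such as
# uvicorn (assuming this file is saved as main.py):
#   uvicorn main:app --reload
# Example requests:
#   GET /          -> {"message": "Hello World"}
#   GET /items/5   -> {"item_id": 5}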
|
the-stack_0_20119 | ## This module preprocesses the CIFAR-10/CIFAR-100 datasets (available through the Keras API) by converting the training and test images
## from RGB tensors to grayscale NumPy arrays.
import tensorflow as tf
def create_cifar_array():
# Import dataset from KERAS API
from keras.datasets import cifar100
# Define train and test sets (still RGB format)
(rgb_x_train, y_train), (rgb_x_test, y_test) = cifar100.load_data(label_mode='fine')
# Convert from RGB format to grayscale
x_train_tens = tf.image.rgb_to_grayscale(rgb_x_train, name=None)
x_test_tens = tf.image.rgb_to_grayscale(rgb_x_test, name=None)
# Convert Tensors to array
x_train = x_train_tens.eval(session=tf.compat.v1.Session())
x_test = x_test_tens.eval(session=tf.compat.v1.Session())
return x_train,y_train,x_test,y_test
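# Usage sketch (illustrative): for CIFAR-100 the returned arrays have shapes
# (50000, 32, 32, 1) and (10000, 32, 32, 1) after the grayscale conversion.
#   x_train, y_train, x_test, y_test = create_cifar_array()
#   print(x_train.shape, x_test.shape)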
|
the-stack_0_20120 | import time
import datetime
import pandas as pd
CITY_DATA = { 'chicago': 'chicago.csv',
'new york': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
# The next 3 lines print an ASCII Bicycle Art to welcome the user.
print(' __o')
print(' _`\\<,_')
print('(*)/ (*) Hello! Let\'s explore some US bikeshare data!')
print('-'*55)
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
while True:
city = input("Specify a city to analyze (Chicago, New York or Washington): ").lower()
if city not in ('chicago', 'new york', 'washington'):
print("Not an appropriate city. Please try again.")
continue
else:
break
# get user input for month (all, january, february, ... , june)
while True:
month = input("Specify a month to analyze (all, January, February, ... June): ").lower()
if month not in ('all', 'january', 'february', 'march', 'april', 'may', 'june'):
print("Not an appropriate month. Please try again.")
continue
else:
break
# get user input for day of week (all, monday, tuesday, ... sunday)
while True:
day = input("Specify a day of week to analyze (all, Monday, Tuesday, ... Sunday): ").lower()
if day not in ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'):
print("Not an appropriate day of week. Please try again.")
continue
else:
break
print('-'*55)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month and day of week from Start Time to create new columns
# The month as January=1, December=12.
df['Month'] = df['Start Time'].dt.month
# The name of the day of the week (e.g. 'Monday', 'Tuesday')
df['Day_Of_Week'] = df['Start Time'].dt.day_name()  # .weekday_name was removed in newer pandas
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df.Month == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df.Day_Of_Week == day.title()]
return df
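# Example (illustrative): load the Chicago data restricted to Mondays in March:
#   df = load_data('chicago', 'march', 'monday')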
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# display the most common month
popular_month_int = df['Month'].mode().values
# use the index of the months list to get the corresponding month
months = ['January', 'February', 'March', 'April', 'May', 'June']
popular_month_str = []
for i in popular_month_int:
popular_month_str.append(months[i - 1])
print('Most Popular Month:', popular_month_str)
# display the most common day of week
popular_day = df['Day_Of_Week'].mode().values
print('Most Popular Day of Week:', popular_day)
# display the most common start hour
# extract hour from the Start Time column to create an hour column
df['Hour'] = df['Start Time'].dt.hour
# find the most popular hour
popular_hour = df['Hour'].mode().values
print('Most Popular Start Hour:', popular_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*55)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
popular_start_station = df['Start Station'].mode().values
print('Most Commonly Used Start Station:', popular_start_station)
# display most commonly used end station
popular_end_station = df['End Station'].mode().values
print('Most Commonly Used End Station:', popular_end_station)
# display most frequent combination of start station and end station trip
df['Combination'] = df['Start Station'] + ' - ' + df['End Station']
popular_combination = df['Combination'].mode().values
print('Most Frequent Combination of Start and End Station:', popular_combination)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*55)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
print('Total Travel Time:', datetime.timedelta(seconds=int(total_travel_time)))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
print('Average Trip Duration:', datetime.timedelta(seconds=int(mean_travel_time)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*55)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
user_types = df['User Type'].value_counts()
print('Counts of user types:\n', user_types, sep='')
# Display counts of gender
if 'Gender' in df.columns:
genders = df['Gender'].value_counts()
print('\nCounts of gender:\n', genders, sep='')
else:
print('\nGender data not available.')
# Display earliest, most recent, and most common year of birth
if 'Birth Year' in df.columns:
earliest_year = int(df['Birth Year'].min())
recent_year = int(df['Birth Year'].max())
common_year = df['Birth Year'].mode().apply(lambda x: int(x))
print('\nOldest year of birth:', earliest_year)
print('Youngest year of birth:', recent_year)
print('Most common year of birth:', common_year.values)
else:
print('\nBirth year data not available.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*55)
def display_raw_data(df):
"""Display 5 rows of data from the data set until the user answers no."""
iloc = 0 # This is the first location we will display
while True:
raw_data = input('\nWould you like to view individual trip data? Enter yes or no.\n')
if raw_data.lower() not in ('yes', 'no'):
print("Sorry, I didn't understand your answer. Please try again.")
continue
elif raw_data.lower() != 'yes':
break
else:
print(df[iloc:iloc+5])
iloc +=5
def main():
while True:
city, month, day = get_filters()
print('Filter', city, month, day, sep=' -> ')
df = load_data(city, month, day)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
display_raw_data(df)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
|
the-stack_0_20124 | import pymysql
import threading
import traceback
from conf import Config
from .base_db import HGSDatabase, DBException, DBCloseException
from tool.typing import *
class MysqlDB(HGSDatabase):
def __init__(self,
host: Optional[str] = Config.mysql_url,
name: Optional[str] = Config.mysql_name,
passwd: Optional[str] = Config.mysql_passwd,
port: Optional[str] = Config.mysql_port):
if host is None or name is None:
raise DBException
super(MysqlDB, self).__init__(host, name, passwd, port)
try:
self._db = pymysql.connect(user=self._name,
password=self._passwd,
host=self._host,
port=self._port,
database="hgssystem")
except pymysql.err.OperationalError:
raise
self._cursor = self._db.cursor()
self._lock = threading.RLock()
def close(self):
if self._cursor is not None:
self._cursor.close()
if self._db is not None:
self._db.close()
self._db = None
self._cursor = None
self._lock = None
def is_connect(self) -> bool:
if self._cursor is None or self._db is None:
return False
return True
def get_cursor(self) -> pymysql.cursors.Cursor:
if self._cursor is None or self._db is None:
raise DBCloseException
return self._cursor
def search(self, columns: List[str], table: str,
where: Union[str, List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
order_by: Optional[List[Tuple[str, str]]] = None,
group_by: Optional[List[str]] = None,
for_update: bool = False):
if type(where) is list and len(where) > 0:
where: str = " WHERE " + " AND ".join(f"({w})" for w in where)
elif type(where) is str and len(where) > 0:
where = " WHERE " + where
else:
where: str = ""
if order_by is None:
order_by: str = ""
else:
by = [f" {i[0]} {i[1]} " for i in order_by]
order_by: str = " ORDER BY" + ", ".join(by)
if limit is None or limit == 0:
limit: str = ""
else:
limit = f" LIMIT {limit}"
if offset is None:
offset: str = ""
else:
offset = f" OFFSET {offset}"
if group_by is None:
group_by: str = ""
else:
group_by = "GROUP BY " + ", ".join(group_by)
columns: str = ", ".join(columns)
if for_update:
for_update = "FOR UPDATE"
else:
for_update = ""
return self.__search(f"SELECT {columns} "
f"FROM {table} "
f"{where} {group_by} {order_by} {limit} {offset} {for_update};")
def insert(self, table: str, columns: list, values: Union[str, List[str]], not_commit: bool = False):
columns: str = ", ".join(columns)
if type(values) is str:
values: str = f"({values})"
else:
values: str = ", ".join(f"{v}" for v in values)
return self.__done(f"INSERT INTO {table}({columns}) VALUES {values};", not_commit=not_commit)
def delete(self, table: str, where: Union[str, List[str]] = None, not_commit: bool = False):
if type(where) is list and len(where) > 0:
where: str = " AND ".join(f"({w})" for w in where)
elif type(where) is not str or len(where) == 0: # a WHERE condition must be specified
return None
return self.__done(f"DELETE FROM {table} WHERE {where};", not_commit=not_commit)
def update(self, table: str, kw: "Dict[str, str]", where: Union[str, List[str]] = None, not_commit: bool = False):
if len(kw) == 0:
return None
if type(where) is list and len(where) > 0:
where: str = " AND ".join(f"({w})" for w in where)
elif type(where) is not str or len(where) == 0: # a WHERE condition must be specified
return None
kw_list = [f"{key} = {kw[key]}" for key in kw]
kw_str = ", ".join(kw_list)
return self.__done(f"UPDATE {table} SET {kw_str} WHERE {where};", not_commit=not_commit)
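# Usage sketch (illustrative; the table and column names are hypothetical):
#   db = MysqlDB()
#   cur = db.search(columns=["UserID", "Score"], table="user",
#                   where="Score > 10", limit=5)
#   rows = cur.fetchall() if cur is not None else []
# Note that values are interpolated directly into the SQL strings, so callers
# are responsible for quoting and sanitising them.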
def __search(self, sql) -> Union[None, pymysql.cursors.Cursor]:
if self._cursor is None or self._db is None:
raise DBCloseException
try:
self._lock.acquire() # acquire the lock
self._cursor.execute(sql)
except pymysql.MySQLError:
print(f"sql='{sql}'")
traceback.print_exc()
return None
finally:
self._lock.release() # release the lock
return self._cursor
def __done(self, sql, not_commit: bool = False) -> Union[None, pymysql.cursors.Cursor]:
if self._cursor is None or self._db is None:
raise DBCloseException
try:
self._lock.acquire()
self._cursor.execute(sql)
except pymysql.MySQLError:
self._db.rollback()
print(f"sql={sql}")
traceback.print_exc()
return None
finally:
if not not_commit:
self._db.commit()
self._lock.release()
return self._cursor
def commit(self):
try:
self._lock.acquire()
self._db.commit()
finally:
self._lock.release()
|
the-stack_0_20126 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.html import strip_tags
from cms.models import CMSPlugin
from djangocms_link.models import AbstractLink
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
from filer.fields.file import FilerFileField
from django.core.validators import FileExtensionValidator
from js_color_picker.fields import RGBColorField
from djangocms_bootstrap4.fields import TagTypeField, AttributesField
from .constants import (
CAROUSEL_TEMPLATE_CHOICES,
CAROUSEL_PAUSE_CHOICES,
CAROUSEL_RIDE_CHOICES,
CAROUSEL_ASPECT_RATIO_CHOICES,
)
@python_2_unicode_compatible
class Bootstrap4Carousel(CMSPlugin):
"""
Components > "Carousel" Plugin
https://getbootstrap.com/docs/4.0/components/carousel/
"""
carousel_title = models.CharField(
verbose_name=_('Title'),
max_length=255,
null=True,
blank=True,
)
carousel_background_image = FilerImageField(
verbose_name=_('background image'),
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='+',
)
carousel_style = models.CharField(
verbose_name=_('Template'),
default='default',
max_length=255,
help_text=_('This is the template that will be used for the component.'),
)
carousel_interval = models.IntegerField(
verbose_name=_('Interval'),
default=5000,
help_text=_('The amount of time to delay between automatically cycling '
'an item. If false, carousel will not automatically cycle.'),
)
carousel_controls = models.BooleanField(
verbose_name=_('Controls'),
default=True,
help_text=_('Adding in the previous and next controls.'),
)
carousel_indicators = models.BooleanField(
verbose_name=_('Indicators'),
default=True,
help_text=_('Adding in the indicators to the carousel.'),
)
carousel_keyboard = models.BooleanField(
verbose_name=_('Keyboard'),
default=True,
help_text=_('Whether the carousel should react to keyboard events.'),
)
carousel_pause = models.CharField(
verbose_name=_('Pause'),
choices=CAROUSEL_PAUSE_CHOICES,
default=CAROUSEL_PAUSE_CHOICES[0][0],
max_length=255,
help_text=_('If set to "hover", pauses the cycling of the carousel on '
'"mouseenter" and resumes the cycling of the carousel on '
'"mouseleave". If set to "false", hovering over the carousel '
'won\'t pause it.')
)
carousel_ride = models.CharField(
verbose_name=_('Ride'),
choices=CAROUSEL_RIDE_CHOICES,
default=CAROUSEL_RIDE_CHOICES[0][0],
max_length=255,
help_text=_('Autoplays the carousel after the user manually cycles the '
'first item. If "carousel", autoplays the carousel on load.'),
)
carousel_wrap = models.BooleanField(
verbose_name=_('Wrap'),
default=True,
help_text=_('Whether the carousel should cycle continuously or have '
'hard stops.'),
)
carousel_aspect_ratio = models.CharField(
verbose_name=_('Aspect ratio'),
choices=CAROUSEL_ASPECT_RATIO_CHOICES,
blank=True,
default='',
max_length=255,
help_text=_('Determines width and height of the image '
'according to the selected ratio.'),
)
full_width = models.BooleanField(
verbose_name=_('Show Full Width'),
default=False,
)
tag_type = TagTypeField()
attributes = AttributesField(
excluded_keys=[
'id', 'data-interval', 'data-keyboard',
'data-pause', 'data-ride', 'data-wrap'
],
)
def __str__(self):
return str(self.pk)
def get_short_description(self):
text = '({})'.format(self.carousel_style)
text += ' {}: {}'.format(_('Interval'), self.carousel_interval)
text += ', {}: {}'.format(_('Controls'), self.carousel_controls)
text += ', {}: {}'.format(_('Indicators'), self.carousel_indicators)
text += ', {}: {}'.format(_('Keyboard'), self.carousel_keyboard)
text += ', {}: {}'.format(_('Pause'), self.carousel_pause)
text += ', {}: {}'.format(_('Ride'), self.carousel_ride)
text += '{}: {}'.format(_('Wrap'), self.carousel_wrap)
return text
@python_2_unicode_compatible
class Bootstrap4CarouselSlide(AbstractLink, CMSPlugin):
carousel_style = models.CharField(
verbose_name=_('Template'),
default='default',
max_length=255,
help_text=_('This is the template that will be used for the component.'),
)
carousel_image = FilerImageField(
verbose_name=_('Slide image'),
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='+',
)
carousel_video = FilerFileField(
verbose_name=_('Slide video background'),
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='+',
help_text=_('Use MP4 videos'),
)
carousel_video_url = models.CharField(
verbose_name=_('Slide video background url'),
blank=True,
null=True,
max_length=255
)
animate_title = models.BooleanField(
verbose_name=_('Animate Title'),
default=False,
)
title = models.CharField(
verbose_name=_('title'),
max_length=255,
blank=True,
null=True
)
carousel_content = HTMLField(
verbose_name=_('Content'),
blank=True,
default='',
help_text=_('Content may also be added using child plugins.'),
)
background_color = RGBColorField(
verbose_name=_('Background Color'),
blank=True,
null=True
)
link = models.CharField(verbose_name=_('Link'), blank=True, max_length=2040)
tag_type = TagTypeField()
def __str__(self):
return str(self.pk)
def clean(self):
super(AbstractLink, self).clean()
def get_link(self):
return AbstractLink.get_link(self)
def get_short_description(self):
image_text = content_text = ''
if self.carousel_image_id:
if self.carousel_image.name:
image_text = self.carousel_image.name
elif self.carousel_image.original_filename \
and os.path.split(self.carousel_image.original_filename)[1]:
image_text = os.path.split(self.carousel_image.original_filename)[1]
else:
image_text = 'Image'
if self.carousel_content:
text = strip_tags(self.carousel_content).strip()
if len(text) > 100:
content_text = '{}...'.format(text[:100])
else:
content_text = '{}'.format(text)
if image_text and content_text:
return '{} ({})'.format(image_text, content_text)
else:
return image_text or content_text
|
the-stack_0_20127 | # Test drop on input
import FWCore.ParameterSet.Config as cms
process = cms.Process("TESTDROPONINPUT")
process.options = cms.untracked.PSet(
numberOfStreams = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:testThinningTest1.root'),
inputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer_*_*',
'drop *_thinningThingProducerD_*_*',
'drop *_thinningThingProducerH_*_*',
'drop *_thinningThingProducerI_*_*',
'drop *_thinningThingProducerJ_*_*',
'drop *_thinningThingProducerK_*_*',
'drop *_thinningThingProducerL_*_*',
'drop *_aliasM_*_*',
'drop *_aliasN_*_*'
),
dropDescendantsOfDroppedBranches = cms.untracked.bool(False)
)
process.testA = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerA'),
associationTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
parentWasDropped = cms.bool(True),
expectedThinnedContent = cms.vint32(0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedValues = cms.vint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.testB = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerA'),
thinnedTag = cms.InputTag('thinningThingProducerB'),
associationTag = cms.InputTag('thinningThingProducerB'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedThinnedContent = cms.vint32(0, 1, 2, 3),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3),
expectedValues = cms.vint32(0, 1, 2, 3)
)
process.testC = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerA'),
thinnedTag = cms.InputTag('thinningThingProducerC'),
associationTag = cms.InputTag('thinningThingProducerC'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedThinnedContent = cms.vint32(4, 5, 6, 7),
expectedIndexesIntoParent = cms.vuint32(4, 5, 6, 7),
expectedValues = cms.vint32(4, 5, 6, 7)
)
process.testD = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerD'),
associationTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerD'),
parentWasDropped = cms.bool(True),
thinnedWasDropped = cms.bool(True),
expectedIndexesIntoParent = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18),
expectedValues = cms.vint32(10, 11, 12, 13, 14, 15, 16, 17, -1)
)
process.testE = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerD'),
thinnedTag = cms.InputTag('thinningThingProducerE'),
associationTag = cms.InputTag('thinningThingProducerE'),
trackTag = cms.InputTag('trackOfThingsProducerE'),
parentWasDropped = cms.bool(True),
expectedThinnedContent = cms.vint32(10, 11, 12, 13, 14),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3, 4),
expectedValues = cms.vint32(10, 11, 12, 13, 14)
)
process.testF = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerD'),
thinnedTag = cms.InputTag('thinningThingProducerF'),
associationTag = cms.InputTag('thinningThingProducerF'),
trackTag = cms.InputTag('trackOfThingsProducerF'),
parentWasDropped = cms.bool(True),
expectedThinnedContent = cms.vint32(14, 15, 16, 17),
expectedIndexesIntoParent = cms.vuint32(4, 5, 6, 7),
expectedValues = cms.vint32(14, 15, 16, 17)
)
process.testG = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerG'),
associationTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerG'),
parentWasDropped = cms.bool(True),
expectedThinnedContent = cms.vint32(20, 21, 22, 23, 24, 25, 26, 27, 28),
expectedIndexesIntoParent = cms.vuint32(20, 21, 22, 23, 24, 25, 26, 27, 28),
expectedValues = cms.vint32(20, 21, 22, 23, 24, 25, 26, 27, 28)
)
process.testH = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerG'),
thinnedTag = cms.InputTag('thinningThingProducerH'),
associationTag = cms.InputTag('thinningThingProducerH'),
trackTag = cms.InputTag('trackOfThingsProducerH'),
thinnedWasDropped = cms.bool(True),
expectedParentContent = cms.vint32( 20, 21, 22, 23, 24, 25, 26, 27, 28),
associationShouldBeDropped = cms.bool(True),
expectedValues = cms.vint32(20, 21, 22, 23)
)
process.testI = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerG'),
thinnedTag = cms.InputTag('thinningThingProducerI'),
associationTag = cms.InputTag('thinningThingProducerI'),
trackTag = cms.InputTag('trackOfThingsProducerI'),
thinnedWasDropped = cms.bool(True),
associationShouldBeDropped = cms.bool(True),
expectedParentContent = cms.vint32( 20, 21, 22, 23, 24, 25, 26, 27, 28),
expectedValues = cms.vint32(24, 25, 26, 27)
)
process.testJ = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerJ'),
associationTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerJ'),
parentWasDropped = cms.bool(True),
thinnedWasDropped = cms.bool(True),
associationShouldBeDropped = cms.bool(True),
expectedValues = cms.vint32(-1, -1, -1, -1, -1, -1, -1, -1, -1)
)
process.testK = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerJ'),
thinnedTag = cms.InputTag('thinningThingProducerK'),
associationTag = cms.InputTag('thinningThingProducerK'),
trackTag = cms.InputTag('trackOfThingsProducerK'),
parentWasDropped = cms.bool(True),
thinnedWasDropped = cms.bool(True),
associationShouldBeDropped = cms.bool(True),
expectedValues = cms.vint32(-1, -1, -1, -1)
)
process.testL = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerJ'),
thinnedTag = cms.InputTag('thinningThingProducerL'),
associationTag = cms.InputTag('thinningThingProducerL'),
trackTag = cms.InputTag('trackOfThingsProducerL'),
parentWasDropped = cms.bool(True),
thinnedWasDropped = cms.bool(True),
associationShouldBeDropped = cms.bool(True),
expectedValues = cms.vint32(-1, -1, -1, -1)
)
process.testM = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerM'),
associationTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerM'),
parentWasDropped = cms.bool(True),
thinnedWasDropped = cms.bool(True),
expectedIndexesIntoParent = cms.vuint32(40, 41, 42, 43, 44, 45, 46, 47, 48),
expectedValues = cms.vint32(-1, -1, -1, -1, 44, 45, 46, 47, -1)
)
process.testN = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerM'),
thinnedTag = cms.InputTag('thinningThingProducerN'),
associationTag = cms.InputTag('thinningThingProducerN'),
trackTag = cms.InputTag('trackOfThingsProducerN'),
parentWasDropped = cms.bool(True),
thinnedWasDropped = cms.bool(True),
associationShouldBeDropped = cms.bool(True),
expectedValues = cms.vint32(-1, -1, -1, -1)
)
process.testO = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerM'),
thinnedTag = cms.InputTag('aliasO'),
associationTag = cms.InputTag('thinningThingProducerO'),
trackTag = cms.InputTag('trackOfThingsProducerO'),
thinnedIsAlias = cms.bool(True),
parentWasDropped = cms.bool(True),
expectedThinnedContent = cms.vint32(44, 45, 46, 47),
expectedIndexesIntoParent = cms.vuint32(4, 5, 6, 7),
expectedValues = cms.vint32(44, 45, 46, 47)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testThinningTestDropOnInput.root')
)
process.p = cms.Path(process.testA *
process.testB *
process.testC *
process.testD *
process.testE *
process.testF *
process.testG *
process.testH *
process.testI *
process.testJ *
process.testK *
process.testL *
process.testM *
process.testN *
process.testO
)
process.endPath = cms.EndPath(process.out)
|
the-stack_0_20128 | import sys
import prompt_toolkit
from prompt_toolkit.formatted_text import HTML
import pypager
python_version = sys.version_info
ptk_version = prompt_toolkit.__version__
pypager_version = pypager.__version__
HELP = (
HTML(
"""
<title>SUMMARY OF COMMANDS</title>
<keys> h H </keys> Display this help.
<keys> q Q ZZ </keys> Exit.
<line>------------------------------------------------------</line>
<subtitle> Moving </subtitle>
<keys>e ^E j ^N CR </keys> Forward one line.
<keys>y ^Y k ^K ^P </keys> Backward one line.
<keys>f ^F ^V SPACE </keys> Forward one window.
<keys>b ^B ESC-v </keys> Backward one window.
<keys>d ^D </keys> Forward one half-window.
<keys>u ^U </keys> Backward one half-window.
<keys>ESC-) RightArrow </keys> Left one half screen width.
<keys>ESC-( LeftArrow </keys> Right one half screen width.
<keys>F </keys> Forward forever; like "tail -f"
<keys>r R ^R ^L </keys> Repaint screen.
<subtitle> SEARCHING </subtitle>
<keys>/pattern </keys> Search forward.
<keys>?pattern </keys> Search backward.
<keys>n </keys> Repeat previous search.
<keys>N </keys> Repeat previous search in reverse direction.
<keys>ESC-u </keys> Undo (toggle) search highlighting.
<subtitle> JUMPING </subtitle>
<keys> g < ESC-<</keys> Go to the first line in file.
<keys> G > ESC-></keys> Go to the last line in file.
<keys>m<letter> </keys> Mark the current position with <letter>
<keys>'<letter> </keys> Go to a previously marked position.
<keys>^X^X </keys> Same as <keys>'.</keys>
A mark is any upper-case or lower-case letter.
Certain marks are predefined.
<keys>^</keys> means beginning of the file
<keys>$</keys> means end of the file
<subtitle> CHANGING FILES </subtitle>
<keys>:e </keys> Examine a new file.
<keys>^E^V </keys> Same as :e.
<keys>:n </keys> Examine the next file from the command line.
<keys>:p </keys> Examine the previous file from the command line.
<keys>:d </keys> Delete the current file from the command line list.
<keys>= ^G :f </keys> Print current file name.
<subtitle> Extras </subtitle>
<keys>w </keys> Enable/disable <b>line wrapping</b>.
<subtitle> About Pypager </subtitle>
Pypager is a <u>prompt_toolkit</u> application.
- Pypager version: <version>%s</version>
- Python version: <version>%s.%s.%s</version>
- prompt_toolkit version: <version>%s</version>
"""
)
% (
pypager_version,
python_version[0],
python_version[1],
python_version[2],
ptk_version,
)
)
|
the-stack_0_20129 | """
rorolite
========
rorolite is a command-line tool to deploy ML applications written in Python to your own server with a single command. It is an open-source tool licensed under Apache license.
The interface of ``rorolite`` is based on the interface of `rorodata platform <http://rorodata.com>`_. While ``rorolite`` is limited to running programs on already created server, the [rorodata platform][rorodata] allows allocating compute resources on demand and provides more tools to streamline data science.
Currently ``rorolite`` is limited to deploying one project per server.
See `<https://github.com/rorodata/rorolite>`_ for more details.
"""
from setuptools import setup, find_packages
__version__ = '0.2.2'
setup(
name='rorolite',
version=__version__,
author='rorodata',
author_email='[email protected]',
description='Command-line tool to deploy ML applications to your own server with a single command',
long_description=__doc__,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'click==6.7',
'Fabric3>=1.13.1.post1',
'firefly-python>=0.1.9'
],
entry_points='''
[console_scripts]
rorolite=rorolite.main:main
''',
)
|
the-stack_0_20130 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 13:22:59 2020
@author: saksh
"""
import numpy as np
np.random.seed(1337)
import tensorflow as tf
import pandas as pd
from matplotlib.pyplot import *
from ml_pap import *
matplotlib.pyplot.style.use('classic')
"""
All index series are imported and aligned (reindexed) to the S&P 500 date index
"""
sp500 = pd.read_csv('^GSPC.csv', header = 0, index_col = 'Date')
sp500.index = pd.to_datetime(sp500.index, format = '%d-%m-%y')
sp500 = sp500[1:]
nifty = pd.read_csv('^NSEI.csv', header = 0, index_col = 'Date')
nifty.index = pd.to_datetime(nifty.index, format = '%d-%m-%y')
nifty = nifty.reindex(index = sp500.index, method = 'bfill')
nifty.fillna(method = 'bfill', inplace=True)
sing_sti = pd.read_csv('^sti_d.csv', header = 0, index_col = 'Date')
sing_sti.index = pd.to_datetime(sing_sti.index, format = '%Y-%m-%d')
sing_sti = sing_sti.reindex(index = sp500.index, method = 'bfill')
sing_sti.fillna(method = 'bfill', inplace=True)
uk_100 = pd.read_csv('^ukx_d.csv', header = 0, index_col = 'Date')
uk_100.index = pd.to_datetime(uk_100.index, format = '%Y-%m-%d')
uk_100 = uk_100.reindex(index = sp500.index, method = 'bfill')
uk_100.fillna(method = 'bfill', inplace=True)
hangseng = pd.read_csv('^hsi_d.csv', header = 0, index_col = 'Date')
hangseng.index = pd.to_datetime(hangseng.index, format = '%Y-%m-%d')
hangseng = hangseng.reindex(index = sp500.index, method = 'bfill')
hangseng.fillna(method = 'bfill', inplace=True)
nikkei = pd.read_csv('^nkx_d.csv', header = 0, index_col = 'Date')
nikkei.index = pd.to_datetime(nikkei.index, format = '%Y-%m-%d')
nikkei = nikkei.reindex(index = sp500.index, method = 'bfill')
nikkei.fillna(method = 'bfill', inplace=True)
shanghai_comp = pd.read_csv('^shc_d.csv', header = 0, index_col = 'Date')
shanghai_comp.index = pd.to_datetime(shanghai_comp.index, format = '%Y-%m-%d')
shanghai_comp = shanghai_comp.reindex(index = sp500.index, method = 'bfill')
shanghai_comp.fillna(method = 'bfill', inplace=True)
df = pd.DataFrame(index = sp500.index)
df['nifty'] = nifty['Close']
df['sing_sti'] = sing_sti['Close']
df['hangseng'] = hangseng['Close']
df['nikkei'] = nikkei['Close']
df['shanghai_comp'] = shanghai_comp['Close']
df['sp500'] = sp500['Close']
df['uk_100'] = uk_100['Close']
data_cache = df.copy()
data_cache.dropna(inplace = True)
"""
Run this block to include VAR residuals in input data
"""
for label in data_cache.columns[1:]:
resids = var_resids('nifty', label, data_cache = data_cache)
data_cache[str.format("nifty_var_%s"%(label))] = resids
data_cache.dropna(inplace = True)
"""
Set model targets
"""
data_cache = data_cache[-3090:-90]
data_cache['nifty_volatility'] = np.log(data_cache['nifty']/data_cache['nifty'].shift(1))**2
data_cache.dropna(inplace = True)
data_cache['targets'] = data_cache['nifty_volatility'].shift(-1)
data_cache.dropna(inplace = True)
"""
Split datasets
Use "dense" for SVR predictions
Returns the scaler used for scaling the output variables
"""
X_train, X_test, y_train, y_test, output_scaler = make_datasets(data_cache, model_name = 'dense')
"""
Run model
Inverse transform test targets and predictions
"""
result = svr_model(X_train, y_train, {'C' : [1,10]})
y_pred = result.predict(X_test)
y_test = y_test.reshape(len(y_test), 1)
y_pred = y_pred.reshape(len(y_pred), 1)
y_pred = output_scaler.inverse_transform(y_pred)
y_test = output_scaler.inverse_transform(y_test)
"""
RMSE of inverse transformed variables
"""
m = tf.metrics.RootMeanSquaredError()
m.update_state(y_test, np.abs(y_pred))
transformed = m.result().numpy()
print("RMSE of transformed variables: %d", transformed)
df_plot = make_save_plot(data_cache.index, y_test, y_pred) |
the-stack_0_20131 | '''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_shell_macosx_commands.py
'''
import os
import signal
import subprocess
import xml.sax.saxutils
import shlex
import tempfile
import pathlib
__all__ = ('setupCommands', 'getTerminalProgramList', 'getFileBrowserProgramList'
,'guiDiffFiles', 'shellDiffFiles', 'editFile'
,'shellOpen', 'commandShell', 'fileBrowser')
__sigchld_handler_installed = False
def setupCommands():
# install the sig child handler to get rid of the zombie processes
global __sigchld_handler_installed
if not __sigchld_handler_installed:
signal.signal( signal.SIGCHLD, __sigchld_handler )
__sigchld_handler_installed = True
def __sigchld_handler( signum, frame ):
try:
while True:
pid, status = os.waitpid( -1, os.WNOHANG )
if pid == 0:
break
except OSError:
pass
def getTerminalProgramList():
return ['Terminal', 'iTerm2']
def getFileBrowserProgramList():
return ['Finder']
def guiDiffFiles( app, args ):
__run_command( app, app.prefs.getDiffTool().gui_diff_tool, args )
def shellDiffFiles( app, args ):
return __run_command_with_output( app, app.prefs.getDiffTool().shell_diff_tool, args )
def editFile( app, working_dir, all_filenames ):
app.log.infoheader( T_('Edit %s') % (' '.join( [str(name) for name in all_filenames] ),) )
all_filenames = [str(path) for path in all_filenames]
p = app.prefs.editor
if p.program:
if p.options:
cmd = p.program
args = shlex.split( p.options ) + all_filenames
else:
cmd = p.program
args = all_filenames
else:
cmd = '/usr/bin/open'
args = ['-e'] + all_filenames
cur_dir = os.getcwd()
try:
os.chdir( str(working_dir) )
__run_command( app, cmd, args )
finally:
os.chdir( cur_dir )
def shellOpen( app, working_dir, all_filenames ):
app.log.infoheader( T_('Open %s') % (' '.join( [str(name) for name in all_filenames] ),) )
all_filenames = [str(path) for path in all_filenames]
cur_dir = os.getcwd()
try:
os.chdir( str(working_dir) )
__run_command( app, u'/usr/bin/open', all_filenames )
finally:
os.chdir( cur_dir )
def commandShell( app, working_dir ):
app.log.infoheader( 'Shell in %s' % (working_dir,) )
p = app.prefs.shell
if p.terminal_program == 'iTerm2':
commandShell_iTerm2( app, working_dir )
else:
commandShell_Terminal( app, working_dir )
def __titleFromPath( working_dir ):
title = []
try:
rel_path = working_dir.relative_to( os.environ['HOME'] )
except ValueError:
rel_path = working_dir
empty = pathlib.Path('.')
while rel_path != empty:
title.append( rel_path.name )
rel_path = rel_path.parent
return ' '.join( title )
def commandShell_iTerm2( app, working_dir ):
p = app.prefs.shell
# calc a title that is leaf to root so that the leaf shows up in a task bar first
title = __titleFromPath( working_dir )
commands = u'cd "%s"' % (str(working_dir).replace( '"', '\\\\"' ).replace( '$', '\\\\$' ),)
init_cmd = p.terminal_init
if len( init_cmd ) > 0:
commands = commands + u';export WB_WD="$PWD"; . "%s"' % (init_cmd.replace( '"', '\\\\"' ).replace( '$', '\\\\$' ),)
contents = u'''
tell application "iTerm"
-- make a new terminal
create window with default profile command "/bin/bash -l"
tell current window
tell current session
set name to "%(title)s"
-- start with space to prevent the shell remembering the command
write text " %(commands)s"
end tell
end tell
end tell
''' % {'title': title.replace( '"', '\\"' )
,'commands': commands.replace( '"', '\\"' )}
f = tempfile.NamedTemporaryFile( mode='w', delete=False, prefix='tmp-wb-shell', suffix='.scpt', encoding='utf-8' )
app.all_temp_files.append( f.name )
f.write( contents )
f.close()
__run_command( app, u'/usr/bin/osascript', [f.name] )
def commandShell_Terminal( app, working_dir ):
p = app.prefs.shell
# calc a title that is leaf to root so that the leaf shows up in a task bar first
title = __titleFromPath( working_dir )
commands = u"cd '%s'" % (working_dir,)
if len( p.terminal_init ) > 0:
commands = commands + ";. '%s'\n" % (p.terminal_init,)
contents = u'''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>WindowSettings</key>
<array>
<dict>
<key>CustomTitle</key>
<string>%s</string>
<key>ExecutionString</key>
<string>%s</string>
</dict>
</array>
</dict>
</plist>
''' % (xml.sax.saxutils.escape( title )
,xml.sax.saxutils.escape( commands ))
f = tempfile.NamedTemporaryFile( mode='w', delete=False, prefix='tmp-wb-term', suffix='.term', encoding='utf-8' )
app.all_temp_files.append( f.name )
f.write( contents )
f.close()
__run_command( app, u'/usr/bin/open', [f.name] )
def fileBrowser( app, working_dir ):
app.log.infoheader( 'Browse files in %s' % (working_dir,) )
__run_command( app, u'/usr/bin/open', [u'-a', u'Finder', working_dir] )
def __run_command( app, cmd, args ):
app.log.info( '%s %s' % (cmd, ' '.join( [str(arg) for arg in args] ) ) )
env = os.environ.copy()
cmd = asUtf8( cmd )
args = [asUtf8( str(arg) ) for arg in args]
os.spawnvpe( os.P_NOWAIT, cmd, [cmd]+args, env )
def __run_command_with_output( app, cmd, args ):
err_prefix = u'error running %s %s' % (cmd, ' '.join( args ))
try:
cmd = asUtf8( cmd )
args = [asUtf8( str(arg) ) for arg in args]
proc = subprocess.Popen(
[cmd]+args,
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output = proc.stdout.read()
proc.wait()
return output
except EnvironmentError as e:
return '%s - %s' % (err_prefix, str(e))
def asUtf8( s ):
if type( s ) == str:
return s.encode( 'utf-8' )
else:
return s
|
the-stack_0_20132 | import numpy as np
from sklearn import mixture
gmm_max_iter = 10000
class GMM(object):
def __init__(self, X):
self.X = X
self.gmm_result = GMM.build_gmm(self.X)
@staticmethod
def build_gmm(X, n_components=10):
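"""Fit a Gaussian mixture to X (the number of components is chosen by BIC
inside _build_gmm) and summarise it: per-sample component labels plus, for
every component, its mean, covariance, covariance pseudo-inverse,
log-determinant, soft count and (log-)weight."""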
if not isinstance(X, np.ndarray):
X = np.array(X)
gmm = GMM._build_gmm(X, 'BIC', n_components=n_components)
predict = gmm.predict(X)
labels = np.unique(predict)
sinvs = {label: np.linalg.pinv(gmm.covariances_[label]) for label in labels}
log_dets = {label: np.linalg.slogdet(gmm.covariances_[label])[1] for label in labels}
return {
'classes': gmm.predict(X),
'distribution': {
i: {
'mu': gmm.means_[label],
's': gmm.covariances_[label],
'log_det': log_dets[label],
'sinv': sinvs[label],
'sp': gmm.weights_[label] * len(X),
'n': len(X),
'weight': gmm.weights_[label],
'log_weight': np.log(gmm.weights_[label]),
} for i, label in enumerate(labels)
},
'labels': labels,
}
@staticmethod
def _build_gmm(X, method, n_components=10):
if method not in ['bayesian', 'BIC']:
raise NotImplementedError(f"Please implement the method {method}")
if method == 'bayesian':
gmm = mixture.BayesianGaussianMixture(n_components=n_components, weight_concentration_prior=1e-2,
mean_precision_prior=1e-2, max_iter=gmm_max_iter, n_init=5)
gmm.fit(X)
return gmm
if method == 'BIC':
models = []
max_k = min(n_components, len(X) // 25 + 1)
for k in range(1, max_k+1):
model = mixture.GaussianMixture(n_components=k, max_iter=gmm_max_iter, n_init=5, random_state=42)
model.fit(X)
models.append(model)
BICs = [model.bic(X) for model in models]
return models[np.argmin(BICs)] |
the-stack_0_20133 | """Entity.
This module implements the class Entity as given by the Marvel API
(https://developer.marvel.com).
The class is implemented as a magic class with its attributes
automatically created at instantiation.
"""
import json
class Entity():
"""A representation of the different entities a result from api
request can be.
Uses meta-programming to create the attributes on the fly
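Illustrative example (the attribute values below are made up, not real
Marvel data):
    entity = Entity('character', {'id': 1009610, 'name': 'Spider-Man'})
    entity.get('name')  # -> 'Spider-Man'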
"""
def __init__(self, entity_type: str, attributes: dict):
self.__dict__ = {}
self.entity_type = entity_type
self.__init_attribs(attributes)
def __init_attribs(self, attributes: dict):
dictionary_type_attributes = ['comics', 'series', 'stories',
'events', 'urls']
images = ['thumbnail', 'images']
entity_summary_list = ['collectedIssues', 'collections', 'variants']
for key, value in attributes.items():
if key not in dictionary_type_attributes:
self.__dict__[key] = value
elif key in images:
self.__dict__[key] = []
if key == 'thumbnail':
self.__process_image(key, value)
else:
self.__process_image_list(key, value)
elif key in ['urls', 'dates', 'prices']:
self.__dict__[key] = {}
for item in value:
self.__dict__[key][item['type']] = item[key[:-1]]
else:
self.__dict__[key] = []
if (key == 'series' and self.entity_type == 'comic')\
or key == 'originalIssue':
self.__process_entity_summary(key, value)
elif key in (entity_summary_list + dictionary_type_attributes):
self.__process_resource_list(key, value['items'])
def __process_image(self, key: str, image: dict):
image_uri = image['path'] + '/detail.' + image['extension']
self.__dict__[key].append(image_uri)
def __process_image_list(self, key: str, images: list):
for image in images:
self.__process_image(key, image)
def __process_entity_summary(self, key: str, attribute: dict):
# split resourceURI and get the last part of the string
item_id = int(attribute['resourceURI'].split('/')[-1])
self.__dict__[key].append(item_id)
def __process_resource_list(self, key: str, items: list):
for item in items:
self.__process_entity_summary(key, item)
def get(self, attribute: str):
invalid_argument = 'Attribute {!r} is not a member of Entity'.format(
attribute)
if attribute not in self.__dict__:
raise AttributeError(invalid_argument)
else:
return self.__dict__[attribute]
def __eq__(self, other):
for key, value in self.__dict__.items():
if value != other.__dict__[key]:
return False
return True
|
the-stack_0_20134 | import warnings
from contextlib import suppress
from dataclasses import fields
from distutils.util import strtobool
from typing import Any, Callable, ClassVar, Dict, Optional, Tuple
class DictMixin:
"""
A Mixin that provides dict-like behavior for Dataclasses.
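Minimal usage sketch (the Point dataclass is only an illustration and
assumes ``from dataclasses import dataclass``):
    @dataclass
    class Point(DictMixin):
        x: int
        y: int
    p = Point(1, 2)
    p["x"]          # -> 1, same as p.x
    list(p.keys())  # -> ['x', 'y']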
"""
__slots__ = tuple()
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def __setitem__(self, key, value):
setattr(self, key, value)
def __delitem__(self, key):
warnings.warn(
"Deleting a Dataclass' field! This can cause unexpected behavior "
"when invoking the Dataclass' representation, comparisson or other "
"function which may 'automatically' utilize this field!",
SyntaxWarning,
)
try:
delattr(self, key)
except AttributeError:
raise KeyError(key) from None
def keys(self):
return (obj_field.name for obj_field in fields(self))
class FieldMetaConverterMixin:
"""
A Mixin that provides field conversions based on the provided field metadata.
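Illustrative sketch (the Config dataclass is an assumption, not part of this
module, and presumes ``from dataclasses import dataclass, field``); the
converter callable is looked up under the "converter" metadata key:
    @dataclass
    class Config(FieldMetaConverterMixin):
        port: int = field(default="8080", metadata={"converter": int})
    Config().port  # -> 8080, converted from the string default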
"""
__slots__ = tuple()
_meta_cnv: ClassVar[str] = "converter"
def __post_init__(self):
for obj_field in fields(self):
field_name = obj_field.name
with suppress(KeyError):
converter = obj_field.metadata[self._meta_cnv]
setattr(self, field_name, converter(getattr(self, field_name)))
class FieldTypeConverterMixin:
"""
A Mixin that provides field conversions based on the field type.
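Illustrative sketch (the Flags dataclass is an assumption, not part of this
module); only fields whose metadata sets "convert" are touched, and a str
value for a bool-typed field goes through the strtobool entry of _extra_cnv:
    @dataclass
    class Flags(FieldTypeConverterMixin):
        debug: bool = field(default="yes", metadata={"convert": True})
    Flags().debug  # -> True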
"""
__slots__ = tuple()
_extra_cnv: ClassVar[Dict[Tuple[Any, Any], Callable[[Any], Any]]] = {
(str, bool): lambda x: bool(strtobool(x)),
}
def __post_init__(self):
for obj_field in fields(self):
if not obj_field.metadata.get("convert"):
continue
field_name = obj_field.name
field_type = obj_field.type
value = getattr(self, field_name)
if isinstance(value, field_type):
continue
true_type = type(value)
converter = self._extra_cnv.get((true_type, field_type))
if converter is not None:
value = converter(value)
else:
value = field_type(value)
setattr(self, field_name, value)
|
the-stack_0_20135 | from .tags_getter import get_tags_values_latest, get_tags_values_range
from .tags_getter import LIMITS_BASIC, LIMITS_DETAILED, LIMITS_NONE
from django.core.exceptions import ObjectDoesNotExist
from datacon.models import ViewSet
from uuid import UUID
from datetime import datetime
# TODO: DataSet refactoring
# Result of multiple tags query:
#
# time_to_obtain - total time between start and end of data getting
# tag_count - total tag count
# tags - list of results (see below)
# error
#
#
# tags is a list of dictionaries:
#
# name
# display_name
# units
# error
# readings - list of dictionaries:
# ----
# reading
# error (if not only_valid) - TODO
# timestamp_receive
# timestamp_packet
# time_to_obtain (only if diag_info)
# trends - list of dictionaries (for each seconds value in get_trends)
# ----
# period_seconds
# number
# direction
# slope
# peak_max
# reading
# time
# peak_min
# reading
# time
# average
# reading
# ----
# limits - list of dictionaries (only if get_limits >= LIMITS_BASIC)
# ----
# state
# details (only if get_limits >= LIMITS_DETAILED)
# ----
# lower_critical (if exists)
# reading
# status
# lower (if exists)
# reading
# status
# strict (if exists)
# reading
# status
# upper (if exists)
# reading
# status
# upper_critical (if exists)
# reading
# status
# ----
# ----
# ----
# trend
# -----
# period_seconds
# number
# direction
# slope
# peak_max
# reading
# time
# peak_min
# reading
# time
# average
# reading
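#
# Illustrative shape only (tag names and values below are invented, and the
# optional keys depend on the only_valid / get_limits / get_trends / diag_info
# flags described above):
#
# {
#     "time_to_obtain": 0.042,
#     "tag_count": 1,
#     "error": None,
#     "tags": [
#         {
#             "name": "boiler_1.temperature",
#             "display_name": "Boiler 1 temperature",
#             "units": "degC",
#             "error": None,
#             "readings": [
#                 {
#                     "reading": 81.4,
#                     "timestamp_receive": "2020-01-01T12:00:00",
#                     "timestamp_packet": "2020-01-01T11:59:59",
#                     "trends": [],
#                     "limits": [{"state": "normal"}],
#                 }
#             ],
#         }
#     ],
# }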
def _get_all_tags(viewset_id):
v_set = None
vid = UUID(viewset_id)
try:
v_set = ViewSet.objects.get(uid=viewset_id)
except ObjectDoesNotExist:
return None
out_tags = []
out_tags.extend(v_set.tags_numeric.all())
out_tags.extend(v_set.tags_discrete.all())
out_tags.extend(v_set.tags_text.all())
return out_tags
def get_viewset_latest(viewset_id,
only_valid=True,
round_numerics=2,
get_limits=LIMITS_BASIC,
get_trends=[],
diag_info=False):
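"""Return the most recent readings for every tag attached to the ViewSet.
Collects the viewset's numeric, discrete and text tags, delegates to
get_tags_values_latest() and adds the total time_to_obtain; if the
ViewSet ID is unknown, an empty tag list with an error message is returned.
"""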
t_start = datetime.now()
result = {}
tags = _get_all_tags(viewset_id)
if tags is None:
result = {
"tags": [],
"tag_count": 0,
"error": "Invalid ViewSet ID"
}
else:
result = get_tags_values_latest(tags, only_valid, round_numerics,
get_limits, get_trends, diag_info)
result["time_to_obtain"] = (datetime.now() - t_start).total_seconds()
return result
def get_viewset_range(viewset_id,
date_start=None,
date_end=None,
number=50,
only_valid=True,
round_numerics=2,
get_limits=LIMITS_BASIC,
get_trends=[],
diag_info=False,
bound_earlier=True,
bound_later=False):
t_start = datetime.now()
result = {}
tags = _get_all_tags(viewset_id)
if tags is None:
result = {
"tags": [],
"tag_count": 0,
"error": "Invalid ViewSet ID"
}
else:
result = get_tags_values_range(tags, date_start, date_end, number,
only_valid, round_numerics, get_limits,
get_trends, diag_info, bound_earlier,
bound_later)
result["time_to_obtain"] = (datetime.now() - t_start).total_seconds()
return result
|
the-stack_0_20136 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pushes native libraries to a device.
"""
import optparse
import os
import sys
from util import build_device
from util import build_utils
from util import md5_check
BUILD_ANDROID_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(BUILD_ANDROID_DIR)
import devil_chromium
from pylib import constants
def DoPush(options):
libraries = build_utils.ParseGnList(options.libraries)
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
serial_number = device.GetSerialNumber()
# A list so that it is modifiable in Push below.
needs_directory = [True]
for lib in libraries:
device_path = os.path.join(options.device_dir, lib)
host_path = os.path.join(options.libraries_dir, lib)
def Push():
if needs_directory:
device.RunShellCommand(
['mkdir', '-p', options.device_dir], check_return=True)
needs_directory[:] = [] # = False
device.PushChangedFiles([(os.path.abspath(host_path), device_path)])
record_path = '%s.%s.push.md5.stamp' % (host_path, serial_number)
md5_check.CallAndRecordIfStale(
Push,
record_path=record_path,
input_paths=[host_path],
input_strings=[device_path])
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
parser.add_option('--libraries-dir',
help='Directory that contains stripped libraries.')
parser.add_option('--device-dir',
help='Device directory to push the libraries to.')
parser.add_option('--libraries',
help='List of native libraries.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--output-directory',
help='The output directory.')
options, _ = parser.parse_args(args)
required_options = ['libraries', 'device_dir', 'libraries_dir']
build_utils.CheckOptions(options, parser, required=required_options)
devil_chromium.Initialize(
output_directory=os.path.abspath(options.output_directory))
DoPush(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_0_20137 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SaferScanner is just like re.Scanner, but it neuters any grouping in the lexicon
# regular expressions and throws an error on group references, named groups, or
# regex in-pattern flags. Any of those can break correct operation of Scanner.
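#
# Minimal usage sketch (the lexicon below is illustrative only). SaferScanner
# keeps re.Scanner's interface, so tokens come back from .scan() as usual:
#
#   scanner = SaferScanner([
#       (r'[0-9]+', lambda s, tok: ('INT', tok)),
#       (r'\s+', None),
#   ])
#   tokens, remainder = scanner.scan('12 34')
#   # tokens == [('INT', '12'), ('INT', '34')], remainder == ''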
import re
import six
from sre_constants import BRANCH, SUBPATTERN, GROUPREF, GROUPREF_IGNORE, GROUPREF_EXISTS
from sys import version_info
class SaferScannerBase(re.Scanner):
@classmethod
def subpat(cls, phrase, flags):
return cls.scrub_sub(re.sre_parse.parse(phrase, flags), flags)
@classmethod
def scrub_sub(cls, sub, flags):
scrubbedsub = []
seqtypes = (type(()), type([]))
for op, arg in sub.data:
if type(arg) in seqtypes:
arg = [cls.scrub_sub(a, flags) if isinstance(a, re.sre_parse.SubPattern) else a
for a in arg]
if op in (BRANCH, SUBPATTERN):
arg = [None] + arg[1:]
if op in (GROUPREF, GROUPREF_IGNORE, GROUPREF_EXISTS):
raise ValueError("Group references not allowed in SaferScanner lexicon")
scrubbedsub.append((op, arg))
if sub.pattern.groupdict:
raise ValueError("Named captures not allowed in SaferScanner lexicon")
if sub.pattern.flags ^ flags:
raise ValueError("RE flag setting not allowed in SaferScanner lexicon (%s)" % (bin(sub.pattern.flags),))
return re.sre_parse.SubPattern(sub.pattern, scrubbedsub)
class Py2SaferScanner(SaferScannerBase):
def __init__(self, lexicon, flags=0):
self.lexicon = lexicon
p = []
s = re.sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(re.sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p) + 1, self.subpat(phrase, flags))),
]))
s.groups = len(p) + 1
p = re.sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.p = p
self.scanner = re.sre_compile.compile(p)
class Py36SaferScanner(SaferScannerBase):
def __init__(self, lexicon, flags=0):
self.lexicon = lexicon
p = []
s = re.sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
gid = s.opengroup()
p.append(re.sre_parse.SubPattern(s, [(SUBPATTERN, (gid, 0, 0, re.sre_parse.parse(phrase, flags))), ]))
s.closegroup(gid, p[-1])
p = re.sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.p = p
self.scanner = re.sre_compile.compile(p)
class Py38SaferScanner(SaferScannerBase):
def __init__(self, lexicon, flags=0):
self.lexicon = lexicon
p = []
s = re.sre_parse.State()
s.flags = flags
for phrase, action in lexicon:
gid = s.opengroup()
p.append(re.sre_parse.SubPattern(s, [(SUBPATTERN, (gid, 0, 0, re.sre_parse.parse(phrase, flags))), ]))
s.closegroup(gid, p[-1])
p = re.sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.p = p
self.scanner = re.sre_compile.compile(p)
SaferScanner = Py36SaferScanner if six.PY3 else Py2SaferScanner
SaferScanner = Py38SaferScanner if version_info >= (3, 8) else SaferScanner
|
the-stack_0_20140 | #!/usr/bin/env python
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import logging
import math
import sys
import chainer
import numpy as np
import six
import torch
import torch.nn.functional as F
import warpctc_pytorch as warp_ctc
from chainer import reporter
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from ctc_prefix_score import CTCPrefixScore
from e2e_asr_common import end_detect
from e2e_asr_common import label_smoothing_dist
CTC_LOSS_THRESHOLD = 10000
CTC_SCORING_RATIO = 1.5
MAX_DECODER_OUTPUT = 5
def to_cuda(m, x):
assert isinstance(m, torch.nn.Module)
device_id = torch.cuda.device_of(next(m.parameters()).data).idx
if device_id == -1:
return x
return x.cuda(device_id)
def lecun_normal_init_parameters(module):
for p in module.parameters():
data = p.data
if data.dim() == 1:
# bias
data.zero_()
elif data.dim() == 2:
# linear weight
n = data.size(1)
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
elif data.dim() == 4:
# conv weight
n = data.size(1)
for k in data.size()[2:]:
n *= k
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
else:
raise NotImplementedError
# get output dim for latter BLSTM
def _get_vgg2l_odim(idim, in_channel=3, out_channel=128):
idim = idim / in_channel
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 1st max pooling
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 2nd max pooling
return int(idim) * out_channel # number of channels
# get output dim for latter BLSTM
def _get_max_pooled_size(idim, out_channel=128, n_layers=2, ksize=2, stride=2):
for _ in range(n_layers):
idim = math.floor((idim - (ksize - 1) - 1) / stride)
return idim # size after the max pooling layers
def linear_tensor(linear, x):
'''Apply linear matrix operation only for the last dimension of a tensor
:param Link linear: Linear link (M x N matrix)
:param Variable x: Tensor (D_1 x D_2 x ... x M matrix)
:return:
:param Variable x: Tensor (D_1 x D_2 x ... x N matrix)
'''
y = linear(x.contiguous().view((-1, x.size()[-1])))
return y.view((x.size()[:-1] + (-1,)))
class Reporter(chainer.Chain):
def report(self, loss_ctc, loss_att, acc, mtl_loss):
reporter.report({'loss_ctc': loss_ctc}, self)
reporter.report({'loss_att': loss_att}, self)
reporter.report({'acc': acc}, self)
logging.info('mtl loss:' + str(mtl_loss))
reporter.report({'loss': mtl_loss}, self)
# TODO(watanabe) merge Loss and E2E: there is no need to make these separately
class Loss(torch.nn.Module):
def __init__(self, predictor, mtlalpha):
super(Loss, self).__init__()
self.mtlalpha = mtlalpha
self.loss = None
self.accuracy = None
self.predictor = predictor
self.reporter = Reporter()
def forward(self, x):
'''Loss forward
:param x:
:return:
'''
self.loss = None
loss_ctc, loss_att, acc = self.predictor(x)
alpha = self.mtlalpha
if alpha == 0:
self.loss = loss_att
loss_att_data = loss_att.data[0]
loss_ctc_data = None
elif alpha == 1:
self.loss = loss_ctc
loss_att_data = None
loss_ctc_data = loss_ctc.data[0]
else:
self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
loss_att_data = loss_att.data[0]
loss_ctc_data = loss_ctc.data[0]
if self.loss.data[0] < CTC_LOSS_THRESHOLD and not math.isnan(self.loss.data[0]):
self.reporter.report(loss_ctc_data, loss_att_data, acc, self.loss.data[0])
else:
logging.warning('loss (=%f) is not correct', self.loss.data)
return self.loss
def pad_list(xs, pad_value=float("nan")):
assert isinstance(xs[0], Variable)
n_batch = len(xs)
max_len = max(x.size(0) for x in xs)
pad = Variable(xs[0].data.new(n_batch, max_len, * xs[0].size()[1:]).zero_() + pad_value)
for i in range(n_batch):
pad[i, :xs[i].size(0)] = xs[i]
return pad
def set_forget_bias_to_one(bias):
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
class E2E(torch.nn.Module):
def __init__(self, idim, odim, args):
super(E2E, self).__init__()
self.etype = args.etype
self.verbose = args.verbose
self.char_list = args.char_list
self.outdir = args.outdir
self.mtlalpha = args.mtlalpha
# below means the last number becomes eos/sos ID
# note that sos/eos IDs are identical
self.sos = odim - 1
self.eos = odim - 1
# subsample info
# +1 means input (+1) and layers outputs (args.elayer)
subsample = np.ones(args.elayers + 1, dtype=np.int)
if args.etype == 'blstmp':
ss = args.subsample.split("_")
for j in range(min(args.elayers + 1, len(ss))):
subsample[j] = int(ss[j])
else:
logging.warning(
'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')
logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
self.subsample = subsample
# label smoothing info
if args.lsm_type:
logging.info("Use label smoothing with " + args.lsm_type)
labeldist = label_smoothing_dist(odim, args.lsm_type, transcript=args.train_label)
else:
labeldist = None
# encoder
self.enc = Encoder(args.etype, idim, args.elayers, args.eunits, args.eprojs,
self.subsample, args.dropout_rate)
# ctc
self.ctc = CTC(odim, args.eprojs, args.dropout_rate)
# attention
if args.atype == 'noatt':
self.att = NoAtt()
elif args.atype == 'dot':
self.att = AttDot(args.eprojs, args.dunits, args.adim)
elif args.atype == 'add':
self.att = AttAdd(args.eprojs, args.dunits, args.adim)
elif args.atype == 'location':
self.att = AttLoc(args.eprojs, args.dunits,
args.adim, args.aconv_chans, args.aconv_filts)
elif args.atype == 'location2d':
self.att = AttLoc2D(args.eprojs, args.dunits,
args.adim, args.awin, args.aconv_chans, args.aconv_filts)
elif args.atype == 'location_recurrent':
self.att = AttLocRec(args.eprojs, args.dunits,
args.adim, args.aconv_chans, args.aconv_filts)
elif args.atype == 'coverage':
self.att = AttCov(args.eprojs, args.dunits, args.adim)
elif args.atype == 'coverage_location':
self.att = AttCovLoc(args.eprojs, args.dunits, args.adim,
args.aconv_chans, args.aconv_filts)
elif args.atype == 'multi_head_dot':
self.att = AttMultiHeadDot(args.eprojs, args.dunits,
args.aheads, args.adim, args.adim)
elif args.atype == 'multi_head_add':
self.att = AttMultiHeadAdd(args.eprojs, args.dunits,
args.aheads, args.adim, args.adim)
elif args.atype == 'multi_head_loc':
self.att = AttMultiHeadLoc(args.eprojs, args.dunits,
args.aheads, args.adim, args.adim,
args.aconv_chans, args.aconv_filts)
elif args.atype == 'multi_head_multi_res_loc':
self.att = AttMultiHeadMultiResLoc(args.eprojs, args.dunits,
args.aheads, args.adim, args.adim,
args.aconv_chans, args.aconv_filts)
else:
logging.error(
"Error: need to specify an appropriate attention archtecture")
sys.exit()
# decoder
self.dec = Decoder(args.eprojs, odim, args.dlayers, args.dunits,
self.sos, self.eos, self.att, self.verbose, self.char_list,
labeldist, args.lsm_weight)
# weight initialization
self.init_like_chainer()
# additional forget-bias init in encoder ?
# for m in self.modules():
# if isinstance(m, torch.nn.LSTM):
# for name, p in m.named_parameters():
# if "bias_ih" in name:
# set_forget_bias_to_one(p)
def init_like_chainer(self):
"""Initialize weight like chainer
chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
however, there are two exceptions as far as I know.
- EmbedID.W ~ Normal(0, 1)
- LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
"""
lecun_normal_init_parameters(self)
# exceptions
# embed weight ~ Normal(0, 1)
self.dec.embed.weight.data.normal_(0, 1)
# forget-bias = 1.0
# https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
for l in six.moves.range(len(self.dec.decoder)):
set_forget_bias_to_one(self.dec.decoder[l].bias_ih)
# x[i]: ('utt_id', {'ilen':'xxx',...}})
def forward(self, data):
'''E2E forward
:param data:
:return:
'''
# utt list of frame x dim
xs = [d[1]['feat'] for d in data]
# remove 0-output-length utterances
tids = [d[1]['tokenid'].split() for d in data]
filtered_index = filter(lambda i: len(tids[i]) > 0, range(len(xs)))
sorted_index = sorted(filtered_index, key=lambda i: -len(xs[i]))
if len(sorted_index) != len(xs):
logging.warning('Target sequences include empty tokenid (batch %d -> %d).' % (
len(xs), len(sorted_index)))
xs = [xs[i] for i in sorted_index]
# utt list of olen
ys = [np.fromiter(map(int, tids[i]), dtype=np.int64)
for i in sorted_index]
ys = [to_cuda(self, Variable(torch.from_numpy(y))) for y in ys]
# subsample frame
xs = [xx[::self.subsample[0], :] for xx in xs]
ilens = np.fromiter((xx.shape[0] for xx in xs), dtype=np.int64)
hs = [to_cuda(self, Variable(torch.from_numpy(xx))) for xx in xs]
# 1. encoder
xpad = pad_list(hs)
hpad, hlens = self.enc(xpad, ilens)
# # 3. CTC loss
if self.mtlalpha == 0:
loss_ctc = None
else:
loss_ctc = self.ctc(hpad, hlens, ys)
# 4. attention loss
if self.mtlalpha == 1:
loss_att = None
acc = None
else:
loss_att, acc = self.dec(hpad, hlens, ys)
return loss_ctc, loss_att, acc
def recognize(self, x, recog_args, char_list, rnnlm=None):
'''E2E beam search
:param x:
:param recog_args:
:param char_list:
:return:
'''
prev = self.training
self.eval()
# subsample frame
x = x[::self.subsample[0], :]
ilen = [x.shape[0]]
h = to_cuda(self, Variable(torch.from_numpy(
np.array(x, dtype=np.float32)), volatile=True))
# 1. encoder
# make a utt list (1) to use the same interface for encoder
h, _ = self.enc(h.unsqueeze(0), ilen)
# calculate log P(z_t|X) for CTC scores
if recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(h).data[0]
else:
lpz = None
# 2. decoder
# decode the first utterance
y = self.dec.recognize_beam(h[0], lpz, recog_args, char_list, rnnlm)
if prev:
self.train()
return y
# ------------- CTC Network --------------------------------------------------------------------------------------------
class _ChainerLikeCTC(warp_ctc._CTC):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens):
is_cuda = True if acts.is_cuda else False
acts = acts.contiguous()
loss_func = warp_ctc.gpu_ctc if is_cuda else warp_ctc.cpu_ctc
grads = torch.zeros(acts.size()).type_as(acts)
minibatch_size = acts.size(1)
costs = torch.zeros(minibatch_size).cpu()
loss_func(acts,
grads,
labels,
label_lens,
act_lens,
minibatch_size,
costs)
# modified only here from original
costs = torch.FloatTensor([costs.sum()]) / acts.size(1)
ctx.grads = Variable(grads)
ctx.grads /= ctx.grads.size(1)
return costs
def chainer_like_ctc_loss(acts, labels, act_lens, label_lens):
"""Chainer like CTC Loss
acts: Tensor of (seqLength x batch x outputDim) containing output from network
labels: 1 dimensional Tensor containing all the targets of the batch in one sequence
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
assert len(labels.size()) == 1 # labels must be 1 dimensional
from torch.nn.modules.loss import _assert_no_grad
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
return _ChainerLikeCTC.apply(acts, labels, act_lens, label_lens)
class CTC(torch.nn.Module):
def __init__(self, odim, eprojs, dropout_rate):
super(CTC, self).__init__()
self.dropout_rate = dropout_rate
self.loss = None
self.ctc_lo = torch.nn.Linear(eprojs, odim)
self.loss_fn = chainer_like_ctc_loss # CTCLoss()
def forward(self, hpad, ilens, ys):
'''CTC forward
:param hs:
:param ys:
:return:
'''
self.loss = None
ilens = Variable(torch.from_numpy(np.fromiter(ilens, dtype=np.int32)))
olens = Variable(torch.from_numpy(np.fromiter(
(x.size(0) for x in ys), dtype=np.int32)))
# zero padding for hs
y_hat = linear_tensor(
self.ctc_lo, F.dropout(hpad, p=self.dropout_rate))
# zero padding for ys
y_true = torch.cat(ys).cpu().int() # batch x olen
# get length info
logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
logging.info(self.__class__.__name__ + ' output lengths: ' + str(olens))
# get ctc loss
# expected shape of seqLength x batchSize x alphabet_size
y_hat = y_hat.transpose(0, 1)
self.loss = to_cuda(self, self.loss_fn(y_hat, y_true, ilens, olens))
logging.info('ctc loss:' + str(self.loss.data[0]))
return self.loss
def log_softmax(self, hpad):
'''log_softmax of frame activations
:param hs:
:return:
'''
return F.log_softmax(linear_tensor(self.ctc_lo, hpad), dim=2)
def mask_by_length(xs, length, fill=0):
assert xs.size(0) == len(length)
ret = Variable(xs.data.new(*xs.size()).fill_(fill))
for i, l in enumerate(length):
ret[i, :l] = xs[i, :l]
return ret
# ------------- Attention Network --------------------------------------------------------------------------------------
class NoAtt(torch.nn.Module):
'''No attention'''
def __init__(self):
super(NoAtt, self).__init__()
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.c = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.c = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
'''NoAtt forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: dummy (does not use)
:param Variable att_prev: dummy (does not use)
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: previous attention weights
:rtype: Variable
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# initialize attention weight with uniform dist.
if att_prev is None:
att_prev = [Variable(enc_hs_pad.data.new(
l).zero_() + (1.0 / l)) for l in enc_hs_len]
# if no bias, 0 0-pad goes 0
att_prev = pad_list(att_prev, 0)
self.c = torch.sum(self.enc_h * att_prev.view(batch, self.h_length, 1), dim=1)
return self.c, att_prev
class AttDot(torch.nn.Module):
'''Dot product attention
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
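Sketch of the computation in forward() below (h_t: encoder frame, d: decoder
state, W_enc/W_dec: the mlp_enc/mlp_dec layers):
    e_t = tanh(W_enc h_t) . tanh(W_dec d)
    a = softmax(scaling * e),  c = sum_t a_t h_t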
'''
def __init__(self, eprojs, dunits, att_dim):
super(AttDot, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
'''AttDot forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: dummy (does not use)
:param Variable att_prev: dummy (does not use)
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: previous attention weight (B x T_max)
:rtype: Variable
'''
batch = enc_hs_pad.size(0)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = torch.tanh(
linear_tensor(self.mlp_enc, self.enc_h))
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
e = torch.sum(self.pre_compute_enc_h * torch.tanh(self.mlp_dec(dec_z)).view(batch, 1, self.att_dim),
dim=2) # utt x frame
w = F.softmax(scaling * e, dim=1)
# weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, w
class AttAdd(torch.nn.Module):
'''Additive attention
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
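Sketch of the score used in forward() below (g, W_enc, W_dec: the
gvec/mlp_enc/mlp_dec layers):
    e_t = g^T tanh(W_enc h_t + W_dec d)
    a = softmax(scaling * e),  c = sum_t a_t h_t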
'''
def __init__(self, eprojs, dunits, att_dim):
super(AttAdd, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
'''AttAdd forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: decoder hidden state (B x D_dec)
:param Variable att_prev: dummy (does not use)
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: previous attention weights (B x T_max)
:rtype: Variable
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# NOTE consider zero padding when compute w.
e = linear_tensor(self.gvec, torch.tanh(
self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
w = F.softmax(scaling * e, dim=1)
# weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, w
class AttLoc(torch.nn.Module):
'''location-aware attention
Reference: Attention-Based Models for Speech Recognition
(https://arxiv.org/pdf/1506.07503.pdf)
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
:param int aconv_chans: # channels of attention convolution
:param int aconv_filts: filter size of attention convolution
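Sketch of the score used in forward() below, where f = conv(a_prev) is the
convolved previous attention distribution:
    e_t = g^T tanh(W_enc h_t + W_dec d + W_att f_t)
    a = softmax(scaling * e),  c = sum_t a_t h_t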
'''
def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
super(AttLoc, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
self.loc_conv = torch.nn.Conv2d(
1, aconv_chans, (1, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.aconv_chans = aconv_chans
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
'''AttLoc forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: decoder hidden state (B x D_dec)
:param Variable att_prev: previous attention weight (B x T_max)
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: previous attention weights (B x T_max)
:rtype: Variable
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
# initialize attention weight with uniform dist.
if att_prev is None:
att_prev = [Variable(enc_hs_pad.data.new(
l).zero_() + (1.0 / l)) for l in enc_hs_len]
# if no bias, 0 0-pad goes 0
att_prev = pad_list(att_prev, 0)
# att_prev: utt x frame -> utt x 1 x 1 x frame -> utt x att_conv_chans x 1 x frame
att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
# att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
att_conv = att_conv.squeeze(2).transpose(1, 2)
# att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
att_conv = linear_tensor(self.mlp_att, att_conv)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# NOTE consider zero padding when compute w.
e = linear_tensor(self.gvec, torch.tanh(
att_conv + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
w = F.softmax(scaling * e, dim=1)
# weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, w
class AttCov(torch.nn.Module):
'''Coverage mechanism attention
Reference: Get To The Point: Summarization with Pointer-Generator Network
(https://arxiv.org/abs/1704.04368)
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
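Sketch of the score used in forward() below, where the coverage v_t is the
sum of all previous attention distributions at frame t:
    e_t = g^T tanh(W_enc h_t + W_dec d + w_cov v_t)
    a = softmax(scaling * e),  c = sum_t a_t h_t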
'''
def __init__(self, eprojs, dunits, att_dim):
super(AttCov, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.wvec = torch.nn.Linear(1, att_dim)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_list, scaling=2.0):
'''AttCov forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: decoder hidden state (B x D_dec)
:param list att_prev_list: list of previous attention weights
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: list of previous attention weights
:rtype: list
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
# initialize attention weight with uniform dist.
if att_prev_list is None:
att_prev = [Variable(enc_hs_pad.data.new(
l).zero_() + (1.0 / l)) for l in enc_hs_len]
# if no bias, 0 0-pad goes 0
att_prev_list = [pad_list(att_prev, 0)]
# att_prev_list: L' * [B x T] => cov_vec B x T
cov_vec = sum(att_prev_list)
# cov_vec: B x T => B x T x 1 => B x T x att_dim
cov_vec = linear_tensor(self.wvec, cov_vec.unsqueeze(-1))
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# NOTE consider zero padding when compute w.
e = linear_tensor(self.gvec, torch.tanh(
cov_vec + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
w = F.softmax(scaling * e, dim=1)
att_prev_list += [w]
# weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, att_prev_list
class AttLoc2D(torch.nn.Module):
'''2D location-aware attention
This attention is an extended version of location aware attention.
It takes into account not only the attention weights of the previous frame, but also those of earlier frames.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
:param int aconv_chans: # channels of attention convolution
:param int aconv_filts: filter size of attention convolution
:param int att_win: attention window size (default=5)
'''
def __init__(self, eprojs, dunits, att_dim, att_win, aconv_chans, aconv_filts):
super(AttLoc2D, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
self.loc_conv = torch.nn.Conv2d(
1, aconv_chans, (att_win, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.aconv_chans = aconv_chans
self.att_win = att_win
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
'''AttLoc2D forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: decoder hidden state (B x D_dec)
:param Variable att_prev: previous attention weight (B x att_win x T_max)
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: previous attention weights (B x att_win x T_max)
:rtype: Variable
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
# initialize attention weight with uniform dist.
if att_prev is None:
# B * [Li x att_win]
att_prev = [Variable(
enc_hs_pad.data.new(l, self.att_win).zero_() + 1.0 / l) for l in enc_hs_len]
# if no bias, 0 0-pad goes 0
att_prev = pad_list(att_prev, 0).transpose(1, 2)
# att_prev: B x att_win x Tmax -> B x 1 x att_win x Tmax -> B x C x 1 x Tmax
att_conv = self.loc_conv(att_prev.unsqueeze(1))
# att_conv: B x C x 1 x Tmax -> B x Tmax x C
att_conv = att_conv.squeeze(2).transpose(1, 2)
# att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
att_conv = linear_tensor(self.mlp_att, att_conv)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# NOTE consider zero padding when compute w.
e = linear_tensor(self.gvec, torch.tanh(
att_conv + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
w = F.softmax(scaling * e, dim=1)
# weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
# update att_prev: B x att_win x Tmax -> B x att_win+1 x Tmax -> B x att_win x Tmax
att_prev = torch.cat([att_prev, w.unsqueeze(1)], dim=1)
att_prev = att_prev[:, 1:]
return c, att_prev
class AttLocRec(torch.nn.Module):
'''location-aware recurrent attention
This attention is an extended version of location aware attention.
With the use of an RNN, it takes the history of attention weights into account.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
:param int aconv_chans: # channels of attention convolution
:param int aconv_filts: filter size of attention convolution
'''
def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
super(AttLocRec, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.loc_conv = torch.nn.Conv2d(
1, aconv_chans, (1, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False)
self.att_lstm = torch.nn.LSTMCell(aconv_chans, att_dim, bias=False)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_states, scaling=2.0):
'''AttLocRec forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param Variable dec_z: decoder hidden state (B x D_dec)
:param tuple att_prev_states: previous attention weight and lstm states
((B, T_max), ((B, att_dim), (B, att_dim)))
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: Variable
:return: previous attention weights and lstm states (w, (hx, cx))
((B, T_max), ((B, att_dim), (B, att_dim)))
:rtype: tuple
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
if att_prev_states is None:
# initialize attention weight with uniform dist.
att_prev = [Variable(
enc_hs_pad.data.new(l).fill_(1.0 / l)) for l in enc_hs_len]
# if no bias, 0 0-pad goes 0
att_prev = pad_list(att_prev, 0)
# initialize lstm states
att_h = Variable(enc_hs_pad.data.new(batch, self.att_dim).zero_())
att_c = Variable(enc_hs_pad.data.new(batch, self.att_dim).zero_())
att_states = (att_h, att_c)
else:
att_prev = att_prev_states[0]
att_states = att_prev_states[1]
# B x 1 x 1 x T -> B x C x 1 x T
att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
# apply non-linear
att_conv = F.relu(att_conv)
# B x C x 1 x T -> B x C x 1 x 1 -> B x C
att_conv = F.max_pool2d(att_conv, (1, att_conv.size(3))).view(batch, -1)
att_h, att_c = self.att_lstm(att_conv, att_states)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# NOTE consider zero padding when compute w.
e = linear_tensor(self.gvec, torch.tanh(
att_h.unsqueeze(1) + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
w = F.softmax(scaling * e, dim=1)
# weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, (att_prev, (att_h, att_c))
class AttCovLoc(torch.nn.Module):
'''Coverage mechanism location aware attention
This attention is a combination of coverage and location-aware attentions.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
:param int aconv_chans: # channels of attention convolution
:param int aconv_filts: filter size of attention convolution
'''
def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
super(AttCovLoc, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
self.loc_conv = torch.nn.Conv2d(
1, aconv_chans, (1, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.aconv_chans = aconv_chans
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_list, scaling=2.0):
'''AttCovLoc forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param Variable dec_z: decoder hidden state (B x D_dec)
        :param list att_prev_list: list of previous attention weights
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: Variable
        :return: list of previous attention weights
:rtype: list
'''
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
# initialize attention weight with uniform dist.
if att_prev_list is None:
att_prev = [Variable(enc_hs_pad.data.new(
l).zero_() + (1.0 / l)) for l in enc_hs_len]
            # if the conv has no bias, the zero-padded positions remain zero
att_prev_list = [pad_list(att_prev, 0)]
# att_prev_list: L' * [B x T] => cov_vec B x T
cov_vec = sum(att_prev_list)
# cov_vec: B x T -> B x 1 x 1 x T -> B x C x 1 x T
att_conv = self.loc_conv(cov_vec.view(batch, 1, 1, self.h_length))
# att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
att_conv = att_conv.squeeze(2).transpose(1, 2)
# att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
att_conv = linear_tensor(self.mlp_att, att_conv)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# NOTE consider zero padding when compute w.
e = linear_tensor(self.gvec, torch.tanh(
att_conv + self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
w = F.softmax(scaling * e, dim=1)
att_prev_list += [w]
        # weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, att_prev_list
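# Illustrative smoke test for the attention modules above (a sketch only: the
# batch size, encoder lengths and dimensions below are arbitrary assumptions,
# and this helper is not called anywhere in this module).
def _example_att_cov_loc():
    att = AttCovLoc(eprojs=320, dunits=300, att_dim=320, aconv_chans=10, aconv_filts=100)
    enc_hs_pad = Variable(torch.randn(2, 50, 320))  # B x T_max x D_enc
    enc_hs_len = [50, 40]                           # true encoder lengths per utterance
    dec_z = Variable(torch.zeros(2, 300))           # decoder state (B x D_dec)
    att.reset()
    att_prev_list = None
    for _ in range(3):                              # a few decoder steps
        c, att_prev_list = att(enc_hs_pad, enc_hs_len, dec_z, att_prev_list)
    return c                                        # attention context of shape (2, 320)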
class AttMultiHeadDot(torch.nn.Module):
'''Multi head dot product attention
Reference: Attention is all you need
(https://arxiv.org/abs/1706.03762)
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
:param int att_dim_k: dimension k in multi head attention
:param int att_dim_v: dimension v in multi head attention
'''
def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v):
super(AttMultiHeadDot, self).__init__()
self.mlp_q = torch.nn.ModuleList()
self.mlp_k = torch.nn.ModuleList()
self.mlp_v = torch.nn.ModuleList()
for h in six.moves.range(aheads):
self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
self.dunits = dunits
self.eprojs = eprojs
self.aheads = aheads
self.att_dim_k = att_dim_k
self.att_dim_v = att_dim_v
self.scaling = 1.0 / math.sqrt(att_dim_k)
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
'''AttMultiHeadDot forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param Variable dec_z: decoder hidden state (B x D_dec)
        :param Variable att_prev: dummy (not used)
        :return: attention weighted encoder state (B x D_enc)
        :rtype: Variable
        :return: list of previous attention weights (B x T_max) * aheads
:rtype: list
'''
batch = enc_hs_pad.size(0)
# pre-compute all k and v outside the decoder loop
if self.pre_compute_k is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_k = [
torch.tanh(linear_tensor(self.mlp_k[h], self.enc_h)) for h in six.moves.range(self.aheads)]
if self.pre_compute_v is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_v = [
linear_tensor(self.mlp_v[h], self.enc_h) for h in six.moves.range(self.aheads)]
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
c = []
w = []
for h in six.moves.range(self.aheads):
e = torch.sum(self.pre_compute_k[h] * torch.tanh(self.mlp_q[h](dec_z)).view(
batch, 1, self.att_dim_k), dim=2) # utt x frame
w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c += [torch.sum(self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1)]
# concat all of c
c = self.mlp_o(torch.cat(c, dim=1))
return c, w
class AttMultiHeadAdd(torch.nn.Module):
'''Multi head additive attention
Reference: Attention is all you need
(https://arxiv.org/abs/1706.03762)
This attention is multi head attention using additive attention for each head.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
:param int att_dim_k: dimension k in multi head attention
:param int att_dim_v: dimension v in multi head attention
'''
def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v):
super(AttMultiHeadAdd, self).__init__()
self.mlp_q = torch.nn.ModuleList()
self.mlp_k = torch.nn.ModuleList()
self.mlp_v = torch.nn.ModuleList()
self.gvec = torch.nn.ModuleList()
for h in six.moves.range(aheads):
self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
self.gvec += [torch.nn.Linear(att_dim_k, 1)]
self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
self.dunits = dunits
self.eprojs = eprojs
self.aheads = aheads
self.att_dim_k = att_dim_k
self.att_dim_v = att_dim_v
self.scaling = 1.0 / math.sqrt(att_dim_k)
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
'''AttMultiHeadAdd forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param Variable dec_z: decoder hidden state (B x D_dec)
        :param Variable att_prev: dummy (not used)
        :return: attention weighted encoder state (B, D_enc)
        :rtype: Variable
        :return: list of previous attention weights (B x T_max) * aheads
:rtype: list
'''
batch = enc_hs_pad.size(0)
# pre-compute all k and v outside the decoder loop
if self.pre_compute_k is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_k = [
linear_tensor(self.mlp_k[h], self.enc_h) for h in six.moves.range(self.aheads)]
if self.pre_compute_v is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_v = [
linear_tensor(self.mlp_v[h], self.enc_h) for h in six.moves.range(self.aheads)]
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
c = []
w = []
for h in six.moves.range(self.aheads):
e = linear_tensor(
self.gvec[h],
torch.tanh(
self.pre_compute_k[h] + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k))).squeeze(2)
w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c += [torch.sum(self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1)]
# concat all of c
c = self.mlp_o(torch.cat(c, dim=1))
return c, w
class AttMultiHeadLoc(torch.nn.Module):
'''Multi head location based attention
Reference: Attention is all you need
(https://arxiv.org/abs/1706.03762)
This attention is multi head attention using location-aware attention for each head.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int aheads: # heads of multi head attention
:param int att_dim_k: dimension k in multi head attention
:param int att_dim_v: dimension v in multi head attention
:param int aconv_chans: # channels of attention convolution
:param int aconv_filts: filter size of attention convolution
'''
def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v, aconv_chans, aconv_filts):
super(AttMultiHeadLoc, self).__init__()
self.mlp_q = torch.nn.ModuleList()
self.mlp_k = torch.nn.ModuleList()
self.mlp_v = torch.nn.ModuleList()
self.gvec = torch.nn.ModuleList()
self.loc_conv = torch.nn.ModuleList()
self.mlp_att = torch.nn.ModuleList()
for h in six.moves.range(aheads):
self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
self.gvec += [torch.nn.Linear(att_dim_k, 1)]
self.loc_conv += [torch.nn.Conv2d(
1, aconv_chans, (1, 2 * aconv_filts + 1), padding=(0, aconv_filts), bias=False)]
self.mlp_att += [torch.nn.Linear(aconv_chans, att_dim_k, bias=False)]
self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
self.dunits = dunits
self.eprojs = eprojs
self.aheads = aheads
self.att_dim_k = att_dim_k
self.att_dim_v = att_dim_v
self.scaling = 1.0 / math.sqrt(att_dim_k)
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
'''AttMultiHeadLoc forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param Variable dec_z: decoder hidden state (B x D_dec)
        :param Variable att_prev: list of previous attention weights (B x T_max) * aheads
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B x D_enc)
        :rtype: Variable
        :return: list of previous attention weights (B x T_max) * aheads
:rtype: list
'''
batch = enc_hs_pad.size(0)
# pre-compute all k and v outside the decoder loop
if self.pre_compute_k is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_k = [
linear_tensor(self.mlp_k[h], self.enc_h) for h in six.moves.range(self.aheads)]
if self.pre_compute_v is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_v = [
linear_tensor(self.mlp_v[h], self.enc_h) for h in six.moves.range(self.aheads)]
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
if att_prev is None:
att_prev = []
for h in six.moves.range(self.aheads):
att_prev += [[Variable(enc_hs_pad.data.new(
l).zero_() + (1.0 / l)) for l in enc_hs_len]]
                # if the conv has no bias, the zero-padded positions remain zero
att_prev[h] = pad_list(att_prev[h], 0)
c = []
w = []
for h in six.moves.range(self.aheads):
att_conv = self.loc_conv[h](att_prev[h].view(batch, 1, 1, self.h_length))
att_conv = att_conv.squeeze(2).transpose(1, 2)
att_conv = linear_tensor(self.mlp_att[h], att_conv)
e = linear_tensor(
self.gvec[h],
torch.tanh(
self.pre_compute_k[h] + att_conv + self.mlp_q[h](dec_z).view(
batch, 1, self.att_dim_k))).squeeze(2)
w += [F.softmax(scaling * e, dim=1)]
            # weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c += [torch.sum(self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1)]
# concat all of c
c = self.mlp_o(torch.cat(c, dim=1))
return c, w
class AttMultiHeadMultiResLoc(torch.nn.Module):
'''Multi head multi resolution location based attention
Reference: Attention is all you need
(https://arxiv.org/abs/1706.03762)
This attention is multi head attention using location-aware attention for each head.
Furthermore, it uses different filter size for each head.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int aheads: # heads of multi head attention
:param int att_dim_k: dimension k in multi head attention
:param int att_dim_v: dimension v in multi head attention
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: maximum filter size of attention convolution
        each head uses a filter size of aconv_filts * (head + 1) // aheads
        e.g. aheads=4, aconv_filts=100 => filter sizes = 25, 50, 75, 100
'''
def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v, aconv_chans, aconv_filts):
super(AttMultiHeadMultiResLoc, self).__init__()
self.mlp_q = torch.nn.ModuleList()
self.mlp_k = torch.nn.ModuleList()
self.mlp_v = torch.nn.ModuleList()
self.gvec = torch.nn.ModuleList()
self.loc_conv = torch.nn.ModuleList()
self.mlp_att = torch.nn.ModuleList()
for h in six.moves.range(aheads):
self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
self.gvec += [torch.nn.Linear(att_dim_k, 1)]
afilts = aconv_filts * (h + 1) // aheads
self.loc_conv += [torch.nn.Conv2d(
1, aconv_chans, (1, 2 * afilts + 1), padding=(0, afilts), bias=False)]
self.mlp_att += [torch.nn.Linear(aconv_chans, att_dim_k, bias=False)]
self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
self.dunits = dunits
self.eprojs = eprojs
self.aheads = aheads
self.att_dim_k = att_dim_k
self.att_dim_v = att_dim_v
self.scaling = 1.0 / math.sqrt(att_dim_k)
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def reset(self):
'''reset states'''
self.h_length = None
self.enc_h = None
self.pre_compute_k = None
self.pre_compute_v = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
'''AttMultiHeadMultiResLoc forward
:param Variable enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param Variable dec_z: decoder hidden state (B x D_dec)
        :param Variable att_prev: list of previous attention weights (B x T_max) * aheads
        :return: attention weighted encoder state (B x D_enc)
        :rtype: Variable
        :return: list of previous attention weights (B x T_max) * aheads
:rtype: list
'''
batch = enc_hs_pad.size(0)
# pre-compute all k and v outside the decoder loop
if self.pre_compute_k is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_k = [
linear_tensor(self.mlp_k[h], self.enc_h) for h in six.moves.range(self.aheads)]
if self.pre_compute_v is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_v = [
linear_tensor(self.mlp_v[h], self.enc_h) for h in six.moves.range(self.aheads)]
if dec_z is None:
dec_z = Variable(enc_hs_pad.data.new(batch, self.dunits).zero_())
else:
dec_z = dec_z.view(batch, self.dunits)
if att_prev is None:
att_prev = []
for h in six.moves.range(self.aheads):
att_prev += [[Variable(enc_hs_pad.data.new(
l).zero_() + (1.0 / l)) for l in enc_hs_len]]
                # if the conv has no bias, the zero-padded positions remain zero
att_prev[h] = pad_list(att_prev[h], 0)
c = []
w = []
for h in six.moves.range(self.aheads):
att_conv = self.loc_conv[h](att_prev[h].view(batch, 1, 1, self.h_length))
att_conv = att_conv.squeeze(2).transpose(1, 2)
att_conv = linear_tensor(self.mlp_att[h], att_conv)
e = linear_tensor(
self.gvec[h],
torch.tanh(
self.pre_compute_k[h] + att_conv + self.mlp_q[h](dec_z).view(
batch, 1, self.att_dim_k))).squeeze(2)
w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
# utt x hdim
# NOTE use bmm instead of sum(*)
c += [torch.sum(self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1)]
# concat all of c
c = self.mlp_o(torch.cat(c, dim=1))
return c, w
def th_accuracy(y_all, pad_target, ignore_label):
pad_pred = y_all.data.view(pad_target.size(
0), pad_target.size(1), y_all.size(1)).max(2)[1]
mask = pad_target.data != ignore_label
numerator = torch.sum(pad_pred.masked_select(
mask) == pad_target.data.masked_select(mask))
denominator = torch.sum(mask)
return float(numerator) / float(denominator)
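# Worked example for th_accuracy() above (a sketch with made-up numbers): two
# utterances of length 3, where one padded target position (ignore_label=-1)
# is excluded from both the numerator and the denominator.
def _example_th_accuracy():
    one_hot = torch.index_select(torch.eye(4), 0, torch.LongTensor([0, 1, 2, 0, 1, 2]))
    y_all = Variable(one_hot)                                         # (B*T, odim) "logits"
    pad_target = Variable(torch.LongTensor([[0, 1, 2], [0, 2, -1]]))  # last label is padding
    return th_accuracy(y_all, pad_target, ignore_label=-1)            # 4 correct / 5 counted = 0.8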
# ------------- Decoder Network ----------------------------------------------------------------------------------------
class Decoder(torch.nn.Module):
def __init__(self, eprojs, odim, dlayers, dunits, sos, eos, att, verbose=0,
char_list=None, labeldist=None, lsm_weight=0.):
super(Decoder, self).__init__()
self.dunits = dunits
self.dlayers = dlayers
self.embed = torch.nn.Embedding(odim, dunits)
self.decoder = torch.nn.ModuleList()
self.decoder += [torch.nn.LSTMCell(dunits + eprojs, dunits)]
for l in six.moves.range(1, self.dlayers):
self.decoder += [torch.nn.LSTMCell(dunits, dunits)]
self.ignore_id = 0 # NOTE: 0 for CTC?
self.output = torch.nn.Linear(dunits, odim)
self.loss = None
self.att = att
self.dunits = dunits
self.sos = sos
self.eos = eos
self.verbose = verbose
self.char_list = char_list
# for label smoothing
self.labeldist = labeldist
self.vlabeldist = None
self.lsm_weight = lsm_weight
def zero_state(self, hpad):
return Variable(hpad.data.new(hpad.size(0), self.dunits).zero_())
def forward(self, hpad, hlen, ys):
'''Decoder forward
        :param Variable hpad: padded encoder hidden state sequences (B x T_max x D_enc)
        :param list hlen: encoder hidden state lengths (B)
        :param list ys: list of target label id sequences (B)
        :return: attention loss value and accuracy
'''
hpad = mask_by_length(hpad, hlen, 0)
self.loss = None
# prepare input and output word sequences with sos/eos IDs
eos = Variable(ys[0].data.new([self.eos]))
sos = Variable(ys[0].data.new([self.sos]))
ys_in = [torch.cat([sos, y], dim=0) for y in ys]
ys_out = [torch.cat([y, eos], dim=0) for y in ys]
# padding for ys with -1
# pys: utt x olen
pad_ys_in = pad_list(ys_in, self.eos)
pad_ys_out = pad_list(ys_out, self.ignore_id)
# get dim, length info
batch = pad_ys_out.size(0)
olength = pad_ys_out.size(1)
logging.info(self.__class__.__name__ + ' input lengths: ' + str(hlen))
logging.info(self.__class__.__name__ + ' output lengths: ' + str([y.size(0) for y in ys_out]))
# initialization
c_list = [self.zero_state(hpad)]
z_list = [self.zero_state(hpad)]
for l in six.moves.range(1, self.dlayers):
c_list.append(self.zero_state(hpad))
z_list.append(self.zero_state(hpad))
att_w = None
z_all = []
self.att.reset() # reset pre-computation of h
# pre-computation of embedding
eys = self.embed(pad_ys_in) # utt x olen x zdim
# loop for an output sequence
for i in six.moves.range(olength):
att_c, att_w = self.att(hpad, hlen, z_list[0], att_w)
ey = torch.cat((eys[:, i, :], att_c), dim=1) # utt x (zdim + hdim)
z_list[0], c_list[0] = self.decoder[0](ey, (z_list[0], c_list[0]))
for l in six.moves.range(1, self.dlayers):
z_list[l], c_list[l] = self.decoder[l](
z_list[l - 1], (z_list[l], c_list[l]))
z_all.append(z_list[-1])
z_all = torch.stack(z_all, dim=1).view(batch * olength, self.dunits)
# compute loss
y_all = self.output(z_all)
self.loss = F.cross_entropy(y_all, pad_ys_out.view(-1),
ignore_index=self.ignore_id,
size_average=True)
# -1: eos, which is removed in the loss computation
self.loss *= (np.mean([len(x) for x in ys_in]) - 1)
acc = th_accuracy(y_all, pad_ys_out, ignore_label=self.ignore_id)
logging.info('att loss:' + str(self.loss.data))
# show predicted character sequence for debug
if self.verbose > 0 and self.char_list is not None:
y_hat = y_all.view(batch, olength, -1)
y_true = pad_ys_out
for (i, y_hat_), y_true_ in zip(enumerate(y_hat.data.cpu().numpy()), y_true.data.cpu().numpy()):
if i == MAX_DECODER_OUTPUT:
break
idx_hat = np.argmax(y_hat_[y_true_ != self.ignore_id], axis=1)
idx_true = y_true_[y_true_ != self.ignore_id]
seq_hat = [self.char_list[int(idx)] for idx in idx_hat]
seq_true = [self.char_list[int(idx)] for idx in idx_true]
seq_hat = "".join(seq_hat)
seq_true = "".join(seq_true)
logging.info("groundtruth[%d]: " % i + seq_true)
logging.info("prediction [%d]: " % i + seq_hat)
if self.labeldist is not None:
if self.vlabeldist is None:
self.vlabeldist = to_cuda(self, Variable(torch.from_numpy(self.labeldist)))
loss_reg = - torch.sum((F.log_softmax(y_all, dim=1) * self.vlabeldist).view(-1), dim=0) / len(ys_in)
self.loss = (1. - self.lsm_weight) * self.loss + self.lsm_weight * loss_reg
return self.loss, acc
def recognize_beam(self, h, lpz, recog_args, char_list, rnnlm=None):
'''beam search implementation
        :param Variable h: encoder hidden state sequence (T x D_enc)
        :param Variable lpz: CTC log probabilities (T x odim), or None
        :param Namespace recog_args: decoding options (beam_size, penalty, ctc_weight, ...)
        :param list char_list: list of output characters
        :param torch.nn.Module rnnlm: optional language model for shallow fusion
        :return: n-best list of decoded hypotheses
'''
logging.info('input lengths: ' + str(h.size(0)))
# initialization
c_list = [self.zero_state(h.unsqueeze(0))]
z_list = [self.zero_state(h.unsqueeze(0))]
for l in six.moves.range(1, self.dlayers):
c_list.append(self.zero_state(h.unsqueeze(0)))
z_list.append(self.zero_state(h.unsqueeze(0)))
a = None
self.att.reset() # reset pre-computation of h
        # search parameters
beam = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = recog_args.ctc_weight
        # prepare sos
y = self.sos
vy = Variable(h.data.new(1).zero_().long(), volatile=True)
if recog_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
minlen = int(recog_args.minlenratio * h.size(0))
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list,
'z_prev': z_list, 'a_prev': a, 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list, 'z_prev': z_list, 'a_prev': a}
if lpz is not None:
ctc_prefix_score = CTCPrefixScore(lpz.numpy(), 0, self.eos, np)
hyp['ctc_state_prev'] = ctc_prefix_score.initial_state()
hyp['ctc_score_prev'] = 0.0
if ctc_weight != 1.0:
# pre-pruning based on attention scores
ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz.shape[-1]
hyps = [hyp]
ended_hyps = []
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
vy.unsqueeze(1)
vy[0] = hyp['yseq'][i]
ey = self.embed(vy) # utt list (1) x zdim
ey.unsqueeze(0)
att_c, att_w = self.att(h.unsqueeze(0), [h.size(0)], hyp['z_prev'][0], hyp['a_prev'])
ey = torch.cat((ey, att_c), dim=1) # utt(1) x (zdim + hdim)
z_list[0], c_list[0] = self.decoder[0](ey, (hyp['z_prev'][0], hyp['c_prev'][0]))
for l in six.moves.range(1, self.dlayers):
z_list[l], c_list[l] = self.decoder[l](
z_list[l - 1], (hyp['z_prev'][l], hyp['c_prev'][l]))
# get nbest local scores and their ids
local_att_scores = F.log_softmax(self.output(z_list[-1]), dim=1).data
if rnnlm:
rnnlm_state, z_rnnlm = rnnlm.predictor(hyp['rnnlm_prev'], vy)
local_lm_scores = F.log_softmax(z_rnnlm, dim=1).data
local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
if lpz is not None:
local_best_scores, local_best_ids = torch.topk(
local_att_scores, ctc_beam, dim=1)
ctc_scores, ctc_states = ctc_prefix_score(
hyp['yseq'], local_best_ids[0], hyp['ctc_state_prev'])
local_scores = \
(1.0 - ctc_weight) * local_att_scores[:, local_best_ids[0]] \
+ ctc_weight * torch.from_numpy(ctc_scores - hyp['ctc_score_prev'])
if rnnlm:
local_scores += recog_args.lm_weight * local_lm_scores[:, local_best_ids]
local_best_scores, joint_best_ids = torch.topk(local_scores, beam, dim=1)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
else:
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
for j in six.moves.range(beam):
new_hyp = {}
# [:] is needed!
new_hyp['z_prev'] = z_list[:]
new_hyp['c_prev'] = c_list[:]
new_hyp['a_prev'] = att_w[:]
new_hyp['score'] = hyp['score'] + local_best_scores[0, j]
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = local_best_ids[0, j]
if rnnlm:
new_hyp['rnnlm_prev'] = rnnlm_state
if lpz is not None:
new_hyp['ctc_state_prev'] = ctc_states[joint_best_ids[0, j]]
new_hyp['ctc_score_prev'] = ctc_scores[joint_best_ids[0, j]]
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
# sort and get nbest
hyps = hyps_best_kept
            logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
logging.debug(
'best hypo: ' + ''.join([char_list[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
                logging.info('adding <eos> in the last position in the loop')
for hyp in hyps:
hyp['yseq'].append(self.eos)
            # add ended hypotheses to a final list, and remove them from the current hypotheses
            # (note that this can leave fewer than `beam` active hypotheses)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == self.eos:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen:
hyp['score'] += (i + 1) * penalty
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
break
hyps = remained_hyps
if len(hyps) > 0:
                logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
for hyp in hyps:
logging.debug(
'hypo: ' + ''.join([char_list[int(x)] for x in hyp['yseq'][1:]]))
        logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))
nbest_hyps = sorted(
ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), recog_args.nbest)]
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
# remove sos
return nbest_hyps
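# Hedged sketch of the options recognize_beam() reads from `recog_args`. The
# values below are arbitrary assumptions; any object exposing these attributes
# (e.g. an argparse.Namespace) is sufficient.
def _example_recog_args():
    import argparse
    return argparse.Namespace(
        beam_size=5,      # beam width
        penalty=0.0,      # score bonus added per emitted token for ended hypotheses
        ctc_weight=0.3,   # interpolation weight between CTC and attention scores
        maxlenratio=0.0,  # 0 means "use the full input length as maxlen"
        minlenratio=0.0,  # minimum output/input length ratio
        lm_weight=0.1,    # weight of RNNLM scores (only used when rnnlm is given)
        nbest=1)          # number of hypotheses to return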
# ------------- Encoder Network ----------------------------------------------------------------------------------------
class Encoder(torch.nn.Module):
'''ENCODER NETWORK CLASS
    This encoder converts padded input feature sequences into higher-level
    representations for the attention-based decoder.
    :param str etype: type of encoder network
    :param int idim: number of dimensions of encoder network
    :param int elayers: number of layers of encoder network
    :param int eunits: number of lstm units of encoder network
    :param int eprojs: number of projection units of encoder network
    :param list subsample: list of per-layer subsampling factors (e.g. parsed from '1_2_2_2_1')
:param float dropout: dropout rate
:return:
'''
def __init__(self, etype, idim, elayers, eunits, eprojs, subsample, dropout, in_channel=1):
super(Encoder, self).__init__()
if etype == 'blstm':
self.enc1 = BLSTM(idim, elayers, eunits, eprojs, dropout)
logging.info('BLSTM without projection for encoder')
elif etype == 'blstmp':
self.enc1 = BLSTMP(idim, elayers, eunits,
eprojs, subsample, dropout)
logging.info('BLSTM with every-layer projection for encoder')
elif etype == 'vggblstmp':
self.enc1 = VGG2L(in_channel)
self.enc2 = BLSTMP(_get_vgg2l_odim(idim, in_channel=in_channel),
elayers, eunits, eprojs,
subsample, dropout)
logging.info('Use CNN-VGG + BLSTMP for encoder')
elif etype == 'vggblstm':
self.enc1 = VGG2L(in_channel)
self.enc2 = BLSTM(_get_vgg2l_odim(idim, in_channel=in_channel),
elayers, eunits, eprojs, dropout)
logging.info('Use CNN-VGG + BLSTM for encoder')
else:
logging.error(
"Error: need to specify an appropriate encoder archtecture")
sys.exit()
self.etype = etype
def forward(self, xs, ilens):
'''Encoder forward
        :param xs: padded input feature sequences (B x T_max x idim)
        :param ilens: list of input sequence lengths (B)
        :return: encoded sequences and their subsampled lengths
'''
if self.etype == 'blstm':
xs, ilens = self.enc1(xs, ilens)
elif self.etype == 'blstmp':
xs, ilens = self.enc1(xs, ilens)
elif self.etype == 'vggblstmp':
xs, ilens = self.enc1(xs, ilens)
xs, ilens = self.enc2(xs, ilens)
elif self.etype == 'vggblstm':
xs, ilens = self.enc1(xs, ilens)
xs, ilens = self.enc2(xs, ilens)
else:
logging.error(
"Error: need to specify an appropriate encoder archtecture")
sys.exit()
return xs, ilens
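# Hedged construction sketch for the Encoder above. The dimensions are
# arbitrary assumptions; note that BLSTMP indexes `subsample` per layer, so a
# list of length elayers + 1 (e.g. [1, 2, 2, 1, 1]) is expected here.
def _example_encoder():
    return Encoder(etype='vggblstmp', idim=83, elayers=4, eunits=320,
                   eprojs=320, subsample=[1, 2, 2, 1, 1], dropout=0.0)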
class BLSTMP(torch.nn.Module):
def __init__(self, idim, elayers, cdim, hdim, subsample, dropout):
super(BLSTMP, self).__init__()
for i in six.moves.range(elayers):
if i == 0:
inputdim = idim
else:
inputdim = hdim
setattr(self, "bilstm%d" % i, torch.nn.LSTM(inputdim, cdim, dropout=dropout,
num_layers=1, bidirectional=True, batch_first=True))
# bottleneck layer to merge
setattr(self, "bt%d" % i, torch.nn.Linear(2 * cdim, hdim))
self.elayers = elayers
self.cdim = cdim
self.subsample = subsample
def forward(self, xpad, ilens):
'''BLSTMP forward
        :param xpad: padded input sequences (B x T_max x idim)
        :param ilens: list of input sequence lengths (B)
        :return: projected hidden state sequences and subsampled lengths
'''
# logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
for layer in six.moves.range(self.elayers):
xpack = pack_padded_sequence(xpad, ilens, batch_first=True)
bilstm = getattr(self, 'bilstm' + str(layer))
bilstm.flatten_parameters()
ys, (hy, cy) = bilstm(xpack)
# ys: utt list of frame x cdim x 2 (2: means bidirectional)
ypad, ilens = pad_packed_sequence(ys, batch_first=True)
sub = self.subsample[layer + 1]
if sub > 1:
ypad = ypad[:, ::sub]
ilens = [(i + 1) // sub for i in ilens]
# (sum _utt frame_utt) x dim
projected = getattr(self, 'bt' + str(layer)
)(ypad.contiguous().view(-1, ypad.size(2)))
xpad = torch.tanh(projected.view(ypad.size(0), ypad.size(1), -1))
del hy, cy
return xpad, ilens # x: utt list of frame x dim
class BLSTM(torch.nn.Module):
def __init__(self, idim, elayers, cdim, hdim, dropout):
super(BLSTM, self).__init__()
self.nblstm = torch.nn.LSTM(idim, cdim, elayers, batch_first=True,
dropout=dropout, bidirectional=True)
self.l_last = torch.nn.Linear(cdim * 2, hdim)
def forward(self, xpad, ilens):
'''BLSTM forward
        :param xpad: padded input sequences (B x T_max x idim)
        :param ilens: list of input sequence lengths (B)
        :return: projected hidden state sequences and their lengths
'''
logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
xpack = pack_padded_sequence(xpad, ilens, batch_first=True)
ys, (hy, cy) = self.nblstm(xpack)
del hy, cy
# ys: utt list of frame x cdim x 2 (2: means bidirectional)
ypad, ilens = pad_packed_sequence(ys, batch_first=True)
# (sum _utt frame_utt) x dim
projected = torch.tanh(self.l_last(
ypad.contiguous().view(-1, ypad.size(2))))
xpad = projected.view(ypad.size(0), ypad.size(1), -1)
return xpad, ilens # x: utt list of frame x dim
class VGG2L(torch.nn.Module):
def __init__(self, in_channel=1):
super(VGG2L, self).__init__()
# CNN layer (VGG motivated)
self.conv1_1 = torch.nn.Conv2d(in_channel, 64, 3, stride=1, padding=1)
self.conv1_2 = torch.nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv2_1 = torch.nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.conv2_2 = torch.nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.in_channel = in_channel
def forward(self, xs, ilens):
'''VGG2L forward
        :param xs: padded input feature sequences (B x T_max x D)
        :param ilens: list of input sequence lengths (B)
        :return: downsampled feature sequences and their updated lengths
'''
logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
# x: utt x frame x dim
# xs = F.pad_sequence(xs)
# x: utt x 1 (input channel num) x frame x dim
xs = xs.view(xs.size(0), xs.size(1), self.in_channel,
xs.size(2) // self.in_channel).transpose(1, 2)
# NOTE: max_pool1d ?
xs = F.relu(self.conv1_1(xs))
xs = F.relu(self.conv1_2(xs))
xs = F.max_pool2d(xs, 2, stride=2, ceil_mode=True)
xs = F.relu(self.conv2_1(xs))
xs = F.relu(self.conv2_2(xs))
xs = F.max_pool2d(xs, 2, stride=2, ceil_mode=True)
# change ilens accordingly
# ilens = [_get_max_pooled_size(i) for i in ilens]
ilens = np.array(
np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64)
ilens = np.array(
np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64).tolist()
# x: utt_list of frame (remove zeropaded frames) x (input channel num x dim)
xs = xs.transpose(1, 2)
xs = xs.contiguous().view(
xs.size(0), xs.size(1), xs.size(2) * xs.size(3))
xs = [xs[i, :ilens[i]] for i in range(len(ilens))]
xs = pad_list(xs, 0.0)
return xs, ilens
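# Worked example of the length bookkeeping in VGG2L.forward above: two rounds
# of stride-2 max pooling with ceil_mode=True shrink each length to
# ceil(ceil(l / 2) / 2) (the input lengths below are arbitrary).
def _example_vgg2l_lengths():
    ilens = [100, 83, 7]
    ilens = np.array(np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64)
    ilens = np.array(np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64).tolist()
    return ilens  # -> [25, 21, 2]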
the-stack_0_20141
import tensorflow as tf
from preprocessing.augmentation_utils import mixup
from time import time
from utils.dist_utils import is_sm_dist
if is_sm_dist():
import smdistributed.dataparallel.tensorflow as dist
else:
import horovod.tensorflow as dist
layers = tf.keras.layers
class Trainer(object):
def __init__(self, model, opt, loss_func, scheduler, logging=None,
fp16=True, mixup_alpha=0.0, model_dir='.',
start_saving_accuracy=0.7):
self.model = model
self.opt = opt
self.loss_func = loss_func
self.fp16 = fp16
self.mixup_alpha = mixup_alpha
self.time = time()
self.model_dir = model_dir
self.best_validation_accuracy = start_saving_accuracy
self.logging = logging
self.scheduler = scheduler
self.iteration = tf.convert_to_tensor(0)
self.inner_iteration = tf.convert_to_tensor(0)
@tf.function
def train_step(self, images, labels, first_batch=False):
batch_size = tf.shape(images)[0]
images, labels = mixup(batch_size, self.mixup_alpha, images, labels)
images = tf.cast(images, tf.float16 if self.fp16 else tf.float32)
with tf.GradientTape() as tape:
logits = tf.cast(self.model(images, training=True), tf.float32)
loss_value = self.loss_func(labels, logits)
loss_value += tf.add_n(self.model.losses)
if self.fp16:
scaled_loss_value = self.opt.get_scaled_loss(loss_value)
tape = dist.DistributedGradientTape(tape)
if self.fp16:
grads = tape.gradient(scaled_loss_value, self.model.trainable_variables)
grads = self.opt.get_unscaled_gradients(grads)
else:
grads = tape.gradient(loss_value, self.model.trainable_variables)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
if first_batch:
dist.broadcast_variables(self.model.variables, root_rank=0)
dist.broadcast_variables(self.opt.variables(), root_rank=0)
probs = layers.Activation('softmax', dtype='float32')(logits)
top_1_pred = tf.squeeze(tf.math.top_k(probs, k=1)[1])
sparse_labels = tf.cast(tf.math.argmax(labels, axis=1), tf.int32)
top_1_accuracy = tf.math.reduce_sum(tf.cast(tf.equal(top_1_pred, sparse_labels), tf.int32))/batch_size
return loss_value, top_1_accuracy
@tf.function
def validation_step(self, images, labels):
images = tf.cast(images, tf.float16 if self.fp16 else tf.float32)
logits = tf.cast(self.model(images, training=False), tf.float32)
loss = self.loss_func(labels, logits)
top_1_pred = tf.squeeze(tf.math.top_k(logits, k=1)[1])
sparse_labels = tf.cast(tf.math.argmax(labels, axis=1), tf.int32)
top_1_accuracy = tf.math.reduce_sum(tf.cast(tf.equal(top_1_pred, sparse_labels), tf.int32))
return loss, top_1_accuracy
def print_train(self, interval, **kwargs):
elapsed_time = time() - self.time
self.time = time()
step_time = elapsed_time/interval
log_list = [('step', str(self.inner_iteration.numpy())),
('step time', f"{step_time:.4f}")]
for key, value in kwargs.items():
log_list.append((key, f"{value:.4f}"))
info_str = ", ".join([": ".join(i) for i in log_list])
if self.logging:
self.logging.info(info_str)
print(info_str)
def train_epoch(self, dataset, print_interval=50):
self.inner_iteration = tf.convert_to_tensor(0)
for step, (images, labels) in enumerate(dataset):
loss_value, top_1_accuracy = self.train_step(images, labels, first_batch=step==0)
if dist.rank()==0 and step%print_interval==0:
self.print_train(print_interval,
train_loss=loss_value,
top_1_accuracy=top_1_accuracy,
learning_rate=self.scheduler(self.iteration))
self.iteration+=1
self.inner_iteration+=1
def validation_epoch(self, dataset, output_name=None):
validation_score = 0
counter = 0
for step, (images, labels) in enumerate(dataset):
if step==0:
batch_size = tf.shape(images)[0]
loss, score = self.validation_step(images, labels)
validation_score += score.numpy()
counter += 1
validation_accuracy = validation_score / (batch_size * counter)
average_validation_accuracy = dist.allreduce(tf.constant(validation_accuracy))
average_validation_loss = dist.allreduce(tf.constant(loss))
if dist.rank() == 0:
info_str = 'Validation Accuracy: {0}, Validation Loss: {1}'.format(average_validation_accuracy,
average_validation_loss)
if self.logging:
self.logging.info(info_str)
print(info_str)
if average_validation_accuracy > self.best_validation_accuracy:
self.best_validation_accuracy = average_validation_accuracy
print("Found new best accuracy, saving checkpoint ...")
self.model.save('{}/{}'.format(self.model_dir,
self.iteration if not output_name else output_name))
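# Hedged wiring sketch for the Trainer class above. `build_model`, `train_ds`
# and `val_ds` are hypothetical placeholders supplied by the caller, and the
# optimizer/schedule choices are assumptions, not requirements of this module.
def _example_trainer_setup(build_model, train_ds, val_ds, epochs=2):
    scheduler = tf.keras.optimizers.schedules.CosineDecay(
        initial_learning_rate=0.1, decay_steps=10000)
    opt = tf.keras.optimizers.SGD(learning_rate=scheduler, momentum=0.9)
    # LossScaleOptimizer provides the get_scaled_loss/get_unscaled_gradients
    # calls used by train_step when fp16=True (TF >= 2.4 path; older releases
    # expose it under tf.keras.mixed_precision.experimental).
    opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
    loss_func = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
    trainer = Trainer(build_model(), opt, loss_func, scheduler,
                      fp16=True, mixup_alpha=0.2, model_dir='.')
    for epoch in range(epochs):
        trainer.train_epoch(train_ds)
        trainer.validation_epoch(val_ds, output_name='epoch_{}'.format(epoch))
    return trainer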
@tf.function
def train_step(model, opt, loss_func, images, labels, first_batch, batch_size, mixup_alpha=0.0, fp32=False):
images, labels = mixup(batch_size, mixup_alpha, images, labels)
with tf.GradientTape() as tape:
logits = model(images, training=True)
loss_value = loss_func(labels, tf.cast(logits, tf.float32))
loss_value += tf.add_n(model.losses)
if not fp32:
scaled_loss_value = opt.get_scaled_loss(loss_value)
tape = dist.DistributedGradientTape(tape, compression=dist.Compression.fp16)
if not fp32:
grads = tape.gradient(scaled_loss_value, model.trainable_variables)
grads = opt.get_unscaled_gradients(grads)
else:
grads = tape.gradient(loss_value, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
if first_batch:
dist.broadcast_variables(model.variables, root_rank=0)
dist.broadcast_variables(opt.variables(), root_rank=0)
probs = layers.Activation('softmax', dtype='float32')(logits)
top_1_pred = tf.squeeze(tf.math.top_k(probs, k=1)[1])
sparse_labels = tf.cast(tf.math.argmax(labels, axis=1), tf.int32)
top_1_accuracy = tf.math.reduce_sum(tf.cast(tf.equal(top_1_pred, sparse_labels), tf.int32))
return loss_value, top_1_accuracy
@tf.function
def validation_step(images, labels, model, loss_func):
pred = model(images, training=False)
loss = loss_func(labels, pred)
top_1_pred = tf.squeeze(tf.math.top_k(pred, k=1)[1])
sparse_labels = tf.cast(tf.math.argmax(labels, axis=1), tf.int32)
top_1_accuracy = tf.math.reduce_sum(tf.cast(tf.equal(top_1_pred, sparse_labels), tf.int32))
return loss, top_1_accuracy
the-stack_0_20143
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The plugin serving the interactive inference tab."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import io
import json
import logging
import math
import numpy as np
import os
import werkzeug
from werkzeug import wrappers
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import json_format
from grpc.framework.interfaces.face.face import AbortionError
from werkzeug import wrappers
import tensorflow as tf
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard_plugin_wit._utils import common_utils
from tensorboard_plugin_wit._utils import inference_utils
from tensorboard_plugin_wit._utils import platform_utils
logger = logging.getLogger('tensorboard')
# Max number of examples to scan along the `examples_path` in order to return
# statistics and sampling for features.
NUM_EXAMPLES_TO_SCAN = 50
# Max number of mutants to show per feature (i.e. num of points along x-axis).
NUM_MUTANTS = 10
# Max number of examples to send in a single response.
# TODO(jameswex): Make dynamic based on example size.
MAX_EXAMPLES_TO_SEND = 10000
class WhatIfToolPlugin(base_plugin.TBPlugin):
"""Plugin for understanding/debugging model inference.
"""
# This string field is used by TensorBoard to generate the paths for routes
# provided by this plugin. It must thus be URL-friendly. This field is also
# used to uniquely identify this plugin throughout TensorBoard. See BasePlugin
# for details.
plugin_name = 'whatif'
examples = []
updated_example_indices = set()
sprite = None
example_class = tf.train.Example
# The standard name for encoded image features in TensorFlow.
image_feature_name = 'image/encoded'
# The width and height of the thumbnail for any images for Facets Dive.
sprite_thumbnail_dim_px = 32
# The vocab of inference class indices to label names for the model.
label_vocab = []
def __init__(self, context):
"""Constructs an interactive inference plugin for TensorBoard.
Args:
context: A base_plugin.TBContext instance.
"""
self._logdir = context.logdir
self._wit_data_dir = context.flags.wit_data_dir if context.flags else None
self.custom_predict_fn = None
if context.flags and context.flags.custom_predict_fn:
try:
import importlib.util as iu
spec = iu.spec_from_file_location("custom_predict_fn", context.flags.custom_predict_fn)
module = iu.module_from_spec(spec)
spec.loader.exec_module(module)
self.custom_predict_fn = module.custom_predict_fn
logger.info("custom_predict_fn loaded.")
except Exception as e:
logger.error(str(e))
logger.error("Failed to load the custom predict function.")
logger.error("Have you defined a function named custom_predict_fn?")
def get_plugin_apps(self):
"""Obtains a mapping between routes and handlers. Stores the logdir.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
return {
'/index.js': self._serve_js,
'/wit_tb_bin.html': self._serve_wit,
'/wit_tb_bin.js': self._serve_wit_js,
'/infer': self._infer,
'/update_example': self._update_example,
'/examples_from_path': self._examples_from_path_handler,
'/sprite': self._serve_sprite,
'/duplicate_example': self._duplicate_example,
'/delete_example': self._delete_example,
'/infer_mutants': self._infer_mutants_handler,
'/eligible_features': self._eligible_features_from_example_handler,
'/sort_eligible_features': self._sort_eligible_features_handler,
}
def is_active(self):
"""Determines whether this plugin is active.
Returns:
A boolean. Whether this plugin is active.
"""
# TODO(jameswex): Maybe enable if config flags were specified?
return False
def frontend_metadata(self):
return base_plugin.FrontendMetadata(
es_module_path="/index.js",
tab_name='What-If Tool')
@wrappers.Request.application
def _serve_js(self, request):
del request # unused
filepath = os.path.join(os.path.dirname(__file__), "static", "index.js")
with io.open(filepath, encoding='utf-8') as infile:
contents = infile.read()
return werkzeug.Response(contents, content_type="application/javascript")
@wrappers.Request.application
def _serve_wit(self, request):
del request # unused
filepath = os.path.join(os.path.dirname(__file__), "static", "wit_tb_bin.html")
with io.open(filepath, encoding='utf-8') as infile:
contents = infile.read()
return werkzeug.Response(contents, content_type="text/html")
@wrappers.Request.application
def _serve_wit_js(self, request):
del request # unused
filepath = os.path.join(os.path.dirname(__file__), "static", "wit_tb_bin.js")
with io.open(filepath, encoding='utf-8') as infile:
contents = infile.read()
return werkzeug.Response(contents, content_type="application/javascript")
def generate_sprite(self, example_strings):
# Generate a sprite image for the examples if the examples contain the
# standard encoded image feature.
feature_list = (self.examples[0].features.feature
if self.example_class == tf.train.Example
else self.examples[0].context.feature)
self.sprite = (
inference_utils.create_sprite_image(example_strings)
if (len(self.examples) and self.image_feature_name in feature_list) else
None)
@wrappers.Request.application
def _examples_from_path_handler(self, request):
"""Returns JSON of the specified examples.
Args:
request: A request that should contain 'examples_path' and 'max_examples'.
Returns:
JSON of up to max_examples of the examples in the path.
"""
start_example = (int(request.args.get('start_example'))
if request.args.get('start_example') else 0)
if not start_example:
examples_count = int(request.args.get('max_examples'))
examples_path = request.args.get('examples_path')
sampling_odds = float(request.args.get('sampling_odds'))
self.example_class = (tf.train.SequenceExample
if request.args.get('sequence_examples') == 'true'
else tf.train.Example)
try:
platform_utils.throw_if_file_access_not_allowed(examples_path,
self._logdir,
self._wit_data_dir)
example_strings = platform_utils.example_protos_from_path(
examples_path, examples_count, parse_examples=False,
sampling_odds=sampling_odds, example_class=self.example_class)
self.examples = [
self.example_class.FromString(ex) for ex in example_strings]
self.generate_sprite(example_strings)
self.updated_example_indices = set(range(len(self.examples)))
except common_utils.InvalidUserInputError as e:
logger.error('Data loading error: %s', e.message)
return http_util.Respond(request, e.message,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
    # Return examples from start_example up to start_example + MAX_EXAMPLES_TO_SEND,
    # and report the next start index if more examples remain.
end_example = start_example + MAX_EXAMPLES_TO_SEND
json_examples = [
json_format.MessageToJson(example) for example in self.examples[
start_example:end_example]
]
if end_example >= len(self.examples):
end_example = -1
return http_util.Respond(
request,
{'examples': json_examples,
'sprite': True if (self.sprite and not start_example) else False,
'next': end_example},
'application/json')
@wrappers.Request.application
def _serve_sprite(self, request):
return http_util.Respond(request, self.sprite, 'image/png')
@wrappers.Request.application
def _update_example(self, request):
"""Updates the specified example.
Args:
request: A request that should contain 'index' and 'example'.
Returns:
An empty response.
"""
if request.method != 'POST':
return http_util.Respond(request, 'invalid non-POST request',
'application/json', code=405)
example_json = request.form['example']
index = int(request.form['index'])
if index >= len(self.examples):
return http_util.Respond(request, 'invalid index provided',
'application/json', code=400)
new_example = self.example_class()
json_format.Parse(example_json, new_example)
self.examples[index] = new_example
self.updated_example_indices.add(index)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, 'invalid index provided',
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _delete_example(self, request):
"""Deletes the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, 'invalid index provided',
'application/json', code=400)
del self.examples[index]
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures
@wrappers.Request.application
def _infer(self, request):
"""Returns JSON for the `vz-line-chart`s for a feature.
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.
Returns:
A list of JSON objects, one for each chart.
"""
start_example = (int(request.args.get('start_example'))
if request.args.get('start_example') else 0)
if not start_example:
label_vocab = inference_utils.get_label_vocab(
request.args.get('label_vocab_path'))
try:
if request.method != 'GET':
logger.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, 'invalid non-GET request',
'application/json', code=405)
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
self.indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [self.examples[index] for index in self.indices_to_infer]
self.infer_objs = []
for model_num in xrange(len(inference_addresses)):
serving_bundle = inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
request.args.get('model_type'),
model_versions[model_num],
model_signatures[model_num],
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'),
custom_predict_fn=self.custom_predict_fn)
(predictions, _) = inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle)
self.infer_objs.append(predictions)
self.updated_example_indices = set()
except AbortionError as e:
logging.error(str(e))
return http_util.Respond(request, e.details,
'application/json', code=400)
except Exception as e:
logging.error(str(e))
return http_util.Respond(request, str(e),
'application/json', code=400)
    # Return inference results from start_example up to start_example + MAX_EXAMPLES_TO_SEND,
    # and report the next start index if more results remain.
end_example = start_example + MAX_EXAMPLES_TO_SEND
def get_inferences_resp():
sliced_infer_objs = [
copy.deepcopy(infer_obj) for infer_obj in self.infer_objs]
if request.args.get('model_type') == 'classification':
for obj in sliced_infer_objs:
obj['classificationResult']['classifications'][:] = obj[
'classificationResult']['classifications'][
start_example:end_example]
else:
for obj in sliced_infer_objs:
obj['regressionResult']['regressions'][:] = obj['regressionResult'][
'regressions'][start_example:end_example]
return {'indices': self.indices_to_infer[start_example:end_example],
'results': sliced_infer_objs}
try:
inferences_resp = get_inferences_resp()
resp = {'inferences': json.dumps(inferences_resp)}
if end_example >= len(self.examples):
end_example = -1
if start_example == 0:
resp['vocab'] = json.dumps(label_vocab)
resp['next'] = end_example
return http_util.Respond(request, resp, 'application/json')
except Exception as e:
logging.error(e)
return http_util.Respond(request, str(e),
'application/json', code=400)
@wrappers.Request.application
def _eligible_features_from_example_handler(self, request):
"""Returns a list of JSON objects for each feature in the example.
Args:
request: A request for features.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: observedMin: observedMax:}.
    Categorical features are represented as {name: samples:[]}.
"""
features_list = inference_utils.get_eligible_features(
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
@wrappers.Request.application
def _sort_eligible_features_handler(self, request):
"""Returns a sorted list of JSON objects for each feature in the example.
The list is sorted by interestingness in terms of the resulting change in
inference values across feature values, for partial dependence plots.
Args:
request: A request for sorted features.
Returns:
A sorted list with a JSON object for each feature.
Numeric features are represented as
{name: observedMin: observedMax: interestingness:}.
    Categorical features are represented as
{name: samples:[] interestingness:}.
"""
try:
features_list = inference_utils.get_eligible_features(
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
example_index = int(request.args.get('example_index', '0'))
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
chart_data = {}
for feat in features_list:
chart_data[feat['name']] = self._infer_mutants_impl(
feat['name'], example_index,
inference_addresses, model_names, request.args.get('model_type'),
model_versions, model_signatures,
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'),
feat['observedMin'] if 'observedMin' in feat else 0,
feat['observedMax'] if 'observedMin' in feat else 0,
None, custom_predict_fn=self.custom_predict_fn)
features_list = inference_utils.sort_eligible_features(
features_list, chart_data)
return http_util.Respond(request, features_list, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, e.message,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
@wrappers.Request.application
def _infer_mutants_handler(self, request):
"""Returns JSON for the partial dependence plots for a feature.
Args:
request: A request that should contain 'feature_name', 'example_index',
'inference_address', 'model_name', 'model_type', 'model_version', and
'model_signature'.
Returns:
A list of JSON objects, one for each chart.
"""
try:
if request.method != 'GET':
logger.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, 'invalid non-GET request',
'application/json', code=405)
example_index = int(request.args.get('example_index', '0'))
feature_name = request.args.get('feature_name')
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
json_mapping = self._infer_mutants_impl(feature_name, example_index,
inference_addresses, model_names, request.args.get('model_type'),
model_versions, model_signatures,
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'),
request.args.get('x_min'), request.args.get('x_max'),
request.args.get('feature_index_pattern'),
custom_predict_fn=self.custom_predict_fn)
return http_util.Respond(request, json_mapping, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, e.message,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
def _infer_mutants_impl(self, feature_name, example_index, inference_addresses,
model_names, model_type, model_versions, model_signatures, use_predict,
predict_input_tensor, predict_output_tensor, x_min, x_max,
feature_index_pattern, custom_predict_fn):
"""Helper for generating PD plots for a feature."""
examples = (self.examples if example_index == -1
else [self.examples[example_index]])
serving_bundles = []
for model_num in xrange(len(inference_addresses)):
serving_bundles.append(inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
model_type,
model_versions[model_num],
model_signatures[model_num],
use_predict,
predict_input_tensor,
predict_output_tensor,
custom_predict_fn=custom_predict_fn))
viz_params = inference_utils.VizParams(
x_min, x_max,
self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
feature_index_pattern)
return inference_utils.mutant_charts_for_feature(
examples, feature_name, serving_bundles, viz_params)
|
the-stack_0_20145 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nnabla.utils.image_utils import imresize
from nnabla.utils.cli.utility import let_data_to_variable
from .utils import add_frame, overlay_images, preprocess_image, is_normalized
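# value_to_color maps a scalar in [0, 1] onto a jet-like RGB ramp
# (blue -> cyan -> yellow -> red); generate_heatmap applies it per pixel
# to turn a 2-D saliency map into a (3, H, W) color image.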
def value_to_color(value):
if value < 1.0 / 8:
value += 1.0 / 8
return [0, 0, value * 255 * 4]
elif value < 3.0 / 8:
value -= 1.0 / 8
return [0, value * 255 * 4, 255]
elif value < 5.0 / 8:
value -= 3.0 / 8
return [value * 255 * 4, 255, 255 - value * 255 * 4]
elif value < 7.0 / 8:
value -= 5.0 / 8
return [255, 255 - value * 255 * 4, 0]
else:
value -= 7.0 / 8
return [255 - value * 255 * 4, 0, 0]
def generate_heatmap(im):
hm_sh = im.shape
ret = np.ndarray((3,) + hm_sh)
for y in range(hm_sh[0]):
for x in range(hm_sh[1]):
ret[:, y, x] = value_to_color(im[y, x])
return ret
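# gradcam() follows the Grad-CAM recipe as implemented here: global-average-pool
# the gradients over the batch and spatial axes to get one weight per channel,
# weight the feature maps, clip negatives, average over channels, then min-max
# normalize to [0, 1].
#
# Shape sketch (illustrative only, random arrays instead of a real network):
#
#   import numpy as np
#   conv_output = np.random.rand(1, 512, 7, 7)  # last conv layer activations
#   conv_grad = np.random.rand(1, 512, 7, 7)    # d(class score)/d(activations)
#   cam = gradcam(conv_output, conv_grad)       # -> (7, 7) array of values in [0, 1]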
def gradcam(conv_output, conv_grad):
pooled_grad = conv_grad.mean(axis=(0, 2, 3), keepdims=True)
ret = pooled_grad * conv_output
ret = np.maximum(ret, 0) # ReLU
ret = ret.mean(axis=(0, 1))
max_v, min_v = np.max(ret), np.min(ret)
if max_v != min_v:
ret = (ret - min_v) / (max_v - min_v)
return ret
def zero_grad(executor):
for k, v in executor.network.variables.items():
v.variable_instance.need_grad = True
v.variable_instance.grad.zero()
def get_gradcam_image(input_image, config):
executor = config.executor
output_variable = config.output_variable
input_variable = config.input_variable
# input image
im = preprocess_image(input_image, executor.no_image_normalization)
let_data_to_variable(
input_variable.variable_instance,
im,
data_name=config.input_data_name,
variable_name=input_variable.name
)
# forward, backward
zero_grad(executor)
selected = output_variable.variable_instance[:, config.class_index]
selected.forward()
selected.backward()
# Grad-CAM
last_conv = executor.network.variables[config.last_conv_name]
conv_grad = last_conv.variable_instance.g.copy()
conv_output = last_conv.variable_instance.d.copy()
gc = gradcam(conv_output, conv_grad)
# Generate output image
heatmap = generate_heatmap(gc)
_h, _w = config.cropped_shape
heatmap = imresize(heatmap, (_w, _h), channel_first=True)
heatmap = add_frame(heatmap, config.input_shape, config.padding)
_im = input_image.copy()
if is_normalized(_im):
_im = _im * 255.0
result = overlay_images(heatmap, _im)
return result
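# Note: get_gradcam_image expects a config object exposing at least executor,
# output_variable, input_variable, input_data_name, class_index, last_conv_name,
# cropped_shape, input_shape and padding. The return value is the heatmap
# resized to the input resolution and overlaid on the (de-normalized) image.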
|
the-stack_0_20146 | from stack import Stack
from parser import evaluate
class Instruction_Stack:
def __init__(self, existing_instr=None, pointer=0, stack=None):
# Avoid mutable default arguments: sharing one list/Stack across instances is a
# classic Python pitfall.
self.instructions = existing_instr if existing_instr is not None else []
self.instruction_pointer = pointer
self.stack_machine = stack if stack is not None else Stack()
def token_to_instruction(self, token):
if isinstance(token, int) or isinstance(token, float):
self.instructions.append('push')
self.instructions.append(token)
else:
self.instructions.append(token)
def step_instruction(self):
self.instruction_pointer = self.instruction_pointer + 1
def jump_instruction(self, intended_location):
self.instruction_pointer = intended_location
def insert_instruction(self, index, instruction):
self.instructions.insert(index, instruction)
def pre_push(self):
pre_push_pointer = 0
while pre_push_pointer < len(self.instructions):
if self.instructions[pre_push_pointer] == 'push':
del self.instructions[pre_push_pointer]
self.stack_machine.push(self.instructions[pre_push_pointer])
del self.instructions[pre_push_pointer]
else:
pre_push_pointer = pre_push_pointer + 1
def run(self, instruction):
if instruction == 'end':
return False
elif instruction == 'add':
self.stack_machine.add()
elif instruction == 'subtract':
self.stack_machine.subtract()
elif instruction == 'multiply':
self.stack_machine.multiply()
elif instruction == 'divide':
self.stack_machine.divide()
elif instruction == 'print':
self.stack_machine.pop_print()
elif instruction == 'push':
self.step_instruction()
number = self.instructions[self.instruction_pointer]
self.stack_machine.push(number)
elif instruction == 'clear':
self.stack_machine.clear()
else:
raise SystemError('Runtime error: unrecognized instruction %r' % instruction)
self.step_instruction()
# Guard against running off the end of the program when no explicit 'end'
# instruction was emitted.
if self.instruction_pointer < len(self.instructions):
    self.run(self.instructions[self.instruction_pointer])
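# Illustrative sketch (assumes Stack.add/pop_print behave as their names suggest,
# and nothing about parser.evaluate beyond the demo below): a program can also be
# assembled directly from tokens, with an explicit 'end' to stop execution.
#
#   prog = Instruction_Stack(existing_instr=[], stack=Stack())
#   for tok in [2, 3, 'add', 'print', 'end']:
#       prog.token_to_instruction(tok)
#   prog.pre_push()                  # hoists the literal pushes onto the stack machine
#   prog.run(prog.instructions[0])   # prints 5, then stops at 'end'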
string = '1000 * 3'
tokens = evaluate(string)
instr_stack = Instruction_Stack()
for token in tokens:
instr_stack.token_to_instruction(token)
instr_stack.pre_push()
instr_stack.run(instr_stack.instructions[0])
|
the-stack_0_20147 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from six import StringIO
import pytest
import llnl.util.filesystem as fs
import spack.hash_types as ht
import spack.modules
import spack.environment as ev
from spack.cmd.env import _env_create
from spack.spec import Spec
from spack.main import SpackCommand, SpackCommandError
from spack.stage import stage_prefix
from spack.util.mock_package import MockPackageMultiRepo
import spack.util.spack_json as sjson
from spack.util.path import substitute_path_variables
# everything here uses the mock_env_path
pytestmark = pytest.mark.usefixtures(
'mutable_mock_env_path', 'config', 'mutable_mock_repo')
env = SpackCommand('env')
install = SpackCommand('install')
add = SpackCommand('add')
remove = SpackCommand('remove')
concretize = SpackCommand('concretize')
stage = SpackCommand('stage')
uninstall = SpackCommand('uninstall')
find = SpackCommand('find')
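# SpackCommand wraps a CLI command as a Python callable, so the tests below can
# write env('create', 'test') where a user would run `spack env create test`.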
def check_mpileaks_and_deps_in_view(viewdir):
"""Check that the expected install directories exist."""
assert os.path.exists(str(viewdir.join('.spack', 'mpileaks')))
assert os.path.exists(str(viewdir.join('.spack', 'libdwarf')))
def check_viewdir_removal(viewdir):
"""Check that the uninstall/removal worked."""
assert (not os.path.exists(str(viewdir.join('.spack'))) or
os.listdir(str(viewdir.join('.spack'))) == ['projections.yaml'])
@pytest.fixture()
def env_deactivate():
yield
spack.environment._active_environment = None
os.environ.pop('SPACK_ENV', None)
def test_add():
e = ev.create('test')
e.add('mpileaks')
assert Spec('mpileaks') in e.user_specs
def test_env_add_virtual():
env('create', 'test')
e = ev.read('test')
e.add('mpi')
e.concretize()
hashes = e.concretized_order
assert len(hashes) == 1
spec = e.specs_by_hash[hashes[0]]
assert spec.satisfies('mpi')
def test_env_add_nonexistant_fails():
env('create', 'test')
e = ev.read('test')
with pytest.raises(ev.SpackEnvironmentError, match=r'no such package'):
e.add('thispackagedoesnotexist')
def test_env_list(mutable_mock_env_path):
env('create', 'foo')
env('create', 'bar')
env('create', 'baz')
out = env('list')
assert 'foo' in out
assert 'bar' in out
assert 'baz' in out
# make sure `spack env list` skips invalid things in var/spack/env
mutable_mock_env_path.join('.DS_Store').ensure(file=True)
out = env('list')
assert 'foo' in out
assert 'bar' in out
assert 'baz' in out
assert '.DS_Store' not in out
def test_env_remove(capfd):
env('create', 'foo')
env('create', 'bar')
out = env('list')
assert 'foo' in out
assert 'bar' in out
foo = ev.read('foo')
with foo:
with pytest.raises(spack.main.SpackCommandError):
with capfd.disabled():
env('remove', '-y', 'foo')
assert 'foo' in env('list')
env('remove', '-y', 'foo')
out = env('list')
assert 'foo' not in out
assert 'bar' in out
env('remove', '-y', 'bar')
out = env('list')
assert 'foo' not in out
assert 'bar' not in out
def test_concretize():
e = ev.create('test')
e.add('mpileaks')
e.concretize()
env_specs = e._get_environment_specs()
assert any(x.name == 'mpileaks' for x in env_specs)
def test_env_install_all(install_mockery, mock_fetch):
e = ev.create('test')
e.add('cmake-client')
e.concretize()
e.install_all()
env_specs = e._get_environment_specs()
spec = next(x for x in env_specs if x.name == 'cmake-client')
assert spec.package.installed
def test_env_install_single_spec(install_mockery, mock_fetch):
env('create', 'test')
install = SpackCommand('install')
e = ev.read('test')
with e:
install('cmake-client')
e = ev.read('test')
assert e.user_specs[0].name == 'cmake-client'
assert e.concretized_user_specs[0].name == 'cmake-client'
assert e.specs_by_hash[e.concretized_order[0]].name == 'cmake-client'
def test_env_modifications_error_on_activate(
install_mockery, mock_fetch, monkeypatch, capfd):
env('create', 'test')
install = SpackCommand('install')
e = ev.read('test')
with e:
install('cmake-client')
def setup_error(pkg, env):
raise RuntimeError("cmake-client had issues!")
pkg = spack.repo.path.get_pkg_class("cmake-client")
monkeypatch.setattr(pkg, "setup_run_environment", setup_error)
with e:
pass
_, err = capfd.readouterr()
assert "cmake-client had issues!" in err
assert "Warning: couldn't get environment settings" in err
def test_env_install_same_spec_twice(install_mockery, mock_fetch, capfd):
env('create', 'test')
e = ev.read('test')
with capfd.disabled():
with e:
# The first installation outputs the package prefix
install('cmake-client')
# The second installation attempt will also update the view
out = install('cmake-client')
assert 'Updating view at' in out
def test_remove_after_concretize():
e = ev.create('test')
e.add('mpileaks')
e.concretize()
e.add('python')
e.concretize()
e.remove('mpileaks')
assert Spec('mpileaks') not in e.user_specs
env_specs = e._get_environment_specs()
assert any(s.name == 'mpileaks' for s in env_specs)
e.add('mpileaks')
assert any(s.name == 'mpileaks' for s in e.user_specs)
e.remove('mpileaks', force=True)
assert Spec('mpileaks') not in e.user_specs
env_specs = e._get_environment_specs()
assert not any(s.name == 'mpileaks' for s in env_specs)
def test_remove_command():
env('create', 'test')
assert 'test' in env('list')
with ev.read('test'):
add('mpileaks')
assert 'mpileaks' in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' not in find('--show-concretized')
with ev.read('test'):
remove('mpileaks')
assert 'mpileaks' not in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' not in find('--show-concretized')
with ev.read('test'):
add('mpileaks')
assert 'mpileaks' in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' not in find('--show-concretized')
with ev.read('test'):
concretize()
assert 'mpileaks' in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' in find('--show-concretized')
with ev.read('test'):
remove('mpileaks')
assert 'mpileaks' not in find()
# removed but still in last concretized specs
assert 'mpileaks@' in find('--show-concretized')
with ev.read('test'):
concretize()
assert 'mpileaks' not in find()
assert 'mpileaks@' not in find()
# now the lockfile is regenerated and it's gone.
assert 'mpileaks@' not in find('--show-concretized')
def test_environment_status(capsys, tmpdir):
with tmpdir.as_cwd():
with capsys.disabled():
assert 'No active environment' in env('status')
with ev.create('test'):
with capsys.disabled():
assert 'In environment test' in env('status')
with ev.Environment('local_dir'):
with capsys.disabled():
assert os.path.join(os.getcwd(), 'local_dir') in env('status')
e = ev.Environment('myproject')
e.write()
with tmpdir.join('myproject').as_cwd():
with e:
with capsys.disabled():
assert 'in current directory' in env('status')
def test_env_status_broken_view(
mutable_mock_env_path, mock_archive, mock_fetch, mock_packages,
install_mockery
):
with ev.create('test'):
install('trivial-install-test-package')
# switch to a new repo that doesn't include the installed package
# test that Spack detects the missing package and warns the user
new_repo = MockPackageMultiRepo()
with spack.repo.swap(new_repo):
output = env('status')
assert 'In environment test' in output
assert 'Environment test includes out of date' in output
# Test that the warning goes away when it's fixed
output = env('status')
assert 'In environment test' in output
assert 'Environment test includes out of date' not in output
def test_env_activate_broken_view(
mutable_mock_env_path, mock_archive, mock_fetch, mock_packages,
install_mockery
):
with ev.create('test'):
install('trivial-install-test-package')
# switch to a new repo that doesn't include the installed package
# test that Spack detects the missing package and fails gracefully
new_repo = MockPackageMultiRepo()
with spack.repo.swap(new_repo):
with pytest.raises(SpackCommandError):
env('activate', '--sh', 'test')
# test replacing repo fixes it
env('activate', '--sh', 'test')
def test_to_lockfile_dict():
e = ev.create('test')
e.add('mpileaks')
e.concretize()
context_dict = e._to_lockfile_dict()
e_copy = ev.create('test_copy')
e_copy._read_lockfile_dict(context_dict)
assert e.specs_by_hash == e_copy.specs_by_hash
def test_env_repo():
e = ev.create('test')
e.add('mpileaks')
e.write()
with ev.read('test'):
concretize()
package = e.repo.get('mpileaks')
assert package.name == 'mpileaks'
assert package.namespace == 'builtin.mock'
def test_user_removed_spec():
"""Ensure a user can remove from any position in the spack.yaml file."""
initial_yaml = StringIO("""\
env:
specs:
- mpileaks
- hypre
- libelf
""")
before = ev.create('test', initial_yaml)
before.concretize()
before.write()
# user modifies yaml externally to spack and removes hypre
with open(before.manifest_path, 'w') as f:
f.write("""\
env:
specs:
- mpileaks
- libelf
""")
after = ev.read('test')
after.concretize()
after.write()
env_specs = after._get_environment_specs()
read = ev.read('test')
env_specs = read._get_environment_specs()
assert not any(x.name == 'hypre' for x in env_specs)
def test_init_from_lockfile(tmpdir):
"""Test that an environment can be instantiated from a lockfile."""
initial_yaml = StringIO("""\
env:
specs:
- mpileaks
- hypre
- libelf
""")
e1 = ev.create('test', initial_yaml)
e1.concretize()
e1.write()
e2 = ev.Environment(str(tmpdir), e1.lock_path)
for s1, s2 in zip(e1.user_specs, e2.user_specs):
assert s1 == s2
for h1, h2 in zip(e1.concretized_order, e2.concretized_order):
assert h1 == h2
assert e1.specs_by_hash[h1] == e2.specs_by_hash[h2]
for s1, s2 in zip(e1.concretized_user_specs, e2.concretized_user_specs):
assert s1 == s2
def test_init_from_yaml(tmpdir):
"""Test that an environment can be instantiated from a lockfile."""
initial_yaml = StringIO("""\
env:
specs:
- mpileaks
- hypre
- libelf
""")
e1 = ev.create('test', initial_yaml)
e1.concretize()
e1.write()
e2 = ev.Environment(str(tmpdir), e1.manifest_path)
for s1, s2 in zip(e1.user_specs, e2.user_specs):
assert s1 == s2
assert not e2.concretized_order
assert not e2.concretized_user_specs
assert not e2.specs_by_hash
@pytest.mark.usefixtures('config')
def test_env_view_external_prefix(tmpdir_factory, mutable_database,
mock_packages):
fake_prefix = tmpdir_factory.mktemp('a-prefix')
fake_bin = fake_prefix.join('bin')
fake_bin.ensure(dir=True)
initial_yaml = StringIO("""\
env:
specs:
- a
view: true
""")
external_config = StringIO("""\
packages:
a:
externals:
- spec: a
prefix: {a_prefix}
buildable: false
""".format(a_prefix=str(fake_prefix)))
external_config_dict = spack.util.spack_yaml.load_config(external_config)
test_scope = spack.config.InternalConfigScope(
'env-external-test', data=external_config_dict)
with spack.config.override(test_scope):
e = ev.create('test', initial_yaml)
e.concretize()
# Note: normally installing specs in a test environment requires doing
# a fake install, but not for external specs since no actions are
# taken to install them. The installation commands also include
# post-installation functions like DB-registration, so are important
# to do (otherwise the package is not considered installed).
e.install_all()
e.write()
env_modifications = e.add_default_view_to_shell('sh')
individual_modifications = env_modifications.split('\n')
def path_includes_fake_prefix(cmd):
return 'export PATH' in cmd and str(fake_bin) in cmd
assert any(
path_includes_fake_prefix(cmd) for cmd in individual_modifications
)
def test_init_with_file_and_remove(tmpdir):
"""Ensure a user can remove from any position in the spack.yaml file."""
path = tmpdir.join('spack.yaml')
with tmpdir.as_cwd():
with open(str(path), 'w') as f:
f.write("""\
env:
specs:
- mpileaks
""")
env('create', 'test', 'spack.yaml')
out = env('list')
assert 'test' in out
with ev.read('test'):
assert 'mpileaks' in find()
env('remove', '-y', 'test')
out = env('list')
assert 'test' not in out
def test_env_with_config():
test_config = """\
env:
specs:
- mpileaks
packages:
mpileaks:
version: [2.2]
"""
_env_create('test', StringIO(test_config))
e = ev.read('test')
with e:
e.concretize()
assert any(x.satisfies('[email protected]')
for x in e._get_environment_specs())
def test_with_config_bad_include(env_deactivate, capfd):
env_name = 'test_bad_include'
test_config = """\
spack:
include:
- /no/such/directory
- no/such/file.yaml
"""
_env_create(env_name, StringIO(test_config))
e = ev.read(env_name)
with pytest.raises(SystemExit):
with e:
e.concretize()
out, err = capfd.readouterr()
assert 'missing include' in err
assert '/no/such/directory' in err
assert 'no/such/file.yaml' in err
def test_env_with_include_config_files_same_basename():
test_config = """\
env:
include:
- ./path/to/included-config.yaml
- ./second/path/to/include-config.yaml
specs:
[libelf, mpileaks]
"""
_env_create('test', StringIO(test_config))
e = ev.read('test')
fs.mkdirp(os.path.join(e.path, 'path', 'to'))
with open(os.path.join(
e.path,
'./path/to/included-config.yaml'), 'w') as f:
f.write("""\
packages:
libelf:
version: [0.8.10]
""")
fs.mkdirp(os.path.join(e.path, 'second', 'path', 'to'))
with open(os.path.join(
e.path,
'./second/path/to/include-config.yaml'), 'w') as f:
f.write("""\
packages:
mpileaks:
version: [2.2]
""")
with e:
e.concretize()
environment_specs = e._get_environment_specs(False)
assert(environment_specs[0].satisfies('[email protected]'))
assert(environment_specs[1].satisfies('[email protected]'))
def test_env_with_included_config_file():
test_config = """\
env:
include:
- ./included-config.yaml
specs:
- mpileaks
"""
_env_create('test', StringIO(test_config))
e = ev.read('test')
with open(os.path.join(e.path, 'included-config.yaml'), 'w') as f:
f.write("""\
packages:
mpileaks:
version: [2.2]
""")
with e:
e.concretize()
assert any(x.satisfies('[email protected]')
for x in e._get_environment_specs())
def test_env_with_included_config_scope():
config_scope_path = os.path.join(ev.root('test'), 'config')
test_config = """\
env:
include:
- %s
specs:
- mpileaks
""" % config_scope_path
_env_create('test', StringIO(test_config))
e = ev.read('test')
fs.mkdirp(config_scope_path)
with open(os.path.join(config_scope_path, 'packages.yaml'), 'w') as f:
f.write("""\
packages:
mpileaks:
version: [2.2]
""")
with e:
e.concretize()
assert any(x.satisfies('[email protected]')
for x in e._get_environment_specs())
def test_env_with_included_config_var_path():
config_var_path = os.path.join('$tempdir', 'included-config.yaml')
test_config = """\
env:
include:
- %s
specs:
- mpileaks
""" % config_var_path
_env_create('test', StringIO(test_config))
e = ev.read('test')
config_real_path = substitute_path_variables(config_var_path)
fs.mkdirp(os.path.dirname(config_real_path))
with open(config_real_path, 'w') as f:
f.write("""\
packages:
mpileaks:
version: [2.2]
""")
with e:
e.concretize()
assert any(x.satisfies('[email protected]')
for x in e._get_environment_specs())
def test_env_config_precedence():
test_config = """\
env:
packages:
libelf:
version: [0.8.12]
include:
- ./included-config.yaml
specs:
- mpileaks
"""
_env_create('test', StringIO(test_config))
e = ev.read('test')
with open(os.path.join(e.path, 'included-config.yaml'), 'w') as f:
f.write("""\
packages:
mpileaks:
version: [2.2]
libelf:
version: [0.8.11]
""")
with e:
e.concretize()
# ensure included scope took effect
assert any(
x.satisfies('[email protected]') for x in e._get_environment_specs())
# ensure env file takes precedence
assert any(
x.satisfies('[email protected]') for x in e._get_environment_specs())
def test_included_config_precedence():
test_config = """\
env:
include:
- ./high-config.yaml # this one should take precedence
- ./low-config.yaml
specs:
- mpileaks
"""
_env_create('test', StringIO(test_config))
e = ev.read('test')
with open(os.path.join(e.path, 'high-config.yaml'), 'w') as f:
f.write("""\
packages:
libelf:
version: [0.8.10] # this should override libelf version below
""")
with open(os.path.join(e.path, 'low-config.yaml'), 'w') as f:
f.write("""\
packages:
mpileaks:
version: [2.2]
libelf:
version: [0.8.12]
""")
with e:
e.concretize()
assert any(
x.satisfies('[email protected]') for x in e._get_environment_specs())
assert any(
[x.satisfies('[email protected]') for x in e._get_environment_specs()])
def test_bad_env_yaml_format(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
spacks:
- mpileaks
""")
with tmpdir.as_cwd():
with pytest.raises(spack.config.ConfigFormatError) as e:
env('create', 'test', './spack.yaml')
assert './spack.yaml:2' in str(e)
assert "'spacks' was unexpected" in str(e)
def test_env_loads(install_mockery, mock_fetch):
env('create', 'test')
with ev.read('test'):
add('mpileaks')
concretize()
install('--fake')
with ev.read('test'):
env('loads', 'test')
e = ev.read('test')
loads_file = os.path.join(e.path, 'loads')
assert os.path.exists(loads_file)
with open(loads_file) as f:
contents = f.read()
assert 'module load mpileaks' in contents
@pytest.mark.disable_clean_stage_check
def test_stage(mock_stage, mock_fetch, install_mockery):
env('create', 'test')
with ev.read('test'):
add('mpileaks')
add('zmpi')
concretize()
stage()
root = str(mock_stage)
def check_stage(spec):
spec = Spec(spec).concretized()
for dep in spec.traverse():
stage_name = "{0}{1}-{2}-{3}".format(stage_prefix, dep.name,
dep.version, dep.dag_hash())
assert os.path.isdir(os.path.join(root, stage_name))
check_stage('mpileaks')
check_stage('zmpi')
def test_env_commands_die_with_no_env_arg():
# these fail in argparse when given no arg
with pytest.raises(SystemExit):
env('create')
with pytest.raises(SystemExit):
env('remove')
# these have an optional env arg and raise errors via tty.die
with pytest.raises(spack.main.SpackCommandError):
env('loads')
# This should NOT raise an error with no environment
# it just tells the user there isn't an environment
env('status')
def test_env_blocks_uninstall(mock_stage, mock_fetch, install_mockery):
env('create', 'test')
with ev.read('test'):
add('mpileaks')
install('--fake')
out = uninstall('mpileaks', fail_on_error=False)
assert uninstall.returncode == 1
assert 'used by the following environments' in out
def test_roots_display_with_variants():
env('create', 'test')
with ev.read('test'):
add('boost+shared')
with ev.read('test'):
out = find(output=str)
assert "boost +shared" in out
def test_uninstall_removes_from_env(mock_stage, mock_fetch, install_mockery):
env('create', 'test')
with ev.read('test'):
add('mpileaks')
add('libelf')
install('--fake')
test = ev.read('test')
assert any(s.name == 'mpileaks' for s in test.specs_by_hash.values())
assert any(s.name == 'libelf' for s in test.specs_by_hash.values())
with ev.read('test'):
uninstall('-ya')
test = ev.read('test')
assert not test.specs_by_hash
assert not test.concretized_order
assert not test.user_specs
def create_v1_lockfile_dict(roots, all_specs):
test_lockfile_dict = {
"_meta": {
"lockfile-version": 1,
"file-type": "spack-lockfile"
},
"roots": list(
{
"hash": s.dag_hash(),
"spec": s.name
} for s in roots
),
# Version one lockfiles use the dag hash without build deps as keys,
# but they write out the full node dict (including build deps)
"concrete_specs": dict(
(s.dag_hash(), s.to_node_dict(hash=ht.build_hash))
for s in all_specs
)
}
return test_lockfile_dict
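# create_v1_lockfile_dict produces a minimal version-1 lockfile, roughly
# (hashes abbreviated; for illustration only):
#
#   {
#       "_meta": {"lockfile-version": 1, "file-type": "spack-lockfile"},
#       "roots": [{"hash": "abc123...", "spec": "mpileaks"}],
#       "concrete_specs": {"abc123...": { ...full node dict, incl. build deps... }}
#   }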
@pytest.mark.usefixtures('config')
def test_read_old_lock_and_write_new(tmpdir):
build_only = ('build',)
mock_repo = MockPackageMultiRepo()
y = mock_repo.add_package('y', [], [])
mock_repo.add_package('x', [y], [build_only])
with spack.repo.swap(mock_repo):
x = Spec('x')
x.concretize()
y = x['y']
test_lockfile_dict = create_v1_lockfile_dict([x], [x, y])
test_lockfile_path = str(tmpdir.join('test.lock'))
with open(test_lockfile_path, 'w') as f:
sjson.dump(test_lockfile_dict, stream=f)
_env_create('test', test_lockfile_path, with_view=False)
e = ev.read('test')
hashes = set(e._to_lockfile_dict()['concrete_specs'])
# When the lockfile is rewritten, it should adopt the new hash scheme
# which accounts for all dependencies, including build dependencies
assert hashes == set([
x.build_hash(),
y.build_hash()])
@pytest.mark.usefixtures('config')
def test_read_old_lock_creates_backup(tmpdir):
"""When reading a version-1 lockfile, make sure that a backup of that file
is created.
"""
mock_repo = MockPackageMultiRepo()
y = mock_repo.add_package('y', [], [])
with spack.repo.swap(mock_repo):
y = Spec('y')
y.concretize()
test_lockfile_dict = create_v1_lockfile_dict([y], [y])
env_root = tmpdir.mkdir('test-root')
test_lockfile_path = str(env_root.join(ev.lockfile_name))
with open(test_lockfile_path, 'w') as f:
sjson.dump(test_lockfile_dict, stream=f)
e = ev.Environment(str(env_root))
assert os.path.exists(e._lock_backup_v1_path)
with open(e._lock_backup_v1_path, 'r') as backup_v1_file:
lockfile_dict_v1 = sjson.load(backup_v1_file)
# Make sure that the backup file follows the v1 hash scheme
assert y.dag_hash() in lockfile_dict_v1['concrete_specs']
@pytest.mark.usefixtures('config')
def test_indirect_build_dep():
"""Simple case of X->Y->Z where Y is a build/link dep and Z is a
build-only dep. Make sure this concrete DAG is preserved when writing the
environment out and reading it back.
"""
default = ('build', 'link')
build_only = ('build',)
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [build_only])
mock_repo.add_package('x', [y], [default])
def noop(*args):
pass
setattr(mock_repo, 'dump_provenance', noop)
with spack.repo.swap(mock_repo):
x_spec = Spec('x')
x_concretized = x_spec.concretized()
_env_create('test', with_view=False)
e = ev.read('test')
e.add(x_spec)
e.concretize()
e.write()
e_read = ev.read('test')
x_env_hash, = e_read.concretized_order
x_env_spec = e_read.specs_by_hash[x_env_hash]
assert x_env_spec == x_concretized
@pytest.mark.usefixtures('config')
def test_store_different_build_deps():
r"""Ensure that an environment can store two instances of a build-only
dependency::
x y
/| (l) | (b)
(b) | y z2
\| (b)
z1
"""
default = ('build', 'link')
build_only = ('build',)
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [build_only])
mock_repo.add_package('x', [y, z], [default, build_only])
def noop(*args):
pass
setattr(mock_repo, 'dump_provenance', noop)
with spack.repo.swap(mock_repo):
y_spec = Spec('y ^z@3')
y_concretized = y_spec.concretized()
x_spec = Spec('x ^z@2')
x_concretized = x_spec.concretized()
# Even though x chose a different 'z', it should choose the same y
# according to the DAG hash (since build deps are excluded from
# comparison by default). Although the dag hashes are equal, the specs
# are not considered equal because they compare build deps.
assert x_concretized['y'].dag_hash() == y_concretized.dag_hash()
_env_create('test', with_view=False)
e = ev.read('test')
e.add(y_spec)
e.add(x_spec)
e.concretize()
e.write()
e_read = ev.read('test')
y_env_hash, x_env_hash = e_read.concretized_order
y_read = e_read.specs_by_hash[y_env_hash]
x_read = e_read.specs_by_hash[x_env_hash]
assert x_read['z'] != y_read['z']
def test_env_updates_view_install(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
with ev.read('test'):
add('mpileaks')
install('--fake')
check_mpileaks_and_deps_in_view(view_dir)
def test_env_view_fails(
tmpdir, mock_packages, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
with ev.read('test'):
add('libelf')
add('libelf cflags=-g')
with pytest.raises(RuntimeError, match='merge blocked by file'):
install('--fake')
def test_env_without_view_install(
tmpdir, mock_stage, mock_fetch, install_mockery):
# Test enabling a view after installing specs
env('create', '--without-view', 'test')
test_env = ev.read('test')
with pytest.raises(spack.environment.SpackEnvironmentError):
test_env.default_view
view_dir = tmpdir.mkdir('view')
with ev.read('test'):
add('mpileaks')
install('--fake')
env('view', 'enable', str(view_dir))
# After enabling the view, the specs should be linked into the environment
# view dir
check_mpileaks_and_deps_in_view(view_dir)
def test_env_config_view_default(
tmpdir, mock_stage, mock_fetch, install_mockery):
# This config doesn't mention whether a view is enabled
test_config = """\
env:
specs:
- mpileaks
"""
_env_create('test', StringIO(test_config))
with ev.read('test'):
install('--fake')
e = ev.read('test')
# Try retrieving the view object
view = e.default_view.view()
assert view.get_spec('mpileaks')
def test_env_updates_view_install_package(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
with ev.read('test'):
install('--fake', 'mpileaks')
assert os.path.exists(str(view_dir.join('.spack/mpileaks')))
def test_env_updates_view_add_concretize(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
install('--fake', 'mpileaks')
with ev.read('test'):
add('mpileaks')
concretize()
check_mpileaks_and_deps_in_view(view_dir)
def test_env_updates_view_uninstall(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
with ev.read('test'):
install('--fake', 'mpileaks')
check_mpileaks_and_deps_in_view(view_dir)
with ev.read('test'):
uninstall('-ay')
check_viewdir_removal(view_dir)
def test_env_updates_view_uninstall_referenced_elsewhere(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
install('--fake', 'mpileaks')
with ev.read('test'):
add('mpileaks')
concretize()
check_mpileaks_and_deps_in_view(view_dir)
with ev.read('test'):
uninstall('-ay')
check_viewdir_removal(view_dir)
def test_env_updates_view_remove_concretize(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
install('--fake', 'mpileaks')
with ev.read('test'):
add('mpileaks')
concretize()
check_mpileaks_and_deps_in_view(view_dir)
with ev.read('test'):
remove('mpileaks')
concretize()
check_viewdir_removal(view_dir)
def test_env_updates_view_force_remove(
tmpdir, mock_stage, mock_fetch, install_mockery):
view_dir = tmpdir.mkdir('view')
env('create', '--with-view=%s' % view_dir, 'test')
with ev.read('test'):
install('--fake', 'mpileaks')
check_mpileaks_and_deps_in_view(view_dir)
with ev.read('test'):
remove('-f', 'mpileaks')
check_viewdir_removal(view_dir)
def test_env_activate_view_fails(
tmpdir, mock_stage, mock_fetch, install_mockery, env_deactivate):
"""Sanity check on env activate to make sure it requires shell support"""
out = env('activate', 'test')
assert "To set up shell support" in out
def test_stack_yaml_definitions(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_yaml_definitions_as_constraints(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- mpis: [mpich, openmpi]
specs:
- matrix:
- [$packages]
- [$^mpis]
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('mpileaks^mpich') in test.user_specs
assert Spec('callpath^mpich') in test.user_specs
assert Spec('mpileaks^openmpi') in test.user_specs
assert Spec('callpath^openmpi') in test.user_specs
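# The matrix above cross-products the two definition lists, so 2 packages x 2 MPI
# constraints yield the 4 user specs checked here.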
def test_stack_yaml_definitions_as_constraints_on_matrix(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- mpis:
- matrix:
- [mpich]
- ['@3.0.4', '@3.0.3']
specs:
- matrix:
- [$packages]
- [$^mpis]
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('mpileaks^[email protected]') in test.user_specs
assert Spec('callpath^[email protected]') in test.user_specs
assert Spec('mpileaks^[email protected]') in test.user_specs
assert Spec('callpath^[email protected]') in test.user_specs
@pytest.mark.regression('12095')
def test_stack_yaml_definitions_write_reference(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- indirect: [$packages]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
concretize()
test = ev.read('test')
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_yaml_add_to_list(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
add('-l', 'packages', 'libelf')
test = ev.read('test')
assert Spec('libelf') in test.user_specs
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_yaml_remove_from_list(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
remove('-l', 'packages', 'mpileaks')
test = ev.read('test')
assert Spec('mpileaks') not in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_yaml_remove_from_list_force(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
specs:
- matrix:
- [$packages]
- [^mpich, ^zmpi]
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
concretize()
remove('-f', '-l', 'packages', 'mpileaks')
find_output = find('-c')
assert 'mpileaks' not in find_output
test = ev.read('test')
assert len(test.user_specs) == 2
assert Spec('callpath ^zmpi') in test.user_specs
assert Spec('callpath ^mpich') in test.user_specs
def test_stack_yaml_remove_from_matrix_no_effect(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages:
- matrix:
- [mpileaks, callpath]
- [target=be]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test') as e:
before = e.user_specs.specs
remove('-l', 'packages', 'mpileaks')
after = e.user_specs.specs
assert before == after
def test_stack_yaml_force_remove_from_matrix(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages:
- matrix:
- [mpileaks, callpath]
- [target=be]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test') as e:
concretize()
before_user = e.user_specs.specs
before_conc = e.concretized_user_specs
remove('-f', '-l', 'packages', 'mpileaks')
after_user = e.user_specs.specs
after_conc = e.concretized_user_specs
assert before_user == after_user
mpileaks_spec = Spec('mpileaks target=be')
assert mpileaks_spec in before_conc
assert mpileaks_spec not in after_conc
def test_stack_concretize_extraneous_deps(tmpdir, config, mock_packages):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- install:
- matrix:
- [$packages]
- ['^zmpi', '^mpich']
specs:
- $install
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
concretize()
test = ev.read('test')
for user, concrete in test.concretized_specs():
assert concrete.concrete
assert not user.concrete
if user.name == 'libelf':
assert not concrete.satisfies('^mpi', strict=True)
elif user.name == 'mpileaks':
assert concrete.satisfies('^mpi', strict=True)
def test_stack_concretize_extraneous_variants(tmpdir, config, mock_packages):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- install:
- matrix:
- [$packages]
- ['~shared', '+shared']
specs:
- $install
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
concretize()
test = ev.read('test')
for user, concrete in test.concretized_specs():
assert concrete.concrete
assert not user.concrete
if user.name == 'libelf':
assert 'shared' not in concrete.variants
if user.name == 'mpileaks':
assert (concrete.variants['shared'].value ==
user.variants['shared'].value)
def test_stack_concretize_extraneous_variants_with_dash(tmpdir, config,
mock_packages):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- install:
- matrix:
- [$packages]
- ['shared=False', '+shared-libs']
specs:
- $install
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
concretize()
ev.read('test')
# Regression test for handling of variants with dashes in them
# will fail before this point if code regresses
assert True
def test_stack_definition_extension(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('libelf') in test.user_specs
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_definition_conditional_false(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
when: 'False'
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('libelf') in test.user_specs
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') not in test.user_specs
def test_stack_definition_conditional_true(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
when: 'True'
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('libelf') in test.user_specs
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_definition_conditional_with_variable(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
when: platform == 'test'
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('libelf') in test.user_specs
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_definition_conditional_with_satisfaction(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
when: arch.satisfies('platform=foo') # will be "test" when testing
- packages: [callpath]
when: arch.satisfies('platform=test')
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('libelf') not in test.user_specs
assert Spec('mpileaks') not in test.user_specs
assert Spec('callpath') in test.user_specs
def test_stack_definition_complex_conditional(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
when: re.search(r'foo', hostname) and env['test'] == 'THISSHOULDBEFALSE'
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
test = ev.read('test')
assert Spec('libelf') in test.user_specs
assert Spec('mpileaks') in test.user_specs
assert Spec('callpath') not in test.user_specs
def test_stack_definition_conditional_invalid_variable(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
when: bad_variable == 'test'
specs:
- $packages
""")
with tmpdir.as_cwd():
with pytest.raises(NameError):
env('create', 'test', './spack.yaml')
def test_stack_definition_conditional_add_write(tmpdir):
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [libelf, mpileaks]
- packages: [callpath]
when: platform == 'test'
specs:
- $packages
""")
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
add('-l', 'packages', 'zmpi')
test = ev.read('test')
packages_lists = list(filter(lambda x: 'packages' in x,
test.yaml['env']['definitions']))
assert len(packages_lists) == 2
assert 'callpath' not in packages_lists[0]['packages']
assert 'callpath' in packages_lists[1]['packages']
assert 'zmpi' in packages_lists[0]['packages']
assert 'zmpi' not in packages_lists[1]['packages']
def test_stack_combinatorial_view(tmpdir, mock_fetch, mock_packages,
mock_archive, install_mockery):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
combinatorial:
root: %s
projections:
'all': '{name}/{version}-{compiler.name}'""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
test = ev.read('test')
for spec in test._get_environment_specs():
assert os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_stack_view_select(tmpdir, mock_fetch, mock_packages,
mock_archive, install_mockery):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
combinatorial:
root: %s
select: ['%%gcc']
projections:
'all': '{name}/{version}-{compiler.name}'""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
test = ev.read('test')
for spec in test._get_environment_specs():
if spec.satisfies('%gcc'):
assert os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
else:
assert not os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_stack_view_exclude(tmpdir, mock_fetch, mock_packages,
mock_archive, install_mockery):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
combinatorial:
root: %s
exclude: [callpath]
projections:
'all': '{name}/{version}-{compiler.name}'""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
test = ev.read('test')
for spec in test._get_environment_specs():
if not spec.satisfies('callpath'):
assert os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
else:
assert not os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_stack_view_select_and_exclude(tmpdir, mock_fetch, mock_packages,
mock_archive, install_mockery):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
combinatorial:
root: %s
select: ['%%gcc']
exclude: [callpath]
projections:
'all': '{name}/{version}-{compiler.name}'""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
test = ev.read('test')
for spec in test._get_environment_specs():
if spec.satisfies('%gcc') and not spec.satisfies('callpath'):
assert os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
else:
assert not os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_view_link_roots(tmpdir, mock_fetch, mock_packages, mock_archive,
install_mockery):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
combinatorial:
root: %s
select: ['%%gcc']
exclude: [callpath]
link: 'roots'
projections:
'all': '{name}/{version}-{compiler.name}'""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
test = ev.read('test')
for spec in test._get_environment_specs():
if spec in test.roots() and (spec.satisfies('%gcc') and
not spec.satisfies('callpath')):
assert os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
else:
assert not os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_view_link_all(tmpdir, mock_fetch, mock_packages, mock_archive,
install_mockery):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, callpath]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
combinatorial:
root: %s
select: ['%%gcc']
exclude: [callpath]
link: 'all'
projections:
'all': '{name}/{version}-{compiler.name}'""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
test = ev.read('test')
for spec in test._get_environment_specs():
if spec.satisfies('%gcc') and not spec.satisfies('callpath'):
assert os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
else:
assert not os.path.exists(
os.path.join(viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_stack_view_activate_from_default(tmpdir, mock_fetch, mock_packages,
mock_archive, install_mockery,
env_deactivate):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, cmake]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
default:
root: %s
select: ['%%gcc']""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
shell = env('activate', '--sh', 'test')
assert 'PATH' in shell
assert os.path.join(viewdir, 'bin') in shell
assert 'FOOBAR=mpileaks' in shell
def test_stack_view_no_activate_without_default(tmpdir, mock_fetch,
mock_packages, mock_archive,
install_mockery,
env_deactivate):
filename = str(tmpdir.join('spack.yaml'))
viewdir = str(tmpdir.join('view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, cmake]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
not-default:
root: %s
select: ['%%gcc']""" % viewdir)
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
shell = env('activate', '--sh', 'test')
assert 'PATH' not in shell
assert viewdir not in shell
def test_stack_view_multiple_views(tmpdir, mock_fetch, mock_packages,
mock_archive, install_mockery,
env_deactivate):
filename = str(tmpdir.join('spack.yaml'))
default_viewdir = str(tmpdir.join('default-view'))
combin_viewdir = str(tmpdir.join('combinatorial-view'))
with open(filename, 'w') as f:
f.write("""\
env:
definitions:
- packages: [mpileaks, cmake]
- compilers: ['%%gcc', '%%clang']
specs:
- matrix:
- [$packages]
- [$compilers]
view:
default:
root: %s
select: ['%%gcc']
combinatorial:
root: %s
exclude: [callpath %%gcc]
projections:
'all': '{name}/{version}-{compiler.name}'""" % (default_viewdir,
combin_viewdir))
with tmpdir.as_cwd():
env('create', 'test', './spack.yaml')
with ev.read('test'):
install()
shell = env('activate', '--sh', 'test')
assert 'PATH' in shell
assert os.path.join(default_viewdir, 'bin') in shell
test = ev.read('test')
for spec in test._get_environment_specs():
if not spec.satisfies('callpath%gcc'):
assert os.path.exists(
os.path.join(combin_viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
else:
assert not os.path.exists(
os.path.join(combin_viewdir, spec.name, '%s-%s' %
(spec.version, spec.compiler.name)))
def test_env_activate_sh_prints_shell_output(
tmpdir, mock_stage, mock_fetch, install_mockery, env_deactivate
):
"""Check the shell commands output by ``spack env activate --sh``.
This is a cursory check; ``share/spack/qa/setup-env-test.sh`` checks
for correctness.
"""
env('create', 'test', add_view=True)
out = env('activate', '--sh', 'test')
assert "export SPACK_ENV=" in out
assert "export PS1=" not in out
assert "alias despacktivate=" in out
out = env('activate', '--sh', '--prompt', 'test')
assert "export SPACK_ENV=" in out
assert "export PS1=" in out
assert "alias despacktivate=" in out
def test_env_activate_csh_prints_shell_output(
tmpdir, mock_stage, mock_fetch, install_mockery, env_deactivate
):
"""Check the shell commands output by ``spack env activate --csh``."""
env('create', 'test', add_view=True)
out = env('activate', '--csh', 'test')
assert "setenv SPACK_ENV" in out
assert "setenv set prompt" not in out
assert "alias despacktivate" in out
out = env('activate', '--csh', '--prompt', 'test')
assert "setenv SPACK_ENV" in out
assert "set prompt=" in out
assert "alias despacktivate" in out
@pytest.mark.regression('12719')
def test_env_activate_default_view_root_unconditional(env_deactivate,
mutable_mock_env_path):
"""Check that the root of the default view in the environment is added
to the shell unconditionally."""
env('create', 'test', add_view=True)
with ev.read('test') as e:
viewdir = e.default_view.root
out = env('activate', '--sh', 'test')
assert 'PATH=%s' % os.path.join(viewdir, 'bin') in out
def test_concretize_user_specs_together():
e = ev.create('coconcretization')
e.concretization = 'together'
# Concretize a first time using 'mpich' as the MPI provider
e.add('mpileaks')
e.add('mpich')
e.concretize()
assert all('mpich' in spec for _, spec in e.concretized_specs())
assert all('mpich2' not in spec for _, spec in e.concretized_specs())
# Concretize a second time using 'mpich2' as the MPI provider
e.remove('mpich')
e.add('mpich2')
e.concretize()
assert all('mpich2' in spec for _, spec in e.concretized_specs())
assert all('mpich' not in spec for _, spec in e.concretized_specs())
# Concretize again without changing anything, check everything
# stays the same
e.concretize()
assert all('mpich2' in spec for _, spec in e.concretized_specs())
assert all('mpich' not in spec for _, spec in e.concretized_specs())
def test_cant_install_single_spec_when_concretizing_together():
e = ev.create('coconcretization')
e.concretization = 'together'
with pytest.raises(ev.SpackEnvironmentError, match=r'cannot install'):
e.install('zlib')
def test_duplicate_packages_raise_when_concretizing_together():
e = ev.create('coconcretization')
e.concretization = 'together'
e.add('mpileaks+opt')
e.add('mpileaks~opt')
e.add('mpich')
with pytest.raises(ev.SpackEnvironmentError, match=r'cannot contain more'):
e.concretize()
def test_env_write_only_non_default():
env('create', 'test')
e = ev.read('test')
with open(e.manifest_path, 'r') as f:
yaml = f.read()
assert yaml == ev.default_manifest_yaml
@pytest.fixture
def packages_yaml_v015(tmpdir):
"""Return the path to an existing manifest in the v0.15.x format
and the path to a non yet existing backup file.
"""
raw_yaml = """
spack:
specs:
- mpich
packages:
cmake:
paths:
[email protected]: /usr
"""
manifest = tmpdir.ensure('spack.yaml')
backup_file = tmpdir.join('spack.yaml.bkp')
manifest.write(raw_yaml)
return manifest, backup_file
def test_update_anonymous_env(packages_yaml_v015):
manifest, backup_file = packages_yaml_v015
env('update', '-y', str(manifest.dirname))
# The environment is now at the latest format
assert ev.is_latest_format(str(manifest))
# A backup file has been created and it's not at the latest format
assert os.path.exists(str(backup_file))
assert not ev.is_latest_format(str(backup_file))
def test_double_update(packages_yaml_v015):
manifest, backup_file = packages_yaml_v015
# Update the environment
env('update', '-y', str(manifest.dirname))
# Try to read the environment (it should not error)
ev.create('test', str(manifest))
# Updating again does nothing since the manifest is up-to-date
env('update', '-y', str(manifest.dirname))
# The environment is at the latest format
assert ev.is_latest_format(str(manifest))
# A backup file has been created and it's not at the latest format
assert os.path.exists(str(backup_file))
assert not ev.is_latest_format(str(backup_file))
def test_update_and_revert(packages_yaml_v015):
manifest, backup_file = packages_yaml_v015
# Update the environment
env('update', '-y', str(manifest.dirname))
assert os.path.exists(str(backup_file))
assert not ev.is_latest_format(str(backup_file))
assert ev.is_latest_format(str(manifest))
# Revert to previous state
env('revert', '-y', str(manifest.dirname))
assert not os.path.exists(str(backup_file))
assert not ev.is_latest_format(str(manifest))
def test_old_format_cant_be_updated_implicitly(packages_yaml_v015):
manifest, backup_file = packages_yaml_v015
env('activate', str(manifest.dirname))
with pytest.raises(spack.main.SpackCommandError):
add('hdf5')
@pytest.mark.regression('18147')
def test_can_update_attributes_with_override(tmpdir):
spack_yaml = """
spack:
mirrors::
test: /foo/bar
packages:
cmake:
paths:
[email protected]: /usr
specs:
- hdf5
"""
abspath = tmpdir.join('spack.yaml')
abspath.write(spack_yaml)
# Check that an update does not raise
env('update', '-y', str(abspath.dirname))
@pytest.mark.regression('18338')
def test_newline_in_commented_sequence_is_not_an_issue(tmpdir):
spack_yaml = """
spack:
specs:
- dyninst
packages:
libelf:
externals:
- spec: [email protected]
modules:
- libelf/3.18.1
concretization: together
"""
abspath = tmpdir.join('spack.yaml')
abspath.write(spack_yaml)
def extract_build_hash(environment):
_, dyninst = next(iter(environment.specs_by_hash.items()))
return dyninst['libelf'].build_hash()
# Concretize a first time and create a lockfile
with ev.Environment(str(tmpdir)) as e:
concretize()
libelf_first_hash = extract_build_hash(e)
# Check that a second run won't error
with ev.Environment(str(tmpdir)) as e:
concretize()
libelf_second_hash = extract_build_hash(e)
assert libelf_first_hash == libelf_second_hash
@pytest.mark.regression('18441')
def test_lockfile_not_deleted_on_write_error(tmpdir, monkeypatch):
raw_yaml = """
spack:
specs:
- dyninst
packages:
libelf:
externals:
- spec: [email protected]
prefix: /usr
"""
spack_yaml = tmpdir.join('spack.yaml')
spack_yaml.write(raw_yaml)
spack_lock = tmpdir.join('spack.lock')
# Concretize a first time and create a lockfile
with ev.Environment(str(tmpdir)):
concretize()
assert os.path.exists(str(spack_lock))
# If I run concretize again and there's an error during write,
# the spack.lock file shouldn't disappear from disk
def _write_helper_raise(self, x, y):
raise RuntimeError('some error')
monkeypatch.setattr(
ev.Environment, '_update_and_write_manifest', _write_helper_raise
)
with ev.Environment(str(tmpdir)) as e:
e.concretize(force=True)
with pytest.raises(RuntimeError):
e.clear()
e.write()
assert os.path.exists(str(spack_lock))
|
the-stack_0_20149 | import os
import typing
from json import decoder, encoder
from Mod_NeonOcean_S4_Order import Mod
from Mod_NeonOcean_S4_Order.Tools import Package
def BuildPackageChanges () -> bool:
if not Package.CanBuildPackage():
return False
for package in Mod.GetCurrentMod().Packages: # type: Mod.Package
baseFileExists = os.path.exists(package.SourceBaseFilePath) # type: bool
loosePathExists = os.path.exists(package.SourceLoosePath) # type: bool
packageManifest = None # type: typing.Optional[typing.Dict[str, typing.Union[float, typing.Dict[str, float]]]]
if not os.path.exists(package.BuildFilePath):
_BuildPackageEverythingInternal(package)
return True
if os.path.exists(package.BuildManifestFilePath):
with open(package.BuildManifestFilePath) as packageManifestFile:
packageManifest = decoder.JSONDecoder().decode(packageManifestFile.read())
if packageManifest is not None and not isinstance(packageManifest, dict):
packageManifest = None
if packageManifest is not None and (not "Loose" in packageManifest or not "Base" in packageManifest):
packageManifest = None
if packageManifest is None:
_BuildPackageEverythingInternal(package)
return True
else:
filesChanged = False # type: bool
if baseFileExists:
baseCurrentChangeTime = os.path.getmtime(package.SourceBaseFilePath) # type: float
if packageManifest["Base"] != os.path.getmtime(package.SourceBaseFilePath):
packageManifest["Base"] = baseCurrentChangeTime
filesChanged = True
if loosePathExists:
packageManifestLooseDictionary = packageManifest["Loose"] # type: dict
for entryFileName in list(packageManifestLooseDictionary.keys()): # type: str
entryChangeTime = packageManifestLooseDictionary[entryFileName] # type: float
entryFilePath = os.path.join(package.SourceLoosePath, entryFileName) # type: str
if not os.path.exists(entryFilePath):
packageManifestLooseDictionary.pop(entryFileName)
filesChanged = True
continue
entryCurrentChangeTime = os.path.getmtime(entryFilePath) # type: float
if entryCurrentChangeTime != entryChangeTime:
packageManifest["Loose"][entryFileName] = entryCurrentChangeTime
filesChanged = True
for sourceDirectoryRoot, sourceDirectoryNames, sourceFileNames in os.walk(package.SourceLoosePath): # type: str, typing.List[str], typing.List[str]
for sourceFileName in sourceFileNames: # type: str
sourceFilePath = os.path.join(sourceDirectoryRoot, sourceFileName) # type: str
relativeSourceFilePath = os.path.relpath(sourceFilePath, package.SourceLoosePath) # type: str
sourceFileDuplicate = False # type: bool
for entryFileName in packageManifest["Loose"].keys(): # type: str
if entryFileName.lower() == relativeSourceFilePath.lower():
sourceFileDuplicate = True
break
if not sourceFileDuplicate:
packageManifest["Loose"][relativeSourceFilePath] = os.path.getmtime(sourceFilePath)
filesChanged = True
if filesChanged:
addingFilePaths = list() # type: typing.List[str]
if loosePathExists:
for sourceDirectoryRoot, sourceDirectoryNames, sourceFileNames in os.walk(package.SourceLoosePath): # type: str, typing.List[str], typing.List[str]
for sourceFileName in sourceFileNames: # type: str
# noinspection SpellCheckingInspection
if os.path.splitext(sourceFileName)[1].lower() == ".sourceinfo":
continue
sourceFilePath = os.path.join(sourceDirectoryRoot, sourceFileName) # type: str
if os.path.isfile(sourceFilePath):
addingFilePaths.append(sourceFilePath)
if baseFileExists:
Package.BuildPackage(package.BuildFilePath,
baseFilePath = package.SourceBaseFilePath,
addingFilePaths = addingFilePaths)
else:
Package.BuildPackage(package.BuildFilePath,
addingFilePaths = addingFilePaths)
with open(package.BuildManifestFilePath, "w+") as packageManifestFile:
packageManifestFile.write(encoder.JSONEncoder(indent = "\t").encode(packageManifest))
return True
def BuildPackageEverything () -> bool:
if not Package.CanBuildPackage():
return False
for package in Mod.GetCurrentMod().Packages: # type: Mod.Package
_BuildPackageEverythingInternal(package)
return True
def _BuildPackageEverythingInternal (package: Mod.Package) -> None:
baseFileExists = os.path.exists(package.SourceBaseFilePath) # type: bool
loosePathExists = os.path.exists(package.SourceLoosePath) # type: bool
packageManifest = dict() # type: typing.Dict[str, typing.Union[float, typing.Dict[str, float]]]
if baseFileExists:
packageManifest["Base"] = os.path.getmtime(package.SourceBaseFilePath)
else:
packageManifest["Base"] = -1
packageManifest["Loose"] = dict()
for sourceDirectoryRoot, sourceDirectoryNames, sourceFileNames in os.walk(package.SourceLoosePath): # type: str, typing.List[str], typing.List[str]
for sourceFileName in sourceFileNames: # type: str
sourceFilePath = os.path.join(sourceDirectoryRoot, sourceFileName) # type: str
relativeSourceFilePath = os.path.relpath(sourceFilePath, package.SourceLoosePath) # type: str
packageManifest["Loose"][relativeSourceFilePath] = os.path.getmtime(sourceFilePath)
addingFilePaths = list() # type: typing.List[str]
if loosePathExists:
for sourceDirectoryRoot, sourceDirectoryNames, sourceFileNames in os.walk(package.SourceLoosePath): # type: str, typing.List[str], typing.List[str]
for sourceFileName in sourceFileNames: # type: str
# noinspection SpellCheckingInspection
if os.path.splitext(sourceFileName)[1].lower() == ".sourceinfo":
continue
sourceFilePath = os.path.join(sourceDirectoryRoot, sourceFileName) # type: str
if os.path.isfile(sourceFilePath):
addingFilePaths.append(sourceFilePath)
if baseFileExists:
Package.BuildPackage(package.BuildFilePath,
baseFilePath = package.SourceBaseFilePath,
addingFilePaths = addingFilePaths)
else:
Package.BuildPackage(package.BuildFilePath,
addingFilePaths = addingFilePaths)
with open(package.BuildManifestFilePath, "w+") as packageManifestFile:
packageManifestFile.write(encoder.JSONEncoder(indent = "\t").encode(packageManifest))
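# Illustrative sketch of the manifest written above (not part of the build code): it is a
# plain JSON object keyed by "Base" and "Loose", where "Loose" maps relative loose-file
# paths to modification times that BuildPackageChanges() later compares against
# os.path.getmtime. The file name and timestamps below are hypothetical examples.
# {
#     "Base": 1620000000.0,
#     "Loose": {
#         "Example/Tuning.xml": 1620000100.0
#     }
# }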
|
the-stack_0_20151 | #import library
import tensorflow as tf
import numpy as np
from skimage.io import imread, imshow
from skimage.transform import resize
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import os
from os import makedirs
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.keras.mixed_precision import experimental as mixed_precision
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
tf.config.optimizer.set_jit(True)
########################################################################################################################
'''initialize constants'''
########################################################################################################################
seed = 7
np.random.seed(seed)
tf.random.set_seed(seed)
IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_CHANNELS = 1
########################################################################################################################
'''Load dataset'''
########################################################################################################################
TRAIN_PATH = 'Simulated_dataset/'
data_ids = [filename for filename in os.listdir(TRAIN_PATH) if filename.startswith("x_")]
NUMBER_OF_SAMPLES = int(len(data_ids))
print(NUMBER_OF_SAMPLES)
########################################################################################################################
'''Folder for saving the model'''
########################################################################################################################
MODEL_NAME = 'modelFDUNET.h5'
X_total = np.zeros((NUMBER_OF_SAMPLES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y_total = np.zeros((NUMBER_OF_SAMPLES, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)
NUMBER_EPOCHS = 250
PATIENCE = 10
MONITOR = 'val_loss'
im = TRAIN_PATH.split("d", 1)[1]
FOLDER_NAME = "modelARPAM"
makedirs(FOLDER_NAME)
MODEL_NAME = os.path.join(FOLDER_NAME, MODEL_NAME)
LOG_NAME = os.path.join(FOLDER_NAME, 'logs')
########################################################################################################################
'''Image augmentation'''
########################################################################################################################
print('Resizing training images and masks')
for data, val in enumerate(data_ids):
ext = val.split("_", 1)[1] #To get the number after x_
xpath = TRAIN_PATH + val
ypath = TRAIN_PATH + 'y_' + ext
img = imread(xpath)#[:, :, :IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
img = np.expand_dims(img, axis=2)
X_total[data] = img # Fill empty X_train with values from img
true_img = imread(ypath)#[:, :, :IMG_CHANNELS]
true_img = resize(true_img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
true_img = np.expand_dims(true_img, axis=2)
Y_total[data] = true_img
########################################################################################################################
'''Divide in training and test data'''
########################################################################################################################
test_split = 0.1
X_train, X_test, Y_train, Y_test = train_test_split(X_total, Y_total, test_size=test_split, random_state=seed)
Y_pred = np.zeros((len(X_test), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)
X_train, Y_train = shuffle(X_train, Y_train, random_state=seed)
print('Done splitting and shuffling')
########################################################################################################################
'''Network functions'''
########################################################################################################################
def Conv2D_BatchNorm(input, filters, kernel_size, strides, activation, kernel_initializer, padding):
out = tf.keras.layers.Conv2D(filters, kernel_size=kernel_size, strides= strides, activation=activation, kernel_initializer=kernel_initializer, padding=padding)(input)
out = tf.keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None)(out)
return out
def Conv2D_Transpose_BatchNorm(input, filters, kernel_size, strides, activation, kernel_initializer, padding):
out = tf.keras.layers.Conv2DTranspose(filters, kernel_size=kernel_size, strides= strides, activation=activation, kernel_initializer=kernel_initializer, padding=padding)(input)
out = tf.keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None)(out)
return out
def DownBlock(input, filters, kernel_size, padding, activation, kernel_initializer):
out = FD_Block(input, f_in=filters // 2, f_out=filters, k=filters // 8, kernel_size=3, padding='same',
activation=activation, kernel_initializer='glorot_normal')
shortcut = out
out = DownSample(out, filters, kernel_size, strides=2, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
return [out, shortcut]
def BrigdeBlock(input, filters, kernel_size, padding, activation, kernel_initializer):
out = FD_Block(input, f_in=filters // 2, f_out=filters, k=filters // 8, kernel_size=3, padding='same',
activation=activation, kernel_initializer='glorot_normal')
out = UpSample(out, filters, kernel_size, strides=2, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
return out
def UpBlock(input, filters, kernel_size, padding, activation, kernel_initializer):
out = Conv2D_BatchNorm(input, filters= filters//2, kernel_size=1, strides=1, activation=activation,
kernel_initializer=kernel_initializer, padding=padding)
out = FD_Block(out, f_in=filters // 2, f_out=filters, k=filters // 8, kernel_size=3, padding='same',
activation=activation, kernel_initializer='glorot_normal')
out = UpSample(out, filters, kernel_size, strides=2, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
return out
def FD_Block(input, f_in, f_out, k, kernel_size, padding, activation, kernel_initializer):
out = input
for i in range(f_in, f_out, k):
shortcut = out
out = Conv2D_BatchNorm(out, filters=f_in, kernel_size=1, strides=1, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
out = Conv2D_BatchNorm(out, filters=k, kernel_size=kernel_size, strides=1, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
out = tf.keras.layers.Dropout(0.7, seed=seed)(out)
out = tf.keras.layers.concatenate([out, shortcut])
return out
def DownSample(input, filters, kernel_size, strides, padding, activation, kernel_initializer):
out = Conv2D_BatchNorm(input, filters, kernel_size=1, strides=1, activation= activation, kernel_initializer= kernel_initializer, padding=padding)
out = Conv2D_BatchNorm(out, filters, kernel_size=kernel_size, strides=strides, activation=activation,
kernel_initializer=kernel_initializer, padding=padding)
return out
def UpSample(input, filters, kernel_size, strides, padding, activation, kernel_initializer):
out = Conv2D_BatchNorm(input, filters, kernel_size=1, strides=1, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
out = Conv2D_Transpose_BatchNorm(out, filters//2, kernel_size=kernel_size, strides=strides, activation=activation,
kernel_initializer=kernel_initializer, padding=padding)
return out
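# Worked example for FD_Block above (informal): calling it with f_in=16, f_out=32 and k=4
# gives range(16, 32, 4), i.e. four dense iterations. Each iteration bottlenecks to f_in
# channels with a 1x1 convolution, produces k new feature maps, and concatenates them onto
# the running tensor, so the channel count grows 16 -> 20 -> 24 -> 28 -> 32 = f_out
# (assuming the block input already has f_in channels, as it does in DownBlock).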
########################################################################################################################
'''Define parameters'''
########################################################################################################################
kernel_initializer = tf.keras.initializers.glorot_normal(seed=seed)
activation = 'relu'
filters = 16
padding = 'same'
kernel_size = 3
strides = 1
inputs = tf.keras.layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = inputs
out = Conv2D_BatchNorm(s, filters, kernel_size=kernel_size, strides= strides, activation=activation, kernel_initializer=kernel_initializer, padding=padding)
[out, c1] = DownBlock(out, filters*2**1, kernel_size, padding, activation, kernel_initializer)
[out, c2] = DownBlock(out, filters*2**2, kernel_size, padding, activation, kernel_initializer)
[out, c3] = DownBlock(out, filters*2**3, kernel_size, padding, activation, kernel_initializer)
[out, c4] = DownBlock(out, filters*2**4, kernel_size, padding, activation, kernel_initializer)
[out, c5] = DownBlock(out, filters*2**5, kernel_size, padding, activation, kernel_initializer)
out = BrigdeBlock(out, filters*2**6, kernel_size, padding, activation, kernel_initializer)
out = tf.keras.layers.concatenate([out, c5])
out = UpBlock(out, filters*2**5, kernel_size, padding, activation, kernel_initializer)
out = tf.keras.layers.concatenate([out, c4])
out = UpBlock(out, filters*2**4, kernel_size, padding, activation, kernel_initializer)
out = tf.keras.layers.concatenate([out, c3])
out = UpBlock(out, filters*2**3, kernel_size, padding, activation, kernel_initializer)
out = tf.keras.layers.concatenate([out, c2])
out = UpBlock(out, filters*2**2, kernel_size, padding, activation, kernel_initializer)
out = tf.keras.layers.concatenate([out, c1])
out = Conv2D_BatchNorm(out, filters, kernel_size=1, strides=1, activation=activation, kernel_initializer=kernel_initializer, padding=padding)
out = FD_Block(out, f_in=filters, f_out=filters*2, k=filters // 4, kernel_size=3, padding=padding,
activation=activation, kernel_initializer=kernel_initializer)
out = tf.keras.layers.Conv2D(filters=1, kernel_size=1, strides=1, padding=padding, activation='linear', kernel_initializer=kernel_initializer)(out)
out = tf.keras.layers.Add()([out, s])
out = tf.keras.layers.ReLU()(out)
outputs = out
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
########################################################################################################################
'''define adam'''
########################################################################################################################
opt = tf.keras.optimizers.Adam(learning_rate = 0.001)
model.compile(optimizer=opt, loss='mean_squared_error', metrics=[tf.keras.metrics.MeanAbsoluteError(), tf.keras.metrics.MeanSquaredError()])
########################################################################################################################
'''Model checkpoints'''
########################################################################################################################
callbacks = [tf.keras.callbacks.ModelCheckpoint(MODEL_NAME, verbose=1, save_best_only=True),
tf.keras.callbacks.TensorBoard(log_dir=LOG_NAME),
tf.keras.callbacks.EarlyStopping(patience=PATIENCE, monitor=MONITOR)]
########################################################################################################################
'''Compile model'''
########################################################################################################################
results = model.fit(X_train, Y_train, validation_split=0.2, batch_size=8, epochs=NUMBER_EPOCHS, callbacks=callbacks)
print('Model Trained')
########################################################################################################################
'''Model evaluvation'''
########################################################################################################################
model.evaluate(X_test, Y_test, verbose=1)
print('Done evaluation')
preds_test = np.zeros((len(X_test), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)  # placeholder; overwritten by model.predict below
preds_train = model.predict(X_train[:int(X_train.shape[0] * 0.8)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0] * 0.8):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
print('Done prediction on simulation')
preds_test_int = preds_test.astype(np.uint8)
preds_test_t = tf.convert_to_tensor(preds_test_int)
Y_test_t = tf.convert_to_tensor(Y_test)
X_test_t = tf.convert_to_tensor(X_test)
ssim_test = tf.image.ssim(Y_test_t, preds_test_t, max_val=255)
ssim_test_orig = tf.image.ssim(Y_test_t, X_test_t, max_val=255)
psnr_test = tf.image.psnr(Y_test_t, preds_test_t, max_val=255)
psnr_test_orig = tf.image.psnr(Y_test_t, X_test_t, max_val=255)
print('SSIM Test')
print(np.mean(ssim_test.numpy()))
print('PSNR Test')
print(np.mean(psnr_test.numpy()))
print('SSIM original')
print(np.mean(ssim_test_orig.numpy()))
print('PSNR original')
print(np.mean(psnr_test_orig.numpy()))
|
the-stack_0_20152 | import unittest2
from datafeeds.usfirst_alliances_parser import UsfirstAlliancesParser
class TestUsfirstAlliancesParser(unittest2.TestCase):
def test_parse_2014test(self):
with open('test_data/usfirst_html/usfirst_event_matches_2014test.html', 'r') as f:
alliances, _ = UsfirstAlliancesParser.parse(f.read())
self.assertEqual(alliances, None)
def test_parse_2014curie(self):
with open('test_data/usfirst_html/usfirst_event_matches_2014curie.html', 'r') as f:
alliances, _ = UsfirstAlliancesParser.parse(f.read())
self.assertEqual(
alliances,
[
{'picks': ['frc254', 'frc469', 'frc2848', 'frc74'], 'declines':[] },
{'picks': ['frc1718', 'frc2451', 'frc573', 'frc2016'], 'declines':[] },
{'picks': ['frc2928', 'frc2013', 'frc1311', 'frc842'], 'declines':[] },
{'picks': ['frc180', 'frc125', 'frc1323', 'frc2468'], 'declines':[] },
{'picks': ['frc118', 'frc359', 'frc4334', 'frc865'], 'declines':[] },
{'picks': ['frc135', 'frc1241', 'frc11', 'frc68'], 'declines':[] },
{'picks': ['frc3478', 'frc177', 'frc294', 'frc230'], 'declines':[] },
{'picks': ['frc624', 'frc987', 'frc3476', 'frc3015'], 'declines':[] },
]
)
|
the-stack_0_20153 | import itertools
import re
from django.db.models.deletion import Collector
from django.urls import NoReverseMatch, reverse
def related_classes(instance):
"""
Return all classes which would be deleted if the passed instance
were deleted too by employing the cascade machinery of Django
itself. Does **not** return instances, only classes.
Note! When using Django 1.5, autogenerated models (many to many through
models) are returned too.
"""
collector = Collector(using=instance._state.db)
# We really do not want fast deletion, we absolutely need to know whether
# there are related objects around!
collector.can_fast_delete = lambda *args, **kwargs: False
collector.collect([instance])
# Save collected objects for later referencing (well yes, it does return
# instances but we don't have to tell anybody :-)
instance._collected_objects = collector.data
return collector.data.keys()
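# Example usage (hypothetical model and instance, for illustration only):
#   classes = related_classes(book)
#   if any(cls is not Book for cls in classes):
#       ...  # deleting this Book would cascade to other models
# The collected instances remain available afterwards on book._collected_objects.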
def safe_queryset_and(head, *tail):
"""
    Safe AND-ing of querysets. If either of the querysets has its
    DISTINCT flag set, sets distinct on both querysets. Also takes extra
care to preserve the result of the following queryset methods:
* ``reverse()``
* ``transform()``
* ``select_related()``
* ``prefetch_related()``
"""
def _merge(qs1, qs2):
if qs1.query.distinct or qs2.query.distinct:
res = qs1.distinct() & qs2.distinct()
else:
res = qs1 & qs2
res._transform_fns = list(
set(getattr(qs1, "_transform_fns", []) + getattr(qs2, "_transform_fns", []))
)
if not (qs1.query.standard_ordering and qs2.query.standard_ordering):
res.query.standard_ordering = False
select_related = [qs1.query.select_related, qs2.query.select_related]
if False in select_related:
# We are not interested in the default value
select_related.remove(False)
if len(select_related) == 1:
res.query.select_related = select_related[0]
elif len(select_related) == 2:
if True in select_related:
# Prefer explicit select_related to generic select_related()
select_related.remove(True)
if len(select_related) > 0:
# If we have two explicit select_related calls, take any
res.query.select_related = select_related[0]
else:
res = res.select_related()
res._prefetch_related_lookups = list(
set(qs1._prefetch_related_lookups) | set(qs2._prefetch_related_lookups)
)
return res
while tail:
head = _merge(head, tail[0])
tail = tail[1:]
return head
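# Example usage (hypothetical querysets, for illustration): combining a distinct,
# select_related() queryset with a plain filter keeps both properties on the result:
#   qs = safe_queryset_and(
#       Book.objects.select_related('author').distinct(),
#       Book.objects.filter(published=True),
#   )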
_KWARG_RE = re.compile(r"(?:([-\w]+)=)?(.+)")
def parse_args_and_kwargs(parser, bits):
"""
Parses template tag arguments and keyword arguments
Returns a tuple ``args, kwargs``.
Usage::
@register.tag
def custom(parser, token):
return CustomNode(*parse_args_and_kwargs(parser,
token.split_contents()[1:]))
class CustomNode(template.Node):
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
def render(self, context):
args, kwargs = resolve_args_and_kwargs(context, self.args,
self.kwargs)
                return self._render(context, *args, **kwargs)
def _render(self, context, ...):
# The real workhorse
"""
args = []
kwargs = {}
for bit in bits:
match = _KWARG_RE.match(bit)
key, value = match.groups()
value = parser.compile_filter(value)
if key:
kwargs[str(key)] = value
else:
args.append(value)
return args, kwargs
def resolve_args_and_kwargs(context, args, kwargs):
"""
Resolves arguments and keyword arguments parsed by
``parse_args_and_kwargs`` using the passed context instance
See ``parse_args_and_kwargs`` for usage instructions.
"""
return (
[v.resolve(context) for v in args],
{k: v.resolve(context) for k, v in kwargs.items()},
)
def changed_regions(regions, fields):
"""
Returns a subset of regions which have to be updated when fields have
been edited. To be used together with the ``{% regions %}`` template
tag.
Usage::
regions = {}
render(request, 'detail.html', {
'object': instance,
'regions': regions,
})
return HttpResponse(
json.dumps(changed_regions(regions, ['emails', 'phones'])),
content_type='application/json')
"""
dependencies = regions.get("_dependencies", {})
to_update = set(itertools.chain(*[dependencies.get(field, []) for field in fields]))
return {key: value for key, value in regions.items() if key in to_update}
def tryreverse(*args, **kwargs):
"""
Calls ``django.core.urlresolvers.reverse``, and returns ``None`` on
failure instead of raising an exception.
"""
try:
return reverse(*args, **kwargs)
except NoReverseMatch:
return None
def substitute_with(to_delete, instance):
"""
Substitute the first argument with the second in all relations,
and delete the first argument afterwards.
"""
assert to_delete.__class__ == instance.__class__
assert to_delete.pk != instance.pk
fields = [
f
for f in to_delete._meta.get_fields()
if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]
for related_object in fields:
try:
model = related_object.related_model
except AttributeError:
model = related_object.model
queryset = model._base_manager.complex_filter(
{related_object.field.name: to_delete.pk}
)
queryset.update(**{related_object.field.name: instance.pk})
to_delete.delete()
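# Example usage (hypothetical instances): merge a duplicate record into the canonical one,
# repointing every reverse relation before the duplicate is deleted:
#   substitute_with(duplicate_person, canonical_person)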
def app_model_label(model):
"""
Stop those deprecation warnings
"""
return model._meta.app_label, model._meta.model_name
|
the-stack_0_20155 | import pymongo
import time
from sqlalchemy import create_engine
import pandas as pd
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import logging
# logging criteria:
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S') # filename='debug.log',
# analyzser for sentiment analysis:
analyzer = SentimentIntensityAnalyzer()
# mongodb connection:
time.sleep(10)
client = pymongo.MongoClient(host='mongodb', port=27017)
db = client.tweets_1
collection = db.tweet_data_1
logging.info('mongodb client set')
# postgresdb connection:
time.sleep(5)
pg = create_engine('postgres://bird:bird@postgresdb:5432/tweet_db', echo=False)
logging.info('postgresdb engine set')
# create dataframe of list of tweets (pandas):
def create_df(collection):
df = pd.DataFrame(collection.find({},{ "_id": 0, "user": 1, "text": 1 }))
# add column for sentiment analysis:
    df.insert(len(df.columns), 'sentiment', '')
logging.warning('data frame created')
return df
# analyze sentiments and append sentiment compound scores to dataframe column 'sentiment':
def sentiments_to_df(df):
result = []
for tweet in df['text']:
score = analyzer.polarity_scores(tweet)
result.append(str(score['compound']))
df["sentiment"] = result
logging.warning('sentiment compound score appended to df: {}'.format(score['compound']))
return df
#transform dataframe to postgres table:
def df_to_sql(df):
df.to_sql('tweets', pg, if_exists='replace')
logging.warning('table created')
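# Illustrative check of the result (not executed here; assumes the 'tweets' table name used
# above): pd.read_sql('SELECT "user", sentiment FROM tweets LIMIT 5', pg)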
# function running the above functions with constrainted repetition:
def main_code():
    # number of times to repeat the pipeline
times_to_repeat = 10
while times_to_repeat >= 0:
df = create_df(collection)
df = sentiments_to_df(df)
df_to_sql(df)
times_to_repeat -= 1
logging.critical('times to repeat left = {}'.format(times_to_repeat))
time.sleep(70) #60
time.sleep(10)
main_code()
|
the-stack_0_20156 | from web3 import HTTPProvider
from aquarius.events.request import make_post_request
class CustomHTTPProvider(HTTPProvider):
"""
Override requests to control the connection pool to make it blocking.
"""
def make_request(self, method, params):
self.logger.debug(
"Making request HTTP. URI: %s, Method: %s", self.endpoint_uri, method
)
request_data = self.encode_rpc_request(method, params)
raw_response = make_post_request(
self.endpoint_uri, request_data, **self.get_request_kwargs()
)
response = self.decode_rpc_response(raw_response)
self.logger.debug(
"Getting response HTTP. URI: %s, " "Method: %s, Response: %s",
self.endpoint_uri,
method,
response,
)
return response
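# Example wiring (illustrative; the node URL is a placeholder). The provider plugs into
# web3.Web3 exactly like the stock HTTPProvider it subclasses:
#   from web3 import Web3
#   w3 = Web3(CustomHTTPProvider('http://localhost:8545'))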
|
the-stack_0_20157 | import numpy as np
import pandas as pd
import os
import tensorflow as tf
from node_cell import (
LSTMCell,
CTRNNCell,
ODELSTM,
VanillaRNN,
CTGRU,
BidirectionalRNN,
GRUD,
PhasedLSTM,
GRUODE,
HawkLSTMCell,
)
from tqdm import tqdm
import argparse
class ImitationData:
def __init__(self, seq_len):
self.seq_len = seq_len
all_files = sorted(
[
os.path.join("data/walker", d)
for d in os.listdir("data/walker")
if d.endswith(".npy")
]
)
self.rng = np.random.RandomState(891374)
np.random.RandomState(125487).shuffle(all_files)
# 15% test set, 10% validation set, the rest is for training
test_n = int(0.15 * len(all_files))
valid_n = int((0.15 + 0.1) * len(all_files))
test_files = all_files[:test_n]
valid_files = all_files[test_n:valid_n]
train_files = all_files[valid_n:]
train_x, train_t, train_y = self._load_files(train_files)
valid_x, valid_t, valid_y = self._load_files(valid_files)
test_x, test_t, test_y = self._load_files(test_files)
train_x, train_t, train_y = self.perturb_sequences(train_x, train_t, train_y)
valid_x, valid_t, valid_y = self.perturb_sequences(valid_x, valid_t, valid_y)
test_x, test_t, test_y = self.perturb_sequences(test_x, test_t, test_y)
self.train_x, self.train_times, self.train_y = self.align_sequences(
train_x, train_t, train_y
)
self.valid_x, self.valid_times, self.valid_y = self.align_sequences(
valid_x, valid_t, valid_y
)
self.test_x, self.test_times, self.test_y = self.align_sequences(
test_x, test_t, test_y
)
self.input_size = self.train_x.shape[-1]
print("train_times: ", str(self.train_times.shape))
print("train_x: ", str(self.train_x.shape))
print("train_y: ", str(self.train_y.shape))
def align_sequences(self, set_x, set_t, set_y):
times = []
x = []
y = []
for i in range(len(set_y)):
seq_x = set_x[i]
seq_t = set_t[i]
seq_y = set_y[i]
for t in range(0, seq_y.shape[0] - self.seq_len, self.seq_len // 4):
x.append(seq_x[t : t + self.seq_len])
times.append(seq_t[t : t + self.seq_len])
y.append(seq_y[t : t + self.seq_len])
return (
np.stack(x, axis=0),
np.expand_dims(np.stack(times, axis=0), axis=-1),
np.stack(y, axis=0),
)
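    # perturb_sequences (below) simulates irregular sampling: each timestep is kept with
    # probability 0.9, and the recorded "time" value counts how many raw steps elapsed since
    # the previously kept sample, which the continuous-time cells consume as elapsed time.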
def perturb_sequences(self, set_x, set_t, set_y):
x = []
times = []
y = []
for i in range(len(set_y)):
seq_x = set_x[i]
seq_y = set_y[i]
new_x, new_times = [], []
new_y = []
skip = 0
for t in range(seq_y.shape[0]):
skip += 1
if self.rng.rand() < 0.9:
new_x.append(seq_x[t])
new_times.append(skip)
new_y.append(seq_y[t])
skip = 0
x.append(np.stack(new_x, axis=0))
times.append(np.stack(new_times, axis=0))
y.append(np.stack(new_y, axis=0))
return x, times, y
def _load_files(self, files):
all_x = []
all_t = []
all_y = []
for f in files:
arr = np.load(f)
x_state = arr[:-1, :].astype(np.float32)
y = arr[1:, :].astype(np.float32)
x_times = np.ones(x_state.shape[0])
all_x.append(x_state)
all_t.append(x_times)
all_y.append(y)
print("Loaded file '{}' of length {:d}".format(f, x_state.shape[0]))
return all_x, all_t, all_y
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="lstm")
parser.add_argument("--size", default=64, type=int)
parser.add_argument("--epochs", default=200, type=int)
parser.add_argument("--lr", default=0.005, type=float)
args = parser.parse_args()
data = ImitationData(seq_len=64)
if args.model == "lstm":
cell = LSTMCell(units=args.size)
elif args.model == "ctrnn":
cell = CTRNNCell(units=args.size, num_unfolds=3, method="rk4")
elif args.model == "node":
cell = CTRNNCell(units=args.size, num_unfolds=3, method="rk4", tau=0)
elif args.model == "odelstm":
cell = ODELSTM(units=args.size)
elif args.model == "ctgru":
cell = CTGRU(units=args.size)
elif args.model == "vanilla":
cell = VanillaRNN(units=args.size)
elif args.model == "bidirect":
cell = BidirectionalRNN(units=args.size)
elif args.model == "grud":
cell = GRUD(units=args.size)
elif args.model == "phased":
cell = PhasedLSTM(units=args.size)
elif args.model == "gruode":
cell = GRUODE(units=args.size)
elif args.model == "hawk":
cell = HawkLSTMCell(units=args.size)
else:
raise ValueError("Unknown model type '{}'".format(args.model))
signal_input = tf.keras.Input(shape=(data.seq_len, data.input_size), name="robot")
time_input = tf.keras.Input(shape=(data.seq_len, 1), name="time")
rnn = tf.keras.layers.RNN(cell, time_major=False, return_sequences=True)
output_states = rnn((signal_input, time_input))
y = tf.keras.layers.Dense(data.input_size)(output_states)
model = tf.keras.Model(inputs=[signal_input, time_input], outputs=[y])
model.compile(
optimizer=tf.keras.optimizers.RMSprop(args.lr),
loss=tf.keras.losses.MeanSquaredError(),
)
model.summary()
hist = model.fit(
x=(data.train_x, data.train_times),
y=data.train_y,
batch_size=128,
epochs=args.epochs,
validation_data=((data.valid_x, data.valid_times), data.valid_y),
callbacks=[
tf.keras.callbacks.ModelCheckpoint(
"/tmp/checkpoint", save_best_only=True, save_weights_only=True, mode="min"
)
],
)
# Restore checkpoint with lowest validation MSE
model.load_weights("/tmp/checkpoint")
best_test_loss = model.evaluate(
x=(data.test_x, data.test_times), y=data.test_y, verbose=2
)
print("Best test loss: {:0.3f}".format(best_test_loss))
# Log result in file
base_path = "results/walker"
os.makedirs(base_path, exist_ok=True)
with open("{}/{}_{}.csv".format(base_path, args.model, args.size), "a") as f:
f.write("{:06f}\n".format(best_test_loss))
|
the-stack_0_20158 | """Optimization routines."""
import contextlib
import functools
import os
import sys
import warnings
from pathlib import Path
from typing import Any, Callable, Iterable, Iterator, Optional, Tuple, Union
import numpy as np
import scipy.optimize
from .. import options
from ..utilities.basics import Array, Options, SolverStats, StringRepresentation, format_options
# objective function types
ObjectiveResults = Tuple[float, Optional[Array]]
ObjectiveFunction = Callable[[Array], ObjectiveResults]
class Optimization(StringRepresentation):
r"""Configuration for solving optimization problems.
Parameters
----------
method : `str or callable`
The optimization routine that will be used. The following routines support parameter bounds and use analytic
gradients:
- ``'knitro'`` - Uses an installed version of
`Artleys Knitro <https://www.artelys.com/solvers/knitro/>`_. Python 3 is supported by Knitro version 10.3
and newer. A number of environment variables most likely need to be configured properly, such as
``KNITRODIR``, ``ARTELYS_LICENSE``, ``LD_LIBRARY_PATH`` (on Linux), and ``DYLD_LIBRARY_PATH`` (on
Mac OS X). For more information, refer to the
`Knitro installation guide <https://www.artelys.com/docs/knitro//1_introduction/installation.html>`_.
- ``'slsqp'`` - Uses the :func:`scipy.optimize.minimize` SLSQP routine.
- ``'trust-constr'`` - Uses the :func:`scipy.optimize.minimize` trust-region routine.
- ``'l-bfgs-b'`` - Uses the :func:`scipy.optimize.minimize` L-BFGS-B routine.
- ``'tnc'`` - Uses the :func:`scipy.optimize.minimize` TNC routine.
The following routines also use analytic gradients but will ignore parameter bounds (not bounding the problem
may create issues if the optimizer tries out large parameter values that create overflow errors):
- ``'cg'`` - Uses the :func:`scipy.optimize.minimize` CG routine.
- ``'bfgs'`` - Uses the :func:`scipy.optimize.minimize` BFGS routine.
- ``'newton-cg'`` - Uses the :func:`scipy.optimize.minimize` Newton-CG routine.
The following routines do not use analytic gradients and will also ignore parameter bounds (without analytic
gradients, optimization will likely be much slower):
- ``'nelder-mead'`` - Uses the :func:`scipy.optimize.minimize` Nelder-Mead routine.
- ``'powell'`` - Uses the :func:`scipy.optimize.minimize` Powell routine.
The following trivial routine can be used to evaluate an objective at specific parameter values:
- ``'return'`` - Assume that the initial parameter values are the optimal ones.
Also accepted is a custom callable method with the following form::
method(initial, bounds, objective_function, iteration_callback, **options) -> (final, converged)
where ``initial`` is an array of initial parameter values, ``bounds`` is a list of ``(min, max)`` pairs for each
element in ``initial``, ``objective_function`` is a callable objective function of the form specified below,
``iteration_callback`` is a function that should be called without any arguments after each major iteration (it
is used to record the number of major iterations), ``options`` are specified below, ``final`` is an array of
optimized parameter values, and ``converged`` is a flag for whether the routine converged.
The ``objective_function`` has the following form:
objective_function(theta) -> (objective, gradient)
        where ``gradient`` is ``None`` if ``compute_gradient`` is ``False``.
method_options : `dict, optional`
Options for the optimization routine.
For any non-custom ``method`` other than ``'knitro'`` and ``'return'``, these options will be passed to
``options`` in :func:`scipy.optimize.minimize`. Refer to the SciPy documentation for information about which
options are available for each optimization routine.
If ``method`` is ``'knitro'``, these options should be
`Knitro user options <https://www.artelys.com/docs/knitro//3_referenceManual/userOptions.html>`_. The
non-standard ``knitro_dir`` option can also be specified. The following options have non-standard default
values:
- **knitro_dir** : (`str`) - By default, the KNITRODIR environment variable is used. Otherwise, this
option should point to the installation directory of Knitro, which contains direct subdirectories such as
``'examples'`` and ``'lib'``. For example, on Windows this option could be
``'/Program Files/Artleys3/Knitro 10.3.0'``.
- **algorithm** : (`int`) - The optimization algorithm to be used. The default value is ``1``, which
corresponds to the Interior/Direct algorithm.
- **gradopt** : (`int`) - How the objective's gradient is computed. The default value is ``1`` if
``compute_gradient`` is ``True`` and is ``2`` otherwise, which corresponds to estimating the gradient with
finite differences.
- **hessopt** : (`int`) - How the objective's Hessian is computed. The default value is ``2``, which
corresponds to computing a quasi-Newton BFGS Hessian.
- **honorbnds** : (`int`) - Whether to enforce satisfaction of simple variable bounds. The default value is
``1``, which corresponds to enforcing that the initial point and all subsequent solution estimates satisfy
the bounds.
compute_gradient : `bool, optional`
Whether to compute an analytic objective gradient during optimization, which must be ``False`` if ``method``
does not use analytic gradients, and must be ``True`` if ``method`` is ``'newton-cg'``, which requires an
analytic gradient.
By default, analytic gradients are computed. Not using an analytic gradient will likely slow
down estimation a good deal. If ``False``, an analytic gradient may still be computed once at the end of
optimization to compute optimization results. To always use finite differences, ``finite_differences`` in
:meth:`Problem.solve` can be set to ``True``.
universal_display : `bool, optional`
Whether to format optimization progress such that the display looks the same for all routines. By default, the
universal display is used and some ``method_options`` are used to prevent default displays from showing up.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/optimization.ipynb
.. raw:: latex
\end{examplenotebook}
"""
_optimizer: functools.partial
_description: str
_method_options: Options
_supports_bounds: bool
_compute_gradient: bool
_universal_display: bool
def __init__(
self, method: Union[str, Callable], method_options: Optional[Options] = None, compute_gradient: bool = True,
universal_display: bool = True) -> None:
"""Validate the method and set default options."""
simple_methods = {
'nelder-mead': (functools.partial(scipy_optimizer), "the Nelder-Mead algorithm implemented in SciPy"),
'powell': (functools.partial(scipy_optimizer), "the modified Powell algorithm implemented in SciPy")
}
unbounded_methods = {
'cg': (functools.partial(scipy_optimizer), "the conjugate gradient algorithm implemented in SciPy"),
'bfgs': (functools.partial(scipy_optimizer), "the BFGS algorithm implemented in SciPy"),
'newton-cg': (functools.partial(scipy_optimizer), "the Newton-CG algorithm implemented in SciPy")
}
bounded_methods = {
'l-bfgs-b': (functools.partial(scipy_optimizer), "the L-BFGS-B algorithm implemented in SciPy"),
'tnc': (functools.partial(scipy_optimizer), "the truncated Newton algorithm implemented in SciPy"),
'slsqp': (functools.partial(scipy_optimizer), "Sequential Least SQuares Programming implemented in SciPy"),
'trust-constr': (functools.partial(scipy_optimizer), "trust-region routine implemented in SciPy"),
'knitro': (functools.partial(knitro_optimizer), "an installed version of Artleys Knitro"),
'return': (functools.partial(return_optimizer), "a trivial routine that returns the initial parameters")
}
methods = {**simple_methods, **unbounded_methods, **bounded_methods}
# validate the configuration
if method not in methods and not callable(method):
raise ValueError(f"method must be one of {list(methods)} or a callable object.")
if method_options is not None and not isinstance(method_options, dict):
raise ValueError("method_options must be None or a dict.")
if method in simple_methods and compute_gradient:
raise ValueError(f"compute_gradient must be False when method is '{method}'.")
if method == 'newton-cg' and not compute_gradient:
raise ValueError(f"compute_gradient must be True when method is '{method}'.")
# initialize class attributes
self._compute_gradient = compute_gradient
self._universal_display = universal_display
self._supports_bounds = callable(method) or method in bounded_methods
# options are by default empty
if method_options is None:
method_options = {}
# options are simply passed along to custom methods
if callable(method):
self._optimizer = functools.partial(method)
self._description = "a custom method"
self._method_options = method_options
return
# identify the non-custom optimizer, configure arguments, and set default options
self._method_options: Options = {}
self._optimizer, self._description = methods[method]
self._optimizer = functools.partial(self._optimizer, compute_gradient=compute_gradient)
if method == 'knitro':
self._method_options.update({
'hessopt': 2,
'algorithm': 1,
'honorbnds': 1,
'gradopt': 1 if compute_gradient else 2,
'knitro_dir': os.environ.get('KNITRODIR'),
'outlev': 4 if not universal_display and options.verbose else 0
})
elif method != 'return':
self._optimizer = functools.partial(self._optimizer, method=method)
if not universal_display and options.verbose:
self._method_options['disp'] = True
if method in {'l-bfgs-b', 'slsqp'}:
self._method_options['iprint'] = 2
elif method == 'trust-constr':
self._method_options['verbose'] = 3
# update the default options
self._method_options.update(method_options)
# validate options for non-SciPy routines
if method == 'return' and self._method_options:
raise ValueError("The return method does not support any options.")
if method == 'knitro':
# get the location of the Knitro installation
knitro_dir = self._method_options.pop('knitro_dir')
if not isinstance(knitro_dir, (Path, str)):
raise OSError(
"If specified, the knitro_dir optimization option must point to the Knitro installation directory."
"Otherwise, the KNITRODIR environment variable must be configured."
)
# add relevant paths
for subdir in ['lib', 'examples/Python']:
full_path = Path(knitro_dir) / subdir
if not full_path.is_dir():
raise OSError(
f"Failed to find the directory '{full_path}'. Make sure a supported version of Knitro is "
f"properly installed and that the KNITRODIR environment variable exists. Alternatively, the "
f"knitro_dir optimization option should point to the Knitro installation directory."
)
sys.path.append(str(full_path))
# make sure that Knitro can be initialized
with knitro_context_manager():
pass
def __str__(self) -> str:
"""Format the configuration as a string."""
description = f"{self._description} {'with' if self._compute_gradient else 'without'} analytic gradients"
return f"Configured to optimize using {description} and options {format_options(self._method_options)}."
def _optimize(
self, initial: Array, bounds: Optional[Iterable[Tuple[float, float]]],
verbose_objective_function: Callable[[Array, int, int], ObjectiveResults]) -> Tuple[Array, SolverStats]:
"""Optimize parameters to minimize a scalar objective."""
# initialize counters
iterations = evaluations = 0
def iteration_callback() -> None:
"""Count the number of major iterations."""
nonlocal iterations
iterations += 1
def objective_wrapper(raw_values: Any) -> ObjectiveResults:
"""Normalize arrays so they work with all types of routines. Also count the total number of contraction
evaluations.
"""
nonlocal evaluations
evaluations += 1
raw_values = np.asanyarray(raw_values)
values = raw_values.reshape(initial.shape).astype(initial.dtype, copy=False)
objective, gradient = verbose_objective_function(values, iterations, evaluations)
return (
float(objective),
None if gradient is None else gradient.astype(raw_values.dtype, copy=False).flatten()
)
# normalize values
raw_initial = initial.astype(np.float64, copy=False).flatten()
raw_bounds = None if bounds is None or not self._supports_bounds else [(float(l), float(u)) for l, u in bounds]
# solve the problem and convert the raw final values to the same data type and shape as the initial values
raw_final, converged = self._optimizer(
raw_initial, raw_bounds, objective_wrapper, iteration_callback, **self._method_options
)
final = np.asanyarray(raw_final).astype(initial.dtype, copy=False).reshape(initial.shape)
stats = SolverStats(converged, iterations, evaluations)
return final, stats
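# Illustrative sketch only (not used anywhere in this module): a custom ``method`` callable
# matching the signature documented in the Optimization docstring. The single "midpoint
# probe" below is purely pedagogical and is not a real optimization routine.
def _example_custom_method(initial, bounds, objective_function, iteration_callback, **options):
    """Probe the midpoint of the bounds once and keep whichever point has the lower objective."""
    bounds = bounds if bounds is not None else [(-np.inf, +np.inf)] * initial.size
    candidate = np.array([
        (l + u) / 2 if np.isfinite(l) and np.isfinite(u) else x
        for x, (l, u) in zip(initial, bounds)
    ])
    candidate_objective, _ = objective_function(candidate)
    initial_objective, _ = objective_function(initial)
    iteration_callback()
    converged = True
    return (candidate if candidate_objective < initial_objective else initial), converged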
def return_optimizer(initial_values: Array, *_: Any, **__: Any) -> Tuple[Array, bool]:
"""Assume the initial values are the optimal ones."""
success = True
return initial_values, success
def scipy_optimizer(
initial_values: Array, bounds: Optional[Iterable[Tuple[float, float]]], objective_function: ObjectiveFunction,
iteration_callback: Callable[[], None], method: str, compute_gradient: bool, **scipy_options: Any) -> (
Tuple[Array, bool]):
"""Optimize with a SciPy method."""
cache: Optional[Tuple[Array, ObjectiveResults]] = None
def objective_wrapper(values: Array) -> float:
"""Return a possibly cached objective value."""
nonlocal cache
if cache is None or not np.array_equal(values, cache[0]):
cache = (values.copy(), objective_function(values))
return cache[1][0]
def gradient_wrapper(values: Array) -> Array:
"""Return a possibly cached gradient."""
nonlocal cache
if cache is None or not np.array_equal(values, cache[0]):
cache = (values.copy(), objective_function(values))
return cache[1][1]
# by default use the BFGS approximation for the Hessian
hess = scipy_options.get('hess', scipy.optimize.BFGS() if method == 'trust-constr' else None)
# call the SciPy function
callback = lambda *_: iteration_callback()
results = scipy.optimize.minimize(
objective_wrapper, initial_values, method=method, jac=gradient_wrapper if compute_gradient else False,
hess=hess, bounds=bounds, callback=callback, options=scipy_options
)
return results.x, results.success
def knitro_optimizer(
initial_values: Array, bounds: Optional[Iterable[Tuple[float, float]]], objective_function: ObjectiveFunction,
iteration_callback: Callable[[], None], compute_gradient: bool, **knitro_options: Any) -> Tuple[Array, bool]:
"""Optimize with Knitro."""
with knitro_context_manager() as (knitro, knitro_context):
iterations = 0
cache: Optional[Tuple[Array, ObjectiveResults]] = None
def combined_callback(
request_code: int, _: Any, __: Any, ___: Any, ____: Any, values: Array, _____: Any,
objective_store: Array, ______: Any, gradient_store: Array, *_______: Any) -> int:
"""Handle requests to compute either the objective or its gradient (which are cached for when the next
request is for the same values) and call the iteration callback when there's a new major iteration.
"""
nonlocal iterations, cache
# call the iteration callback if this is a new iteration
current_iterations = knitro.KTR_get_number_iters(knitro_context)
while iterations < current_iterations:
iteration_callback()
iterations += 1
# compute the objective or used cached values
if cache is None or not np.array_equal(values, cache[0]):
cache = (values.copy(), objective_function(values))
objective, gradient = cache[1]
# define a function that normalizes values so they can be digested by Knitro
normalize = lambda x: min(max(float(x), -sys.maxsize), sys.maxsize)
# handle request codes
if request_code == knitro.KTR_RC_EVALFC:
objective_store[0] = normalize(objective)
return knitro.KTR_RC_BEGINEND
if request_code == knitro.KTR_RC_EVALGA:
assert compute_gradient and gradient is not None
for index, gradient_value in enumerate(gradient.flatten()):
gradient_store[index] = normalize(gradient_value)
return knitro.KTR_RC_BEGINEND
return knitro.KTR_RC_CALLBACK_ERR
# configure Knitro callbacks
callback_mapping = {
knitro.KTR_set_func_callback: combined_callback,
knitro.KTR_set_grad_callback: combined_callback
}
for set_callback, callback in callback_mapping.items():
code = set_callback(knitro_context, callback)
if code != 0:
raise RuntimeError(f"Encountered error code {code} when registering {set_callback.__name__}.")
# configure Knitro parameters
for key, value in knitro_options.items():
set_parameter = knitro.KTR_set_param_by_name
if isinstance(value, str):
set_parameter = knitro.KTR_set_char_param_by_name
code = set_parameter(knitro_context, key, value)
if code != 0:
raise RuntimeError(f"Encountered error code {code} when configuring '{key}'.")
# initialize the problem
bounds = bounds or [(-np.inf, +np.inf)] * initial_values.size
with warnings.catch_warnings():
warnings.simplefilter('ignore')
code = knitro.KTR_init_problem(
kc=knitro_context,
n=initial_values.size,
xInitial=initial_values,
lambdaInitial=None,
objGoal=knitro.KTR_OBJGOAL_MINIMIZE,
objType=knitro.KTR_OBJTYPE_GENERAL,
xLoBnds=np.array([b[0] if np.isfinite(b[0]) else -knitro.KTR_INFBOUND for b in bounds]),
xUpBnds=np.array([b[1] if np.isfinite(b[1]) else +knitro.KTR_INFBOUND for b in bounds]),
cType=None,
cLoBnds=None,
cUpBnds=None,
jacIndexVars=None,
jacIndexCons=None,
hessIndexRows=None,
hessIndexCols=None
)
if code != 0:
raise RuntimeError(f"Encountered error code {code} when initializing the Knitro problem solver.")
# solve the problem
values_store = np.zeros_like(initial_values)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return_code = knitro.KTR_solve(
kc=knitro_context, x=values_store, lambda_=np.zeros_like(initial_values), evalStatus=0,
obj=np.array([0]), c=None, objGrad=None, jac=None, hess=None, hessVector=None, userParams=None
)
# Knitro was only successful if its return code was 0 (final solution satisfies the termination conditions for
# verifying optimality) or between -100 and -199 (a feasible approximate solution was found)
return values_store, return_code > -200
@contextlib.contextmanager
def knitro_context_manager() -> Iterator[Tuple[Any, Any]]:
"""Import Knitro and initialize its context."""
try:
import knitro
except OSError as exception:
if 'Win32' in repr(exception):
raise EnvironmentError("Make sure both Knitro and Python are 32- or 64-bit.") from exception
raise
# modify older version of Knitro to work with NumPy
try:
# noinspection PyUnresolvedReferences
import knitroNumPy
knitro.KTR_array_handler._cIntArray = knitroNumPy._cIntArray
knitro.KTR_array_handler._cDoubleArray = knitroNumPy._cDoubleArray
knitro.KTR_array_handler._userArray = knitroNumPy._userArray
knitro.KTR_array_handler._userToCArray = knitroNumPy._userToCArray
knitro.KTR_array_handler._cToUserArray = knitroNumPy._cToUserArray
except ImportError:
pass
# create the Knitro context and attempt to free it if anything goes wrong
knitro_context = None
try:
knitro_context = None
try:
knitro_context = knitro.KTR_new()
except RuntimeError as exception:
if 'Error while initializing parameter' not in str(exception):
raise
if not knitro_context:
raise OSError(
"Failed to find a Knitro license. Make sure that Knitro is properly installed. You may have to create "
"the environment variable ARTELYS_LICENSE and set it to the location of the directory with the license "
"file."
)
yield knitro, knitro_context
finally:
try:
knitro.KTR_free(knitro_context)
except Exception:
pass
|
the-stack_0_20159 | # Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
# Example 1:
# Input:
# [
# [1,1,1],
# [1,0,1],
# [1,1,1]
# ]
# Output:
# [
# [1,0,1],
# [0,0,0],
# [1,0,1]
# ]
# Example 2:
# Input:
# [
# [0,1,2,0],
# [3,4,5,2],
# [1,3,1,5]
# ]
# Output:
# [
# [0,0,0,0],
# [0,4,5,0],
# [0,3,1,0]
# ]
# Follow up:
# A straight forward solution using O(mn) space is probably a bad idea.
# A simple improvement uses O(m + n) space, but still not the best solution.
# Could you devise a constant space solution?
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
zeros = []
for i in range (0, len(matrix)):
for j in range (0, len(matrix[i])):
if matrix[i][j] == 0:
zeros.append((i, j))
for i in range (0, len(zeros)):
for j in range (0, len(matrix[zeros[i][0]])):
matrix[zeros[i][0]][j] = 0
for j in range (0, len(matrix)):
                matrix[j][zeros[i][1]] = 0
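# Follow-up sketch (untested here): a constant-space variant of the approach above, using
# the first row and first column of the matrix itself as zero markers, as asked by the
# "constant space" follow-up in the problem statement.
class SolutionConstantSpace:
    def setZeroes(self, matrix):
        if not matrix or not matrix[0]:
            return
        rows, cols = len(matrix), len(matrix[0])
        first_row_zero = any(matrix[0][j] == 0 for j in range(cols))
        first_col_zero = any(matrix[i][0] == 0 for i in range(rows))
        # Mark zero rows/columns in the first row and first column
        for i in range(1, rows):
            for j in range(1, cols):
                if matrix[i][j] == 0:
                    matrix[i][0] = 0
                    matrix[0][j] = 0
        # Zero out interior cells based on the markers
        for i in range(1, rows):
            for j in range(1, cols):
                if matrix[i][0] == 0 or matrix[0][j] == 0:
                    matrix[i][j] = 0
        # Finally handle the first row and column themselves
        if first_row_zero:
            for j in range(cols):
                matrix[0][j] = 0
        if first_col_zero:
            for i in range(rows):
                matrix[i][0] = 0
|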
the-stack_0_20162 | #!/usr/bin/env python3
# pylint: disable=too-many-arguments
"""
Author : saminamomtaz <saminamomtaz@localhost>
Date : 2021-11-28
Purpose: Fold change of RNAseq data
"""
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Fold change of RNAseq data',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('foldchange',
metavar='file1.xlsx',
help='Input file with fold change')
parser.add_argument('enrichment',
metavar='file2.xlsx',
help='Input file with LogP and geneID')
parser.add_argument('outfile',
metavar='outfile.csv',
help='Output file name')
args = parser.parse_args()
# check file type
if not args.foldchange.endswith('.xlsx'):
parser.error(f'file1 "{args.foldchange}" must be an excel file')
elif not args.enrichment.endswith('.xlsx'):
parser.error(f'file2 "{args.enrichment}" must be an excel file')
elif not args.outfile.endswith('.csv'):
parser.error(f'outfile "{args.outfile}" must be a csv file')
# check correctly formatted sheets
# get the sheet names from args.enrichment file
sheet_names_enrichment = list(pd.read_excel(args.enrichment, None).keys())
if sheet_names_enrichment != ['Annotation', 'Enrichment']:
parser.error(f'file2 "{args.enrichment}" does not contain required sheets.')
else:
# read the raw datas from metascape Sheet 'Annotation'
args.annotation_all = pd.read_excel(args.enrichment, sheet_name='Annotation')
# read the raw datas from metascape Sheet 'Enrichment'
args.enrichment_all = pd.read_excel(args.enrichment, sheet_name='Enrichment')
# read the raw datas from fold change file
args.fold_change_all = pd.read_excel(args.foldchange)
# check correctly formatted columns
if not set(['Input ID', 'Gene ID']).issubset(args.annotation_all.columns):
parser.error(f'file2 "{args.enrichment}" does not contain required columns.')
elif not set(['GroupID', 'Term', 'Description', 'LogP', 'Genes']).issubset(args.enrichment_all.columns):
parser.error(f'file2 "{args.enrichment}" does not contain required columns.')
elif not set(['ensembl_gene_id', 'FoldChange']).issubset(args.fold_change_all.columns):
parser.error(f'file1 "{args.foldchange}" does not contain required columns.')
return args
# --------------------------------------------------
def create_summary(annotation_all, enrichment_all, fold_change_all):
"""Create a summary dataframe"""
# extract the 'summary' rows from the 'Enrichment' sheet
enrichment_selected = enrichment_all.loc[enrichment_all['GroupID'].str.contains("Summary")]
# create summary dataframe
plot_label = []
val_LogP = []
gene_list = []
gene_1st = []
ensum_list = []
fold_list = []
decision_list = []
for ii in range(len(enrichment_selected)):
plot_label += [enrichment_selected['Term'].iloc[ii] + ': ' + enrichment_selected['Description'].iloc[ii]]
val_LogP += [enrichment_selected['LogP'].iloc[ii]]
gene_list += [enrichment_selected['Genes'].iloc[ii].split(',')]
# choosing the 1st gene from the gene list
# as all genes have similar fold change
gene_1st += [gene_list[ii][0]]
# find out the 'ensemble_gene_id' corresponding
# to the first gene of the 'gene_list' column
for jj in range(len(annotation_all)):
if gene_1st[ii] == annotation_all['Gene ID'].iloc[jj]:
ensum_list += [annotation_all['Input ID'].iloc[jj]]
break
# find out the fold change corresponding to the 'ensemble_gene_id'
for jj in range(len(fold_change_all)):
if ensum_list[ii] == fold_change_all['ensembl_gene_id'].iloc[jj]:
fold_list += [fold_change_all['FoldChange'].iloc[jj]]
# if fold change is less than 1.2 define
# as down_reg and else define it as up_reg
if fold_list[ii] < 1.2:
decision_list += ['down_reg']
else:
decision_list += ['up_reg']
break
# define summary dataframe
summary_df = pd.DataFrame(list(zip(plot_label, val_LogP, gene_list, ensum_list, fold_list, decision_list)),
columns=['Pathway', 'LogP', 'Gene', 'ensemble_ID', 'FoldChange', 'Decision'])
return summary_df
# --------------------------------------------------
def plot_fig(df, xlimit_right, figure_name): # pylint: disable=too-many-arguments
"""
function for plotting logP values
Function description:
df: dataframe containing the values of the logP and pathways
df must have columns with the name 'LogP' and 'Pathway'
xlimit_right: maximum value of the x axis in the plot
figure_name: name of the saved figure
Function usage:
plot_fig(df,5,'file.png')
"""
plt.figure(figsize=(20, 20))
sns.barplot(-df.LogP, df.Pathway) # pylint: disable=too-many-arguments
plt.xlabel("-logP", fontsize=20)
plt.xlim(right=xlimit_right)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.ylabel('', fontsize=20)
plt.savefig(figure_name, bbox_inches='tight')
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
annotation_all = args.annotation_all
enrichment_all = args.enrichment_all
fold_change_all = args.fold_change_all
# create summary dataframe
summary_df = create_summary(annotation_all, enrichment_all, fold_change_all)
# creating up and down regulation dataframe based on the
# decision column from the summary dataframe
up_df = summary_df[summary_df["Decision"] == 'up_reg']
down_df = summary_df[summary_df["Decision"] == 'down_reg']
# defining max x limit
max_x_plot = np.ceil(max(-summary_df['LogP']) / 5) * 5
# plotting up regulations
plot_fig(up_df, max_x_plot, 'up_reg.png')
# plotting down regulations
plot_fig(down_df, max_x_plot, 'down_reg.png')
# creating output file from the summary
summary_df.to_csv(args.outfile)
print(f"See summary in \"{args.outfile}\"")
print('See plots in \"up_reg.png\" and \"down_reg.png\" files.')
# --------------------------------------------------
if __name__ == '__main__':
main()
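# Example invocation (file names are illustrative assumptions, not part of the
# script itself):
#
#     python foldchange_summary.py fold_change.xlsx metascape_enrichment.xlsx summary.csv
#
# This writes summary.csv plus the up_reg.png and down_reg.png bar plots.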
|
the-stack_0_20163 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 11:40:43 2019
@author: v.shkaberda
"""
from functools import wraps
from tkRepairs import NetworkError
import pyodbc
def monitor_network_state(method):
""" Show error message in case of network error.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except pyodbc.Error as e:
# Network error
if e.args[0] in ('01000', '08S01', '08001'):
NetworkError()
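            # Note: any other pyodbc.Error is swallowed here, so the wrapped
            # method silently returns None to its caller.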
return wrapper
class DBConnect(object):
""" Provides connection to database and functions to work with server.
"""
def __init__(self, *, server, db):
self._server = server
self._db = db
# Connection properties
self.conn_str = (
'Driver={{SQL Server}};'
'Server={0};'
'Database={1};'
'Trusted_Connection=yes;'.format(self._server, self._db)
)
def __enter__(self):
self.__db = pyodbc.connect(self.conn_str)
self.__cursor = self.__db.cursor()
return self
def __exit__(self, type, value, traceback):
self.__db.close()
@monitor_network_state
def add_movement(self, UserID, ID, SN, ObjectID, date_movement):
""" Executes procedure that adds info about technics movement.
"""
query = '''
exec technics.ADD_MOVEMENT @UserID = ?,
@ID = ?,
@SN = ?,
@ObjectID = ?,
@date_movement = ?
'''
try:
self.__cursor.execute(query, UserID, ID, SN, ObjectID,
date_movement)
request_success = self.__cursor.fetchone()[0]
self.__db.commit()
return request_success
except pyodbc.ProgrammingError:
return
@monitor_network_state
def create_repair(self, *, userID, SN, date_broken,
date_repair_finished, OutfitOrder, WorkingHours,
UnitOfMeasureID, NumberOfUnits, FaultDescription,
PerformedWork, statusID):
""" Executes procedure that creates new repair.
"""
query = '''
exec technics.CREATE_REPAIR @UserID = ?,
@SN = ?,
@date_broken = ?,
@date_repair_finished = ?,
@OutfitOrder = ?,
@WorkingHours = ?,
@UnitOfMeasureID = ?,
@NumberOfUnits = ?,
@FaultDescription = ?,
@PerformedWork = ?,
@StatusID = ?
'''
try:
self.__cursor.execute(query, userID, SN, date_broken,
date_repair_finished, OutfitOrder, WorkingHours,
UnitOfMeasureID, NumberOfUnits, FaultDescription,
PerformedWork, statusID)
request_success = self.__cursor.fetchone()[0]
self.__db.commit()
return request_success
except pyodbc.ProgrammingError:
return
@monitor_network_state
def access_check(self):
""" Check user permission.
        If access is permitted, returns True; otherwise None.
"""
self.__cursor.execute("exec [technics].[Access_Check]")
access = self.__cursor.fetchone()
# check AccessType and isSuperUser
if access and (access[0] in (1, 2, 3) or access[1]):
return True
@monitor_network_state
    def get_object_owner_info(self, sn, date_broken):
        """ Returns info about the object and its owner for the given serial number and breakage date.
"""
query = '''
exec [technics].[get_object_owner_info] @SN = ?, @date_broken = ?
'''
self.__cursor.execute(query, sn, date_broken)
return self.__cursor.fetchall()
@monitor_network_state
def get_current_repair(self, repairID):
""" Returns info about repairID.
"""
query = '''
exec [technics].[get_current_repair] @repairID = ?
'''
self.__cursor.execute(query, repairID)
return self.__cursor.fetchall()
@monitor_network_state
    def get_measure_units(self):
        """ Returns units of measure list.
"""
self.__cursor.execute("exec [technics].[get_measure_units]")
return self.__cursor.fetchall()
@monitor_network_state
    def get_objects(self):
        """ Returns the objects list.
"""
self.__cursor.execute("exec [technics].[get_objects]")
return self.__cursor.fetchall()
@monitor_network_state
def get_references(self):
""" Returns all references used in filters.
"""
self.__cursor.execute("exec [technics].[get_references]")
return self.__cursor.fetchall()
@monitor_network_state
def get_repair_list(self, *, created_by, rc, store, owner, mfr,
tech_type, status):
""" Executes procedure and return repair list.
"""
query = '''
exec technics.get_repair_list @created_by = ?,
@rc = ?,
@store = ?,
@owner = ?,
@mfr = ?,
@tech_type = ?,
@status = ?
'''
self.__cursor.execute(query, created_by, rc, store, owner, mfr,
tech_type, status)
return self.__cursor.fetchall()
@monitor_network_state
def get_technics_info(self):
""" Returns technics info.
"""
self.__cursor.execute("exec [technics].[get_technics_info]")
return self.__cursor.fetchall()
@monitor_network_state
def get_user_info(self):
""" Returns information about current user based on ORIGINAL_LOGIN().
"""
self.__cursor.execute("exec [technics].[get_user_info]")
return self.__cursor.fetchone()
@monitor_network_state
    def get_version_for_comparison(self):
        """ Returns the server-side version that is expected to match the client version.
"""
self.__cursor.execute("exec [technics].[get_version_for_comparison]")
return tuple(self.__cursor.fetchone())
@monitor_network_state
def raw_query(self, query):
""" Takes the query and returns output from db.
"""
self.__cursor.execute(query)
return self.__cursor.fetchall()
@monitor_network_state
def update_repair(self, UserID, RepairID, SN, date_broken,
date_repair_finished, OutfitOrder, WorkingHours,
UnitOfMeasureID, NumberOfUnits, FaultDescription,
PerformedWork, StatusID):
""" Executes procedure that updates repair.
"""
query = '''
exec technics.UPDATE_REPAIR @UserID = ?,
@RepairID = ?,
@SN = ?,
@date_broken = ?,
@date_repair_finished = ?,
@OutfitOrder = ?,
@WorkingHours = ?,
@UnitOfMeasureID = ?,
@NumberOfUnits = ?,
@FaultDescription = ?,
@PerformedWork = ?,
@StatusID = ?
'''
try:
self.__cursor.execute(query, UserID, RepairID, SN, date_broken,
date_repair_finished, OutfitOrder, WorkingHours,
UnitOfMeasureID, NumberOfUnits, FaultDescription,
PerformedWork, StatusID)
request_success = self.__cursor.fetchone()[0]
self.__db.commit()
return request_success
except pyodbc.ProgrammingError:
return
if __name__ == '__main__':
with DBConnect(server='s-kv-center-s64', db='CB') as sql:
query = 'select 42'
assert sql.raw_query(query)[0][0] == 42, 'Server returns no output.'
print('Connected successfully.')
input('Press Enter to exit...')
|
the-stack_0_20164 | import setuptools
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
func = lambda name, enc=ascii: {True: enc}.get(name=='mbcs')
codecs.register(func)
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = ['requests', 'click']
setuptools.setup(
name="rnaget-compliance",
version="1.0.0",
author="Sean Upchurch",
author_email="[email protected]",
description="A compliance utility reporting system for rnaget server implementations",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ga4gh-rnaseq/rnaget-compliance-suite",
package_data={'': ['web/*']},
packages=setuptools.find_packages(),
install_requires=install_requires,
entry_points='''
[console_scripts]
rnaget-compliance=compliance_suite.cli:main
''',
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
),
)
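# After installation (for example `pip install .` from this directory), the
# entry point declared above should expose an `rnaget-compliance` console
# command; its options are defined in compliance_suite.cli and are not shown here.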
|
the-stack_0_20166 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
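# NOTE: this is a fragment; it assumes that the globals `command` (a user-entered
# string such as "select <name>") and `spisoks` (a list of dicts with keys
# 'name', 'date' and 'tel') are defined elsewhere in the original program.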
def select():
    parts = command.split(' ', maxsplit=2)
    sel = parts[1]
    count = 0
    for spisok in spisoks:
        if spisok.get('name') == sel:
            count = "Date of birth"
            print(
                '{:>4}: {}'.format(count, spisok.get('date', ''))
            )
            print('Phone number', spisok.get('tel', ''))
            print('Last name, first name', spisok.get('name', ''))
    # If the counter is still 0, no matching entries were found.
    if count == 0:
        print("No person found.")
|
the-stack_0_20167 | import os
from cloudburst.client.client import CloudburstConnection
cloudburst = None
def get_or_init_client():
global cloudburst
if cloudburst is None:
ip = os.environ.get("MODIN_IP", None)
conn = os.environ.get("MODIN_CONNECTION", None)
cloudburst = CloudburstConnection(conn, ip, local=True)
return cloudburst
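# Minimal usage sketch. The environment variables below are read by
# get_or_init_client(); their values are deployment-specific assumptions:
#
#     export MODIN_CONNECTION=<cloudburst scheduler address>
#     export MODIN_IP=<ip of this machine, visible to the scheduler>
#
if __name__ == "__main__":
    client = get_or_init_client()
    # A second call reuses the cached module-level connection.
    assert client is get_or_init_client()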
|
the-stack_0_20168 | import inspect
import os
from pathlib import Path
import toml
__all__ = ['read_secret', 'SecretsError']
class SecretsError(Exception):
pass
def read_secret(name):
v = os.environ.get(name)
if v is None:
raise SecretsError('Missing environment variable {}'.format(name))
if not v.startswith('@'):
return v
secrets_filename = Path(v[1:])
if not secrets_filename.is_absolute():
outer_frame = inspect.currentframe().f_back
caller_filename, *_ = inspect.getframeinfo(outer_frame)
caller_directory = Path(caller_filename).parent
secrets_filename = _find_secrets_file(
secrets_filename, caller_directory)
return _read_secret_from_file(name, secrets_filename)
def _find_secrets_file(filename, directory):
given_directory = directory
while 1:
pathname = directory / filename
if pathname.exists():
return pathname
if str(directory.parent) == str(directory):
raise SecretsError(
'No file named {filename} found in {given_directory}'
' or any of its parents'.format(
filename=filename, given_directory=given_directory))
directory = directory.parent
def _read_secret_from_file(name, secrets_filename):
try:
data = toml.load(secrets_filename)
except toml.TomlDecodeError as e:
error = SecretsError('Secrets file {} cannot be parsed as TOML'.format(
secrets_filename))
raise error from e
if name in data:
return data[name]
else:
raise SecretsError(
            'Secrets file {secrets_filename} does not define {name}'.format(
secrets_filename=secrets_filename, name=name))
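# Usage sketch (the variable name and values below are illustrative assumptions):
# with DB_PASSWORD=@secrets.toml in the environment, read_secret() walks up from
# the caller's directory until it finds secrets.toml and returns its DB_PASSWORD
# entry; a plain value (no leading '@') is returned as-is.
if __name__ == '__main__':
    os.environ.setdefault('DB_PASSWORD', 'plain-text-example')
    try:
        print(read_secret('DB_PASSWORD'))
    except SecretsError as exc:
        print('Could not resolve DB_PASSWORD:', exc)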
|
the-stack_0_20169 | import copy
import re
import numpy as np
import pykeops
from pykeops import numpy_found as usenumpy
from pykeops import torch_found as usetorch
if usenumpy:
from pykeops.numpy.utils import numpytools
if usetorch:
import torch
from pykeops.torch.utils import torchtools
# Convenient aliases:
def Var(x_or_ind, dim=None, cat=None):
if dim is None:
# init via data: we assume x_or_ind is data
return LazyTensor(x_or_ind, axis=cat)
else:
# init via symbolic variable given as triplet (ind,dim,cat)
return LazyTensor((x_or_ind, dim, cat))
def Vi(x_or_ind, dim=None):
r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 0.
"""
return Var(x_or_ind, dim, 0)
def Vj(x_or_ind, dim=None):
r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 1.
"""
return Var(x_or_ind, dim, 1)
def Pm(x_or_ind, dim=None):
r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 2.
"""
return Var(x_or_ind, dim, 2)
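# Usage sketch for the Vi / Vj / Pm aliases (kept as a comment so that importing
# this module stays side-effect free; the array shapes are illustrative assumptions):
#
#     x = np.random.rand(1000, 3)            # M points in R^3
#     y = np.random.rand(2000, 3)            # N points in R^3
#     x_i, y_j = Vi(x), Vj(y)                # symbolic "i"- and "j"-indexed variables
#     D_ij = ((x_i - y_j) ** 2).sum(-1)      # symbolic (M, N) matrix of squared distances
#     d_i = D_ij.sum(axis=1)                 # dense (M, 1) array: reduction over "j"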
class LazyTensor:
r"""Symbolic wrapper for NumPy arrays and PyTorch tensors.
:class:`LazyTensor` encode numerical arrays through the combination
of a symbolic, **mathematical formula** and a list of **small data arrays**.
They can be used to implement efficient algorithms on objects
that are **easy to define**, but **impossible to store** in memory
(e.g. the matrix of pairwise distances between
two large point clouds).
:class:`LazyTensor` may be created from standard NumPy arrays or PyTorch tensors,
combined using simple mathematical operations and converted
back to NumPy arrays or PyTorch tensors with
efficient reduction routines, which outperform
standard tensorized implementations by two orders of magnitude.
"""
def __init__(self, x=None, axis=None):
r"""Creates a KeOps symbolic variable.
Args:
x: May be either:
- A *float*, a *list of floats*, a *NumPy float*, a *0D or 1D NumPy array*,
a *0D or 1D PyTorch tensor*, in which case the
:class:`LazyTensor`
represents a constant **vector of parameters**, to be broadcasted
on other :class:`LazyTensor`.
- A *2D NumPy array* or *PyTorch tensor*, in which case the
:class:`LazyTensor`
represents a **variable** indexed by
:math:`i` if **axis=0** or :math:`j` if **axis=1**.
- A *3D+ NumPy array* or *PyTorch tensor* with
a dummy dimension (=1) at position -3 or -2,
in which case the
:class:`LazyTensor`
represents a **variable** indexed by
:math:`i` or :math:`j`, respectively.
Dimensions before the last three will be handled as
**batch dimensions**, that may support
operator broadcasting.
- A *tuple of 3 integers* (ind,dim,cat), in which case the
:class:`LazyTensor` represents a :doc:`symbolic variable <../../../api/math-operations>`
that should be instantiated at call-time.
- An *integer*, in which case the
:class:`LazyTensor` represents an **integer constant** handled efficiently at compilation time.
- **None**, for internal use.
axis (int): should be equal to 0 or 1 if **x** is a 2D tensor, and **None** otherwise.
.. warning::
A :class:`LazyTensor` constructed
from a NumPy array or a PyTorch tensor retains its **dtype** (float32 vs float64)
and **device** properties (is it stored on the GPU?).
Since KeOps does **not** support automatic type conversions and data transfers,
please make sure **not to mix** :class:`LazyTensor`
that come from different frameworks/devices or which are stored
with different precisions.
"""
self.dtype = None
self.variables = ()
self.symbolic_variables = ()
self.formula = None
self.formula2 = None
self.ndim = None
self.tools = None
self.Genred = None
self.KernelSolve = None
self.batchdims = None
self.ni = None
self.nj = None
self.axis = None
self.ranges = None # Block-sparsity pattern
self.backend = None # "CPU", "GPU", "GPU_2D", etc.
# Duck typing attribute, to be used instead of "isinstance(self, LazyTensor)"
# This is a workaround for an importlib.reload messy situation,
# and will come handy when we'll start supporting Block LazyTensors
# and other custom operators.
self.__lazytensor__ = True
if x is not None: # A KeOps LazyTensor can be built from many different objects:
# Stage 1: Are we dealing with simple numbers? ---------------------
typex = type(x)
if typex == tuple: # x is not a Tensor but a triplet of integers
# (ind,dim,cat) that specifies an abstract variable
if len(x) != 3 or not isinstance(x[0], int) or not isinstance(x[1], int) or not isinstance(x[2], int):
raise ValueError(
"LazyTensors(tuple) is only valid if tuple = (ind,dim,cat) is a triplet of integers.")
if axis is not None:
raise ValueError("'axis' parameter should not be given when 'x' is of the form (ind,dim,cat).")
self.symbolic_variables = (x,)
self.ndim = x[1]
self.axis = x[2]
self.formula = "VarSymb({},{},{})".format(x[0], self.ndim, self.axis)
return # That's it!
elif typex == int: # Integer constants are best handled directly by the compiler
self.formula = "IntCst(" + str(x) + ")"
self.ndim = 1
self.axis = 2
return # That's it!
elif typex == float: # Float numbers must be encoded as Parameters,
# as C++'s templating system cannot deal with floating point arithmetics.
x = [x] # Convert to list and go to stage 2
elif typex == list:
pass
elif usenumpy and typex in (np.float32, np.float64): # NumPy scalar -> NumPy array
x = np.array(x).reshape(1)
elif usetorch and typex == torch.Tensor and len(x.shape) == 0: # Torch scalar -> Torch tensor
x = x.view(1)
# Stage 2: Dealing with python lists, understood as arrays of floats,
# and handled as Parameter variables without fixed dtype
if type(x) == list:
if axis is not None and axis != 2:
raise ValueError("Lists of numbers are handled as Parameter " \
+ "variables, with an optional 'axis' argument that is equal to 2.")
self.variables = (x,)
self.ndim = len(x)
self.axis = 2
self.formula = "Var({},{},2)".format(id(x), self.ndim)
return # That's it!
# Stage 3: Dealing with NumPy and Torch tensors --------------------
typex = type(x)
if usenumpy and typex == np.ndarray:
self.tools = numpytools
self.Genred = numpytools.Genred
self.KernelSolve = numpytools.KernelSolve
self.dtype = self.tools.dtypename(self.tools.dtype(x))
elif usetorch and typex == torch.Tensor:
self.tools = torchtools
self.Genred = torchtools.Genred
self.KernelSolve = torchtools.KernelSolve
self.dtype = self.tools.dtypename(self.tools.dtype(x))
else:
str_numpy = "NumPy arrays, " if usenumpy else ""
str_torch = "PyTorch tensors, " if usetorch else ""
raise ValueError("LazyTensors should be built from " + str_numpy + str_torch \
+ "float/integer numbers, lists of floats or 3-uples of integers. " \
+ "Received: {}".format(typex))
if len(x.shape) >= 3: # Infer axis from the input shape
# If x is a 3D+ array, its shape must be either (..,M,1,D) or (..,1,N,D) or (..,1,1,D).
# We infer axis from shape and squeeze out the "1" dimensions:
if axis is not None:
raise ValueError("'axis' parameter should not be given when 'x' is a 3D tensor.")
if len(x.shape) > 3: # We're in "batch mode"
self.batchdims = tuple(x.shape[:-3])
if x.shape[-3] == 1:
if x.shape[-2] == 1: # (..,1,1,D) -> Pm(D)
x = x.squeeze(-2).squeeze(-2)
axis = 2
else: # (..,1,N,D) -> Vj(D)
x = x.squeeze(-3)
axis = 1
elif x.shape[-2] == 1: # (M,1,D) -> Vi(D)
x = x.squeeze(-2)
axis = 0
else:
raise ValueError(
"If 'x' is a 3D+ tensor, its shape should be one of (..,M,1,D), (..,1,N,D) or (..,1,1,D).")
# Stage 4: x is now encoded as a 2D or 1D array + batch dimensions --------------------
if len(x.shape) >= 2 and axis != 2: # shape is (..,M,D) or (..,N,D), with an explicit 'axis' parameter
if axis is None or axis not in (0, 1):
raise ValueError(
"When 'x' is encoded as a 2D array, LazyTensor expects an explicit 'axis' value in {0,1}.")
# id(x) is used as temporary identifier for KeOps "Var",
# this identifier will be changed when calling method "fixvariables"
# But first we do a small hack, in order to distinguish same array involved twice in a formula but with
# different axis (e.g. Vi(x)-Vj(x) formula): we do a dummy reshape in order to get a different id
if axis == 1:
x = self.tools.view(x, x.shape)
self.variables = (x,)
self.ndim = x.shape[-1]
self.axis = axis
self.formula = "Var({},{},{})".format(id(x), self.ndim, self.axis)
if axis == 0:
self.ni = x.shape[-2]
else:
self.nj = x.shape[-2]
self.dtype = self.tools.dtypename(self.tools.dtype(x))
elif len(x.shape) == 1 or axis == 2: # shape is (D,): x is a "Pm(D)" parameter
if axis is not None and axis != 2:
raise ValueError(
"When 'x' is encoded as a 1D or 0D array, 'axis' must be None or 2 (= Parameter variable).")
self.variables = (x,)
self.ndim = x.shape[-1]
self.axis = 2
self.formula = "Var({},{},2)".format(id(x), self.ndim)
else:
raise ValueError("LazyTensors can be built from 0D, 1D, 2D or 3D+ tensors. " \
+ "Received x of shape: {}.".format(x.shape))
# N.B.: We allow empty init!
def fixvariables(self):
"""If needed, assigns final labels to each variable and pads their batch dimensions prior to a :mod:`Genred()` call."""
newvars = ()
if self.formula2 is None: self.formula2 = "" # We don't want to get regexp errors...
device = None # Useful to load lists (and float constants) on the proper device
for v in self.variables:
device = self.tools.device(v)
if device is not None:
break
i = len(self.symbolic_variables) # The first few labels are already taken...
for v in self.variables: # So let's loop over our tensors, and give them labels:
idv = id(v)
if type(v) == list:
v = self.tools.array(v, self.dtype, device)
# Replace "Var(idv," by "Var(i," and increment 'i':
tag = "Var({},".format(idv)
if tag in self.formula + self.formula2:
self.formula = self.formula.replace(tag, "Var({},".format(i))
self.formula2 = self.formula2.replace(tag, "Var({},".format(i))
# Detect if v is meant to be used as a variable or as a parameter:
str_cat_v = re.search(r"Var\({},\d+,([012])\)".format(i), self.formula + self.formula2).group(1)
is_variable = 1 if str_cat_v in ('0', '1') else 0
dims_to_pad = self.nbatchdims + 1 + is_variable - len(v.shape)
padded_v = self.tools.view(v, (1,) * dims_to_pad + v.shape)
newvars += (padded_v,)
i += 1
# "VarSymb(..)" appear when users rely on the "LazyTensor(Ind,Dim,Cat)" syntax,
# for the sake of disambiguation:
self.formula = self.formula.replace("VarSymb(", "Var(") # We can now replace them with
self.formula2 = self.formula2.replace("VarSymb(", "Var(") # actual "Var" symbols
if self.formula2 == "": self.formula2 = None # The pre-processing step is now over
self.variables = newvars
def separate_kwargs(self, kwargs):
# separating keyword arguments for Genred init vs Genred call...
# Currently the only three additional optional keyword arguments that are passed to Genred init
# are accuracy options: use_double_acc, use_BlockRed, use_Kahan. Since they all start
# by "use_", we use this to distinguish...
kwargs_init = []
kwargs_call = []
for key in kwargs:
if len(key)>3 and key[:4]=="use_":
kwargs_init += [(key,kwargs[key])]
else:
kwargs_call += [(key,kwargs[key])]
kwargs_init = dict(kwargs_init)
kwargs_call = dict(kwargs_call)
return kwargs_init, kwargs_call
def promote(self, other, props):
r"""
Creates a new :class:`LazyTensor` whose **None** properties are set to those of **self** or **other**.
"""
res = LazyTensor()
for prop in props:
x, y = getattr(self, prop), getattr(other, prop)
if x is not None:
if y is not None and x != y:
raise ValueError("Incompatible values for attribute {}: {} and {}.".format(prop, x, y))
setattr(res, prop, x)
else:
setattr(res, prop, y)
return res
def init(self):
r"""
Creates a copy of a :class:`LazyTensor`, without **formula** attribute.
"""
res = LazyTensor()
res.dtype = self.dtype
res.tools = self.tools
res.dtype = self.dtype
res.Genred = self.Genred
res.KernelSolve = self.KernelSolve
res.batchdims = self.batchdims
res.ni = self.ni
res.nj = self.nj
res.ranges = self.ranges
res.backend = self.backend
res.variables = self.variables
res.symbolic_variables = self.symbolic_variables
return res
def join(self, other):
r"""
Merges the variables and attributes of two :class:`LazyTensor`, with a compatibility check. This method concatenates tuples of variables, without paying attention to repetitions.
"""
res = LazyTensor.promote(self, other,
("dtype", "tools", "Genred", "KernelSolve", "ni", "nj", "ranges", "backend"))
res.symbolic_variables = self.symbolic_variables + other.symbolic_variables
def max_tuple(a, b):
return tuple( max(a_i, b_i) for (a_i, b_i) in zip(a, b) )
def check_broadcasting(dims_1, dims_2):
r"""
Checks that the shapes **dims_1** and **dims_2** are compatible with each other.
"""
if dims_1 is None:
return dims_2
if dims_2 is None:
return dims_1
padded_dims_1 = (1,) * (len(dims_2) - len(dims_1)) + dims_1
padded_dims_2 = (1,) * (len(dims_1) - len(dims_2)) + dims_2
for (dim_1, dim_2) in zip(padded_dims_1, padded_dims_2):
if dim_1 != 1 and dim_2 != 1 and dim_1 != dim_2:
raise ValueError("Incompatible batch dimensions: {} and {}.".format(dims_1, dims_2))
return max_tuple(padded_dims_1, padded_dims_2)
# Checks on the batch dimensions - we support broadcasting:
res.batchdims = check_broadcasting(self.batchdims, other.batchdims)
# N.B.: If needed, variables will be padded with "dummy 1's" just before the Genred call, in self/res.fixvariables():
res.variables = self.variables + other.variables
return res
# Prototypes for unary and binary operations ==============================
def unary(self, operation, dimres=None, opt_arg=None, opt_arg2=None):
r"""
Symbolically applies **operation** to **self**, with optional arguments if needed.
The optional argument **dimres** may be used to specify the dimension of the output **result**.
"""
# If needed, convert float numbers / lists / arrays / tensors to LazyTensors:
if not hasattr(self, "__lazytensor__"): self = LazyTensor(self)
# we must prevent any operation if self is the output of a reduction operation,
# i.e. if it has a reduction_op field
if hasattr(self, 'reduction_op'):
raise ValueError("Input is a 'reduced' LazyTensor, no operation can be applied to it. ")
if not dimres: dimres = self.ndim
res = self.init() # Copy of self, without a formula
if opt_arg2 is not None:
res.formula = "{}({},{},{})".format(operation, self.formula, opt_arg, opt_arg2)
elif opt_arg is not None:
res.formula = "{}({},{})".format(operation, self.formula, opt_arg)
else:
res.formula = "{}({})".format(operation, self.formula)
res.ndim = dimres
return res
def binary(self, other, operation, is_operator=False, dimres=None, dimcheck="sameor1", opt_arg=None,
opt_pos="last"):
r"""Symbolically applies **operation** to **self**, with optional arguments if needed.
Keyword args:
- dimres (int): May be used to specify the dimension of the output **result**.
- is_operator (bool, default=False): May be used to specify if **operation** is
an operator like ``+``, ``-`` or a "genuine" function.
- dimcheck (string): shall we check the input dimensions?
Supported values are ``"same"``, ``"sameor1"``, or **None**.
"""
# If needed, convert float numbers / lists / arrays / tensors to LazyTensors:
if not hasattr(self, "__lazytensor__"): self = LazyTensor(self)
if not hasattr(other, "__lazytensor__"): other = LazyTensor(other)
# we must prevent any operation if self or other is the output of a reduction operation,
# i.e. if it has a reduction_op field
if hasattr(self, 'reduction_op') or hasattr(other, 'reduction_op'):
raise ValueError("One of the inputs is a 'reduced' LazyTensor, no operation can be applied to it. ")
# By default, the dimension of the output variable is the max of the two operands:
if not dimres: dimres = max(self.ndim, other.ndim)
if dimcheck == "same":
if self.ndim != other.ndim:
raise ValueError("Operation {} expects inputs of the same dimension. ".format(operation) \
+ "Received {} and {}.".format(self.ndim, other.ndim))
elif dimcheck == "sameor1":
if (self.ndim != other.ndim and self.ndim != 1 and other.ndim != 1):
raise ValueError("Operation {} expects inputs of the same dimension or dimension 1. ".format(operation) \
+ "Received {} and {}.".format(self.ndim, other.ndim))
elif dimcheck != None:
raise ValueError("incorrect dimcheck keyword in binary operation")
res = LazyTensor.join(self, other) # Merge the attributes and variables of both operands
res.ndim = dimres
if is_operator:
res.formula = "({} {} {})".format(self.formula, operation, other.formula)
elif opt_arg is not None:
if hasattr(opt_arg, "__lazytensor__"): opt_arg = opt_arg.formula
if opt_pos == "last":
res.formula = "{}({}, {}, {})".format(operation, self.formula, other.formula, opt_arg)
elif opt_pos == "middle":
res.formula = "{}({}, {}, {})".format(operation, self.formula, opt_arg, other.formula)
else:
res.formula = "{}({}, {})".format(operation, self.formula, other.formula)
return res
# Prototypes for reduction operations =====================================
def reduction(self, reduction_op, other=None, opt_arg=None, axis=None, dim=None, call=True, **kwargs):
r"""
Applies a reduction to a :class:`LazyTensor`. This method is used internally by the LazyTensor class.
Args:
reduction_op (string): the string identifier of the reduction, which will be passed to the KeOps routines.
Keyword Args:
other: May be used to specify some **weights** ; depends on the reduction.
opt_arg: typically, some integer needed by ArgKMin reductions ; depends on the reduction.
axis (integer): The axis with respect to which the reduction should be performed.
Supported values are **nbatchdims** and **nbatchdims + 1**, where **nbatchdims** is the number of "batch" dimensions before the last three
(:math:`i` indices, :math:`j` indices, variables' dimensions).
dim (integer): alternative keyword for the **axis** argument.
call (True or False): Should we actually perform the reduction on the current variables?
If **True**, the returned object will be a NumPy array or a PyTorch tensor.
Otherwise, we simply return a callable :class:`LazyTensor` that may be used
as a :mod:`pykeops.numpy.Genred` or :mod:`pykeops.torch.Genred` function
on arbitrary tensor data.
backend (string): Specifies the map-reduce scheme,
as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module.
device_id (int, default=-1): Specifies the GPU that should be used
to perform the computation; a negative value lets your system
choose the default GPU. This parameter is only useful if your
system has access to several GPUs.
ranges (6-uple of IntTensors, None by default):
Ranges of integers that specify a
:doc:`block-sparse reduction scheme <../../sparsity>`
as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module.
If **None** (default), we simply use a **dense Kernel matrix**
as we loop over all indices
:math:`i\in[0,M)` and :math:`j\in[0,N)`.
use_double_acc (bool, default False): accumulate results of reduction in float64 variables, before casting to float32.
This can only be set to True when data is in float32, and reduction_op is one of:"Sum", "MaxSumShiftExp", "LogSumExp",
"Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
It improves the accuracy of results in case of large sized data, but is slower.
use_BlockRed (bool, default False): use an intermediate accumulator in each block before accumulating in the output. This improves
accuracy for large sized data. This can only be set to True when reduction_op is one of:"Sum", "MaxSumShiftExp", "LogSumExp",
"Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
use_Kahan (bool, default False): use Kahan summation algorithm to compensate for round-off errors. This improves
accuracy for large sized data. This can only be set to True when reduction_op is one of:"Sum", "MaxSumShiftExp", "LogSumExp",
"Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
"""
if axis is None: axis = dim # NumPy uses axis, PyTorch uses dim...
if axis - self.nbatchdims not in (0, 1):
raise ValueError(
"Reductions must be called with 'axis' (or 'dim') equal to the number of batch dimensions + 0 or 1.")
if other is None:
res = self.init() # ~ self.copy()
res.formula2 = None
else:
res = self.join(other)
res.formula2 = other.formula
res.formula = self.formula
res.reduction_op = reduction_op
res.axis = axis - self.nbatchdims
res.opt_arg = opt_arg
kwargs_init, kwargs_call = self.separate_kwargs(kwargs)
res.kwargs = kwargs_call
res.ndim = self.ndim
if res.dtype is not None:
res.fixvariables() # Turn the "id(x)" numbers into consecutive labels
# "res" now becomes a callable object:
res.callfun = res.Genred(res.formula, [], res.reduction_op, res.axis,
res.dtype, res.opt_arg, res.formula2, **kwargs_init)
if call and len(res.symbolic_variables) == 0 and res.dtype is not None:
return res()
else:
return res
def solve(self, other, var=None, call=True, **kwargs):
r"""
Solves a positive definite linear system of the form ``sum(self) = other`` or ``sum(self*var) = other`` , using a conjugate gradient solver.
Args:
self (:class:`LazyTensor`): KeOps variable that encodes a symmetric positive definite matrix / linear operator.
other (:class:`LazyTensor`): KeOps variable that encodes the second member of the equation.
Keyword args:
var (:class:`LazyTensor`):
If **var** is **None**, **solve** will return the solution
of the ``self * var = other`` equation.
Otherwise, if **var** is a KeOps symbolic variable, **solve** will
assume that **self** defines an expression that is linear
with respect to **var** and solve the equation ``self(var) = other``
with respect to **var**.
alpha (float, default=1e-10): Non-negative **ridge regularization** parameter.
call (bool): If **True** and if no other symbolic variable than
**var** is contained in **self**, **solve** will return a tensor
solution of our linear system. Otherwise **solve** will return
a callable :class:`LazyTensor`.
backend (string): Specifies the map-reduce scheme,
as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module.
device_id (int, default=-1): Specifies the GPU that should be used
to perform the computation; a negative value lets your system
choose the default GPU. This parameter is only useful if your
system has access to several GPUs.
ranges (6-uple of IntTensors, None by default):
Ranges of integers that specify a
:doc:`block-sparse reduction scheme <../../sparsity>`
as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module.
If **None** (default), we simply use a **dense Kernel matrix**
as we loop over all indices
:math:`i\in[0,M)` and :math:`j\in[0,N)`.
use_double_acc (bool, default False): accumulate results of reductions in float64 variables, before casting to float32.
This can only be set to True when data is in float32, and reduction_op is one of:"Sum", "MaxSumShiftExp", "LogSumExp",
"Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
It improves the accuracy of results in case of large sized data, but is slower.
use_BlockRed (bool, default False): use an intermediate accumulator in each block before accumulating in the output. This improves
accuracy for large sized data. This can only be set to True when reduction_op is one of:"Sum", "MaxSumShiftExp", "LogSumExp",
"Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
use_Kahan (bool, default False): use Kahan summation algorithm to compensate for round-off errors. This improves
accuracy for large sized data. This can only be set to True when reduction_op is one of:"Sum", "MaxSumShiftExp", "LogSumExp",
"Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight".
.. warning::
Please note that **no check** of symmetry and definiteness will be
performed prior to our conjugate gradient descent.
"""
if not hasattr(other, "__lazytensor__"):
other = LazyTensor(other, axis=0) # a vector is normally indexed by "i"
# If given, var is symbolic variable corresponding to unknown
# other must be a variable equal to the second member of the linear system,
# and it may be symbolic. If it is symbolic, its index should match the index of var
# if other is not symbolic, all variables in self must be non symbolic
if len(other.symbolic_variables) == 0 and len(self.symbolic_variables) != 0:
raise ValueError("If 'self' has symbolic variables, so should 'other'.")
# we infer axis of reduction as the opposite of the axis of output
axis = 1 - other.axis
if var is None:
# this is the classical mode: we want to invert sum(self*var) = other
# we define var as a new symbolic variable with same dimension as other
# and we assume axis of var is same as axis of reduction
varindex = len(self.symbolic_variables)
var = Var(varindex, other.ndim, axis)
res = self * var
else:
# var is given and must be a symbolic variable which is already inside self
varindex = var.symbolic_variables[0][0]
res = self.init()
res.formula = self.formula
res.formula2 = None
res.reduction_op = "Solve"
res.varindex = varindex
res.varformula = var.formula.replace("VarSymb", "Var")
res.other = other
res.axis = axis
kwargs_init, res.kwargs = self.separate_kwargs(kwargs)
res.ndim = self.ndim
if res.dtype is not None:
res.fixvariables()
res.callfun = res.KernelSolve(res.formula, [], res.varformula,
res.axis, res.dtype, **kwargs_init)
# we call if call=True, if other is not symbolic, and if the dtype is set
if call and len(other.symbolic_variables) == 0 and res.dtype is not None:
return res()
else:
return res
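    # Usage sketch for solve() (comment only; the tensors and the ridge value are
    # illustrative assumptions):
    #
    #     x_i, x_j = Vi(x), Vj(x)                # x is an (N, D) array
    #     K_xx = (-x_i.sqdist(x_j) / 2).exp()    # symbolic Gaussian kernel matrix
    #     a = K_xx.solve(b, alpha=1e-6)          # solves (alpha * Id + K_xx) a = b for a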
def __call__(self, *args, **kwargs):
r"""
Executes a :mod:`Genred <pykeops.torch.Genred>` or :mod:`KernelSolve <pykeops.torch.KernelSolve>` call on the input data, as specified by **self.formula** .
"""
if not hasattr(self,"reduction_op"):
            raise ValueError("A LazyTensor object may be called only if it corresponds to the output of a reduction or solve operation.")
self.kwargs.update(kwargs)
if self.ranges is not None and "ranges" not in self.kwargs:
self.kwargs.update({"ranges": self.ranges})
if self.backend is not None and "backend" not in self.kwargs:
self.kwargs.update({"backend": self.backend})
if self.dtype is None: # This can only happen if we haven't encountered 2D or 3D arrays just yet...
if usenumpy and isinstance(args[0],
np.ndarray): # We use the "NumPy" or "PyTorch" backend depending on the first argument
self.tools = numpytools
self.Genred = numpytools.Genred
self.KernelSolve = numpytools.KernelSolve
elif usetorch and isinstance(args[0], torch.Tensor):
self.tools = torchtools
self.Genred = torchtools.Genred
self.KernelSolve = torchtools.KernelSolve
self.dtype = self.tools.dtypename(self.tools.dtype(args[0]))
self.fixvariables()
kwargs_init, self.kwargs = self.separate_kwargs(self.kwargs)
if self.reduction_op == "Solve":
self.callfun = self.KernelSolve(self.formula, [], self.formula2,
self.axis, self.dtype, **kwargs_init)
else:
self.callfun = self.Genred(self.formula, [], self.reduction_op,
self.axis, self.dtype, self.opt_arg, self.formula2, **kwargs_init)
if self.reduction_op == "Solve" and len(self.other.symbolic_variables) == 0:
# here args should be empty, according to our rule
if args != ():
raise ValueError("no input required")
# we replace by other
args = (self.other.variables[0],)
return self.callfun(*args, *self.variables, **self.kwargs)
def __str__(self):
r"""
Returns a verbose string identifier.
"""
tmp = self.init() # ~ self.copy()
tmp.formula = self.formula
tmp.formula2 = None if not hasattr(self, 'formula2') else self.formula2
if tmp.tools is None:
# just to avoid error when printing a parameter vector entered as a raw python list
tmp.tools = numpytools
tmp.fixvariables() # Replace Var(id(x),...) with consecutive labels
string = "KeOps LazyTensor\n formula: {}".format(tmp.formula)
if len(self.symbolic_variables) > 0:
string += "\n symbolic variables: Var{}".format(self.symbolic_variables[0])
for var in self.symbolic_variables[1:]: string += ", Var{}".format(var)
string += "\n shape: {}".format(self.shape)
if hasattr(self, 'reduction_op'):
string += "\n reduction: {} (axis={})".format(self.reduction_op, self.axis)
if tmp.formula2 is not None:
string += "\n formula2: {}".format(tmp.formula2)
if hasattr(self, 'opt_arg') and self.opt_arg is not None:
string += "\n opt_arg: {}".format(self.opt_arg)
return string
@property
def shape(self):
btch = () if self.batchdims is None else self.batchdims
ni = 1 if self.ni is None else self.ni
nj = 1 if self.nj is None else self.nj
ndim = 1 if self.ndim is None else self.ndim
return btch + (ni, nj) if ndim == 1 else btch + (ni, nj, ndim)
@property
def _shape(self):
btch = () if self.batchdims is None else self.batchdims
ni = 1 if self.ni is None else self.ni
nj = 1 if self.nj is None else self.nj
ndim = 1 if self.ndim is None else self.ndim
return btch + (ni, nj, ndim)
def dim(self):
r"""
Just as in PyTorch, returns the number of dimensions of a :class:`LazyTensor`.
"""
return len(self._shape)
@property
def nbatchdims(self):
return 0 if self.batchdims is None else len(self.batchdims)
# List of supported operations ============================================
# N.B.: This flag prevents NumPy (and also PyTorch ?) from overriding
# the KeOps implementations of __radd__, __rdiv___, etc. written below.
# For instance, if x is a NumPy array and y is a KeOps LazyTensor,
# writing "x+y" will call y.__radd__(x) (LazyTensor method) instead
# of x.__add__(y) (NumPy method)
__array_ufunc__ = None
# Arithmetics --------------------------------------------------------------
def __add__(self, other):
r"""
Broadcasted addition operator - a binary operation.
``x + y`` returns a :class:`LazyTensor` that encodes,
symbolically, the addition of ``x`` and ``y``.
"""
return self.binary(other, "+", is_operator=True)
def __radd__(self, other):
r"""
Broadcasted addition operator - a binary operation.
``x + y`` returns a :class:`LazyTensor` that encodes,
symbolically, the addition of ``x`` and ``y``.
"""
if other == 0:
return self
else:
return LazyTensor.binary(other, self, "+", is_operator=True)
def __sub__(self, other):
r"""
Broadcasted subtraction operator - a binary operation.
``x - y`` returns a :class:`LazyTensor` that encodes,
symbolically, the subtraction of ``x`` and ``y``.
"""
return self.binary(other, "-", is_operator=True)
def __rsub__(self, other):
r"""
Broadcasted subtraction operator - a binary operation.
``x - y`` returns a :class:`LazyTensor` that encodes,
symbolically, the subtraction of ``x`` and ``y``.
"""
if other == 0:
return -self
else:
return LazyTensor.binary(other, self, "-", is_operator=True)
def __mul__(self, other):
r"""
Broadcasted elementwise product - a binary operation.
``x * y`` returns a :class:`LazyTensor` that encodes, symbolically,
the elementwise product of ``x`` and ``y``.
"""
return self.binary(other, "*", is_operator=True)
def __rmul__(self, other):
r"""
Broadcasted elementwise product - a binary operation.
``x * y`` returns a :class:`LazyTensor` that encodes, symbolically,
the elementwise product of ``x`` and ``y``.
"""
if other == 0:
return 0
elif other == 1:
return self
else:
return LazyTensor.binary(other, self, "*", is_operator=True)
def __truediv__(self, other):
r"""
Broadcasted elementwise division - a binary operation.
``x / y`` returns a :class:`LazyTensor` that encodes, symbolically,
the elementwise division of ``x`` by ``y``.
"""
return self.binary(other, "/", is_operator=True)
def __rtruediv__(self, other):
r"""
Broadcasted elementwise division - a binary operation.
``x / y`` returns a :class:`LazyTensor` that encodes, symbolically,
the elementwise division of ``x`` by ``y``.
"""
if other == 0:
return 0
elif other == 1:
return self.unary("Inv")
else:
return LazyTensor.binary(other, self, "/", is_operator=True)
def __or__(self, other):
r"""
Euclidean scalar product - a binary operation.
``(x|y)`` returns a :class:`LazyTensor` that encodes, symbolically,
the scalar product of ``x`` and ``y`` which are assumed to have the same shape.
"""
return self.binary(other, "|", is_operator=True, dimres=1, dimcheck="same")
def __ror__(self, other):
r"""
Euclidean scalar product - a binary operation.
``(x|y)`` returns a :class:`LazyTensor` that encodes, symbolically,
the scalar product of ``x`` and ``y`` which are assumed to have the same shape.
"""
return LazyTensor.binary(other, self, "|", is_operator=True, dimres=1, dimcheck="same")
# Unary arithmetics --------------------------------------------------------
def __abs__(self):
r"""
Element-wise absolute value - a unary operation.
``abs(x)`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise absolute value of ``x``.
"""
return self.unary("Abs")
def abs(self):
r"""
Element-wise absolute value - a unary operation.
``x.abs()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise absolute value of ``x``.
"""
return abs(self)
def __neg__(self):
r"""
Element-wise minus - a unary operation.
``-x`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise opposite of ``x``.
"""
return self.unary("Minus")
# Simple functions ---------------------------------------------------------
def exp(self):
r"""
Element-wise exponential - a unary operation.
``x.exp()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise exponential of ``x``.
"""
return self.unary("Exp")
def log(self):
r"""
Element-wise logarithm - a unary operation.
``x.log()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise logarithm of ``x``.
"""
return self.unary("Log")
def xlogx(self):
r"""
Element-wise x*log(x) function - a unary operation.
``x.xlogx()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise ``x`` times logarithm of ``x`` (with value 0 at 0).
"""
return self.unary("XLogX")
def cos(self):
r"""
Element-wise cosine - a unary operation.
``x.cos()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise cosine of ``x``.
"""
return self.unary("Cos")
def sin(self):
r"""
Element-wise sine - a unary operation.
``x.sin()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise sine of ``x``.
"""
return self.unary("Sin")
def sqrt(self):
r"""
Element-wise square root - a unary operation.
``x.sqrt()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise square root of ``x``.
"""
return self.unary("Sqrt")
def rsqrt(self):
r"""
Element-wise inverse square root - a unary operation.
``x.rsqrt()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise inverse square root of ``x``.
"""
return self.unary("Rsqrt")
def __pow__(self, other):
r"""
Broadcasted element-wise power operator - a binary operation.
``x**y`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise value of ``x`` to the power ``y``.
Note:
- if **y = 2**, ``x**y`` relies on the ``"Square"`` KeOps operation;
- if **y = 0.5**, ``x**y`` uses on the ``"Sqrt"`` KeOps operation;
- if **y = -0.5**, ``x**y`` uses on the ``"Rsqrt"`` KeOps operation.
"""
if type(other) == int:
return self.unary("Square") if other == 2 else self.unary("Pow", opt_arg=other)
elif type(other) == float:
if other == .5:
return self.unary("Sqrt")
elif other == -.5:
return self.unary("Rsqrt")
else:
other = LazyTensor(other)
if type(other) == type(LazyTensor()):
if other.ndim == 1 or other.ndim == self.ndim:
return self.binary(other, "Powf", dimcheck=None)
else:
raise ValueError("Incompatible dimensions for the LazyTensor and its exponent: " \
+ "{} and {}.".format(self.ndim, other.ndim))
else:
raise ValueError("The exponent should be an integer, a float number or a LazyTensor.")
def power(self, other):
r"""
Broadcasted element-wise power operator - a binary operation.
``pow(x,y)`` is equivalent to ``x**y``.
"""
return self ** other
def square(self):
r"""
Element-wise square - a unary operation.
``x.square()`` is equivalent to ``x**2`` and returns a :class:`LazyTensor`
that encodes, symbolically, the element-wise square of ``x``.
"""
return self.unary("Square")
def sign(self):
r"""
Element-wise sign in {-1,0,+1} - a unary operation.
``x.sign()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise sign of ``x``.
"""
return self.unary("Sign")
def step(self):
r"""
Element-wise step function - a unary operation.
``x.step()`` returns a :class:`LazyTensor` that encodes, symbolically,
        the element-wise step function (Heaviside) of ``x``.
"""
return self.unary("Step")
def relu(self):
r"""
Element-wise ReLU function - a unary operation.
``x.relu()`` returns a :class:`LazyTensor` that encodes, symbolically,
the element-wise positive part of ``x``.
"""
return self.unary("ReLU")
def sqnorm2(self):
r"""
Squared Euclidean norm - a unary operation.
``x.sqnorm2()`` returns a :class:`LazyTensor` that encodes, symbolically,
the squared Euclidean norm of a vector ``x``.
"""
return self.unary("SqNorm2", dimres=1)
def norm2(self):
r"""
Euclidean norm - a unary operation.
``x.norm2()`` returns a :class:`LazyTensor` that encodes, symbolically,
the Euclidean norm of a vector ``x``.
"""
return self.unary("Norm2", dimres=1)
def norm(self, dim):
r"""
Euclidean norm - a unary operation.
``x.norm(-1)`` is equivalent to ``x.norm2()`` and returns a
:class:`LazyTensor` that encodes, symbolically, the Euclidean norm of a vector ``x``.
"""
if dim not in [-1, len(self._shape) - 1]:
raise ValueError("KeOps only supports norms over the last dimension.")
return self.norm2()
def normalize(self):
r"""
Vector normalization - a unary operation.
``x.normalize()`` returns a :class:`LazyTensor` that encodes, symbolically,
a vector ``x`` divided by its Euclidean norm.
"""
return self.unary("Normalize")
def sqdist(self, other):
r"""
Squared distance - a binary operation.
``x.sqdist(y)`` returns a :class:`LazyTensor` that encodes, symbolically,
the squared Euclidean distance between two vectors ``x`` and ``y``.
"""
return self.binary(other, "SqDist", dimres=1)
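    # Typical pattern built on sqdist (comment only; the arrays below are
    # illustrative assumptions):
    #
    #     x_i, y_j, b_j = Vi(x), Vj(y), Vj(b)    # x: (M, D), y: (N, D), b: (N, E)
    #     K_ij = (-x_i.sqdist(y_j) / 2).exp()    # symbolic (M, N) Gaussian kernel
    #     out = (K_ij * b_j).sum(axis=1)         # dense (M, E) kernel-weighted sum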
def weightedsqnorm(self, other):
r"""
Weighted squared norm - a binary operation.
``LazyTensor.weightedsqnorm(s, x)`` returns a :class:`LazyTensor` that encodes, symbolically,
the weighted squared Norm of a vector ``x`` - see
the :doc:`main reference page <../../../api/math-operations>` for details.
"""
if type(self) != type(LazyTensor()):
self = LazyTensor(self)
if self.ndim not in (1, other.ndim, other.ndim ** 2):
raise ValueError("Squared norm weights should be of size 1 (scalar), " \
+ "D (diagonal) or D^2 (full symmetric tensor), but received " \
+ "{} with D={}.".format(self.ndim, other.ndim))
return self.binary(other, "WeightedSqNorm", dimres=1, dimcheck=None)
def weightedsqdist(self, f, g):
r"""
Weighted squared distance.
``LazyTensor.weightedsqdist(s, x, y)`` is equivalent to ``LazyTensor.weightedsqnorm(s, x - y)``.
"""
return self.weightedsqnorm(f - g)
def elem(self, i):
r"""
Indexing of a vector - a unary operation.
``x.elem(i)`` returns a :class:`LazyTensor` that encodes, symbolically,
the i-th element ``x[i]`` of the vector ``x``.
"""
if type(i) is not int:
raise ValueError("Elem indexing is only supported for integer indices.")
if i < 0 or i >= self.ndim:
raise ValueError("Index i={} is out of bounds [0,D) = [0,{}).".format(i, self.ndim))
return self.unary("Elem", dimres=1, opt_arg=i)
def extract(self, i, d):
r"""
Range indexing - a unary operation.
``x.extract(i, d)`` returns a :class:`LazyTensor` that encodes, symbolically,
the sub-vector ``x[i:i+d]`` of the vector ``x``.
"""
if (type(i) is not int) or (type(d) is not int):
raise ValueError("Indexing is only supported for integer indices.")
if i < 0 or i >= self.ndim:
raise ValueError("Starting index is out of bounds.")
if d < 1 or i + d > self.ndim:
raise ValueError("Slice dimension is out of bounds.")
return self.unary("Extract", dimres=d, opt_arg=i, opt_arg2=d)
def __getitem__(self, key):
r"""
Element or range indexing - a unary operation.
``x[key]`` redirects to the :meth:`elem` or :meth:`extract` methods, depending on the ``key`` argument.
Supported values are:
- an integer ``k``, in which case ``x[key]``
redirects to ``elem(x,k)``,
- a tuple ``..,:,:,k`` with ``k`` an integer,
which is equivalent to the case above,
- a slice of the form ``k:l``, ``k:`` or ``:l``, with ``k``
and ``l`` two integers, in which case ``x[key]`` redirects to ``extract(x,k,l-k)``,
- a tuple of slices of the form ``..,:,:,k:l``, ``..,:,:,k:`` or ``..,:,:,:l``,
with ``k`` and ``l`` two integers, which are equivalent to the case above.
"""
# we allow only these forms:
# [..,:,:,k], [..,:,:,k:l], [..,:,:,k:], [..,:,:,:l]
# or equivalent [k], [k:l], [k:], [:l]
if isinstance(key, tuple):
if len(key) == len(self._shape) and key[:-1] == (slice(None),) * (len(self._shape) - 1):
key = key[-1]
else:
raise ValueError("LazyTensors only support indexing with respect to their last dimension.")
if isinstance(key, slice):
if not key.step in [None, 1]:
raise ValueError("LazyTensors do not support sliced indexing with stepsizes > 1.")
if key.start is None:
key = slice(0, key.stop)
if key.stop is None:
key = slice(key.start, self.ndim)
return self.extract(key.start, key.stop - key.start)
elif isinstance(key, int):
return self.elem(key)
else:
raise ValueError("LazyTensors only support indexing with integers and vanilla python slices.")
def one_hot(self, D):
r"""
Encodes a (rounded) scalar value as a one-hot vector of dimension D.
``x.one_hot(D)`` returns a :class:`LazyTensor` that encodes, symbolically,
a vector of length D whose round(x)-th coordinate is equal to 1, and the other ones to zero.
"""
if type(D) is not int:
raise ValueError("One-hot encoding expects an integer dimension of the output vector.")
if self.ndim != 1:
raise ValueError("One-hot encoding is only supported for scalar formulas.")
return self.unary("OneHot", dimres=D, opt_arg=D)
def concat(self, other):
r"""
Concatenation of two :class:`LazyTensor` - a binary operation.
``x.concat(y)`` returns a :class:`LazyTensor` that encodes, symbolically,
the concatenation of ``x`` and ``y`` along their last dimension.
"""
return self.binary(other, "Concat", dimres=(self.ndim + other.ndim), dimcheck=None)
def concatenate(self, axis=-1):
r"""
Concatenation of a tuple of :class:`LazyTensor`.
``LazyTensor.concatenate( (x_1, x_2, ..., x_n), -1)`` returns a :class:`LazyTensor` that encodes, symbolically,
the concatenation of ``x_1``, ``x_2``, ..., ``x_n`` along their last dimension.
Note that **axis** should be equal to -1 or 2 (if the ``x_i``'s are 3D LazyTensor):
LazyTensors only support concatenation and indexing operations with respect
to the last dimension.
"""
if axis not in [-1, len(self._shape) - 1]:
raise ValueError("LazyTensor only supports concatenation along the last axis.")
if isinstance(self, tuple):
if len(self) == 0:
raise ValueError("Received an empty tuple of LazyTensors.")
elif len(self) == 1:
return self
elif len(self) == 2:
return self[0].concat(self[1])
else:
                return LazyTensor.concatenate((self[0].concat(self[1]),) + self[2:], axis=-1)
else:
raise ValueError("LazyTensor.concatenate is implemented on *tuples* of LazyTensors.")
def cat(self, dim):
r"""
Concatenation of a tuple of LazyTensors.
``LazyTensor.cat( (x_1, x_2, ..., x_n), -1)``
is a PyTorch-friendly alias for ``LazyTensor.concatenate( (x_1, x_2, ..., x_n), -1)``;
just like indexing operations, it is only supported along the last dimension.
"""
return LazyTensor.concatenate(self, dim)
def matvecmult(self, other):
r"""
Matrix-vector product - a binary operation.
If ``x._shape[-1] == A*B`` and ``y._shape[-1] == B``,
``z = x.matvecmult(y)`` returns a :class:`LazyTensor`
such that ``z._shape[-1] == A`` which encodes, symbolically,
the matrix-vector product of ``x`` and ``y`` along their last dimension.
For details, please check the documentation of the KeOps operation ``"MatVecMult"`` in
the :doc:`main reference page <../../../api/math-operations>`.
"""
return self.binary(other, "MatVecMult", dimres=(self.ndim // other.ndim), dimcheck=None)
def vecmatmult(self, other):
r"""
Vector-matrix product - a binary operation.
If ``x._shape[-1] == A`` and ``y._shape[-1] == A*B``,
``z = x.vecmatmult(y)`` returns a :class:`LazyTensor`
such that ``z._shape[-1] == B`` which encodes, symbolically,
the vector-matrix product of ``x`` and ``y`` along their last dimension.
        For details, please check the documentation of the KeOps operation ``"VecMatMult"`` in
the :doc:`main reference page <../../../api/math-operations>`.
"""
return self.binary(other, "VecMatMult", dimres=(other.ndim // self.ndim), dimcheck=None)
def tensorprod(self, other):
r"""
Tensor product of vectors - a binary operation.
If ``x._shape[-1] == A`` and ``y._shape[-1] == B``,
``z = x.tensorprod(y)`` returns a :class:`LazyTensor`
such that ``z._shape[-1] == A*B`` which encodes, symbolically,
the tensor product of ``x`` and ``y`` along their last dimension.
For details, please check the documentation of the KeOps operation ``"TensorProd"`` in
the :doc:`main reference page <../../../api/math-operations>`.
"""
return self.binary(other, "TensorProd", dimres=(other.ndim * self.ndim), dimcheck=None)
def keops_tensordot(self, other, dimfa, dimfb, contfa, contfb, *args):
"""
Tensor dot product (on KeOps internal dimensions) - a binary operation.
:param other: a LazyTensor
:param dimfa: tuple of int
:param dimfb: tuple of int
:param contfa: tuple of int listing contraction dimension of a (could be empty)
:param contfb: tuple of int listing contraction dimension of b (could be empty)
:param args: a tuple of int containing the graph of a permutation of the output
:return:
"""
# permute = tuple(range(len(dimfa) + len(dimfb) - 2 * len(contfa)))
opt_arg = ""
for intseq in (dimfa, dimfb, contfa, contfb) + args:
opt_arg += "Ind("
if isinstance(intseq,int):
intseq = (intseq,) # convert to tuple
for item in intseq:
opt_arg += "{},".format(item)
opt_arg = opt_arg[:-1] + "), " # to remove last comma
opt_arg = opt_arg[:-2] # to remove last comma and space
dimres = np.array(dimfa).prod() * np.array(dimfb).prod() / np.array(dimfa)[np.array(contfa)].prod() ** 2
return self.binary(other, "TensorDot", dimres=dimres, dimcheck=None, opt_arg=opt_arg)
def grad(self, other, gradin):
r"""
Symbolic gradient operation.
``z = x.grad(v,e)`` returns a :class:`LazyTensor`
which encodes, symbolically,
the gradient (more precisely, the adjoint of the differential operator) of ``x``, with
respect to variable ``v``, and applied to ``e``.
For details, please check the documentation of the KeOps operation ``"Grad"`` in
the :doc:`main reference page <../../../api/math-operations>`.
"""
return self.binary(gradin, "Grad", dimres=other.ndim, dimcheck="same", opt_arg=other, opt_pos="middle")
# List of supported reductions ============================================
def sum(self, axis=-1, dim=None, **kwargs):
r"""
Summation unary operation, or Sum reduction.
``sum(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the sum reduction of **self** over the "i" indexes.
- if **axis or dim = 1**, return the sum reduction of **self** over the "j" indexes.
- if **axis or dim = 2**, return a new :class:`LazyTensor` object representing the sum of the values of the vector **self**.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
1 (= reduction over :math:`j`) or 2 (i.e. -1, sum along the
dimension of the vector variable).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
if dim is not None:
axis = dim
if axis in [-1, len(self._shape) - 1]:
return self.unary("Sum", dimres=1)
else:
return self.reduction("Sum", axis=axis, **kwargs)
def sum_reduction(self, axis=None, dim=None, **kwargs):
r"""
Sum reduction.
``sum_reduction(axis, dim, **kwargs)`` will return the sum reduction of **self**.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("Sum", axis=axis, dim=dim, **kwargs)
def logsumexp(self, axis=None, dim=None, weight=None, **kwargs):
r"""
Log-Sum-Exp reduction.
``logsumexp(axis, dim, weight, **kwargs)`` will:
- if **axis or dim = 0**, return the "log-sum-exp" reduction of **self** over the "i" indexes.
- if **axis or dim = 1**, return the "log-sum-exp" reduction of **self** over the "j" indexes.
For details, please check the documentation of the KeOps reductions ``LogSumExp`` and ``LogSumExpWeight`` in
the :doc:`main reference page <../../../api/math-operations>`.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
weight (:class:`LazyTensor`): optional object that specifies scalar or vector-valued weights
in the log-sum-exp operation
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
if weight is None:
return self.reduction("LogSumExp", axis=axis, dim=dim, **kwargs)
else:
return self.reduction("LogSumExp", other=weight, axis=axis, dim=dim, **kwargs)
def logsumexp_reduction(self, **kwargs):
r"""
Log-Sum-Exp reduction. Redirects to :meth:`logsumexp` method.
"""
return self.logsumexp(**kwargs)
def sumsoftmaxweight(self, weight, axis=None, dim=None, **kwargs):
r"""
Sum of weighted Soft-Max reduction.
``sumsoftmaxweight(weight, axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the "sum of weighted Soft-Max" reduction of **self** over the "i" indexes.
- if **axis or dim = 1**, return the "sum of weighted Soft-Max" reduction of **self** over the "j" indexes.
For details, please check the documentation of the KeOps reduction ``SumSoftMaxWeight`` in
the :doc:`main reference page <../../../api/math-operations>`.
Keyword Args:
weight (:class:`LazyTensor`): object that specifies scalar or vector-valued weights.
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("SumSoftMaxWeight", other=weight, axis=axis, dim=dim, **kwargs)
def sumsoftmaxweight_reduction(self, **kwargs):
r"""
Sum of weighted Soft-Max reduction. Redirects to :meth:`sumsoftmaxweight` method.
"""
return self.sumsoftmaxweight(**kwargs)
def min(self, axis=None, dim=None, **kwargs):
r"""
Min reduction.
``min(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the minimal values of **self** over the "i" indexes.
- if **axis or dim = 1**, return the minimal values of **self** over the "j" indexes.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("Min", axis=axis, dim=dim, **kwargs)
def min_reduction(self, **kwargs):
r"""
Min reduction. Redirects to :meth:`min` method.
"""
return self.min(**kwargs)
def __min__(self, **kwargs):
r"""
Min reduction. Redirects to :meth:`min` method.
"""
return self.min(**kwargs)
def argmin(self, axis=None, dim=None, **kwargs):
r"""
ArgMin reduction.
``argmin(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the indices of minimal values of **self** over the "i" indexes.
- if **axis or dim = 1**, return the indices of minimal values of **self** over the "j" indexes.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("ArgMin", axis=axis, dim=dim, **kwargs)
def argmin_reduction(self, **kwargs):
r"""
ArgMin reduction. Redirects to :meth:`argmin` method.
"""
return self.argmin(**kwargs)
def min_argmin(self, axis=None, dim=None, **kwargs):
r"""
Min-ArgMin reduction.
``min_argmin(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the minimal values and its indices of **self** over the "i" indexes.
- if **axis or dim = 1**, return the minimal values and its indices of **self** over the "j" indexes.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("Min_ArgMin", axis=axis, dim=dim, **kwargs)
def min_argmin_reduction(self, **kwargs):
r"""
Min-ArgMin reduction. Redirects to :meth:`min_argmin` method.
"""
return self.min_argmin(**kwargs)
def max(self, axis=None, dim=None, **kwargs):
r"""
Max reduction.
``max(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the maximal values of **self** over the "i" indexes.
- if **axis or dim = 1**, return the maximal values of **self** over the "j" indexes.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("Max", axis=axis, dim=dim, **kwargs)
def max_reduction(self, **kwargs):
r"""
Max reduction. Redirects to :meth:`max` method.
"""
return self.max(**kwargs)
def __max__(self, **kwargs):
r"""
Max reduction. Redirects to :meth:`max` method.
"""
return self.max(**kwargs)
def argmax(self, axis=None, dim=None, **kwargs):
r"""
ArgMax reduction.
``argmax(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the indices of maximal values of **self** over the "i" indexes.
- if **axis or dim = 1**, return the indices of maximal values of **self** over the "j" indexes.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("ArgMax", axis=axis, dim=dim, **kwargs)
def argmax_reduction(self, **kwargs):
r"""
ArgMax reduction. Redirects to :meth:`argmax` method.
"""
return self.argmax(**kwargs)
def max_argmax(self, axis=None, dim=None, **kwargs):
r"""
Max-ArgMax reduction.
``max_argmax(axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the maximal values and its indices of **self** over the "i" indexes.
- if **axis or dim = 1**, return the maximal values and its indices of **self** over the "j" indexes.
Keyword Args:
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("Max_ArgMax", axis=axis, dim=dim, **kwargs)
def max_argmax_reduction(self, **kwargs):
r"""
Max-ArgMax reduction. Redirects to :meth:`max_argmax` method.
"""
return self.max_argmax(**kwargs)
def Kmin(self, K, axis=None, dim=None, **kwargs):
r"""
K-Min reduction.
``Kmin(K, axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the K minimal values of **self** over the "i" indexes.
- if **axis or dim = 1**, return the K minimal values of **self** over the "j" indexes.
Keyword Args:
K (integer): number of minimal values required
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("KMin", opt_arg=K, axis=axis, dim=dim, **kwargs)
def Kmin_reduction(self, **kwargs):
r"""
Kmin reduction. Redirects to :meth:`Kmin` method.
"""
return self.Kmin(**kwargs)
def argKmin(self, K, axis=None, dim=None, **kwargs):
r"""
argKmin reduction.
``argKmin(K, axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the indices of the K minimal values of **self** over the "i" indexes.
- if **axis or dim = 1**, return the indices of the K minimal values of **self** over the "j" indexes.
Keyword Args:
K (integer): number of minimal values required
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("ArgKMin", opt_arg=K, axis=axis, dim=dim, **kwargs)
def argKmin_reduction(self, **kwargs):
r"""
argKmin reduction. Redirects to :meth:`argKmin` method.
"""
return self.argKmin(**kwargs)
def Kmin_argKmin(self, K, axis=None, dim=None, **kwargs):
r"""
K-Min-argK-min reduction.
``Kmin_argKmin(K, axis, dim, **kwargs)`` will:
- if **axis or dim = 0**, return the K minimal values and its indices of **self** over the "i" indexes.
- if **axis or dim = 1**, return the K minimal values and its indices of **self** over the "j" indexes.
Keyword Args:
K (integer): number of minimal values required
axis (integer): reduction dimension, which should be equal to the number
of batch dimensions plus 0 (= reduction over :math:`i`),
or 1 (= reduction over :math:`j`).
dim (integer): alternative keyword for the axis parameter.
**kwargs: optional parameters that are passed to the :meth:`reduction` method.
"""
return self.reduction("KMin_ArgKMin", opt_arg=K, axis=axis, dim=dim, **kwargs)
def Kmin_argKmin_reduction(self, **kwargs):
r"""
Kmin_argKmin reduction. Redirects to :meth:`Kmin_argKmin` method.
"""
return self.Kmin_argKmin(**kwargs)
# LazyTensors as linear operators =========================================
def __matmul__(self, v):
r"""
Matrix-vector or Matrix-matrix product, supporting batch dimensions.
If ``K`` is a :class:`LazyTensor` whose trailing dimension ``K._shape[-1]`` is equal to 1,
we can understand it as a linear operator and apply it to arbitrary NumPy arrays
or PyTorch Tensors. Assuming that ``v`` is a 1D (resp. ND) tensor such that
``K.shape[-1] == v.shape[-1]`` (resp. ``v.shape[-2]``), ``K @ v`` denotes the matrix-vector (resp. matrix-matrix)
product between the two objects, encoded as a vanilla NumPy or PyTorch 1D (resp. ND) tensor.
Example:
>>> x, y = torch.randn(1000, 3), torch.randn(2000, 3)
>>> x_i, y_j = LazyTensor( x[:,None,:] ), LazyTensor( y[None,:,:] )
>>> K = (- ((x_i - y_j)**2).sum(2) ).exp() # Symbolic (1000,2000,1) Gaussian kernel matrix
>>> v = torch.rand(2000, 2)
>>> print( (K @ v).shape )
... torch.Size([1000, 2])
"""
if self._shape[-1] != 1:
raise ValueError("The 'K @ v' syntax is only supported for LazyTensors " \
+ "'K' whose trailing dimension is equal to 1. Here, K.shape = {}.".format(self.shape))
if len(v.shape) == 1:
newdims = (1, v.shape[0], 1)
else:
newdims = v.shape[:-2] + (1,) + v.shape[-2:]
v_ = LazyTensor(self.tools.view( v, newdims ))
Kv = (self * v_ ) # Supports broadcasting
Kv = Kv.sum( Kv.dim() - 2 ) # Matrix-vector or Matrix-matrix product
# Expected behavior: if v is a vector, so should K @ v.
return self.tools.view( Kv, -1 ) if len(v.shape) == 1 else Kv
def t(self):
r"""
Matrix transposition, permuting the axes of :math:`i`- and :math:`j`-variables.
For instance, if ``K`` is a LazyTensor of shape ``(B,M,N,D)``,
``K.t()`` returns a symbolic copy of ``K`` whose axes 1 and 2 have
been switched with each other: ``K.t().shape == (B,N,M,D)``.
Example:
>>> x, y = torch.randn(1000, 3), torch.randn(2000, 3)
>>> x_i, y_j = LazyTensor( x[:,None,:] ), LazyTensor( y[None,:,:] )
>>> K = (- (( x_i - y_j )**2).sum(2) ).exp() # Symbolic (1000,2000) Gaussian kernel matrix
>>> K_ = (- ((x[:,None,:] - y[None,:,:])**2).sum(2) ).exp() # Explicit (1000,2000) Gaussian kernel matrix
>>> w = torch.rand(1000, 2)
>>> print( (K.t() @ w - K_.t() @ w).abs().mean() )
... tensor(1.7185e-05)
"""
res = copy.copy(self)
res.ni, res.nj = res.nj, res.ni # Switch the "M" and "N" dimensions
res.ranges = res.tools.swap_axes(res.ranges)
if res.axis == 0:
res.axis = 1
elif res.axis == 1:
res.axis = 0
if res.formula is not None: # Switch variables with CAT=0 and CAT=1
res.formula = re.sub(r"(Var|VarSymb)\((\d+),(\d+),0\)", r"\1(\2,\3,i)", res.formula)
res.formula = re.sub(r"(Var|VarSymb)\((\d+),(\d+),1\)", r"\1(\2,\3,0)", res.formula)
res.formula = re.sub(r"(Var|VarSymb)\((\d+),(\d+),i\)", r"\1(\2,\3,1)", res.formula)
if res.formula2 is not None: # Switch variables with CAT=0 and CAT=1
res.formula2 = re.sub(r"(Var|VarSymb)\((\d+),(\d+),0\)", r"\1(\2,\3,i)", res.formula2)
res.formula2 = re.sub(r"(Var|VarSymb)\((\d+),(\d+),1\)", r"\1(\2,\3,0)", res.formula2)
res.formula2 = re.sub(r"(Var|VarSymb)\((\d+),(\d+),i\)", r"\1(\2,\3,1)", res.formula2)
return res
@property
def T(self):
r"""
Numpy-friendly alias for the matrix transpose ``self.t()``.
"""
return self.t()
def matvec(self, v):
r"""
Alias for the matrix-vector product, added for compatibility with :mod:`scipy.sparse.linalg`.
If ``K`` is a :class:`LazyTensor` whose trailing dimension ``K._shape[-1]`` is equal to 1,
we can understand it as a linear operator and wrap it into a
:mod:`scipy.sparse.linalg.LinearOperator` object, thus getting access
to robust solvers and spectral routines.
Example:
>>> import numpy as np
>>> x = np.random.randn(1000,3)
>>> x_i, x_j = LazyTensor( x[:,None,:] ), LazyTensor( x[None,:,:] )
>>> K_xx = (- ((x_i - x_j)**2).sum(2) ).exp() # Symbolic (1000,1000) Gaussian kernel matrix
>>> from scipy.sparse.linalg import eigsh, aslinearoperator
>>> eigenvalues, eigenvectors = eigsh( aslinearoperator( K_xx ), k=5 )
>>> print(eigenvalues)
... [ 35.5074527 59.01096445 61.35075268 69.34038814 123.77540277]
>>> print( eigenvectors.shape)
... (1000, 5)
"""
return self @ v
def rmatvec(self, v):
r"""
Alias for the transposed matrix-vector product, added for compatibility with :mod:`scipy.sparse.linalg`.
See :meth:`matvec` for further reference.
"""
return self.T @ v
|
the-stack_0_20171 | import os
root = "music"
for path, dirs, files in os.walk(root, topdown=True):
if files:
print(path)
first_split = os.path.split(path)
print(first_split)
second_split = os.path.split(first_split[0])
print(second_split)
for f in files:
song_details = f[:-5].split(' - ')
print(song_details)
print("*" * 50) |
the-stack_0_20172 | # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
# mf6/utils/createpackages.py
# FILE created on December 22, 2021 17:36:26 UTC
from .. import mfpackage
from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator
class ModflowGwtdisv(mfpackage.MFPackage):
"""
ModflowGwtdisv defines a disv package within a gwt6 model.
Parameters
----------
model : MFModel
Model that this package is a part of. Package is automatically
added to model when it is initialized.
loading_package : bool
Do not set this parameter. It is intended for debugging and internal
processing purposes only.
length_units : string
* length_units (string) is the length units used for this model. Values
can be "FEET", "METERS", or "CENTIMETERS". If not specified, the
default is "UNKNOWN".
nogrb : boolean
* nogrb (boolean) keyword to deactivate writing of the binary grid
file.
xorigin : double
* xorigin (double) x-position of the origin used for model grid
vertices. This value should be provided in a real-world coordinate
system. A default value of zero is assigned if not specified. The
value for XORIGIN does not affect the model simulation, but it is
written to the binary grid file so that postprocessors can locate the
grid in space.
yorigin : double
* yorigin (double) y-position of the origin used for model grid
vertices. This value should be provided in a real-world coordinate
system. If not specified, then a default value equal to zero is used.
The value for YORIGIN does not affect the model simulation, but it is
written to the binary grid file so that postprocessors can locate the
grid in space.
angrot : double
* angrot (double) counter-clockwise rotation angle (in degrees) of the
model grid coordinate system relative to a real-world coordinate
system. If not specified, then a default value of 0.0 is assigned.
The value for ANGROT does not affect the model simulation, but it is
written to the binary grid file so that postprocessors can locate the
grid in space.
nlay : integer
* nlay (integer) is the number of layers in the model grid.
ncpl : integer
* ncpl (integer) is the number of cells per layer. This is a constant
value for the grid and it applies to all layers.
nvert : integer
* nvert (integer) is the total number of (x, y) vertex pairs used to
characterize the horizontal configuration of the model grid.
top : [double]
* top (double) is the top elevation for each cell in the top model
layer.
botm : [double]
* botm (double) is the bottom elevation for each cell.
idomain : [integer]
* idomain (integer) is an optional array that characterizes the
existence status of a cell. If the IDOMAIN array is not specified,
then all model cells exist within the solution. If the IDOMAIN value
for a cell is 0, the cell does not exist in the simulation. Input and
output values will be read and written for the cell, but internal to
the program, the cell is excluded from the solution. If the IDOMAIN
value for a cell is 1, the cell exists in the simulation. If the
IDOMAIN value for a cell is -1, the cell does not exist in the
simulation. Furthermore, the first existing cell above will be
connected to the first existing cell below. This type of cell is
referred to as a "vertical pass through" cell.
vertices : [iv, xv, yv]
* iv (integer) is the vertex number. Records in the VERTICES block must
be listed in consecutive order from 1 to NVERT. This argument is an
index variable, which means that it should be treated as zero-based
when working with FloPy and Python. Flopy will automatically subtract
one when loading index variables and add one when writing index
variables.
* xv (double) is the x-coordinate for the vertex.
* yv (double) is the y-coordinate for the vertex.
cell2d : [icell2d, xc, yc, ncvert, icvert]
* icell2d (integer) is the CELL2D number. Records in the CELL2D block
must be listed in consecutive order from the first to the last. This
argument is an index variable, which means that it should be treated
as zero-based when working with FloPy and Python. Flopy will
automatically subtract one when loading index variables and add one
when writing index variables.
* xc (double) is the x-coordinate for the cell center.
* yc (double) is the y-coordinate for the cell center.
* ncvert (integer) is the number of vertices required to define the
cell. There may be a different number of vertices for each cell.
* icvert (integer) is an array of integer values containing vertex
numbers (in the VERTICES block) used to define the cell. Vertices
must be listed in clockwise order. Cells that are connected must
share vertices. This argument is an index variable, which means that
it should be treated as zero-based when working with FloPy and
Python. Flopy will automatically subtract one when loading index
variables and add one when writing index variables.
filename : String
File name for this package.
pname : String
Package name for this package.
parent_file : MFPackage
Parent package file that references this package. Only needed for
utility packages (mfutl*). For example, mfutllaktab package must have
a mfgwflak package parent_file.
"""
top = ArrayTemplateGenerator(("gwt6", "disv", "griddata", "top"))
botm = ArrayTemplateGenerator(("gwt6", "disv", "griddata", "botm"))
idomain = ArrayTemplateGenerator(("gwt6", "disv", "griddata", "idomain"))
vertices = ListTemplateGenerator(("gwt6", "disv", "vertices", "vertices"))
cell2d = ListTemplateGenerator(("gwt6", "disv", "cell2d", "cell2d"))
package_abbr = "gwtdisv"
_package_type = "disv"
dfn_file_name = "gwt-disv.dfn"
dfn = [
[
"header",
],
[
"block options",
"name length_units",
"type string",
"reader urword",
"optional true",
],
[
"block options",
"name nogrb",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name xorigin",
"type double precision",
"reader urword",
"optional true",
],
[
"block options",
"name yorigin",
"type double precision",
"reader urword",
"optional true",
],
[
"block options",
"name angrot",
"type double precision",
"reader urword",
"optional true",
],
[
"block dimensions",
"name nlay",
"type integer",
"reader urword",
"optional false",
],
[
"block dimensions",
"name ncpl",
"type integer",
"reader urword",
"optional false",
],
[
"block dimensions",
"name nvert",
"type integer",
"reader urword",
"optional false",
],
[
"block griddata",
"name top",
"type double precision",
"shape (ncpl)",
"reader readarray",
],
[
"block griddata",
"name botm",
"type double precision",
"shape (nlay, ncpl)",
"reader readarray",
"layered true",
],
[
"block griddata",
"name idomain",
"type integer",
"shape (nlay, ncpl)",
"reader readarray",
"layered true",
"optional true",
],
[
"block vertices",
"name vertices",
"type recarray iv xv yv",
"reader urword",
"optional false",
],
[
"block vertices",
"name iv",
"type integer",
"in_record true",
"tagged false",
"reader urword",
"optional false",
"numeric_index true",
],
[
"block vertices",
"name xv",
"type double precision",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block vertices",
"name yv",
"type double precision",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block cell2d",
"name cell2d",
"type recarray icell2d xc yc ncvert icvert",
"reader urword",
"optional false",
],
[
"block cell2d",
"name icell2d",
"type integer",
"in_record true",
"tagged false",
"reader urword",
"optional false",
"numeric_index true",
],
[
"block cell2d",
"name xc",
"type double precision",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block cell2d",
"name yc",
"type double precision",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block cell2d",
"name ncvert",
"type integer",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block cell2d",
"name icvert",
"type integer",
"shape (ncvert)",
"in_record true",
"tagged false",
"reader urword",
"optional false",
"numeric_index true",
],
]
def __init__(
self,
model,
loading_package=False,
length_units=None,
nogrb=None,
xorigin=None,
yorigin=None,
angrot=None,
nlay=None,
ncpl=None,
nvert=None,
top=None,
botm=None,
idomain=None,
vertices=None,
cell2d=None,
filename=None,
pname=None,
parent_file=None,
):
super().__init__(
model, "disv", filename, pname, loading_package, parent_file
)
# set up variables
self.length_units = self.build_mfdata("length_units", length_units)
self.nogrb = self.build_mfdata("nogrb", nogrb)
self.xorigin = self.build_mfdata("xorigin", xorigin)
self.yorigin = self.build_mfdata("yorigin", yorigin)
self.angrot = self.build_mfdata("angrot", angrot)
self.nlay = self.build_mfdata("nlay", nlay)
self.ncpl = self.build_mfdata("ncpl", ncpl)
self.nvert = self.build_mfdata("nvert", nvert)
self.top = self.build_mfdata("top", top)
self.botm = self.build_mfdata("botm", botm)
self.idomain = self.build_mfdata("idomain", idomain)
self.vertices = self.build_mfdata("vertices", vertices)
self.cell2d = self.build_mfdata("cell2d", cell2d)
self._init_complete = True
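# Illustrative sketch, not part of this generated file (the simulation setup and grid arrays
# are assumptions): the DISV package is normally attached to a GWT model of an MF6
# simulation, mirroring the __init__ signature above.
#
#   sim = flopy.mf6.MFSimulation(sim_name="demo")
#   gwt = flopy.mf6.ModflowGwt(sim, modelname="demo")
#   disv = ModflowGwtdisv(gwt, nlay=1, ncpl=len(cell2d), nvert=len(vertices),
#                         top=10.0, botm=0.0, vertices=vertices, cell2d=cell2d)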
|
the-stack_0_20173 | from bashfuscator.core.mutators.command_obfuscator import CommandObfuscator
from bashfuscator.core.mutators.command_obfuscator import Stub
class CaseSwapper(CommandObfuscator):
def __init__(self):
super().__init__(
name="Case Swapper",
description="Flips the case of all alpha chars",
sizeRating=1,
timeRating=1,
author="capnspacehook",
reversible=True
)
self.stubs = [
Stub(
name="bash case swap expansion",
sizeRating=1,
timeRating=1,
binariesUsed=[],
fileWrite=False,
escapeQuotes=True,
stub='''? ?VAR1='CMD'* *END0* *:printf:^ ^%s^ ^"${VAR1~~}"* *END0* *'''
)
]
def mutate(self, userCmd):
obCmd = userCmd.swapcase()
return self.deobStub.genStub(obCmd)
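# Illustrative sketch, not part of the original module: mutate() swaps the case of the whole
# payload and genStub() wraps it in the ${VAR1~~} case-swap expansion selected above, so the
# command deobfuscates itself at run time.
#
#   swapper = CaseSwapper()
#   obfuscated = swapper.mutate("cat /etc/passwd")   # payload stored as "CAT /ETC/PASSWD"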
|
the-stack_0_20175 | from pulsar.apps import rpc
from lux.forms import get_form_class  # assumed import path: get_form_class is used below but was not imported
def get_model(wsrequest):
"""Get a Rest model from a websocket rpc request
:param wsrequest:
:return: a :class:`~RestModel`
"""
model = wsrequest.pop_param('model')
restmodel = wsrequest.app.models.get(model)
if not restmodel:
raise rpc.InvalidParams('Model "%s" does not exist' % model)
return restmodel
class WsModelRpc:
def ws_model_metadata(self, wsrequest):
"""Metadata from a model
From the client::
client.rpc('model_metadata', {'model': '<mymodel'>})
:param request:
:return: object with metadata information
"""
model = get_model(wsrequest)
check_permission = wsrequest.resource(
model.identifier, 'read', model.fields())
return model.meta(wsrequest.wsgi_request,
check_permission=check_permission)
def ws_model_data(self, wsrequest):
"""Read data from a model
From the client::
client.rpc('model_data', {'model': '<mymodel'>})
:param request:
:return: object with metadata information
"""
model = get_model(wsrequest)
check_permission = wsrequest.resource(
model.identifier, 'read', model.fields())
request = wsrequest.wsgi_request
return model.query_data(request,
check_permission=check_permission,
**wsrequest.params)
def ws_model_create(self, wsrequest):
"""Create a new model
From the client::
client.rpc('model_create', {'field1': 'foo', ...})
:param request:
:return: object with metadata information
"""
model = get_model(wsrequest)
request = wsrequest.wsgi_request
form_class = get_form_class(request, model.form)
if not form_class:
raise rpc.InvalidRequest('cannot create model %s' % model.name)
fields = wsrequest.check_permission(
model.name, 'create', model.fields())
# fields = model.column_fields(fields, 'name')
instance = model.instance(fields=fields)
form = form_class(request, data=wsrequest.params)
if form.is_valid():
with model.session(request) as session:
instance = model.create_model(request,
instance,
form.cleaned_data,
session=session)
return model.tojson(request, instance)
else:
raise rpc.InvalidParams(data=form.tojson())
|
the-stack_0_20176 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Extras',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('choice', models.CharField(max_length=69, verbose_name='valg')),
('note', models.CharField(max_length=200, null=True, verbose_name='notat', blank=True)),
],
options={
'ordering': ['choice'],
'verbose_name': 'ekstra valg',
'verbose_name_plural': 'ekstra valg',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='attendanceevent',
name='extras',
field=models.ManyToManyField(to='events.Extras', null=True, blank=True),
preserve_default=True,
),
]
|
the-stack_0_20177 | from __future__ import print_function
from timeit import timeit
from py_aho_corasick import py_aho_corasick
import builtins
from ahocorapy.keywordtree import KeywordTree
import ahocorasick
SEARCH_ITERATIONS = 100
with open('tests/data/names.txt') as keyword_file:
keyword_list = [keyword.strip() for keyword in keyword_file.readlines()]
with open('tests/data/textblob.txt') as keyword_file:
textblob = keyword_file.read()
print('-' * 10 + 'ahocorapy' + '-' * 10)
def init_ahocorapy():
kwtree = KeywordTree()
for keyword in keyword_list:
kwtree.add(keyword)
kwtree.finalize()
return kwtree
def search_ahocorapy(ahocorapy_tree, textblob):
result = ''
for keyword, _ in ahocorapy_tree.search_all(textblob):
result += keyword
return result
ahocorapy_tree = init_ahocorapy()
result = search_ahocorapy(ahocorapy_tree, textblob)
assert result == 'Dawn Higgins'
builtins.__dict__.update(locals())
print('setup_ahocorapy: ' +
str(timeit(stmt='init_ahocorapy()', number=1)))
print('search_ahocorapy: ' + str(timeit(stmt='search_ahocorapy(ahocorapy_tree, textblob)',
number=SEARCH_ITERATIONS)))
print('-' * 10 + 'pyahocorasick' + '-' * 10)
def init_ahocorasick():
A = ahocorasick.Automaton()
for keyword in keyword_list:
A.add_word(keyword, keyword)
A.make_automaton()
return A
def search_ahocorasick(ahocorasick_tree, textblob):
result = ''
for _, keyword in ahocorasick_tree.iter(textblob):
result += keyword
return result
ahocorasick_tree = init_ahocorasick()
result = search_ahocorasick(ahocorasick_tree, textblob)
assert result == 'Dawn Higgins'
builtins.__dict__.update(locals())
print('setup_pyahocorasick: ' +
str(timeit(stmt='init_ahocorasick()', number=1)))
print('search_pyahocorasick: ' + str(timeit(stmt='search_ahocorasick(ahocorasick_tree, textblob)',
number=SEARCH_ITERATIONS)))
print('-' * 10 + 'py_aho_corasick' + '-' * 10)
def init_py_aho_corasick():
return py_aho_corasick.Automaton(keyword_list)
def search_py_aho_corasick(py_aho_corasick_tree, textblob):
result = ''
for match in py_aho_corasick_tree.get_keywords_found(textblob):
result += match[1]
return result
py_aho_corasick_tree = init_py_aho_corasick()
result = search_py_aho_corasick(py_aho_corasick_tree, textblob)
assert result == 'dawn higgins'
builtins.__dict__.update(locals())
print('setup_py_aho_corasick: ' +
str(timeit(stmt='init_py_aho_corasick()', number=1)))
print('search_py_aho_corasick: ' + str(timeit(stmt='search_py_aho_corasick(py_aho_corasick_tree, textblob)',
number=SEARCH_ITERATIONS)))
|
the-stack_0_20179 | import collections
import collections.abc
import schematics
from export_util import (
template as tpl,
value as val
)
class Normalizer:
"""
Normalizer object formats data into ordered rows using provided template. Templates are building
using `export_lib.template` functionality. First what should be passed into nomralizer constructor
is the `export_lib.template.Object` instance. This is the way to say how to format each object
at the provided data list.
Each `export_lib.template.Object` takes `export_lib.template.Field` as an `field` argument, which
allows to understand which fields and in which order they should be rendered. Also `Object` has
other properties which are allows to specify how they should looks like in a table.
Example:
def create_duration(start_time, object_source):
""
This function formats duration column depending on the object
`cuesheet_start_time.$date` and `cuesheet_end_time.$date`.
When we know the amount of seconds duration, we formatting it into
the HH:MM:SS format.
:param start_time:
:param DataGetter object_source:
:return:
""
return time.strftime(
'%H:%M:%S',
time.gmtime(
int((object_source.get('cuesheet_end_time.$date') - start_time) / 1000)
)
)
def get_year_from_timestamp(milliseconds, object_source):
""
This function returns year from the provided timestamp.
:param milliseconds:
:param DataGetter object_source:
:return:
""
return time.strftime('%Y', time.gmtime(int(milliseconds / 1000)))
def verbose_boolean(boolean, object_source):
""
This function returns "NO" if `boolean` is False, and
returns "YES" if `boolean` is True.
:param boolean:
:param DataGetter object_source:
:return:
""
return 'YES' if boolean else 'NO'
def verbose_list(list_objects, object_source):
""
This function formats list of an objects into single string.
:param list_objects:
:param DataGetter object_source:
:return:
""
return ', '.join(map(lambda x: str(x), list_objects))
# This is how the data exporter should be initialized.
ex = Exporter(normalizer=Normalizer(
# This name - `OBJ_1`, will be used to describe hard logic.
tpl.Object(
# This is the number of column where the object should start render
col=1,
# If `titles` is True, then all headers of each field will be rendered before objects list.
titles=True,
# If you want to translate or rename some fields - use `translate` option. Note:
# 1. Keys of this dict would be used as field name and values as a replacement.
# 2. Keys are case-sensitive. So the field named `duration` will not be replaced in this example.
# 3. Translation dict will be automatically converted to DataGetter object. So you can pass not only
# field names, but path to replacement as of `DataGetter` specification too.
translate={
'Duration': 'Length',
'fields': {
'year': {
'title': 'Year'
}
}
},
# This is the object fields
fields=[
# Required. First argument of the Field object is the column number. It's related to parent Object
# col number, and always starts from 1.
tpl.Field(1, 'Production ID', '_id.$oid'),
# Required. The second argument is the field title / verbose_name.
tpl.Field(2, 'Source', 'cuesheet_channel'),
# Optional. The third argument is the where values is stored at the object. If you keep it empty -
# title will be renderred instead.
tpl.Field(3, 'Duration', 'cuesheet_start_time.$date', create_duration),
# Optional. The fourth argument is the callable function which takes field value as the first arg
# and the whole object `DataGetter` instance as the second argument. So you can compute absolutely
# new value from field value and any amount of other objects values.
tpl.Field(4, 'Year', 'updated_at.$date', get_year_from_timestamp),
tpl.Field(5, 'Free Music', 'free_music', verbose_boolean),
tpl.Field(6, 'Title', 'category.other_production.original_title'),
tpl.Field(7, 'Gema AVR', 'cuesheet_progress'),
tpl.Field(8, 'Country', 'production_country', verbose_list),
# Then we rendering child objects list starting from the first column of table. Each nested object
# is rendering under the parent object. So if you have more than one nested objects list to be
# rendered, you need to fold them in a one table, see description below.
#
# This name - `OBJ_2`, will be used to describe hard logic.
tpl.Object(
# Nested objects table start column
col=1,
# How much rows should be kept empty before rendering this table.
offset_top=5,
# This is the Object verbose_name. It's required for all nested objects but should be empty
# for the root.
verbose_name='Cuesheets',
# This is the path of the list of objects which are will be rendered.
path='cuesheet.cues',
# If `titles` is True, then all headers of each field will be rendered before objects list.
titles=True,
# This is the way to render multiple nested objects lists in a one table and not one under
# other. If you kept it empty or set it to False, child `Objects` will be rendered as stairs.
fold_nested=True,
# Let's say, we want to keep one row empty before rendering next nested object, to get more
# readable table.
offset_item=1,
# Object also has optional `preformat` parameter, which allows to pass callable object
# preprocessor. For example if you want to change something in the object before rendering.
# This is the nested object template fields.
fields=[
tpl.Field(1, 'Start Time', 'start_time'),
tpl.Field(2, 'Work ID', 'work_id'),
tpl.Field(3, 'Length', 'length'),
tpl.Field(4, 'Music Type', 'music_type'),
tpl.Field(5, 'Use', 'use'),
tpl.Field(6, 'Music Title', 'music_work.music_title'),
tpl.Field(7, 'Origin', 'music_work.origin'),
tpl.Field(8, 'Work ID', 'music_work.work_id.mpn_id'),
tpl.Field(9, 'Work ID Type', 'music_work.work_id.iswc'),
# This name - `OBJ_3`, will be used to describe hard logic.
tpl.Object(
col=10,
verbose_name='Authors',
path='music_work.author',
# Inline option is the way to render nested object title in a one line with the
# parent object. If you keep it empty, then parent object will have empty cells
# before this table.
inline=True,
titles=False,
fields=[
# Each field of this nested object, has numeration of columns starting from 1.
# It's made to simplify templates building. Nested objects column numbers are
# always relative to the parent object.
# This column number will be calculated in a next way:
# OBJ_1.column + OBJ_2.column + OBJ_3.column + self.column = 10
# It's hard to explain, just believe me it works as described.
tpl.Field(1, 'Name', 'name'),
tpl.Field(2, 'Rolle', 'rolle'),
]
),
# In the previous object, we have two fields placed horizontally. This means that previous
# object will take two columns of space. Then we need to give him a place and place next
# nested object on a column `prev.column+2`
tpl.Object(
col=12,
verbose_name='Publishers',
path='music_work.publisher',
inline=True,
titles=False,
fields=[
tpl.Field(1, 'Name', 'name'),
tpl.Field(2, 'Rolle', 'rolle'),
]
),
tpl.Object(
col=14,
verbose_name='Interpreters',
path='music_work.interpreter',
inline=True,
titles=False,
fields=[
tpl.Field(1, 'Name', 'name'),
tpl.Field(2, 'Rolle', 'rolle'),
]
),
]
)
]
)),
output=XLSXBytesOutputWriter(cols_dimensions={
'A': 28.06,
'B': 27.65,
'C': 10.0,
'D': 13.19,
'E': 11.25,
'F': 43.9,
'G': 13.89,
'H': 30.7,
'I': 14.72,
'J': 29.45,
'K': 8.67,
'L': 28.76,
'M': 8.67,
'N': 29.03,
'O': 8.67
})
)
*Preformat* argument is a function which takes two arguments. First - os the this column value, and the Second
is the whole object `DataGetter`. So you can compute any difficult values which are depends on other object values.
"""
def __init__(self, template: tpl.Object, *args, **kwargs):
"""
Create normalizer instance.
"""
self.template = template
def build_table(self, obj):
"""
Returns N rows which are representing this object due to provided.
template.
"""
yield from self.template.render(obj)
class SchematicsNormalizer(Normalizer):
"""
Creates object template from the schematics model.
"""
root_object_options = dict(
col=1,
titles=True,
fold_nested=True,
)
nested_object_options = dict(
titles=True,
inline=True,
fold_nested=True,
)
def __init__(self, model: schematics.Model, *args, **kwargs):
"""
Create objects template from schematics model.
"""
template = self._build_template(model, **kwargs)
super(SchematicsNormalizer, self).__init__(template, *args, **kwargs)
def _build_template(self, model: schematics.Model, **kwargs) -> tpl.Object:
"""
Creates object template from model.
"""
template_options = self.root_object_options
template_options.update(kwargs)
template = tpl.Object(**template_options)
for field, preformat in self._get_model_renderable_fields(model):
options = {}
if preformat is not None:
options['preformat'] = preformat
template.add_field(
field=self._create_field_template(
field=field,
parent=kwargs.get('parent'),
previous=template.fields[-1] if template.fields else None,
**options
)
)
return template
def _create_field_template(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
if isinstance(field, schematics.types.ListType) and not kwargs.get('preformat'):
return self._type_list_related_field(field, parent, previous, **kwargs)
if isinstance(field, schematics.types.ModelType) and not kwargs.get('preformat'):
return self._type_related_field(field, parent, previous, **kwargs)
return self._type_base_field(field, parent, previous, **kwargs)
def _type_base_field(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
if 'col' in kwargs:
column = kwargs.pop('col')
else:
column = self._get_next_column_number(previous)
preformat = None
if 'preformat' in kwargs:
preformat = kwargs.pop('preformat')
if preformat is None:
preformat = val.any_to_string
return tpl.Field(
col=column,
path=self._get_field_path(parent, field.name),
preformat=preformat,
verbose_name=self._get_field_verbose_name(field),
**kwargs
)
def _type_list_related_field(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
if hasattr(field, 'model_class'):
return self._type_related_field(field, parent, previous, **kwargs)
if 'col' in kwargs:
column = kwargs.pop('col')
else:
column = self._get_next_column_number(previous)
preformat = None
if 'preformat' in kwargs:
preformat = kwargs.pop('preformat')
if preformat is None:
preformat = val.any_to_string
return self._type_base_field(
col=column,
field=field,
parent=parent,
previous=previous,
preformat=preformat,
**kwargs
)
def _type_related_field(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
options = kwargs or self._get_model_template_options(field.model_class)
if 'col' in options:
column = options.pop('col')
else:
column = self._get_next_column_number(previous)
return self._build_template(
col=column,
path=self._get_field_path(parent, field.name),
model=field.model_class,
parent=parent,
verbose_name=self._get_field_verbose_name(field),
**options
)
def _get_model_template_options(self, model) -> dict:
o = self.nested_object_options.copy()
o.update({
k: v for k, v in model._options
if k in tpl.Object.supported_options and v is not None
})
return dict(filter(lambda x: x[1] is not None, o.items()))
def _get_model_renderable_fields(self, model: schematics.models.Model):
if 'fields' in dict(model._options) and model._options.fields is not None:
for field_name in model._options.fields:
# Get model field
if field_name in model.fields:
yield model.fields[field_name], self._get_model_preformat_field(model, field_name)
continue
# Get custom report field
getter = f'get_{field_name}'
if not hasattr(model, getter):
raise NotImplementedError(f'{model.__name__}.{getter} is not implemented')
getter = getattr(model, getter)
getter.name = getter.serialized_name = field_name
# Define preformatters and prepend getter
preformatters = self._get_model_preformat_field(model, field_name)
if not preformatters:
preformatters = [getter]
elif isinstance(preformatters, collections.abc.Iterable):
preformatters = [getter] + list(preformatters)
elif callable(preformatters):
preformatters = [getter, preformatters]
yield getter, preformatters
return
yield from (
(v, self._get_model_preformat_field(model, k))
for k, v in model.fields.items()
)
def _get_model_preformat_field(self, model: schematics.models.Model, field_name):
if 'preformat' in dict(model._options) and model._options.preformat is not None:
source_formatters = model._options.preformat.get(field_name)
if callable(source_formatters) or not source_formatters:
return source_formatters
callable_formatters = []
if isinstance(source_formatters, collections.abc.Iterable):
for formatter in source_formatters:
if isinstance(formatter, str):
callable_formatters.append(getattr(model, formatter))
elif callable(formatter):
callable_formatters.append(formatter)
else:
raise TypeError(f'{field_name} formatter must be callable or iterable of callable')
return callable_formatters
return None
def _get_next_column_number(self, previous_field=None):
if previous_field is None:
return 1
return previous_field.column + previous_field.length
def _get_field_verbose_name(self, field):
return field.serialized_name or field.name
def _get_field_path(self, parent, field_name):
return '.'.join([parent or '', field_name]).strip('.')
__all__ = [
'Normalizer',
'SchematicsNormalizer',
]
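# Illustrative sketch, not part of the original module (the model and data are assumptions):
# a schematics model can be rendered into ordered rows via SchematicsNormalizer.
#
#   class Song(schematics.models.Model):
#       title = schematics.types.StringType(serialized_name='Title')
#       year = schematics.types.IntType(serialized_name='Year')
#
#   normalizer = SchematicsNormalizer(Song)
#   rows = list(normalizer.build_table({'title': 'Intro', 'year': 2020}))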
|
the-stack_0_20184 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import gzip
import heapq # for heapq.merge
import itertools
import os
import re # used to extract words from input
import sys
import shutil # to remove directory trees
import traceback
import urllib # for quote and unquote
def write_tuples(outfile, tuples):
for item in tuples:
line = ' '.join(urllib.quote(str(field)) for field in item)
outfile.write(line + "\n")
def read_tuples(infile):
for line in infile:
yield tuple(urllib.unquote(field) for field in line.split())
# XXX maybe these functions don't need to exist?
def read_metadata(index_path):
with index_path['documents'].open() as metadata_file:
return dict((pathname, (int(mtime), int(size)))
for pathname, size, mtime in read_tuples(metadata_file))
# XXX we probably want files_unchanged
def file_unchanged(metadatas, path):
return any(metadata.get(path.name) == get_metadata(path)
for metadata in metadatas)
def doc_ids(index_path, terms):
"Actually evaluate a query."
doc_id_sets = (set(term_doc_ids(index_path, term)) for term in terms)
return set.intersection(*doc_id_sets)
def term_doc_ids(index_path, term):
doc_id_sets = (segment_term_doc_ids(segment, term)
for segment in index_segments(index_path))
return itertools.chain(*doc_id_sets)
# XXX make this a method of the Index object, perhaps returning Segment objects
def index_segments(index_path):
return [path for path in index_path if path.basename().startswith('seg_')]
class Path: # like java.lang.File
def __init__(self, name):
self.name = name
__getitem__ = lambda self, child: Path(os.path.join(self.name, str(child)))
__contains__ = lambda self, child: os.path.exists(self[child].name)
__iter__ = lambda self: (self[child] for child in os.listdir(self.name))
open = lambda self, *args: open(self.name, *args)
open_gzipped = lambda self, *args: gzip.GzipFile(self.name, *args)
basename = lambda self: os.path.basename(self.name)
parent = lambda self: Path(os.path.dirname(self.name))
abspath = lambda self: os.path.abspath(self.name)
def segment_term_doc_ids(segment, needle_term):
for chunk_name in segment_term_chunks(segment, needle_term):
with segment[chunk_name].open_gzipped() as chunk_file:
for haystack_term, doc_id in read_tuples(chunk_file):
if haystack_term == needle_term:
yield doc_id
# Once we reach an alphabetically later term, we're done:
if haystack_term > needle_term:
break
# XXX maybe return Path objects?
def segment_term_chunks(segment, term):
previous_chunk = None
for headword, chunk in skip_file_entries(segment):
if headword >= term:
if previous_chunk is not None:
yield previous_chunk
if headword > term:
break
previous_chunk = chunk
else: # executed if we don't break
if previous_chunk is not None:
yield previous_chunk
def skip_file_entries(segment_path):
with segment_path['skip'].open() as skip_file:
return list(read_tuples(skip_file))
def get_metadata(path):
s = os.stat(path.name)
return int(s.st_mtime), int(s.st_size)
def search_ui(index_path, terms):
# Use the crudest possible ranking: newest (largest mtime) first.
for path in sorted(paths(index_path, terms),
key=get_metadata, reverse=True):
print(path.name)
# At the moment, our doc_ids are just pathnames; this converts them to Path objects.
def paths(index_path, terms):
parent = index_path.parent()
for doc_id in doc_ids(index_path, terms):
yield Path(os.path.relpath(parent[doc_id].abspath(), start='.'))
# 2**20 is chosen as the maximum segment size because that uses
# typically about a quarter gig, which is a reasonable size these
# days.
segment_size = 2**20
def build_index(index_path, corpus_path, postings_filters):
os.mkdir(index_path.name)
# XXX hmm, these should match the doc_ids in the index
corpus_paths = list(find_documents(corpus_path))
with index_path['documents'].open('w') as outfile:
write_tuples(outfile, ((path.name,) + get_metadata(path)
for path in corpus_paths))
postings = tokenize_documents(corpus_paths)
for filter_function in postings_filters:
postings = filter_function(postings)
# XXX at this point we should just pass the fucking doc_id into
# the analyzer function :(
parent = index_path.parent()
rel_paths = dict((path.name, os.path.relpath(path.name, start=parent.name))
for path in corpus_paths)
rel_postings = ((term, rel_paths[doc_id]) for term, doc_id in postings)
for ii, chunk in enumerate(blocked(rel_postings, segment_size)):
write_new_segment(index_path['seg_%s' % ii], sorted(chunk))
merge_segments(index_path, index_segments(index_path))
# From nikipore on Stack Overflow <http://stackoverflow.com/a/19264525>
def blocked(seq, block_size):
seq = iter(seq)
while True:
# XXX for some reason using list(), and then later sorting in
# place, makes the whole program run twice as slow and doesn't
# reduce its memory usage. No idea why.
block = tuple(itertools.islice(seq, block_size))
if block:
yield block
else:
return  # end the generator cleanly (PEP 479) instead of raising StopIteration
def find_documents(path):
for dir_name, _, filenames in os.walk(path.name):
dir_path = Path(dir_name)
for filename in filenames:
yield dir_path[filename]
def tokenize_documents(paths):
for path in paths:
for posting in remove_duplicates(tokenize_file(path)):
yield posting
# Demonstrate a crude set of smart tokenizer frontends.
def tokenize_file(file_path):
if file_path.name.endswith('.html'):
return tokenize_html(file_path)
else:
return tokenize_text(file_path)
def tokenize_text(file_path):
word_re = re.compile(r'\w+')
with file_path.open() as fo:
for line in fo:
for word in word_re.findall(line):
yield word, file_path.name
def remove_duplicates(seq):
seen_items = set()
for item in seq:
if item not in seen_items:
yield item
seen_items.add(item)
chunk_size = 4096
def write_new_segment(path, postings):
os.mkdir(path.name)
chunks = blocked(postings, chunk_size)
skip_file_contents = (write_chunk(path, '%s.gz' % ii, chunk)
for ii, chunk in enumerate(chunks))
with path['skip'].open('w') as skip_file:
write_tuples(skip_file, itertools.chain(*skip_file_contents))
# Yields one skip file entry, or, in the edge case of an empty chunk, none.
def write_chunk(path, filename, chunk):
with path[filename].open_gzipped('w') as chunk_file:
write_tuples(chunk_file, chunk)
if chunk:
yield chunk[0][0], filename
# Crude approximation of HTML tokenization. Note that for proper
# excerpt generation (as in the "grep" command) the postings generated
# need to contain position information, because we need to run this
# tokenizer during excerpt generation too.
def tokenize_html(file_path):
tag_re = re.compile('<.*?>')
tag_start_re = re.compile('<.*')
tag_end_re = re.compile('.*?>')
word_re = re.compile(r'\w+')
with file_path.open() as fo:
in_tag = False
for line in fo:
if in_tag and tag_end_re.search(line):
line = tag_end_re.sub('', line)
in_tag = False
elif not in_tag:
line = tag_re.subn('', line)[0]
if tag_start_re.search(line):
in_tag = True
line = tag_start_re.sub('', line)
for term in word_re.findall(line):
yield term, file_path.name
def merge_segments(path, segments):
if len(segments) == 1:
return
postings = heapq.merge(*(read_segment(segment)
for segment in segments))
write_new_segment(path['seg_merged'], postings)
for segment in segments:
shutil.rmtree(segment.name)
def read_segment(path):
for _, chunk in skip_file_entries(path):
# XXX refactor chunk reading? We open_gzipped in three places now.
with path[chunk].open_gzipped() as chunk_file:
for item in read_tuples(chunk_file):
yield item
def make_stopwords_filter(stopwords):
stopwords = set(stopwords)
stopwords |= ( set(word.upper() for word in stopwords)
| set(word.capitalize() for word in stopwords))
return lambda postings: ((term, doc_id) for term, doc_id in postings
if term not in stopwords)
word_len = 20 # to eliminate nonsense words
def discard_long_nonsense_words_filter(postings):
"""Drop postings for nonsense words."""
return ((term, doc_id) for term, doc_id in postings if len(term) < word_len)
def case_insensitive_filter(postings):
for term, doc_id in postings:
yield term, doc_id
if term.lower() != term:
yield term.lower(), doc_id
def grep(index_path, terms):
for path in paths(index_path, terms):
try:
with path.open() as text:
for ii, line in enumerate(text, start=1):
if any(term in line for term in terms):
sys.stdout.write("%s:%s:%s" % (path.name, ii, line))
except Exception: # The file might e.g. no longer exist.
traceback.print_exc()
def main(argv):
# Eliminate the most common English words from queries and indices.
stopwords = 'the of and to a in it is was that i for on you he be'.split()
if argv[1] == 'index':
build_index(index_path=Path(argv[2]), corpus_path=Path(argv[3]),
postings_filters=[discard_long_nonsense_words_filter,
make_stopwords_filter(stopwords),
case_insensitive_filter])
elif argv[1] == 'query':
search_ui(Path(argv[2]), [term for term in argv[3:]
if term not in stopwords])
elif argv[1] == 'grep':
grep(Path(argv[2]), [term for term in argv[3:]
if term not in stopwords])
else:
raise Exception("%s (index|query|grep) index_dir ..." % (argv[0]))
if __name__ == '__main__':
main(sys.argv)
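# Illustrative command-line sketch (the script name is assumed): the three sub-commands
# dispatched by main() above are invoked as
#
#   python index.py index my_index/ my_corpus/    # build an index of my_corpus under my_index
#   python index.py query my_index/ some terms    # list matching files, newest first
#   python index.py grep my_index/ some terms     # print matching lines as file:line:text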
|
the-stack_0_20187 | import torch
import glob
import ipdb
import imageio
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import json
import h5py
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader
from model import MAC
from data import get_dataset
from evaluation import get_tlts
from config import ex
from loguru import logger
from einops import rearrange
from tqdm import tqdm
mpl.use("agg")
sns.set()
nice_fonts = {
# Use LaTeX for writing all texts
"text.usetex": True,
"font.family": "serif",
"xtick.labelsize": 7,
"ytick.labelsize": 7,
}
mpl.rcParams.update(nice_fonts)
image_dims = (14, 14)
cdict = dict(
{
"red": ((0.0, 0.0, 0.0), (0.6, 0.8, 0.8), (1.0, 1, 1)),
"green": ((0.0, 0.0, 0.0), (0.6, 0.8, 0.8), (1.0, 1, 1)),
"blue": ((0.0, 0.0, 0.0), (0.6, 0.8, 0.8), (1.0, 1, 1)),
"alpha": ((0.0, 0.35, 0.35), (1.0, 0.65, 0.65)),
}
)
plt.register_cmap(name="custom", data=cdict)
@ex.capture
def image_path(idx, root, task_root, dataset_name, visualization_split):
if dataset_name == "clevr":
image_dir = f"{root}/{task_root}/images/{visualization_split}"
path = f"{image_dir}/CLEVR_{visualization_split}_{idx:06d}.png"
elif dataset_name == "gqa":
image_dir = f"{root}/{task_root}/images"
path = f"{image_dir}/{idx}.jpg"
return path
@ex.capture
def draw_attentions(
attentions, question, answer, image_id, vis_dir, dataset, dataset_name
):
text = F.softmax(attentions["text"].cpu().squeeze(3), dim=2)
# text = attentions["text"].cpu().squeeze(3) # for logit visualization
text = rearrange(text, "steps b length -> steps (b length)", b=1)
image = F.softmax(attentions["image"].cpu().squeeze(3), dim=2)
if dataset_name == "clevr":
image = rearrange(image, "steps b (h w) -> steps (b h) w", b=1, h=14, w=14)
elif dataset_name == "gqa":
info = dataset.object_info[image_id[0]]
image = rearrange(image, "steps b length -> steps (b length)", b=1, length=100)
path = image_path(image_id[0])
# draw question and its attention
steps = range(1, len(text) + 1)
tokens = question.split()
table = text[:, : (len(tokens) + 1)].permute(1, 0).numpy()
df = pd.DataFrame(data=table, index=tokens, columns=steps)
heatmap = sns.heatmap(
df,
annot=True,
annot_kws={"size": 4},
fmt=".2f",
cmap="Purples",
cbar=False,
linewidths=0.5,
linecolor="gray",
square=True,
robust=False,
)
heatmap.xaxis.tick_top()
heatmap.set_yticklabels(heatmap.get_yticklabels(), rotation=0)
heatmap.set_xticklabels(heatmap.get_xticklabels(), rotation=0)
fig = heatmap.get_figure()
fig.savefig(
f"{vis_dir}/text.pdf",
dpi=480,
bbox_inches="tight",
format="pdf",
pad_inches=0.0,
)
plt.close(fig)
# draw image and its attention
img = imageio.imread(path)
if dataset_name == "gqa":
bbox = torch.load(
f"{'/'.join(path.split('/')[:-2])}/features/{image_id[0]}_bbox.pth"
)
for i, ia in enumerate(image):
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = torch.arange(-1.5, 1.5, 0.05), torch.arange(-1.0, 1.0, 0.05)
extent = x.min().item(), x.max().item(), y.min().item(), y.max().item()
ax.imshow(img, interpolation="nearest", extent=extent)
if dataset_name == "clevr":
ax.imshow(
ia.reshape(image_dims).numpy(),
cmap=plt.get_cmap("custom"),
interpolation="bicubic",
extent=extent,
)
elif dataset_name == "gqa":
canvas = torch.zeros(img.shape[0], img.shape[1]) # H x W
for att, box in zip(ia, bbox):
x0, y0, x1, y1 = box
canvas[int(y0) : int(y1), int(x0) : int(x1)] += att
ax.imshow(
canvas.numpy(),
cmap=plt.get_cmap("custom"),
interpolation="nearest",
extent=extent,
)
ax.set_axis_off()
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, hspace=0, wspace=0)
file_name = f"{vis_dir}/image_step{i}.png"
fig.savefig(file_name, bbox_inches="tight", pad_inches=0.0)
plt.close(fig)
@ex.automain
def main(
dataset_name, root, task_root, visualization_split, use_daft, load_seed, _config
):
assert dataset_name in ["clevr", "gqa"]
assert visualization_split in ["train", "val"]
task_name = f"{'daftmac' if use_daft else 'mac'}_{_config['dataset_name']}_step{_config['max_step']}_{load_seed}"
logger.add(f"result/log/{task_name}_vis.txt")
image_dir = f"{root}/{task_root}/images"
os.makedirs(f"result/vis/{task_name}", exist_ok=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = get_dataset(visualization_split)
n_words = len(dataset.qdic["w2i"])
n_answers = len(dataset.adic["w2i"])
net = MAC(n_words, classes=n_answers, use_daft=use_daft, qdic=dataset.qdic)
list_of_weights = glob.glob(f"result/model/{task_name}/*.model")
weight_path = max(list_of_weights, key=os.path.getctime)
weight = torch.load(weight_path)
net.load_state_dict(weight)
net.eval()
net = net.to(device)
loader = DataLoader(
dataset,
batch_size=1,
num_workers=0,
collate_fn=dataset.collate_data_vis,
pin_memory=True,
)
for i, (image, question, q_len, answer, dataset_id, image_id) in enumerate(
tqdm(loader)
):
original_question = " ".join(
[dataset.qdic["i2w"][e.item()] for e in question[0]]
)
original_answer = dataset.adic["i2w"][answer[0].item()]
image, question, q_len = image.to(device), question.to(device), q_len.to(device)
with torch.no_grad():
output = net(image, question, q_len)
pred = output.detach().argmax(dim=1)
is_right = (pred.cpu() == answer).item() == 1
logger.info(
f'Visualizing Question "{original_question}" -> "{original_answer}", [{i}/{len(loader)}] image {image_id}'
)
tlts, lts = dict(), dict()
for modal in ["text", "image"]:
attentions = net.mac.attentions[modal].cpu().squeeze(3)
tlts[modal], lts[modal] = get_tlts(
attentions, modal, dataset_name, q_len, image
)
vis_dir = f"result/vis/{task_name}/{original_question}->{original_answer}"
os.makedirs(f"{vis_dir}", exist_ok=True)
with open(f"{vis_dir}/tlt.txt", "w") as fp:
json.dump(
{
"text_lts": lts["text"].tolist(),
"text_tlt": tlts["text"].tolist(),
"image_lts": lts["image"].tolist(),
"image_tlt": tlts["image"].tolist(),
},
fp,
indent=2,
)
attentions = net.mac.attentions
draw_attentions(
attentions, original_question, original_answer, image_id, vis_dir, dataset
)
|
the-stack_0_20189 | import taichi as ti
import os
import numpy as np
import cv2
import math
import time
import random
from renderer_utils import out_dir, ray_aabb_intersection, inf, eps, \
intersect_sphere, sphere_aabb_intersect_motion, inside_taichi
import sys
res = 1280, 720
num_spheres = 1024
color_buffer = ti.Vector(3, dt=ti.f32)
sphere_pos = ti.Vector(3, dt=ti.f32)
bbox = ti.Vector(3, dt=ti.f32)
grid_density = ti.var(dt=ti.i32)
voxel_has_particle = ti.var(dt=ti.i32)
max_ray_depth = 4
use_directional_light = True
particle_x = ti.Vector(3, dt=ti.f32)
particle_v = ti.Vector(3, dt=ti.f32)
particle_color = ti.var(ti.i32)
pid = ti.var(ti.i32)
num_particles = ti.var(ti.i32)
fov = 0.23
dist_limit = 100
exposure = 1.5
camera_pos = ti.Vector([0.5, 0.32, 1.7])
vignette_strength = 0.9
vignette_radius = 0.0
vignette_center = [0.5, 0.5]
light_direction = [1.2, 0.3, 0.7]
light_direction_noise = 0.03
light_color = [1.0, 1.0, 1.0]
# ti.runtime.print_preprocessed = True
# ti.cfg.print_ir = True
ti.cfg.arch = ti.cuda
grid_visualization_block_size = 16
grid_resolution = 256 // grid_visualization_block_size
scene = sys.argv[1]
folder = sys.argv[2]
frame_id = int(sys.argv[3])
assert scene in ['sand', 'fluid', 'snow']
render_voxel = False
inv_dx = 64.0
dx = 1.0 / inv_dx
if scene == 'fluid':
supporter = 1
shutter_time = 1e-3
sphere_radius = 0.0018
particle_grid_res = 128
max_num_particles_per_cell = 256
max_num_particles = 1024 * 1024 * 4
elif scene == 'snow':
camera_pos = ti.Vector([0.35, 0.27, 2.0])
supporter = 2
shutter_time = 1e-3
sphere_radius = 0.005
particle_grid_res = 64
max_num_particles_per_cell = 128
max_num_particles = 1024 * 1024 * 4
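elif scene == 'sand':
  # NOTE: assumed placeholder settings for the 'sand' scene (mirroring the
  # snow branch) so the assert below holds; tune to the actual simulation.
  supporter = 2
  shutter_time = 1e-3
  sphere_radius = 0.002
  particle_grid_res = 64
  max_num_particles_per_cell = 128
  max_num_particles = 1024 * 1024 * 4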
assert sphere_radius * 2 * particle_grid_res < 1
@ti.layout
def buffers():
ti.root.dense(ti.ij, (res[0] // 8, res[1] // 8)).dense(ti.ij, 8).place(
color_buffer)
ti.root.dense(ti.i, num_spheres).place(sphere_pos)
ti.root.dense(ti.ijk, 2).dense(ti.ijk, particle_grid_res // 8).dense(ti.ijk,
8).place(
voxel_has_particle)
ti.root.dense(ti.ijk, 4).dense(ti.ijk,
particle_grid_res // 8).pointer().dense(ti.ijk,
8).dynamic(
ti.l,
max_num_particles_per_cell).place(
pid)
ti.root.dense(ti.l, max_num_particles).place(particle_x, particle_v,
particle_color)
ti.root.place(num_particles)
ti.root.dense(ti.ijk, grid_resolution // 8).dense(ti.ijk, 8).place(
grid_density)
ti.root.dense(ti.i, 2).place(bbox)
@ti.func
def inside_grid(ipos):
return ipos.min() >= 0 and ipos.max() < grid_resolution
# The DDA algorithm requires the voxel grid to have one surrounding layer of void region
# to correctly render the outermost voxel faces.
@ti.func
def inside_grid_loose(ipos):
return ipos.min() >= -1 and ipos.max() <= grid_resolution
@ti.func
def query_density_int(ipos):
inside = inside_grid(ipos)
ret = 0
if inside:
ret = grid_density[ipos]
else:
ret = 0
return ret
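# voxel_color(): darkens the base voxel color near cell faces (when at least
# two fractional coordinates fall within `boundary` of a face) so that grid
# edges remain visible when render_voxel is enabled.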
@ti.func
def voxel_color(pos):
p = pos * grid_resolution
p -= ti.Matrix.floor(p)
boundary = 0.1
count = 0
for i in ti.static(range(3)):
if p[i] < boundary or p[i] > 1 - boundary:
count += 1
f = 0.0
if count >= 2:
f = 1.0
return ti.Vector([0.2, 0.3, 0.2]) * (2.3 - 2 * f)
n_pillars = 9
@ti.func
def sdf(o_):
if ti.static(supporter == 0):
o = o_ - ti.Vector([0.5, 0.002, 0.5])
p = o
h = 0.02
ra = 0.29
rb = 0.005
d = (ti.Vector([p[0], p[2]]).norm() - 2.0 * ra + rb, ti.abs(p[1]) - h)
dist = ti.min(ti.max(d[0], d[1]), 0.0) + ti.Vector(
[ti.max(d[0], 0.0), ti.max(d[1], 0)]).norm() - rb
return dist
elif ti.static(supporter == 1):
o = o_ - ti.Vector([0.5, 0.002, 0.5])
dist = (o.abs() - ti.Vector([0.5, 0.02, 0.5])).max()
else:
dist = o_[1] - 0.04
return dist
@ti.func
def ray_march(p, d):
j = 0
dist = 0.0
limit = 200
while j < limit and sdf(p + dist * d) > 1e-8 and dist < dist_limit:
dist += sdf(p + dist * d)
j += 1
if dist > dist_limit:
dist = inf
return dist
@ti.func
def sdf_normal(p):
d = 1e-3
n = ti.Vector([0.0, 0.0, 0.0])
for i in ti.static(range(3)):
inc = p
dec = p
inc[i] += d
dec[i] -= d
n[i] = (0.5 / d) * (sdf(inc) - sdf(dec))
return ti.Matrix.normalized(n)
@ti.func
def sdf_color(p_):
p = p_
scale = 0.4
# if inside_taichi(ti.Vector([p[0], p[2]])):
# scale = 1
return ti.Vector([0.3, 0.5, 0.7]) * scale
@ti.func
def dda(eye_pos, d_):
d = d_
for i in ti.static(range(3)):
if ti.abs(d[i]) < 1e-6:
d[i] = 1e-6
rinv = 1.0 / d
rsign = ti.Vector([0, 0, 0])
for i in ti.static(range(3)):
if d[i] > 0:
rsign[i] = 1
else:
rsign[i] = -1
bbox_min = ti.Vector([0.0, 0.0, 0.0]) - 10 * eps
bbox_max = ti.Vector([1.0, 1.0, 1.0]) + 10 * eps
inter, near, far = ray_aabb_intersection(bbox_min, bbox_max, eye_pos, d)
hit_distance = inf
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
if inter:
near = ti.max(0, near)
pos = eye_pos + d * (near + 5 * eps)
o = grid_resolution * pos
ipos = ti.Matrix.floor(o).cast(ti.i32)
dis = (ipos - o + 0.5 + rsign * 0.5) * rinv
running = 1
i = 0
hit_pos = ti.Vector([0.0, 0.0, 0.0])
while running:
last_sample = query_density_int(ipos)
if not inside_grid_loose(ipos):
running = 0
# normal = [0, 0, 0]
if last_sample:
mini = (ipos - o + ti.Vector([0.5, 0.5, 0.5]) - rsign * 0.5) * rinv
hit_distance = mini.max() * (1 / grid_resolution) + near
hit_pos = eye_pos + hit_distance * d
c = voxel_color(hit_pos)
running = 0
else:
mm = ti.Vector([0, 0, 0])
if dis[0] <= dis[1] and dis[0] < dis[2]:
mm[0] = 1
elif dis[1] <= dis[0] and dis[1] <= dis[2]:
mm[1] = 1
else:
mm[2] = 1
dis += mm * rsign * rinv
ipos += mm * rsign
normal = -mm * rsign
i += 1
return hit_distance, normal, c
@ti.func
def intersect_spheres(pos, d):
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
min_dist = inf
sid = -1
for i in range(num_spheres):
dist = intersect_sphere(pos, d, sphere_pos[i], 0.05)
if dist < min_dist:
min_dist = dist
sid = i
if min_dist < inf:
hit_pos = pos + d * min_dist
normal = ti.Matrix.normalized(hit_pos - sphere_pos[sid])
c = [0.3, 0.5, 0.2]
return min_dist, normal, c
@ti.func
def inside_particle_grid(ipos):
grid_res = particle_grid_res
return bbox[0][0] <= ipos[0] * dx and ipos[0] < bbox[1][0] * inv_dx and bbox[0][1] * inv_dx <= ipos[1] and ipos[
1] < bbox[1][1] * inv_dx and bbox[0][2] * inv_dx <= ipos[2] and ipos[2] < bbox[1][2] * inv_dx
# pos = ipos * dx
# return bbox[0][0] - 0.1 < pos[0] and pos[0] < bbox[1][0] + 0.1 and bbox[0][1] - 0.1 < pos[1] and \
# pos[1] < bbox[1][1] + 0.1 and bbox[0][2] - 0.1 < pos[2] and pos[2] < bbox[1][2] + 0.1
@ti.func
def dda_particle(eye_pos, d_, t):
grid_res = particle_grid_res
bbox_min = bbox[0]
bbox_max = bbox[1]
hit_pos = ti.Vector([0.0, 0.0, 0.0])
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
d = d_
for i in ti.static(range(3)):
if ti.abs(d[i]) < 1e-6:
d[i] = 1e-6
inter, near, far = ray_aabb_intersection(bbox_min, bbox_max, eye_pos, d)
near = ti.max(0, near)
closest_intersection = inf
if inter:
pos = eye_pos + d * (near + eps)
rinv = 1.0 / d
rsign = ti.Vector([0, 0, 0])
for i in ti.static(range(3)):
if d[i] > 0:
rsign[i] = 1
else:
rsign[i] = -1
o = grid_res * pos
ipos = ti.Matrix.floor(o).cast(ti.i32)
dis = (ipos - o + 0.5 + rsign * 0.5) * rinv
running = 1
while running:
inside = inside_particle_grid(ipos)
if inside:
num_particles = voxel_has_particle[ipos]
if num_particles != 0:
num_particles = ti.length(pid.parent(), ipos)
for k in range(num_particles):
p = pid[ipos[0], ipos[1], ipos[2], k]
v = particle_v[p]
x = particle_x[p] + t * v
color = particle_color[p]
dist, poss = intersect_sphere(eye_pos, d, x, sphere_radius)
hit_pos = poss
if dist < closest_intersection and dist > 0:
hit_pos = eye_pos + dist * d
closest_intersection = dist
normal = ti.Matrix.normalized(hit_pos - x)
c = [color // 256 ** 2 / 255.0, color / 256 % 256 / 255.0,
color % 256 / 255.0]
else:
running = 0
normal = [0, 0, 0]
if closest_intersection < inf:
running = 0
else:
# hits nothing. Continue ray marching
mm = ti.Vector([0, 0, 0])
if dis[0] <= dis[1] and dis[0] <= dis[2]:
mm[0] = 1
elif dis[1] <= dis[0] and dis[1] <= dis[2]:
mm[1] = 1
else:
mm[2] = 1
dis += mm * rsign * rinv
ipos += mm * rsign
return closest_intersection, normal, c
@ti.func
def next_hit(pos_, d, t):
pos = pos_
closest = inf
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
if ti.static(render_voxel):
closest, normal, c = dda(pos, d)
else:
closest, normal, c = dda_particle(pos, d, t)
if d[2] != 0:
ray_closest = -(pos[2] + 5.5) / d[2]
if ray_closest > 0 and ray_closest < closest:
closest = ray_closest
normal = ti.Vector([0.0, 0.0, 1.0])
c = ti.Vector([0.6, 0.7, 0.7])
ray_march_dist = ray_march(pos, d)
if ray_march_dist < dist_limit and ray_march_dist < closest:
closest = ray_march_dist
normal = sdf_normal(pos + d * closest)
c = sdf_color(pos + d * closest)
return closest, normal, c
aspect_ratio = res[0] / res[1]
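# render(): one path-tracing sample per pixel per call.  A jittered camera ray
# is generated for each pixel, intersected via next_hit() (particles or voxels,
# the SDF supporter and the back plane), bounced into a new direction from
# out_dir(normal), shaded by casting a shadow ray toward the jittered
# directional light, and terminated early by Russian roulette on the
# throughput.  Samples accumulate in color_buffer and are averaged in main().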
@ti.kernel
def render():
ti.parallelize(6)
for u, v in color_buffer(0):
pos = camera_pos
d = ti.Vector(
[(2 * fov * (u + ti.random(ti.f32)) / res[1] - fov * aspect_ratio - 1e-5),
2 * fov * (v + ti.random(ti.f32)) / res[1] - fov - 1e-5,
-1.0])
d = ti.Matrix.normalized(d)
if u < res[0] and v < res[1]:
t = (ti.random() - 0.5) * shutter_time
contrib = ti.Vector([0.0, 0.0, 0.0])
throughput = ti.Vector([1.0, 1.0, 1.0])
depth = 0
hit_sky = 1
ray_depth = 0
while depth < max_ray_depth:
closest, normal, c = next_hit(pos, d, t)
hit_pos = pos + closest * d
depth += 1
ray_depth = depth
if normal.norm() != 0:
d = out_dir(normal)
pos = hit_pos + 1e-4 * d
throughput *= c
if ti.static(use_directional_light):
dir_noise = ti.Vector([ti.random() - 0.5, ti.random() - 0.5,
ti.random() - 0.5]) * light_direction_noise
direct = ti.Matrix.normalized(
ti.Vector(light_direction) + dir_noise)
dot = direct.dot(normal)
if dot > 0:
dist, _, _ = next_hit(pos, direct, t)
if dist > dist_limit:
contrib += throughput * ti.Vector(light_color) * dot
else: # hit sky
hit_sky = 1
depth = max_ray_depth
max_c = throughput.max()
if ti.random() > max_c:
depth = max_ray_depth
throughput = [0, 0, 0]
else:
throughput /= max_c
if hit_sky:
if ray_depth != 1:
# contrib *= ti.max(d[1], 0.05)
pass
else:
# directly hit sky
pass
else:
throughput *= 0
# contrib += throughput
color_buffer[u, v] += contrib
support = 2
@ti.kernel
def initialize_particle_grid():
for p in particle_x(0):
if p < num_particles:
x = particle_x[p]
v = particle_v[p]
ipos = ti.Matrix.floor(x * particle_grid_res).cast(ti.i32)
for i in range(-support, support + 1):
for j in range(-support, support + 1):
for k in range(-support, support + 1):
offset = ti.Vector([i, j, k])
box_ipos = ipos + offset
if inside_particle_grid(box_ipos):
box_min = box_ipos * (1 / particle_grid_res)
box_max = (box_ipos + ti.Vector([1, 1, 1])) * (
1 / particle_grid_res)
if sphere_aabb_intersect_motion(box_min, box_max,
x - 0.5 * shutter_time * v,
x + 0.5 * shutter_time * v,
sphere_radius):
ti.append(pid.parent(), box_ipos, p)
voxel_has_particle[box_ipos] = 1
@ti.func
def color_f32_to_i8(x):
return ti.cast(ti.min(ti.max(x, 0.0), 1.0) * 255, ti.i32)
@ti.func
def rgb_to_i32(r, g, b):
return color_f32_to_i8(r) * 65536 + color_f32_to_i8(
g) * 256 + color_f32_to_i8(b)
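# copy(): flattens the accumulated color buffer into a vertically flipped,
# BGR-ordered float array for OpenCV, applying a radial vignette that darkens
# pixels in proportion to their distance from vignette_center beyond
# vignette_radius.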
@ti.kernel
def copy(img: np.ndarray):
for i in range(res[0]):
for j in range(res[1]):
u = 1.0 * i / res[0]
v = 1.0 * j / res[1]
darken = 1.0 - vignette_strength * ti.max((ti.sqrt(
ti.sqr(u - vignette_center[0]) + ti.sqr(
v - vignette_center[1])) - vignette_radius), 0)
coord = ((res[1] - 1 - j) * res[0] + i) * 3
for c in ti.static(range(3)):
img[coord + c] = color_buffer[i, j][2 - c] * darken
def main():
sand = np.fromfile(
"mpm3d/iter{}/{:04d}.bin".format(folder, frame_id),
dtype=np.float32)
for i in range(num_spheres):
for c in range(3):
sphere_pos[i][c] = 0.5 # random.random()
num_sand_particles = len(sand) // 7
num_part = num_sand_particles
sand = sand.reshape((7, num_sand_particles))
sand = np.transpose(sand)
np_x = sand[:, :3].flatten()
np_v = sand[:, 3:6].flatten()
np_c = sand[:, 6].flatten().astype(np.float32)
for i in range(3):
print(sand[:, i].min(), sand[:, i].max())
# bbox values must be multiples of dx
bbox[0][i] = (math.floor(sand[:, i].min() * particle_grid_res) - 3.0) / particle_grid_res
bbox[1][i] = (math.floor(sand[:, i].max() * particle_grid_res) + 3.0) / particle_grid_res
num_particles[None] = num_part
print('num_input_particles =', num_part)
@ti.kernel
def initialize_particle_x(x: np.ndarray, v: np.ndarray, color: np.ndarray):
for i in range(max_num_particles):
if i < num_particles:
for c in ti.static(range(3)):
particle_x[i][c] = x[i * 3 + c]
for c in ti.static(range(3)):
particle_v[i][c] = v[i * 3 + c]
# v_c = ti.min(1, particle_v[i].norm()) * 1.5
# ti.print(v_c)
particle_color[i] = ti.cast(color[i], ti.i32)
# reconstruct grid using particle position and MPM p2g.
for k in ti.static(range(27)):
base_coord = (inv_dx * particle_x[i] - 0.5).cast(ti.i32) + ti.Vector(
[k // 9, k // 3 % 3, k % 3])
grid_density[base_coord / grid_visualization_block_size] = 1
initialize_particle_x(np_x, np_v, np_c)
initialize_particle_grid()
output_folder = 'outputs_' + folder
last_t = 0
for i in range(500):
render()
interval = 50
if i % interval == 0:
img = np.zeros((res[1] * res[0] * 3,), dtype=np.float32)
copy(img)
if last_t != 0:
print(
"time per spp = {:.2f} ms".format(
(time.time() - last_t) * 1000 / interval))
last_t = time.time()
img = img.reshape(res[1], res[0], 3) * (1 / (i + 1)) * exposure
img = np.sqrt(img)
# cv2.imshow('img', img)
# cv2.waitKey(1)
os.makedirs(output_folder, exist_ok=True)
cv2.imwrite(output_folder + '/{:04d}.png'.format(frame_id), img * 255)
if __name__ == '__main__':
main()
|
the-stack_0_20190 | try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
from . import util
from .pprint import PrettyPrinter
class AttrTree(object):
"""
An AttrTree offers convenient, multi-level attribute access for
collections of objects. AttrTree objects may also be combined
together using the update method or merge classmethod. Here is an
example of adding a ViewableElement to an AttrTree and accessing it:
>>> t = AttrTree()
>>> t.Example.Path = 1
>>> t.Example.Path #doctest: +ELLIPSIS
1
"""
    _disabled_prefixes = []  # Attribute-name prefixes for which identifier sanitization is skipped
_sanitizer = util.sanitize_identifier
@classmethod
def merge(cls, trees):
"""
Merge a collection of AttrTree objects.
"""
first = trees[0]
for tree in trees:
first.update(tree)
return first
def __dir__(self):
"""
        The _dir_mode may be set to 'default' or 'user'; in 'user' mode
        only the child nodes added by the user are listed.
"""
dict_keys = self.__dict__.keys()
if self.__dict__['_dir_mode'] == 'user':
return self.__dict__['children']
else:
return dir(type(self)) + list(dict_keys)
def __init__(self, items=None, identifier=None, parent=None, dir_mode='default'):
"""
identifier: A string identifier for the current node (if any)
parent: The parent node (if any)
items: Items as (path, value) pairs to construct
(sub)tree down to given leaf values.
Note that the root node does not have a parent and does not
require an identifier.
"""
self.__dict__['parent'] = parent
self.__dict__['identifier'] = type(self)._sanitizer(identifier, escape=False)
self.__dict__['children'] = []
self.__dict__['_fixed'] = False
self.__dict__['_dir_mode'] = dir_mode # Either 'default' or 'user'
fixed_error = 'No attribute %r in this AttrTree, and none can be added because fixed=True'
self.__dict__['_fixed_error'] = fixed_error
self.__dict__['data'] = OrderedDict()
items = items.items() if isinstance(items, OrderedDict) else items
# Python 3
items = list(items) if items else items
items = [] if not items else items
for path, item in items:
self.set_path(path, item)
@property
def path(self):
"Returns the path up to the root for the current node."
if self.parent:
return '.'.join([self.parent.path, str(self.identifier)])
else:
return self.identifier if self.identifier else self.__class__.__name__
@property
def fixed(self):
"If fixed, no new paths can be created via attribute access"
return self.__dict__['_fixed']
@fixed.setter
def fixed(self, val):
self.__dict__['_fixed'] = val
def update(self, other):
"""
Updated the contents of the current AttrTree with the
contents of a second AttrTree.
"""
if not isinstance(other, AttrTree):
raise Exception('Can only update with another AttrTree type.')
fixed_status = (self.fixed, other.fixed)
(self.fixed, other.fixed) = (False, False)
for identifier, element in other.items():
if identifier not in self.data:
self[identifier] = element
else:
self[identifier].update(element)
(self.fixed, other.fixed) = fixed_status
def set_path(self, path, val):
"""
Set the given value at the supplied path where path is either
a tuple of strings or a string in A.B.C format.
"""
path = tuple(path.split('.')) if isinstance(path , str) else tuple(path)
disallowed = [p for p in path if not type(self)._sanitizer.allowable(p)]
if any(disallowed):
raise Exception("Attribute strings in path elements cannot be "
"correctly escaped : %s" % ','.join(repr(el) for el in disallowed))
if len(path) > 1:
attrtree = self.__getattr__(path[0])
attrtree.set_path(path[1:], val)
else:
self.__setattr__(path[0], val)
def filter(self, path_filters):
"""
Filters the loaded AttrTree using the supplied path_filters.
"""
if not path_filters: return self
# Convert string path filters
path_filters = [tuple(pf.split('.')) if not isinstance(pf, tuple)
else pf for pf in path_filters]
# Search for substring matches between paths and path filters
new_attrtree = self.__class__()
for path, item in self.data.items():
if any([all([subpath in path for subpath in pf]) for pf in path_filters]):
new_attrtree.set_path(path, item)
return new_attrtree
def _propagate(self, path, val):
"""
Propagate the value up to the root node.
"""
if val == '_DELETE':
if path in self.data:
del self.data[path]
else:
items = [(key, v) for key, v in self.data.items()
if not all(k==p for k, p in zip(key, path))]
self.data = OrderedDict(items)
else:
self.data[path] = val
if self.parent is not None:
self.parent._propagate((self.identifier,)+path, val)
def __setitem__(self, identifier, val):
"""
Set a value at a child node with given identifier. If at a root
node, multi-level path specifications is allowed (i.e. 'A.B.C'
format or tuple format) in which case the behaviour matches
that of set_path.
"""
if isinstance(identifier, str) and '.' not in identifier:
self.__setattr__(identifier, val)
elif isinstance(identifier, str) and self.parent is None:
self.set_path(tuple(identifier.split('.')), val)
elif isinstance(identifier, tuple) and self.parent is None:
self.set_path(identifier, val)
else:
raise Exception("Multi-level item setting only allowed from root node.")
def __getitem__(self, identifier):
"""
For a given non-root node, access a child element by identifier.
If the node is a root node, you may also access elements using
either tuple format or the 'A.B.C' string format.
"""
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
if identifier in self.children:
return self.__dict__[identifier]
else:
raise KeyError(identifier)
path_item = self
for identifier in split_label:
path_item = path_item[identifier]
return path_item
def __delitem__(self, identifier):
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
if identifier in self.children:
del self.__dict__[identifier]
self.children.pop(self.children.index(identifier))
else:
raise KeyError(identifier)
self._propagate(split_label, '_DELETE')
else:
path_item = self
for i, identifier in enumerate(split_label[:-1]):
path_item = path_item[identifier]
del path_item[split_label[-1]]
def __setattr__(self, identifier, val):
# Getattr is skipped for root and first set of children
shallow = (self.parent is None or self.parent.parent is None)
if identifier[0].isupper() and self.fixed and shallow:
raise AttributeError(self._fixed_error % identifier)
super(AttrTree, self).__setattr__(identifier, val)
if identifier[0].isupper():
if not identifier in self.children:
self.children.append(identifier)
self._propagate((identifier,), val)
def __getattr__(self, identifier):
"""
Access a identifier from the AttrTree or generate a new AttrTree
with the chosen attribute path.
"""
try:
return super(AttrTree, self).__getattr__(identifier)
except AttributeError: pass
# Attributes starting with __ get name mangled
if identifier.startswith('_' + type(self).__name__) or identifier.startswith('__'):
raise AttributeError('Attribute %s not found.' % identifier)
elif self.fixed==True:
raise AttributeError(self._fixed_error % identifier)
if not any(identifier.startswith(prefix)
for prefix in type(self)._disabled_prefixes):
sanitized = type(self)._sanitizer(identifier, escape=False)
else:
sanitized = identifier
if sanitized in self.children:
return self.__dict__[sanitized]
if not sanitized.startswith('_') and identifier[0].isupper():
self.children.append(sanitized)
dir_mode = self.__dict__['_dir_mode']
child_tree = self.__class__(identifier=sanitized,
parent=self, dir_mode=dir_mode)
self.__dict__[sanitized] = child_tree
return child_tree
else:
raise AttributeError('%r object has no attribute %s.' %
(type(self).__name__, identifier))
def __iter__(self):
return iter(self.data.values())
def __contains__(self, name):
return name in self.children or name in self.data
def __len__(self):
return len(self.data)
def get(self, identifier, default=None):
split_label = (tuple(identifier.split('.'))
if isinstance(identifier, str) else tuple(identifier))
if len(split_label) == 1:
identifier = split_label[0]
return self.__dict__.get(identifier, default)
path_item = self
for identifier in split_label:
if path_item == default or path_item is None:
return default
path_item = path_item.get(identifier, default)
return path_item
def keys(self):
return list(self.data.keys())
def items(self):
return list(self.data.items())
def values(self):
return list(self.data.values())
def pop(self, identifier, default=None):
if identifier in self.children:
item = self[identifier]
self.__delitem__(identifier)
return item
else:
return default
def __repr__(self):
return PrettyPrinter.pprint(self)
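# Illustrative usage (identifiers are arbitrary):
#
#   t = AttrTree()
#   t.set_path('Example.Path', 1)     # equivalent to t.Example.Path = 1
#   t['Example.Path']                 # -> 1 (string paths work at the root)
#   ('Example', 'Path') in t.data     # -> True
#   t.filter([('Example',)]).keys()   # -> [('Example', 'Path')]
#   t.pop('Example')                  # removes and returns the 'Example' subtree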
__all__ = ['AttrTree']
|
the-stack_0_20191 | from __future__ import absolute_import
import numpy as np
def knn_threshold(data, count, epsilon):
return data[count - 1] + epsilon
def epsilon_threshold(data, count, epsilon):
return data[count - 1] * (1 + epsilon)
# def get_recall_values(dataset_distances, run_distances, count, threshold,
# epsilon=1e-3):
# recalls = np.zeros(len(run_distances)) # len of nq
# for i in range(len(run_distances)):
# t = threshold(dataset_distances[i], count, epsilon)
# actual = 0
# for d in run_distances[i][:count]:
# if d <= t:
# actual += 1
# recalls[i] = actual
# return (np.mean(recalls) / float(count),
# np.std(recalls) / float(count),
# recalls)
def get_recall_values(dataset_neighbors, run_neighbors, count, threshold,
epsilon=1e-3):
    recalls = np.zeros(len(run_neighbors))  # one recall value per query (nq queries)
for i in range(len(run_neighbors)):
# t = threshold(dataset_distances[i], count, epsilon)
inter = np.intersect1d(run_neighbors[i][:count], dataset_neighbors[i][:count])
# for d in run_neighbors[i][:count]:
# if d <= t:
# actual += 1
recalls[i] = inter.shape[0]
return (np.mean(recalls) / float(count),
np.std(recalls) / float(count),
recalls)
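# Example: with count = 10, if 8 of the ids returned for a query also appear in
# that query's ground-truth top-10, recalls[i] is 8 and that query contributes
# 8 / 10 = 0.8 to the mean recall.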
def knn(dataset_distances, run_distances, count, metrics, epsilon=1e-3):
if 'knn' not in metrics:
print('Computing knn metrics')
knn_metrics = metrics.create_group('knn')
mean, std, recalls = get_recall_values(dataset_distances,
run_distances, count,
knn_threshold, epsilon)
knn_metrics.attrs['mean'] = mean
knn_metrics.attrs['std'] = std
knn_metrics['recalls'] = recalls
else:
print("Found cached result")
return metrics['knn']
def epsilon(dataset_distances, run_distances, count, metrics, epsilon=0.01):
s = 'eps' + str(epsilon)
if s not in metrics:
print('Computing epsilon metrics')
epsilon_metrics = metrics.create_group(s)
mean, std, recalls = get_recall_values(dataset_distances,
run_distances, count,
epsilon_threshold, epsilon)
epsilon_metrics.attrs['mean'] = mean
epsilon_metrics.attrs['std'] = std
epsilon_metrics['recalls'] = recalls
else:
print("Found cached result")
return metrics[s]
def rel(dataset_distances, run_distances, metrics):
if 'rel' not in metrics.attrs:
print('Computing rel metrics')
total_closest_distance = 0.0
total_candidate_distance = 0.0
for true_distances, found_distances in zip(dataset_distances,
run_distances):
for rdist, cdist in zip(true_distances, found_distances):
if np.isnan(rdist) or np.isnan(cdist) or np.isinf(rdist) or np.isinf(cdist):
continue
total_closest_distance += rdist
total_candidate_distance += cdist
if total_closest_distance < 0.01:
metrics.attrs['rel'] = float("inf")
else:
metrics.attrs['rel'] = total_candidate_distance / \
total_closest_distance
if metrics.attrs['rel'] - 1.0 < 0:
metrics.attrs['rel'] = 1.0
else:
print("Found cached result")
return metrics.attrs['rel']
def queries_per_second(queries, attrs):
return 1.0 / attrs["best_search_time"]
def index_size(queries, attrs):
# TODO(erikbern): should replace this with peak memory usage or something
return attrs.get("index_size", 0)
def build_time(queries, attrs):
return attrs["build_time"]
def candidates(queries, attrs):
return attrs["candidates"]
def dist_computations(queries, attrs):
return attrs.get("dist_comps", 0) / (attrs['run_count'] * len(queries))
all_metrics = {
"k-nn": {
"description": "Recall",
"function": lambda true_distances, run_distances, metrics, run_attrs: knn(true_distances, run_distances, run_attrs["count"], metrics).attrs['mean'], # noqa
"worst": float("-inf"),
"lim": [0.0, 1.03]
},
"k-nn_0.8": {
"description": "Recall 0.8+",
"function": lambda true_distances, run_distances, metrics, run_attrs: knn(true_distances, run_distances, run_attrs["count"], metrics).attrs['mean'], # noqa
"worst": float("-inf"),
"lim": [0.80, 1.03]
},
"epsilon": {
"description": "Epsilon 0.01 Recall",
"function": lambda true_distances, run_distances, metrics, run_attrs: epsilon(true_distances, run_distances, run_attrs["count"], metrics).attrs['mean'], # noqa
"worst": float("-inf")
},
"epsilon_0.8": {
"description": "Epsilon 0.01 Recall 0.8+",
"function": lambda true_distances, run_distances, metrics, run_attrs: epsilon(true_distances, run_distances, run_attrs["count"], metrics).attrs['mean'], # noqa
"worst": float("-inf"),
"lim": [0.80, 1.03]
},
"largeepsilon": {
"description": "Epsilon 0.1 Recall",
"function": lambda true_distances, run_distances, metrics, run_attrs: epsilon(true_distances, run_distances, run_attrs["count"], metrics, 0.1).attrs['mean'], # noqa
"worst": float("-inf")
},
"largeepsilon_0.8": {
"description": "Epsilon 0.1 Recall 0.8+",
"function": lambda true_distances, run_distances, metrics, run_attrs: epsilon(true_distances, run_distances, run_attrs["count"], metrics, 0.1).attrs['mean'], # noqa
"worst": float("-inf"),
"lim": [0.80, 1.03]
},
"rel": {
"description": "Relative Error",
"function": lambda true_distances, run_distances, metrics, run_attrs: rel(true_distances, run_distances, metrics), # noqa
"worst": float("inf")
},
"qps": {
"description": "Queries per second (1/s)",
"function": lambda true_distances, run_distances, metrics, run_attrs: queries_per_second(true_distances, run_attrs), # noqa
"worst": float("-inf")
},
"distcomps": {
"description": "Distance computations",
"function": lambda true_distances, run_distances, metrics, run_attrs: dist_computations(true_distances, run_attrs), # noqa
"worst": float("inf")
},
"build": {
"description": "Refresh time (s)",
"function": lambda true_distances, run_distances, metrics, run_attrs: build_time(true_distances, run_attrs), # noqa
"worst": float("inf")
},
"candidates": {
"description": "Candidates generated",
"function": lambda true_distances, run_distances, metrics, run_attrs: candidates(true_distances, run_attrs), # noqa
"worst": float("inf")
},
"indexsize": {
"description": "Index size (kB)",
"function": lambda true_distances, run_distances, metrics, run_attrs: index_size(true_distances, run_attrs), # noqa
"worst": float("inf")
},
"queriessize": {
"description": "Index size (kB)/Queries per second (s)",
"function": lambda true_distances, run_distances, metrics, run_attrs: index_size(true_distances, run_attrs) / queries_per_second(true_distances, run_attrs), # noqa
"worst": float("inf")
}
}
|
the-stack_0_20192 | # -*- coding: utf-8 -*-
"""
pyvisa-py.protocols.usbraw
~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements Session to control USB Raw devices
Loosely based on PyUSBTMC:python module to handle USB-TMC(Test and Measurement class) devices.
by Noboru Yamamot, Accl. Lab, KEK, JAPAN
This file is an offspring of the Lantz Project.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .usbtmc import USBRaw as USBRaw
from .usbutil import find_devices, find_interfaces
def find_raw_devices(vendor=None, product=None, serial_number=None,
custom_match=None, **kwargs):
"""Find connected USB RAW devices. See usbutil.find_devices for more info.
"""
def is_usbraw(dev):
if custom_match and not custom_match(dev):
return False
return bool(find_interfaces(dev, bInterfaceClass=0xFF,
bInterfaceSubClass=0xFF))
return find_devices(vendor, product, serial_number, is_usbraw, **kwargs)
class USBRawDevice(USBRaw):
RECV_CHUNK = 1024 ** 2
find_devices = staticmethod(find_raw_devices)
def __init__(self, vendor=None, product=None, serial_number=None, **kwargs):
super(USBRawDevice, self).__init__(vendor, product, serial_number, **kwargs)
if not (self.usb_recv_ep and self.usb_send_ep):
raise ValueError("USBRAW device must have both Bulk-In and Bulk-out endpoints.")
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
begin, end, size = 0, 0, len(data)
bytes_sent = 0
raw_write = super(USBRawDevice, self).write
while not end > size:
begin = end
end = begin + self.RECV_CHUNK
bytes_sent += raw_write(data[begin:end])
return bytes_sent
def read(self, size):
"""Read raw bytes from the instrument.
:param size: amount of bytes to be sent to the instrument
:type size: integer
:return: received bytes
:return type: bytes
"""
raw_read = super(USBRawDevice, self).read
received = bytearray()
while not len(received) >= size:
resp = raw_read(self.RECV_CHUNK)
received.extend(resp)
return bytes(received)
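# Illustrative usage (vendor/product ids are hypothetical and require a
# matching device to be attached):
#
#   devices = find_raw_devices(vendor=0x1234, product=0x5678)
#   if devices:
#       dev = USBRawDevice(vendor=0x1234, product=0x5678)
#       dev.write(b'*IDN?\n')          # any raw payload the device understands
#       response = dev.read(64)        # blocks until 64 bytes are received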
|
the-stack_0_20193 | # -*- coding: utf-8 -*-
'''
The EC2 Cloud Module
====================
The EC2 cloud module is used to interact with the Amazon Elastic Cloud
Computing.
To use the EC2 cloud module, set up the cloud configuration at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/ec2.conf``:
.. code-block:: yaml
my-ec2-config:
# The EC2 API authentication id
id: GKTADJGHEIQSXMKKRBJ08H
# The EC2 API authentication key
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# The ssh keyname to use
keyname: default
# The amazon security group
securitygroup: ssh_open
# The location of the private key which corresponds to the keyname
private_key: /root/default.pem
# Be default, service_url is set to amazonaws.com. If you are using this
# driver for something other than Amazon EC2, change it here:
service_url: amazonaws.com
# The endpoint that is ultimately used is usually formed using the region
# and the service_url. If you would like to override that entirely, you
# can explicitly define the endpoint:
endpoint: myendpoint.example.com:1138/services/Cloud
# SSH Gateways can be used with this provider. Gateways can be used
# when a salt-master is not on the same private network as the instance
# that is being deployed.
# Defaults to None
# Required
ssh_gateway: gateway.example.com
# Defaults to port 22
# Optional
ssh_gateway_port: 22
# Defaults to root
# Optional
ssh_gateway_username: root
# One authentication method is required. If both
# are specified, Private key wins.
# Private key defaults to None
ssh_gateway_private_key: /path/to/key.pem
# Password defaults to None
ssh_gateway_password: ExamplePasswordHere
provider: ec2
:depends: requests
'''
# pylint: disable=E0102
from __future__ import absolute_import
# Import python libs
import os
import sys
import stat
import time
import uuid
import pprint
import logging
import yaml
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import requests
import salt.ext.six as six
from salt.ext.six.moves import map, range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse, urlencode as _urlencode
# pylint: enable=import-error,no-name-in-module
# Import libs for talking to the EC2 API
import hmac
import hashlib
import binascii
import datetime
import base64
# Import salt libs
import salt.utils
from salt.utils import namespaced_function
from salt.cloud.libcloudfuncs import get_salt_interface
from salt._compat import ElementTree as ET
import salt.utils.aws as aws
# Import salt.cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudException,
SaltCloudSystemExit,
SaltCloudConfigError,
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure
)
# Try to import PyCrypto, which may not be installed on a RAET-based system
try:
import Crypto
# PKCS1_v1_5 was added in PyCrypto 2.5
from Crypto.Cipher import PKCS1_v1_5 # pylint: disable=E0611
HAS_PYCRYPTO = True
except ImportError:
HAS_PYCRYPTO = False
# Get logging started
log = logging.getLogger(__name__)
# namespace libcloudfuncs
get_salt_interface = namespaced_function(get_salt_interface, globals())
SIZE_MAP = {
'Micro Instance': 't1.micro',
'Small Instance': 'm1.small',
'Medium Instance': 'm1.medium',
'Large Instance': 'm1.large',
'Extra Large Instance': 'm1.xlarge',
'High-CPU Medium Instance': 'c1.medium',
'High-CPU Extra Large Instance': 'c1.xlarge',
'High-Memory Extra Large Instance': 'm2.xlarge',
'High-Memory Double Extra Large Instance': 'm2.2xlarge',
'High-Memory Quadruple Extra Large Instance': 'm2.4xlarge',
'Cluster GPU Quadruple Extra Large Instance': 'cg1.4xlarge',
'Cluster Compute Quadruple Extra Large Instance': 'cc1.4xlarge',
'Cluster Compute Eight Extra Large Instance': 'cc2.8xlarge',
}
EC2_LOCATIONS = {
'ap-northeast-1': 'ec2_ap_northeast',
'ap-southeast-1': 'ec2_ap_southeast',
'ap-southeast-2': 'ec2_ap_southeast_2',
'eu-west-1': 'ec2_eu_west',
'sa-east-1': 'ec2_sa_east',
'us-east-1': 'ec2_us_east',
'us-west-1': 'ec2_us_west',
'us-west-2': 'ec2_us_west_oregon',
}
DEFAULT_LOCATION = 'us-east-1'
DEFAULT_EC2_API_VERSION = '2013-10-01'
EC2_RETRY_CODES = [
'RequestLimitExceeded',
'InsufficientInstanceCapacity',
'InternalError',
'Unavailable',
'InsufficientAddressCapacity',
'InsufficientReservedInstanceCapacity',
]
# Only load in this module if the EC2 configurations are in place
def __virtual__():
'''
Set up the libcloud functions and check for EC2 configurations
'''
if get_configured_provider() is False:
return False
for provider, details in six.iteritems(__opts__['providers']):
if 'provider' not in details or details['provider'] != 'ec2':
continue
if not os.path.exists(details['private_key']):
raise SaltCloudException(
'The EC2 key file {0!r} used in the {1!r} provider '
'configuration does not exist\n'.format(
details['private_key'],
provider
)
)
keymode = str(
oct(stat.S_IMODE(os.stat(details['private_key']).st_mode))
)
if keymode not in ('0400', '0600'):
raise SaltCloudException(
'The EC2 key file {0!r} used in the {1!r} provider '
'configuration needs to be set to mode 0400 or 0600\n'.format(
details['private_key'],
provider
)
)
return True
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'ec2',
('id', 'key', 'keyname', 'private_key')
)
def _xml_to_dict(xmltree):
'''
Convert an XML tree into a dict
'''
if sys.version_info < (2, 7):
children_len = len(xmltree.getchildren())
else:
children_len = len(xmltree)
if children_len < 1:
name = xmltree.tag
if '}' in name:
comps = name.split('}')
name = comps[1]
return {name: xmltree.text}
xmldict = {}
for item in xmltree:
name = item.tag
if '}' in name:
comps = name.split('}')
name = comps[1]
if name not in xmldict:
if sys.version_info < (2, 7):
children_len = len(item.getchildren())
else:
children_len = len(item)
if children_len > 0:
xmldict[name] = _xml_to_dict(item)
else:
xmldict[name] = item.text
else:
if not isinstance(xmldict[name], list):
tempvar = xmldict[name]
xmldict[name] = []
xmldict[name].append(tempvar)
xmldict[name].append(_xml_to_dict(item))
return xmldict
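# Example: an element such as <instancesSet><item><instanceId>i-123</instanceId>
# </item></instancesSet> becomes {'item': {'instanceId': 'i-123'}}; repeated
# sibling tags are collected into a list and namespace prefixes ({...}) are
# stripped from tag names.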
def optimize_providers(providers):
'''
Return an optimized list of providers.
We want to reduce the duplication of querying
the same region.
If a provider is using the same credentials for the same region
the same data will be returned for each provider, thus causing
    unwanted duplicate data and API calls to EC2.
'''
tmp_providers = {}
optimized_providers = {}
for name, data in six.iteritems(providers):
if 'location' not in data:
data['location'] = DEFAULT_LOCATION
if data['location'] not in tmp_providers:
tmp_providers[data['location']] = {}
creds = (data['id'], data['key'])
if creds not in tmp_providers[data['location']]:
tmp_providers[data['location']][creds] = {'name': name,
'data': data,
}
for location, tmp_data in six.iteritems(tmp_providers):
for creds, data in six.iteritems(tmp_data):
_id, _key = creds
_name = data['name']
_data = data['data']
if _name not in optimized_providers:
optimized_providers[_name] = _data
return optimized_providers
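# For example, two provider blocks configured with the same id/key pair and the
# same region collapse to a single entry here, so the region is only queried
# once instead of once per duplicate provider.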
def query(params=None, setname=None, requesturl=None, location=None,
return_url=False, return_root=False):
provider = get_configured_provider()
service_url = provider.get('service_url', 'amazonaws.com')
attempts = 5
while attempts > 0:
params_with_headers = params.copy()
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if not location:
location = get_location()
if not requesturl:
endpoint = provider.get(
'endpoint',
'ec2.{0}.{1}'.format(location, service_url)
)
requesturl = 'https://{0}/'.format(endpoint)
else:
endpoint = _urlparse(requesturl).netloc
if endpoint == '':
endpoint_err = (
'Could not find a valid endpoint in the '
'requesturl: {0}. Looking for something '
'like https://some.ec2.endpoint/?args').format(requesturl)
log.error(endpoint_err)
if return_url is True:
return {'error': endpoint_err}, requesturl
return {'error': endpoint_err}
log.debug('Using EC2 endpoint: {0}'.format(endpoint))
method = 'GET'
ec2_api_version = provider.get(
'ec2_api_version',
DEFAULT_EC2_API_VERSION
)
params_with_headers['AWSAccessKeyId'] = provider['id']
params_with_headers['SignatureVersion'] = '2'
params_with_headers['SignatureMethod'] = 'HmacSHA256'
params_with_headers['Timestamp'] = '{0}'.format(timestamp)
params_with_headers['Version'] = ec2_api_version
keys = sorted(params_with_headers)
values = list(map(params_with_headers.get, keys))
querystring = _urlencode(list(zip(keys, values)))
# AWS signature version 2 requires that spaces be encoded as
# %20, however urlencode uses '+'. So replace pluses with %20.
querystring = querystring.replace('+', '%20')
uri = '{0}\n{1}\n/\n{2}'.format(method.encode('utf-8'),
endpoint.encode('utf-8'),
querystring.encode('utf-8'))
hashed = hmac.new(provider['key'], uri, hashlib.sha256)
sig = binascii.b2a_base64(hashed.digest())
params_with_headers['Signature'] = sig.strip()
log.debug('EC2 Request: {0}'.format(requesturl))
log.trace('EC2 Request Parameters: {0}'.format(params_with_headers))
try:
result = requests.get(requesturl, params=params_with_headers)
log.debug(
'EC2 Response Status Code: {0}'.format(
# result.getcode()
result.status_code
)
)
log.trace(
'EC2 Response Text: {0}'.format(
result.text
)
)
result.raise_for_status()
break
except requests.exceptions.HTTPError as exc:
root = ET.fromstring(exc.response.content)
data = _xml_to_dict(root)
# check to see if we should retry the query
err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
if attempts > 0 and err_code and err_code in EC2_RETRY_CODES:
attempts -= 1
log.error(
'EC2 Response Status Code and Error: [{0} {1}] {2}; '
'Attempts remaining: {3}'.format(
exc.response.status_code, exc, data, attempts
)
)
# Wait a bit before continuing to prevent throttling
time.sleep(2)
continue
log.error(
'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
exc.response.status_code, exc, data
)
)
if return_url is True:
return {'error': data}, requesturl
return {'error': data}
else:
log.error(
'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
exc.response.status_code, exc, data
)
)
if return_url is True:
return {'error': data}, requesturl
return {'error': data}
response = result.text
root = ET.fromstring(response)
items = root[1]
if return_root is True:
items = root
if setname:
if sys.version_info < (2, 7):
children_len = len(root.getchildren())
else:
children_len = len(root)
for item in range(0, children_len):
comps = root[item].tag.split('}')
if comps[1] == setname:
items = root[item]
ret = []
for item in items:
ret.append(_xml_to_dict(item))
if return_url is True:
return ret, requesturl
return ret
def _wait_for_spot_instance(update_callback,
update_args=None,
update_kwargs=None,
timeout=10 * 60,
interval=30,
interval_multiplier=1,
max_failures=10):
'''
Helper function that waits for a spot instance request to become active
for a specific maximum amount of time.
:param update_callback: callback function which queries the cloud provider
for spot instance request. It must return None if
the required data, running instance included, is
not available yet.
:param update_args: Arguments to pass to update_callback
:param update_kwargs: Keyword arguments to pass to update_callback
:param timeout: The maximum amount of time(in seconds) to wait for the IP
address.
:param interval: The looping interval, i.e., the amount of time to sleep
before the next iteration.
:param interval_multiplier: Increase the interval by this multiplier after
each request; helps with throttling
:param max_failures: If update_callback returns ``False`` it's considered
query failure. This value is the amount of failures
accepted before giving up.
:returns: The update_callback returned data
:raises: SaltCloudExecutionTimeout
'''
if update_args is None:
update_args = ()
if update_kwargs is None:
update_kwargs = {}
duration = timeout
while True:
log.debug(
'Waiting for spot instance reservation. Giving up in '
'00:{0:02d}:{1:02d}'.format(
int(timeout // 60),
int(timeout % 60)
)
)
data = update_callback(*update_args, **update_kwargs)
if data is False:
log.debug(
'update_callback has returned False which is considered a '
'failure. Remaining Failures: {0}'.format(max_failures)
)
max_failures -= 1
if max_failures <= 0:
raise SaltCloudExecutionFailure(
'Too many failures occurred while waiting for '
'the spot instance reservation to become active.'
)
elif data is not None:
return data
if timeout < 0:
raise SaltCloudExecutionTimeout(
'Unable to get an active spot instance request for '
'00:{0:02d}:{1:02d}'.format(
int(duration // 60),
int(duration % 60)
)
)
time.sleep(interval)
timeout -= interval
if interval_multiplier > 1:
interval *= interval_multiplier
if interval > timeout:
interval = timeout + 1
log.info('Interval multiplier in effect; interval is '
'now {0}s'.format(interval))
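# Illustrative call pattern (the callback name is hypothetical): pass a callback
# that polls the spot request state and returns None while it is still pending,
# False if the query itself failed, and the request data once it is active, e.g.
#
#   data = _wait_for_spot_instance(poll_spot_request, timeout=10 * 60,
#                                  interval=30, interval_multiplier=2)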
def avail_sizes(call=None):
'''
Return a dict of all available VM sizes on the cloud provider with
relevant data. Latest version can be found at:
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
sizes = {
'Cluster Compute': {
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'cores': '16 (2 x Intel Xeon E5-2670, eight-core with '
'hyperthread)',
'disk': '3360 GiB (4 x 840 GiB)',
'ram': '60.5 GiB'
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'cores': '8 (2 x Intel Xeon X5570, quad-core with '
'hyperthread)',
'disk': '1690 GiB (2 x 840 GiB)',
'ram': '22.5 GiB'
},
},
'Cluster CPU': {
'cg1.4xlarge': {
'id': 'cg1.4xlarge',
'cores': '8 (2 x Intel Xeon X5570, quad-core with '
'hyperthread), plus 2 NVIDIA Tesla M2050 GPUs',
'disk': '1680 GiB (2 x 840 GiB)',
'ram': '22.5 GiB'
},
},
'High CPU': {
'c1.xlarge': {
'id': 'c1.xlarge',
'cores': '8 (with 2.5 ECUs each)',
'disk': '1680 GiB (4 x 420 GiB)',
'ram': '8 GiB'
},
'c1.medium': {
'id': 'c1.medium',
'cores': '2 (with 2.5 ECUs each)',
'disk': '340 GiB (1 x 340 GiB)',
'ram': '1.7 GiB'
},
'c3.large': {
'id': 'c3.large',
'cores': '2 (with 3.5 ECUs each)',
'disk': '32 GiB (2 x 16 GiB SSD)',
'ram': '3.75 GiB'
},
'c3.xlarge': {
'id': 'c3.xlarge',
'cores': '4 (with 3.5 ECUs each)',
'disk': '80 GiB (2 x 40 GiB SSD)',
'ram': '7.5 GiB'
},
'c3.2xlarge': {
'id': 'c3.2xlarge',
'cores': '8 (with 3.5 ECUs each)',
'disk': '160 GiB (2 x 80 GiB SSD)',
'ram': '15 GiB'
},
'c3.4xlarge': {
'id': 'c3.4xlarge',
'cores': '16 (with 3.5 ECUs each)',
'disk': '320 GiB (2 x 80 GiB SSD)',
'ram': '30 GiB'
},
'c3.8xlarge': {
'id': 'c3.8xlarge',
'cores': '32 (with 3.5 ECUs each)',
'disk': '320 GiB (2 x 160 GiB SSD)',
'ram': '60 GiB'
}
},
'High I/O': {
'hi1.4xlarge': {
'id': 'hi1.4xlarge',
'cores': '8 (with 4.37 ECUs each)',
'disk': '2 TiB',
'ram': '60.5 GiB'
},
},
'High Memory': {
'm2.2xlarge': {
'id': 'm2.2xlarge',
'cores': '4 (with 3.25 ECUs each)',
'disk': '840 GiB (1 x 840 GiB)',
'ram': '34.2 GiB'
},
'm2.xlarge': {
'id': 'm2.xlarge',
'cores': '2 (with 3.25 ECUs each)',
'disk': '410 GiB (1 x 410 GiB)',
'ram': '17.1 GiB'
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'cores': '8 (with 3.25 ECUs each)',
'disk': '1680 GiB (2 x 840 GiB)',
'ram': '68.4 GiB'
},
'r3.large': {
'id': 'r3.large',
'cores': '2 (with 3.25 ECUs each)',
'disk': '32 GiB (1 x 32 GiB SSD)',
'ram': '15 GiB'
},
'r3.xlarge': {
'id': 'r3.xlarge',
'cores': '4 (with 3.25 ECUs each)',
'disk': '80 GiB (1 x 80 GiB SSD)',
'ram': '30.5 GiB'
},
'r3.2xlarge': {
'id': 'r3.2xlarge',
'cores': '8 (with 3.25 ECUs each)',
'disk': '160 GiB (1 x 160 GiB SSD)',
'ram': '61 GiB'
},
'r3.4xlarge': {
'id': 'r3.4xlarge',
'cores': '16 (with 3.25 ECUs each)',
'disk': '320 GiB (1 x 320 GiB SSD)',
'ram': '122 GiB'
},
'r3.8xlarge': {
'id': 'r3.8xlarge',
'cores': '32 (with 3.25 ECUs each)',
'disk': '640 GiB (2 x 320 GiB SSD)',
'ram': '244 GiB'
}
},
'High-Memory Cluster': {
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'cores': '16 (2 x Intel Xeon E5-2670, eight-core)',
'disk': '240 GiB (2 x 120 GiB SSD)',
'ram': '244 GiB'
},
},
'High Storage': {
'hs1.8xlarge': {
'id': 'hs1.8xlarge',
'cores': '16 (8 cores + 8 hyperthreads)',
'disk': '48 TiB (24 x 2 TiB hard disk drives)',
'ram': '117 GiB'
},
},
'Micro': {
't1.micro': {
'id': 't1.micro',
'cores': '1',
'disk': 'EBS',
'ram': '615 MiB'
},
},
'Standard': {
'm1.xlarge': {
'id': 'm1.xlarge',
'cores': '4 (with 2 ECUs each)',
'disk': '1680 GB (4 x 420 GiB)',
'ram': '15 GiB'
},
'm1.large': {
'id': 'm1.large',
'cores': '2 (with 2 ECUs each)',
'disk': '840 GiB (2 x 420 GiB)',
'ram': '7.5 GiB'
},
'm1.medium': {
'id': 'm1.medium',
'cores': '1',
'disk': '400 GiB',
'ram': '3.75 GiB'
},
'm1.small': {
'id': 'm1.small',
'cores': '1',
'disk': '150 GiB',
'ram': '1.7 GiB'
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'cores': '8 (with 3.25 ECUs each)',
'disk': 'EBS',
'ram': '30 GiB'
},
'm3.xlarge': {
'id': 'm3.xlarge',
'cores': '4 (with 3.25 ECUs each)',
'disk': 'EBS',
'ram': '15 GiB'
},
}
}
return sizes
def avail_images(kwargs=None, call=None):
'''
Return a dict of all available VM images on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not isinstance(kwargs, dict):
kwargs = {}
if 'owner' in kwargs:
owner = kwargs['owner']
else:
provider = get_configured_provider()
owner = config.get_cloud_config_value(
'owner', provider, __opts__, default='amazon'
)
ret = {}
params = {'Action': 'DescribeImages',
'Owner': owner}
images = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for image in images:
ret[image['imageId']] = image
return ret
def script(vm_):
'''
Return the script deployment object
'''
return salt.utils.cloud.os_script(
config.get_cloud_config_value('script', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
def keyname(vm_):
'''
Return the keyname
'''
return config.get_cloud_config_value(
'keyname', vm_, __opts__, search_global=False
)
def securitygroup(vm_):
'''
Return the security group
'''
return config.get_cloud_config_value(
'securitygroup', vm_, __opts__, search_global=False
)
def iam_profile(vm_):
'''
Return the IAM profile.
The IAM instance profile to associate with the instances.
This is either the Amazon Resource Name (ARN) of the instance profile
or the name of the role.
Type: String
Default: None
Required: No
Example: arn:aws:iam::111111111111:instance-profile/s3access
Example: s3access
'''
return config.get_cloud_config_value(
'iam_profile', vm_, __opts__, search_global=False
)
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def get_ssh_gateway_config(vm_):
'''
Return the ssh_gateway configuration.
'''
ssh_gateway = config.get_cloud_config_value(
'ssh_gateway', vm_, __opts__, default=None,
search_global=False
)
# Check to see if a SSH Gateway will be used.
if not isinstance(ssh_gateway, str):
return None
# Create dictionary of configuration items
# ssh_gateway
ssh_gateway_config = {'ssh_gateway': ssh_gateway}
# ssh_gateway_port
ssh_gateway_config['ssh_gateway_port'] = config.get_cloud_config_value(
'ssh_gateway_port', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_username
ssh_gateway_config['ssh_gateway_user'] = config.get_cloud_config_value(
'ssh_gateway_username', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_private_key
ssh_gateway_config['ssh_gateway_key'] = config.get_cloud_config_value(
'ssh_gateway_private_key', vm_, __opts__, default=None,
search_global=False
)
# ssh_gateway_password
ssh_gateway_config['ssh_gateway_password'] = config.get_cloud_config_value(
'ssh_gateway_password', vm_, __opts__, default=None,
search_global=False
)
# Check if private key exists
key_filename = ssh_gateway_config['ssh_gateway_key']
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined ssh_gateway_private_key {0!r} does not exist'.format(
key_filename
)
)
elif (
key_filename is None and
not ssh_gateway_config['ssh_gateway_password']
):
raise SaltCloudConfigError(
'No authentication method. Please define: '
' ssh_gateway_password or ssh_gateway_private_key'
)
return ssh_gateway_config
def get_location(vm_=None):
'''
Return the EC2 region to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
'''
return __opts__.get(
'location',
config.get_cloud_config_value(
'location',
vm_ or get_configured_provider(),
__opts__,
default=DEFAULT_LOCATION,
search_global=False
)
)
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
params = {'Action': 'DescribeRegions'}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for region in result:
ret[region['regionName']] = {
'name': region['regionName'],
'endpoint': region['regionEndpoint'],
}
return ret
def get_availability_zone(vm_):
'''
Return the availability zone to use
'''
avz = config.get_cloud_config_value(
'availability_zone', vm_, __opts__, search_global=False
)
if avz is None:
return None
zones = _list_availability_zones()
# Validate user-specified AZ
if avz not in zones:
raise SaltCloudException(
'The specified availability zone isn\'t valid in this region: '
'{0}\n'.format(
avz
)
)
# check specified AZ is available
elif zones[avz] != 'available':
raise SaltCloudException(
'The specified availability zone isn\'t currently available: '
'{0}\n'.format(
avz
)
)
return avz
def get_tenancy(vm_):
'''
Returns the Tenancy to use.
Can be "dedicated" or "default". Cannot be present for spot instances.
'''
return config.get_cloud_config_value(
'tenancy', vm_, __opts__, search_global=False
)
def get_subnetid(vm_):
'''
Returns the SubnetId to use
'''
return config.get_cloud_config_value(
'subnetid', vm_, __opts__, search_global=False
)
def securitygroupid(vm_):
'''
Returns the SecurityGroupId
'''
return config.get_cloud_config_value(
'securitygroupid', vm_, __opts__, search_global=False
)
def get_placementgroup(vm_):
'''
Returns the PlacementGroup to use
'''
return config.get_cloud_config_value(
'placementgroup', vm_, __opts__, search_global=False
)
def get_spot_config(vm_):
'''
Returns the spot instance configuration for the provided vm
'''
return config.get_cloud_config_value(
'spot_config', vm_, __opts__, search_global=False
)
def get_provider(vm_=None):
'''
Extract the provider name from vm
'''
if vm_ is None:
provider = __active_provider_name__ or 'ec2'
else:
provider = vm_.get('provider', 'ec2')
if ':' in provider:
prov_comps = provider.split(':')
provider = prov_comps[0]
return provider
def _list_availability_zones():
'''
List all availability zones in the current region
'''
ret = {}
params = {'Action': 'DescribeAvailabilityZones',
'Filter.0.Name': 'region-name',
'Filter.0.Value.0': get_location()}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for zone in result:
ret[zone['zoneName']] = zone['zoneState']
return ret
def block_device_mappings(vm_):
'''
Return the block device mapping:
.. code-block:: python
[{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
{'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
'''
return config.get_cloud_config_value(
'block_device_mappings', vm_, __opts__, search_global=True
)
def _request_eip(interface):
'''
    Request an Elastic IP and return its allocation ID
'''
params = {'Action': 'AllocateAddress'}
params['Domain'] = interface.setdefault('domain', 'vpc')
eips = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for eip in eips:
if 'allocationId' in eip:
return eip['allocationId']
return None
def _create_eni(interface):
'''
    Create and return an Elastic Network Interface (ENI)
'''
params = {'Action': 'DescribeSubnets'}
subnet_query = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
found = False
for subnet_query_result in subnet_query:
if 'item' in subnet_query_result:
for subnet in subnet_query_result['item']:
if subnet['subnetId'] == interface['SubnetId']:
found = True
break
if not found:
raise SaltCloudConfigError(
'No such subnet <{0}>'.format(interface['SubnetId'])
)
params = {'Action': 'CreateNetworkInterface',
'SubnetId': interface['SubnetId']}
for k in ('Description', 'PrivateIpAddress',
'SecondaryPrivateIpAddressCount'):
if k in interface:
params[k] = interface[k]
for k in ('PrivateIpAddresses', 'SecurityGroupId'):
if k in interface:
params.update(_param_from_config(k, interface[k]))
result = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
eni_desc = result[1]
if not eni_desc or not eni_desc.get('networkInterfaceId'):
raise SaltCloudException('Failed to create interface: {0}'.format(result))
eni_id = eni_desc.get('networkInterfaceId')
log.debug(
        'Created network interface {0} at device index {1}'.format(
eni_id, interface['DeviceIndex']
)
)
if interface.get('associate_eip'):
_associate_eip_with_interface(eni_id, interface.get('associate_eip'))
elif interface.get('allocate_new_eip'):
_new_eip = _request_eip(interface)
_associate_eip_with_interface(eni_id, _new_eip)
elif interface.get('allocate_new_eips'):
addr_list = _list_interface_private_addresses(eni_desc)
eip_list = []
for idx, addr in enumerate(addr_list):
eip_list.append(_request_eip(interface))
for idx, addr in enumerate(addr_list):
_associate_eip_with_interface(eni_id, eip_list[idx], addr)
return {'DeviceIndex': interface['DeviceIndex'],
'NetworkInterfaceId': eni_id}
def _list_interface_private_addresses(eni_desc):
'''
Returns a list of all of the private IP addresses attached to a
network interface. The 'primary' address will be listed first.
'''
primary = eni_desc.get('privateIpAddress')
if not primary:
return None
addresses = [primary]
lst = eni_desc.get('privateIpAddressesSet', {}).get('item', [])
if not isinstance(lst, list):
return addresses
for entry in lst:
if entry.get('primary') == 'true':
continue
if entry.get('privateIpAddress'):
addresses.append(entry.get('privateIpAddress'))
return addresses
def _associate_eip_with_interface(eni_id, eip_id, private_ip=None):
'''
Accept the id of a network interface, and the id of an elastic ip
address, and associate the two of them, such that traffic sent to the
elastic ip address will be forwarded (NATted) to this network interface.
Optionally specify the private (10.x.x.x) IP address that traffic should
be NATted to - useful if you have multiple IP addresses assigned to an
interface.
'''
retries = 5
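    # The association can fail transiently while the interface or address is
    # still being provisioned, so retry a few times with a short delay.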
while retries > 0:
params = {'Action': 'AssociateAddress',
'NetworkInterfaceId': eni_id,
'AllocationId': eip_id}
if private_ip:
params['PrivateIpAddress'] = private_ip
retries = retries - 1
result = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if isinstance(result, dict) and result.get('error'):
time.sleep(1)
continue
if not result[2].get('associationId'):
break
log.debug(
'Associated ElasticIP address {0} with interface {1}'.format(
eip_id, eni_id
)
)
return result[2].get('associationId')
raise SaltCloudException(
'Could not associate elastic ip address '
'<{0}> with network interface <{1}>'.format(
eip_id, eni_id
)
)
def _update_enis(interfaces, instance):
config_enis = {}
instance_enis = []
for interface in interfaces:
if 'DeviceIndex' in interface:
if interface['DeviceIndex'] in config_enis:
log.error(
'Duplicate DeviceIndex in profile. Cannot update ENIs.'
)
return None
config_enis[str(interface['DeviceIndex'])] = interface
query_enis = instance[0]['instancesSet']['item']['networkInterfaceSet']['item']
if isinstance(query_enis, list):
for query_eni in query_enis:
instance_enis.append((query_eni['networkInterfaceId'], query_eni['attachment']))
else:
instance_enis.append((query_enis['networkInterfaceId'], query_enis['attachment']))
for eni_id, eni_data in instance_enis:
params = {'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': eni_id,
'Attachment.AttachmentId': eni_data['attachmentId'],
'Attachment.DeleteOnTermination': config_enis[eni_data['deviceIndex']].setdefault('delete_interface_on_terminate', True)}
set_eni_attributes = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return None
def _param_from_config(key, data):
'''
Return EC2 API parameters based on the given config data.
Examples:
1. List of dictionaries
>>> data = [
... {'DeviceIndex': 0, 'SubnetId': 'subid0',
... 'AssociatePublicIpAddress': True},
... {'DeviceIndex': 1,
... 'SubnetId': 'subid1',
... 'PrivateIpAddress': '192.168.1.128'}
... ]
>>> _param_from_config('NetworkInterface', data)
... {'NetworkInterface.0.SubnetId': 'subid0',
... 'NetworkInterface.0.DeviceIndex': 0,
... 'NetworkInterface.1.SubnetId': 'subid1',
... 'NetworkInterface.1.PrivateIpAddress': '192.168.1.128',
... 'NetworkInterface.0.AssociatePublicIpAddress': 'true',
... 'NetworkInterface.1.DeviceIndex': 1}
2. List of nested dictionaries
>>> data = [
... {'DeviceName': '/dev/sdf',
... 'Ebs': {
... 'SnapshotId': 'dummy0',
... 'VolumeSize': 200,
... 'VolumeType': 'standard'}},
... {'DeviceName': '/dev/sdg',
... 'Ebs': {
... 'SnapshotId': 'dummy1',
... 'VolumeSize': 100,
... 'VolumeType': 'standard'}}
... ]
>>> _param_from_config('BlockDeviceMapping', data)
... {'BlockDeviceMapping.0.Ebs.VolumeType': 'standard',
... 'BlockDeviceMapping.1.Ebs.SnapshotId': 'dummy1',
... 'BlockDeviceMapping.0.Ebs.VolumeSize': 200,
... 'BlockDeviceMapping.0.Ebs.SnapshotId': 'dummy0',
... 'BlockDeviceMapping.1.Ebs.VolumeType': 'standard',
... 'BlockDeviceMapping.1.DeviceName': '/dev/sdg',
... 'BlockDeviceMapping.1.Ebs.VolumeSize': 100,
... 'BlockDeviceMapping.0.DeviceName': '/dev/sdf'}
3. Dictionary of dictionaries
>>> data = { 'Arn': 'dummyarn', 'Name': 'Tester' }
>>> _param_from_config('IamInstanceProfile', data)
{'IamInstanceProfile.Arn': 'dummyarn', 'IamInstanceProfile.Name': 'Tester'}
'''
param = {}
if isinstance(data, dict):
for k, v in data.items():
param.update(_param_from_config('{0}.{1}'.format(key, k), v))
elif isinstance(data, list) or isinstance(data, tuple):
for idx, conf_item in enumerate(data):
prefix = '{0}.{1}'.format(key, idx)
param.update(_param_from_config(prefix, conf_item))
else:
if isinstance(data, bool):
            # convert boolean True/False to 'true'/'false'
param.update({key: str(data).lower()})
else:
param.update({key: data})
return param
def request_instance(vm_=None, call=None):
'''
    Put together all of the information necessary to request an instance on EC2,
    and then fire off the request for the instance.
Returns data about the instance
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
location = vm_.get('location', get_location(vm_))
# do we launch a regular vm or a spot instance?
# see http://goo.gl/hYZ13f for more information on EC2 API
spot_config = get_spot_config(vm_)
if spot_config is not None:
if 'spot_price' not in spot_config:
raise SaltCloudSystemExit(
'Spot instance config for {0} requires a spot_price '
'attribute.'.format(vm_['name'])
)
params = {'Action': 'RequestSpotInstances',
'InstanceCount': '1',
'Type': spot_config['type']
if 'type' in spot_config else 'one-time',
'SpotPrice': spot_config['spot_price']}
# All of the necessary launch parameters for a VM when using
# spot instances are the same except for the prefix below
# being tacked on.
spot_prefix = 'LaunchSpecification.'
# regular EC2 instance
else:
# WARNING! EXPERIMENTAL!
# This allows more than one instance to be spun up in a single call.
# The first instance will be called by the name provided, but all other
# instances will be nameless (or more specifically, they will use the
# InstanceId as the name). This interface is expected to change, so
# use at your own risk.
min_instance = config.get_cloud_config_value(
'min_instance', vm_, __opts__, search_global=False, default=1
)
max_instance = config.get_cloud_config_value(
'max_instance', vm_, __opts__, search_global=False, default=1
)
params = {'Action': 'RunInstances',
'MinCount': min_instance,
'MaxCount': max_instance}
# Normal instances should have no prefix.
spot_prefix = ''
image_id = vm_['image']
params[spot_prefix + 'ImageId'] = image_id
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
if userdata_file is None:
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
else:
        log.trace('userdata_file: {0}'.format(userdata_file))
        # Default to None so 'userdata' is always defined, even when the
        # configured file does not exist.
        userdata = None
        if os.path.exists(userdata_file):
            with salt.utils.fopen(userdata_file, 'r') as fh_:
                userdata = fh_.read()
if userdata is not None:
params['UserData'] = base64.b64encode(userdata)
vm_size = config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
)
params[spot_prefix + 'InstanceType'] = vm_size
ex_keyname = keyname(vm_)
if ex_keyname:
params[spot_prefix + 'KeyName'] = ex_keyname
ex_securitygroup = securitygroup(vm_)
if ex_securitygroup:
if not isinstance(ex_securitygroup, list):
params[spot_prefix + 'SecurityGroup.1'] = ex_securitygroup
else:
for counter, sg_ in enumerate(ex_securitygroup):
params[spot_prefix + 'SecurityGroup.{0}'.format(counter)] = sg_
ex_iam_profile = iam_profile(vm_)
if ex_iam_profile:
try:
if ex_iam_profile.startswith('arn:aws:iam:'):
params[
spot_prefix + 'IamInstanceProfile.Arn'
] = ex_iam_profile
else:
params[
spot_prefix + 'IamInstanceProfile.Name'
] = ex_iam_profile
except AttributeError:
raise SaltCloudConfigError(
'\'iam_profile\' should be a string value.'
)
az_ = get_availability_zone(vm_)
if az_ is not None:
params[spot_prefix + 'Placement.AvailabilityZone'] = az_
tenancy_ = get_tenancy(vm_)
if tenancy_ is not None:
if spot_config is not None:
raise SaltCloudConfigError(
'Spot instance config for {0} does not support '
'specifying tenancy.'.format(vm_['name'])
)
params['Placement.Tenancy'] = tenancy_
subnetid_ = get_subnetid(vm_)
if subnetid_ is not None:
params[spot_prefix + 'SubnetId'] = subnetid_
ex_securitygroupid = securitygroupid(vm_)
if ex_securitygroupid:
if not isinstance(ex_securitygroupid, list):
params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid
else:
for (counter, sg_) in enumerate(ex_securitygroupid):
params[
spot_prefix + 'SecurityGroupId.{0}'.format(counter)
] = sg_
placementgroup_ = get_placementgroup(vm_)
if placementgroup_ is not None:
params[spot_prefix + 'Placement.GroupName'] = placementgroup_
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
params.update(_param_from_config(spot_prefix + 'BlockDeviceMapping',
ex_blockdevicemappings))
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
eni_devices = []
for interface in network_interfaces:
log.debug('Create network interface: {0}'.format(interface))
_new_eni = _create_eni(interface)
eni_devices.append(_new_eni)
params.update(_param_from_config(spot_prefix + 'NetworkInterface',
eni_devices))
set_ebs_optimized = config.get_cloud_config_value(
'ebs_optimized', vm_, __opts__, search_global=False
)
if set_ebs_optimized is not None:
if not isinstance(set_ebs_optimized, bool):
raise SaltCloudConfigError(
'\'ebs_optimized\' should be a boolean value.'
)
params[spot_prefix + 'EbsOptimized'] = set_ebs_optimized
set_del_root_vol_on_destroy = config.get_cloud_config_value(
'del_root_vol_on_destroy', vm_, __opts__, search_global=False
)
if set_del_root_vol_on_destroy is not None:
if not isinstance(set_del_root_vol_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_root_vol_on_destroy\' should be a boolean value.'
)
vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy
if set_del_root_vol_on_destroy:
# first make sure to look up the root device name
# as Ubuntu and CentOS (and most likely other OSs)
# use different device identifiers
log.info('Attempting to look up root device name for image id {0} on '
'VM {1}'.format(image_id, vm_['name']))
rd_params = {
'Action': 'DescribeImages',
'ImageId.1': image_id
}
try:
rd_data = aws.query(rd_params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in rd_data:
return rd_data['error']
log.debug('EC2 Response: {0!r}'.format(rd_data))
except Exception as exc:
log.error(
'Error getting root device name for image id {0} for '
'VM {1}: \n{2}'.format(image_id, vm_['name'], exc),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise
# make sure we have a response
if not rd_data:
err_msg = 'There was an error querying EC2 for the root device ' \
'of image id {0}. Empty response.'.format(image_id)
raise SaltCloudSystemExit(err_msg)
# pull the root device name from the result and use it when
# launching the new VM
rd_name = None
if 'blockDeviceMapping' in rd_data[0]:
if rd_data[0]['blockDeviceMapping'] is None:
# Some ami instances do not have a root volume. Ignore such cases
rd_name = None
elif isinstance(rd_data[0]['blockDeviceMapping']['item'], list):
rd_name = rd_data[0]['blockDeviceMapping']['item'][0]['deviceName']
else:
rd_name = rd_data[0]['blockDeviceMapping']['item']['deviceName']
log.info('Found root device name: {0}'.format(rd_name))
if rd_name is not None:
if ex_blockdevicemappings:
dev_list = [
dev['DeviceName'] for dev in ex_blockdevicemappings
]
else:
dev_list = []
if rd_name in dev_list:
dev_index = dev_list.index(rd_name)
termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index)
params[termination_key] = str(set_del_root_vol_on_destroy).lower()
else:
dev_index = len(dev_list)
params[
'{0}BlockDeviceMapping.{1}.DeviceName'.format(
spot_prefix, dev_index
)
] = rd_name
params[
'{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(
spot_prefix, dev_index
)
] = str(set_del_root_vol_on_destroy).lower()
set_del_all_vols_on_destroy = config.get_cloud_config_value(
'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
)
if set_del_all_vols_on_destroy is not None:
if not isinstance(set_del_all_vols_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': params, 'location': location},
transport=__opts__['transport']
)
provider = get_provider(vm_)
try:
data = aws.query(params,
'instancesSet',
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if 'error' in data:
return data['error']
except Exception as exc:
log.error(
'Error creating {0} on EC2 when trying to run the initial '
'deployment: \n{1}'.format(
vm_['name'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
raise
# if we're using spot instances, we need to wait for the spot request
# to become active before we continue
if spot_config:
sir_id = data[0]['spotInstanceRequestId']
def __query_spot_instance_request(sir_id, location):
params = {'Action': 'DescribeSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for spot instance method
return False
if isinstance(data, dict) and 'error' in data:
log.warn(
'There was an error in the query. {0}'
.format(data['error'])
)
# Trigger a failure in the wait for spot instance method
return False
log.debug('Returned query data: {0}'.format(data))
if 'state' in data[0]:
state = data[0]['state']
if state == 'active':
return data
if state == 'open':
# Still waiting for an active state
log.info('Spot instance status: {0}'.format(
data[0]['status']['message']
))
return None
if state in ['cancelled', 'failed', 'closed']:
# Request will never be active, fail
                log.error('Spot instance request resulted in state \'{0}\'. '
                          'Nothing else we can do here.'.format(state))
return False
salt.utils.cloud.fire_event(
'event',
'waiting for spot instance',
'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']),
transport=__opts__['transport']
)
try:
data = _wait_for_spot_instance(
__query_spot_instance_request,
update_args=(sir_id, location),
timeout=config.get_cloud_config_value(
'wait_for_spot_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_spot_interval', vm_, __opts__, default=30),
interval_multiplier=config.get_cloud_config_value(
'wait_for_spot_interval_multiplier',
vm_,
__opts__,
default=1),
max_failures=config.get_cloud_config_value(
'wait_for_spot_max_failures',
vm_,
__opts__,
default=10),
)
log.debug('wait_for_spot_instance data {0}'.format(data))
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# Cancel the existing spot instance request
params = {'Action': 'CancelSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
log.debug('Canceled spot instance request {0}. Data '
'returned: {1}'.format(sir_id, data))
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
return data, vm_
def query_instance(vm_=None, call=None):
'''
Query an instance upon creation from the EC2 API
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The query_instance action must be called with -a or --action.'
)
instance_id = vm_['instance_id']
location = vm_.get('location', get_location(vm_))
salt.utils.cloud.fire_event(
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
{'instance_id': instance_id},
transport=__opts__['transport']
)
log.debug('The new VM instance_id is {0}'.format(instance_id))
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
provider = get_provider(vm_)
attempts = 5
while attempts > 0:
data, requesturl = aws.query(params, # pylint: disable=W0632
location=location,
provider=provider,
opts=__opts__,
return_url=True,
sigver='4')
log.debug('The query returned: {0}'.format(data))
if isinstance(data, dict) and 'error' in data:
log.warn(
'There was an error in the query. {0} attempts '
'remaining: {1}'.format(
attempts, data['error']
)
)
attempts -= 1
# Just a little delay between attempts...
time.sleep(1)
continue
if isinstance(data, list) and not data:
log.warn(
'Query returned an empty list. {0} attempts '
'remaining.'.format(attempts)
)
attempts -= 1
# Just a little delay between attempts...
time.sleep(1)
continue
break
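    # Note: this is a while/else -- the else branch runs only when every
    # attempt was exhausted without reaching the break above.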
else:
raise SaltCloudSystemExit(
'An error occurred while creating VM: {0}'.format(data['error'])
)
def __query_ip_address(params, url):
data = aws.query(params,
#requesturl=url,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for IP function
return False
if isinstance(data, dict) and 'error' in data:
log.warn(
'There was an error in the query. {0}'.format(data['error'])
)
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: {0}'.format(data))
if 'ipAddress' in data[0]['instancesSet']['item']:
return data
if ssh_interface(vm_) == 'private_ips' and \
'privateIpAddress' in data[0]['instancesSet']['item']:
return data
try:
data = salt.utils.cloud.wait_for_ip(
__query_ip_address,
update_args=(params, requesturl),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
if 'reactor' in vm_ and vm_['reactor'] is True:
salt.utils.cloud.fire_event(
'event',
'instance queried',
'salt/cloud/{0}/query_reactor'.format(vm_['name']),
{'data': data},
transport=__opts__['transport']
)
return data
def wait_for_instance(
vm_=None,
data=None,
ip_address=None,
display_ssh_output=True,
call=None,
):
'''
Wait for an instance upon creation from the EC2 API, to become available
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The wait_for_instance action must be called with -a or --action.'
)
if vm_ is None:
vm_ = {}
if data is None:
data = {}
ssh_gateway_config = vm_.get(
'ssh_gateway_config', get_ssh_gateway_config(vm_)
)
salt.utils.cloud.fire_event(
'event',
'waiting for ssh',
'salt/cloud/{0}/waiting_for_ssh'.format(vm_['name']),
{'ip_address': ip_address},
transport=__opts__['transport']
)
ssh_connect_timeout = config.get_cloud_config_value(
'ssh_connect_timeout', vm_, __opts__, 900 # 15 minutes
)
if config.get_cloud_config_value('win_installer', vm_, __opts__):
username = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
win_passwd = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
if win_passwd and win_passwd == 'auto':
log.debug('Waiting for auto-generated Windows EC2 password')
while True:
password_data = get_password_data(
name=vm_['name'],
kwargs={
'key_file': vm_['private_key'],
},
call='action',
)
log.debug(password_data)
win_passwd = password_data.get('password', None)
if win_passwd is None:
                    # This wait is so long because the password is unlikely
                    # to be generated for at least 4 minutes
time.sleep(60)
else:
break
if not salt.utils.cloud.wait_for_port(ip_address,
port=445,
timeout=ssh_connect_timeout):
raise SaltCloudSystemExit(
'Failed to connect to remote windows host'
)
if not salt.utils.cloud.validate_windows_cred(ip_address,
username,
win_passwd):
raise SaltCloudSystemExit(
'Failed to authenticate against remote windows host'
)
elif salt.utils.cloud.wait_for_port(ip_address,
timeout=ssh_connect_timeout,
gateway=ssh_gateway_config
):
        # If a known_hosts_file is configured, this instance will not be
        # accessible until it has a host key. Since this is provided on
        # supported instances by cloud-init, and visible to us only in the
        # console output (which may take several minutes to become
        # available), we have some more waiting to do here.
known_hosts_file = config.get_cloud_config_value(
'known_hosts_file', vm_, __opts__, default=None
)
if known_hosts_file:
console = {}
while 'output_decoded' not in console:
console = get_console_output(
instance_id=vm_['instance_id'],
call='action',
)
pprint.pprint(console)
time.sleep(5)
output = console['output_decoded']
comps = output.split('-----BEGIN SSH HOST KEY KEYS-----')
if len(comps) < 2:
# Fail; there are no host keys
return False
comps = comps[1].split('-----END SSH HOST KEY KEYS-----')
keys = ''
for line in comps[0].splitlines():
if not line:
continue
keys += '\n{0} {1}'.format(ip_address, line)
with salt.utils.fopen(known_hosts_file, 'a') as fp_:
fp_.write(keys)
for user in vm_['usernames']:
if salt.utils.cloud.wait_for_passwd(
host=ip_address,
username=user,
ssh_timeout=config.get_cloud_config_value(
'wait_for_passwd_timeout', vm_, __opts__, default=1 * 60
),
key_filename=vm_['key_filename'],
display_ssh_output=display_ssh_output,
gateway=ssh_gateway_config,
maxtries=config.get_cloud_config_value(
'wait_for_passwd_maxtries', vm_, __opts__, default=15
),
known_hosts_file=config.get_cloud_config_value(
'known_hosts_file', vm_, __opts__,
default='/dev/null'
),
):
__opts__['ssh_username'] = user
vm_['ssh_username'] = user
break
else:
raise SaltCloudSystemExit(
'Failed to authenticate against remote ssh'
)
else:
raise SaltCloudSystemExit(
'Failed to connect to remote ssh'
)
if 'reactor' in vm_ and vm_['reactor'] is True:
salt.utils.cloud.fire_event(
'event',
'ssh is available',
'salt/cloud/{0}/ssh_ready_reactor'.format(vm_['name']),
{'ip_address': ip_address},
transport=__opts__['transport']
)
return vm_
def create(vm_=None, call=None):
'''
Create a single VM from a data dict
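    CLI Example (illustrative; ``ec2-profile`` and ``myinstance`` are placeholders):
    .. code-block:: bash
        salt-cloud -p ec2-profile myinstance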
'''
if call:
raise SaltCloudSystemExit(
'You cannot create an instance with -a or -f.'
)
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['provider'],
},
transport=__opts__['transport']
)
salt.utils.cloud.cachedir_index_add(
vm_['name'], vm_['profile'], 'ec2', vm_['provider']
)
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined key_filename {0!r} does not exist'.format(
key_filename
)
)
vm_['key_filename'] = key_filename
# Get SSH Gateway config early to verify the private_key,
# if used, exists or not. We don't want to deploy an instance
# and not be able to access it via the gateway.
ssh_gateway_config = get_ssh_gateway_config(vm_)
vm_['ssh_gateway_config'] = ssh_gateway_config
location = get_location(vm_)
vm_['location'] = location
log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
vm_['usernames'] = salt.utils.cloud.ssh_usernames(
vm_,
__opts__,
default_users=(
'ec2-user', 'ubuntu', 'fedora', 'admin', 'bitnami', 'root'
)
)
if 'instance_id' in vm_:
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
else:
# Put together all of the information required to request the instance,
# and then fire off the request for it
data, vm_ = request_instance(vm_, location)
# If data is a str, it's an error
if isinstance(data, str):
log.error('Error requesting instance: {0}'.format(data))
return {}
# Pull the instance ID, valid for both spot and normal instances
# Multiple instances may have been spun up, get all their IDs
vm_['instance_id_list'] = []
for instance in data:
vm_['instance_id_list'].append(instance['instanceId'])
vm_['instance_id'] = vm_['instance_id_list'].pop()
if len(vm_['instance_id_list']) > 0:
# Multiple instances were spun up, get one now, and queue the rest
queue_instances(vm_['instance_id_list'])
# Wait for vital information, such as IP addresses, to be available
# for the new instance
data = query_instance(vm_)
# Now that the instance is available, tag it appropriately. Should
# mitigate race conditions with tags
tags = config.get_cloud_config_value('tag',
vm_,
__opts__,
{},
search_global=False)
if not isinstance(tags, dict):
raise SaltCloudConfigError(
'\'tag\' should be a dict.'
)
for value in six.itervalues(tags):
if not isinstance(value, str):
raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. '
'e.g. "2013-09-19T20:09:46Z".'
)
tags['Name'] = vm_['name']
salt.utils.cloud.fire_event(
'event',
'setting tags',
'salt/cloud/{0}/tagging'.format(vm_['name']),
{'tags': tags},
transport=__opts__['transport']
)
set_tags(
vm_['name'],
tags,
instance_id=vm_['instance_id'],
call='action',
location=location
)
network_interfaces = config.get_cloud_config_value(
'network_interfaces',
vm_,
__opts__,
search_global=False
)
if network_interfaces:
_update_enis(network_interfaces, data)
# At this point, the node is created and tagged, and now needs to be
# bootstrapped, once the necessary port is available.
log.info('Created node {0}'.format(vm_['name']))
instance = data[0]['instancesSet']['item']
# Wait for the necessary port to become available to bootstrap
if ssh_interface(vm_) == 'private_ips':
ip_address = instance['privateIpAddress']
log.info('Salt node data. Private_ip: {0}'.format(ip_address))
else:
ip_address = instance['ipAddress']
log.info('Salt node data. Public_ip: {0}'.format(ip_address))
vm_['ssh_host'] = ip_address
if get_salt_interface(vm_) == 'private_ips':
salt_ip_address = instance['privateIpAddress']
log.info('Salt interface set to: {0}'.format(salt_ip_address))
else:
salt_ip_address = instance['ipAddress']
log.debug('Salt interface set to: {0}'.format(salt_ip_address))
vm_['salt_host'] = salt_ip_address
display_ssh_output = config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
)
vm_ = wait_for_instance(
vm_, data, ip_address, display_ssh_output
)
# The instance is booted and accessible, let's Salt it!
ret = instance.copy()
# Get ANY defined volumes settings, merging data, in the following order
# 1. VM config
# 2. Profile config
# 3. Global configuration
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
salt.utils.cloud.fire_event(
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
{'volumes': volumes},
transport=__opts__['transport']
)
log.info('Create and attach volumes to node {0}'.format(vm_['name']))
created = create_attach_volumes(
vm_['name'],
{
'volumes': volumes,
'zone': ret['placement']['availabilityZone'],
'instance_id': ret['instanceId'],
'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
},
call='action'
)
ret['Attached Volumes'] = created
for key, value in salt.utils.cloud.bootstrap(vm_, __opts__).items():
ret.setdefault(key, value)
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
log.debug(
'{0[name]!r} VM creation details:\n{1}'.format(
vm_, pprint.pformat(instance)
)
)
event_data = {
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['provider'],
'instance_id': vm_['instance_id'],
}
if volumes:
event_data['volumes'] = volumes
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
event_data,
transport=__opts__['transport']
)
return ret
def queue_instances(instances):
'''
Queue a set of instances to be provisioned later. Expects a list.
Currently this only queries node data, and then places it in the cloud
cache (if configured). If the salt-cloud-reactor is being used, these
instances will be automatically provisioned using that.
For more information about the salt-cloud-reactor, see:
https://github.com/saltstack-formulas/salt-cloud-reactor
'''
for instance_id in instances:
node = _get_node(instance_id=instance_id)
salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
'''
Create and attach volumes to created node
'''
if call != 'action':
raise SaltCloudSystemExit(
'The create_attach_volumes action must be called with '
'-a or --action.'
)
if 'instance_id' not in kwargs:
kwargs['instance_id'] = _get_node(name)[name]['instanceId']
if isinstance(kwargs['volumes'], str):
volumes = yaml.safe_load(kwargs['volumes'])
else:
volumes = kwargs['volumes']
ret = []
for volume in volumes:
created = False
volume_name = '{0} on {1}'.format(volume['device'], name)
volume_dict = {
'volume_name': volume_name,
'zone': kwargs['zone']
}
if 'volume_id' in volume:
volume_dict['volume_id'] = volume['volume_id']
elif 'snapshot' in volume:
volume_dict['snapshot'] = volume['snapshot']
else:
volume_dict['size'] = volume['size']
if 'type' in volume:
volume_dict['type'] = volume['type']
if 'iops' in volume:
volume_dict['iops'] = volume['iops']
if 'encrypted' in volume:
volume_dict['encrypted'] = volume['encrypted']
if 'volume_id' not in volume_dict:
created_volume = create_volume(volume_dict, call='function', wait_to_finish=wait_to_finish)
created = True
for item in created_volume:
if 'volumeId' in item:
volume_dict['volume_id'] = item['volumeId']
attach = attach_volume(
name,
{'volume_id': volume_dict['volume_id'],
'device': volume['device']},
instance_id=kwargs['instance_id'],
call='action'
)
# Update the delvol parameter for this volume
delvols_on_destroy = kwargs.get('del_all_vols_on_destroy', None)
if attach and created and delvols_on_destroy is not None:
_toggle_delvol(instance_id=kwargs['instance_id'],
device=volume['device'],
value=delvols_on_destroy)
if attach:
msg = (
'{0} attached to {1} (aka {2}) as device {3}'.format(
volume_dict['volume_id'],
kwargs['instance_id'],
name,
volume['device']
)
)
log.info(msg)
ret.append(msg)
return ret
def stop(name, call=None):
'''
Stop a node
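    CLI Example (illustrative; ``mymachine`` is a placeholder VM name):
    .. code-block:: bash
        salt-cloud -a stop mymachine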
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
log.info('Stopping node {0}'.format(name))
instance_id = _get_node(name)[name]['instanceId']
params = {'Action': 'StopInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return result
def start(name, call=None):
'''
Start a node
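    CLI Example (illustrative; ``mymachine`` is a placeholder VM name):
    .. code-block:: bash
        salt-cloud -a start mymachine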
'''
if call != 'action':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
log.info('Starting node {0}'.format(name))
instance_id = _get_node(name)[name]['instanceId']
params = {'Action': 'StartInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return result
def set_tags(name=None,
tags=None,
call=None,
location=None,
instance_id=None,
resource_id=None,
kwargs=None): # pylint: disable=W0613
'''
Set tags for a resource. Normally a VM name or instance_id is passed in,
but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
salt-cloud -a set_tags resource_id=vol-3267ab32 tag=somestuff
'''
if kwargs is None:
kwargs = {}
if instance_id is None:
if 'resource_id' in kwargs:
resource_id = kwargs['resource_id']
del kwargs['resource_id']
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
if resource_id is None:
if instance_id is None:
            instance_id = _get_node(name=name, location=location)[name]['instanceId']
else:
instance_id = resource_id
# This second check is a safety, in case the above still failed to produce
# a usable ID
if instance_id is None:
return {
'Error': 'A valid instance_id or resource_id was not specified.'
}
params = {'Action': 'CreateTags',
'ResourceId.1': instance_id}
log.debug('Tags to set for {0}: {1}'.format(name, tags))
if kwargs and not tags:
tags = kwargs
for idx, (tag_k, tag_v) in enumerate(six.iteritems(tags)):
params['Tag.{0}.Key'.format(idx)] = tag_k
params['Tag.{0}.Value'.format(idx)] = tag_v
attempts = 5
while attempts >= 0:
result = aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
settags = get_tags(
instance_id=instance_id, call='action', location=location
)
log.debug('Setting the tags returned: {0}'.format(settags))
failed_to_set_tags = False
for tag in settags:
if tag['key'] not in tags:
# We were not setting this tag
continue
if str(tags.get(tag['key'])) != str(tag['value']):
# Not set to the proper value!?
failed_to_set_tags = True
break
if failed_to_set_tags:
log.warn(
'Failed to set tags. Remaining attempts {0}'.format(
attempts
)
)
attempts -= 1
# Just a little delay between attempts...
time.sleep(1)
continue
return settags
raise SaltCloudSystemExit(
'Failed to set tags on {0}!'.format(name)
)
def get_tags(name=None,
instance_id=None,
call=None,
location=None,
kwargs=None,
resource_id=None): # pylint: disable=W0613
'''
Retrieve tags for a resource. Normally a VM name or instance_id is passed
in, but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a get_tags mymachine
salt-cloud -a get_tags resource_id=vol-3267ab32
'''
if location is None:
location = get_location()
if instance_id is None:
if resource_id is None:
if name:
instances = list_nodes_full(location)
if name in instances:
instance_id = instances[name]['instanceId']
elif 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
elif 'resource_id' in kwargs:
instance_id = kwargs['resource_id']
else:
instance_id = resource_id
params = {'Action': 'DescribeTags',
'Filter.1.Name': 'resource-id',
'Filter.1.Value': instance_id}
return aws.query(params,
setname='tagSet',
location=location,
provider=get_provider(),
opts=__opts__,
sigver='4')
def del_tags(name=None,
kwargs=None,
call=None,
instance_id=None,
resource_id=None): # pylint: disable=W0613
'''
Delete tags for a resource. Normally a VM name or instance_id is passed in,
but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a del_tags mymachine tags=mytag,
salt-cloud -a del_tags mymachine tags=tag1,tag2,tag3
salt-cloud -a del_tags resource_id=vol-3267ab32 tags=tag1,tag2,tag3
'''
if kwargs is None:
kwargs = {}
if 'tags' not in kwargs:
raise SaltCloudSystemExit(
'A tag or tags must be specified using tags=list,of,tags'
)
if not name and 'resource_id' in kwargs:
instance_id = kwargs['resource_id']
del kwargs['resource_id']
if not instance_id:
instance_id = _get_node(name)[name]['instanceId']
params = {'Action': 'DeleteTags',
'ResourceId.1': instance_id}
for idx, tag in enumerate(kwargs['tags'].split(',')):
params['Tag.{0}.Key'.format(idx)] = tag
aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if resource_id:
return get_tags(resource_id=resource_id)
else:
return get_tags(instance_id=instance_id)
def rename(name, kwargs, call=None):
'''
    Properly rename a node. Pass in the new name as "newname".
CLI Example:
.. code-block:: bash
salt-cloud -a rename mymachine newname=yourmachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The rename action must be called with -a or --action.'
)
log.info('Renaming {0} to {1}'.format(name, kwargs['newname']))
set_tags(name, {'Name': kwargs['newname']}, call='action')
salt.utils.cloud.rename_key(
__opts__['pki_dir'], name, kwargs['newname']
)
def destroy(name, call=None):
'''
Destroy a node. Will check termination protection and warn if enabled.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
node_metadata = _get_node(name)
instance_id = node_metadata[name]['instanceId']
    sir_id = node_metadata[name].get('spotInstanceRequestId')
protected = show_term_protect(
name=name,
instance_id=instance_id,
call='action',
quiet=True
)
salt.utils.cloud.fire_event(
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
{'name': name, 'instance_id': instance_id},
transport=__opts__['transport']
)
if protected == 'true':
raise SaltCloudSystemExit(
'This instance has been protected from being destroyed. '
'Use the following command to disable protection:\n\n'
'salt-cloud -a disable_term_protect {0}'.format(
name
)
)
ret = {}
if config.get_cloud_config_value('rename_on_destroy',
get_configured_provider(),
__opts__,
search_global=False) is True:
newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
rename(name, kwargs={'newname': newname}, call='action')
log.info(
'Machine will be identified as {0} until it has been '
'cleaned up.'.format(
newname
)
)
ret['newname'] = newname
params = {'Action': 'TerminateInstances',
'InstanceId.1': instance_id}
location = get_location()
provider = get_provider()
result = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
log.info(result)
ret.update(result[0])
# If this instance is part of a spot instance request, we
# need to cancel it as well
if sir_id is not None:
params = {'Action': 'CancelSpotInstanceRequests',
'SpotInstanceRequestId.1': sir_id}
result = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
ret['spotInstance'] = result[0]
salt.utils.cloud.fire_event(
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
{'name': name, 'instance_id': instance_id},
transport=__opts__['transport']
)
salt.utils.cloud.cachedir_index_del(name)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
return ret
def reboot(name, call=None):
'''
Reboot a node.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot mymachine
'''
instance_id = _get_node(name)[name]['instanceId']
params = {'Action': 'RebootInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if result == []:
log.info('Complete')
return {'Reboot': 'Complete'}
def show_image(kwargs, call=None):
'''
Show the details from EC2 concerning an AMI
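    CLI Example (illustrative; the provider name and AMI id are placeholders):
    .. code-block:: bash
        salt-cloud -f show_image my-ec2-config image=ami-1624987f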
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_image action must be called with -f or --function.'
)
params = {'ImageId.1': kwargs['image'],
'Action': 'DescribeImages'}
result = aws.query(params,
setname='tagSet',
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
log.info(result)
return result
def show_instance(name=None, instance_id=None, call=None, kwargs=None):
'''
    Show the details from EC2 concerning an instance.
Can be called as an action (which requires a name):
.. code-block:: bash
salt-cloud -a show_instance myinstance
...or as a function (which requires either a name or instance_id):
.. code-block:: bash
salt-cloud -f show_instance my-ec2 name=myinstance
salt-cloud -f show_instance my-ec2 instance_id=i-d34db33f
'''
if not name and call == 'action':
raise SaltCloudSystemExit(
'The show_instance action requires a name.'
)
if call == 'function':
name = kwargs.get('name', None)
instance_id = kwargs.get('instance_id', None)
if not name and not instance_id:
raise SaltCloudSystemExit(
'The show_instance function requires '
'either a name or an instance_id'
)
node = _get_node(name=name, instance_id=instance_id)
salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
return node
def _get_node(name=None, instance_id=None, location=None):
if location is None:
location = get_location()
params = {'Action': 'DescribeInstances'}
if str(name).startswith('i-') and len(name) == 10:
instance_id = name
if instance_id:
params['InstanceId.1'] = instance_id
else:
params['Filter.1.Name'] = 'tag:Name'
params['Filter.1.Value.1'] = name
log.trace(params)
provider = get_provider()
attempts = 10
while attempts >= 0:
try:
instances = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
return _extract_instance_info(instances)
except KeyError:
attempts -= 1
log.debug(
'Failed to get the data for the node {0!r}. Remaining '
'attempts {1}'.format(
name, attempts
)
)
# Just a little delay between attempts...
time.sleep(0.5)
return {}
def list_nodes_full(location=None, call=None):
'''
Return a list of the VMs that are on the provider
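    CLI Example (illustrative; ``-F`` is the full query option):
    .. code-block:: bash
        salt-cloud -F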
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f '
'or --function.'
)
if not location:
ret = {}
locations = set(
get_location(vm_) for vm_ in six.itervalues(__opts__['profiles'])
if _vm_provider_driver(vm_)
)
for loc in locations:
ret.update(_list_nodes_full(loc))
return ret
return _list_nodes_full(location)
def _vm_provider_driver(vm_):
alias, driver = vm_['provider'].split(':')
if alias not in __opts__['providers']:
return None
if driver not in __opts__['providers'][alias]:
return None
return driver == 'ec2'
def _extract_name_tag(item):
if 'tagSet' in item:
tagset = item['tagSet']
if isinstance(tagset['item'], list):
for tag in tagset['item']:
if tag['key'] == 'Name':
return tag['value']
return item['instanceId']
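        # A single tag comes back as a dict; assume it is the Name tag and
        # return its value directly.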
return item['tagSet']['item']['value']
return item['instanceId']
def _extract_instance_info(instances):
'''
Given an instance query, return a dict of all instance data
'''
ret = {}
for instance in instances:
        # items can be a dict or a list (for stopped EC2 instances)
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
name = _extract_name_tag(item)
ret[name] = item
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
else:
item = instance['instancesSet']['item']
name = _extract_name_tag(item)
ret[name] = item
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
return ret
def _list_nodes_full(location=None):
'''
    Return a list of the VMs that are in this location
'''
provider = __active_provider_name__ or 'ec2'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
params = {'Action': 'DescribeInstances'}
instances = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if 'error' in instances:
raise SaltCloudSystemExit(
'An error occurred while listing nodes: {0}'.format(
instances['error']['Errors']['Error']['Message']
)
)
ret = _extract_instance_info(instances)
salt.utils.cloud.cache_node_list(ret, provider, __opts__)
return ret
def list_nodes_min(location=None, call=None):
'''
Return a list of the VMs that are on the provider. Only a list of VM names,
and their state, is returned. This is the minimum amount of information
needed to check for existing VMs.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
ret = {}
params = {'Action': 'DescribeInstances'}
instances = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
if 'error' in instances:
raise SaltCloudSystemExit(
'An error occurred while listing nodes: {0}'.format(
instances['error']['Errors']['Error']['Message']
)
)
for instance in instances:
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
state = item['instanceState']['name']
name = _extract_name_tag(item)
else:
item = instance['instancesSet']['item']
state = item['instanceState']['name']
name = _extract_name_tag(item)
ret[name] = {'state': state}
return ret
def list_nodes(call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
nodes = list_nodes_full(get_location())
if 'error' in nodes:
raise SaltCloudSystemExit(
'An error occurred while listing nodes: {0}'.format(
nodes['error']['Errors']['Error']['Message']
)
)
for node in nodes:
ret[node] = {
'id': nodes[node]['id'],
'image': nodes[node]['image'],
'size': nodes[node]['size'],
'state': nodes[node]['state'],
'private_ips': nodes[node]['private_ips'],
'public_ips': nodes[node]['public_ips'],
}
return ret
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(get_location()), __opts__['query.selection'], call,
)
def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
'''
    Show whether termination protection is enabled for an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_term_protect action must be called with -a or --action.'
)
if not instance_id:
instances = list_nodes_full(get_location())
instance_id = instances[name]['instanceId']
params = {'Action': 'DescribeInstanceAttribute',
'InstanceId': instance_id,
'Attribute': 'disableApiTermination'}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
disable_protect = False
for item in result:
if 'value' in item:
disable_protect = item['value']
break
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Termination Protection is {0} for {1}'.format(
            'enabled' if disable_protect == 'true' else 'disabled',
name
)
)
return disable_protect
def enable_term_protect(name, call=None):
'''
Enable termination protection on a node
CLI Example:
.. code-block:: bash
salt-cloud -a enable_term_protect mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The enable_term_protect action must be called with '
'-a or --action.'
)
return _toggle_term_protect(name, 'true')
def disable_term_protect(name, call=None):
'''
Disable termination protection on a node
CLI Example:
.. code-block:: bash
salt-cloud -a disable_term_protect mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The disable_term_protect action must be called with '
'-a or --action.'
)
return _toggle_term_protect(name, 'false')
def _toggle_term_protect(name, value):
'''
    Enable or disable termination protection on a node. Used internally by
    the enable_term_protect and disable_term_protect actions:
    .. code-block:: bash
        salt-cloud -a enable_term_protect mymachine
        salt-cloud -a disable_term_protect mymachine
'''
instances = list_nodes_full(get_location())
instance_id = instances[name]['instanceId']
params = {'Action': 'ModifyInstanceAttribute',
'InstanceId': instance_id,
'DisableApiTermination.Value': value}
result = aws.query(params,
location=get_location(),
provider=get_provider(),
return_root=True,
opts=__opts__,
sigver='4')
return show_term_protect(name=name, instance_id=instance_id, call='action')
def show_delvol_on_destroy(name, kwargs=None, call=None):
'''
    Show the delete-on-termination setting for all/specified EBS volumes attached to an instance
CLI Example:
.. code-block:: bash
salt-cloud -a show_delvol_on_destroy mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_delvol_on_destroy action must be called '
'with -a or --action.'
)
if not kwargs:
kwargs = {}
instance_id = kwargs.get('instance_id', None)
device = kwargs.get('device', None)
volume_id = kwargs.get('volume_id', None)
if instance_id is None:
instances = list_nodes_full()
instance_id = instances[name]['instanceId']
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
data = aws.query(params,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
if not isinstance(blockmap['item'], list):
blockmap['item'] = [blockmap['item']]
items = []
for idx, item in enumerate(blockmap['item']):
device_name = item['deviceName']
if device is not None and device != device_name:
continue
if volume_id is not None and volume_id != item['ebs']['volumeId']:
continue
info = {
'device_name': device_name,
'volume_id': item['ebs']['volumeId'],
'deleteOnTermination': item['ebs']['deleteOnTermination']
}
items.append(info)
return items
def keepvol_on_destroy(name, kwargs=None, call=None):
'''
Do not delete all/specified EBS volumes upon instance termination
CLI Example:
.. code-block:: bash
salt-cloud -a keepvol_on_destroy mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The keepvol_on_destroy action must be called with -a or --action.'
)
if not kwargs:
kwargs = {}
device = kwargs.get('device', None)
volume_id = kwargs.get('volume_id', None)
return _toggle_delvol(name=name, device=device,
volume_id=volume_id, value='false')
def delvol_on_destroy(name, kwargs=None, call=None):
'''
Delete all/specified EBS volumes upon instance termination
CLI Example:
.. code-block:: bash
salt-cloud -a delvol_on_destroy mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The delvol_on_destroy action must be called with -a or --action.'
)
if not kwargs:
kwargs = {}
device = kwargs.get('device', None)
volume_id = kwargs.get('volume_id', None)
return _toggle_delvol(name=name, device=device,
volume_id=volume_id, value='true')
def _toggle_delvol(name=None, instance_id=None, device=None, volume_id=None,
value=None, requesturl=None):
if not instance_id:
instances = list_nodes_full(get_location())
instance_id = instances[name]['instanceId']
if requesturl:
data = aws.query(requesturl=requesturl,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
else:
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
data, requesturl = aws.query(params, # pylint: disable=W0632
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
params = {'Action': 'ModifyInstanceAttribute',
'InstanceId': instance_id}
if not isinstance(blockmap['item'], list):
blockmap['item'] = [blockmap['item']]
for idx, item in enumerate(blockmap['item']):
device_name = item['deviceName']
if device is not None and device != device_name:
continue
if volume_id is not None and volume_id != item['ebs']['volumeId']:
continue
params['BlockDeviceMapping.{0}.DeviceName'.format(idx)] = device_name
params['BlockDeviceMapping.{0}.Ebs.DeleteOnTermination'.format(idx)] = value
aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return _get_node(instance_id=instance_id)
def create_volume(kwargs=None, call=None, wait_to_finish=False):
'''
Create a volume
CLI Examples:
.. code-block:: bash
salt-cloud -f create_volume my-ec2-config zone=us-east-1b
salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2", "val2"}'
'''
if call != 'function':
log.error(
'The create_volume function must be called with -f or --function.'
)
return False
if 'zone' not in kwargs:
log.error('An availability zone must be specified to create a volume.')
return False
if 'size' not in kwargs and 'snapshot' not in kwargs:
# This number represents GiB
kwargs['size'] = '10'
params = {'Action': 'CreateVolume',
'AvailabilityZone': kwargs['zone']}
if 'size' in kwargs:
params['Size'] = kwargs['size']
if 'snapshot' in kwargs:
params['SnapshotId'] = kwargs['snapshot']
if 'type' in kwargs:
params['VolumeType'] = kwargs['type']
if 'iops' in kwargs and kwargs.get('type', 'standard') == 'io1':
params['Iops'] = kwargs['iops']
if 'encrypted' in kwargs:
params['Encrypted'] = kwargs['encrypted']
log.debug(params)
data = aws.query(params,
return_url=True,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
r_data = {}
for d in data[0]:
for k, v in d.items():
r_data[k] = v
volume_id = r_data['volumeId']
# Allow tags to be set upon creation
if 'tags' in kwargs:
if isinstance(kwargs['tags'], six.string_types):
tags = yaml.safe_load(kwargs['tags'])
else:
tags = kwargs['tags']
if isinstance(tags, dict):
new_tags = set_tags(tags=tags,
resource_id=volume_id,
call='action',
location=get_location())
r_data['tags'] = new_tags
# Waits till volume is available
if wait_to_finish:
salt.utils.cloud.run_func_until_ret_arg(fun=describe_volumes,
kwargs={'volume_id': volume_id},
fun_call=call,
argument_being_watched='status',
required_argument_response='available')
return r_data
def attach_volume(name=None, kwargs=None, instance_id=None, call=None):
'''
Attach a volume to an instance
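    CLI Example (illustrative; the volume id and device are placeholders):
    .. code-block:: bash
        salt-cloud -a attach_volume mymachine volume_id=vol-12345abc device=/dev/sdb1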
'''
if call != 'action':
raise SaltCloudSystemExit(
'The attach_volume action must be called with -a or --action.'
)
if not kwargs:
kwargs = {}
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
if name and not instance_id:
instances = list_nodes_full(get_location())
instance_id = instances[name]['instanceId']
if not name and not instance_id:
log.error('Either a name or an instance_id is required.')
return False
if 'volume_id' not in kwargs:
log.error('A volume_id is required.')
return False
if 'device' not in kwargs:
log.error('A device is required (ex. /dev/sdb1).')
return False
params = {'Action': 'AttachVolume',
'VolumeId': kwargs['volume_id'],
'InstanceId': instance_id,
'Device': kwargs['device']}
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
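# Illustrative CLI usage for attach_volume, following the docstring convention
# used by create_volume above; the instance name, volume ID and device below
# are placeholders:
#
#     salt-cloud -a attach_volume mymachine volume_id=vol-12345678 device=/dev/sdb1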
def show_volume(kwargs=None, call=None):
'''
Wrapper around describe_volumes.
    Kept only for backwards compatibility; it may be deprecated later.
'''
if not kwargs:
kwargs = {}
return describe_volumes(kwargs, call)
def detach_volume(name=None, kwargs=None, instance_id=None, call=None):
'''
Detach a volume from an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The detach_volume action must be called with -a or --action.'
)
if not kwargs:
kwargs = {}
if 'volume_id' not in kwargs:
log.error('A volume_id is required.')
return False
params = {'Action': 'DetachVolume',
'VolumeId': kwargs['volume_id']}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def delete_volume(name=None, kwargs=None, instance_id=None, call=None):
'''
Delete a volume
'''
if not kwargs:
kwargs = {}
if 'volume_id' not in kwargs:
log.error('A volume_id is required.')
return False
params = {'Action': 'DeleteVolume',
'VolumeId': kwargs['volume_id']}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def describe_volumes(kwargs=None, call=None):
'''
Describe a volume (or volumes)
volume_id
One or more volume IDs. Multiple IDs must be separated by ",".
TODO: Add all of the filters.
'''
if call != 'function':
log.error(
'The describe_volumes function must be called with -f '
'or --function.'
)
return False
if not kwargs:
kwargs = {}
params = {'Action': 'DescribeVolumes'}
if 'volume_id' in kwargs:
        volume_ids = kwargs['volume_id'].split(',')
        for volume_index, volume_id in enumerate(volume_ids):
params['VolumeId.{0}'.format(volume_index)] = volume_id
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
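# Illustrative CLI usage for describe_volumes (the volume IDs are placeholders;
# multiple IDs are comma-separated, as noted in the docstring):
#
#     salt-cloud -f describe_volumes my-ec2-config volume_id=vol-0123,vol-4567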
def create_keypair(kwargs=None, call=None):
'''
Create an SSH keypair
'''
if call != 'function':
log.error(
'The create_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
params = {'Action': 'CreateKeyPair',
'KeyName': kwargs['keyname']}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def show_keypair(kwargs=None, call=None):
'''
Show the details of an SSH keypair
'''
if call != 'function':
log.error(
'The show_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
params = {'Action': 'DescribeKeyPairs',
'KeyName.1': kwargs['keyname']}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def delete_keypair(kwargs=None, call=None):
'''
Delete an SSH keypair
'''
if call != 'function':
log.error(
'The delete_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
params = {'Action': 'DeleteKeyPair',
'KeyName.1': kwargs['keyname']}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def create_snapshot(kwargs=None, call=None, wait_to_finish=False):
'''
Create a snapshot
'''
if call != 'function':
log.error(
'The create_snapshot function must be called with -f '
'or --function.'
)
return False
    if not kwargs:
        kwargs = {}
    if 'volume_id' not in kwargs:
log.error('A volume_id must be specified to create a snapshot.')
return False
if 'description' not in kwargs:
kwargs['description'] = ''
params = {'Action': 'CreateSnapshot'}
if 'volume_id' in kwargs:
params['VolumeId'] = kwargs['volume_id']
if 'description' in kwargs:
params['Description'] = kwargs['description']
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
r_data = {}
for d in data:
for k, v in d.items():
r_data[k] = v
snapshot_id = r_data['snapshotId']
# Waits till volume is available
if wait_to_finish:
salt.utils.cloud.run_func_until_ret_arg(fun=describe_snapshots,
kwargs={'snapshot_id': snapshot_id},
fun_call=call,
argument_being_watched='status',
required_argument_response='completed')
return data
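# Illustrative CLI usage for create_snapshot (the volume ID and description
# are placeholders):
#
#     salt-cloud -f create_snapshot my-ec2-config volume_id=vol-12345678 description='nightly backup'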
def delete_snapshot(kwargs=None, call=None):
'''
Delete a snapshot
'''
if call != 'function':
log.error(
'The delete_snapshot function must be called with -f '
'or --function.'
)
return False
    if not kwargs:
        kwargs = {}
    if 'snapshot_id' not in kwargs:
log.error('A snapshot_id must be specified to delete a snapshot.')
return False
params = {'Action': 'DeleteSnapshot'}
if 'snapshot_id' in kwargs:
params['SnapshotId'] = kwargs['snapshot_id']
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def copy_snapshot(kwargs=None, call=None):
'''
Copy a snapshot
'''
if call != 'function':
log.error(
'The copy_snapshot function must be called with -f or --function.'
)
return False
    if not kwargs:
        kwargs = {}
    if 'source_region' not in kwargs:
log.error('A source_region must be specified to copy a snapshot.')
return False
if 'source_snapshot_id' not in kwargs:
log.error('A source_snapshot_id must be specified to copy a snapshot.')
return False
if 'description' not in kwargs:
kwargs['description'] = ''
params = {'Action': 'CopySnapshot'}
if 'source_region' in kwargs:
params['SourceRegion'] = kwargs['source_region']
if 'source_snapshot_id' in kwargs:
params['SourceSnapshotId'] = kwargs['source_snapshot_id']
if 'description' in kwargs:
params['Description'] = kwargs['description']
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
def describe_snapshots(kwargs=None, call=None):
'''
Describe a snapshot (or snapshots)
snapshot_id
One or more snapshot IDs. Multiple IDs must be separated by ",".
owner
Return the snapshots owned by the specified owner. Valid values
include: self, amazon, <AWS Account ID>. Multiple values must be
separated by ",".
restorable_by
One or more AWS accounts IDs that can create volumes from the snapshot.
Multiple aws account IDs must be separated by ",".
TODO: Add all of the filters.
'''
if call != 'function':
log.error(
            'The describe_snapshots function must be called with -f '
'or --function.'
)
return False
params = {'Action': 'DescribeSnapshots'}
    # The AWS API uses the singular form (snapshot_id); accept the plural
    # snapshot_ids as a convenience alias.
    if not kwargs:
        kwargs = {}
    if 'snapshot_ids' in kwargs:
kwargs['snapshot_id'] = kwargs['snapshot_ids']
if 'snapshot_id' in kwargs:
snapshot_ids = kwargs['snapshot_id'].split(',')
for snapshot_index, snapshot_id in enumerate(snapshot_ids):
params['SnapshotId.{0}'.format(snapshot_index)] = snapshot_id
if 'owner' in kwargs:
owners = kwargs['owner'].split(',')
for owner_index, owner in enumerate(owners):
params['Owner.{0}'.format(owner_index)] = owner
if 'restorable_by' in kwargs:
restorable_bys = kwargs['restorable_by'].split(',')
for restorable_by_index, restorable_by in enumerate(restorable_bys):
params[
'RestorableBy.{0}'.format(restorable_by_index)
] = restorable_by
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data
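# Illustrative CLI usage for describe_snapshots, exercising the filters parsed
# above (the IDs are placeholders; 'self' is one of the documented owner values):
#
#     salt-cloud -f describe_snapshots my-ec2-config snapshot_id=snap-0123,snap-4567 owner=self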
def get_console_output(
name=None,
instance_id=None,
call=None,
kwargs=None,
):
'''
Show the console output from the instance.
By default, returns decoded data, not the Base64-encoded data that is
actually returned from the EC2 API.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The get_console_output action must be called with '
'-a or --action.'
)
if not instance_id:
instance_id = _get_node(name)[name]['instanceId']
if kwargs is None:
kwargs = {}
if instance_id is None:
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
params = {'Action': 'GetConsoleOutput',
'InstanceId': instance_id}
ret = {}
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for item in data:
        if next(six.iterkeys(item)) == 'output':
            ret['output_decoded'] = binascii.a2b_base64(next(six.itervalues(item)))
        else:
            ret[next(six.iterkeys(item))] = next(six.itervalues(item))
return ret
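# Illustrative CLI usage for get_console_output; the returned mapping includes
# 'output_decoded' with the Base64-decoded console text, as built above:
#
#     salt-cloud -a get_console_output mymachine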
def get_password_data(
name=None,
kwargs=None,
instance_id=None,
call=None,
):
'''
Return password data for a Windows instance.
By default only the encrypted password data will be returned. However, if a
key_file is passed in, then a decrypted password will also be returned.
Note that the key_file references the private key that was used to generate
the keypair associated with this instance. This private key will _not_ be
transmitted to Amazon; it is only used internally inside of Salt Cloud to
decrypt data _after_ it has been received from Amazon.
CLI Examples:
.. code-block:: bash
salt-cloud -a get_password_data mymachine
salt-cloud -a get_password_data mymachine key_file=/root/ec2key.pem
Note: PKCS1_v1_5 was added in PyCrypto 2.5
'''
if call != 'action':
raise SaltCloudSystemExit(
'The get_password_data action must be called with '
'-a or --action.'
)
if not instance_id:
instance_id = _get_node(name)[name]['instanceId']
if kwargs is None:
kwargs = {}
if instance_id is None:
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
params = {'Action': 'GetPasswordData',
'InstanceId': instance_id}
ret = {}
data = aws.query(params,
return_root=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
for item in data:
        ret[next(six.iterkeys(item))] = next(six.itervalues(item))
if not HAS_PYCRYPTO:
return ret
if 'key' not in kwargs:
if 'key_file' in kwargs:
with salt.utils.fopen(kwargs['key_file'], 'r') as kf_:
kwargs['key'] = kf_.read()
if 'key' in kwargs:
pwdata = ret.get('passwordData', None)
if pwdata is not None:
rsa_key = kwargs['key']
pwdata = base64.b64decode(pwdata)
dsize = Crypto.Hash.SHA.digest_size
sentinel = Crypto.Random.new().read(15 + dsize)
key_obj = Crypto.PublicKey.RSA.importKey(rsa_key)
key_obj = PKCS1_v1_5.new(key_obj)
ret['password'] = key_obj.decrypt(pwdata, sentinel)
return ret
|
the-stack_0_20194 | import logging
from werkzeug.utils import cached_property
from piecrust.data.base import MergedMapping
from piecrust.data.linker import PageLinkerData
from piecrust.data.pagedata import PageData
from piecrust.data.paginator import Paginator
from piecrust.data.piecrustdata import PieCrustData
from piecrust.data.providersdata import DataProvidersData
from piecrust.routing import CompositeRouteFunction
logger = logging.getLogger(__name__)
class DataBuildingContext(object):
def __init__(self, qualified_page, page_num=1):
self.page = qualified_page
self.page_num = page_num
self.pagination_source = None
self.pagination_filter = None
@property
def app(self):
return self.page.app
@cached_property
def uri(self):
return self.page.getUri(self.page_num)
def build_page_data(ctx):
app = ctx.app
page = ctx.page
pgn_source = ctx.pagination_source or get_default_pagination_source(page)
first_uri = ctx.page.getUri(1)
pc_data = PieCrustData()
config_data = PageData(page, ctx)
paginator = Paginator(page, pgn_source,
page_num=ctx.page_num,
pgn_filter=ctx.pagination_filter)
assetor = page.source.buildAssetor(page, first_uri)
linker = PageLinkerData(page.source, page.rel_path)
data = {
'piecrust': pc_data,
'page': config_data,
'assets': assetor,
'pagination': paginator,
'family': linker
}
for route in app.routes:
name = route.func_name
func = data.get(name)
if func is None:
func = CompositeRouteFunction()
func.addFunc(route)
data[name] = func
elif isinstance(func, CompositeRouteFunction):
func.addFunc(route)
else:
raise Exception("Route function '%s' collides with an "
"existing function or template data." %
name)
#TODO: handle slugified taxonomy terms.
site_data = app.config.getAll()
providers_data = DataProvidersData(page)
data = MergedMapping([data, providers_data, site_data])
# Do this at the end because we want all the data to be ready to be
# displayed in the debugger window.
if (app.config.get('site/show_debug_info') and
not app.config.get('baker/is_baking')):
pc_data.enableDebugInfo(page)
return data
def build_layout_data(page, page_data, contents):
for name, txt in contents.items():
if name in page_data:
logger.warning("Content segment '%s' will hide existing data." %
name)
page_data._prependMapping(contents)
def get_default_pagination_source(page):
app = page.app
source_name = page.config.get('source') or page.config.get('blog')
if source_name is None:
source_name = app.config.get('site/default_pagination_source')
if source_name is None:
blog_names = app.config.get('site/blogs')
if blog_names is not None:
source_name = blog_names[0]
else:
source_name = app.sources[0].name
source = app.getSource(source_name)
return source
|
the-stack_0_20195 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Raw Point Collection
This collection provides very fast points. Output quality is ugly so it must be
used at small sizes only (2 to 3 pixels). You've been warned.
"""
import numpy as np
from vispy import glsl
from .collection import Collection
from ..transforms import NullTransform
class RawPointCollection(Collection):
"""
Raw Point Collection
This collection provides very fast points. Output quality is ugly so it
    must be used at small sizes only (2 to 3 pixels). You've been warned.
"""
def __init__(
self, user_dtype=None, transform=None, vertex=None, fragment=None, **kwargs
):
"""
Initialize the collection.
Parameters
----------
user_dtype: list
            The base dtype can be completed (appended) by the user_dtype. It
            only makes sense if the user also provides vertex and/or fragment
            shaders.
transform : Transform instance
Used to define the transform(vec4) function
vertex: string
Vertex shader code
fragment: string
Fragment shader code
color : string
'local', 'shared' or 'global'
"""
base_dtype = [
("position", (np.float32, 3), "!local", (0, 0, 0)),
("size", (np.float32, 1), "global", 3.0),
("color", (np.float32, 4), "global", (0, 0, 0, 1)),
]
dtype = base_dtype
if user_dtype:
dtype.extend(user_dtype)
if vertex is None:
vertex = glsl.get("collections/raw-point.vert")
if transform is None:
transform = NullTransform()
self.transform = transform
if fragment is None:
fragment = glsl.get("collections/raw-point.frag")
Collection.__init__(
self,
dtype=dtype,
itype=None,
mode="points",
vertex=vertex,
fragment=fragment,
**kwargs
)
# Set hooks if necessary
program = self._programs[0]
program.vert["transform"] = self.transform
def append(self, P, itemsize=None, **kwargs):
"""
Append a new set of vertices to the collection.
        For each keyword argument, n is the number of vertices (local) or the
        number of items (shared).
Parameters
----------
        P : np.array
            Vertex positions of the point(s) to be added
        itemsize: int or None
            Size of an individual item
        color : list, array or 4-tuple
            Point color
"""
itemsize = itemsize or 1
itemcount = int(len(P) / itemsize)
V = np.empty(len(P), dtype=self.vtype)
# Apply default values on vertices
for name in self.vtype.names:
if name not in ["position", "collection_index"]:
V[name] = kwargs.get(name, self._defaults[name])
V["position"] = P
# Uniforms
if self.utype:
U = np.zeros(itemcount, dtype=self.utype)
for name in self.utype.names:
if name not in ["__unused__"]:
U[name] = kwargs.get(name, self._defaults[name])
else:
U = None
Collection.append(self, vertices=V, uniforms=U, itemsize=itemsize)
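# A minimal usage sketch (not part of the module): it assumes a vispy canvas
# and draw loop exist elsewhere, and the point count, positions and colour
# below are arbitrary placeholders.
#
#     import numpy as np
#     points = RawPointCollection(color="shared")
#     P = np.random.uniform(-1.0, 1.0, (10000, 3)).astype(np.float32)
#     points.append(P, color=(1, 0, 0, 1))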
|
the-stack_0_20196 | # Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref, collections.abc
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown',
'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory',
'lastResort', 'raiseExceptions']
import threading
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
# The following module attributes are no longer updated.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelToName = {
CRITICAL: 'CRITICAL',
ERROR: 'ERROR',
WARNING: 'WARNING',
INFO: 'INFO',
DEBUG: 'DEBUG',
NOTSET: 'NOTSET',
}
_nameToLevel = {
'CRITICAL': CRITICAL,
'FATAL': FATAL,
'ERROR': ERROR,
'WARN': WARNING,
'WARNING': WARNING,
'INFO': INFO,
'DEBUG': DEBUG,
'NOTSET': NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
# See Issues #22386, #27937 and #29220 for why it's this way
result = _levelToName.get(level)
if result is not None:
return result
result = _nameToLevel.get(level)
if result is not None:
return result
return "Level %s" % level
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelToName[level] = levelName
_nameToLevel[levelName] = level
finally:
_releaseLock()
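# Illustrative behaviour of the two helpers above (the NOTICE level is an
# arbitrary example, not a predefined level):
#
#     addLevelName(25, 'NOTICE')
#     getLevelName(25)        -> 'NOTICE'
#     getLevelName('NOTICE')  -> 25
#     getLevelName(26)        -> 'Level 26'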
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except Exception:
return sys.exc_info()[2].tb_frame.f_back
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#
_srcfile = os.path.normcase(addLevelName.__code__.co_filename)
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called. You can also do this if you want to avoid
# the overhead of fetching caller information, even when _getframe() is
# available.
#if not hasattr(sys, '_getframe'):
# _srcfile = None
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _nameToLevel:
raise ValueError("Unknown level: %r" % level)
rv = _nameToLevel[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = threading.RLock()
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
# Prevent a held logging lock from blocking a child from logging.
if not hasattr(os, 'register_at_fork'): # Windows and friends.
def _register_at_fork_acquire_release(instance):
pass # no-op when os.register_at_fork does not exist.
else: # The os.register_at_fork API exists
os.register_at_fork(before=_acquireLock,
after_in_child=_releaseLock,
after_in_parent=_releaseLock)
# A collection of instances with acquire and release methods (logging.Handler)
# to be called before and after fork. The weakref avoids us keeping discarded
# Handler instances alive forever in case an odd program creates and destroys
# many over its lifetime.
_at_fork_acquire_release_weakset = weakref.WeakSet()
def _register_at_fork_acquire_release(instance):
# We put the instance itself in a single WeakSet as we MUST have only
# one atomic weak ref. used by both before and after atfork calls to
# guarantee matched pairs of acquire and release calls.
_at_fork_acquire_release_weakset.add(instance)
def _at_fork_weak_calls(method_name):
for instance in _at_fork_acquire_release_weakset:
method = getattr(instance, method_name)
try:
method()
except Exception as err:
# Similar to what PyErr_WriteUnraisable does.
print("Ignoring exception from logging atfork", instance,
method_name, "method:", err, file=sys.stderr)
def _before_at_fork_weak_calls():
_at_fork_weak_calls('acquire')
def _after_at_fork_weak_calls():
_at_fork_weak_calls('release')
os.register_at_fork(before=_before_at_fork_weak_calls,
after_in_child=_after_at_fork_weak_calls,
after_in_parent=_after_at_fork_weak_calls)
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
# Issue #21172: a request was made to relax the isinstance check
# to hasattr(args[0], '__getitem__'). However, the docs on string
# formatting still seem to suggest a mapping object is required.
# Thus, while not removing the isinstance check, it does now look
# for collections.abc.Mapping rather than, as before, dict.
if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping)
and args[0]):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
__repr__ = __str__
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary,
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
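# Illustrative use of makeLogRecord (attribute names follow LogRecord above;
# the values are made up):
#
#     rec = makeLogRecord({'name': 'my.app', 'levelno': INFO,
#                          'levelname': 'INFO', 'msg': 'hello'})
#     rec.getMessage()  -> 'hello'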
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
_STYLES = {
'%': (PercentStyle, BASIC_FORMAT),
'{': (StrFormatStyle, '{levelname}:{name}:{message}'),
'$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    style-dependent default value, "%(message)s", "{message}", or
"${message}", is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument. If datefmt is omitted, you get an
ISO8601-like (or RFC 3339-like) format.
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style][0](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
The resulting string is returned. This function uses a user-configurable
function to convert the creation time to a tuple. By default,
time.localtime() is used; to change this for a particular formatter
instance, set the 'converter' attribute to a function with the same
signature as time.localtime() or time.gmtime(). To change it for all
formatters, for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
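# A small sketch of the three supported format styles (the format strings are
# examples only):
#
#     Formatter('%(levelname)s:%(name)s:%(message)s')              # '%' style
#     Formatter('{levelname}:{name}:{message}', style='{')         # '{' style
#     Formatter('${levelname}:${name}:${message}', style='$')      # '$' style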
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
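# Behaviour sketch for the hierarchy rule documented above; record_named() is a
# stand-in for a LogRecord whose ``name`` attribute is the given string:
#
#     f = Filter('A.B')
#     f.filter(record_named('A.B'))    -> True
#     f.filter(record_named('A.B.C'))  -> True
#     f.filter(record_named('A.BB'))   -> False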
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. It can also be called from another thread. So we need to
# pre-emptively grab the necessary globals and check if they're None,
# to prevent race conditions and failures during interpreter shutdown.
acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
if acquire and release and handlers:
acquire()
try:
if wr in handlers:
handlers.remove(wr)
finally:
release()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
self.lock = threading.RLock()
_register_at_fork_acquire_release(self)
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
t, v, tb = sys.exc_info()
try:
sys.stderr.write('--- Logging error ---\n')
traceback.print_exception(t, v, tb, None, sys.stderr)
sys.stderr.write('Call stack:\n')
# Walk the stack frame up until we're out of logging,
# so as to print the calling context.
frame = tb.tb_frame
while (frame and os.path.dirname(frame.f_code.co_filename) ==
__path__[0]):
frame = frame.f_back
if frame:
traceback.print_stack(frame, file=sys.stderr)
else:
# couldn't find the right stack frame, for some reason
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
# Issue 18671: output logging message and arguments
try:
sys.stderr.write('Message: %r\n'
'Arguments: %s\n' % (record.msg,
record.args))
except Exception:
sys.stderr.write('Unable to print the message and arguments'
' - possible formatting error.\nUse the'
' traceback above to help find the error.\n'
)
except OSError: #pragma: no cover
pass # see issue 5971
finally:
del t, v, tb
def __repr__(self):
level = getLevelName(self.level)
return '<%s (%s)>' % (self.__class__.__name__, level)
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
# issue 35046: merged two stream.writes into one.
stream.write(msg + self.terminator)
self.flush()
except Exception:
self.handleError(record)
def setStream(self, stream):
"""
Sets the StreamHandler's stream to the specified value,
if it is different.
Returns the old stream, if the stream was changed, or None
if it wasn't.
"""
if stream is self.stream:
result = None
else:
result = self.stream
self.acquire()
try:
self.flush()
self.stream = stream
finally:
self.release()
return result
def __repr__(self):
level = getLevelName(self.level)
name = getattr(self.stream, 'name', '')
if name:
name += ' '
return '<%s %s(%s)>' % (self.__class__.__name__, name, level)
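# Minimal wiring sketch for a StreamHandler (stand-alone; attaching it to a
# Logger is shown by the Logger class further below in this module):
#
#     handler = StreamHandler()                  # defaults to sys.stderr
#     handler.setLevel(DEBUG)
#     handler.setFormatter(Formatter(BASIC_FORMAT))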
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
# Issue #27493: add support for Path objects to be passed in
filename = os.fspath(filename)
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
try:
if self.stream:
try:
self.flush()
finally:
stream = self.stream
self.stream = None
if hasattr(stream, "close"):
stream.close()
finally:
# Issue #19523: call unconditionally to
# prevent a handler leak when delay is set
StreamHandler.close(self)
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
def __repr__(self):
level = getLevelName(self.level)
return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)
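# Sketch of the delay behaviour implemented above (the path is a placeholder):
#
#     fh = FileHandler('/tmp/app.log', mode='a', delay=True)
#     fh.stream  -> None until the first record is emitted, then an open file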
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
        #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
def _clear_cache(self):
"""
Clear the cache for all loggers in loggerDict
Called when level changes are made
"""
_acquireLock()
for logger in self.loggerDict.values():
if isinstance(logger, Logger):
logger._cache.clear()
self.root._cache.clear()
_releaseLock()
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
self._cache = {}
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
self.manager._clear_cache()
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, exc_info=True, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
self.error(msg, *args, exc_info=exc_info, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if isinstance(exc_info, BaseException):
exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
elif not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
try:
return self._cache[level]
except KeyError:
_acquireLock()
if self.manager.disable >= level:
is_enabled = self._cache[level] = False
else:
is_enabled = self._cache[level] = level >= self.getEffectiveLevel()
_releaseLock()
return is_enabled
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
def __repr__(self):
level = getLevelName(self.getEffectiveLevel())
return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)
def __reduce__(self):
# In general, only the root logger will not be accessible via its name.
# However, the root logger's class has its own __reduce__ method.
if getLogger(self.name) is not self:
import pickle
raise pickle.PicklingError('logger cannot be pickled')
return getLogger, (self.name,)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
def __reduce__(self):
return getLogger, ()
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, exc_info=True, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
return self.logger.isEnabledFor(level)
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level log implementation, proxied to allow nested logger adapters.
"""
return self.logger._log(
level,
msg,
args,
exc_info=exc_info,
extra=extra,
stack_info=stack_info,
)
@property
def manager(self):
return self.logger.manager
@manager.setter
def manager(self, value):
self.logger.manager = value
@property
def name(self):
return self.logger.name
def __repr__(self):
logger = self.logger
level = getLevelName(logger.getEffectiveLevel())
return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)
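# Illustrative usage sketch, not part of the original module: a LoggerAdapter
# wraps a logger and injects its "extra" dict into every record, so a format
# string can reference those fields without each call site passing them. The
# field name "connid" and its value are made up for this example.
#
#     import logging
#     logging.basicConfig(format="%(connid)s %(levelname)s %(message)s")
#     adapter = logging.LoggerAdapter(logging.getLogger(__name__), {"connid": "abc123"})
#     adapter.warning("connection reset")   # record carries record.connid == "abc123"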
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
``stream``.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.pop("handlers", None)
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.pop("filename", None)
mode = kwargs.pop("filemode", 'a')
if filename:
h = FileHandler(filename, mode)
else:
stream = kwargs.pop("stream", None)
h = StreamHandler(stream)
handlers = [h]
dfs = kwargs.pop("datefmt", None)
style = kwargs.pop("style", '%')
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
fs = kwargs.pop("format", _STYLES[style][1])
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.pop("level", None)
if level is not None:
root.setLevel(level)
if kwargs:
keys = ', '.join(kwargs.keys())
raise ValueError('Unrecognised argument(s): %s' % keys)
finally:
_releaseLock()
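# Illustrative usage sketch, not part of the original module, showing the
# one-shot configuration described in the docstring above; the file name and
# format string are made-up values.
#
#     import logging
#     logging.basicConfig(
#         filename="app.log", filemode="a",
#         format="%(asctime)s %(levelname)s %(name)s: %(message)s",
#         level=logging.INFO,
#     )
#     logging.getLogger(__name__).info("configured")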
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, exc_info=True, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
error(msg, *args, exc_info=exc_info, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level=CRITICAL):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
root.manager._clear_cache()
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (OSError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except: # ignore everything, as we're shutting down
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
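# Illustrative library-author sketch, not part of the original module: attach a
# NullHandler to a library's top-level logger so applications that never
# configure logging do not trigger the one-off "no handlers" warning. The
# package name "mylib" is hypothetical.
#
#     import logging
#     logging.getLogger("mylib").addHandler(logging.NullHandler())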
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
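# Illustrative usage sketch, not part of the original module: route
# warnings.warn() output through the "py.warnings" logger as described above.
#
#     import logging, warnings
#     logging.basicConfig(level=logging.WARNING)
#     logging.captureWarnings(True)
#     warnings.warn("this now shows up as a WARNING log record")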
|
the-stack_0_20197 | # pylint: disable=C0115,C0116, E0213
import hashlib
import logging
import os
from typing import Dict, List, Optional, Union, Tuple, Any, MutableMapping
import bcrypt
import toml
from pydantic import (
AnyHttpUrl,
BaseSettings,
PostgresDsn,
ValidationError,
validator,
)
from pydantic.env_settings import SettingsSourceCallable
from fidesops.common_exceptions import MissingConfig
from fidesops.util.logger import NotPii
logger = logging.getLogger(__name__)
class FidesSettings(BaseSettings):
"""Class used as a base model for configuration subsections."""
class Config:
# Set environment variables to take precedence over init values
@classmethod
def customise_sources(
cls,
init_settings: SettingsSourceCallable,
env_settings: SettingsSourceCallable,
file_secret_settings: SettingsSourceCallable,
) -> Tuple[SettingsSourceCallable, ...]:
return env_settings, init_settings
class DatabaseSettings(FidesSettings):
"""Configuration settings for Postgres."""
SERVER: str
USER: str
PASSWORD: str
DB: str
PORT: str = "5432"
TEST_DB: str = "test"
SQLALCHEMY_DATABASE_URI: Optional[PostgresDsn] = None
SQLALCHEMY_TEST_DATABASE_URI: Optional[PostgresDsn] = None
@validator("SQLALCHEMY_DATABASE_URI", pre=True)
def assemble_db_connection(cls, v: Optional[str], values: Dict[str, str]) -> str:
"""Join DB connection credentials into a connection string"""
if isinstance(v, str):
return v
return PostgresDsn.build(
scheme="postgresql",
user=values["USER"],
password=values["PASSWORD"],
host=values["SERVER"],
port=values.get("PORT"),
path=f"/{values.get('DB') or ''}",
)
@validator("SQLALCHEMY_TEST_DATABASE_URI", pre=True)
def assemble_test_db_connection(
cls, v: Optional[str], values: Dict[str, str]
) -> str:
"""Join DB connection credentials into a connection string"""
if isinstance(v, str):
return v
return PostgresDsn.build(
scheme="postgresql",
user=values["USER"],
password=values["PASSWORD"],
host=values["SERVER"],
port=values["PORT"],
path=f"/{values.get('TEST_DB') or ''}",
)
class Config:
env_prefix = "FIDESOPS__DATABASE__"
class ExecutionSettings(FidesSettings):
"""Configuration settings for execution."""
TASK_RETRY_COUNT: int
TASK_RETRY_DELAY: int # In seconds
TASK_RETRY_BACKOFF: int
class Config:
env_prefix = "FIDESOPS__EXECUTION__"
class RedisSettings(FidesSettings):
"""Configuration settings for Redis."""
HOST: str
PORT: int = 6379
PASSWORD: str
CHARSET: str = "utf8"
DECODE_RESPONSES: bool = True
DEFAULT_TTL_SECONDS: int = 3600
DB_INDEX: int
class Config:
env_prefix = "FIDESOPS__REDIS__"
class SecuritySettings(FidesSettings):
"""Configuration settings for Security variables."""
AES_ENCRYPTION_KEY_LENGTH: int = 16
AES_GCM_NONCE_LENGTH: int = 12
APP_ENCRYPTION_KEY: str
@validator("APP_ENCRYPTION_KEY")
def validate_encryption_key_length(
cls, v: Optional[str], values: Dict[str, str]
) -> Optional[str]:
"""Validate the encryption key is exactly 32 bytes"""
if v is None or len(v.encode(values.get("ENCODING", "UTF-8"))) != 32:
raise ValueError("APP_ENCRYPTION_KEY value must be exactly 32 bytes long")
return v
CORS_ORIGINS: List[AnyHttpUrl] = []
@validator("CORS_ORIGINS", pre=True)
def assemble_cors_origins(cls, v: Union[str, List[str]]) -> Union[List[str], str]:
"""Return a list of valid origins for CORS requests"""
if isinstance(v, str) and not v.startswith("["):
return [i.strip() for i in v.split(",")]
if isinstance(v, (list, str)):
return v
raise ValueError(v)
ENCODING: str = "UTF-8"
# OAuth
OAUTH_ROOT_CLIENT_ID: str
OAUTH_ROOT_CLIENT_SECRET: str
OAUTH_ROOT_CLIENT_SECRET_HASH: Optional[Tuple]
OAUTH_ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8
OAUTH_CLIENT_ID_LENGTH_BYTES = 16
OAUTH_CLIENT_SECRET_LENGTH_BYTES = 16
@validator("OAUTH_ROOT_CLIENT_SECRET_HASH", pre=True)
def assemble_root_access_token(
cls, v: Optional[str], values: Dict[str, str]
) -> Tuple:
"""Returns a hashed value of the root access key. This is hashed as it is not wise to
        return a plaintext form of the root credential anywhere in the system."""
value = values["OAUTH_ROOT_CLIENT_SECRET"]
encoding = values["ENCODING"]
assert value is not None
assert encoding is not None
salt = bcrypt.gensalt()
hashed_client_id = hashlib.sha512(value.encode(encoding) + salt).hexdigest()
return hashed_client_id, salt
class Config:
env_prefix = "FIDESOPS__SECURITY__"
class FidesopsConfig(FidesSettings):
"""Configuration variables for the FastAPI project"""
database: DatabaseSettings
redis: RedisSettings
security: SecuritySettings
execution: ExecutionSettings
is_test_mode: bool = os.getenv("TESTING") == "True"
hot_reloading: bool = os.getenv("DEV_MODE") == "True"
class Config: # pylint: disable=C0115
case_sensitive = True
logger.warning(f"Startup configuration: reloading = {hot_reloading}")
logger.warning(
f'Startup configuration: pii logging = {os.getenv("LOG_PII") == "True"}'
)
def load_file(file_name: str) -> str:
    """Load a file from the first matching location.
In order, will check:
- A path set at ENV variable FIDESOPS_CONFIG_PATH
- The current directory
- The parent directory
    - The user's home (~) directory
    Raises FileNotFoundError if none is found.
"""
possible_directories = [
os.getenv("FIDESOPS_CONFIG_PATH"),
os.curdir,
os.pardir,
os.path.expanduser("~"),
]
directories: List[str] = [d for d in possible_directories if d]
for dir_str in directories:
possible_location = os.path.join(dir_str, file_name)
if possible_location and os.path.isfile(possible_location):
logger.info("Loading file %s from %s", NotPii(file_name), NotPii(dir_str))
return possible_location
logger.debug("%s not found at %s", NotPii(file_name), NotPii(dir_str))
raise FileNotFoundError
def load_toml(file_name: str) -> MutableMapping[str, Any]:
"""
Load toml file from possible locations specified in load_file.
Will raise FileNotFoundError or ValidationError on missing or
bad file
"""
return toml.load(load_file(file_name))
def get_config() -> FidesopsConfig:
"""
Attempt to read config file from:
a) env var FIDESOPS_CONFIG_PATH
b) local directory
c) parent directory
d) home directory
This will fail on the first encountered bad conf file.
"""
try:
return FidesopsConfig.parse_obj(load_toml("fidesops.toml"))
except (FileNotFoundError, ValidationError) as e:
logger.warning("fidesops.toml could not be loaded: %s", NotPii(e))
# If no path is specified Pydantic will attempt to read settings from
# the environment. Default values will still be used if the matching
# environment variable is not set.
try:
return FidesopsConfig()
except ValidationError as exc:
logger.error("ValidationError: %s", exc)
        # If FidesopsConfig is missing any required values Pydantic will raise
        # a ValidationError. This means the config has not been correctly specified
# so we can throw the missing config error.
raise MissingConfig(exc.args[0])
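# Illustrative sketch, not an official fidesops example: each settings class
# declares an env_prefix, so values loaded from fidesops.toml can be overridden
# per variable through the environment. The variable names below are derived
# from those prefixes and the host values are placeholders.
#
#     # export FIDESOPS__DATABASE__SERVER=db.internal
#     # export FIDESOPS__REDIS__HOST=redis.internal
#     cfg = get_config()
#     print(cfg.database.SERVER, cfg.redis.HOST)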
CONFIG_KEY_ALLOWLIST = {
"database": [
"SERVER",
"USER",
"PORT",
"DB",
"TEST_DB",
],
"redis": [
"HOST",
"PORT",
"CHARSET",
"DECODE_RESPONSES",
"DEFAULT_TTL_SECONDS",
"DB_INDEX",
],
"security": [
"CORS_ORIGINS",
"ENCODING",
"OAUTH_ACCESS_TOKEN_EXPIRE_MINUTES",
],
"execution": [
"TASK_RETRY_COUNT",
"TASK_RETRY_DELAY",
"TASK_RETRY_BACKOFF",
],
}
def get_censored_config(the_config: FidesopsConfig) -> Dict[str, Any]:
"""
Returns a config that is safe to expose over the API. This function will
strip out any keys not specified in the `CONFIG_KEY_ALLOWLIST` above.
"""
as_dict = the_config.dict()
filtered: Dict[str, Any] = {}
for key, value in CONFIG_KEY_ALLOWLIST.items():
data = as_dict[key]
filtered[key] = {}
for field in value:
filtered[key][field] = data[field]
return filtered
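# Illustrative sketch: the censored view keeps only allow-listed keys, so
# secrets such as the database PASSWORD or APP_ENCRYPTION_KEY never appear in it.
#
#     safe = get_censored_config(get_config())
#     assert "PASSWORD" not in safe["database"]
#     assert "APP_ENCRYPTION_KEY" not in safe["security"]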
config = get_config()
# `censored_config` is included below because it's important we keep the censored
# config at parity with `config`. This means if we change the path at which fidesops
# loads `config`, we should also change `censored_config`.
censored_config = get_censored_config(config)
|
the-stack_0_20200 | """Config flow for UPNP."""
import logging
from collections import OrderedDict
import voluptuous as vol
from homeassistant import config_entries
from homeassistant import data_entry_flow
from homeassistant.util import get_local_ip
from .const import (
CONF_ENABLE_PORT_MAPPING, CONF_ENABLE_SENSORS,
CONF_SSDP_DESCRIPTION, CONF_UDN
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_ensure_domain_data(hass):
"""Ensure hass.data is filled properly."""
hass.data[DOMAIN] = hass.data.get(DOMAIN, {})
hass.data[DOMAIN]['devices'] = hass.data[DOMAIN].get('devices', {})
hass.data[DOMAIN]['discovered'] = hass.data[DOMAIN].get('discovered', {})
hass.data[DOMAIN]['auto_config'] = hass.data[DOMAIN].get('auto_config', {
'active': False,
'enable_sensors': False,
'enable_port_mapping': False,
'ports': {'hass': 'hass'},
})
if 'local_ip' not in hass.data[DOMAIN]:
hass.data[DOMAIN]['local_ip'] = \
await hass.async_add_executor_job(get_local_ip)
@config_entries.HANDLERS.register(DOMAIN)
class UpnpFlowHandler(data_entry_flow.FlowHandler):
"""Handle a UPnP/IGD config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@property
def _configured_upnp_igds(self):
"""Get all configured IGDs."""
return {
entry.data[CONF_UDN]: {
'udn': entry.data[CONF_UDN],
}
for entry in self.hass.config_entries.async_entries(DOMAIN)
}
@property
def _discovered_upnp_igds(self):
"""Get all discovered entries."""
return self.hass.data[DOMAIN]['discovered']
def _store_discovery_info(self, discovery_info):
"""Add discovery info."""
udn = discovery_info['udn']
self.hass.data[DOMAIN]['discovered'][udn] = discovery_info
def _auto_config_settings(self):
"""Check if auto_config has been enabled."""
return self.hass.data[DOMAIN]['auto_config']
async def async_step_discovery(self, discovery_info):
"""
Handle a discovered UPnP/IGD.
This flow is triggered by the discovery component. It will check if the
host is already configured and delegate to the import step if not.
"""
await async_ensure_domain_data(self.hass)
if not discovery_info.get('udn') or not discovery_info.get('host'):
# Silently ignore incomplete/broken devices to prevent constant
# errors/warnings
            _LOGGER.debug('UPnP device is missing the udn or host. Provided info: %r',
discovery_info)
return self.async_abort(reason='incomplete_device')
# store discovered device
discovery_info['friendly_name'] = discovery_info.get('host', '')
# add name if available
if discovery_info.get('name'):
discovery_info['friendly_name'] += ' ({name})'.format(
**discovery_info)
self._store_discovery_info(discovery_info)
# ensure not already discovered/configured
if discovery_info.get('udn') in self._configured_upnp_igds:
return self.async_abort(reason='already_configured')
# auto config?
auto_config = self._auto_config_settings()
if auto_config['active']:
import_info = {
'name': discovery_info['friendly_name'],
'enable_sensors': auto_config['enable_sensors'],
'enable_port_mapping': auto_config['enable_port_mapping'],
}
return await self._async_save_entry(import_info)
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Manual set up."""
await async_ensure_domain_data(self.hass)
# if user input given, handle it
user_input = user_input or {}
if 'name' in user_input:
if not user_input['enable_sensors'] and \
not user_input['enable_port_mapping']:
return self.async_abort(reason='no_sensors_or_port_mapping')
# ensure not already configured
configured_names = [
entry['friendly_name']
for udn, entry in self._discovered_upnp_igds.items()
if udn in self._configured_upnp_igds
]
if user_input['name'] in configured_names:
return self.async_abort(reason='already_configured')
return await self._async_save_entry(user_input)
# let user choose from all discovered, non-configured, UPnP/IGDs
names = [
entry['friendly_name']
for udn, entry in self._discovered_upnp_igds.items()
if udn not in self._configured_upnp_igds
]
if not names:
return self.async_abort(reason='no_devices_discovered')
return self.async_show_form(
step_id='user',
data_schema=vol.Schema(
OrderedDict([
(vol.Required('name'), vol.In(names)),
(vol.Optional('enable_sensors', default=False), bool),
(vol.Optional('enable_port_mapping', default=False), bool),
])
))
async def async_step_import(self, import_info):
"""Import a new UPnP/IGD as a config entry."""
await async_ensure_domain_data(self.hass)
return await self._async_save_entry(import_info)
async def _async_save_entry(self, import_info):
"""Store UPNP/IGD as new entry."""
await async_ensure_domain_data(self.hass)
# ensure we know the host
name = import_info['name']
discovery_infos = [info
for info in self._discovered_upnp_igds.values()
if info['friendly_name'] == name]
if not discovery_infos:
return self.async_abort(reason='host_not_found')
discovery_info = discovery_infos[0]
return self.async_create_entry(
title=discovery_info['name'],
data={
CONF_SSDP_DESCRIPTION: discovery_info['ssdp_description'],
CONF_UDN: discovery_info['udn'],
CONF_ENABLE_SENSORS: import_info['enable_sensors'],
CONF_ENABLE_PORT_MAPPING: import_info['enable_port_mapping'],
},
)
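# Illustrative sketch, not part of Home Assistant: the shape of the
# discovery_info dict this flow consumes, inferred from the fields read above;
# all values are made up.
#
#     discovery_info = {
#         'udn': 'uuid:11111111-2222-3333-4444-555555555555',
#         'host': '192.168.1.1',
#         'name': 'Router',
#         'ssdp_description': 'http://192.168.1.1:5000/rootDesc.xml',
#     }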
|
the-stack_0_20201 | import os, gc
from .httpclient import HttpClient
class OTAUpdater:
"""
A class to update your MicroController with the latest version from a GitHub tagged release,
optimized for low power usage.
"""
def __init__(self, github_repo, github_src_dir='', module='', main_dir='main', new_version_dir='next', secrets_file=None, headers={}):
self.http_client = HttpClient(headers=headers)
self.github_repo = github_repo.rstrip('/').replace('https://github.com/', '')
self.github_src_dir = '' if len(github_src_dir) < 1 else github_src_dir.rstrip('/') + '/'
self.module = module.rstrip('/')
self.main_dir = main_dir
self.new_version_dir = new_version_dir
self.secrets_file = secrets_file
def __del__(self):
self.http_client = None
def check_for_update_to_install_during_next_reboot(self) -> bool:
"""Function which will check the GitHub repo if there is a newer version available.
This method expects an active internet connection and will compare the current
version with the latest version available on GitHub.
If a newer version is available, the file 'next/.version' will be created
and you need to call machine.reset(). A reset is needed as the installation process
takes up a lot of memory (mostly due to the http stack)
Returns
-------
bool: true if a new version is available, false otherwise
"""
(current_version, latest_version) = self._check_for_new_version()
if latest_version > current_version:
print('New version available, will download and install on next reboot')
self._create_new_version_file(latest_version)
return True
return False
def install_update_if_available_after_boot(self, ssid, password) -> bool:
"""This method will install the latest version if out-of-date after boot.
This method, which should be called first thing after booting, will check if the
        'next/.version' file exists.
- If yes, it initializes the WIFI connection, downloads the latest version and installs it
- If no, the WIFI connection is not initialized as no new known version is available
"""
if self.new_version_dir in os.listdir(self.module):
if '.version' in os.listdir(self.modulepath(self.new_version_dir)):
latest_version = self.get_version(self.modulepath(self.new_version_dir), '.version')
print('New update found: ', latest_version)
OTAUpdater._using_network(ssid, password)
self.install_update_if_available()
return True
print('No new updates found...')
return False
def install_update_if_available(self) -> bool:
"""This method will immediately install the latest version if out-of-date.
This method expects an active internet connection and allows you to decide yourself
if you want to install the latest version. It is necessary to run it directly after boot
(for memory reasons) and you need to restart the microcontroller if a new version is found.
Returns
-------
bool: true if a new version is available, false otherwise
"""
(current_version, latest_version) = self._check_for_new_version()
if latest_version > current_version:
print('Updating to version {}...'.format(latest_version))
self._create_new_version_file(latest_version)
self._download_new_version(latest_version)
self._copy_secrets_file()
self._delete_old_version()
self._install_new_version()
return True
return False
@staticmethod
def _using_network(ssid, password):
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
sta_if.connect(ssid, password)
while not sta_if.isconnected():
pass
print('network config:', sta_if.ifconfig())
def _check_for_new_version(self):
current_version = self.get_version(self.modulepath(self.main_dir))
latest_version = self.get_latest_version()
print('Checking version... ')
print('\tCurrent version: ', current_version)
print('\tLatest version: ', latest_version)
return (current_version, latest_version)
def _create_new_version_file(self, latest_version):
self.mkdir(self.modulepath(self.new_version_dir))
with open(self.modulepath(self.new_version_dir + '/.version'), 'w') as versionfile:
versionfile.write(latest_version)
versionfile.close()
def get_version(self, directory, version_file_name='.version'):
if version_file_name in os.listdir(directory):
with open(directory + '/' + version_file_name) as f:
version = f.read()
return version
return '0.0'
def get_latest_version(self):
latest_release = self.http_client.get('https://api.github.com/repos/{}/releases/latest'.format(self.github_repo))
version = latest_release.json()['tag_name']
latest_release.close()
return version
def _download_new_version(self, version):
print('Downloading version {}'.format(version))
self._download_all_files(version)
print('Version {} downloaded to {}'.format(version, self.modulepath(self.new_version_dir)))
def _download_all_files(self, version, sub_dir=''):
url = 'https://api.github.com/repos/{}/contents{}{}{}?ref=refs/tags/{}'.format(self.github_repo, self.github_src_dir, self.main_dir, sub_dir, version)
gc.collect()
file_list = self.http_client.get(url)
for file in file_list.json():
path = self.modulepath(self.new_version_dir + '/' + file['path'].replace(self.main_dir + '/', '').replace(self.github_src_dir, ''))
if file['type'] == 'file':
gitPath = file['path']
print('\tDownloading: ', gitPath, 'to', path)
self._download_file(version, gitPath, path)
elif file['type'] == 'dir':
print('Creating dir', path)
self.mkdir(path)
self._download_all_files(version, sub_dir + '/' + file['name'])
gc.collect()
file_list.close()
def _download_file(self, version, gitPath, path):
self.http_client.get('https://raw.githubusercontent.com/{}/{}/{}'.format(self.github_repo, version, gitPath), saveToFile=path)
def _copy_secrets_file(self):
if self.secrets_file:
fromPath = self.modulepath(self.main_dir + '/' + self.secrets_file)
toPath = self.modulepath(self.new_version_dir + '/' + self.secrets_file)
print('Copying secrets file from {} to {}'.format(fromPath, toPath))
self._copy_file(fromPath, toPath)
print('Copied secrets file from {} to {}'.format(fromPath, toPath))
def _delete_old_version(self):
print('Deleting old version at {} ...'.format(self.modulepath(self.main_dir)))
self._rmtree(self.modulepath(self.main_dir))
print('Deleted old version at {} ...'.format(self.modulepath(self.main_dir)))
def _install_new_version(self):
print('Installing new version at {} ...'.format(self.modulepath(self.main_dir)))
if self._os_supports_rename():
os.rename(self.modulepath(self.new_version_dir), self.modulepath(self.main_dir))
else:
self._copy_directory(self.modulepath(self.new_version_dir), self.modulepath(self.main_dir))
self._rmtree(self.modulepath(self.new_version_dir))
print('Update installed, please reboot now')
def _rmtree(self, directory):
for entry in os.ilistdir(directory):
is_dir = entry[1] == 0x4000
if is_dir:
self._rmtree(directory + '/' + entry[0])
else:
os.remove(directory + '/' + entry[0])
os.rmdir(directory)
def _os_supports_rename(self) -> bool:
self._mk_dirs('otaUpdater/osRenameTest')
os.rename('otaUpdater', 'otaUpdated')
result = len(os.listdir('otaUpdated')) > 0
self._rmtree('otaUpdated')
return result
def _copy_directory(self, fromPath, toPath):
if not self._exists_dir(toPath):
self._mk_dirs(toPath)
for entry in os.ilistdir(fromPath):
is_dir = entry[1] == 0x4000
if is_dir:
self._copy_directory(fromPath + '/' + entry[0], toPath + '/' + entry[0])
else:
self._copy_file(fromPath + '/' + entry[0], toPath + '/' + entry[0])
def _copy_file(self, fromPath, toPath):
with open(fromPath) as fromFile:
with open(toPath, 'w') as toFile:
CHUNK_SIZE = 512 # bytes
data = fromFile.read(CHUNK_SIZE)
while data:
toFile.write(data)
data = fromFile.read(CHUNK_SIZE)
toFile.close()
fromFile.close()
def _exists_dir(self, path) -> bool:
try:
os.listdir(path)
return True
except:
return False
def _mk_dirs(self, path:str):
paths = path.split('/')
pathToCreate = ''
for x in paths:
self.mkdir(pathToCreate + x)
pathToCreate = pathToCreate + x + '/'
# different micropython versions act differently when directory already exists
def mkdir(self, path:str):
try:
os.mkdir(path)
except OSError as exc:
if exc.args[0] == 17:
pass
def modulepath(self, path):
return self.module + '/' + path if self.module else path
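# Illustrative boot-time usage sketch, not part of the original module; the
# import path, repository URL and Wi-Fi credentials are placeholders.
#
#     import machine
#     from ota_updater import OTAUpdater   # module name is an assumption
#
#     updater = OTAUpdater('https://github.com/someuser/somerepo', main_dir='main')
#     if updater.install_update_if_available_after_boot('my-ssid', 'my-password'):
#         machine.reset()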
|
the-stack_0_20202 | #!/usr/bin/env python3
"""
Access to Ensembl REST API.
http://rest.ensembl.org/documentation/info/lookup
"""
import sys,os,re,argparse,time,json,logging
from .. import ensembl
#
##############################################################################
if __name__=='__main__':
parser = argparse.ArgumentParser(prog=sys.argv[0], description="Ensembl REST API client", epilog="Example IDs: ENSG00000157764, ENSG00000160785")
ops = ["list_species", "get_xrefs", "get_info", "show_version"]
parser.add_argument("op", choices=ops, help='operation')
parser.add_argument("--ids", help="Ensembl_IDs, comma-separated (ex:ENSG00000000003)")
parser.add_argument("--i", dest="ifile", help="input file, Ensembl_IDs")
parser.add_argument("--api_host", default=ensembl.API_HOST)
parser.add_argument("--api_base_path", default=ensembl.API_BASE_PATH)
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("-q", "--quiet", action="count", default=0)
args = parser.parse_args()
# logging.PROGRESS = 15 (custom)
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>0 else logging.ERROR if args.quiet else 15))
base_url='http://'+args.api_host+args.api_base_path
fout = open(args.ofile, "w") if args.ofile else sys.stdout
t0=time.time()
if args.ifile:
fin = open(args.ifile)
ids=[]
while True:
line = fin.readline()
if not line: break
ids.append(line.strip())
elif args.ids:
ids = re.split(r'\s*,\s*', args.ids.strip())
else:
ids=[]
if ids:
logging.info(f"IDs: {len(ids)}")
if re.match("^get_", args.op) and not ids:
parser.error('--i or --ids required.')
if args.op=='list_species':
ensembl.Utils.ListSpecies(base_url, fout)
elif args.op=='get_info':
ensembl.Utils.GetInfo(ids, base_url, fout)
elif args.op=='get_xrefs':
ensembl.Utils.GetXrefs(ids, base_url, fout)
elif args.op=='show_version':
ensembl.Utils.ShowVersion(base_url, fout)
else:
parser.error(f'Invalid operation: {args.op}')
logging.info(('%s: elapsed time: %s'%(os.path.basename(sys.argv[0]), time.strftime('%Hh:%Mm:%Ss', time.gmtime(time.time()-t0)))))
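# Illustrative command-line sketch, not part of the original script. Because of
# the relative import above, the script must be run as a module of its package;
# the package path "mypkg.ensembl" is a placeholder.
#
#     python3 -m mypkg.ensembl.Client get_info --ids "ENSG00000157764,ENSG00000160785" --o genes.tsv
#     python3 -m mypkg.ensembl.Client list_species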
|
the-stack_0_20203 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models, views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/login_required/$',
login_required(TemplateView.as_view())),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>[0-9]+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>[0-9]+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>[0-9]+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/bypkignoreslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bypkandslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view(query_pk_and_slug=True)),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>[0-9]+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>[0-9]+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>[0-9]+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>[0-9]+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
url(r'^late-validation/$',
views.LateValidationView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>[0-9]+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/{id}/update/')),
url(r'^edit/authors/create/interpolate_redirect_nonascii/$',
views.NaiveAuthorCreate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^[eé]dit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/{id}/update/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect_nonascii/$',
views.NaiveAuthorUpdate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^[eé]dit/author/(?P<pk>[0-9]+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted={id}')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect_nonascii/$',
views.NaiveAuthorDelete.as_view(success_url='/%C3%A9dit/authors/create/?deleted={id}')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
url(r'^dates/books/sortedbyname/$',
views.BookArchive.as_view(ordering='name')),
url(r'^dates/books/sortedbynamedec/$',
views.BookArchive.as_view(ordering='-name')),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>[0-9]+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
url(r'^list/books/sorted/$',
views.BookList.as_view(ordering='name')),
url(r'^list/books/sortedbypagesandnamedec/$',
views.BookList.as_view(ordering=('pages', '-name'))),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>[0-9]{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/sortedbyname/$',
views.BookYearArchive.as_view(make_object_list=True, ordering='name')),
url(r'^dates/books/(?P<year>\d{4})/sortedbypageandnamedec/$',
views.BookYearArchive.as_view(make_object_list=True, ordering=('pages', '-name'))),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(
r'^dates/books/get_object_custom_queryset/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/'
r'(?P<pk>[0-9]+)/$',
views.BookDetailGetObjectCustomQueryset.as_view(),
),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', auth_views.login)
]
|
the-stack_0_20205 | #!/bin/python3
# HackerRank "Jumping on the Clouds": starting on cloud 0, reach cloud n-1 in
# as few jumps as possible; a jump covers 1 or 2 clouds and may only land on a
# safe cloud (value 0). Jumping 2 whenever the landing cloud is safe is optimal.
n = int(input().strip())
c = [int(c_temp) for c_temp in input().strip().split(' ')]
jumps = 0
cloud = 0
while cloud < n - 1:
    # Prefer a 2-cloud jump when it stays in range and lands on a safe cloud.
    if cloud + 2 < n and c[cloud + 2] == 0:
        cloud += 2
    else:
        cloud += 1
    jumps += 1
print(jumps)
|
the-stack_0_20208 | #!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/usr'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls is included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
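# Illustrative build sketch, not part of the original script: the bindings are
# built with the standard distutils commands; results depend on where the
# libxml2 and libxslt headers and libraries were installed.
#
#     python setup.py build
#     python setup.py install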
def missing(file):
if os.access(file, os.R_OK) == 0:
return 1
return 0
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = [os.path.join(ROOT,'bin',dll) for dll in dlls]
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print("failed to find headers for libxml2: update includes_dir")
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print("failed to find headers for libiconv: update includes_dir")
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print("failed to find and generate stubs for libxml2, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print("libxslt stub generator not found, libxslt not built")
else:
try:
import xsltgenerator
except:
print("failed to generate stubs for libxslt, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print("failed to find headers for libxslt: update includes_dir")
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.9.1",
description = descr,
author = "Daniel Veillard",
author_email = "[email protected]",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
|
the-stack_0_20209 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from itertools import product
from fvcore.common.benchmark import benchmark
from test_point_mesh_distance import TestPointMeshDistance
def bm_point_mesh_distance() -> None:
backend = ['cuda:0']
kwargs_list = []
batch_size = [4, 8, 16]
num_verts = [100, 1000]
num_faces = [300, 3000]
num_points = [5000, 10000]
test_cases = product(batch_size, num_verts, num_faces, num_points, backend)
for case in test_cases:
n, v, f, p, b = case
kwargs_list.append({'N': n, 'V': v, 'F': f, 'P': p, 'device': b})
benchmark(
TestPointMeshDistance.point_mesh_edge,
'POINT_MESH_EDGE',
kwargs_list,
warmup_iters=1,
)
benchmark(
TestPointMeshDistance.point_mesh_face,
'POINT_MESH_FACE',
kwargs_list,
warmup_iters=1,
)
|
the-stack_0_20210 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Evan Kaufman <[email protected]>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
path:
description:
- The file to modify.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
aliases: [ dest, destfile, name ]
required: true
regexp:
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses MULTILINE mode, which means C(^) and C($) match the beginning
and end of the file, as well as the beginning and end respectively
of I(each line) of the file.
- Does not use DOTALL, which means the C(.) special character matches
any character I(except newlines). A common mistake is to assume that
a negated character set like C([^#]) will also not match newlines.
In order to exclude newlines, they must be added to the set like C([^#\\n]).
- Note that, as of ansible 2, short form tasks should have any escape
sequences backslash-escaped in order to prevent them being parsed
as string literal escapes. See the examples.
required: true
replace:
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
after:
description:
- If specified, the line after the replace/remove will start. Can be used
in combination with C(before).
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
version_added: "2.4"
before:
description:
- If specified, the line before the replace/remove will occur. Can be used
in combination with C(after).
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
version_added: "2.4"
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
others:
description:
- All arguments accepted by the M(file) module also work here.
encoding:
description:
- "The character encoding for reading and writing the file."
default: "utf-8"
version_added: "2.4"
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
- Option I(follow) has been removed in version 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
"""
EXAMPLES = r"""
# Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
backup: yes
# Replace after the expression till the end of the file (requires >=2.4)
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
after: 'Start after line.*'
backup: yes
# Replace before the expression till the begin of the file (requires >=2.4)
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
before: 'Start before line.*'
backup: yes
# Replace between the expressions (requires >=2.4)
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
after: 'Start after line.*'
before: 'Start before line.*'
backup: yes
- replace:
path: /home/jdoe/.ssh/known_hosts
regexp: '^old\.host\.name[^\n]*\n'
owner: jdoe
group: jdoe
mode: 0644
- replace:
path: /etc/apache/ports
regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
replace: '\1 127.0.0.1:8080'
validate: '/usr/sbin/apache2ctl -f %s -t'
- name: short form task (in ansible 2+) necessitates backslash-escaped sequences
replace: dest=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
- name: long form task does not
replace:
dest: /etc/hosts
regexp: '\b(localhost)(\d*)\b'
replace: '\1\2.localdomain\2 \1\2'
"""
import os
import re
import tempfile
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.write(contents)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
regexp=dict(required=True),
replace=dict(default='', type='str'),
after=dict(required=False),
before=dict(required=False),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
encoding=dict(default='utf-8', type='str'),
),
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
path = params['path']
encoding = params['encoding']
res_args = dict()
params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')
if os.path.isdir(path):
module.fail_json(rc=256, msg='Path %s is a directory !' % path)
if not os.path.exists(path):
module.fail_json(rc=257, msg='Path %s does not exist !' % path)
else:
f = open(path, 'rb')
contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
f.close()
pattern = u''
if params['after'] and params['before']:
pattern = u'%s(?P<subsection>.*?)%s' % (params['before'], params['after'])
elif params['after']:
pattern = u'%s(?P<subsection>.*)' % params['after']
elif params['before']:
pattern = u'(?P<subsection>.*)%s' % params['before']
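    # For example (illustrative marker text): with after='# BEGIN managed block' and no
    # 'before', the compiled pattern is '# BEGIN managed block(?P<subsection>.*)', so only
    # the text following that marker is searched with 'regexp' and replaced.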
if pattern:
section_re = re.compile(pattern, re.DOTALL)
match = re.search(section_re, contents)
if match:
section = match.group('subsection')
else:
res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
res_args['changed'] = False
module.exit_json(**res_args)
else:
section = contents
mre = re.compile(params['regexp'], re.MULTILINE)
result = re.subn(mre, params['replace'], section, 0)
if result[1] > 0 and section != result[0]:
if pattern:
result = (contents.replace(section, result[0]), result[1])
msg = '%s replacements made' % result[1]
changed = True
if module._diff:
res_args['diff'] = {
'before_header': path,
'before': contents,
'after_header': path,
'after': result[0],
}
else:
msg = ''
changed = False
if changed and not module.check_mode:
if params['backup'] and os.path.exists(path):
res_args['backup_file'] = module.backup_local(path)
# We should always follow symlinks so that we change the real file
path = os.path.realpath(path)
write_changes(module, to_bytes(result[0], encoding=encoding), path)
res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
module.exit_json(**res_args)
if __name__ == '__main__':
main()
|
the-stack_0_20211 | # autocord
# package for simple automation with discord client
#
# m: message
import requests
from .Error import ApiError
class Message():
    """Thin wrapper around the Discord channel-message REST endpoints."""
def __init__(self, token: str, http: requests.Session) -> None:
self.token = token
self.http = http
def send(self, chat: int, content: str):
res = self.http.request(
method="post",
url=f"https://discord.com/api/v9/channels/{chat}/messages",
json = {
"content": content,
}
)
if not res.ok:
raise ApiError(res.json())
return res.json()
def edit(self, message: dict, content: str):
res = self.http.request(
method="patch",
url=f"https://discord.com/api/v9/channels/{message['channel_id']}/messages/{message['id']}",
json = {
"content": content,
}
)
if not res.ok:
raise ApiError(res.json())
return res.json()
def delete(self, message: dict):
res = self.http.request(
method="delete",
url=f"https://discord.com/api/v9/channels/{message['channel_id']}/messages/{message['id']}",
)
if not res.ok:
raise ApiError(res.json())
return True
def fetch(self, chat: int, limit=50):
res = self.http.request(
method="get",
url=f"https://discord.com/api/v9/channels/{chat}/messages",
params={
"limit": limit
}
)
if not res.ok:
raise ApiError(res.json())
return res.json()
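# --- Usage sketch (illustrative only) ---
# The token string and channel id below are placeholders; the real package presumably
# prepares the authorized requests.Session at a higher level, so setting the
# authorization header here is an assumption made to keep the example self-contained.
if __name__ == "__main__":
    session = requests.Session()
    session.headers.update({"authorization": "YOUR_TOKEN"})
    messages = Message("YOUR_TOKEN", session)
    sent = messages.send(123456789012345678, "hello from autocord")  # placeholder channel id
    messages.edit(sent, "hello again")
    history = messages.fetch(123456789012345678, limit=10)
    print(len(history), "messages fetched")
    messages.delete(sent)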
|
the-stack_0_20213 | import numpy as np
import math
from robot.robot_core import Robot, Robotworld, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
# define scenario properties
num_agents = 1
num_objects = 1
num_joints = 1
arm_length = 0.35
# create world
world = Robotworld()
# add agents
world.agents = [Robot() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
# add objects
world.objects = [Landmark() for i in range(num_objects)]
for i, object in enumerate(world.objects):
object.name = 'object %d' % i
# add goals
world.goals = [Landmark() for i in range(1)]
for i, goal in enumerate(world.goals):
goal.name = 'goal'
# add world specifications
world.num_joints = num_joints
world.arm_length = arm_length
# reset world
self.reset_world(world)
return world
def reset_world(self, world):
# set agent properties
origins = world.robot_position(len(world.agents))
for i, agent in enumerate(world.agents):
agent.color = np.array([0.25,0.25,0.25])
agent.state.lengths = world.arm_length * np.ones(world.num_joints)
agent.state.angles = (2 * np.random.rand(world.num_joints) - 1) * math.pi
agent.state.p_pos = np.array(origins[i][:])
# set properties for object
for i, object in enumerate(world.objects):
object.color = np.array([0, 0, 1])
object.state.angles = (2 * np.random.rand() - 1) * math.pi
object.state.p_pos = world.arm_length * np.array([math.cos(object.state.angles),
math.sin(object.state.angles)])
# object.state.p_pos = np.array([0, 0.35])
# set properties for goal
for i, goal in enumerate(world.goals):
goal.color = np.array([1, 0, 0])
goal.state.angles = (2 * np.random.rand() - 1) * math.pi
goal.state.p_pos = world.arm_length * np.array([math.cos(goal.state.angles),
math.sin(goal.state.angles)])
# goal.state.p_pos = np.array([0, -0.35])
def reward(self, agent, world): # make suitable for multiple objects
# reward = np.absolute(world.goals[0].state.angles - world.objects[0].state.angles) % math.pi
# reward based on cartesian coordinates
# reward = 0.0
# for i in range(len(agent.state.p_pos)):
# reward += (world.goals[0].state.p_pos[i] - world.objects[0].state.p_pos[i]) ** 2
# return -np.sqrt(reward)
# reward based on polar coordinates
# reward = 0.0
# for i in range(world.num_joints):
# theta_goal = math.atan2(world.goals[0].state.p_pos[1], world.goals[0].state.p_pos[0])
# theta_obj = math.atan2(world.objects[0].state.p_pos[1], world.objects[0].state.p_pos[0])
# reward += np.absolute(theta_goal - theta_obj)
# if reward > math.pi: reward = 2 * math.pi - reward
# reward based on grabbing and goal position
# r_grab, r_obj = 0.0, 0.0
# for i in range(len(agent.state.p_pos)):
# r_grab += (world.objects[0].state.p_pos[i] - agent.position_end_effector()[i]) ** 2
# r_obj += (world.goals[0].state.p_pos[i] - world.objects[0].state.p_pos[i]) ** 2
# # print(f"reward for grabbing = {np.sqrt(r_grab)} and reward for goal = {np.sqrt(r_obj)}")
# return - np.sqrt(r_grab) - 2 * np.sqrt(r_obj)
# normalized polar reward
theta_obj = math.atan2(world.objects[0].state.p_pos[1], world.objects[0].state.p_pos[0])
theta_goal = math.atan2(world.goals[0].state.p_pos[1], world.goals[0].state.p_pos[0])
r_grab = np.absolute(theta_obj - agent.state.angles[0]) / math.pi
r_goal = np.absolute(theta_goal - theta_obj) / math.pi
if r_grab > 1: r_grab = 2 - r_grab
if r_goal > 1: r_goal = 2 - r_goal
# print(f"theta_obj = {theta_obj}, r_grab = {r_grab} and "
# f"theta_goal = {theta_goal}, r_goal = {r_goal}")
return -r_grab - 2 * r_goal
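    # Worked example (illustrative): object at angle pi/2, agent joint at 0 and goal at -pi/2
    # give r_grab = 0.5 and r_goal = 1.0, so the reward is -0.5 - 2 * 1.0 = -2.5;
    # perfect grasp and goal alignment would give the maximum reward of 0.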
def observation(self, agent, world):
# initialize observation variables
# state_observations = (agent.state.angles / math.pi).tolist() # polar coordinates observation
state_observations = agent.position_end_effector().tolist() # cartesian
object_observation = world.objects[0].state.p_pos.tolist()
# goal_observation = [world.goals[0].state.angles / math.pi] # polar [-1,1]
goal_observation = world.goals[0].state.p_pos.tolist() # cartesian
# goal_observation = [math.atan2(world.goals[0].state.p_pos[1], world.goals[0].state.p_pos[0]) / math.pi] # polar
return state_observations + object_observation + goal_observation + [agent.state.grasp]
|
the-stack_0_20217 | # Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ord_schema.units."""
from absl.testing import absltest
from absl.testing import parameterized
from ord_schema import units
from ord_schema.proto import reaction_pb2
class UnitsTest(parameterized.TestCase, absltest.TestCase):
def setUp(self):
super().setUp()
self._resolver = units.UnitResolver()
@parameterized.named_parameters(
('capitalized', '15.0 ML',
reaction_pb2.Volume(value=15.0, units=reaction_pb2.Volume.MILLILITER)),
('integer', '24 H',
reaction_pb2.Time(value=24, units=reaction_pb2.Time.HOUR)),
('no space', '32.1g',
reaction_pb2.Mass(value=32.1, units=reaction_pb2.Mass.GRAM)),
('extra space', ' 32.1 \t g ',
reaction_pb2.Mass(value=32.1, units=reaction_pb2.Mass.GRAM)),
('lengths', ' 10 meter',
reaction_pb2.Length(value=10, units=reaction_pb2.Length.METER)),
('nanoliters', '0.12 nL',
reaction_pb2.Volume(value=0.12, units=reaction_pb2.Volume.NANOLITER)))
def test_resolve(self, string, expected):
self.assertEqual(self._resolver.resolve(string), expected)
@parameterized.named_parameters(
('bad units', '1.21 GW', 'unrecognized units'),
('multiple matches', '15.0 ML 20.0 L',
'string does not contain a value with units'),
('extra period', '15.0. ML',
'string does not contain a value with units'),
('ambiguous units', '5.2 m', 'ambiguous'),
)
def test_resolve_should_fail(self, string, expected_error):
with self.assertRaisesRegex((KeyError, ValueError), expected_error):
self._resolver.resolve(string)
if __name__ == '__main__':
absltest.main()
|
the-stack_0_20218 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .assuredworkloads import (
CreateWorkloadOperationMetadata,
CreateWorkloadRequest,
DeleteWorkloadRequest,
GetWorkloadRequest,
ListWorkloadsRequest,
ListWorkloadsResponse,
UpdateWorkloadRequest,
Workload,
)
__all__ = (
"CreateWorkloadOperationMetadata",
"CreateWorkloadRequest",
"DeleteWorkloadRequest",
"GetWorkloadRequest",
"ListWorkloadsRequest",
"ListWorkloadsResponse",
"UpdateWorkloadRequest",
"Workload",
)
|
the-stack_0_20219 | """API planet mosaic tiles."""
from functools import partial
from typing import Any, Dict
from rio_tiler.utils import render
from covid_api.api import utils
from covid_api.db.memcache import CacheLayer
from covid_api.ressources.common import mimetype
from covid_api.ressources.enums import ImageType
from covid_api.ressources.responses import TileResponse
from fastapi import APIRouter, Depends, Path, Query
from starlette.concurrency import run_in_threadpool
_render = partial(run_in_threadpool, render)
_tile = partial(run_in_threadpool, utils.planet_mosaic_tile)
router = APIRouter()
responses = {
200: {
"content": {
"image/png": {},
"image/jpg": {},
"image/webp": {},
"image/tiff": {},
"application/x-binary": {},
},
"description": "Return an image.",
}
}
tile_routes_params: Dict[str, Any] = dict(
responses=responses, tags=["planet"], response_class=TileResponse
)
@router.get(r"/planet/{z}/{x}/{y}", **tile_routes_params)
async def tile(
z: int = Path(..., ge=0, le=30, description="Mercator tiles's zoom level"),
x: int = Path(..., description="Mercator tiles's column"),
y: int = Path(..., description="Mercator tiles's row"),
date: str = Query(..., description="date of site for detections"),
site: str = Query(..., description="id of site for detections"),
cache_client: CacheLayer = Depends(utils.get_cache),
) -> TileResponse:
"""Handle /planet requests."""
timings = []
headers: Dict[str, str] = {}
scenes = utils.site_date_to_scenes(site, date)
tile_hash = utils.get_hash(**dict(z=z, x=x, y=y, scenes=scenes, planet=True))
content = None
if cache_client:
try:
content, ext = cache_client.get_image_from_cache(tile_hash)
headers["X-Cache"] = "HIT"
except Exception:
content = None
if not content:
with utils.Timer() as t:
tile, mask = await _tile(scenes, x, y, z)
timings.append(("Read", t.elapsed))
        with utils.Timer() as t:
            content = await _render(tile, mask)
        timings.append(("Format", t.elapsed))
if cache_client and content:
cache_client.set_image_cache(tile_hash, (content, ImageType.png))
if timings:
headers["X-Server-Timings"] = "; ".join(
["{} - {:0.2f}".format(name, time * 1000) for (name, time) in timings]
)
return TileResponse(
content, media_type=mimetype[ImageType.png.value], headers=headers
)
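# Example request shape (illustrative values):
#   GET /planet/12/654/1583?date=2020-03-01&site=ny
# The (site, date) pair is resolved to Planet scene ids via utils.site_date_to_scenes,
# and the z/x/y path parameters select the web-mercator tile that is rendered to PNG.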
|
the-stack_0_20220 | #
# This file is part of Python-AD. Python-AD is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# Python-AD is copyright (c) 2007 by the Python-AD authors. See the file
# "AUTHORS" for a complete overview.
from __future__ import absolute_import
import re
import dns
import dns.resolver
import dns.exception
import ldap
import ldap.sasl
import ldap.controls
import socket
from .exception import Error as ADError
from .object import factory, instance
from .creds import Creds
from .locate import Locator
from .constant import LDAP_PORT, GC_PORT
from ..protocol import krb5
from ..util import compat
class Client(object):
"""Active Directory Client
This class implements a client interface to AD. It provides LDAP
operations, Kerberos operations and more.
"""
_timelimit = 0
_sizelimit = 0
_referrals = False
_pagesize = 500
def __init__(self, domain, creds=None):
"""Constructor."""
self.m_locator = None
self.m_connections = None
self.m_naming_contexts = None
self.m_domain = self.dn_from_domain_name(domain)
self.m_forest = None
self.m_schema = None
self.m_configuration = None
self.m_creds = creds
def _locator(self):
"""Return our resource locator."""
if self.m_locator is None:
self.m_locator = factory(Locator)
return self.m_locator
def _credentials(self):
"""Return our current AD credentials."""
if self.m_creds:
creds = self.m_creds
else:
creds = instance(Creds)
if creds is None or not creds.principal():
m = 'No current credentials or credentials not activated.'
raise ADError(m)
return creds
def _fixup_scheme(self, scheme):
"""Check an LDAP search scheme."""
if scheme is None:
scheme = 'ldap'
elif isinstance(scheme, str):
if scheme not in ('ldap', 'gc'):
raise ValueError('Illegal scheme: %s' % scheme)
else:
raise TypeError('Illegal scheme type: %s' % type(scheme))
return scheme
def _create_ldap_uri(self, servers, scheme=None):
"""Return an LDAP uri for the server list `servers'."""
scheme = self._fixup_scheme(scheme)
if scheme == 'ldap':
port = LDAP_PORT
elif scheme == 'gc':
port = GC_PORT
parts = [ 'ldap://%s:%d/' % (srv, port) for srv in servers ]
uri = ' '.join(parts)
return uri
def _create_ldap_connection(self, uri, bind=True):
"""Open a new LDAP connection and optionally bind it using GSSAPI."""
ld = ldap.initialize(uri)
        ld.protocol_version = 3
ld.timelimit = self._timelimit
ld.sizelimit = self._sizelimit
ld.referrals = self._referrals
if compat.disable_reverse_dns():
ld.set_option(ldap.OPT_X_SASL_NOCANON, True)
if bind:
sasl = ldap.sasl.sasl({}, 'GSSAPI')
ld.sasl_interactive_bind_s('', sasl)
return ld
def domain_name_from_dn(self, dn):
"""Given a DN, return a domain."""
parts = compat.str2dn(dn)
parts.reverse()
domain = []
for part in parts:
type,value,flags = part[0] # weird API..
if type.lower() != 'dc':
break
domain.insert(0, value)
return '.'.join(domain).upper()
def dn_from_domain_name(self, name):
"""Given a domain name, return a DN."""
parts = name.split('.')
dn = [ 'dc=%s' % p for p in parts ]
dn = ','.join(dn)
return dn.lower()
def _init_forest(self):
"""Initialize forest global settings."""
if self.m_forest is not None:
return
locator = self._locator()
servers = locator.locate_many(self.domain())
uri = self._create_ldap_uri(servers)
conn = self._create_ldap_connection(uri, bind=False)
try:
attrs = ('rootDomainNamingContext', 'schemaNamingContext',
'configurationNamingContext')
result = conn.search_s('', ldap.SCOPE_BASE, attrlist=attrs)
if not result:
raise ADError('Could not search rootDSE of domain.')
finally:
conn.unbind_s()
dn, attrs = result[0]
self.m_forest = attrs['rootDomainNamingContext'][0]
self.m_schema = attrs['schemaNamingContext'][0]
self.m_configuration = attrs['configurationNamingContext'][0]
def domain(self):
"""Return the domain name of the current domain."""
return self.domain_name_from_dn(self.m_domain)
def domain_base(self):
"""Return the base DN of the domain."""
return self.m_domain
def forest(self):
"""Return the domain name of the forest root."""
if self.m_forest is None:
self._init_forest()
return self.domain_name_from_dn(self.m_forest)
def forest_base(self):
"""Return the base DN of the forest root."""
if self.m_forest is None:
self._init_forest()
return self.m_forest
def schema_base(self):
"""Return the base DN of the schema naming_context."""
if self.m_schema is None:
self._init_forest()
return self.m_schema
def configuration_base(self):
"""Return the base DN of the configuration naming_context."""
if self.m_configuration is None:
self._init_forest()
return self.m_configuration
def _init_naming_contexts(self):
"""Initialize naming naming_contexts."""
if self.m_naming_contexts is not None:
return
locator = self._locator()
servers = locator.locate_many(self.domain())
uri = self._create_ldap_uri(servers)
conn = self._create_ldap_connection(uri)
base = 'cn=Partitions,%s' % self.configuration_base()
filter = '(objectClass=crossRef)'
try:
attrs = ('nCName',)
result = conn.search_s(base, ldap.SCOPE_ONELEVEL, filter, attrs)
if not result:
raise ADError('Could not search rootDSE of forest root.')
finally:
conn.unbind_s()
naming_contexts = []
for res in result:
dn, attrs = res
nc = attrs['nCName'][0].lower()
naming_contexts.append(nc)
self.m_naming_contexts = naming_contexts
def naming_contexts(self):
"""Return a list of all naming_contexts."""
if self.m_naming_contexts is None:
self._init_naming_contexts()
return self.m_naming_contexts
def domains(self):
"""Return a list of all domains in the forest."""
domains = []
for nc in self.naming_contexts():
if nc.startswith('dc=domaindnszones') or \
nc.startswith('dc=forestdnszones') or \
nc.startswith('dc=tapi3directory') or \
nc.startswith('cn=schema') or \
nc.startswith('cn=configuration'):
continue
domains.append(self.domain_name_from_dn(nc))
return domains
def _resolve_naming_context(self, base):
"""Resolve a base dn to a directory naming_context."""
naming_context = ''
base = base.lower()
for nc in self.naming_contexts():
if base.endswith(nc) and len(nc) > len(naming_context):
naming_context = nc
return naming_context
def _ldap_connection(self, base, server=None, scheme=None):
"""Return the (cached) LDAP connection for a naming naming_context."""
naming_context = self._resolve_naming_context(base)
scheme = self._fixup_scheme(scheme)
if self.m_connections is None:
self.m_connections = {}
key = (naming_context, server, scheme)
if key not in self.m_connections:
locator = self._locator()
if naming_context == '':
assert server != None
uri = self._create_ldap_uri([server])
bind = False # No need to bind for rootDSE
else:
domain = self.domain_name_from_dn(naming_context)
if scheme == 'gc':
role = 'gc'
elif scheme == 'ldap':
role = 'dc'
if server is None:
servers = locator.locate_many(domain, role=role)
uri = self._create_ldap_uri(servers, scheme)
else:
if not locator.check_domain_controller(server, domain, role):
raise ADError('Unsuitable server provided.')
uri = self._create_ldap_uri([server], scheme)
creds = self._credentials()
creds._resolve_servers_for_domain(domain)
bind = True
conn = self._create_ldap_connection(uri, bind)
self.m_connections[key] = conn
return self.m_connections[key]
def close(self):
"""Close any active LDAP connection."""
for conn in self.m_connections.values():
conn.unbind_s()
self.m_connections = None
def _remove_empty_search_entries(self, result):
"""Remove empty search entries from a search result."""
# What I have seen so far these entries are always LDAP referrals
return [x for x in result if x[0] is not None]
re_range = re.compile('([^;]+);[Rr]ange=([0-9]+)(?:-([0-9]+|\\*))?')
def _retrieve_all_ranges(self, dn, key, attrs):
"""Retrieve all ranges for a multivalued attributed."""
assert key in attrs
mobj = self.re_range.match(key)
assert mobj is not None
type, lo, hi = mobj.groups()
values = attrs[key]
conn = self._ldap_connection(dn)
base = self._resolve_naming_context(dn)
while hi != '*':
try:
hi = int(hi)
except ValueError:
m = 'Error while retrieving multi-valued attributes.'
raise ADError(m)
rqattrs = ('%s;range=%s-*' % (type, hi+1),)
filter = '(distinguishedName=%s)' % dn
result = conn.search_s(base, ldap.SCOPE_SUBTREE, filter, rqattrs)
if not result:
# Object deleted? Assume it was and return no further
# attributes.
break
dn2, attrs2 = result[0]
for key2 in attrs2:
mobj = self.re_range.match(key2)
if mobj is None:
continue
type2, lo2, hi2 = mobj.groups()
if type2 == type and lo2 == str(hi+1):
break
else:
m = 'Error while retrieving multi-valued attributes.'
raise ADError(m)
values += attrs2[key2]
hi = hi2
attrs[type] = values
del attrs[key]
def _process_range_subtypes(self, result):
"""Incremental retrieval of multi-valued attributes."""
for dn,attrs in result:
for key in attrs.keys(): # dict will be updated
if self.re_range.match(key):
self._retrieve_all_ranges(dn, key, attrs)
return result
def _fixup_filter(self, filter):
"""Fixup the `filter' argument."""
if filter is None:
filter = '(objectClass=*)'
elif not isinstance(filter, str):
raise TypeError('Illegal filter type: %s' % type(filter))
return filter
def _fixup_base(self, base):
"""Fixup an ldap search base."""
if base is None:
base = self.dn_from_domain_name(self.domain())
elif not isinstance(base, str):
raise TypeError('Illegal search base type: %s' % type(base))
return base
def _fixup_scope(self, scope):
"""Check the ldap scope `scope'."""
if scope is None:
scope = ldap.SCOPE_SUBTREE
elif scope == 'base':
scope = ldap.SCOPE_BASE
elif scope == 'onelevel':
scope = ldap.SCOPE_ONELEVEL
elif scope == 'subtree':
scope = ldap.SCOPE_SUBTREE
elif isinstance(scope, int):
if scope not in (ldap.SCOPE_BASE, ldap.SCOPE_ONELEVEL,
ldap.SCOPE_SUBTREE):
raise ValueError('Illegal scope: %s' % scope)
else:
raise TypeError('Illegal scope type: %s' % type(scope))
return scope
def _fixup_attrs(self, attrs):
"""Check validity of the `attrs' argument to search()."""
if attrs is None:
pass
elif isinstance(attrs, list) or isinstance(attrs, tuple):
for item in attrs:
if not isinstance(item, str):
raise TypeError('Expecting sequence of strings.')
else:
raise TypeError('Expecting sequence of strings.')
return attrs
def _search_with_paged_results(self, conn, filter, base, scope, attrs):
"""Perform an ldap search operation with paged results."""
ctrl = compat.SimplePagedResultsControl(self._pagesize)
result = []
while True:
msgid = conn.search_ext(base, scope, filter, attrs,
serverctrls=[ctrl])
type, data, msgid, ctrls = conn.result3(msgid)
result += data
rctrls = [ c for c in ctrls
if c.controlType == compat.LDAP_CONTROL_PAGED_RESULTS ]
if not rctrls:
m = 'Server does not honour paged results.'
raise ADError(m)
size = rctrls[0].size
cookie = rctrls[0].cookie
if not cookie:
break
ctrl.controlValue = (self._pagesize, cookie)
return result
def search(self, filter=None, base=None, scope=None, attrs=None,
server=None, scheme=None):
"""Search Active Directory and return a list of objects.
The `filter' argument specifies an RFC 2254 search filter. If it is
not provided, the default is '(objectClass=*)'. `base' is the search
base and defaults to the base of the current domain. `scope' is the
search scope and must be one of 'base', 'one' or 'subtree'. The
default scope is 'substree'. `attrs' is the attribute list to
retrieve. The default is to retrieve all attributes.
"""
filter = self._fixup_filter(filter)
base = self._fixup_base(base)
scope = self._fixup_scope(scope)
attrs = self._fixup_attrs(attrs)
scheme = self._fixup_scheme(scheme)
if base == '':
if server is None:
m = 'A server must be specified when querying rootDSE'
raise ADError(m)
if scope != ldap.SCOPE_BASE:
m = 'Search scope must be base when querying rootDSE'
raise ADError(m)
conn = self._ldap_connection(base, server, scheme)
if base == '':
# search rootDSE does not honour paged results
result = conn.search_s(base, scope, filter, attrs)
else:
result = self._search_with_paged_results(conn, filter, base,
scope, attrs)
result = self._remove_empty_search_entries(result)
result = self._process_range_subtypes(result)
return result
def _fixup_add_list(self, attrs):
"""Check the `attrs' arguments to add()."""
if not isinstance(attrs, list) and not isinstance(attrs, tuple):
raise TypeError('Expecting list of 2-tuples %s.')
for item in attrs:
if not isinstance(item, tuple) and not isinstance(item, list) \
or not len(item) == 2:
raise TypeError('Expecting list of 2-tuples.')
for type,values in attrs:
if not isinstance(type, str):
raise TypeError('List items must be 2-tuple of (str, [str]).')
if not isinstance(values, list) and not isinstance(values, tuple):
raise TypeError('List items must be 2-tuple of (str, [str]).')
for val in values:
if not isinstance(val, str):
raise TypeError('List items must be 2-tuple of (str, [str]).')
return attrs
def add(self, dn, attrs, server=None):
"""Add a new object to Active Directory.
        The object is created with a distinguished name `dn' and with attributes
`attrs'. The `attrs' parameter must be a list of (type, values)
2-tuples. The type component is the LDAP attribute name and must be a
string. The values component is the LDAP attribute values and must be
a list of strings.
"""
attrs = self._fixup_add_list(attrs)
conn = self._ldap_connection(dn, server)
conn.add_s(dn, attrs)
def _fixup_modify_operation(self, op):
"""Fixup an ldap modify operation."""
if op == 'add':
op = ldap.MOD_ADD
elif op == 'replace':
op = ldap.MOD_REPLACE
elif op == 'delete':
op = ldap.MOD_DELETE
elif op not in (ldap.MOD_ADD, ldap.MOD_REPLACE, ldap.MOD_DELETE):
raise ValueError('Illegal modify operation: %s' % op)
return op
def _fixup_modify_list(self, mods):
"""Check the `mods' argument to modify()."""
if not isinstance(mods, list) and not isinstance(mods, tuple):
raise TypeError('Expecting list of 3-tuples.')
for item in mods:
if not isinstance(item, tuple) and not isinstance(item, list) \
or not len(item) == 3:
raise TypeError('Expecting list of 3-tuples.')
result = []
for op,type,values in mods:
op = self._fixup_modify_operation(op)
if not isinstance(type, str):
raise TypeError('List items must be 3-tuple of (str, str, [str]).')
if not isinstance(values, list) and not isinstance(values, tuple):
raise TypeError('List items must be 3-tuple of (str, str, [str]).')
for val in values:
if not isinstance(val, str):
raise TypeError('List item must be 3-tuple of (str, str, [str]).')
result.append((op,type,values))
return result
def modify(self, dn, mods, server=None):
"""Modify the LDAP object `dn' with `mods'.
The `mods' parameter must be a list of 3-tuples (op,type,value), with
op being the operation (MOD_ADD, MOD_REPLACE or MOD_DELETE), type the
attribute name and value a list of strings containing the attribute
value(s).
"""
mods = self._fixup_modify_list(mods)
conn = self._ldap_connection(dn, server)
conn.modify_s(dn, mods)
def delete(self, dn, server=None):
"""Delete the LDAP object referenced by `dn'."""
conn = self._ldap_connection(dn, server)
conn.delete_s(dn)
def modrdn(self, dn, newrdn, delold=True, server=None):
"""Change the RDN of an object in Active Direcotry.
`dn' specifies the object, `newrdn' is the new RDN. The new RDN must
be in the format of "rdn=value". If `delold' is True, the old value
of the RDN is deleted, otherwise it is retained.
"""
self.rename(dn, newrdn, delold=delold, server=server)
def rename(self, dn, newrdn, newsuperior=None, delold=True, server=None):
"""Rename an object in AD, possibly moving it to another point in
the directory. The `dn', `newrdn' and `delold' arguments are as with
modrdn(). If the `newsuperior' argument is specified, it must be a
DN and the object is moved there.
"""
conn = self._ldap_connection(dn, server)
conn.rename_s(dn, newrdn, newsuperior, delold)
def set_password(self, principal, password, server=None):
"""Set the password of `principal' to `password'."""
if '@' in principal:
principal, domain = principal.split('@')
domain = domain.upper()
else:
domain = self.domain()
principal = '%s@%s' % (principal, domain)
creds = self._credentials()
if server is not None:
creds._set_servers_for_domain(domain, [server])
try:
krb5.set_password(principal, password)
except krb5.Error as err:
raise ADError(str(err))
if server is not None:
creds._resolve_servers_for_domain(domain, force=True)
def change_password(self, principal, oldpass, newpass, server=None):
"""Chagne the password of `principal' to `password'."""
if '@' in principal:
principal, domain = principal.split('@')
domain = domain.upper()
else:
domain = self.domain()
principal = '%s@%s' % (principal, domain)
creds = self._credentials()
if server is not None:
creds._set_servers_for_domain(domain, [server])
try:
krb5.change_password(principal, oldpass, newpass)
except krb5.Error as err:
raise ADError(str(err))
if server is not None:
creds._resolve_servers_for_domain(domain, force=True)
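# --- Usage sketch (illustrative only) ---
# The domain, account and filter below are placeholders, and the Creds calls follow
# the constructor/acquire API assumed from the rest of this package.
if __name__ == '__main__':
    creds = Creds('EXAMPLE.COM')
    creds.acquire('Administrator', 'Passw0rd')
    client = Client('EXAMPLE.COM', creds=creds)
    for dn, attrs in client.search('(objectClass=user)', attrs=['sAMAccountName']):
        print(dn, attrs.get('sAMAccountName'))
    client.close()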
|
the-stack_0_20221 | import pandas as pd
from srdatasets.datasets.dataset import Dataset
from srdatasets.datasets.utils import download_url
class Amazon(Dataset):
__corefile__ = {
"Books": "ratings_Books.csv",
"Electronics": "ratings_Electronics.csv",
"Movies": "ratings_Movies_and_TV.csv",
"CDs": "ratings_CDs_and_Vinyl.csv",
"Clothing": "ratings_Clothing_Shoes_and_Jewelry.csv",
"Home": "ratings_Home_and_Kitchen.csv",
"Kindle": "ratings_Kindle_Store.csv",
"Sports": "ratings_Sports_and_Outdoors.csv",
"Phones": "ratings_Cell_Phones_and_Accessories.csv",
"Health": "ratings_Health_and_Personal_Care.csv",
"Toys": "ratings_Toys_and_Games.csv",
"VideoGames": "ratings_Video_Games.csv",
"Tools": "ratings_Tools_and_Home_Improvement.csv",
"Beauty": "ratings_Beauty.csv",
"Apps": "ratings_Apps_for_Android.csv",
"Office": "ratings_Office_Products.csv",
"Pet": "ratings_Pet_Supplies.csv",
"Automotive": "ratings_Automotive.csv",
"Grocery": "ratings_Grocery_and_Gourmet_Food.csv",
"Patio": "ratings_Patio_Lawn_and_Garden.csv",
"Baby": "ratings_Baby.csv",
"Music": "ratings_Digital_Music.csv",
"MusicalInstruments": "ratings_Musical_Instruments.csv",
"InstantVideo": "ratings_Amazon_Instant_Video.csv"
}
url_prefix = "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/"
def download(self, category):
download_url(self.url_prefix + self.__corefile__[category], self.rootdir.joinpath(self.__corefile__[category]))
def transform(self, category, rating_threshold):
""" Records with rating less than `rating_threshold` are dropped
"""
df = pd.read_csv(self.rootdir.joinpath(self.__corefile__[category]),
header=None,
names=["user_id", "item_id", "rating", "timestamp"])
df = df[df["rating"] >= rating_threshold].drop("rating", axis=1)
return df
|
the-stack_0_20223 | # -*- encoding: utf-8 -*-
from selenium.webdriver.support.wait import WebDriverWait
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
import pytest
from bpp.models import Wydawnictwo_Ciagle
from bpp.models.patent import Patent
from bpp.models.wydawnictwo_zwarte import Wydawnictwo_Zwarte
from bpp.tests import any_ciagle
from bpp.tests.util import any_patent, any_zwarte, assertPopupContains
from django_bpp.selenium_util import SHORT_WAIT_TIME, wait_for, wait_for_page_load
ID = "id_tytul_oryginalny"
pytestmark = [pytest.mark.slow, pytest.mark.selenium]
def test_admin_wydawnictwo_ciagle_toz(admin_browser, asgi_live_server):
Wydawnictwo_Ciagle.objects.all().delete()
c = any_ciagle(informacje="TO INFORMACJE")
admin_browser.visit(
asgi_live_server.url
+ reverse("admin:bpp_wydawnictwo_ciagle_change", args=(c.pk,))
)
wcc = Wydawnictwo_Ciagle.objects.count
assert wcc() == 1
toz = admin_browser.find_by_id("toz")
toz.click()
assertPopupContains(admin_browser, "Utworzysz kopię tego rekordu")
wait_for(lambda: wcc() == 2)
def test_admin_wydawnictwo_zwarte_toz(admin_browser, asgi_live_server):
c = any_zwarte(informacje="TO INFOMRACJE")
admin_browser.visit(
asgi_live_server.url
+ reverse("admin:bpp_wydawnictwo_zwarte_change", args=(c.pk,))
)
wcc = Wydawnictwo_Zwarte.objects.count
assert wcc() == 1
toz = admin_browser.find_by_id("toz")
toz.click()
assertPopupContains(admin_browser, "Utworzysz kopię tego rekordu")
wait_for(lambda: wcc() == 2)
def test_admin_wydawnictwo_ciagle_tamze(admin_browser, asgi_live_server):
c = any_ciagle(informacje="TO INFORMACJE", uwagi="te uwagi", www="te www")
admin_browser.visit(
asgi_live_server.url
+ reverse("admin:bpp_wydawnictwo_ciagle_change", args=(c.pk,))
)
tamze = admin_browser.find_by_id("tamze")
with wait_for_page_load(admin_browser):
tamze.click()
assert "Dodaj wydawnictwo" in admin_browser.html
for elem in ["TO INFORMACJE", "te uwagi"]:
assert elem in admin_browser.html, "BRAK %r" % elem
assert "te www" not in admin_browser.html
assert "te www" not in admin_browser.html
def test_admin_wydawnictwo_zwarte_tamze(admin_browser, asgi_live_server, wydawca):
c = any_zwarte(
informacje="TO INFORMACJE",
uwagi="te uwagi",
miejsce_i_rok="te miejsce i rok",
wydawca=wydawca,
wydawca_opis="te wydawnictwo",
www="ten adres WWW",
isbn="Z_ISBN",
e_isbn="E_ISBN",
)
admin_browser.visit(
asgi_live_server.url
+ reverse("admin:bpp_wydawnictwo_zwarte_change", args=(c.pk,))
)
tamze = admin_browser.find_by_id("tamze")
tamze.click()
WebDriverWait(admin_browser.driver, SHORT_WAIT_TIME).until(
lambda driver: "Dodaj wydawnictwo" in driver.page_source
)
for elem in [
"TO INFORMACJE",
"te uwagi",
"te miejsce i rok",
"te wydawnictwo",
"Z_ISBN",
"E_ISBN",
"Wydawca Testowy",
]:
assert elem in admin_browser.html, "BRAK %r" % elem
assert "ten adres WWW" not in admin_browser.html
def test_admin_patent_toz(admin_browser, asgi_live_server):
c = any_patent(informacje="TO INFORMACJE")
admin_browser.visit(
asgi_live_server.url + reverse("admin:bpp_patent_change", args=(c.pk,))
)
wcc = Patent.objects.count
assert wcc() == 1
toz = admin_browser.find_by_id("toz")
toz.click()
assertPopupContains(admin_browser, "Utworzysz kopię tego rekordu")
WebDriverWait(admin_browser, SHORT_WAIT_TIME).until(
lambda driver: admin_browser.is_element_present_by_id("navigation-menu")
)
wait_for(lambda: wcc() == 2)
def test_admin_patent_tamze(admin_browser, asgi_live_server):
c = any_patent(informacje="TO INFORMACJE")
with wait_for_page_load(admin_browser):
admin_browser.visit(
asgi_live_server.url + reverse("admin:bpp_patent_change", args=(c.pk,))
)
with wait_for_page_load(admin_browser):
admin_browser.execute_script("document.getElementById('tamze').click()")
assert admin_browser.find_by_id("id_informacje").value == "TO INFORMACJE"
|
the-stack_0_20225 | """
*NOTE*: some functions can be implemented in multiple ways, I keep all of them. You can try them for yourself just by
uncommenting them and commenting their counterparts.
"""
import keras.backend as K
import tensorflow as tf
from keras import initializers, layers
class Length(layers.Layer):
"""
Compute the length of vectors.
"""
def call(self, inputs, **kwargs):
return K.sqrt(K.sum(K.square(inputs), -1) + K.epsilon())
def compute_output_shape(self, input_shape):
return input_shape[:-1]
def get_config(self):
config = super(Length, self).get_config()
return config
class Mask(layers.Layer):
"""
Mask a Tensor with shape=[None, num_capsule, dim_vector] either by the capsule with max length or by an additional
input mask. Except the max-length capsule (or specified capsule), all vectors are masked to zeros. Then flatten the
masked Tensor.
"""
def call(self, inputs, **kwargs):
if type(inputs) is list: # true label is provided with shape = [None, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, mask = inputs
else: # if no true label, mask by the max length of capsules. Mainly used for prediction
# compute lengths of capsules
x = K.sqrt(K.sum(K.square(inputs), -1))
# generate the mask which is a one-hot code.
# mask.shape=[None, n_classes]=[None, num_capsule]
mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])
# inputs.shape=[None, num_capsule, dim_capsule]
# mask.shape=[None, num_capsule]
# masked.shape=[None, num_capsule * dim_capsule]
masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
return masked
def compute_output_shape(self, input_shape):
if type(input_shape[0]) is tuple: # true label provided
return tuple([None, input_shape[0][1] * input_shape[0][2]])
else: # no true label provided
return tuple([None, input_shape[1] * input_shape[2]])
def get_config(self):
config = super(Mask, self).get_config()
return config
def squash(vectors, axis=-1):
"""The non-linear activation used in Capsule. """
s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
return scale * vectors
class CapsuleLayer(layers.Layer):
"""The capsule layer. It is similar to Dense layer."""
def __init__(self, num_capsule, dim_capsule, routings=3,
kernel_initializer='glorot_uniform',
**kwargs):
super(CapsuleLayer, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
assert len(input_shape) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]"
self.input_num_capsule = input_shape[1]
self.input_dim_capsule = input_shape[2]
# Transform matrix
self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,
self.dim_capsule, self.input_dim_capsule],
initializer=self.kernel_initializer,
name='W')
self.built = True
def call(self, inputs, training=None):
# inputs.shape=[None, input_num_capsule, input_dim_capsule]
# inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
inputs_expand = K.expand_dims(inputs, 1)
# Replicate num_capsule dimension to prepare being multiplied by W
# inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
# Compute `inputs * W` by scanning inputs_tiled on dimension 0.
# x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
# W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
# Regard the first two dimensions as `batch` dimension,
# then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
# inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)
# self.add_loss(10*K.mean(K.pow(self.W, 2)))
# inputs_hat = layers.Dropout(0.5)(inputs_hat)
# Begin: Routing algorithm ---------------------------------------------------------------------#
# The prior for coupling coefficient, initialized as zeros.
# b.shape = [None, self.num_capsule, self.input_num_capsule].
b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])
assert self.routings > 0, 'The routings should be > 0.'
for i in range(self.routings):
# c.shape=[batch_size, num_capsule, input_num_capsule]
c = tf.nn.softmax(b, dim=1)
# c.shape = [batch_size, num_capsule, input_num_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
# then matmal: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
# outputs.shape=[None, num_capsule, dim_capsule]
outputs = squash(K.batch_dot(c, inputs_hat, [2, 2])) # [None, 10, 16]
if i < self.routings - 1:
# outputs.shape = [None, num_capsule, dim_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
# then matmal: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
# b.shape=[batch_size, num_capsule, input_num_capsule]
b += K.batch_dot(outputs, inputs_hat, [2, 3])
# End: Routing algorithm -----------------------------------------------------------------------#
return outputs
def compute_output_shape(self, input_shape):
return tuple([None, self.num_capsule, self.dim_capsule])
def get_config(self):
config = {
'num_capsule': self.num_capsule,
'dim_capsule': self.dim_capsule,
'routings': self.routings
}
base_config = super(CapsuleLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def Noise(x, dim_capsule, n_channels, kernel_size, strides, padding, log_norm=False):
# conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# noise = layers.Conv2D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding, name='noise')(conv1)
noise = layers.Conv2D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding, name='noise')(x)
noise_rescale = layers.Lambda(rescale)(noise)
noise_sample = layers.Lambda(sample_lognormal)(noise_rescale)
alpha = layers.Reshape(target_shape=[-1, dim_capsule], name='noise_reshape')(noise_sample)
if not log_norm:
kl = - tf.log(alpha)
else:
mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
kl = KL_div2(tf.log(tf.maximum(primarycapsules,1e-4)), alpha, mu1, sigma1)
tf.add_to_collection('kl_terms', kl)
return alpha
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
conv2 = layers.Conv2D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding, name='primarycap_conv2d')(inputs)
conv2_flatten = layers.Reshape(target_shape=[-1, dim_capsule], name='primarycap_reshape')(conv2)
primarycapsules = layers.Lambda(squash, name='primarycap_squash')(conv2_flatten)
return primarycapsules
class Addnoise(layers.Layer):
def call(self, x, training=None):
primarycapsules, alpha = x
if training:
return layers.Multiply()([primarycapsules, alpha])
else:
return primarycapsules
def KL_div2(mu, sigma, mu1, sigma1):
'''KL divergence between N(mu,sigma**2) and N(mu1,sigma1**2)'''
return 0.5 * ((sigma/sigma1)**2 + (mu - mu1)**2/sigma1**2 - 1 + 2*(tf.log(sigma1) - tf.log(sigma)))
def sample_lognormal(sigma=None, sigma0=1.):
'''Samples a log-normal using the reparametrization trick'''
sample_shape = tf.zeros_like(sigma)
e = tf.random_normal(tf.shape(sample_shape), mean=0., stddev=1.)
return tf.exp(sample_shape + sigma * sigma0 * e)
def rescale(alpha):
return 0.001 + 0.5 * alpha
def kl_term(alpha):
return -tf.log(alpha/(0.7 + 0.001))
"""
# The following is another way to implement primary capsule layer. This is much slower.
# Apply Conv2D `n_channels` times and concatenate all capsules
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
outputs = []
for _ in range(n_channels):
output = layers.Conv2D(filters=dim_capsule, kernel_size=kernel_size, strides=strides, padding=padding)(inputs)
outputs.append(layers.Reshape([output.get_shape().as_list()[1] ** 2, dim_capsule])(output))
outputs = layers.Concatenate(axis=1)(outputs)
return layers.Lambda(squash)(outputs)
"""
|
the-stack_0_20228 | # -*- coding: utf-8 -*-
from abc import abstractmethod
import numpy as np
from scipy.spatial import distance
from .base import Base, Property
class Measure(Base):
"""Measure base type
A measure provides a means to assess the seperation between two
:class:`~.State` objects state1 and state2.
"""
mapping: np.ndarray = Property(
default=None,
doc="Mapping array which specifies which elements within the"
" state vectors are to be assessed as part of the measure"
)
mapping2: np.ndarray = Property(
default=None,
doc="A second mapping for when the states being compared exist "
"in different parameter spaces. Defaults to the same as the"
" first mapping"
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.mapping2 is not None and self.mapping is None:
raise ValueError("Cannot set mapping2 if mapping is None. "
"If this is really what you meant to do, then"
" set mapping to include all dimensions.")
if self.mapping2 is None and self.mapping is not None:
self.mapping2 = self.mapping
@abstractmethod
def __call__(self, state1, state2):
r"""
Compute the distance between a pair of :class:`~.State` objects
Parameters
----------
state1 : :class:`~.State`
state2 : :class:`~.State`
Returns
-------
float
distance measure between a pair of input :class:`~.State` objects
"""
return NotImplementedError
class Euclidean(Measure):
r"""Euclidean distance measure
This measure returns the Euclidean distance between a pair of
:class:`~.State` objects.
The Euclidean distance between a pair of state vectors :math:`u` and
:math:`v` is defined as:
.. math::
\sqrt{\sum_{i=1}^{N}{(u_i - v_i)^2}}
"""
def __call__(self, state1, state2):
r"""Calculate the Euclidean distance between a pair of state vectors
Parameters
----------
state1 : :class:`~.State`
state2 : :class:`~.State`
Returns
-------
float
Euclidean distance between two input :class:`~.State`
"""
# Calculate Euclidean distance between two state
if self.mapping is not None:
return distance.euclidean(state1.state_vector[self.mapping, 0],
state2.state_vector[self.mapping2, 0])
else:
return distance.euclidean(state1.state_vector[:, 0], state2.state_vector[:, 0])
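# Usage sketch (not part of the original module): only the `state_vector`
# attribute of the compared states is accessed above, so a lightweight stand-in
# object is enough to illustrate the behaviour:
#
#   import numpy as np
#   from types import SimpleNamespace
#   s1 = SimpleNamespace(state_vector=np.array([[0.], [0.], [1.]]))
#   s2 = SimpleNamespace(state_vector=np.array([[3.], [4.], [9.]]))
#   Euclidean(mapping=[0, 1])(s1, s2)   # -> 5.0, only the mapped elements are compared
#   Euclidean()(s1, s2)                 # -> sqrt(3**2 + 4**2 + 8**2) ~= 9.43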
class EuclideanWeighted(Measure):
r"""Weighted Euclidean distance measure
This measure returns the Euclidean distance between a pair of
:class:`~.State` objects, taking into account a specified weighting.
The Weighted Euclidean distance between a pair of state vectors :math:`u`
and :math:`v` with weighting :math:`w` is defined as:
.. math::
        \sqrt{\sum_{i=1}^{N}{w_i(u_i - v_i)^2}}
Note
----
The EuclideanWeighted object has a property called weighting, which
allows the method to be called on different pairs of states.
If different weightings need to be used then multiple
:class:`Measure` objects must be created with the specific weighting
"""
weighting: np.ndarray = Property(doc="Weighting vector for the Euclidean calculation")
def __call__(self, state1, state2):
r"""Calculate the weighted Euclidean distance between a pair of state
objects
Parameters
----------
state1 : :class:`~.State`
state2 : :class:`~.State`
Returns
-------
dist : float
            Weighted Euclidean distance between two input
:class:`~.State` objects
"""
if self.mapping is not None:
return distance.euclidean(state1.state_vector[self.mapping, 0],
state2.state_vector[self.mapping2, 0],
self.weighting)
else:
return distance.euclidean(state1.state_vector[:, 0],
state2.state_vector[:, 0],
self.weighting)
class Mahalanobis(Measure):
r"""Mahalanobis distance measure
This measure returns the Mahalanobis distance between a pair of
:class:`~.State` objects taking into account the distribution (i.e.
the :class:`~.CovarianceMatrix`) of the first :class:`.State` object
The Mahalanobis distance between a distribution with mean :math:`\mu` and
Covariance matrix :math:`\Sigma` and a point :math:`x` is defined as:
.. math::
        \sqrt{(\mu - x) \Sigma^{-1} (\mu - x)^T}
"""
def __call__(self, state1, state2):
r"""Calculate the Mahalanobis distance between a pair of state objects
Parameters
----------
state1 : :class:`~.State`
state2 : :class:`~.State`
Returns
-------
float
Mahalanobis distance between a pair of input :class:`~.State`
objects
"""
if self.mapping is not None:
u = state1.state_vector[self.mapping, 0]
v = state2.state_vector[self.mapping2, 0]
# extract the mapped covariance data
rows = np.array(self.mapping, dtype=np.intp)
columns = np.array(self.mapping, dtype=np.intp)
cov = state1.covar[rows[:, np.newaxis], columns]
else:
u = state1.state_vector[:, 0]
v = state2.state_vector[:, 0]
cov = state1.covar
vi = np.linalg.inv(cov)
return distance.mahalanobis(u, v, vi)
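# Usage sketch (not part of the original module): Mahalanobis additionally reads
# the first state's `covar`, so a stand-in needs both attributes:
#
#   import numpy as np
#   from types import SimpleNamespace
#   s1 = SimpleNamespace(state_vector=np.array([[0.], [0.]]), covar=np.diag([4., 1.]))
#   s2 = SimpleNamespace(state_vector=np.array([[2.], [1.]]))
#   Mahalanobis()(s1, s2)   # -> sqrt(2**2 / 4 + 1**2 / 1) = sqrt(2) ~= 1.41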
class SquaredGaussianHellinger(Measure):
r"""Squared Gaussian Hellinger distance measure
This measure returns the Squared Hellinger distance between a pair of
:class:`~.GaussianState` multivariate objects.
The Squared Hellinger distance between two multivariate normal
distributions :math:`P \sim N(\mu_1,\Sigma_1)` and
:math:`Q \sim N(\mu_2,\Sigma_2)` is defined as:
.. math::
H^{2}(P, Q) &= 1 - {\frac{\det(\Sigma_1)^{\frac{1}{4}}\det(\Sigma_2)^{\frac{1}{4}}}
{\det\left(\frac{\Sigma_1+\Sigma_2}{2}\right)^{\frac{1}{2}}}}
\exp\left(-\frac{1}{8}(\mu_1-\mu_2)^T
\left(\frac{\Sigma_1+\Sigma_2}{2}\right)^{-1}(\mu_1-\mu_2)\right)\\
&\equiv 1 - \sqrt{\frac{\det(\Sigma_1)^{\frac{1}{2}}\det(\Sigma_2)^{\frac{1}{2}}}
{\det\left(\frac{\Sigma_1+\Sigma_2}{2}\right)}}
\exp\left(-\frac{1}{8}(\mu_1-\mu_2)^T
\left(\frac{\Sigma_1+\Sigma_2}{2}\right)^{-1}(\mu_1-\mu_2)\right)
Note
----
This distance is bounded between 0 and 1
"""
def __call__(self, state1, state2):
r""" Calculate the Squared Hellinger distance multivariate normal
distributions
Parameters
----------
state1 : :class:`~.GaussianState`
state2 : :class:`~.GaussianState`
Returns
-------
float
Squared Hellinger distance between two input
:class:`~.GaussianState`
"""
if self.mapping is not None:
mu1 = state1.state_vector[self.mapping, :]
mu2 = state2.state_vector[self.mapping2, :]
# extract the mapped covariance data
rows = np.array(self.mapping, dtype=np.intp)
columns = np.array(self.mapping, dtype=np.intp)
sigma1 = state1.covar[rows[:, np.newaxis], columns]
sigma2 = state2.covar[rows[:, np.newaxis], columns]
else:
mu1 = state1.state_vector
mu2 = state2.state_vector
sigma1 = state1.covar
sigma2 = state2.covar
sigma1_plus_sigma2 = sigma1 + sigma2
mu1_minus_mu2 = mu1 - mu2
E = mu1_minus_mu2.T @ np.linalg.inv(sigma1_plus_sigma2/2) @ mu1_minus_mu2
epsilon = -0.125*E
numerator = np.sqrt(np.linalg.det(sigma1 @ sigma2))
denominator = np.linalg.det(sigma1_plus_sigma2/2)
squared_hellinger = 1 - np.sqrt(numerator/denominator)*np.exp(epsilon)
squared_hellinger = squared_hellinger.item()
return squared_hellinger
class GaussianHellinger(SquaredGaussianHellinger):
r"""Gaussian Hellinger distance measure
This measure returns the Hellinger distance between a pair of
:class:`~.GaussianState` multivariate objects.
The Hellinger distance between two multivariate normal distributions
:math:`P \sim N(\mu_1,\Sigma_1)` and :math:`Q \sim N(\mu_2,\Sigma_2)`
is defined as:
.. math::
H(P,Q) = \sqrt{1 - {\frac{\det(\Sigma_1)^{\frac{1}{4}}\det(\Sigma_2)^{\frac{1}{4}}}
{\det\left(\frac{\Sigma_1+\Sigma_2}{2}\right)^{\frac{1}{2}}}}
\exp\left(-\frac{1}{8}(\mu_1-\mu_2)^T
\left(\frac{\Sigma_1+\Sigma_2}{2}\right)^{-1}(\mu_1-\mu_2)\right)}
Note
----
This distance is bounded between 0 and 1
"""
def __call__(self, state1, state2):
r""" Calculate the Hellinger distance between 2 state elements
Parameters
----------
state1 : :class:`~.GaussianState`
state2 : :class:`~.GaussianState`
Returns
-------
float
Hellinger distance between two input :class:`~.GaussianState`
"""
return np.sqrt(super().__call__(state1, state2))
|
the-stack_0_20229 | """ test """
from pathlib import Path
import yaml
def main():
""" main func """
print("hello factorio")
data = {
'wiki': {
'foo': 'bar',
'key': 'value',
'the answer': 42
},
'bili_wiki': {
'foo': 'bar',
'key': 'value',
'the answer': 42
}
}
cfg_file = Path("key_test.yaml")
# Write YAML file
with cfg_file.open('w+', encoding='utf8') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False, allow_unicode=True)
# Read YAML file
data_loaded = yaml.safe_load(cfg_file.read_text(encoding="UTF8"))
print(data)
print(data_loaded)
print(data == data_loaded)
if __name__ == '__main__':
main()
|
the-stack_0_20230 | from math import *
from collections import Counter
from functools import reduce
import bach_random_factored_numbers
lg = lambda x: log(x, 2)
def categorize(tup, easy_ecm_frac=(180./3800), easy_ecm_reject_frac=(180./3800),
ecm_frac=(621./3800), gnfs_frac=(2027./3800)):
"""
Categorizes a number into different kinds of moduli:
easy_ecm_frac: bit fraction of largest factor we can remove by ECM factorization.
easy_ecm_reject_frac: We reject after this many bits have been "removed" by easy ECM
factorization.
ecm_frac: bit fraction of largest factor an attacker could remove by ECM.
gnfs_frac: bit fraction of largest composite an attacker could factorize by GNFS.
Output:
[rejected numbers]
can_reject: We can reject this modulus because it too much has been factored
can_factor: After removing easy ECM factors, a prime is left
[invalid numbers - could not reject but weak RSA moduli]
invalid_ecm - invalid because all its factors are smaller than the threshold defined
by ecm_frac
invalid_gnfs - invalid because after removing the ECM factors the remaining rough part
          is under the gnfs_frac threshold
[valid numbers - good RSA moduli]
valid_lopsided - valid, second factor less than gnfs_bits / 2
valid - valid
"""
n = tup[0]
f = tup[1][:]
assert len(f) > 0
lg_n = lg(n)
easy_ecm_bits = easy_ecm_frac*lg_n
easy_ecm_reject_bits = easy_ecm_reject_frac*lg_n
ecm_bits = ecm_frac*lg_n
gnfs_bits = gnfs_frac*lg_n
easy_f = filter(lambda x: lg(x) <= easy_ecm_bits, f)
if lg(reduce(lambda x,y:x*y, easy_f, 1)) > easy_ecm_reject_bits:
return 'can_reject'
f = list(filter(lambda x: lg(x) > easy_ecm_bits, f))
if len(f) < 2:
return 'can_factor'
f = list(filter(lambda x: lg(x) > ecm_bits, f))
if len(f) < 2:
return 'invalid_ecm'
x = reduce(lambda x,y:x*y, f)
if lg(x) < gnfs_bits:
return 'invalid_gnfs'
f.sort()
if lg(f[-2]) < gnfs_bits/2:
return 'valid_lopsided'
return 'valid'
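# Worked example (sketch, using the default thresholds of categorize()): for a
# modulus built from the two Mersenne primes 2**61 - 1 and 2**89 - 1 (~150 bits
# in total), both factors sit far above the ECM threshold and their product is
# above the GNFS threshold, so the modulus is accepted:
#
#   p, q = 2**61 - 1, 2**89 - 1
#   categorize((p * q, [p, q]))          # -> 'valid'
#   categorize((3 * p * q, [3, p, q]))   # -> 'valid' (3 is stripped as an easy ECM factor)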
def p_bad(easy_ecm_frac=(180./3800), easy_ecm_reject_frac=0, \
ecm_frac=(621./3800), gnfs_frac=(2027./3800)):
"""
Compute approximate probability of getting a bad RSA modulus, using the given
parameters.
"""
results = list(map(lambda x: categorize(x, easy_ecm_frac, \
easy_ecm_reject_frac, ecm_frac, gnfs_frac), factor_arr))
non_rejected = list(filter(lambda x: x not in ["can_reject", "can_factor"], results))
good_moduli = list(filter(lambda x: x in ["valid_lopsided", "valid"], results))
return 1 - 1.0 * len(good_moduli) / len(non_rejected)
if __name__ == "__main__":
import numpy as np
#import matplotlib.pyplot as plt
# Difficulty 1e12
# I.e. this aims for moduli which are ca. 1e12 times
easy_ecm_bits = 180.
ecm_reject_bits = 0.
ecm_bits = 621.
gnfs_bits = 2027.
# Generate 10,000 128-bit random factored numbers.
# These numbers are only for quick testing, need a lot more numbers and more bits
# for accurate results
factor_arr = bach_random_factored_numbers.generate_random_factored_numbers(128, 8, 10000)
# Uncomment to load factors from file instead
#factor_arr = []
#with open("random_factored_numbers.txt", "r") as f:
# for l in f:
# factor_arr.append(eval(l))
x = np.arange(3000, 5000, 25)
y = list(map(lambda b: p_bad(easy_ecm_frac=(easy_ecm_bits/b), easy_ecm_reject_frac=(ecm_reject_bits/b), \
ecm_frac=(ecm_bits/b), gnfs_frac=(gnfs_bits/b)), x))
#plt.plot(x, y)
#plt.show()
y2 = list(map(lambda x: -log(x[0]) / x[1], zip(y, x)))
#plt.plot(x, y2, label="-log(p)/b")
    #plt.title("RSA moduli difficulty level 1e12")
#plt.legend()
#plt.show()
fun, best_p_1e12, best_b_1e12 = max(zip(y2, y, x))
print("Best value: p_bad=", best_p_1e12, "at", best_b_1e12, "bits")
print("Modulus length for 1e-9 chance of bad modulus=", log(1e-9) / log(best_p_1e12) * best_b_1e12)
print("Modulus length for 1e-12 chance of bad modulus=", log(1e-12) / log(best_p_1e12) * best_b_1e12)
print("Modulus length for 1e-15 chance of bad modulus=", log(1e-15) / log(best_p_1e12) * best_b_1e12)
|
the-stack_0_20231 | MockAuthSessionKey = '76f7e712abd55921d4bcad43a92c20'
MockAuthCustomerUUID = 'd632c88d-2555-440d-9ade-a850e91cd3a8'
MockAuthUserUUID = '290fff0a-61dc-42ca-ba54-0efa4b519eeb'
MockAuthUsername = 'abcdef'
MockAuthPassword = 'fdjsak'
MockAuthAccountid = '14322314'
mock_auth_responses = {
'/is/1.0.0/generatetoken': {
('default', 'POST'): {
'json': {
'token_type': 'bearer',
'expires_in': 6899,
'refresh_token': '903c481cb5dffcfc52e292867075037',
'access_token': MockAuthSessionKey
},
'status_code': 200
},
('login_incorrect', 'POST'): {
'json': {
'error': 'invalid_grant',
'error_description': 'Provided Authorization Grant is invalid.'
},
'status_code': 412
}
},
'/1.0.0/auth/validatetoken': {
('default', 'GET'): {
'json': {
'customerid': MockAuthCustomerUUID,
'userid': MockAuthUserUUID,
'username': f'{MockAuthAccountid}/{MockAuthUsername}'
},
'status_code': 200
}
}
}
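# Lookup sketch (assumes the mock HTTP layer keys canned responses by path and
# then by a (scenario, method) tuple, as laid out above):
#
#   resp = mock_auth_responses['/is/1.0.0/generatetoken'][('default', 'POST')]
#   assert resp['status_code'] == 200
#   assert resp['json']['access_token'] == MockAuthSessionKey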
|
the-stack_0_20232 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.protos import matcher_pb2
def build(matcher_config):
"""Builds a matcher object based on the matcher config.
Args:
matcher_config: A matcher.proto object containing the config for the desired
Matcher.
Returns:
Matcher based on the config.
Raises:
ValueError: On empty matcher proto.
"""
if not isinstance(matcher_config, matcher_pb2.Matcher):
raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
matcher = matcher_config.argmax_matcher
matched_threshold = unmatched_threshold = None
if not matcher.ignore_thresholds:
matched_threshold = matcher.matched_threshold
unmatched_threshold = matcher.unmatched_threshold
return argmax_matcher.ArgMaxMatcher(
matched_threshold=matched_threshold,
unmatched_threshold=unmatched_threshold,
negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
force_match_for_each_row=matcher.force_match_for_each_row,
use_matmul_gather=matcher.use_matmul_gather)
if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
matcher = matcher_config.bipartite_matcher
return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather)
raise ValueError('Empty matcher.')
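# Usage sketch (field names are taken from the accesses above; the compiled proto
# must be importable as object_detection.protos.matcher_pb2):
#
#   config = matcher_pb2.Matcher()
#   config.argmax_matcher.matched_threshold = 0.7
#   config.argmax_matcher.unmatched_threshold = 0.3
#   argmax = build(config)   # -> argmax_matcher.ArgMaxMatcher instance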
|
the-stack_0_20233 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dialogflow.v2beta1 Agents API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import google.api_core.protobuf_helpers
import grpc
from dialogflow_v2beta1.gapic import agents_client_config
from dialogflow_v2beta1.gapic import enums
from dialogflow_v2beta1.gapic.transports import agents_grpc_transport
from dialogflow_v2beta1.proto import agent_pb2
from dialogflow_v2beta1.proto import agent_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import struct_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'dialogflow', ).version
class AgentsClient(object):
"""
Agents are best described as Natural Language Understanding (NLU)
modules that transform user requests into actionable data. You can
include agents in your app, product, or service to determine user intent
and respond to the user in a natural way.
After you create an agent, you can add ``Intents``, ``Contexts``,
``Entity Types``, ``Webhooks``, and so on to manage the flow of a
conversation and match user input to predefined intents and actions.
You can create an agent using both Dialogflow Standard Edition and
Dialogflow Enterprise Edition. For details, see `Dialogflow
Editions <https://cloud.google.com/dialogflow-enterprise/docs/editions>`__.
You can save your agent for backup or versioning by exporting the agent
by using the ``ExportAgent`` method. You can import a saved agent by
using the ``ImportAgent`` method.
Dialogflow provides several `prebuilt
agents <https://cloud.google.com/dialogflow-enterprise/docs/agents-prebuilt>`__
for common conversation scenarios such as determining a date and time,
converting currency, and so on.
For more information about agents, see the `Dialogflow
documentation <https://cloud.google.com/dialogflow-enterprise/docs/agents-overview>`__.
"""
SERVICE_ADDRESS = 'dialogflow.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.cloud.dialogflow.v2beta1.Agents'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
dialogflow_v2beta1.AgentsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
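    # Example: AgentsClient.project_path('my-project') returns 'projects/my-project'.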
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None):
"""Constructor.
Args:
transport (Union[~.AgentsGrpcTransport,
Callable[[~.Credentials, type], ~.AgentsGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2)
else:
client_config = agents_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning,
stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=agents_grpc_transport.AgentsGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = agents_grpc_transport.AgentsGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_agent(self,
parent,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves the specified agent.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.get_agent(parent)
Args:
parent (str): Required. The project that the agent to fetch is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.Agent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_agent' not in self._inner_api_calls:
self._inner_api_calls[
'get_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_agent,
default_retry=self._method_configs['GetAgent'].retry,
default_timeout=self._method_configs['GetAgent'].timeout,
client_info=self._client_info,
)
request = agent_pb2.GetAgentRequest(parent=parent, )
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_agent'](request,
retry=retry,
timeout=timeout,
metadata=metadata)
def search_agents(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the list of agents.
Since there is at most one conversational agent per project, this method
is useful primarily for listing all agents across projects the caller
has access to. One can achieve that with a wildcard project collection
id "-". Refer to `List
Sub-Collections <https://cloud.google.com/apis/design/design_patterns#list_sub-collections>`__.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.search_agents(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.search_agents(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The project to list agents from. Format:
``projects/<Project ID or '-'>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dialogflow_v2beta1.types.Agent` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'search_agents' not in self._inner_api_calls:
self._inner_api_calls[
'search_agents'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.search_agents,
default_retry=self._method_configs['SearchAgents'].retry,
default_timeout=self._method_configs['SearchAgents'].
timeout,
client_info=self._client_info,
)
request = agent_pb2.SearchAgentsRequest(
parent=parent,
page_size=page_size,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(self._inner_api_calls['search_agents'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='agents',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def train_agent(self,
parent,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Trains the specified agent.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.train_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to train is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'train_agent' not in self._inner_api_calls:
self._inner_api_calls[
'train_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.train_agent,
default_retry=self._method_configs['TrainAgent'].retry,
default_timeout=self._method_configs['TrainAgent'].timeout,
client_info=self._client_info,
)
request = agent_pb2.TrainAgentRequest(parent=parent, )
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header)
metadata.append(routing_metadata)
operation = self._inner_api_calls['train_agent'](request,
retry=retry,
timeout=timeout,
metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
def export_agent(self,
parent,
agent_uri=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Exports the specified agent to a ZIP file.
Operation <response: ``ExportAgentResponse``>
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.export_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to export is associated with.
Format: ``projects/<Project ID>``.
agent_uri (str): Optional. The `Google Cloud
Storage <https://cloud.google.com/storage/docs/>`__ URI to export the
agent to. The format of this URI must be
``gs://<bucket-name>/<object-name>``. If left unspecified, the
serialized agent is returned inline.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'export_agent' not in self._inner_api_calls:
self._inner_api_calls[
'export_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.export_agent,
default_retry=self._method_configs['ExportAgent'].retry,
default_timeout=self._method_configs['ExportAgent'].
timeout,
client_info=self._client_info,
)
request = agent_pb2.ExportAgentRequest(
parent=parent,
agent_uri=agent_uri,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header)
metadata.append(routing_metadata)
operation = self._inner_api_calls['export_agent'](request,
retry=retry,
timeout=timeout,
metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
agent_pb2.ExportAgentResponse,
metadata_type=struct_pb2.Struct,
)
def import_agent(self,
parent,
agent_uri=None,
agent_content=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Imports the specified agent from a ZIP file.
Uploads new intents and entity types without deleting the existing ones.
Intents and entity types with the same name are replaced with the new
versions from ImportAgentRequest.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.import_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to import is associated with.
Format: ``projects/<Project ID>``.
agent_uri (str): The URI to a Google Cloud Storage file containing the agent to import.
Note: The URI must start with "gs://".
agent_content (bytes): The agent to import.
Example for how to import an agent via the command line:
.. raw:: html
<pre>curl \
'https://dialogflow.googleapis.com/v2beta1/projects/<project_name>/agent:import\
-X POST \
-H 'Authorization: Bearer '$(gcloud auth application-default
print-access-token) \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
--compressed \
--data-binary "{
'agentContent': '$(cat <agent zip file> | base64 -w 0)'
}"</pre>
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'import_agent' not in self._inner_api_calls:
self._inner_api_calls[
'import_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.import_agent,
default_retry=self._method_configs['ImportAgent'].retry,
default_timeout=self._method_configs['ImportAgent'].
timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
agent_uri=agent_uri,
agent_content=agent_content,
)
request = agent_pb2.ImportAgentRequest(
parent=parent,
agent_uri=agent_uri,
agent_content=agent_content,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header)
metadata.append(routing_metadata)
operation = self._inner_api_calls['import_agent'](request,
retry=retry,
timeout=timeout,
metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
def restore_agent(self,
parent,
agent_uri=None,
agent_content=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Restores the specified agent from a ZIP file.
Replaces the current agent version with a new one. All the intents and
entity types in the older version are deleted.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.restore_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to restore is associated with.
Format: ``projects/<Project ID>``.
agent_uri (str): The URI to a Google Cloud Storage file containing the agent to restore.
Note: The URI must start with "gs://".
agent_content (bytes): The agent to restore.
Example for how to restore an agent via the command line:
.. raw:: html
<pre>curl \
'https://dialogflow.googleapis.com/v2beta1/projects/<project_name>/agent:restore\
-X POST \
-H 'Authorization: Bearer '$(gcloud auth application-default
print-access-token) \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
--compressed \
--data-binary "{
'agentContent': '$(cat <agent zip file> | base64 -w 0)'
}"</pre>
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'restore_agent' not in self._inner_api_calls:
self._inner_api_calls[
'restore_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.restore_agent,
default_retry=self._method_configs['RestoreAgent'].retry,
default_timeout=self._method_configs['RestoreAgent'].
timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
agent_uri=agent_uri,
agent_content=agent_content,
)
request = agent_pb2.RestoreAgentRequest(
parent=parent,
agent_uri=agent_uri,
agent_content=agent_content,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header)
metadata.append(routing_metadata)
operation = self._inner_api_calls['restore_agent'](request,
retry=retry,
timeout=timeout,
metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
|
the-stack_0_20234 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default, where supported, restores params default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
- Deprecated in 2.4
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
vrf: ntc
afi: ipv4
route_target_both_auto_evpn: True
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "address-family ipv4 unicast"]
'''
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
def main():
argument_spec = dict(
vrf=dict(required=True),
afi=dict(required=True, choices=['ipv4', 'ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
safi=dict(choices=['unicast', 'multicast'], removed_in_version="2.4"),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config_text = get_config(module)
config = NetworkConfig(indent=2, contents=config_text)
path = ['vrf context %s' % module.params['vrf'],
'address-family %s unicast' % module.params['afi']]
try:
current = config.get_block_config(path)
except ValueError:
current = None
commands = list()
if current and module.params['state'] == 'absent':
commands.append('no address-family %s unicast' % module.params['afi'])
elif module.params['state'] == 'present':
if current:
have = 'route-target both auto evpn' in current
if module.params['route_target_both_auto_evpn'] is not None:
want = bool(module.params['route_target_both_auto_evpn'])
if want and not have:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('route-target both auto evpn')
elif have and not want:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('no route-target both auto evpn')
else:
commands.append('address-family %s unicast' % module.params['afi'])
if module.params['route_target_both_auto_evpn']:
commands.append('route-target both auto evpn')
if commands:
commands.insert(0, 'vrf context %s' % module.params['vrf'])
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
module.exit_json(**result)
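# For the task in EXAMPLES above, run against a device where the address-family
# is not yet configured, the command list pushed by this module would be (sketch):
#
#   vrf context ntc
#   address-family ipv4 unicast
#   route-target both auto evpn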
if __name__ == '__main__':
main()
|
the-stack_0_20236 | #!/usr/bin/env python
#
# Copyright (c) 2016 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Extract background images from a tar archive.
"""
__all__ = (
'extract_backgrounds',
)
import os
import sys
import tarfile
import cv2
import numpy
def im_from_file(f):
a = numpy.asarray(bytearray(f.read()), dtype=numpy.uint8)
return cv2.imdecode(a, cv2.IMREAD_GRAYSCALE)
def extract_backgrounds(archive_name):
"""
Extract backgrounds from provided tar archive.
    JPEGs from the archive are converted to grayscale, cropped to a square,
    resized to 256x256 when larger than that, and saved in ./bgs/.
:param archive_name:
Name of the .tar file containing JPEGs of background images.
"""
    if not os.path.isdir("bgs"):
os.mkdir("bgs")
t = tarfile.open(name=archive_name)
def members():
m = t.next()
while m:
yield m
m = t.next()
index = 0
for m in members():
if not m.name.endswith(".jpg"):
continue
f = t.extractfile(m)
try:
im = im_from_file(f)
finally:
f.close()
if im is None:
continue
if im.shape[0] > im.shape[1]:
im = im[:im.shape[1], :]
else:
im = im[:, :im.shape[0]]
if im.shape[0] > 256:
im = cv2.resize(im, (256, 256))
fname = "bgs/{:08}.jpg".format(index)
print(fname)
rc = cv2.imwrite(fname, im)
if not rc:
raise Exception("Failed to write file {}".format(fname))
index += 1
if __name__ == "__main__":
extract_backgrounds(sys.argv[1])
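    # Usage sketch (archive name is illustrative): python extract_backgrounds.py backgrounds.tar
    # The processed JPEGs are written to ./bgs/.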
|
the-stack_0_20237 | import torch.nn as nn
import torch
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
myinputs = []
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class MultiContext(nn.Module):
def __init__(self, model_slice, model_patch):
super(MultiContext, self).__init__()
self.stream1 = nn.Sequential(*list(model_slice.children())[:-1])
self.stream2 = nn.Sequential(*list(model_patch.children())[:-1])
        ''' Optionally freeze all pre-trained parameters and fine-tune only the fc head (uncomment below). '''
# for p in self.parameters():
# p.requires_grad = False
        # Head options: reduce the 1024-d combined feature gradually (fcl) or directly (fc); forward() uses fc.
self.fcl = nn.Sequential(
nn.Linear(512 * 2, 256),
nn.Dropout(0.25),
nn.Linear(256, 2))
self.fc = nn.Linear(1024, 2)
self.fcfc = nn.Linear(4, 2)
def forward(self, input_slice, input_patch):
vector_slice = self.stream1(input_slice)
vector_patch = self.stream2(input_patch)
vector_slice = vector_slice.view(vector_slice.size(0), -1)
vector_patch = vector_patch.view(vector_patch.size(0), -1)
combined_vector = torch.cat((vector_slice, vector_patch), 1)
outputs = self.fc(combined_vector)
return outputs
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
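# Smoke-test sketch (not part of the original file). It assumes 2-channel
# 224x224 inputs, matching the 2-channel conv1 above; note that pretrained=True
# would fail here, because the torchvision checkpoints expect a 3-channel conv1.
if __name__ == '__main__':
    # Single-stream classifier on a 2-channel input.
    model = resnet18(pretrained=False, num_classes=2)
    x = torch.randn(1, 2, 224, 224)
    print(model(x).shape)  # torch.Size([1, 2])

    # Two-stream multi-context model: each backbone is stripped of its fc layer,
    # the two 512-d features are concatenated and mapped to 2 classes.
    mc = MultiContext(resnet18(pretrained=False), resnet18(pretrained=False))
    print(mc(torch.randn(1, 2, 224, 224), torch.randn(1, 2, 224, 224)).shape)  # torch.Size([1, 2])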
|
the-stack_0_20238 | """
Gateway for Binance Crypto Exchange.
"""
import urllib
import hashlib
import hmac
import time
from copy import copy
from datetime import datetime, timedelta
from enum import Enum
from threading import Lock
from typing import Dict, List, Tuple
import pytz
from howtrader.api.rest import RestClient, Request
from howtrader.api.websocket import WebsocketClient
from howtrader.trader.constant import (
Direction,
Exchange,
Product,
Status,
OrderType,
Interval,
Offset
)
from howtrader.trader.gateway import BaseGateway
from howtrader.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
BarData,
OrderRequest,
QueryRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from howtrader.trader.event import EVENT_TIMER
from howtrader.event import Event, EventEngine
F_REST_HOST: str = "https://fapi.binance.com"
F_WEBSOCKET_TRADE_HOST: str = "wss://fstream.binance.com/ws/"
F_WEBSOCKET_DATA_HOST: str = "wss://fstream.binance.com/stream?streams="
F_TESTNET_RESTT_HOST: str = "https://testnet.binancefuture.com"
F_TESTNET_WEBSOCKET_TRADE_HOST: str = "wss://stream.binancefuture.com/ws/"
F_TESTNET_WEBSOCKET_DATA_HOST: str = "wss://stream.binancefuture.com/stream?streams="
D_REST_HOST: str = "https://dapi.binance.com"
D_WEBSOCKET_TRADE_HOST: str = "wss://dstream.binance.com/ws/"
D_WEBSOCKET_DATA_HOST: str = "wss://dstream.binance.com/stream?streams="
D_TESTNET_RESTT_HOST: str = "https://testnet.binancefuture.com"
D_TESTNET_WEBSOCKET_TRADE_HOST: str = "wss://dstream.binancefuture.com/ws/"
D_TESTNET_WEBSOCKET_DATA_HOST: str = "wss://dstream.binancefuture.com/stream?streams="
STATUS_BINANCES2VT: Dict[str, Status] = {
"NEW": Status.NOTTRADED,
"PARTIALLY_FILLED": Status.PARTTRADED,
"FILLED": Status.ALLTRADED,
"CANCELED": Status.CANCELLED,
"REJECTED": Status.REJECTED,
"EXPIRED": Status.CANCELLED
}
ORDERTYPE_VT2BINANCES: Dict[OrderType, Tuple[str, str]] = {
OrderType.LIMIT: ("LIMIT", "GTC"),
OrderType.MARKET: ("MARKET", "GTC"),
OrderType.FAK: ("LIMIT", "IOC"),
OrderType.FOK: ("LIMIT", "FOK"),
OrderType.LIMIT_MAKER: ("LIMIT", "GTX")
}
ORDERTYPE_BINANCES2VT: Dict[Tuple[str, str], OrderType] = {v: k for k, v in ORDERTYPE_VT2BINANCES.items()}
DIRECTION_VT2BINANCES: Dict[Direction, str] = {
Direction.LONG: "BUY",
Direction.SHORT: "SELL"
}
DIRECTION_BINANCES2VT: Dict[str, Direction] = {v: k for k, v in DIRECTION_VT2BINANCES.items()}
INTERVAL_VT2BINANCES: Dict[Interval, str] = {
Interval.MINUTE: "1m",
Interval.HOUR: "1h",
Interval.DAILY: "1d",
}
TIMEDELTA_MAP: Dict[Interval, timedelta] = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class Security(Enum):
NONE: int = 0
SIGNED: int = 1
API_KEY: int = 2
symbol_name_map: Dict[str, str] = {}
class BinancesGateway(BaseGateway):
"""
VN Trader Gateway for Binance connection.
"""
default_setting = {
"key": "",
"secret": "",
"会话数": 3,
"服务器": ["REAL", "TESTNET"],
"合约模式": ["正向", "反向"],
"代理地址": "",
"代理端口": 0,
}
exchanges: Exchange = [Exchange.BINANCE]
def __init__(self, event_engine: EventEngine):
"""Constructor"""
super().__init__(event_engine, "BINANCES")
self.trade_ws_api = BinancesTradeWebsocketApi(self)
self.market_ws_api = BinancesDataWebsocketApi(self)
self.rest_api = BinancesRestApi(self)
def connect(self, setting: dict) -> None:
""""""
key = setting["key"]
secret = setting["secret"]
session_number = setting["会话数"]
server = setting["服务器"]
proxy_host = setting["代理地址"]
proxy_port = setting["代理端口"]
if setting["合约模式"] == "正向":
usdt_base = True
else:
usdt_base = False
self.rest_api.connect(usdt_base, key, secret, session_number, server,
proxy_host, proxy_port)
self.market_ws_api.connect(usdt_base, proxy_host, proxy_port, server)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.market_ws_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.rest_api.cancel_order(req)
def query_order(self, req: QueryRequest):
self.rest_api.query_order(req)
def query_account(self) -> None:
""""""
self.rest_api.query_account()
def query_position(self) -> None:
""""""
self.rest_api.query_position()
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
return self.rest_api.query_history(req)
def close(self) -> None:
""""""
self.rest_api.stop()
self.trade_ws_api.stop()
self.market_ws_api.stop()
def process_timer_event(self, event: Event) -> None:
""""""
self.rest_api.keep_user_stream()
class BinancesRestApi(RestClient):
"""
BINANCE Future REST API
"""
def __init__(self, gateway: BinancesGateway):
""""""
super().__init__()
self.gateway: BinancesGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.trade_ws_api: BinancesTradeWebsocketApi = self.gateway.trade_ws_api
self.market_ws_api: BinancesDataWebsocketApi = self.gateway.market_ws_api
self.key: str = ""
self.secret: str = ""
self.user_stream_key: str = ""
self.keep_alive_count: int = 0
self.recv_window: int = 5000
self.time_offset: int = 0
self.order_count: int = 1_000_000
self.order_count_lock: Lock = Lock()
self.connect_time: int = 0
self.usdt_base: bool = False
def sign(self, request: Request) -> Request:
"""
Generate BINANCE signature.
"""
security = request.data["security"]
if security == Security.NONE:
request.data = None
return request
if request.params:
path = request.path + "?" + urllib.parse.urlencode(request.params)
else:
request.params = dict()
path = request.path
if security == Security.SIGNED:
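            # Compensate for clock drift between the local machine and the exchange
            # (time_offset is measured in on_query_time).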
timestamp = int(time.time() * 1000)
if self.time_offset > 0:
timestamp -= abs(self.time_offset)
elif self.time_offset < 0:
timestamp += abs(self.time_offset)
request.params["timestamp"] = timestamp
query = urllib.parse.urlencode(sorted(request.params.items()))
signature = hmac.new(self.secret, query.encode(
"utf-8"), hashlib.sha256).hexdigest()
query += "&signature={}".format(signature)
path = request.path + "?" + query
request.path = path
request.params = {}
request.data = {}
# Add headers
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-MBX-APIKEY": self.key
}
if security in [Security.SIGNED, Security.API_KEY]:
request.headers = headers
return request
def connect(
self,
usdt_base: bool,
key: str,
secret: str,
session_number: int,
server: str,
proxy_host: str,
proxy_port: int
) -> None:
"""
Initialize connection to REST server.
"""
self.usdt_base = usdt_base
self.key = key
self.secret = secret.encode()
self.proxy_port = proxy_port
self.proxy_host = proxy_host
self.server = server
self.connect_time = (
int(datetime.now().strftime("%y%m%d%H%M%S")) * self.order_count
)
if self.server == "REAL":
if self.usdt_base:
self.init(F_REST_HOST, proxy_host, proxy_port)
else:
self.init(D_REST_HOST, proxy_host, proxy_port)
else:
if self.usdt_base:
self.init(F_TESTNET_RESTT_HOST, proxy_host, proxy_port)
else:
self.init(D_TESTNET_RESTT_HOST, proxy_host, proxy_port)
self.start(session_number)
self.gateway.write_log("REST API启动成功")
self.query_time()
self.query_account()
self.query_position()
self.query_orders()
self.query_contract()
self.start_user_stream()
def query_time(self) -> Request:
""""""
data = {
"security": Security.NONE
}
if self.usdt_base:
path = "/fapi/v1/time"
else:
path = "/dapi/v1/time"
return self.add_request(
"GET",
path,
callback=self.on_query_time,
data=data
)
    def query_account(self) -> None:
""""""
data = {"security": Security.SIGNED}
if self.usdt_base:
path = "/fapi/v1/account"
else:
path = "/dapi/v1/account"
self.add_request(
method="GET",
path=path,
callback=self.on_query_account,
data=data
)
    def query_position(self) -> None:
""""""
data = {"security": Security.SIGNED}
if self.usdt_base:
path = "/fapi/v2/positionRisk"
else:
path = "/dapi/v1/positionRisk"
self.add_request(
method="GET",
path=path,
callback=self.on_query_position,
data=data
)
def query_orders(self) -> None:
""""""
data = {"security": Security.SIGNED}
if self.usdt_base:
path = "/fapi/v1/openOrders"
else:
path = "/dapi/v1/openOrders"
self.add_request(
method="GET",
path=path,
callback=self.on_query_orders,
data=data
)
def query_order(self, req: QueryRequest) -> None:
""""""
data = {
"security": Security.SIGNED
}
params = {
"symbol": req.symbol,
"origClientOrderId": req.orderid
}
if self.usdt_base:
path = "/fapi/v1/order"
else:
path = "/dapi/v1/order"
self.add_request(
method="GET",
path=path,
callback=self.on_query_order,
params=params,
data=data,
extra=req
)
    def query_contract(self) -> None:
""""""
data = {
"security": Security.NONE
}
if self.usdt_base:
path = "/fapi/v1/exchangeInfo"
else:
path = "/dapi/v1/exchangeInfo"
self.add_request(
method="GET",
path=path,
callback=self.on_query_contract,
data=data
)
def _new_order_id(self) -> int:
""""""
with self.order_count_lock:
self.order_count += 1
return self.order_count
def send_order(self, req: OrderRequest) -> str:
""""""
orderid = "x-cLbi5uMH" + str(self.connect_time + self._new_order_id())
order = req.create_order_data(
orderid,
self.gateway_name
)
self.gateway.on_order(order)
data = {
"security": Security.SIGNED
}
order_type, time_condition = ORDERTYPE_VT2BINANCES[req.type]
params = {
"symbol": req.symbol,
"side": DIRECTION_VT2BINANCES[req.direction],
"type": order_type,
"timeInForce": time_condition,
"price": float(req.price),
"quantity": float(req.volume),
"newClientOrderId": orderid,
}
if req.offset == Offset.CLOSE:
params["reduceOnly"] = True
if self.usdt_base:
path = "/fapi/v1/order"
else:
path = "/dapi/v1/order"
self.add_request(
method="POST",
path=path,
callback=self.on_send_order,
data=data,
params=params,
extra=order,
on_error=self.on_send_order_error,
on_failed=self.on_send_order_failed
)
return order.vt_orderid
def cancel_order(self, req: CancelRequest) -> None:
""""""
data = {
"security": Security.SIGNED
}
params = {
"symbol": req.symbol,
"origClientOrderId": req.orderid
}
if self.usdt_base:
path = "/fapi/v1/order"
else:
path = "/dapi/v1/order"
self.add_request(
method="DELETE",
path=path,
callback=self.on_cancel_order,
params=params,
data=data,
extra=req
)
def start_user_stream(self) -> None:
""""""
data = {
"security": Security.API_KEY
}
if self.usdt_base:
path = "/fapi/v1/listenKey"
else:
path = "/dapi/v1/listenKey"
self.add_request(
method="POST",
path=path,
callback=self.on_start_user_stream,
data=data
)
    def keep_user_stream(self) -> None:
""""""
self.keep_alive_count += 1
if self.keep_alive_count < 600:
return
self.keep_alive_count = 0
data = {
"security": Security.API_KEY
}
params = {
"listenKey": self.user_stream_key
}
if self.usdt_base:
path = "/fapi/v1/listenKey"
else:
path = "/dapi/v1/listenKey"
self.add_request(
method="PUT",
path=path,
callback=self.on_keep_user_stream,
params=params,
data=data
)
def on_query_time(self, data: dict, request: Request) -> None:
""""""
local_time = int(time.time() * 1000)
server_time = int(data["serverTime"])
self.time_offset = local_time - server_time
def on_query_account(self, data: dict, request: Request) -> None:
""""""
for asset in data["assets"]:
account = AccountData(
accountid=asset["asset"],
balance=float(asset["walletBalance"]),
frozen=float(asset["maintMargin"]),
gateway_name=self.gateway_name
)
# if account.balance:
self.gateway.on_account(account)
self.gateway.write_log("账户资金查询成功")
def on_query_position(self, data: list, request: Request) -> None:
"""
Query position.
"""
symbols = self.market_ws_api.ticks.keys()
for d in data:
if d['positionSide'] == "BOTH":
position = PositionData(
symbol=d["symbol"],
exchange=Exchange.BINANCE,
direction=Direction.NET,
                    volume=float(d["positionAmt"]),
price=float(d["entryPrice"]),
pnl=float(d["unRealizedProfit"]),
gateway_name=self.gateway_name,
)
# if position.volume or d['symbol'].lower() in symbols:
self.gateway.on_position(position)
self.gateway.write_log("持仓信息查询成功")
def on_query_orders(self, data: dict, request: Request) -> None:
""""""
for d in data:
key = (d["type"], d["timeInForce"])
order_type = ORDERTYPE_BINANCES2VT.get(key, None)
if not order_type:
continue
order = OrderData(
orderid=d["clientOrderId"],
symbol=d["symbol"],
exchange=Exchange.BINANCE,
price=float(d["price"]),
volume=float(d["origQty"]),
type=order_type,
direction=DIRECTION_BINANCES2VT[d["side"]],
traded=float(d["executedQty"]),
status=STATUS_BINANCES2VT.get(d["status"], None),
datetime=generate_datetime(d["time"]),
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
self.gateway.write_log("委托信息查询成功")
def on_query_order(self, data: dict, request: Request) -> None:
""""""
key = (data["type"], data["timeInForce"])
order_type = ORDERTYPE_BINANCES2VT.get(key, None)
if not order_type:
return
order = OrderData(
orderid=data["clientOrderId"],
symbol=data["symbol"],
exchange=Exchange.BINANCE,
price=float(data["price"]),
volume=float(data["origQty"]),
type=order_type,
direction=DIRECTION_BINANCES2VT[data["side"]],
traded=float(data["executedQty"]),
status=STATUS_BINANCES2VT.get(data["status"], None),
datetime=generate_datetime(data["time"]),
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
self.gateway.write_log("订单信息查询成功")
def on_query_contract(self, data: dict, request: Request) -> None:
""""""
for d in data["symbols"]:
base_currency = d["baseAsset"]
quote_currency = d["quoteAsset"]
name = f"{base_currency.upper()}/{quote_currency.upper()}"
pricetick = 1
min_volume = 1
for f in d["filters"]:
if f["filterType"] == "PRICE_FILTER":
pricetick = float(f["tickSize"])
elif f["filterType"] == "LOT_SIZE":
min_volume = float(f["stepSize"])
contract = ContractData(
symbol=d["symbol"],
exchange=Exchange.BINANCE,
name=name,
pricetick=pricetick,
size=1,
min_volume=min_volume,
product=Product.FUTURES,
history_data=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
symbol_name_map[contract.symbol] = contract.name
self.gateway.write_log("合约信息查询成功")
def on_send_order(self, data: dict, request: Request) -> None:
""""""
pass
def on_send_order_failed(self, status_code: str, request: Request) -> None:
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
msg = f"委托失败,状态码:{status_code},信息:{request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data: dict, request: Request) -> None:
""""""
pass
def on_start_user_stream(self, data: dict, request: Request) -> None:
""""""
self.user_stream_key = data["listenKey"]
self.keep_alive_count = 0
if self.server == "REAL":
url = F_WEBSOCKET_TRADE_HOST + self.user_stream_key
if not self.usdt_base:
url = D_WEBSOCKET_TRADE_HOST + self.user_stream_key
else:
url = F_TESTNET_WEBSOCKET_TRADE_HOST + self.user_stream_key
if not self.usdt_base:
url = D_TESTNET_WEBSOCKET_TRADE_HOST + self.user_stream_key
self.trade_ws_api.connect(url, self.proxy_host, self.proxy_port)
def on_keep_user_stream(self, data: dict, request: Request) -> None:
""""""
pass
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
history = []
limit = 1500
start_time = int(datetime.timestamp(req.start))
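        # Page through klines, at most `limit` bars per request, advancing start_time
        # past the last received bar until the server returns fewer than `limit` bars.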
while True:
# Create query params
params = {
"symbol": req.symbol,
"interval": INTERVAL_VT2BINANCES[req.interval],
"limit": limit,
"startTime": start_time * 1000, # convert to millisecond
}
# Add end time if specified
if req.end:
end_time = int(datetime.timestamp(req.end))
params["endTime"] = end_time * 1000 # convert to millisecond
# Get response from server
if self.usdt_base:
path = "/fapi/v1/klines"
else:
path = "/dapi/v1/klines"
resp = self.request(
"GET",
path=path,
data={"security": Security.NONE},
params=params
)
# Break if request failed with other status code
if resp.status_code // 100 != 2:
msg = f"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}"
self.gateway.write_log(msg)
break
else:
data = resp.json()
if not data:
msg = f"获取历史数据为空,开始时间:{start_time}"
self.gateway.write_log(msg)
break
buf = []
for l in data:
bar = BarData(
symbol=req.symbol,
exchange=req.exchange,
datetime=generate_datetime(l[0]),
interval=req.interval,
volume=float(l[5]),
open_price=float(l[1]),
high_price=float(l[2]),
low_price=float(l[3]),
close_price=float(l[4]),
gateway_name=self.gateway_name
)
buf.append(bar)
history.extend(buf)
begin = buf[0].datetime
end = buf[-1].datetime
msg = f"获取历史数据成功,{req.symbol} - {req.interval.value},{begin} - {end}"
self.gateway.write_log(msg)
# Break if total data count less than limit (latest date collected)
if len(data) < limit:
break
# Update start time
start_dt = bar.datetime + TIMEDELTA_MAP[req.interval]
start_time = int(datetime.timestamp(start_dt))
return history
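# User data stream client: receives ACCOUNT_UPDATE and ORDER_TRADE_UPDATE pushes
# over the listen-key Websocket and converts them into account, position and order
# updates (fills are attached to the order as trade_data).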
class BinancesTradeWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway: BinancesGateway):
""""""
super().__init__()
self.gateway: BinancesGateway = gateway
self.gateway_name: str = gateway.gateway_name
def connect(self, url: str, proxy_host: str, proxy_port: int) -> None:
""""""
self.init(url, proxy_host, proxy_port)
self.start()
def on_connected(self) -> None:
""""""
self.gateway.write_log("交易Websocket API连接成功")
    def on_packet(self, packet: dict) -> None:
""""""
if packet["e"] == "ACCOUNT_UPDATE":
self.on_account(packet)
elif packet["e"] == "ORDER_TRADE_UPDATE":
self.on_order(packet)
def on_account(self, packet: dict) -> None:
""""""
for acc_data in packet["a"]["B"]:
account = AccountData(
accountid=acc_data["a"],
balance=float(acc_data["wb"]),
frozen=float(acc_data["wb"]) - float(acc_data["cw"]),
gateway_name=self.gateway_name
)
# if account.balance:
self.gateway.on_account(account)
for pos_data in packet["a"]["P"]:
if pos_data['ps'] == "BOTH":
position = PositionData(
symbol=pos_data["s"],
exchange=Exchange.BINANCE,
direction=Direction.NET,
volume=float(pos_data["pa"]),
price=float(pos_data["ep"]),
pnl=float(pos_data["up"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(position)
def on_order(self, packet: dict) -> None:
""""""
ord_data = packet["o"]
key = (ord_data["o"], ord_data["f"])
order_type = ORDERTYPE_BINANCES2VT.get(key, None)
if not order_type:
return
order = OrderData(
symbol=ord_data["s"],
exchange=Exchange.BINANCE,
orderid=str(ord_data["c"]),
type=order_type,
direction=DIRECTION_BINANCES2VT[ord_data["S"]],
price=float(ord_data["p"]),
volume=float(ord_data["q"]),
traded=float(ord_data["z"]),
status=STATUS_BINANCES2VT[ord_data["X"]],
datetime=generate_datetime(packet["E"]),
gateway_name=self.gateway_name
)
# Push trade event
trade_volume = float(ord_data["l"])
if trade_volume:
trade_data = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=ord_data["t"],
direction=order.direction,
price=float(ord_data["L"]),
volume=trade_volume,
datetime=generate_datetime(ord_data["T"]),
gateway_name=self.gateway_name,
)
order.trade_data = trade_data
else:
order.trade_data = None
self.gateway.on_order(order)
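# Market data stream client: subscribes to the <symbol>@ticker, @depth5 and
# @kline_1m combined streams and maintains per-symbol tick and bar buffers.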
class BinancesDataWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway: BinancesGateway):
""""""
super().__init__()
self.gateway: BinancesGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.ticks: Dict[str, TickData] = {}
self.bars: Dict[str, BarData] = {}
self.usdt_base = False
def connect(
self,
usdt_base: bool,
proxy_host: str,
proxy_port: int,
server: str
) -> None:
""""""
self.usdt_base = usdt_base
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.server = server
def on_connected(self) -> None:
""""""
self.gateway.write_log("行情Websocket API连接刷新")
def subscribe(self, req: SubscribeRequest) -> None:
""""""
if req.symbol not in symbol_name_map:
self.gateway.write_log(f"找不到该合约代码{req.symbol}")
return
# Create tick buf data
tick = TickData(
symbol=req.symbol,
name=symbol_name_map.get(req.symbol, ""),
exchange=Exchange.BINANCE,
datetime=datetime.now(CHINA_TZ),
gateway_name=self.gateway_name,
)
self.ticks[req.symbol.lower()] = tick
# Create bar buf data
bar = BarData(
symbol=req.symbol,
exchange=Exchange.BINANCE,
datetime=datetime.now(CHINA_TZ),
gateway_name=self.gateway_name,
interval=Interval.MINUTE
)
self.bars[req.symbol.lower()] = bar
# Close previous connection
if self._active:
self.stop()
self.join()
# Create new connection
channels = []
for ws_symbol in self.ticks.keys():
channels.append(ws_symbol + "@ticker")
channels.append(ws_symbol + "@depth5")
channels.append(ws_symbol + "@kline_1m")
if self.server == "REAL":
url = F_WEBSOCKET_DATA_HOST + "/".join(channels)
if not self.usdt_base:
url = D_WEBSOCKET_DATA_HOST + "/".join(channels)
else:
url = F_TESTNET_WEBSOCKET_DATA_HOST + "/".join(channels)
if not self.usdt_base:
url = D_TESTNET_WEBSOCKET_DATA_HOST + "/".join(channels)
self.init(url, self.proxy_host, self.proxy_port)
self.start()
def on_packet(self, packet: dict) -> None:
""""""
stream = packet["stream"]
data = packet["data"]
symbol, channel = stream.split("@")
tick: TickData = self.ticks[symbol]
bar: BarData = self.bars[symbol]
if channel == "ticker":
tick.volume = float(data['v'])
tick.open_price = float(data['o'])
tick.high_price = float(data['h'])
tick.low_price = float(data['l'])
tick.last_price = float(data['c'])
            tick.datetime = generate_datetime(float(data['E']))
elif channel == "kline_1m":
dt = generate_datetime(float(data['k']['t']))
if dt < bar.datetime and bar.close_price > 0:
# filter the older kline.
return
bar.open_price = float(data['k']['o'])
bar.high_price = float(data['k']['h'])
bar.low_price = float(data['k']['l'])
bar.close_price = float(data['k']['c'])
bar.volume = float(data['k']['v'])
bar.datetime = dt
if data['k']['x']: # bar finished
self.gateway.on_bar(copy(bar))
else:
bids = data["b"]
for n in range(min(5, len(bids))):
price, volume = bids[n]
tick.__setattr__("bid_price_" + str(n + 1), float(price))
tick.__setattr__("bid_volume_" + str(n + 1), float(volume))
asks = data["a"]
for n in range(min(5, len(asks))):
price, volume = asks[n]
tick.__setattr__("ask_price_" + str(n + 1), float(price))
tick.__setattr__("ask_volume_" + str(n + 1), float(volume))
if tick.last_price:
self.gateway.on_tick(copy(tick))
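# Convert a millisecond epoch timestamp to a timezone-aware datetime in Asia/Shanghai.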
def generate_datetime(timestamp: float) -> datetime:
""""""
dt = datetime.fromtimestamp(timestamp / 1000)
dt = CHINA_TZ.localize(dt)
return dt
|
the-stack_0_20239 | misspelling_dict = {
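    # Common misspelling -> list of suggested corrections.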
'abandonned': ['abandoned'],
'aberation': ['aberration'],
'abilties': ['abilities'],
'abilty': ['ability'],
'abondon': ['abandon'],
'abbout': ['about'],
'abotu': ['about'],
'abouta': ['about a'],
'aboutit': ['about it'],
'aboutthe': ['about the'],
'abscence': ['absence'],
'abondoned': ['abandoned'],
'abondoning': ['abandoning'],
'abondons': ['abandons'],
'aborigene': ['aborigine'],
'accesories': ['accessories'],
'accidant': ['accident'],
'abortificant': ['abortifacient'],
'abreviate': ['abbreviate'],
'abreviated': ['abbreviated'],
'abreviation': ['abbreviation'],
'abritrary': ['arbitrary'],
'absail': ['abseil'],
'absailing': ['abseiling'],
'absense': ['absence'],
'absolutly': ['absolutely'],
'absorbsion': ['absorption'],
'absorbtion': ['absorption'],
'abudance': ['abundance'],
'abundacies': ['abundances'],
'abundancies': ['abundances'],
'abundunt': ['abundant'],
'abutts': ['abuts'],
'acadamy': ['academy'],
'acadmic': ['academic'],
'accademic': ['academic'],
'accademy': ['academy'],
'acccused': ['accused'],
'accelleration': ['acceleration'],
'accension': ['accession', 'ascension'],
'acceptence': ['acceptance'],
'acceptible': ['acceptable'],
'accessable': ['accessible'],
'accidentaly': ['accidentally'],
'accidently': ['accidentally'],
'acclimitization': ['acclimatization'],
'accomadate': ['accommodate'],
'accomadated': ['accommodated'],
'accomadates': ['accommodates'],
'accomadating': ['accommodating'],
'accomadation': ['accommodation'],
'accomadations': ['accommodations'],
'accomdate': ['accommodate'],
'accomodate': ['accommodate'],
'accomodated': ['accommodated'],
'accomodates': ['accommodates'],
'accomodating': ['accommodating'],
'accomodation': ['accommodation'],
'accomodations': ['accommodations'],
'accompanyed': ['accompanied'],
'accordeon': ['accordion'],
'accordian': ['accordion'],
'accoring': ['according'],
'accoustic': ['acoustic'],
'accquainted': ['acquainted'],
'accrediation': ['accreditation'],
'accredidation': ['accreditation'],
'accross': ['across'],
'accussed': ['accused'],
'acedemic': ['academic'],
'acheive': ['achieve'],
'acheived': ['achieved'],
'acheivement': ['achievement'],
'acheivements': ['achievements'],
'acheives': ['achieves'],
'acheiving': ['achieving'],
'acheivment': ['achievement'],
'acheivments': ['achievements'],
'achievment': ['achievement'],
'achievments': ['achievements'],
'achive': ['achieve', 'archive'],
'achived': ['achieved', 'archived'],
'achivement': ['achievement'],
'achivements': ['achievements'],
'acident': ['accident'],
'acknowldeged': ['acknowledged'],
'acknowledgeing': ['acknowledging'],
'ackward': ['awkward', 'backward'],
'acommodate': ['accommodate'],
'acomplish': ['accomplish'],
'acomplished': ['accomplished'],
'acomplishment': ['accomplishment'],
'acomplishments': ['accomplishments'],
'acording': ['according'],
'acordingly': ['accordingly'],
'acquaintence': ['acquaintance'],
'acquaintences': ['acquaintances'],
'acquiantence': ['acquaintance'],
'acquiantences': ['acquaintances'],
'acquited': ['acquitted'],
'activites': ['activities'],
'activly': ['actively'],
'actualy': ['actually'],
'acuracy': ['accuracy'],
'acused': ['accused'],
'acustom': ['accustom'],
'acustommed': ['accustomed'],
'adavanced': ['advanced'],
'adbandon': ['abandon'],
'addional': ['additional'],
'addionally': ['additionally'],
'additinally': ['additionally'],
'additionaly': ['additionally'],
'additonal': ['additional'],
'additonally': ['additionally'],
'addmission': ['admission'],
'addopt': ['adopt'],
'addopted': ['adopted'],
'addoptive': ['adoptive'],
'addres': ['address', 'adders'],
'addresable': ['addressable'],
'addresed': ['addressed'],
'addresing': ['addressing'],
'addressess': ['addresses'],
'addtion': ['addition'],
'addtional': ['additional'],
'adecuate': ['adequate'],
'adequit': ['adequate'],
'adhearing': ['adhering'],
'adherance': ['adherence'],
'admendment': ['amendment'],
'admininistrative': ['administrative'],
'adminstered': ['administered'],
'adminstrate': ['administrate'],
'adminstration': ['administration'],
'adminstrative': ['administrative'],
'adminstrator': ['administrator'],
'admissability': ['admissibility'],
'admissable': ['admissible'],
'admited': ['admitted'],
'admitedly': ['admittedly'],
'adn': ['and'],
'adolecent': ['adolescent'],
'adquire': ['acquire'],
'adquired': ['acquired'],
'adquires': ['acquires'],
'adquiring': ['acquiring'],
'adres': ['address'],
'adresable': ['addressable'],
'adresing': ['addressing'],
'adress': ['address'],
'adressable': ['addressable'],
'adressed': ['addressed'],
'adressing': ['addressing', 'dressing'],
'adventrous': ['adventurous'],
'advertisment': ['advertisement'],
'advertisments': ['advertisements'],
'advesary': ['adversary'],
'adviced': ['advised'],
'aeriel': ['aerial'],
'aeriels': ['aerials'],
'afair': ['affair'],
'afficianados': ['aficionados'],
'afficionado': ['aficionado'],
'afficionados': ['aficionados'],
'affilate': ['affiliate'],
'affilliate': ['affiliate'],
'affort': ['afford', 'effort'],
'aforememtioned': ['aforementioned'],
'againnst': ['against'],
'agains': ['against'],
'agaisnt': ['against'],
'aganist': ['against'],
'aggaravates': ['aggravates'],
'aggreed': ['agreed'],
'aggreement': ['agreement'],
'aggregious': ['egregious'],
'aggresive': ['aggressive'],
'agian': ['again'],
'agianst': ['against'],
'agin': ['again'],
'agina': ['again', 'angina'],
'aginst': ['against'],
'agravate': ['aggravate'],
'agre': ['agree'],
'agred': ['agreed'],
'agreeement': ['agreement'],
'agreemnt': ['agreement'],
'agregate': ['aggregate'],
'agregates': ['aggregates'],
'agreing': ['agreeing'],
'agression': ['aggression'],
'agressive': ['aggressive'],
'agressively': ['aggressively'],
'agressor': ['aggressor'],
'agricultue': ['agriculture'],
'agriculure': ['agriculture'],
'agricuture': ['agriculture'],
'agrieved': ['aggrieved'],
'ahev': ['have'],
'ahppen': ['happen'],
'ahve': ['have'],
'aicraft': ['aircraft'],
'aiport': ['airport'],
'airbourne': ['airborne'],
'aircaft': ['aircraft'],
'aircrafts': ['aircraft'],
'airporta': ['airports'],
'airrcraft': ['aircraft'],
'aisian': ['asian'],
'albiet': ['albeit'],
'alchohol': ['alcohol'],
'alchoholic': ['alcoholic'],
'alchol': ['alcohol'],
'alcholic': ['alcoholic'],
'alcohal': ['alcohol'],
'alcoholical': ['alcoholic'],
'aledge': ['allege'],
'aledged': ['alleged'],
'aledges': ['alleges'],
'alege': ['allege'],
'aleged': ['alleged'],
'alegience': ['allegiance'],
'algebraical': ['algebraic'],
'algorhitms': ['algorithms'],
'algoritm': ['algorithm'],
'algoritms': ['algorithms'],
'alientating': ['alienating'],
'alledge': ['allege'],
'alledged': ['alleged'],
'alledgedly': ['allegedly'],
'alledges': ['alleges'],
'allegedely': ['allegedly'],
'allegedy': ['allegedly'],
'allegely': ['allegedly'],
'allegence': ['allegiance'],
'allegience': ['allegiance'],
'allign': ['align'],
'alligned': ['aligned'],
'alliviate': ['alleviate'],
'allopone': ['allophone'],
'allopones': ['allophones'],
'allready': ['already'],
'allthough': ['although'],
'alltime': ['all-time'],
'alltogether': ['altogether'],
'almsot': ['almost'],
'alochol': ['alcohol'],
'alomst': ['almost'],
'alot': ['a lot', 'allot'],
'alotted': ['allotted'],
'alowed': ['allowed'],
'alowing': ['allowing'],
'alreayd': ['already'],
'alse': ['else'],
'alsot': ['also'],
'alternitives': ['alternatives'],
'altho': ['although'],
'althought': ['although'],
'altough': ['although'],
'alusion': ['allusion', 'illusion'],
'alwasy': ['always'],
'alwyas': ['always'],
'amalgomated': ['amalgamated'],
'amatuer': ['amateur'],
'amature': ['armature', 'amateur'],
'amendmant': ['amendment'],
'Amercia': ['America'],
'amerliorate': ['ameliorate'],
'amke': ['make'],
'amking': ['making'],
'ammend': ['amend'],
'ammended': ['amended'],
'ammendment': ['amendment'],
'ammendments': ['amendments'],
'ammount': ['amount'],
'ammused': ['amused'],
'amoung': ['among'],
'amoungst': ['amongst'],
'amung': ['among'],
'amunition': ['ammunition'],
'analagous': ['analogous'],
'analitic': ['analytic'],
'analogeous': ['analogous'],
'anarchim': ['anarchism'],
'anarchistm': ['anarchism'],
'anbd': ['and'],
'ancestory': ['ancestry'],
'ancilliary': ['ancillary'],
'andd': ['and'],
'androgenous': ['androgynous'],
'androgeny': ['androgyny'],
'anihilation': ['annihilation'],
'aniversary': ['anniversary'],
'annoint': ['anoint'],
'annointed': ['anointed'],
'annointing': ['anointing'],
'annoints': ['anoints'],
'annouced': ['announced'],
'annualy': ['annually'],
'annuled': ['annulled'],
'anohter': ['another'],
'anomolies': ['anomalies'],
'anomolous': ['anomalous'],
'anomoly': ['anomaly'],
'anonimity': ['anonymity'],
'anounced': ['announced'],
'anouncement': ['announcement'],
'ansalisation': ['nasalisation'],
'ansalization': ['nasalization'],
'ansestors': ['ancestors'],
'antartic': ['antarctic'],
'anthromorphization': ['anthropomorphization'],
'anthropolgist': ['anthropologist'],
'anthropolgy': ['anthropology'],
'anual': ['annual'],
'anulled': ['annulled'],
'anwsered': ['answered'],
'anyhwere': ['anywhere'],
'anyother': ['any other'],
'anytying': ['anything'],
'aparent': ['apparent'],
'aparment': ['apartment'],
'apenines': ['apennines', 'Apennines'],
'aplication': ['application'],
'aplied': ['applied'],
'apolegetics': ['apologetics'],
'apon': ['upon', 'apron'],
'apparant': ['apparent'],
'apparantly': ['apparently'],
'appart': ['apart'],
'appartment': ['apartment'],
'appartments': ['apartments'],
'appealling': ['appealing', 'appalling'],
'appeareance': ['appearance'],
'appearence': ['appearance'],
'appearences': ['appearances'],
'appenines': ['apennines', 'Apennines'],
'apperance': ['appearance'],
'apperances': ['appearances'],
'appereance': ['appearance'],
'appereances': ['appearances'],
'applicaiton': ['application'],
'applicaitons': ['applications'],
'appologies': ['apologies'],
'appology': ['apology'],
'apprearance': ['appearance'],
'apprieciate': ['appreciate'],
'approachs': ['approaches'],
'appropiate': ['appropriate'],
'appropraite': ['appropriate'],
'appropropiate': ['appropriate'],
'approproximate': ['approximate'],
'approxamately': ['approximately'],
'approxiately': ['approximately'],
'approximitely': ['approximately'],
'aprehensive': ['apprehensive'],
'apropriate': ['appropriate'],
'apropriately': ['appropriately'],
'aproval': ['approval'],
'aproximate': ['approximate'],
'aproximately': ['approximately'],
'aquaduct': ['aqueduct'],
'aquaintance': ['acquaintance'],
'aquainted': ['acquainted'],
'aquiantance': ['acquaintance'],
'aquire': ['acquire'],
'aquired': ['acquired'],
'aquiring': ['acquiring'],
'aquisition': ['acquisition'],
'aquitted': ['acquitted'],
'aranged': ['arranged'],
'arangement': ['arrangement'],
'arbitarily': ['arbitrarily'],
'arbitary': ['arbitrary'],
'archaelogical': ['archaeological'],
'archaelogists': ['archaeologists'],
'archaelogy': ['archaeology'],
'archaoelogy': ['archeology', 'archaeology'],
'archaology': ['archeology', 'archaeology'],
'archeaologist': ['archeologist', 'archaeologist'],
'archeaologists': ['archeologists', 'archaeologists'],
'archetect': ['architect'],
'archetects': ['architects'],
'archetectural': ['architectural'],
'archetecturally': ['architecturally'],
'archetecture': ['architecture'],
'archiac': ['archaic'],
'archictect': ['architect'],
'archimedian': ['archimedean'],
'architecht': ['architect'],
'architechturally': ['architecturally'],
'architechture': ['architecture'],
'architechtures': ['architectures'],
'architectual': ['architectural'],
'archtype': ['archetype'],
'archtypes': ['archetypes'],
'aready': ['already'],
'areodynamics': ['aerodynamics'],
'argubly': ['arguably'],
'arguement': ['argument'],
'arguements': ['arguments'],
'arised': ['arose'],
'arival': ['arrival'],
'armamant': ['armament'],
'armistace': ['armistice'],
'arogant': ['arrogant'],
'arogent': ['arrogant'],
'aroud': ['around'],
'arrangment': ['arrangement'],
'arrangments': ['arrangements'],
'arrengement': ['arrangement'],
'arrengements': ['arrangements'],
'arround': ['around'],
'artcile': ['article'],
'artical': ['article'],
'artice': ['article'],
'articel': ['article'],
'artifical': ['artificial'],
'artifically': ['artificially'],
'artillary': ['artillery'],
'arund': ['around'],
'asetic': ['ascetic'],
'asfar': ['as far'],
'asign': ['assign'],
'aslo': ['also'],
'asociated': ['associated'],
'asorbed': ['absorbed'],
'asphyxation': ['asphyxiation'],
'assasin': ['assassin'],
'assasinate': ['assassinate'],
'assasinated': ['assassinated'],
'assasinates': ['assassinates'],
'assasination': ['assassination'],
'assasinations': ['assassinations'],
'assasined': ['assassinated'],
'assasins': ['assassins'],
'assassintation': ['assassination'],
'assemple': ['assemble'],
'assertation': ['assertion'],
'asside': ['aside'],
'assisnate': ['assassinate'],
'assit': ['assist'],
'assitant': ['assistant'],
'assocation': ['association'],
'assoicate': ['associate'],
'assoicated': ['associated'],
'assoicates': ['associates'],
'assosication': ['assassination'],
'asssassans': ['assassins'],
'assualt': ['assault'],
'assualted': ['assaulted'],
'assymetric': ['asymmetric'],
'assymetrical': ['asymmetrical'],
'asteriod': ['asteroid'],
'asthetic': ['aesthetic'],
'asthetical': ['aesthetical'],
'asthetically': ['aesthetically'],
'asume': ['assume'],
'aswell': ['as well'],
'atain': ['attain'],
'atempting': ['attempting'],
'atheistical': ['atheistic'],
'athenean': ['athenian'],
'atheneans': ['athenians'],
'athiesm': ['atheism'],
'athiest': ['atheist'],
'atorney': ['attorney'],
'atribute': ['attribute'],
'atributed': ['attributed'],
'atributes': ['attributes'],
'attaindre': ['attainder', 'attained'],
'attemp': ['attempt'],
'attemped': ['attempted'],
'attemt': ['attempt'],
'attemted': ['attempted'],
'attemting': ['attempting'],
'attemts': ['attempts'],
'attendence': ['attendance'],
'attendent': ['attendant'],
'attendents': ['attendants'],
'attened': ['attended'],
'attension': ['attention'],
'attitide': ['attitude'],
'attributred': ['attributed'],
'attrocities': ['atrocities'],
'audeince': ['audience'],
'auromated': ['automated'],
'austrailia': ['Australia'],
'austrailian': ['Australian'],
'auther': ['author'],
'authobiographic': ['autobiographic'],
'authobiography': ['autobiography'],
'authorative': ['authoritative'],
'authorites': ['authorities'],
'authorithy': ['authority'],
'authoritiers': ['authorities'],
'authoritive': ['authoritative'],
'authrorities': ['authorities'],
'autochtonous': ['autochthonous'],
'autoctonous': ['autochthonous'],
'automaticly': ['automatically'],
'automibile': ['automobile'],
'automonomous': ['autonomous'],
'autor': ['author'],
'autority': ['authority'],
'auxilary': ['auxiliary'],
'auxillaries': ['auxiliaries'],
'auxillary': ['auxiliary'],
'auxilliaries': ['auxiliaries'],
'auxilliary': ['auxiliary'],
'availabe': ['available'],
'availablity': ['availability'],
'availaible': ['available'],
'availble': ['available'],
'availiable': ['available'],
'availible': ['available'],
'avalable': ['available'],
'avalance': ['avalanche'],
'avaliable': ['available'],
'avation': ['aviation'],
'avengence': ['a vengeance'],
'averageed': ['averaged'],
'avilable': ['available'],
'awared': ['awarded'],
'awya': ['away'],
'baceause': ['because'],
'backgorund': ['background'],
'backrounds': ['backgrounds'],
'bakc': ['back'],
'banannas': ['bananas'],
'bandwith': ['bandwidth'],
'bankrupcy': ['bankruptcy'],
'banruptcy': ['bankruptcy'],
'baout': ['about', 'bout'],
'basicaly': ['basically'],
'basicly': ['basically'],
'bcak': ['back'],
'beachead': ['beachhead'],
'beacuse': ['because'],
'beastiality': ['bestiality'],
'beatiful': ['beautiful'],
'beaurocracy': ['bureaucracy'],
'beaurocratic': ['bureaucratic'],
'beautyfull': ['beautiful'],
'becamae': ['became'],
'becames': ['becomes', 'became'],
'becasue': ['because'],
'beccause': ['because'],
'becomeing': ['becoming'],
'becomming': ['becoming'],
'becouse': ['because'],
'becuase': ['because'],
'bedore': ['before'],
'beeing': ['being'],
'befoer': ['before'],
'beggin': ['begin', 'begging'],
'begginer': ['beginner'],
'begginers': ['beginners'],
'beggining': ['beginning'],
'begginings': ['beginnings'],
'beggins': ['begins'],
'begining': ['beginning'],
'beginnig': ['beginning'],
'behavour': ['behavior', 'behaviour'],
'beleagured': ['beleaguered'],
'beleif': ['belief'],
'beleive': ['believe'],
'beleived': ['believed'],
'beleives': ['believes'],
'beleiving': ['believing'],
'beligum': ['belgium'],
'belive': ['believe'],
'belived': ['believed', 'beloved'],
'belives': ['believes', 'beliefs'],
'belligerant': ['belligerent'],
'bellweather': ['bellwether'],
'bemusemnt': ['bemusement'],
'beneficary': ['beneficiary'],
'beng': ['being'],
'benificial': ['beneficial'],
'benifit': ['benefit'],
'benifits': ['benefits'],
'bergamont': ['bergamot'],
'Bernouilli': ['Bernoulli'],
'beseige': ['besiege'],
'beseiged': ['besieged'],
'beseiging': ['besieging'],
'beteen': ['between'],
'betwen': ['between'],
'beween': ['between'],
'bewteen': ['between'],
'bilateraly': ['bilaterally'],
'billingualism': ['bilingualism'],
'binominal': ['binomial'],
'bizzare': ['bizarre'],
'blaim': ['blame'],
'blaimed': ['blamed'],
'blessure': ['blessing'],
'Blitzkreig': ['Blitzkrieg'],
'boaut': ['bout', 'boat', 'about'],
'bodydbuilder': ['bodybuilder'],
'bombardement': ['bombardment'],
'bombarment': ['bombardment'],
'bondary': ['boundary'],
'Bonnano': ['Bonanno'],
'borke': ['broke'],
'boundry': ['boundary'],
'bouyancy': ['buoyancy'],
'bouyant': ['buoyant'],
'boyant': ['buoyant'],
'Brasillian': ['Brazilian'],
'breakthough': ['breakthrough'],
'breakthroughts': ['breakthroughs'],
'breif': ['brief'],
'breifly': ['briefly'],
'brethen': ['brethren'],
'bretheren': ['brethren'],
'briliant': ['brilliant'],
'brillant': ['brilliant'],
'brimestone': ['brimstone'],
'Britian': ['Britain'],
'Brittish': ['British'],
'broacasted': ['broadcast'],
'broadacasting': ['broadcasting'],
'broady': ['broadly'],
'Buddah': ['Buddha'],
'Buddist': ['Buddhist'],
'buisness': ['business'],
'buisnessman': ['businessman'],
'buoancy': ['buoyancy'],
'buring': ['burying', 'burning', 'burin', 'during'],
'burried': ['buried'],
'busines': ['business'],
'busineses': ['business', 'businesses'],
'busness': ['business'],
'bussiness': ['business'],
'caculater': ['calculator'],
'cacuses': ['caucuses'],
'cahracters': ['characters'],
'calaber': ['caliber'],
'calander': ['calendar', 'calender', 'colander'],
'calculater': ['calculator'],
'calculs': ['calculus'],
'calenders': ['calendars'],
'caligraphy': ['calligraphy'],
'caluclate': ['calculate'],
'caluclated': ['calculated'],
'caluculate': ['calculate'],
'caluculated': ['calculated'],
'calulate': ['calculate'],
'calulated': ['calculated'],
'calulater': ['calculator'],
'Cambrige': ['Cambridge'],
'camoflage': ['camouflage'],
'campain': ['campaign'],
'campains': ['campaigns'],
'candadate': ['candidate'],
'candiate': ['candidate'],
'candidiate': ['candidate'],
'cannister': ['canister'],
'cannisters': ['canisters'],
'cannnot': ['cannot'],
'cannonical': ['canonical'],
'cannotation': ['connotation'],
'cannotations': ['connotations'],
'cant': ['cannot', 'can not', 'can\'t'],
'caost': ['coast'],
'caperbility': ['capability'],
'Capetown': ['Cape Town'],
'capible': ['capable'],
'captial': ['capital'],
'captued': ['captured'],
'capturd': ['captured'],
'carachter': ['character'],
'caracterized': ['characterized'],
'carcas': ['carcass', 'Caracas'],
'carefull': ['careful'],
'careing': ['caring'],
'carismatic': ['charismatic'],
'Carmalite': ['Carmelite'],
'Carnagie': ['Carnegie'],
'Carnagie-Mellon': ['Carnegie-Mellon'],
'carnege': ['carnage', 'Carnegie'],
'carnige': ['carnage', 'Carnegie'],
'Carnigie': ['Carnegie'],
'Carnigie-Mellon': ['Carnegie-Mellon'],
'carniverous': ['carnivorous'],
'carreer': ['career'],
'carrers': ['careers'],
'Carribbean': ['Caribbean'],
'Carribean': ['Caribbean'],
'cartdridge': ['cartridge'],
'Carthagian': ['Carthaginian'],
'carthographer': ['cartographer'],
'cartilege': ['cartilage'],
'cartilidge': ['cartilage'],
'cartrige': ['cartridge'],
'casette': ['cassette'],
'casion': ['caisson'],
'cassawory': ['cassowary'],
'cassowarry': ['cassowary'],
'casue': ['cause'],
'casued': ['caused'],
'casues': ['causes'],
'casuing': ['causing'],
'casulaties': ['casualties'],
'casulaty': ['casualty'],
'catagories': ['categories'],
'catagorized': ['categorized'],
'catagory': ['category'],
'Cataline': ['Catiline', 'Catalina'],
'catapillar': ['caterpillar'],
'catapillars': ['caterpillars'],
'catapiller': ['caterpillar'],
'catapillers': ['caterpillars'],
'catepillar': ['caterpillar'],
'catepillars': ['caterpillars'],
'catergorize': ['categorize'],
'catergorized': ['categorized'],
'caterpilar': ['caterpillar'],
'caterpilars': ['caterpillars'],
'caterpiller': ['caterpillar'],
'caterpillers': ['caterpillars'],
'cathlic': ['catholic'],
'catholocism': ['catholicism'],
'catterpilar': ['caterpillar'],
'catterpilars': ['caterpillars'],
'catterpillar': ['caterpillar'],
'catterpillars': ['caterpillars'],
'cattleship': ['battleship'],
'causalities': ['casualties'],
'Ceasar': ['Caesar'],
'Celcius': ['Celsius'],
'cellpading': ['cellpadding'],
'cementary': ['cemetery'],
'cemetarey': ['cemetery'],
'cemetaries': ['cemeteries'],
'cemetary': ['cemetery'],
'cencus': ['census'],
'censur': ['censor', 'censure'],
'cententenial': ['centennial'],
'centruies': ['centuries'],
'centruy': ['century'],
'centuties': ['centuries'],
'centuty': ['century'],
'ceratin': ['certain', 'keratin'],
'cerimonial': ['ceremonial'],
'cerimonies': ['ceremonies'],
'cerimonious': ['ceremonious'],
'cerimony': ['ceremony'],
'ceromony': ['ceremony'],
'certainity': ['certainty'],
'certian': ['certain'],
'cervial': ['cervical', 'servile', 'serval'],
'chalenging': ['challenging'],
'challange': ['challenge'],
'challanged': ['challenged'],
'challege': ['challenge'],
'Champange': ['Champagne'],
'changable': ['changeable'],
'charachter': ['character'],
'charachters': ['characters'],
'charactersistic': ['characteristic'],
'charactor': ['character'],
'charactors': ['characters'],
'charasmatic': ['charismatic'],
'charaterized': ['characterized'],
'chariman': ['chairman'],
'charistics': ['characteristics'],
'chasr': ['chaser', 'chase'],
'cheif': ['chief'],
'cheifs': ['chiefs'],
'chemcial': ['chemical'],
'chemcially': ['chemically'],
'chemestry': ['chemistry'],
'chemicaly': ['chemically'],
'childbird': ['childbirth'],
'childen': ['children'],
'choosen': ['chosen'],
'chracter': ['character'],
'chuch': ['church'],
'churchs': ['churches'],
'Cincinatti': ['Cincinnati'],
'Cincinnatti': ['Cincinnati'],
'circulaton': ['circulation'],
'circumsicion': ['circumcision'],
'circut': ['circuit'],
'ciricuit': ['circuit'],
'ciriculum': ['curriculum'],
'civillian': ['civilian'],
'claer': ['clear'],
'claerer': ['clearer'],
'claerly': ['clearly'],
'claimes': ['claims'],
'clas': ['class'],
'clasic': ['classic'],
'clasical': ['classical'],
'clasically': ['classically'],
'cleareance': ['clearance'],
'clera': ['clear', 'sclera'],
'clincial': ['clinical'],
'clinicaly': ['clinically'],
'cmo': ['com'],
'cmoputer': ['computer'],
'co-incided': ['coincided'],
'Coca Cola': ['Coca-Cola'],
'coctail': ['cocktail'],
'coform': ['conform'],
'cognizent': ['cognizant'],
'coincedentally': ['coincidentally'],
'colaborations': ['collaborations'],
'colateral': ['collateral'],
'colelctive': ['collective'],
'collaberative': ['collaborative'],
'collecton': ['collection'],
'collegue': ['colleague'],
'collegues': ['colleagues'],
'collonade': ['colonnade'],
'collonies': ['colonies'],
'collony': ['colony'],
'collosal': ['colossal'],
'colonizators': ['colonizers'],
'comander': ['commander', 'commandeer'],
'comando': ['commando'],
'comandos': ['commandos'],
'comany': ['company'],
'comapany': ['company'],
'comback': ['comeback'],
'combanations': ['combinations'],
'combinatins': ['combinations'],
'combusion': ['combustion'],
'comdemnation': ['condemnation'],
'comemmorates': ['commemorates'],
'comemoretion': ['commemoration'],
'comision': ['commission'],
'comisioned': ['commissioned'],
'comisioner': ['commissioner'],
'comisioning': ['commissioning'],
'comisions': ['commissions'],
'comission': ['commission'],
'comissioned': ['commissioned'],
'comissioner': ['commissioner'],
'comissioning': ['commissioning'],
'comissions': ['commissions'],
'comited': ['committed'],
'comiting': ['committing'],
'comitted': ['committed'],
'comittee': ['committee'],
'comitting': ['committing'],
'commandoes': ['commandos'],
'commedic': ['comedic'],
'commemerative': ['commemorative'],
'commemmorate': ['commemorate'],
'commemmorating': ['commemorating'],
'commerical': ['commercial'],
'commerically': ['commercially'],
'commericial': ['commercial'],
'commericially': ['commercially'],
'commerorative': ['commemorative'],
'comming': ['coming'],
'comminication': ['communication'],
'commision': ['commission'],
'commisioned': ['commissioned'],
'commisioner': ['commissioner'],
'commisioning': ['commissioning'],
'commisions': ['commissions'],
'commited': ['committed'],
'commitee': ['committee'],
'commiting': ['committing'],
'committe': ['committee'],
'committment': ['commitment'],
'committments': ['commitments'],
'commmemorated': ['commemorated'],
'commongly': ['commonly'],
'commonweath': ['commonwealth'],
'commuications': ['communications'],
'commuinications': ['communications'],
'communciation': ['communication'],
'communiation': ['communication'],
'communites': ['communities'],
'compability': ['compatibility'],
'comparision': ['comparison'],
'comparisions': ['comparisons'],
'comparitive': ['comparative'],
'comparitively': ['comparatively'],
'compatabilities': ['compatibilities'],
'compatability': ['compatibility'],
'compatable': ['compatible'],
'compatablities': ['compatibilities'],
'compatablity': ['compatibility'],
'compatiable': ['compatible'],
'compatiblities': ['compatibilities'],
'compatiblity': ['compatibility'],
'compeitions': ['competitions'],
'compensantion': ['compensation'],
'competance': ['competence'],
'competant': ['competent'],
'competative': ['competitive'],
'competion': ['competition', 'completion'],
'competitiion': ['competition'],
'competive': ['competitive'],
'competiveness': ['competitiveness'],
'comphrehensive': ['comprehensive'],
'compitent': ['competent'],
'completedthe': ['completed the'],
'completelyl': ['completely'],
'completetion': ['completion'],
'complier': ['compiler'],
'componant': ['component'],
'comprable': ['comparable'],
'comprimise': ['compromise'],
'compulsary': ['compulsory'],
'compulsery': ['compulsory'],
'computarized': ['computerized'],
'concensus': ['consensus'],
'concider': ['consider'],
'concidered': ['considered'],
'concidering': ['considering'],
'conciders': ['considers'],
'concieted': ['conceited'],
'concieved': ['conceived'],
'concious': ['conscious'],
'conciously': ['consciously'],
'conciousness': ['consciousness'],
'condamned': ['condemned'],
'condemmed': ['condemned'],
'condidtion': ['condition'],
'condidtions': ['conditions'],
'conditionsof': ['conditions of'],
'conected': ['connected'],
'conection': ['connection'],
'conesencus': ['consensus'],
'confidental': ['confidential'],
'confidentally': ['confidentially'],
'confids': ['confides'],
'configureable': ['configurable'],
'confortable': ['comfortable'],
'congradulations': ['congratulations'],
'congresional': ['congressional'],
'conived': ['connived'],
'conjecutre': ['conjecture'],
'conjuction': ['conjunction'],
'Conneticut': ['Connecticut'],
'conotations': ['connotations'],
'conquerd': ['conquered'],
'conquerer': ['conqueror'],
'conquerers': ['conquerors'],
'conqured': ['conquered'],
'conscent': ['consent'],
'consciouness': ['consciousness'],
'consdider': ['consider'],
'consdidered': ['considered'],
'consdiered': ['considered'],
'consectutive': ['consecutive'],
'consenquently': ['consequently'],
'consentrate': ['concentrate'],
'consentrated': ['concentrated'],
'consentrates': ['concentrates'],
'consept': ['concept'],
'consequentually': ['consequently'],
'consequeseces': ['consequences'],
'consern': ['concern'],
'conserned': ['concerned'],
'conserning': ['concerning'],
'conservitive': ['conservative'],
'consiciousness': ['consciousness'],
'consicousness': ['consciousness'],
'considerd': ['considered'],
'consideres': ['considered'],
'consious': ['conscious'],
'consistant': ['consistent'],
'consistantly': ['consistently'],
'consituencies': ['constituencies'],
'consituency': ['constituency'],
'consituted': ['constituted'],
'consitution': ['constitution'],
'consitutional': ['constitutional'],
'consolodate': ['consolidate'],
'consolodated': ['consolidated'],
'consonent': ['consonant'],
'consonents': ['consonants'],
'consorcium': ['consortium'],
'conspiracys': ['conspiracies'],
'conspiriator': ['conspirator'],
'constaints': ['constraints'],
'constanly': ['constantly'],
'constarnation': ['consternation'],
'constatn': ['constant'],
'constinually': ['continually'],
'constituant': ['constituent'],
'constituants': ['constituents'],
'constituion': ['constitution'],
'constituional': ['constitutional'],
'consttruction': ['construction'],
'constuction': ['construction'],
'contstruction': ['construction'],
'consulant': ['consultant'],
'consumate': ['consummate'],
'consumated': ['consummated'],
'contaiminate': ['contaminate'],
'containes': ['contains'],
'contamporaries': ['contemporaries'],
'contamporary': ['contemporary'],
'contempoary': ['contemporary'],
'contemporaneus': ['contemporaneous'],
'contempory': ['contemporary'],
'contendor': ['contender'],
'contibute': ['contribute'],
'contibuted': ['contributed'],
'contibutes': ['contributes'],
'contigent': ['contingent'],
'contined': ['continued'],
'continous': ['continuous'],
'continously': ['continuously'],
'continueing': ['continuing'],
'contravercial': ['controversial'],
'contraversy': ['controversy'],
'contributer': ['contributor'],
'contributers': ['contributors'],
'contritutions': ['contributions'],
'controled': ['controlled'],
'controling': ['controlling'],
'controll': ['control'],
'controlls': ['controls'],
'controvercial': ['controversial'],
'controvercy': ['controversy'],
'controveries': ['controversies'],
'controversal': ['controversial'],
'controversey': ['controversy'],
'controvertial': ['controversial'],
'controvery': ['controversy'],
'contruction': ['construction'],
'conveinent': ['convenient'],
'convenant': ['covenant'],
'convential': ['conventional'],
'convertables': ['convertibles'],
'convertion': ['conversion'],
'conviced': ['convinced'],
'convienient': ['convenient'],
'coordiantion': ['coordination'],
'coorperation': ['cooperation', 'corporation'],
'coorperations': ['corporations'],
'copmetitors': ['competitors'],
'coputer': ['computer'],
'copywrite': ['copyright'],
'coridal': ['cordial'],
'cornmitted': ['committed'],
'corosion': ['corrosion'],
'corparate': ['corporate'],
'corperations': ['corporations'],
'correcters': ['correctors'],
'correponding': ['corresponding'],
'correposding': ['corresponding'],
'correspondant': ['correspondent'],
'correspondants': ['correspondents'],
'corridoors': ['corridors'],
'corrispond': ['correspond'],
'corrispondant': ['correspondent'],
'corrispondants': ['correspondents'],
'corrisponded': ['corresponded'],
'corrisponding': ['corresponding'],
'corrisponds': ['corresponds'],
'costitution': ['constitution'],
'coucil': ['council'],
'coudl': ['could', 'cloud'],
'councellor': ['councillor', 'counselor', 'councilor'],
'councellors': ['councillors', 'counselors', 'councilors'],
'counries': ['countries'],
'countains': ['contains'],
'countires': ['countries'],
'countrie\'s': ['countries', 'countries\'', 'country\'s'],
'coururier': ['courier', 'couturier'],
'coverted': ['converted', 'covered', 'coveted'],
'cpoy': ['coy', 'copy'],
'creaeted': ['created'],
'creche': ['crèche'],
'creedence': ['credence'],
'critereon': ['criterion'],
'criterias': ['criteria'],
'criticists': ['critics'],
'critising': ['criticising', 'criticizing'],
'critisising': ['criticising'],
'critisism': ['criticism'],
'critisisms': ['criticisms'],
'critisize': ['criticise', 'criticize'],
'critisized': ['criticised', 'criticized'],
'critisizes': ['criticises', 'criticizes'],
'critisizing': ['criticising', 'criticizing'],
'critized': ['criticized'],
'critizing': ['criticizing'],
'crockodiles': ['crocodiles'],
'crowm': ['crown'],
'crtical': ['critical'],
'crticised': ['criticised'],
'crucifiction': ['crucifixion'],
'crusies': ['cruises'],
'crystalisation': ['crystallisation'],
'culiminating': ['culminating'],
'cumulatative': ['cumulative'],
'curch': ['church'],
'curcuit': ['circuit'],
'currenly': ['currently'],
'curriculem': ['curriculum'],
'cxan': ['cyan'],
'cyclinder': ['cylinder'],
'dacquiri': ['daiquiri'],
'daed': ['dead'],
'dael': ['deal', 'dial', 'dahl'],
'dalmation': ['dalmatian'],
'damenor': ['demeanor'],
'dammage': ['damage'],
'Dardenelles': ['Dardanelles'],
'daugher': ['daughter'],
'debateable': ['debatable'],
'decendant': ['descendant'],
'decendants': ['descendants'],
'decendent': ['descendant'],
'decendents': ['descendants'],
'decideable': ['decidable'],
'decidely': ['decidedly'],
'decieved': ['deceived'],
'decison': ['decision'],
'decomissioned': ['decommissioned'],
'decomposit': ['decompose'],
'decomposited': ['decomposed'],
'decompositing': ['decomposing'],
'decomposits': ['decomposes'],
'decress': ['decrees'],
'decribe': ['describe'],
'decribed': ['described'],
'decribes': ['describes'],
'decribing': ['describing'],
'dectect': ['detect'],
'defendent': ['defendant'],
'defendents': ['defendants'],
'deffensively': ['defensively'],
'deffine': ['define'],
'deffined': ['defined'],
'definance': ['defiance'],
'definate': ['definite'],
'definately': ['definitely'],
'definatly': ['definitely'],
'definetly': ['definitely'],
'definining': ['defining'],
'definit': ['definite'],
'definitly': ['definitely'],
'definiton': ['definition'],
'defintion': ['definition'],
'degrate': ['degrade'],
'delagates': ['delegates'],
'delapidated': ['dilapidated'],
'delerious': ['delirious'],
'delevopment': ['development'],
'deliberatly': ['deliberately'],
'delusionally': ['delusively'],
'demenor': ['demeanor'],
'demographical': ['demographic'],
'demolision': ['demolition'],
'demorcracy': ['democracy'],
'demostration': ['demonstration'],
'denegrating': ['denigrating'],
'densly': ['densely'],
'deparment': ['department'],
'deparmental': ['departmental'],
'deparments': ['departments'],
'dependance': ['dependence'],
'dependancy': ['dependency'],
'dependant': ['dependent'],
'deram': ['dram', 'dream'],
'deriviated': ['derived'],
'derivitive': ['derivative'],
'derogitory': ['derogatory'],
'descendands': ['descendants'],
'descibed': ['described'],
'descision': ['decision'],
'descisions': ['decisions'],
'descriibes': ['describes'],
'descripters': ['descriptors'],
'descripton': ['description'],
'desctruction': ['destruction'],
'descuss': ['discuss'],
'desgined': ['designed'],
'deside': ['decide'],
'desigining': ['designing'],
'desinations': ['destinations'],
'desintegrated': ['disintegrated'],
'desintegration': ['disintegration'],
'desireable': ['desirable'],
'desitned': ['destined'],
'desktiop': ['desktop'],
'desorder': ['disorder'],
'desoriented': ['disoriented'],
'desparate': ['desperate', 'disparate'],
'despict': ['depict'],
'despiration': ['desperation'],
'dessicated': ['desiccated'],
'dessigned': ['designed'],
'destablized': ['destabilized'],
'destory': ['destroy'],
'detailled': ['detailed'],
'detatched': ['detached'],
'deteoriated': ['deteriorated'],
'deteriate': ['deteriorate'],
'deterioriating': ['deteriorating'],
'determinining': ['determining'],
'detremental': ['detrimental'],
'devasted': ['devastated'],
'develope': ['develop'],
'developement': ['development'],
'developped': ['developed'],
'developper': ['developer'],
'develpment': ['development'],
'devels': ['delves'],
'devestated': ['devastated'],
'devestating': ['devastating'],
'devide': ['divide'],
'devided': ['divided'],
'devistating': ['devastating'],
'devolopement': ['development'],
'diablical': ['diabolical'],
'diamons': ['diamonds'],
'diaster': ['disaster'],
'dichtomy': ['dichotomy'],
'diconnects': ['disconnects'],
'dicover': ['discover'],
'dicovered': ['discovered'],
'dicovering': ['discovering'],
'dicovers': ['discovers'],
'dicovery': ['discovery'],
'dicussed': ['discussed'],
'didnt': ['didn\'t'],
'diea': ['idea', 'die'],
'dieing': ['dying', 'dyeing'],
'dieties': ['deities'],
'diety': ['deity'],
'diferent': ['different'],
'diferrent': ['different'],
'differentiatiations': ['differentiations'],
'differnt': ['different'],
'difficulity': ['difficulty'],
'diffrent': ['different'],
'dificulties': ['difficulties'],
'dificulty': ['difficulty'],
'dimenions': ['dimensions'],
'dimention': ['dimension'],
'dimentional': ['dimensional'],
'dimentions': ['dimensions'],
'dimesnional': ['dimensional'],
'diminuitive': ['diminutive'],
'dimunitive': ['diminutive'],
'diosese': ['diocese'],
'diphtong': ['diphthong'],
'diphtongs': ['diphthongs'],
'diplomancy': ['diplomacy'],
'dipthong': ['diphthong'],
'dipthongs': ['diphthongs'],
'dirived': ['derived'],
'disagreeed': ['disagreed'],
'disapeared': ['disappeared'],
'disapointing': ['disappointing'],
'disappearred': ['disappeared'],
'disaproval': ['disapproval'],
'disasterous': ['disastrous'],
'disatisfaction': ['dissatisfaction'],
'disatisfied': ['dissatisfied'],
'disatrous': ['disastrous'],
'discontentment': ['discontent'],
'discribe': ['describe'],
'discribed': ['described'],
'discribes': ['describes'],
'discribing': ['describing'],
'disctinction': ['distinction'],
'disctinctive': ['distinctive'],
'disemination': ['dissemination'],
'disenchanged': ['disenchanted'],
'disiplined': ['disciplined'],
'disobediance': ['disobedience'],
'disobediant': ['disobedient'],
'disolved': ['dissolved'],
'disover': ['discover'],
'dispair': ['despair'],
'disparingly': ['disparagingly'],
'dispence': ['dispense'],
'dispenced': ['dispensed'],
'dispencing': ['dispensing'],
'dispicable': ['despicable'],
'dispite': ['despite'],
'dispostion': ['disposition'],
'disproportiate': ['disproportionate'],
'disputandem': ['disputandum'],
'disricts': ['districts'],
'dissagreement': ['disagreement'],
'dissapear': ['disappear'],
'dissapearance': ['disappearance'],
'dissapeared': ['disappeared'],
'dissapearing': ['disappearing'],
'dissapears': ['disappears'],
'dissappear': ['disappear'],
'dissappears': ['disappears'],
'dissappointed': ['disappointed'],
'dissarray': ['disarray'],
'dissobediance': ['disobedience'],
'dissobediant': ['disobedient'],
'dissobedience': ['disobedience'],
'dissobedient': ['disobedient'],
'distiction': ['distinction'],
'distingish': ['distinguish'],
'distingished': ['distinguished'],
'distingishes': ['distinguishes'],
'distingishing': ['distinguishing'],
'distingquished': ['distinguished'],
'distrubution': ['distribution'],
'distruction': ['destruction'],
'distructive': ['destructive'],
'ditributed': ['distributed'],
'diversed': ['diverse', 'diverged'],
'divice': ['device'],
'divison': ['division'],
'divisons': ['divisions'],
'dum': ['dumb'],
'doccument': ['document'],
'doccumented': ['documented'],
'doccuments': ['documents'],
'docrines': ['doctrines'],
'doctines': ['doctrines'],
'documenatry': ['documentary'],
'doens': ['does'],
'doesnt': ['doesn\'t'],
'doign': ['doing'],
'dominaton': ['domination'],
'dominent': ['dominant'],
'dominiant': ['dominant'],
'donig': ['doing'],
'dosen\'t': ['doesn\'t'],
'doub': ['doubt', 'daub'],
'doulbe': ['double'],
'dowloads': ['downloads'],
'dramtic': ['dramatic'],
'draughtman': ['draughtsman'],
'Dravadian': ['Dravidian'],
'dreasm': ['dreams'],
'driectly': ['directly'],
'drnik': ['drink'],
'druming': ['drumming'],
'drummless': ['drumless'],
'dupicate': ['duplicate'],
'durig': ['during'],
'durring': ['during'],
'duting': ['during'],
'dyas': ['dryas'],
'eahc': ['each'],
'ealier': ['earlier'],
'earlies': ['earliest'],
'earnt': ['earned'],
'ecclectic': ['eclectic'],
'eceonomy': ['economy'],
'ecidious': ['deciduous'],
'eclispe': ['eclipse'],
'ecomonic': ['economic'],
'ect': ['etc'],
'eearly': ['early'],
'efel': ['evil'],
'effeciency': ['efficiency'],
'effecient': ['efficient'],
'effeciently': ['efficiently'],
'efficency': ['efficiency'],
'efficent': ['efficient'],
'efficently': ['efficiently'],
'efford': ['effort', 'afford'],
'effords': ['efforts', 'affords'],
'effulence': ['effluence'],
'eigth': ['eighth', 'eight'],
'eiter': ['either'],
'elction': ['election'],
'electic': ['eclectic', 'electric'],
'electon': ['election', 'electron'],
'electrial': ['electrical'],
'electricly': ['electrically'],
'electricty': ['electricity'],
'elementay': ['elementary'],
'eleminated': ['eliminated'],
'eleminating': ['eliminating'],
'eles': ['eels'],
'eletricity': ['electricity'],
'elicided': ['elicited'],
'eligable': ['eligible'],
'elimentary': ['elementary'],
'ellected': ['elected'],
'elphant': ['elephant'],
'embarass': ['embarrass'],
'embarassed': ['embarrassed'],
'embarassing': ['embarrassing'],
'embarassment': ['embarrassment'],
'embargos': ['embargoes'],
'embarras': ['embarrass'],
'embarrased': ['embarrassed'],
'embarrasing': ['embarrassing'],
'embarrasment': ['embarrassment'],
'embezelled': ['embezzled'],
'emblamatic': ['emblematic'],
'eminate': ['emanate'],
'eminated': ['emanated'],
'emision': ['emission'],
'emited': ['emitted'],
'emiting': ['emitting'],
'emition': ['emission', 'emotion'],
'emmediately': ['immediately'],
'emmigrated': ['emigrated', 'immigrated'],
'emminent': ['eminent', 'imminent'],
'emminently': ['eminently'],
'emmisaries': ['emissaries'],
'emmisarries': ['emissaries'],
'emmisarry': ['emissary'],
'emmisary': ['emissary'],
'emmision': ['emission'],
'emmisions': ['emissions'],
'emmited': ['emitted'],
'emmiting': ['emitting'],
'emmitted': ['emitted'],
'emmitting': ['emitting'],
'emnity': ['enmity'],
'emperical': ['empirical'],
'emphaised': ['emphasised'],
'emphsis': ['emphasis'],
'emphysyma': ['emphysema'],
'empirial': ['empirical', 'imperial'],
'emprisoned': ['imprisoned'],
'enameld': ['enameled'],
'enchancement': ['enhancement'],
'encouraing': ['encouraging'],
'encryptiion': ['encryption'],
'encylopedia': ['encyclopedia'],
'endevors': ['endeavors'],
'endevour': ['endeavour'],
'endig': ['ending'],
'endolithes': ['endoliths'],
'enduce': ['induce'],
'ened': ['need'],
'enflamed': ['inflamed'],
'enforceing': ['enforcing'],
'engagment': ['engagement'],
'engeneer': ['engineer'],
'engeneering': ['engineering'],
'engieneer': ['engineer'],
'engieneers': ['engineers'],
'enlargment': ['enlargement'],
'enlargments': ['enlargements'],
'Enlish': ['English', 'enlist'],
'enourmous': ['enormous'],
'enourmously': ['enormously'],
'enrty': ['entry'],
'ensconsed': ['ensconced'],
'entaglements': ['entanglements'],
'enteratinment': ['entertainment'],
'enthusiatic': ['enthusiastic'],
'entitity': ['entity'],
'entitlied': ['entitled'],
'entrepeneur': ['entrepreneur'],
'entrepeneurs': ['entrepreneurs'],
'entrys': ['entries'],
'enviorment': ['environment'],
'enviormental': ['environmental'],
'enviormentally': ['environmentally'],
'enviorments': ['environments'],
'enviornment': ['environment'],
'enviornmental': ['environmental'],
'enviornmentalist': ['environmentalist'],
'enviornmentally': ['environmentally'],
'enviornments': ['environments'],
'enviroment': ['environment'],
'enviromental': ['environmental'],
'enviromentalist': ['environmentalist'],
'enviromentally': ['environmentally'],
'enviroments': ['environments'],
'envolutionary': ['evolutionary'],
'envrionments': ['environments'],
'enxt': ['next'],
'epidsodes': ['episodes'],
'epsiode': ['episode'],
'equialent': ['equivalent'],
'equilibium': ['equilibrium'],
'equilibrum': ['equilibrium'],
'equiped': ['equipped'],
'equippment': ['equipment'],
'equitorial': ['equatorial'],
'equivelant': ['equivalent'],
'equivelent': ['equivalent'],
'equivilant': ['equivalent'],
'equivilent': ['equivalent'],
'equivlalent': ['equivalent'],
'erally': ['orally', 'really'],
'eratic': ['erratic'],
'eratically': ['erratically'],
'eraticly': ['erratically'],
'erested': ['arrested', 'erected'],
'errupted': ['erupted'],
'esential': ['essential'],
'esitmated': ['estimated'],
'esle': ['else'],
'especialy': ['especially'],
'essencial': ['essential'],
'essense': ['essence'],
'essentail': ['essential'],
'essentialy': ['essentially'],
'essentual': ['essential'],
'essesital': ['essential'],
'estabishes': ['establishes'],
'establising': ['establishing'],
'ethnocentricm': ['ethnocentrism'],
'ethose': ['those', 'ethos'],
'Europian': ['European'],
'Europians': ['Europeans'],
'Eurpean': ['European'],
'Eurpoean': ['European'],
'evenhtually': ['eventually'],
'eventally': ['eventually'],
'eventhough': ['even though'],
'eventially': ['eventually'],
'eventualy': ['eventually'],
'everthing': ['everything'],
'everytime': ['every time'],
'everyting': ['everything'],
'eveyr': ['every'],
'evidentally': ['evidently'],
'exagerate': ['exaggerate'],
'exagerated': ['exaggerated'],
'exagerates': ['exaggerates'],
'exagerating': ['exaggerating'],
'exagerrate': ['exaggerate'],
'exagerrated': ['exaggerated'],
'exagerrates': ['exaggerates'],
'exagerrating': ['exaggerating'],
'examinated': ['examined'],
'exampt': ['exempt'],
'exapansion': ['expansion'],
'excact': ['exact'],
'excange': ['exchange'],
'excecute': ['execute'],
'excecuted': ['executed'],
'excecutes': ['executes'],
'excecuting': ['executing'],
'excecution': ['execution'],
'excedded': ['exceeded'],
'excelent': ['excellent'],
'excell': ['excel'],
'excellance': ['excellence'],
'excellant': ['excellent'],
'excells': ['excels'],
'excercise': ['exercise'],
'exchanching': ['exchanging'],
'excisted': ['existed'],
'exculsivly': ['exclusively'],
'execising': ['exercising'],
'exection': ['execution'],
'exectued': ['executed'],
'exeedingly': ['exceedingly'],
'exelent': ['excellent'],
'exellent': ['excellent'],
'exemple': ['example'],
'exept': ['except'],
'exeptional': ['exceptional'],
'exerbate': ['exacerbate'],
'exerbated': ['exacerbated'],
'exerciese': ['exercises'],
'exerpt': ['excerpt'],
'exerpts': ['excerpts'],
'exersize': ['exercise'],
'exerternal': ['external'],
'exhalted': ['exalted'],
'exhibtion': ['exhibition'],
'exibition': ['exhibition'],
'exibitions': ['exhibitions'],
'exicting': ['exciting'],
'exinct': ['extinct'],
'existance': ['existence'],
'existant': ['existent'],
'existince': ['existence'],
'exliled': ['exiled'],
'exludes': ['excludes'],
'exmaple': ['example'],
'exonorate': ['exonerate'],
'exoskelaton': ['exoskeleton'],
'expalin': ['explain'],
'expatriot': ['expatriate'],
'expeced': ['expected'],
'expecially': ['especially'],
'expeditonary': ['expeditionary'],
'expeiments': ['experiments'],
'expell': ['expel'],
'expells': ['expels'],
'experiance': ['experience'],
'experianced': ['experienced'],
'expiditions': ['expeditions'],
'expierence': ['experience'],
'explaination': ['explanation'],
'explaning': ['explaining'],
'explictly': ['explicitly'],
'exploititive': ['exploitative'],
'explotation': ['exploitation'],
'expropiated': ['expropriated'],
'expropiation': ['expropriation'],
'exressed': ['expressed'],
'extemely': ['extremely'],
'extention': ['extension'],
'extentions': ['extensions'],
'extered': ['exerted'],
'extermist': ['extremist'],
'extint': ['extinct', 'extant'],
'extradiction': ['extradition'],
'extraterrestial': ['extraterrestrial'],
'extraterrestials': ['extraterrestrials'],
'extravagent': ['extravagant'],
'extrememly': ['extremely'],
'extremeophile': ['extremophile'],
'extremly': ['extremely'],
'extrordinarily': ['extraordinarily'],
'extrordinary': ['extraordinary'],
'eyar': ['year', 'eyas'],
'eyars': ['years', 'eyas'],
'eyasr': ['years', 'eyas'],
'faciliate': ['facilitate'],
'faciliated': ['facilitated'],
'faciliates': ['facilitates'],
'facilites': ['facilities'],
'facillitate': ['facilitate'],
'facinated': ['fascinated'],
'facist': ['fascist'],
'familes': ['families'],
'familliar': ['familiar'],
'famoust': ['famous'],
'fanatism': ['fanaticism'],
'Farenheit': ['Fahrenheit'],
'fatc': ['fact'],
'faught': ['fought'],
'favoutrable': ['favourable'],
'feasable': ['feasible'],
'Febuary': ['February'],
'Feburary': ['February'],
'fedreally': ['federally'],
'feromone': ['pheromone'],
'fertily': ['fertility'],
'fianite': ['finite'],
'fianlly': ['finally'],
'ficticious': ['fictitious'],
'fictious': ['fictitious'],
'fidn': ['find'],
'fiel': ['feel', 'field', 'file', 'phial'],
'fiels': ['feels', 'fields', 'files', 'phials'],
'fiercly': ['fiercely'],
'fightings': ['fighting'],
'filiament': ['filament'],
'fimilies': ['families'],
'finacial': ['financial'],
'finaly': ['finally'],
'financialy': ['financially'],
'firends': ['friends'],
'firts': ['flirts', 'first'],
'fisionable': ['fissionable'],
'flamable': ['flammable'],
'flawess': ['flawless'],
'fleed': ['fled', 'freed'],
'Flemmish': ['Flemish'],
'florescent': ['fluorescent'],
'flourescent': ['fluorescent'],
'flourine': ['fluorine'],
'fluorish': ['flourish'],
'flourishment': ['flourishing'],
'follwoing': ['following'],
'folowing': ['following'],
'fomed': ['formed'],
'fomr': ['from', 'form'],
'fonetic': ['phonetic'],
'fontrier': ['frontier'],
'foootball': ['football'],
'forbad': ['forbade'],
'forbiden': ['forbidden'],
'foreward': ['foreword'],
'forfiet': ['forfeit'],
'forhead': ['forehead'],
'foriegn': ['foreign'],
'Formalhaut': ['Fomalhaut'],
'formallize': ['formalize'],
'formallized': ['formalized'],
'formaly': ['formally', 'formerly'],
'formelly': ['formerly'],
'formidible': ['formidable'],
'formost': ['foremost'],
'forsaw': ['foresaw'],
'forseeable': ['foreseeable'],
'fortelling': ['foretelling'],
'forunner': ['forerunner'],
'foucs': ['focus'],
'foudn': ['found'],
'fougth': ['fought'],
'foundaries': ['foundries'],
'foundary': ['foundry'],
'Foundland': ['Newfoundland'],
'fourties': ['forties'],
'fourty': ['forty'],
'fouth': ['fourth'],
'foward': ['forward'],
'Fransiscan': ['Franciscan'],
'Fransiscans': ['Franciscans'],
'freind': ['friend'],
'freindly': ['friendly'],
'frequentily': ['frequently'],
'frome': ['from'],
'fromed': ['formed'],
'froniter': ['frontier'],
'fucntion': ['function'],
'fucntioning': ['functioning'],
'fufill': ['fulfill'],
'fufilled': ['fulfilled'],
'fulfiled': ['fulfilled'],
'fullfill': ['fulfill'],
'fullfilled': ['fulfilled'],
'fundametal': ['fundamental'],
'fundametals': ['fundamentals'],
'funguses': ['fungi'],
'funtion': ['function'],
'furuther': ['further'],
'futher': ['further'],
'futhermore': ['furthermore'],
'futhroc': ['futhark', 'futhorc'],
'gae': ['game', 'Gael', 'gale'],
'galatic': ['galactic'],
'Galations': ['Galatians'],
'gallaxies': ['galaxies'],
'galvinized': ['galvanized'],
'Gameboy': ['Game Boy'],
'ganerate': ['generate'],
'ganes': ['games'],
'ganster': ['gangster'],
'garantee': ['guarantee'],
'garanteed': ['guaranteed'],
'garantees': ['guarantees'],
'gardai': ['gardaí'],
'garnison': ['garrison'],
'gauarana': ['guaraná'],
'gaurantee': ['guarantee'],
'gauranteed': ['guaranteed'],
'gaurantees': ['guarantees'],
'gaurd': ['guard', 'gourd'],
'gaurentee': ['guarantee'],
'gaurenteed': ['guaranteed'],
'gaurentees': ['guarantees'],
'geneological': ['genealogical'],
'geneologies': ['genealogies'],
'geneology': ['genealogy'],
'generaly': ['generally'],
'generatting': ['generating'],
'genialia': ['genitalia'],
'geographicial': ['geographical'],
'geometrician': ['geometer'],
'geometricians': ['geometers'],
'gerat': ['great'],
'Ghandi': ['Gandhi'],
'glight': ['flight'],
'gnawwed': ['gnawed'],
'godess': ['goddess'],
'godesses': ['goddesses'],
'Godounov': ['Godunov'],
'gogin': ['going', 'Gauguin'],
'goign': ['going'],
'gonig': ['going'],
'Gothenberg': ['Gothenburg'],
'Gottleib': ['Gottlieb'],
'gouvener': ['governor'],
'govement': ['government'],
'govenment': ['government'],
'govenrment': ['government'],
'goverance': ['governance'],
'goverment': ['government'],
'govermental': ['governmental'],
'governer': ['governor'],
'governmnet': ['government'],
'govorment': ['government'],
'govormental': ['governmental'],
'govornment': ['government'],
'gracefull': ['graceful'],
'graet': ['great'],
'grafitti': ['graffiti'],
'gramatically': ['grammatically'],
'grammaticaly': ['grammatically'],
'grammer': ['grammar'],
'grat': ['great'],
'gratuitious': ['gratuitous'],
'greatful': ['grateful'],
'greatfully': ['gratefully'],
'greif': ['grief'],
'gridles': ['griddles'],
'gropu': ['group'],
'grwo': ['grow'],
'Guaduloupe': ['Guadalupe', 'Guadeloupe'],
'Guadulupe': ['Guadalupe', 'Guadeloupe'],
'guage': ['gauge'],
'guarentee': ['guarantee'],
'guarenteed': ['guaranteed'],
'guarentees': ['guarantees'],
'Guatamala': ['Guatemala'],
'Guatamalan': ['Guatemalan'],
'guerrila': ['guerrilla'],
'guerrilas': ['guerrillas'],
'guidence': ['guidance'],
'Guilia': ['Giulia'],
'Guilio': ['Giulio'],
'Guiness': ['Guinness'],
'Guiseppe': ['Giuseppe'],
'gunanine': ['guanine'],
'gurantee': ['guarantee'],
'guranteed': ['guaranteed'],
'gurantees': ['guarantees'],
'guttaral': ['guttural'],
'gutteral': ['guttural'],
'habaeus': ['habeas'],
'habeus': ['habeas'],
'Habsbourg': ['Habsburg'],
'haemorrage': ['haemorrhage'],
'haev': ['have', 'heave'],
'halarious': ['hilarious'],
'Hallowean': ['Hallowe\'en', 'Halloween'],
'halp': ['help'],
'hapen': ['happen'],
'hapened': ['happened'],
'hapening': ['happening'],
'happend': ['happened'],
'happended': ['happened'],
'happenned': ['happened'],
'harased': ['harassed'],
'harases': ['harasses'],
'harasment': ['harassment'],
'harasments': ['harassments'],
'harassement': ['harassment'],
'harras': ['harass'],
'harrased': ['harassed'],
'harrases': ['harasses'],
'harrasing': ['harassing'],
'harrasment': ['harassment'],
'harrasments': ['harassments'],
'harrassed': ['harassed'],
'harrasses': ['harasses'],
'harrassing': ['harassing'],
'harrassment': ['harassment'],
'harrassments': ['harassments'],
'hasnt': ['hasn\'t'],
'Hatian': ['Haitian'],
'haviest': ['heaviest'],
'headquarer': ['headquarter'],
'headquater': ['headquarter'],
'headquatered': ['headquartered'],
'headquaters': ['headquarters'],
'healthercare': ['healthcare'],
'heared': ['heard'],
'heathy': ['healthy'],
'Heidelburg': ['Heidelberg'],
'heigher': ['higher'],
'heirarchy': ['hierarchy'],
'heiroglyphics': ['hieroglyphics'],
'helment': ['helmet'],
'helpfull': ['helpful'],
'helpped': ['helped'],
'hemmorhage': ['hemorrhage'],
'herad': ['heard', 'Hera'],
'heridity': ['heredity'],
'heroe': ['hero'],
'heros': ['heroes'],
'hertiage': ['heritage'],
'hertzs': ['hertz'],
'hesistant': ['hesitant'],
'heterogenous': ['heterogeneous'],
'hieght': ['height'],
'hierachical': ['hierarchical'],
'hierachies': ['hierarchies'],
'hierachy': ['hierarchy'],
'hierarcical': ['hierarchical'],
'hierarcy': ['hierarchy'],
'hieroglph': ['hieroglyph'],
'hieroglphs': ['hieroglyphs'],
'higer': ['higher'],
'higest': ['highest'],
'higway': ['highway'],
'hillarious': ['hilarious'],
'himselv': ['himself'],
'hinderance': ['hindrance'],
'hinderence': ['hindrance'],
'hindrence': ['hindrance'],
'hipopotamus': ['hippopotamus'],
'hismelf': ['himself'],
'histocompatability': ['histocompatibility'],
'historicians': ['historians'],
'hitsingles': ['hit singles'],
'holf': ['hold'],
'holliday': ['holiday'],
'homestate': ['home state'],
'homogeneize': ['homogenize'],
'homogeneized': ['homogenized'],
'honory': ['honorary'],
'horrifing': ['horrifying'],
'hosited': ['hoisted'],
'hospitible': ['hospitable'],
'hounour': ['honour'],
'housr': ['hours', 'house'],
'howver': ['however'],
'hsitorians': ['historians'],
'hstory': ['history'],
'hten': ['then', 'hen', 'the'],
'htere': ['there', 'here'],
'htey': ['they'],
'htikn': ['think'],
'hting': ['thing'],
'htink': ['think'],
'htis': ['this'],
'humer': ['humor', 'humour'],
'humerous': ['humorous', 'humerus'],
'huminoid': ['humanoid'],
'humoural': ['humoral'],
'humurous': ['humorous'],
'husban': ['husband'],
'hvae': ['have'],
'hvaing': ['having'],
'hvea': ['have', 'heave'],
'hwihc': ['which'],
'hwile': ['while'],
'hwole': ['whole'],
'hydogen': ['hydrogen'],
'hydropile': ['hydrophile'],
'hydropilic': ['hydrophilic'],
'hydropobe': ['hydrophobe'],
'hydropobic': ['hydrophobic'],
'hygeine': ['hygiene'],
'hypocracy': ['hypocrisy'],
'hypocrasy': ['hypocrisy'],
'hypocricy': ['hypocrisy'],
'hypocrit': ['hypocrite'],
'hypocrits': ['hypocrites'],
'iconclastic': ['iconoclastic'],
'idaeidae': ['idea'],
'idaes': ['ideas'],
'idealogies': ['ideologies'],
'idealogy': ['ideology'],
'identicial': ['identical'],
'identifers': ['identifiers'],
'ideosyncratic': ['idiosyncratic'],
'idesa': ['ideas', 'ides'],
'idiosyncracy': ['idiosyncrasy'],
'Ihaca': ['Ithaca'],
'illegimacy': ['illegitimacy'],
'illegitmate': ['illegitimate'],
'illess': ['illness'],
'illiegal': ['illegal'],
'illution': ['illusion'],
'ilness': ['illness'],
'ilogical': ['illogical'],
'imagenary': ['imaginary'],
'imagin': ['imagine'],
'imaginery': ['imaginary', 'imagery'],
'imanent': ['eminent', 'imminent'],
'imcomplete': ['incomplete'],
'imediately': ['immediately'],
'imense': ['immense'],
'imigrant': ['emigrant', 'immigrant'],
'imigrated': ['emigrated', 'immigrated'],
'imigration': ['emigration', 'immigration'],
'iminent': ['eminent', 'imminent', 'immanent'],
'immediatley': ['immediately'],
'immediatly': ['immediately'],
'immidately': ['immediately'],
'immidiately': ['immediately'],
'immitate': ['imitate'],
'immitated': ['imitated'],
'immitating': ['imitating'],
'immitator': ['imitator'],
'immunosupressant': ['immunosuppressant'],
'impecabbly': ['impeccably'],
'impedence': ['impedance'],
'implamenting': ['implementing'],
'impliment': ['implement'],
'implimented': ['implemented'],
'imploys': ['employs'],
'importamt': ['important'],
'impressario': ['impresario'],
'imprioned': ['imprisoned'],
'imprisonned': ['imprisoned'],
'improvision': ['improvisation'],
'improvments': ['improvements'],
'inablility': ['inability'],
'inaccessable': ['inaccessible'],
'inadiquate': ['inadequate'],
'inadquate': ['inadequate'],
'inadvertant': ['inadvertent'],
'inadvertantly': ['inadvertently'],
'inagurated': ['inaugurated'],
'inaguration': ['inauguration'],
'inappropiate': ['inappropriate'],
'inaugures': ['inaugurates'],
'inbalance': ['imbalance'],
'inbalanced': ['imbalanced'],
'inbetween': ['between'],
'incarcirated': ['incarcerated'],
'incidentially': ['incidentally'],
'incidently': ['incidentally'],
'inclreased': ['increased'],
'includ': ['include'],
'includng': ['including'],
'incompatabilities': ['incompatibilities'],
'incompatability': ['incompatibility'],
'incompatable': ['incompatible'],
'incompatablities': ['incompatibilities'],
'incompatablity': ['incompatibility'],
'incompatiblities': ['incompatibilities'],
'incompatiblity': ['incompatibility'],
'incompetance': ['incompetence'],
'incompetant': ['incompetent'],
'incomptable': ['incompatible'],
'incomptetent': ['incompetent'],
'inconsistant': ['inconsistent'],
'incoroporated': ['incorporated'],
'incorperation': ['incorporation'],
'incorportaed': ['incorporated'],
'incorprates': ['incorporates'],
'incorruptable': ['incorruptible'],
'incramentally': ['incrementally'],
'increadible': ['incredible'],
'incredable': ['incredible'],
'inctroduce': ['introduce'],
'inctroduced': ['introduced'],
'incuding': ['including'],
'incunabla': ['incunabula'],
'indefinately': ['indefinitely'],
'indefineable': ['undefinable'],
'indefinitly': ['indefinitely'],
'indentical': ['identical'],
'indepedantly': ['independently'],
'indepedence': ['independence'],
'independance': ['independence'],
'independant': ['independent'],
'independantly': ['independently'],
'independece': ['independence'],
'independendet': ['independent'],
'indespensable': ['indispensable'],
'indespensible': ['indispensable'],
'indictement': ['indictment'],
'indigineous': ['indigenous'],
'indipendence': ['independence'],
'indipendent': ['independent'],
'indipendently': ['independently'],
'indispensible': ['indispensable'],
'indisputible': ['indisputable'],
'indisputibly': ['indisputably'],
'indite': ['indict'],
'individualy': ['individually'],
'indpendent': ['independent'],
'indpendently': ['independently'],
'indulgue': ['indulge'],
'indutrial': ['industrial'],
'indviduals': ['individuals'],
'inefficienty': ['inefficiently'],
'inevatible': ['inevitable'],
'inevitible': ['inevitable'],
'inevititably': ['inevitably'],
'infalability': ['infallibility'],
'infallable': ['infallible'],
'infectuous': ['infectious'],
'infered': ['inferred'],
'infilitrate': ['infiltrate'],
'infilitrated': ['infiltrated'],
'infilitration': ['infiltration'],
'infinit': ['infinite'],
'inflamation': ['inflammation'],
'influencial': ['influential'],
'influented': ['influenced'],
'infomation': ['information'],
'informtion': ['information'],
'infrantryman': ['infantryman'],
'infrigement': ['infringement'],
'ingenius': ['ingenious'],
'ingreediants': ['ingredients'],
'inhabitans': ['inhabitants'],
'inherantly': ['inherently'],
'inheritage': ['heritage', 'inheritance'],
'inheritence': ['inheritance'],
'inital': ['initial'],
'initalize': ['initialize'],
'initally': ['initially'],
'initalization': ['initialization'],
'initation': ['initiation'],
'initiaitive': ['initiative'],
'inlcuding': ['including'],
'inmigrant': ['immigrant'],
'inmigrants': ['immigrants'],
'innoculated': ['inoculated'],
'inocence': ['innocence'],
'inofficial': ['unofficial'],
'inot': ['into'],
'inpeach': ['impeach'],
'inpolite': ['impolite'],
'inprisonment': ['imprisonment'],
'inproving': ['improving'],
'insectiverous': ['insectivorous'],
'insensative': ['insensitive'],
'inseperable': ['inseparable'],
'insistance': ['insistence'],
'insitution': ['institution'],
'insitutions': ['institutions'],
'inspite': ['in spite', 'inspire'],
'instade': ['instead'],
'instatance': ['instance'],
'institue': ['institute'],
'instuction': ['instruction'],
'instuments': ['instruments'],
'instutionalized': ['institutionalized'],
'instutions': ['institutions'],
'insurence': ['insurance'],
'intelectual': ['intellectual'],
'inteligence': ['intelligence'],
'inteligent': ['intelligent'],
'intenational': ['international'],
'intented': ['intended', 'indented'],
'intepretation': ['interpretation'],
'intepretator': ['interpreter'],
'interational': ['international'],
'interbread': ['interbreed', 'interbred'],
'interchangable': ['interchangeable'],
'interchangably': ['interchangeably'],
'intercontinetal': ['intercontinental'],
'intered': ['interred', 'interned'],
'interelated': ['interrelated'],
'interferance': ['interference'],
'interfereing': ['interfering'],
'intergrated': ['integrated'],
'intergration': ['integration'],
'interm': ['interim'],
'internation': ['international'],
'interpet': ['interpret'],
'interrim': ['interim'],
'interrugum': ['interregnum'],
'intertaining': ['entertaining'],
'interupt': ['interrupt'],
'intervines': ['intervenes'],
'intevene': ['intervene'],
'intial': ['initial'],
'intially': ['initially'],
'intrduced': ['introduced'],
'intrest': ['interest'],
'introdued': ['introduced'],
'intruduced': ['introduced'],
'intrument': ['instrument'],
'intrumental': ['instrumental'],
'intruments': ['instruments'],
'intrusted': ['entrusted'],
'intutive': ['intuitive'],
'intutively': ['intuitively'],
'inudstry': ['industry'],
'inumerable': ['enumerable', 'innumerable'],
'inventer': ['inventor'],
'invertibrates': ['invertebrates'],
'investingate': ['investigate'],
'involvment': ['involvement'],
'irelevent': ['irrelevant'],
'iresistable': ['irresistible'],
'iresistably': ['irresistibly'],
'iresistible': ['irresistible'],
'iresistibly': ['irresistibly'],
'iritable': ['irritable'],
'iritated': ['irritated'],
'ironicly': ['ironically'],
'irregardless': ['regardless'],
'irrelevent': ['irrelevant'],
'irreplacable': ['irreplaceable'],
'irresistable': ['irresistible'],
'irresistably': ['irresistibly'],
'isnt': ['isn\'t'],
'Israelies': ['Israelis'],
'issueing': ['issuing'],
'itnroduced': ['introduced'],
'iunior': ['junior'],
'iwll': ['will'],
'iwth': ['with'],
'Janurary': ['January'],
'Januray': ['January'],
'Japanes': ['Japanese'],
'jaques': ['jacques'],
'jeapardy': ['jeopardy'],
'jewllery': ['jewellery'],
'Johanine': ['Johannine'],
'jorunal': ['journal'],
'Jospeh': ['Joseph'],
'jouney': ['journey'],
'journied': ['journeyed'],
'journies': ['journeys'],
'jstu': ['just'],
'jsut': ['just'],
'Juadaism': ['Judaism'],
'Juadism': ['Judaism'],
'judical': ['judicial'],
'judisuary': ['judiciary'],
'juducial': ['judicial'],
'juristiction': ['jurisdiction'],
'juristictions': ['jurisdictions'],
'kindergarden': ['kindergarten'],
'klenex': ['kleenex'],
'knifes': ['knives'],
'knive': ['knife'],
'knowlege': ['knowledge'],
'knowlegeable': ['knowledgeable'],
'knwo': ['know'],
'knwos': ['knows'],
'konw': ['know'],
'konws': ['knows'],
'kwno': ['know'],
'labatory': ['lavatory', 'laboratory'],
'labled': ['labelled', 'labeled'],
'labratory': ['laboratory'],
'laguage': ['language'],
'laguages': ['languages'],
'larg': ['large'],
'largst': ['largest'],
'larrry': ['larry'],
'lastr': ['last'],
'lattitude': ['latitude'],
'launchs': ['launch', 'launches'],
'launhed': ['launched'],
'lavae': ['larvae'],
'layed': ['laid'],
'lazyness': ['laziness'],
'leage': ['league'],
'leanr': ['lean', 'learn', 'leaner'],
'leathal': ['lethal'],
'lefted': ['left'],
'legitamate': ['legitimate'],
'legitmate': ['legitimate'],
'leibnitz': ['leibniz'],
'lenght': ['length'],
'leran': ['learn'],
'lerans': ['learns'],
'leutenant': ['lieutenant'],
'levetate': ['levitate'],
'levetated': ['levitated'],
'levetates': ['levitates'],
'levetating': ['levitating'],
'levle': ['level'],
'liasion': ['liaison'],
'liason': ['liaison'],
'liasons': ['liaisons'],
'libary': ['library'],
'libell': ['libel'],
'libguistic': ['linguistic'],
'libguistics': ['linguistics'],
'libitarianisn': ['libertarianism'],
'lible': ['libel', 'liable'],
'lieing': ['lying'],
'liek': ['like'],
'liekd': ['liked'],
'liesure': ['leisure'],
'lieuenant': ['lieutenant'],
'lieved': ['lived'],
'liftime': ['lifetime'],
'lightyear': ['light year'],
'lightyears': ['light years'],
'likelyhood': ['likelihood'],
'linnaena': ['linnaean'],
'lippizaner': ['lipizzaner'],
'liquify': ['liquefy'],
'liscense': ['license', 'licence'],
'lisence': ['license', 'licence'],
'lisense': ['license', 'licence'],
'listners': ['listeners'],
'litature': ['literature'],
'literaly': ['literally'],
'literture': ['literature'],
'littel': ['little'],
'litterally': ['literally'],
'liuke': ['like'],
'livley': ['lively'],
'lmits': ['limits'],
'loev': ['love'],
'lonelyness': ['loneliness'],
'longitudonal': ['longitudinal'],
'lonley': ['lonely'],
'lonly': ['lonely', 'only'],
'loosing': ['losing'],
'lotharingen': ['lothringen'],
'lsat': ['last'],
'lukid': ['likud'],
'lveo': ['love'],
'lvoe': ['love'],
'Lybia': ['Libya'],
'maching': ['machine', 'marching', 'matching'],
'mackeral': ['mackerel'],
'magasine': ['magazine'],
'magincian': ['magician'],
'magnificient': ['magnificent'],
'magolia': ['magnolia'],
'mailny': ['mainly'],
'maintainance': ['maintenance'],
'maintainence': ['maintenance'],
'maintance': ['maintenance'],
'maintenence': ['maintenance'],
'maintinaing': ['maintaining'],
'maintioned': ['mentioned'],
'majoroty': ['majority'],
'maked': ['marked', 'made'],
'makse': ['makes'],
'Malcom': ['Malcolm'],
'maltesian': ['Maltese'],
'mamal': ['mammal'],
'mamalian': ['mammalian'],
'managable': ['manageable', 'manageably'],
'managment': ['management'],
'maneouvre': ['manoeuvre'],
'maneouvred': ['manoeuvred'],
'maneouvres': ['manoeuvres'],
'maneouvring': ['manoeuvring'],
'manisfestations': ['manifestations'],
'manoeuverability': ['maneuverability'],
'manouver': ['maneuver', 'manoeuvre'],
'manouverability': ['maneuverability', 'manoeuvrability',
'manoeuverability'],
'manouverable': ['maneuverable', 'manoeuvrable'],
'manouvers': ['maneuvers', 'manoeuvres'],
'mantained': ['maintained'],
'manuever': ['maneuver', 'manoeuvre'],
'manuevers': ['maneuvers', 'manoeuvres'],
'manufacturedd': ['manufactured'],
'manufature': ['manufacture'],
'manufatured': ['manufactured'],
'manufaturing': ['manufacturing'],
'manuver': ['maneuver'],
'mariage': ['marriage'],
'marjority': ['majority'],
'markes': ['marks'],
'marketting': ['marketing'],
'marmelade': ['marmalade'],
'marrage': ['marriage'],
'marraige': ['marriage'],
'marrtyred': ['martyred'],
'marryied': ['married'],
'Massachussets': ['Massachusetts'],
'Massachussetts': ['Massachusetts'],
'massmedia': ['mass media'],
'masterbation': ['masturbation'],
'mataphysical': ['metaphysical'],
'materalists': ['materialist'],
'mathamatics': ['mathematics'],
'mathematican': ['mathematician'],
'mathematicas': ['mathematics'],
'matheticians': ['mathematicians'],
'mathmatically': ['mathematically'],
'mathmatician': ['mathematician'],
'mathmaticians': ['mathematicians'],
'mccarthyst': ['mccarthyist'],
'mchanics': ['mechanics'],
'meaninng': ['meaning'],
'mear': ['wear', 'mere', 'mare'],
'mechandise': ['merchandise'],
'medacine': ['medicine'],
'medeival': ['medieval'],
'medevial': ['medieval'],
'mediciney': ['mediciny'],
'medievel': ['medieval'],
'mediterainnean': ['mediterranean'],
'Mediteranean': ['Mediterranean'],
'meerkrat': ['meerkat'],
'melieux': ['milieux'],
'membranaphone': ['membranophone'],
'memeber': ['member'],
'menally': ['mentally'],
'meranda': ['veranda', 'Miranda'],
'mercentile': ['mercantile'],
'messanger': ['messenger'],
'messenging': ['messaging'],
'metalic': ['metallic'],
'metalurgic': ['metallurgic'],
'metalurgical': ['metallurgical'],
'metalurgy': ['metallurgy'],
'metamorphysis': ['metamorphosis'],
'metaphoricial': ['metaphorical'],
'meterologist': ['meteorologist'],
'meterology': ['meteorology'],
'methaphor': ['metaphor'],
'methaphors': ['metaphors'],
'Michagan': ['Michigan'],
'micoscopy': ['microscopy'],
'midwifes': ['midwives'],
'mileau': ['milieu'],
'milennia': ['millennia'],
'milennium': ['millennium'],
'mileu': ['milieu'],
'miliary': ['military'],
'milion': ['million'],
'miliraty': ['military'],
'millenia': ['millennia'],
'millenial': ['millennial'],
'millenialism': ['millennialism'],
'millenium': ['millennium'],
'millepede': ['millipede'],
'millioniare': ['millionaire'],
'millitary': ['military'],
'millon': ['million'],
'miltary': ['military'],
'minature': ['miniature'],
'minerial': ['mineral'],
'miniscule': ['minuscule'],
'ministery': ['ministry'],
'minstries': ['ministries'],
'minstry': ['ministry'],
'minumum': ['minimum'],
'mirrorred': ['mirrored'],
'miscelaneous': ['miscellaneous'],
'miscellanious': ['miscellaneous'],
'miscellanous': ['miscellaneous'],
'mischeivous': ['mischievous'],
'mischevious': ['mischievous'],
'mischievious': ['mischievous'],
'misdameanor': ['misdemeanor'],
'misdameanors': ['misdemeanors'],
'misdemenor': ['misdemeanor'],
'misdemenors': ['misdemeanors'],
'misfourtunes': ['misfortunes'],
'misile': ['missile'],
'Misouri': ['Missouri'],
'mispell': ['misspell'],
'mispelled': ['misspelled'],
'mispelling': ['misspelling'],
'missen': ['mizzen'],
'Missisipi': ['Mississippi'],
'Missisippi': ['Mississippi'],
'missle': ['missile'],
'missonary': ['missionary'],
'misterious': ['mysterious'],
'mistery': ['mystery'],
'misteryous': ['mysterious'],
'mkae': ['make'],
'mkaes': ['makes'],
'mkaing': ['making'],
'mkea': ['make'],
'moderm': ['modem'],
'modle': ['model'],
'moent': ['moment'],
'moeny': ['money'],
'mohammedans': ['muslims'],
'moil': ['mohel', 'soil'],
'moleclues': ['molecules'],
'momento': ['memento'],
'monestaries': ['monasteries'],
'monestary': ['monastery', 'monetary'],
'monickers': ['monikers'],
'monolite': ['monolithic'],
'Monserrat': ['Montserrat'],
'montains': ['mountains'],
'montanous': ['mountainous'],
'Montnana': ['Montana'],
'monts': ['months'],
'montypic': ['monotypic'],
'moreso': ['more', 'more so'],
'morgage': ['mortgage'],
'Morisette': ['Morissette'],
'Morrisette': ['Morissette'],
'morroccan': ['moroccan'],
'morrocco': ['morocco'],
'morroco': ['morocco'],
'mortage': ['mortgage'],
'mosture': ['moisture'],
'motiviated': ['motivated'],
'mounth': ['month'],
'movei': ['movie'],
'movment': ['movement'],
'mroe': ['more'],
'mucuous': ['mucous'],
'muder': ['murder'],
'mudering': ['murdering'],
'muhammadan': ['muslim'],
'multicultralism': ['multiculturalism'],
'multipled': ['multiplied'],
'multiplers': ['multipliers'],
'munbers': ['numbers'],
'muncipalities': ['municipalities'],
'muncipality': ['municipality'],
'munnicipality': ['municipality'],
'muscels': ['mussels', 'muscles'],
'muscial': ['musical'],
'muscician': ['musician'],
'muscicians': ['musicians'],
'mutiliated': ['mutilated'],
'myraid': ['myriad'],
'mysef': ['myself'],
'mysogynist': ['misogynist'],
'mysogyny': ['misogyny'],
'mysterous': ['mysterious'],
'Mythraic': ['Mithraic'],
'naieve': ['naive'],
'Naploeon': ['Napoleon'],
'Napolean': ['Napoleon'],
'Napoleonian': ['Napoleonic'],
'naturaly': ['naturally'],
'naturely': ['naturally'],
'naturual': ['natural'],
'naturually': ['naturally'],
'Nazereth': ['Nazareth'],
'neccesarily': ['necessarily'],
'neccesary': ['necessary'],
'neccessarily': ['necessarily'],
'neccessary': ['necessary'],
'neccessities': ['necessities'],
'necesarily': ['necessarily'],
'necesary': ['necessary'],
'necessiate': ['necessitate'],
'neglible': ['negligible'],
'negligable': ['negligible'],
'negociate': ['negotiate'],
'negociation': ['negotiation'],
'negociations': ['negotiations'],
'negotation': ['negotiation'],
'neice': ['niece', 'nice'],
'neigborhood': ['neighborhood'],
'neigbour': ['neighbour', 'neighbor'],
'neigbourhood': ['neighbourhood'],
'neigbouring': ['neighbouring', 'neighboring'],
'neigbours': ['neighbours', 'neighbors'],
'neolitic': ['neolithic'],
'nessasarily': ['necessarily'],
'nessecary': ['necessary'],
'nestin': ['nesting'],
'neverthless': ['nevertheless'],
'newletters': ['newsletters'],
'Newyorker': ['New Yorker'],
'nickle': ['nickel'],
'nightime': ['nighttime'],
'nineth': ['ninth'],
'ninteenth': ['nineteenth'],
'ninties': ['1990s'],
'ninty': ['ninety'],
'nkow': ['know'],
'nkwo': ['know'],
'nmae': ['name'],
'noncombatents': ['noncombatants'],
'nonsence': ['nonsense'],
'nontheless': ['nonetheless'],
'noone': ['no one'],
'norhern': ['northern'],
'northen': ['northern'],
'northereastern': ['northeastern'],
'notabley': ['notably'],
'noteable': ['notable'],
'noteably': ['notably'],
'noteriety': ['notoriety'],
'noth': ['north'],
'nothern': ['northern'],
'noticable': ['noticeable'],
'noticably': ['noticeably'],
'noticeing': ['noticing'],
'noticible': ['noticeable'],
'notwhithstanding': ['notwithstanding'],
'noveau': ['nouveau'],
'Novermber': ['November'],
'nowdays': ['nowadays'],
'nowe': ['now'],
'nto': ['not'],
'nucular': ['nuclear'],
'nuculear': ['nuclear'],
'nuisanse': ['nuisance'],
'Nullabour': ['Nullarbor'],
'numberous': ['numerous'],
'Nuremburg': ['Nuremberg'],
'nusance': ['nuisance'],
'nutritent': ['nutrient'],
'nutritents': ['nutrients'],
'nuturing': ['nurturing'],
'obediance': ['obedience'],
'obediant': ['obedient'],
'obession': ['obsession'],
'obssessed': ['obsessed'],
'obstacal': ['obstacle'],
'obstancles': ['obstacles'],
'obstruced': ['obstructed'],
'ocasion': ['occasion'],
'ocasional': ['occasional'],
'ocasionally': ['occasionally'],
'ocasionaly': ['occasionally'],
'ocasioned': ['occasioned'],
'ocasions': ['occasions'],
'ocassion': ['occasion'],
'ocassional': ['occasional'],
'ocassionally': ['occasionally'],
'ocassionaly': ['occasionally'],
'ocassioned': ['occasioned'],
'ocassions': ['occasions'],
'occaison': ['occasion'],
'occassion': ['occasion'],
'occassional': ['occasional'],
'occassionally': ['occasionally'],
'occassionaly': ['occasionally'],
'occassioned': ['occasioned'],
'occassions': ['occasions'],
'occationally': ['occasionally'],
'occour': ['occur'],
'occurance': ['occurrence'],
'occurances': ['occurrences'],
'occured': ['occurred'],
'occurence': ['occurrence'],
'occurences': ['occurrences'],
'occuring': ['occurring'],
'occurr': ['occur'],
'occurrance': ['occurrence'],
'occurrances': ['occurrences'],
'octohedra': ['octahedra'],
'octohedral': ['octahedral'],
'octohedron': ['octahedron'],
'ocuntries': ['countries'],
'ocuntry': ['country'],
'ocurr': ['occur'],
'ocurrance': ['occurrence'],
'ocurred': ['occurred'],
'ocurrence': ['occurrence'],
'offcers': ['officers'],
'offcially': ['officially'],
'offereings': ['offerings'],
'offical': ['official'],
'offically': ['officially'],
'officals': ['officials'],
'officaly': ['officially'],
'officialy': ['officially'],
'offred': ['offered'],
'oftenly': ['often'],
'oging': ['going', 'ogling'],
'omision': ['omission'],
'omited': ['omitted'],
'omiting': ['omitting'],
'omlette': ['omelette'],
'ommision': ['omission'],
'ommited': ['omitted'],
'ommiting': ['omitting'],
'ommitted': ['omitted'],
'ommitting': ['omitting'],
'omniverous': ['omnivorous'],
'omniverously': ['omnivorously'],
'omre': ['more'],
'onot': ['note', 'not'],
'onyl': ['only'],
'openess': ['openness'],
'oponent': ['opponent'],
'oportunity': ['opportunity'],
'opose': ['oppose'],
'oposite': ['opposite'],
'oposition': ['opposition'],
'oppenly': ['openly'],
'oppinion': ['opinion'],
'opponant': ['opponent'],
'oppononent': ['opponent'],
'oppositition': ['opposition'],
'oppossed': ['opposed'],
'opprotunity': ['opportunity'],
'opression': ['oppression'],
'opressive': ['oppressive'],
'opthalmic': ['ophthalmic'],
'opthalmologist': ['ophthalmologist'],
'opthalmology': ['ophthalmology'],
'opthamologist': ['ophthalmologist'],
'optmizations': ['optimizations'],
'optomism': ['optimism'],
'orded': ['ordered'],
'organim': ['organism'],
'organistion': ['organisation'],
'organiztion': ['organization'],
'orgin': ['origin', 'organ'],
'orginal': ['original'],
'orginally': ['originally'],
'orginize': ['organise'],
'oridinarily': ['ordinarily'],
'origanaly': ['originally'],
'originall': ['original', 'originally'],
'originaly': ['originally'],
'originially': ['originally'],
'originnally': ['originally'],
'origional': ['original'],
'orignally': ['originally'],
'orignially': ['originally'],
'otehr': ['other'],
'oublisher': ['publisher'],
'ouevre': ['oeuvre'],
'oustanding': ['outstanding'],
'overshaddowed': ['overshadowed'],
'overthere': ['over there'],
'overwelming': ['overwhelming'],
'overwheliming': ['overwhelming'],
'owrk': ['work'],
'owudl': ['would'],
'oxigen': ['oxygen'],
'oximoron': ['oxymoron'],
'p0enis': ['penis'],
'paide': ['paid'],
'paitience': ['patience'],
'palce': ['place', 'palace'],
'paleolitic': ['paleolithic'],
'paliamentarian': ['parliamentarian'],
'Palistian': ['Palestinian'],
'Palistinian': ['Palestinian'],
'Palistinians': ['Palestinians'],
'pallete': ['palette'],
'pamflet': ['pamphlet'],
'pamplet': ['pamphlet'],
'pantomine': ['pantomime'],
'Papanicalou': ['Papanicolaou'],
'paralel': ['parallel'],
'paralell': ['parallel'],
'paralelly': ['parallelly'],
'paralely': ['parallelly'],
'parallely': ['parallelly'],
'paranthesis': ['parenthesis'],
'paraphenalia': ['paraphernalia'],
'parellels': ['parallels'],
'parituclar': ['particular'],
'parliment': ['parliament'],
'parrakeets': ['parakeets'],
'parralel': ['parallel'],
'parrallel': ['parallel'],
'parrallell': ['parallel'],
'parrallelly': ['parallelly'],
'parrallely': ['parallelly'],
'partialy': ['partially'],
'particually': ['particularly'],
'particualr': ['particular'],
'particuarly': ['particularly'],
'particularily': ['particularly'],
'particulary': ['particularly'],
'pary': ['party'],
'pased': ['passed'],
'pasengers': ['passengers'],
'passerbys': ['passersby'],
'pasttime': ['pastime'],
'pastural': ['pastoral'],
'paticular': ['particular'],
'pattented': ['patented'],
'pavillion': ['pavilion'],
'payed': ['paid'],
'pblisher': ['publisher'],
'pbulisher': ['publisher'],
'peacefuland': ['peaceful and'],
'peageant': ['pageant'],
'peculure': ['peculiar'],
'pedestrain': ['pedestrian'],
'peformed': ['performed'],
'peice': ['piece'],
'Peloponnes': ['Peloponnesus'],
'penatly': ['penalty'],
'penerator': ['penetrator'],
'penisula': ['peninsula'],
'penisular': ['peninsular'],
'penninsula': ['peninsula'],
'penninsular': ['peninsular'],
'pennisula': ['peninsula'],
'Pennyslvania': ['Pennsylvania'],
'pensinula': ['peninsula'],
'peom': ['poem'],
'peoms': ['poems'],
'peopel': ['people'],
'peotry': ['poetry'],
'perade': ['parade'],
'percepted': ['perceived'],
'percieve': ['perceive'],
'percieved': ['perceived'],
'perenially': ['perennially'],
'perfomance': ['performance'],
'perfomers': ['performers'],
'performence': ['performance'],
'performes': ['performed', 'performs'],
'perhasp': ['perhaps'],
'perheaps': ['perhaps'],
'perhpas': ['perhaps'],
'peripathetic': ['peripatetic'],
'peristent': ['persistent'],
'perjery': ['perjury'],
'perjorative': ['pejorative'],
'permanant': ['permanent'],
'permenant': ['permanent'],
'permenantly': ['permanently'],
'permissable': ['permissible'],
'perogative': ['prerogative'],
'peronal': ['personal'],
'perosnality': ['personality'],
'perphas': ['perhaps'],
'perpindicular': ['perpendicular'],
'perseverence': ['perseverance'],
'persistance': ['persistence'],
'persistant': ['persistent'],
'personel': ['personnel', 'personal'],
'personell': ['personnel'],
'personnell': ['personnel'],
'persuded': ['persuaded'],
'persue': ['pursue'],
'persued': ['pursued'],
'persuing': ['pursuing'],
'persuit': ['pursuit'],
'persuits': ['pursuits'],
'pertubation': ['perturbation'],
'pertubations': ['perturbations'],
'pessiary': ['pessary'],
'petetion': ['petition'],
'Pharoah': ['Pharaoh'],
'phenomenom': ['phenomenon'],
'phenomenonal': ['phenomenal'],
'phenomenonly': ['phenomenally'],
'phenomonenon': ['phenomenon'],
'phenomonon': ['phenomenon'],
'phenonmena': ['phenomena'],
'Philipines': ['Philippines'],
'philisopher': ['philosopher'],
'philisophical': ['philosophical'],
'philisophy': ['philosophy'],
'Phillipine': ['Philippine'],
'Phillipines': ['Philippines'],
'Phillippines': ['Philippines'],
'phillosophically': ['philosophically'],
'philospher': ['philosopher'],
'philosphies': ['philosophies'],
'philosphy': ['philosophy'],
'Phonecian': ['Phoenician'],
'phongraph': ['phonograph'],
'phylosophical': ['philosophical'],
'physicaly': ['physically'],
'piblisher': ['publisher'],
'pich': ['pitch'],
'pilgrimmage': ['pilgrimage'],
'pilgrimmages': ['pilgrimages'],
'pinapple': ['pineapple'],
'pinnaple': ['pineapple'],
'pinoneered': ['pioneered'],
'plagarism': ['plagiarism'],
'planation': ['plantation'],
'planed': ['planned'],
'plantiff': ['plaintiff'],
'plateu': ['plateau'],
'plausable': ['plausible'],
'playright': ['playwright'],
'playwrite': ['playwright'],
'playwrites': ['playwrights'],
'pleasent': ['pleasant'],
'plebicite': ['plebiscite'],
'plesant': ['pleasant'],
'poenis': ['penis'],
'poeoples': ['peoples'],
'poety': ['poetry'],
'poisin': ['poison'],
'polical': ['political'],
'polinator': ['pollinator'],
'polinators': ['pollinators'],
'politican': ['politician'],
'politicans': ['politicians'],
'poltical': ['political'],
'polute': ['pollute'],
'poluted': ['polluted'],
'polutes': ['pollutes'],
'poluting': ['polluting'],
'polution': ['pollution'],
'polyphonyic': ['polyphonic'],
'polysaccaride': ['polysaccharide'],
'polysaccharid': ['polysaccharide'],
'pomegranite': ['pomegranate'],
'pomotion': ['promotion'],
'poportional': ['proportional'],
'popoulation': ['population'],
'popularaty': ['popularity'],
'populare': ['popular'],
'populer': ['popular'],
'portait': ['portrait'],
'portayed': ['portrayed'],
'portraing': ['portraying'],
'Portugese': ['Portuguese'],
'portuguease': ['portuguese'],
'portugues': ['Portuguese'],
'posess': ['possess'],
'posessed': ['possessed'],
'posesses': ['possesses'],
'posessing': ['possessing'],
'posession': ['possession'],
'posessions': ['possessions'],
'posion': ['poison'],
'positon': ['position', 'positron'],
'possable': ['possible'],
'possably': ['possibly'],
'posseses': ['possesses'],
'possesing': ['possessing'],
'possesion': ['possession'],
'possessess': ['possesses'],
'possibile': ['possible'],
'possibilty': ['possibility'],
'possiblility': ['possibility'],
'possiblilty': ['possibility'],
'possiblities': ['possibilities'],
'possiblity': ['possibility'],
'possition': ['position'],
'Postdam': ['Potsdam'],
'posthomous': ['posthumous'],
'postion': ['position'],
'postive': ['positive'],
'potatos': ['potatoes'],
'potrait': ['portrait'],
'potrayed': ['portrayed'],
'poulations': ['populations'],
'poverful': ['powerful'],
'poweful': ['powerful'],
'powerfull': ['powerful'],
'ppublisher': ['publisher'],
'practial': ['practical'],
'practially': ['practically'],
'practicaly': ['practically'],
'practicioner': ['practitioner'],
'practicioners': ['practitioners'],
'practicly': ['practically'],
'practioner': ['practitioner'],
'practioners': ['practitioners'],
'prairy': ['prairie'],
'prarie': ['prairie'],
'praries': ['prairies'],
'pratice': ['practice'],
'preample': ['preamble'],
'precedessor': ['predecessor'],
'preceed': ['precede'],
'preceeded': ['preceded'],
'preceeding': ['preceding'],
'preceeds': ['precedes'],
'precentage': ['percentage'],
'precice': ['precise'],
'precisly': ['precisely'],
'precurser': ['precursor'],
'predecesors': ['predecessors'],
'predicatble': ['predictable'],
'predicitons': ['predictions'],
'predomiantly': ['predominately'],
'prefered': ['preferred'],
'prefering': ['preferring'],
'preferrably': ['preferably'],
'pregancies': ['pregnancies'],
'preiod': ['period'],
'preliferation': ['proliferation'],
'premeire': ['premiere'],
'premeired': ['premiered'],
'premillenial': ['premillennial'],
'preminence': ['preeminence'],
'premission': ['permission'],
'Premonasterians': ['Premonstratensians'],
'preocupation': ['preoccupation'],
'prepair': ['prepare'],
'prepartion': ['preparation'],
'prepatory': ['preparatory'],
'preperation': ['preparation'],
'preperations': ['preparations'],
'preriod': ['period'],
'presedential': ['presidential'],
'presense': ['presence'],
'presidenital': ['presidential'],
'presidental': ['presidential'],
'presitgious': ['prestigious'],
'prespective': ['perspective'],
'prestigeous': ['prestigious'],
'prestigous': ['prestigious'],
'presumabely': ['presumably'],
'presumibly': ['presumably'],
'pretection': ['protection'],
'prevelant': ['prevalent'],
'preverse': ['perverse'],
'previvous': ['previous'],
'pricipal': ['principal'],
'priciple': ['principle'],
'priestood': ['priesthood'],
'primarly': ['primarily'],
'primative': ['primitive'],
'primatively': ['primitively'],
'primatives': ['primitives'],
'primordal': ['primordial'],
'principlaity': ['principality'],
'principaly': ['principality'],
'principial': ['principal'],
'principly': ['principally'],
'prinicipal': ['principal'],
'privalege': ['privilege'],
'privaleges': ['privileges'],
'priveledges': ['privileges'],
'privelege': ['privilege'],
'priveleged': ['privileged'],
'priveleges': ['privileges'],
'privelige': ['privilege'],
'priveliged': ['privileged'],
'priveliges': ['privileges'],
'privelleges': ['privileges'],
'privilage': ['privilege'],
'priviledge': ['privilege'],
'priviledges': ['privileges'],
'privledge': ['privilege'],
'privte': ['private'],
'probabilaty': ['probability'],
'probablistic': ['probabilistic'],
'probablly': ['probably'],
'probalibity': ['probability'],
'probaly': ['probably'],
'probelm': ['problem'],
'proccess': ['process'],
'proccessing': ['processing'],
'procede': ['proceed', 'precede'],
'proceded': ['proceeded', 'preceded'],
'procedes': ['proceeds', 'precedes'],
'procedger': ['procedure'],
'proceding': ['proceeding', 'preceding'],
'procedings': ['proceedings'],
'proceedure': ['procedure'],
'proces': ['process'],
'processer': ['processor'],
'proclaimation': ['proclamation'],
'proclamed': ['proclaimed'],
'proclaming': ['proclaiming'],
'proclomation': ['proclamation'],
'profesion': ['profusion', 'profession'],
'profesor': ['professor'],
'professer': ['professor'],
'proffesed': ['professed'],
'proffesion': ['profession'],
'proffesional': ['professional'],
'proffesor': ['professor'],
'profilic': ['prolific'],
'progessed': ['progressed'],
'progidy': ['prodigy'],
'programable': ['programmable'],
'progrom': ['pogrom', 'program'],
'progroms': ['pogroms', 'programs'],
'prohabition': ['prohibition'],
'prologomena': ['prolegomena'],
'prominance': ['prominence'],
'prominant': ['prominent'],
'prominantly': ['prominently'],
'prominately': ['prominently', 'predominately'],
'promiscous': ['promiscuous'],
'promotted': ['promoted'],
'pronomial': ['pronominal'],
'pronouced': ['pronounced'],
'pronounched': ['pronounced'],
'pronounciation': ['pronunciation'],
'proove': ['prove'],
'prooved': ['proved'],
'prophacy': ['prophecy'],
'propietary': ['proprietary'],
'propmted': ['prompted'],
'propoganda': ['propaganda'],
'propogate': ['propagate'],
'propogates': ['propagates'],
'propogation': ['propagation'],
'propostion': ['proposition'],
'propotions': ['proportions'],
'propper': ['proper'],
'propperly': ['properly'],
'proprietory': ['proprietary'],
'proseletyzing': ['proselytizing'],
'protaganist': ['protagonist'],
'protaganists': ['protagonists'],
'protocal': ['protocol'],
'protoganist': ['protagonist'],
'protrayed': ['portrayed'],
'protruberance': ['protuberance'],
'protruberances': ['protuberances'],
'prouncements': ['pronouncements'],
'provacative': ['provocative'],
'provded': ['provided'],
'provicial': ['provincial'],
'provinicial': ['provincial'],
'provisiosn': ['provision'],
'provisonal': ['provisional'],
'proximty': ['proximity'],
'pseudononymous': ['pseudonymous'],
'pseudonyn': ['pseudonym'],
'psuedo': ['pseudo'],
'psycology': ['psychology'],
'psyhic': ['psychic'],
'pubilsher': ['publisher'],
'pubisher': ['publisher'],
'publiaher': ['publisher'],
'publically': ['publicly'],
'publicaly': ['publicly'],
'publicher': ['publisher'],
'publihser': ['publisher'],
'publisehr': ['publisher'],
'publiser': ['publisher'],
'publisger': ['publisher'],
'publisheed': ['published'],
'publisherr': ['publisher'],
'publishher': ['publisher'],
'publishor': ['publisher'],
'publishre': ['publisher'],
'publissher': ['publisher'],
'publlisher': ['publisher'],
'publsiher': ['publisher'],
'publusher': ['publisher'],
'puchasing': ['purchasing'],
'Pucini': ['Puccini'],
'Puertorrican': ['Puerto Rican'],
'Puertorricans': ['Puerto Ricans'],
'pulisher': ['publisher'],
'pumkin': ['pumpkin'],
'puplisher': ['publisher'],
'puritannical': ['puritanical'],
'purposedly': ['purposely'],
'purpotedly': ['purportedly'],
'pursuade': ['persuade'],
'pursuaded': ['persuaded'],
'pursuades': ['persuades'],
'pususading': ['persuading'],
'puting': ['putting'],
'pwoer': ['power'],
'pyscic': ['psychic'],
'qtuie': ['quite', 'quiet'],
'quantaty': ['quantity'],
'quantitiy': ['quantity'],
'quarantaine': ['quarantine'],
'Queenland': ['Queensland'],
'questonable': ['questionable'],
'quicklyu': ['quickly'],
'quinessential': ['quintessential'],
'quitted': ['quit'],
'quizes': ['quizzes'],
'qutie': ['quite', 'quiet'],
'rabinnical': ['rabbinical'],
'racaus': ['raucous'],
'radiactive': ['radioactive'],
'radify': ['ratify'],
'raelly': ['really'],
'rarified': ['rarefied'],
'reaccurring': ['recurring'],
'reacing': ['reaching'],
'reacll': ['recall'],
'readmition': ['readmission'],
'realitvely': ['relatively'],
'realsitic': ['realistic'],
'realtions': ['relations'],
'realy': ['really'],
'realyl': ['really'],
'reasearch': ['research'],
'rebiulding': ['rebuilding'],
'rebllions': ['rebellions'],
'rebounce': ['rebound'],
'reccomend': ['recommend'],
'reccomendations': ['recommendations'],
'reccomended': ['recommended'],
'reccomending': ['recommending'],
'reccommend': ['recommend'],
'reccommended': ['recommended'],
'reccommending': ['recommending'],
'reccuring': ['recurring'],
'receeded': ['receded'],
'receeding': ['receding'],
'receivedfrom': ['received from'],
'recepient': ['recipient'],
'recepients': ['recipients'],
'receving': ['receiving'],
'rechargable': ['rechargeable'],
'reched': ['reached'],
'recide': ['reside'],
'recided': ['resided'],
'recident': ['resident'],
'recidents': ['residents'],
'reciding': ['residing'],
'reciepents': ['recipients'],
'reciept': ['receipt'],
'recieve': ['receive'],
'recieved': ['received'],
'reciever': ['receiver'],
'recievers': ['receivers'],
'recieves': ['receives'],
'recieving': ['receiving'],
'recipiant': ['recipient'],
'recipiants': ['recipients'],
'recived': ['received'],
'recivership': ['receivership'],
'recogise': ['recognise'],
'recogize': ['recognize'],
'recomend': ['recommend'],
'recomended': ['recommended'],
'recomending': ['recommending'],
'recomends': ['recommends'],
'recommedations': ['recommendations'],
'recompence': ['recompense'],
'reconaissance': ['reconnaissance'],
'reconcilation': ['reconciliation'],
'reconized': ['recognized'],
'reconnaisance': ['reconnaissance'],
'reconnaissence': ['reconnaissance'],
'recontructed': ['reconstructed'],
'recordproducer': ['record producer'],
'recquired': ['required'],
'recrational': ['recreational'],
'recrod': ['record'],
'recuiting': ['recruiting'],
'recuring': ['recurring'],
'recurrance': ['recurrence'],
'rediculous': ['ridiculous'],
'reedeming': ['redeeming'],
'reenforced': ['reinforced'],
'refect': ['reflect'],
'refedendum': ['referendum'],
'referal': ['referral'],
'referece': ['reference'],
'refereces': ['references'],
'refered': ['referred'],
'referemce': ['reference'],
'referemces': ['references'],
'referencs': ['references'],
'referenece': ['reference'],
'refereneced': ['referenced'],
'refereneces': ['references'],
'referiang': ['referring'],
'refering': ['referring'],
'refernce': ['reference', 'references'],
'refernces': ['references'],
'referrence': ['reference'],
'referrences': ['references'],
'referrs': ['refers'],
'reffered': ['referred'],
'refference': ['reference'],
'reffering': ['referring'],
'refrence': ['reference'],
'refrences': ['references'],
'refrers': ['refers'],
'refridgeration': ['refrigeration'],
'refridgerator': ['refrigerator'],
'refromist': ['reformist'],
'refusla': ['refusal'],
'regardes': ['regards'],
'regluar': ['regular'],
'reguarly': ['regularly'],
'regulaion': ['regulation'],
'regulaotrs': ['regulators'],
'regularily': ['regularly'],
'rehersal': ['rehearsal'],
'reicarnation': ['reincarnation'],
'reigining': ['reigning'],
'reknown': ['renown'],
'reknowned': ['renowned'],
'rela': ['real'],
'relaly': ['really'],
'relatiopnship': ['relationship'],
'relativly': ['relatively'],
'relected': ['reelected'],
'releive': ['relieve'],
'releived': ['relieved'],
'releiver': ['reliever'],
'releses': ['releases'],
'relevence': ['relevance'],
'relevent': ['relevant'],
'reliablity': ['reliability'],
'relient': ['reliant'],
'religeous': ['religious'],
'religous': ['religious'],
'religously': ['religiously'],
'relinqushment': ['relinquishment'],
'relitavely': ['relatively'],
'relized': ['realised', 'realized'],
'relpacement': ['replacement'],
'remaing': ['remaining'],
'remeber': ['remember'],
'rememberable': ['memorable'],
'rememberance': ['remembrance'],
'remembrence': ['remembrance'],
'remenant': ['remnant'],
'remenicent': ['reminiscent'],
'reminent': ['remnant'],
'reminescent': ['reminiscent'],
'reminscent': ['reminiscent'],
'reminsicent': ['reminiscent'],
'rendevous': ['rendezvous'],
'rendezous': ['rendezvous'],
'renedered': ['rendered'],
'renewl': ['renewal'],
'rennovate': ['renovate'],
'rennovated': ['renovated'],
'rennovating': ['renovating'],
'rennovation': ['renovation'],
'rentors': ['renters'],
'reoccurrence': ['recurrence'],
'reorganision': ['reorganisation'],
'repatition': ['repetition', 'repartition'],
'repblic': ['republic'],
'repblican': ['republican'],
'repblicans': ['republicans'],
'repblics': ['republics'],
'repectively': ['respectively'],
'repeition': ['repetition'],
'repentence': ['repentance'],
'repentent': ['repentant'],
'repeteadly': ['repeatedly'],
'repetion': ['repetition'],
'repid': ['rapid'],
'reponse': ['response'],
'reponsible': ['responsible'],
'reportadly': ['reportedly'],
'represantative': ['representative'],
'representive': ['representative'],
'representives': ['representatives'],
'reproducable': ['reproducible'],
'reprtoire': ['repertoire'],
'repsectively': ['respectively'],
'reptition': ['repetition'],
'repubic': ['republic'],
'repubican': ['republican'],
'repubicans': ['republicans'],
'repubics': ['republics'],
'republi': ['republic'],
'republian': ['republican'],
'republians': ['republicans'],
'republis': ['republics'],
'repulic': ['republic'],
'repulican': ['republican'],
'repulicans': ['republicans'],
'repulics': ['republics'],
'requirment': ['requirement'],
'requred': ['required'],
'resaurant': ['restaurant'],
'resembelance': ['resemblance'],
'resembes': ['resembles'],
'resemblence': ['resemblance'],
'resevoir': ['reservoir'],
'residental': ['residential'],
'resignement': ['resignment'],
'resistable': ['resistible'],
'resistence': ['resistance'],
'resistent': ['resistant'],
'respectivly': ['respectively'],
'responce': ['response'],
'responibilities': ['responsibilities'],
'responisble': ['responsible'],
'responnsibilty': ['responsibility'],
'responsability': ['responsibility'],
'responsibile': ['responsible'],
'responsibilites': ['responsibilities'],
'responsiblities': ['responsibilities'],
'responsiblity': ['responsibility'],
'ressemblance': ['resemblance'],
'ressemble': ['resemble'],
'ressembled': ['resembled'],
'ressemblence': ['resemblance'],
'ressembling': ['resembling'],
'resssurecting': ['resurrecting'],
'ressurect': ['resurrect'],
'ressurected': ['resurrected'],
'ressurection': ['resurrection'],
'ressurrection': ['resurrection'],
'restarant': ['restaurant'],
'restarants': ['restaurants'],
'restaraunt': ['restaurant'],
'restaraunteur': ['restaurateur'],
'restaraunteurs': ['restaurateurs'],
'restaraunts': ['restaurants'],
'restauranteurs': ['restaurateurs'],
'restauration': ['restoration'],
'restauraunt': ['restaurant'],
'resteraunt': ['restaurant'],
'resteraunts': ['restaurants'],
'resticted': ['restricted'],
'restraunt': ['restraint', 'restaurant'],
'resturant': ['restaurant'],
'resturants': ['restaurants'],
'resturaunt': ['restaurant'],
'resturaunts': ['restaurants'],
'resurecting': ['resurrecting'],
'retalitated': ['retaliated'],
'retalitation': ['retaliation'],
'retreive': ['retrieve'],
'returnd': ['returned'],
'revaluated': ['reevaluated'],
'reveiw': ['review'],
'reveral': ['reversal'],
'reversable': ['reversible'],
'revolutionar': ['revolutionary'],
'rewitten': ['rewritten'],
'rewriet': ['rewrite'],
'rference': ['reference'],
'rferences': ['references'],
'rhymme': ['rhyme'],
'rhythem': ['rhythm'],
'rhythim': ['rhythm'],
'rhytmic': ['rhythmic'],
'rigeur': ['rigueur', 'rigour', 'rigor'],
'rigourous': ['rigorous'],
'rininging': ['ringing'],
'rised': ['rose'],
'Rockerfeller': ['Rockefeller'],
'rococco': ['rococo'],
'rocord': ['record'],
'roomate': ['roommate'],
'rougly': ['roughly'],
'rucuperate': ['recuperate'],
'rudimentatry': ['rudimentary'],
'rulle': ['rule'],
'runing': ['running'],
'runnung': ['running'],
'russina': ['Russian'],
'Russion': ['Russian'],
'rwite': ['write'],
'rythem': ['rhythm'],
'rythim': ['rhythm'],
'rythm': ['rhythm'],
'rythmic': ['rhythmic'],
'rythyms': ['rhythms'],
'sacrafice': ['sacrifice'],
'sacreligious': ['sacrilegious'],
'Sacremento': ['Sacramento'],
'sacrifical': ['sacrificial'],
'saftey': ['safety'],
'safty': ['safety'],
'salery': ['salary'],
'sanctionning': ['sanctioning'],
'sandwhich': ['sandwich'],
'Sanhedrim': ['Sanhedrin'],
'santioned': ['sanctioned'],
'sargant': ['sergeant'],
'sargeant': ['sergeant'],
'sasy': ['says', 'sassy'],
'satelite': ['satellite'],
'satelites': ['satellites'],
'Saterday': ['Saturday'],
'Saterdays': ['Saturdays'],
'satisfactority': ['satisfactorily'],
'satric': ['satiric'],
'satrical': ['satirical'],
'satrically': ['satirically'],
'sattelite': ['satellite'],
'sattelites': ['satellites'],
'saught': ['sought'],
'saveing': ['saving'],
'saxaphone': ['saxophone'],
'scaleable': ['scalable'],
'scandanavia': ['Scandinavia'],
'scaricity': ['scarcity'],
'scavanged': ['scavenged'],
'schedual': ['schedule'],
'scholarhip': ['scholarship'],
'scholarstic': ['scholastic', 'scholarly'],
'scientfic': ['scientific'],
'scientifc': ['scientific'],
'scientis': ['scientist'],
'scince': ['science'],
'scinece': ['science'],
'scirpt': ['script'],
'scoll': ['scroll'],
'screenwrighter': ['screenwriter'],
'scrutinity': ['scrutiny'],
'scuptures': ['sculptures'],
'seach': ['search'],
'seached': ['searched'],
'seaches': ['searches'],
'secceeded': ['seceded', 'succeeded'],
'seceed': ['succeed', 'secede'],
'seceeded': ['succeeded', 'seceded'],
'secratary': ['secretary'],
'secretery': ['secretary'],
'sedereal': ['sidereal'],
'seeked': ['sought'],
'segementation': ['segmentation'],
'seguoys': ['segues'],
'seige': ['siege'],
'seing': ['seeing'],
'seinor': ['senior'],
'seldomly': ['seldom'],
'senarios': ['scenarios'],
'sence': ['sense', 'since'],
'senstive': ['sensitive'],
'sensure': ['censure'],
'seperate': ['separate'],
'seperated': ['separated'],
'seperately': ['separately'],
'seperates': ['separates'],
'seperating': ['separating'],
'seperation': ['separation'],
'seperatism': ['separatism'],
'seperatist': ['separatist'],
'seperator': ['separator'],
'sepina': ['subpoena'],
'sepulchure': ['sepulchre', 'sepulcher'],
'sepulcre': ['sepulchre', 'sepulcher'],
'sergent': ['sergeant'],
'settelement': ['settlement'],
'settlment': ['settlement'],
'severeal': ['several'],
'severley': ['severely'],
'severly': ['severely'],
'sevice': ['service'],
'shadasloo': ['shadaloo'],
'shaddow': ['shadow'],
'shadoloo': ['shadaloo'],
'shamen': ['shaman', 'shamans'],
'sheat': ['sheath', 'sheet', 'cheat'],
'sheild': ['shield'],
'sherif': ['sheriff'],
'shineing': ['shining'],
'shiped': ['shipped'],
'shiping': ['shipping'],
'shopkeeepers': ['shopkeepers'],
'shorly': ['shortly'],
'shortwhile': ['short while'],
'shoudl': ['should'],
'shoudln': ['should', 'shouldn\'t'],
'shouldnt': ['shouldn\'t'],
'shreak': ['shriek'],
'shrinked': ['shrunk'],
'sicne': ['since'],
'sideral': ['sidereal'],
'sieze': ['seize', 'size'],
'siezed': ['seized', 'sized'],
'siezing': ['seizing', 'sizing'],
'siezure': ['seizure'],
'siezures': ['seizures'],
'siginificant': ['significant'],
'signficant': ['significant'],
'signficiant': ['significant'],
'signfies': ['signifies'],
'signifantly': ['significantly'],
'significently': ['significantly'],
'signifigant': ['significant'],
'signifigantly': ['significantly'],
'signitories': ['signatories'],
'signitory': ['signatory'],
'similarily': ['similarly'],
'similiar': ['similar'],
'similiarity': ['similarity'],
'similiarly': ['similarly'],
'simmilar': ['similar'],
'simpley': ['simply'],
'simplier': ['simpler'],
'simultanous': ['simultaneous'],
'simultanously': ['simultaneously'],
'sincerley': ['sincerely'],
'singsog': ['singsong'],
'sinse': ['sines', 'since'],
'Sionist': ['Zionist'],
'Sionists': ['Zionists'],
'Sixtin': ['Sistine'],
'Skagerak': ['Skagerrak'],
'skateing': ['skating'],
'slaugterhouses': ['slaughterhouses'],
'slighly': ['slightly'],
'slippy': ['slippery'],
'slowy': ['slowly'],
'smae': ['same'],
'smealting': ['smelting'],
'smoe': ['some'],
'sneeks': ['sneaks'],
'snese': ['sneeze'],
'socalism': ['socialism'],
'socities': ['societies'],
'soem': ['some'],
'sofware': ['software'],
'sohw': ['show'],
'soilders': ['soldiers'],
'solatary': ['solitary'],
'soley': ['solely'],
'soliders': ['soldiers'],
'soliliquy': ['soliloquy'],
'soluable': ['soluble'],
'somene': ['someone'],
'somtimes': ['sometimes'],
'somwhere': ['somewhere'],
'sophicated': ['sophisticated'],
'sophmore': ['sophomore'],
'sorceror': ['sorcerer'],
'sorrounding': ['surrounding'],
'sotry': ['story'],
'sotyr': ['satyr', 'story'],
'soudn': ['sound'],
'soudns': ['sounds'],
'sould': ['could', 'should', 'sold', 'soul'],
'sountrack': ['soundtrack'],
'sourth': ['south'],
'sourthern': ['southern'],
'souvenier': ['souvenir'],
'souveniers': ['souvenirs'],
'soveits': ['soviets'],
'sovereignity': ['sovereignty'],
'soverign': ['sovereign'],
'soverignity': ['sovereignty'],
'soverignty': ['sovereignty'],
'spainish': ['Spanish'],
'speach': ['speech'],
'specfic': ['specific'],
'speciallized': ['specialised', 'specialized'],
'specif': ['specific', 'specify'],
'specifiying': ['specifying'],
'speciman': ['specimen'],
'spectauclar': ['spectacular'],
'spectaulars': ['spectaculars'],
'spects': ['aspects', 'expects'],
'spectum': ['spectrum'],
'speices': ['species'],
'spendour': ['splendour'],
'spermatozoan': ['spermatozoon'],
'spoace': ['space'],
'sponser': ['sponsor'],
'sponsered': ['sponsored'],
'spontanous': ['spontaneous'],
'sponzored': ['sponsored'],
'spoonfulls': ['spoonfuls'],
'sppeches': ['speeches'],
'spreaded': ['spread'],
'sprech': ['speech'],
'spred': ['spread'],
'spriritual': ['spiritual'],
'spritual': ['spiritual'],
'sqaure': ['square'],
'stablility': ['stability'],
'stainlees': ['stainless'],
'staion': ['station'],
'standars': ['standards'],
'stange': ['strange'],
'startegic': ['strategic'],
'startegies': ['strategies'],
'startegy': ['strategy'],
'stateman': ['statesman'],
'statememts': ['statements'],
'statment': ['statement'],
'steriods': ['steroids'],
'sterotypes': ['stereotypes'],
'stilus': ['stylus'],
'stingent': ['stringent'],
'stiring': ['stirring'],
'stirrs': ['stirs'],
'stlye': ['style'],
'stomache': ['stomach'],
'stong': ['strong'],
'stopry': ['story'],
'storeis': ['stories'],
'storise': ['stories'],
'stornegst': ['strongest'],
'stoyr': ['story'],
'stpo': ['stop'],
'stradegies': ['strategies'],
'stradegy': ['strategy'],
'strat': ['start', 'strata'],
'stratagically': ['strategically'],
'streemlining': ['streamlining'],
'stregth': ['strength'],
'strenghen': ['strengthen'],
'strenghened': ['strengthened'],
'strenghening': ['strengthening'],
'strenght': ['strength'],
'strenghten': ['strengthen'],
'strenghtened': ['strengthened'],
'strenghtening': ['strengthening'],
'strengtened': ['strengthened'],
'strenous': ['strenuous'],
'strictist': ['strictest'],
'strikely': ['strikingly'],
'strnad': ['strand'],
'stroy': ['story', 'destroy'],
'structual': ['structural'],
'stubborness': ['stubbornness'],
'stucture': ['structure'],
'stuctured': ['structured'],
'studdy': ['study'],
'studing': ['studying'],
'stuggling': ['struggling'],
'sturcture': ['structure'],
'subcatagories': ['subcategories'],
'subcatagory': ['subcategory'],
'subconsiously': ['subconsciously'],
'subjudgation': ['subjugation'],
'submachne': ['submachine'],
'subpecies': ['subspecies'],
'subsidary': ['subsidiary'],
'subsiduary': ['subsidiary'],
'subsquent': ['subsequent'],
'subsquently': ['subsequently'],
'substace': ['substance'],
'substancial': ['substantial'],
'substatial': ['substantial'],
'substituded': ['substituted'],
'substract': ['subtract'],
'substracted': ['subtracted'],
'substracting': ['subtracting'],
'substraction': ['subtraction'],
'substracts': ['subtracts'],
'subtances': ['substances'],
'subterranian': ['subterranean'],
'suburburban': ['suburban'],
'succceeded': ['succeeded'],
'succcesses': ['successes'],
'succedded': ['succeeded'],
'succeded': ['succeeded'],
'succeds': ['succeeds'],
'succesful': ['successful'],
'succesfully': ['successfully'],
'succesfuly': ['successfully'],
'succesion': ['succession'],
'succesive': ['successive'],
'successfull': ['successful'],
'successully': ['successfully'],
'succsess': ['success'],
'succsessfull': ['successful'],
'suceed': ['succeed'],
'suceeded': ['succeeded'],
'suceeding': ['succeeding'],
'suceeds': ['succeeds'],
'sucesful': ['successful'],
'sucesfully': ['successfully'],
'sucesfuly': ['successfully'],
'sucesion': ['succession'],
'sucess': ['success'],
'sucesses': ['successes'],
'sucessful': ['successful'],
'sucessfull': ['successful'],
'sucessfully': ['successfully'],
'sucessfuly': ['successfully'],
'sucession': ['succession'],
'sucessive': ['successive'],
'sucessor': ['successor'],
'sucessot': ['successor'],
'sucide': ['suicide'],
'sucidial': ['suicidal'],
'sufferage': ['suffrage'],
'sufferred': ['suffered'],
'sufferring': ['suffering'],
'sufficent': ['sufficient'],
'sufficently': ['sufficiently'],
'sumary': ['summary'],
'sunglases': ['sunglasses'],
'suop': ['soup'],
'superceeded': ['superseded'],
'superintendant': ['superintendent'],
'suphisticated': ['sophisticated'],
'suplimented': ['supplemented'],
'supose': ['suppose'],
'suposed': ['supposed'],
'suposedly': ['supposedly'],
'suposes': ['supposes'],
'suposing': ['supposing'],
'supplamented': ['supplemented'],
'suppliementing': ['supplementing'],
'suppoed': ['supposed'],
'supposingly': ['supposedly'],
'suppy': ['supply'],
'supress': ['suppress'],
'supressed': ['suppressed'],
'supresses': ['suppresses'],
'supressing': ['suppressing'],
'suprise': ['surprise'],
'suprised': ['surprised'],
'suprising': ['surprising'],
'suprisingly': ['surprisingly'],
'suprize': ['surprise'],
'suprized': ['surprised'],
'suprizing': ['surprising'],
'suprizingly': ['surprisingly'],
'surfce': ['surface'],
'surley': ['surly', 'surely'],
'suround': ['surround'],
'surounded': ['surrounded'],
'surounding': ['surrounding'],
'suroundings': ['surroundings'],
'surounds': ['surrounds'],
'surplanted': ['supplanted'],
'surpress': ['suppress'],
'surpressed': ['suppressed'],
'surprize': ['surprise'],
'surprized': ['surprised'],
'surprizing': ['surprising'],
'surprizingly': ['surprisingly'],
'surrended': ['surrounded', 'surrendered'],
'surrepetitious': ['surreptitious'],
'surrepetitiously': ['surreptitiously'],
'surreptious': ['surreptitious'],
'surreptiously': ['surreptitiously'],
'surronded': ['surrounded'],
'surrouded': ['surrounded'],
'surrouding': ['surrounding'],
'surrundering': ['surrendering'],
'surveilence': ['surveillance'],
'surveill': ['surveil'],
'surveyer': ['surveyor'],
'surviver': ['survivor'],
'survivers': ['survivors'],
'survivied': ['survived'],
'suseptable': ['susceptible'],
'suseptible': ['susceptible'],
'suspention': ['suspension'],
'swaer': ['swear'],
'swaers': ['swears'],
'swepth': ['swept'],
'swiming': ['swimming'],
'syas': ['says'],
'symetrical': ['symmetrical'],
'symetrically': ['symmetrically'],
'symetry': ['symmetry'],
'symettric': ['symmetric'],
'symmetral': ['symmetric'],
'symmetricaly': ['symmetrically'],
'synagouge': ['synagogue'],
'syncronization': ['synchronization'],
'synonomous': ['synonymous'],
'synonymns': ['synonyms'],
'synphony': ['symphony'],
'syphyllis': ['syphilis'],
'sypmtoms': ['symptoms'],
'syrap': ['syrup'],
'sysmatically': ['systematically'],
'sytem': ['system'],
'sytle': ['style'],
'tabacco': ['tobacco'],
'tahn': ['than'],
'taht': ['that'],
'talekd': ['talked'],
'targetted': ['targeted'],
'targetting': ['targeting'],
'tast': ['taste'],
'tath': ['that'],
'tattooes': ['tattoos'],
'taxanomic': ['taxonomic'],
'taxanomy': ['taxonomy'],
'teached': ['taught'],
'techician': ['technician'],
'techicians': ['technicians'],
'techiniques': ['techniques'],
'technitian': ['technician'],
'technnology': ['technology'],
'technolgy': ['technology'],
'teh': ['the'],
'tehy': ['they'],
'telelevision': ['television'],
'televsion': ['television'],
'telphony': ['telephony'],
'temerature': ['temperature'],
'tempalte': ['template'],
'tempaltes': ['templates'],
'temparate': ['temperate'],
'temperarily': ['temporarily'],
'temperment': ['temperament'],
'tempertaure': ['temperature'],
'temperture': ['temperature'],
'temprary': ['temporary'],
'tenacle': ['tentacle'],
'tenacles': ['tentacles'],
'tendacy': ['tendency'],
'tendancies': ['tendencies'],
'tendancy': ['tendency'],
'tennisplayer': ['tennis player'],
'tepmorarily': ['temporarily'],
'terrestial': ['terrestrial'],
'terriories': ['territories'],
'terriory': ['territory'],
'territorist': ['terrorist'],
'territoy': ['territory'],
'terroist': ['terrorist'],
'testiclular': ['testicular'],
'tghe': ['the'],
'thast': ['that', 'that\'s'],
'theather': ['theater'],
'theese': ['these'],
'theif': ['thief'],
'theives': ['thieves'],
'themselfs': ['themselves'],
'themslves': ['themselves'],
'ther': ['there', 'their', 'the'],
'therafter': ['thereafter'],
'therby': ['thereby'],
'theri': ['their'],
'theyre': ['they\'re'],
'thgat': ['that'],
'thge': ['the'],
'thier': ['their'],
'thign': ['thing'],
'thigns': ['things'],
'thigsn': ['things'],
'thikn': ['think'],
'thikning': ['thinking', 'thickening'],
'thikns': ['thinks'],
'thiunk': ['think'],
'thn': ['then'],
'thna': ['than'],
'thne': ['then'],
'thnig': ['thing'],
'thnigs': ['things'],
'thoughout': ['throughout'],
'threatend': ['threatened'],
'threatning': ['threatening'],
'threee': ['three'],
'threshhold': ['threshold'],
'thrid': ['third'],
'throrough': ['thorough'],
'throughly': ['thoroughly'],
'throught': ['thought', 'through', 'throughout'],
'througout': ['throughout'],
'thru': ['through'],
'thsi': ['this'],
'thsoe': ['those'],
'thta': ['that'],
'thyat': ['that'],
'tiem': ['time', 'Tim'],
'tihkn': ['think'],
'tihs': ['this'],
'timne': ['time'],
'tiome': ['time', 'tome'],
'tje': ['the'],
'tjhe': ['the'],
'tjpanishad': ['upanishad'],
'tkae': ['take'],
'tkaes': ['takes'],
'tkaing': ['taking'],
'tlaking': ['talking'],
'tobbaco': ['tobacco'],
'todays': ['today\'s'],
'todya': ['today'],
'toghether': ['together'],
'toke': ['took'],
'tolerence': ['tolerance'],
'Tolkein': ['Tolkien'],
'tomatos': ['tomatoes'],
'tommorow': ['tomorrow'],
'tommorrow': ['tomorrow'],
'tongiht': ['tonight'],
'toriodal': ['toroidal'],
'tormenters': ['tormentors'],
'tornadoe': ['tornado'],
'torpeados': ['torpedoes'],
'torpedos': ['torpedoes'],
'tothe': ['to the'],
'toubles': ['troubles'],
'tounge': ['tongue'],
'tourch': ['torch', 'touch'],
'towords': ['towards'],
'towrad': ['toward'],
'tradionally': ['traditionally'],
'traditionaly': ['traditionally'],
'traditionnal': ['traditional'],
'traditition': ['tradition'],
'tradtionally': ['traditionally'],
'trafficed': ['trafficked'],
'trafficing': ['trafficking'],
'trafic': ['traffic'],
'trancendent': ['transcendent'],
'trancending': ['transcending'],
'tranform': ['transform'],
'tranformed': ['transformed'],
'transcendance': ['transcendence'],
'transcendant': ['transcendent'],
'transcendentational': ['transcendental'],
'transcripting': ['transcribing', 'transcription'],
'transending': ['transcending'],
'transesxuals': ['transsexuals'],
'transfered': ['transferred'],
'transfering': ['transferring'],
'transformaton': ['transformation'],
'transistion': ['transition'],
'translater': ['translator'],
'translaters': ['translators'],
'transmissable': ['transmissible'],
'transporation': ['transportation'],
'tremelo': ['tremolo'],
'tremelos': ['tremolos'],
'triguered': ['triggered'],
'triology': ['trilogy'],
'troling': ['trolling'],
'troup': ['troupe'],
'troups': ['troupes', 'troops'],
'truely': ['truly'],
'trustworthyness': ['trustworthiness'],
'turnk': ['turnkey', 'trunk'],
'Tuscon': ['Tucson'],
'tust': ['trust'],
'twelth': ['twelfth'],
'twon': ['town'],
'twpo': ['two'],
'tyhat': ['that'],
'tyhe': ['they'],
'typcial': ['typical'],
'typicaly': ['typically'],
'tyranies': ['tyrannies'],
'tyrany': ['tyranny'],
'tyrranies': ['tyrannies'],
'tyrrany': ['tyranny'],
'ubiquitious': ['ubiquitous'],
'ublisher': ['publisher'],
'uise': ['use'],
'Ukranian': ['Ukrainian'],
'ultimely': ['ultimately'],
'unacompanied': ['unaccompanied'],
'unahppy': ['unhappy'],
'unanymous': ['unanimous'],
'unathorised': ['unauthorised'],
'unavailible': ['unavailable'],
'unballance': ['unbalance'],
'unbeknowst': ['unbeknownst'],
'unbeleivable': ['unbelievable'],
'uncertainity': ['uncertainty'],
'unchallengable': ['unchallengeable'],
'unchangable': ['unchangeable'],
'uncompetive': ['uncompetitive'],
'unconcious': ['unconscious'],
'unconciousness': ['unconsciousness'],
'unconfortability': ['discomfort'],
'uncontitutional': ['unconstitutional'],
'unconvential': ['unconventional'],
'undecideable': ['undecidable'],
'understoon': ['understood'],
'undesireable': ['undesirable'],
'undetecable': ['undetectable'],
'undoubtely': ['undoubtedly'],
'undreground': ['underground'],
'uneccesary': ['unnecessary'],
'unecessary': ['unnecessary'],
'unequalities': ['inequalities'],
'unforetunately': ['unfortunately'],
'unforgetable': ['unforgettable'],
'unforgiveable': ['unforgivable'],
'unfortunatley': ['unfortunately'],
'unfortunatly': ['unfortunately'],
'unfourtunately': ['unfortunately'],
'unihabited': ['uninhabited'],
'unilateraly': ['unilaterally'],
'unilatreal': ['unilateral'],
'unilatreally': ['unilaterally'],
'uninterruped': ['uninterrupted'],
'uninterupted': ['uninterrupted'],
'UnitesStates': ['UnitedStates'],
'univeral': ['universal'],
'univeristies': ['universities'],
'univeristy': ['university'],
'univerity': ['university'],
'universtiy': ['university'],
'univesities': ['universities'],
'univesity': ['university'],
'unkown': ['unknown'],
'unlikey': ['unlikely'],
'unmanouverable': ['unmaneuverable', 'unmanoeuvrable'],
'unmistakeably': ['unmistakably'],
'unneccesarily': ['unnecessarily'],
'unneccesary': ['unnecessary'],
'unneccessarily': ['unnecessarily'],
'unneccessary': ['unnecessary'],
'unnecesarily': ['unnecessarily'],
'unnecesary': ['unnecessary'],
'unoffical': ['unofficial'],
'unoperational': ['nonoperational'],
'unoticeable': ['unnoticeable'],
'unplease': ['displease'],
'unplesant': ['unpleasant'],
'unprecendented': ['unprecedented'],
'unprecidented': ['unprecedented'],
'unrepentent': ['unrepentant'],
'unrepetant': ['unrepentant'],
'unrepetent': ['unrepentant'],
'unsed': ['used', 'unused', 'unsaid'],
'unsubstanciated': ['unsubstantiated'],
'unsuccesful': ['unsuccessful'],
'unsuccesfully': ['unsuccessfully'],
'unsuccessfull': ['unsuccessful'],
'unsucesful': ['unsuccessful'],
'unsucesfuly': ['unsuccessfully'],
'unsucessful': ['unsuccessful'],
'unsucessfull': ['unsuccessful'],
'unsucessfully': ['unsuccessfully'],
'unsuprised': ['unsurprised'],
'unsuprising': ['unsurprising'],
'unsuprisingly': ['unsurprisingly'],
'unsuprized': ['unsurprised'],
'unsuprizing': ['unsurprising'],
'unsuprizingly': ['unsurprisingly'],
'unsurprized': ['unsurprised'],
'unsurprizing': ['unsurprising'],
'unsurprizingly': ['unsurprisingly'],
'untill': ['until'],
'untranslateable': ['untranslatable'],
'unuseable': ['unusable'],
'unusuable': ['unusable'],
'unviersity': ['university'],
'unwarrented': ['unwarranted'],
'unweildly': ['unwieldy'],
'unwieldly': ['unwieldy'],
'upcomming': ['upcoming'],
'upgradded': ['upgraded'],
'upto': ['up to'],
'usally': ['usually'],
'useage': ['usage'],
'usefull': ['useful'],
'usefuly': ['usefully'],
'useing': ['using'],
'usualy': ['usually'],
'ususally': ['usually'],
'vaccum': ['vacuum'],
'vaccume': ['vacuum'],
'vacinity': ['vicinity'],
'vaguaries': ['vagaries'],
'vaieties': ['varieties'],
'vailidty': ['validity'],
'valetta': ['valletta'],
'valuble': ['valuable'],
'valueable': ['valuable'],
'varations': ['variations'],
'varient': ['variant'],
'variey': ['variety'],
'varing': ['varying'],
'varities': ['varieties'],
'varity': ['variety'],
'vasall': ['vassal'],
'vasalls': ['vassals'],
'vegatarian': ['vegetarian'],
'vegitable': ['vegetable'],
'vegitables': ['vegetables'],
'vegtable': ['vegetable'],
'vehicule': ['vehicle'],
'vell': ['well'],
'venemous': ['venomous'],
'vengance': ['vengeance'],
'vengence': ['vengeance'],
'verfication': ['verification'],
'verison': ['version'],
'verisons': ['versions'],
'vermillion': ['vermilion'],
'versitilaty': ['versatility'],
'versitlity': ['versatility'],
'vetween': ['between'],
'veyr': ['very'],
'vigeur': ['vigueur', 'vigour', 'vigor'],
'vigilence': ['vigilance'],
'vigourous': ['vigorous'],
'villian': ['villain'],
'villification': ['vilification'],
'villify': ['vilify'],
'villin': ['villi', 'villain', 'villein'],
'vincinity': ['vicinity'],
'violentce': ['violence'],
'virtualy': ['virtually'],
'virutal': ['virtual'],
'virutally': ['virtually'],
'visable': ['visible'],
'visably': ['visibly'],
'visting': ['visiting'],
'vistors': ['visitors'],
'vitories': ['victories'],
'volcanoe': ['volcano'],
'voleyball': ['volleyball'],
'volontary': ['voluntary'],
'volonteer': ['volunteer'],
'volonteered': ['volunteered'],
'volonteering': ['volunteering'],
'volonteers': ['volunteers'],
'volounteer': ['volunteer'],
'volounteered': ['volunteered'],
'volounteering': ['volunteering'],
'volounteers': ['volunteers'],
'volumne': ['volume'],
'vreity': ['variety'],
'vrey': ['very'],
'vriety': ['variety'],
'vulnerablility': ['vulnerability'],
'vyer': ['very'],
'vyre': ['very'],
'waht': ['what'],
'wanna': ['want to'],
'warantee': ['warranty'],
'wardobe': ['wardrobe'],
'warrent': ['warrant'],
'warrriors': ['warriors'],
'wasnt': ['wasn\'t'],
'wass': ['was'],
'watn': ['want'],
'wayword': ['wayward'],
'weaponary': ['weaponry'],
'weas': ['was'],
'wehn': ['when'],
'weild': ['wield', 'wild'],
'weilded': ['wielded'],
'wendsay': ['Wednesday'],
'wensday': ['Wednesday'],
'wereabouts': ['whereabouts'],
'wether': ['weather', 'whether'],
'whant': ['want'],
'whants': ['wants'],
'whcih': ['which'],
'wheras': ['whereas'],
'wherease': ['whereas'],
'whereever': ['wherever'],
'whic': ['which'],
'whihc': ['which'],
'whith': ['with'],
'whlch': ['which'],
'whn': ['when'],
'wholey': ['wholly'],
'wholy': ['wholly', 'holy'],
'whta': ['what'],
'whther': ['whether'],
'wich': ['which', 'witch'],
'widesread': ['widespread'],
'wief': ['wife'],
'wierd': ['weird'],
'wiew': ['view'],
'wih': ['with'],
'wiht': ['with'],
'wille': ['will'],
'willk': ['will'],
'willingless': ['willingness'],
'wirting': ['writing'],
'withdrawl': ['withdrawal', 'withdraw'],
'witheld': ['withheld'],
'withh': ['with'],
'withing': ['within'],
'withold': ['withhold'],
'witht': ['with'],
'witn': ['with'],
'wiull': ['will'],
'wnat': ['want'],
'wnated': ['wanted'],
'wnats': ['wants'],
'wohle': ['whole'],
'wokr': ['work'],
'wokring': ['working'],
'wonderfull': ['wonderful'],
'wordlwide': ['worldwide'],
'workststion': ['workstation'],
'worls': ['world'],
'worstened': ['worsened'],
'woudl': ['would'],
'wresters': ['wrestlers'],
'wriet': ['write'],
'writen': ['written'],
'wroet': ['wrote'],
'wrok': ['work'],
'wroking': ['working'],
'wtih': ['with'],
'wupport': ['support'],
'xenophoby': ['xenophobia'],
'yaching': ['yachting'],
'yaer': ['year'],
'yaerly': ['yearly'],
'yaers': ['years'],
'yatch': ['yacht'],
'yearm': ['year'],
'yeasr': ['years'],
'yeild': ['yield'],
'yeilding': ['yielding'],
'Yementite': ['Yemenite', 'Yemeni'],
'yera': ['year'],
'yrea': ['year'],
'yeras': ['years'],
'yersa': ['years'],
'yotube': ['youtube'],
'youseff': ['yousef'],
'youself': ['yourself'],
'ytou': ['you'],
'yuo': ['you'],
'zeebra': ['zebra'],
} |
the-stack_0_20240 | # MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import re
import shutil
from pathlib import Path
from typing import Optional, List, Tuple, Union
import git
from packaging import version
from packit.utils import is_a_git_ref, run_command, git_remote_url_to_https_url
from packit.actions import ActionName
from packit.base_git import PackitRepositoryBase
from packit.config import Config, PackageConfig, SyncFilesConfig
from packit.constants import COMMON_ARCHIVE_EXTENSIONS
from packit.exceptions import PackitException, FailedCreateSRPM
from packit.specfile import Specfile
from packit.local_project import LocalProject
logger = logging.getLogger(__name__)
class Upstream(PackitRepositoryBase):
""" interact with upstream project """
def __init__(
self, config: Config, package_config: PackageConfig, local_project: LocalProject
):
"""
:param config: global configuration
:param package_config: configuration of the upstream project
:param local_project: the local git repository of the upstream project
"""
self._local_project = local_project
super().__init__(config=config, package_config=package_config)
self.config = config
self.package_config = package_config
self.files_to_sync: Optional[SyncFilesConfig] = self.package_config.synced_files
@property
def local_project(self):
if not self._local_project:
self._local_project = LocalProject(
git_url=self.package_config.upstream_project_url,
repo_name=self.package_config.upstream_package_name,
)
if self._local_project.git_project is None:
if not self.package_config.upstream_project_url:
self.package_config.upstream_project_url = git_remote_url_to_https_url(
self._local_project.git_url
)
self._local_project.git_project = self.config.get_project(
url=self.package_config.upstream_project_url
)
# self._local_project.refresh_the_arguments()
return self._local_project
@property
def active_branch(self) -> str:
return self.local_project.ref
def get_commits_to_upstream(
self, upstream: str, add_upstream_head_commit=False
) -> List[git.Commit]:
"""
Return the list of commits that differ between the current branch and the upstream rev/tag.
Always follow the first parent, so we get a single line/path of commits:
it contains merge commits from master and commits on top of master (e.g. commits from a PR).
:param add_upstream_head_commit: when True, also include the upstream head commit as the first element
:param upstream: str -- git branch or tag
:return: list of commits (the last one is the last commit on the current branch)
"""
if is_a_git_ref(repo=self.local_project.git_repo, ref=upstream):
upstream_ref = upstream
else:
upstream_ref = f"origin/{upstream}"
if upstream_ref not in self.local_project.git_repo.refs:
raise Exception(
f"Upstream {upstream_ref} branch nor {upstream} tag not found."
)
commits = list(
self.local_project.git_repo.iter_commits(
rev=f"{upstream_ref}..{self.local_project.ref}",
reverse=True,
first_parent=True,
)
)
if add_upstream_head_commit:
commits.insert(0, self.local_project.git_repo.commit(upstream_ref))
logger.debug(
f"Delta ({upstream_ref}..{self.local_project.ref}): {len(commits)}"
)
return commits
def push_to_fork(
self,
branch_name: str,
force: bool = False,
fork: bool = True,
remote_name: str = None,
) -> Tuple[str, Optional[str]]:
"""
Push the current branch to the fork if fork=True, else to origin.
:param branch_name: the branch we push
:param force: push forcefully?
:param fork: push to a fork?
:param remote_name: name of the remote we should push to;
if None, try to find an ssh_url
:return: tuple of (name of the branch we pushed to, fork username or None)
"""
logger.debug(
f"About to {'force ' if force else ''}push changes to branch {branch_name}."
)
fork_username = None
if not remote_name:
if fork:
if self.local_project.git_project.is_fork:
project = self.local_project.git_project
else:
# ogr is awesome! if you want to fork your own repo, you'll get it!
project = self.local_project.git_project.get_fork(create=True)
fork_username = project.namespace
fork_urls = project.get_git_urls()
ssh_url = fork_urls["ssh"]
remote_name = "fork-ssh"
for remote in self.local_project.git_repo.remotes:
pushurl = next(remote.urls) # afaik this is what git does as well
if ssh_url.startswith(pushurl):
logger.info(f"Will use remote {remote} using URL {pushurl}.")
remote_name = str(remote)
break
else:
logger.info(f"Creating remote fork-ssh with URL {ssh_url}.")
self.local_project.git_repo.create_remote(
name="fork-ssh", url=ssh_url
)
else:
# push to origin and hope for the best
remote_name = "origin"
logger.info(f"Pushing to remote {remote_name} using branch {branch_name}.")
try:
self.push(refspec=branch_name, force=force, remote_name=remote_name)
except git.GitError as ex:
msg = (
f"Unable to push to remote {remote_name} using branch {branch_name}, "
f"the error is:\n{ex}"
)
raise PackitException(msg)
return str(branch_name), fork_username
def create_pull(
self,
pr_title: str,
pr_description: str,
source_branch: str,
target_branch: str,
fork_username: str = None,
) -> None:
"""
Create upstream pull request using the requested branches
"""
project = self.local_project.git_project
if self.local_project.git_project.is_fork:
source_branch = f"{project.namespace}:{source_branch}"
project = self.local_project.git_project.parent
elif fork_username:
source_branch = f"{fork_username}:{source_branch}"
try:
upstream_pr = project.pr_create(
title=pr_title,
body=pr_description,
source_branch=source_branch,
target_branch=target_branch,
)
except Exception as ex:
logger.error("there was an error while create a PR: %r", ex)
raise
else:
logger.info(f"PR created: {upstream_pr.url}")
def create_patches(
self, upstream: str = None, destination: str = None
) -> List[Tuple[str, str]]:
"""
Create patches from downstream commits.
:param destination: str
:param upstream: str -- git branch or tag
:return: [(patch_name, msg)] list of created patches (tuple of the file name and commit msg)
"""
upstream = upstream or self.get_specfile_version()
commits = self.get_commits_to_upstream(upstream, add_upstream_head_commit=True)
destination = destination or self.local_project.working_dir
patches_to_create = []
for i, commit in enumerate(commits[1:]):
parent = commits[i]
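# diff consecutive commits from the first-parent chain, excluding files that packit syncs separately so they never end up in the generated patches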
git_diff_cmd = [
"git",
"diff",
"--patch",
parent.hexsha,
commit.hexsha,
"--",
".",
] + [
f":(exclude){sync_file.src}"
for sync_file in self.package_config.synced_files.get_raw_files_to_sync(
Path(self.local_project.working_dir),
Path(
# this is not important, we only care about src
destination
),
)
]
diff = run_command(
cmd=git_diff_cmd, cwd=self.local_project.working_dir, output=True
)
if not diff:
logger.info(f"No patch for commit: {commit.summary} ({commit.hexsha})")
continue
patches_to_create.append((commit, diff))
patch_list = []
for i, (commit, diff) in enumerate(patches_to_create):
patch_name = f"{i + 1:04d}-{commit.hexsha}.patch"
patch_path = os.path.join(destination, patch_name)
patch_msg = f"{commit.summary}\nAuthor: {commit.author.name} <{commit.author.email}>"
logger.debug(f"Saving patch: {patch_name}\n{patch_msg}")
with open(patch_path, mode="w") as patch_file:
patch_file.write(diff)
patch_list.append((patch_name, patch_msg))
return patch_list
def get_latest_released_version(self) -> str:
"""
Return version of the upstream project for the latest official release
:return: the version string (e.g. "1.0.0")
"""
version = Specfile.get_upstream_version(
versioneer=None,
package_name=self.package_config.downstream_package_name,
category=None,
)
logger.info(f"Version in upstream registries is {version!r}.")
return version
def get_specfile_version(self) -> str:
""" provide version from specfile """
version = self.specfile.get_version()
logger.info(f"Version in spec file is {version!r}.")
return version
def get_version(self) -> str:
"""
Return the version of the latest release available: pick the bigger of the version
found in upstream package repositories and the version in the spec file
"""
ups_ver = version.parse(self.get_latest_released_version() or "")
logger.debug(f"Version in upstream package repositories: {ups_ver}")
spec_ver = version.parse(self.get_specfile_version())
logger.debug(f"Version in spec file: {spec_ver}")
if ups_ver > spec_ver:
logger.warning("Version in spec file is outdated")
logger.info(
"Picking version of the latest release from the upstream registry."
)
return str(ups_ver)
logger.info("Picking version found in spec file.")
return str(spec_ver)
def get_current_version(self) -> str:
"""
Get version of the project in current state (hint `git describe`)
:return: e.g. 0.1.1.dev86+ga17a559.d20190315 or 0.6.1.1.gce4d84e
"""
action_output = self.get_output_from_action(
action=ActionName.get_current_version
)
if action_output:
return action_output[-1]
ver = self.command_handler.run_command(
self.package_config.current_version_command, return_output=True
).strip()
logger.debug("version = %s", ver)
# RPM refuses dashes in version/release
ver = ver.replace("-", ".")
logger.debug("sanitized version = %s", ver)
return ver
def get_archive_extension(self, archive_basename: str, version: str) -> str:
"""
Obtains archive extension from SpecFile based on basename of the archive.
Defaults to .tar.gz if no Source corresponds to the basename.
"""
for source in self.specfile.get_sources():
base = os.path.basename(source)
# Version in archive_basename could contain hash, the version
# can be different from Spec version. Replace it to ensure proper match.
base = base.replace(self.specfile.get_version(), version)
if base.startswith(archive_basename):
archive_basename_len = len(archive_basename)
return base[archive_basename_len:]
return ".tar.gz"
def create_archive(self, version: str = None) -> str:
"""
Create an archive, using `git archive` by default, from the content of the upstream
repository; only committed changes are present in the archive
"""
version = version or self.get_current_version()
if self.package_config.upstream_package_name:
dir_name = f"{self.package_config.upstream_package_name}-{version}"
else:
dir_name = f"{self.package_config.downstream_package_name}-{version}"
logger.debug("name + version = %s", dir_name)
env = {
"PACKIT_PROJECT_VERSION": version,
"PACKIT_PROJECT_NAME_VERSION": dir_name,
}
if self.has_action(action=ActionName.create_archive):
outputs = self.get_output_from_action(
action=ActionName.create_archive, env=env
)
if not outputs:
raise PackitException("No output from create-archive action.")
# one of the returned strings has to contain existing archive name
for output in reversed(outputs):
for archive_name in reversed(output.splitlines()):
try:
archive_path = Path(
self._local_project.working_dir, archive_name.strip()
)
if archive_path.is_file():
logger.info(f"Created archive: {archive_name.strip()}")
return archive_name
except OSError as ex:
# File too long
if ex.errno == 36:
logger.error(
f"Skipping long output command output while getting archive name"
)
continue
raise ex
else:
raise PackitException(
"No existing file in create-archive action output."
)
archive_extension = self.get_archive_extension(dir_name, version)
if archive_extension not in COMMON_ARCHIVE_EXTENSIONS:
raise PackitException(
"The target archive doesn't use a common extension ({}), "
"git archive can't be used. Please provide your own script "
"for archive creation.".format(", ".join(COMMON_ARCHIVE_EXTENSIONS))
)
archive_name = f"{dir_name}{archive_extension}"
if self.package_config.create_tarball_command:
archive_cmd = self.package_config.create_tarball_command
else:
archive_cmd = [
"git",
"archive",
"-o",
archive_name,
"--prefix",
f"{dir_name}/",
"HEAD",
]
self.command_handler.run_command(archive_cmd, return_output=True, env=env)
return archive_name
def fix_spec(self, archive: str, version: str, commit: str):
"""
In order to create an SRPM from the current git checkout, the spec needs to reference
the tarball and unpack it. This method updates the spec to make that possible.
:param archive: relative path to the archive: used as Source0
:param version: version to set in the spec
:param commit: commit to set in the changelog
"""
prefix = "Source"
regex = re.compile(r"^Source\s*:.+$")
for line in self.specfile.spec_content.section("%package"):
# we are looking for Source lines
if line.startswith(prefix):
# it's a Source line!
if line.startswith(self.package_config.spec_source_id):
# it even matches the specific Source\d+
full_name = self.package_config.spec_source_id
elif regex.match(line):
# okay, let's try the other very common default
# https://github.com/packit-service/packit/issues/536#issuecomment-534074925
full_name = prefix
else:
# nope, let's continue the search
continue
# we found it!
break
else:
raise PackitException(
"The spec file doesn't have sources set "
f"via {self.package_config.spec_source_id} nor {prefix}."
)
self.specfile.set_tag(full_name, archive)
prep = self.specfile.spec_content.section("%prep")
if not prep:
logger.warning("this package doesn't have a %prep section")
return
# stolen from tito, thanks!
# https://github.com/dgoodwin/tito/blob/master/src/tito/common.py#L695
regex = re.compile(r"^(\s*%(?:auto)?setup)(.*?)$")
for idx, line in enumerate(prep):
m = regex.match(line)
if m:
break
else:
logger.error(
"this package is not using %(auto)setup macro in prep, "
"packit can't work in this environment"
)
return
new_setup_line = m[1]
# replace -n with our -n because it's better
args_match = re.search(r"(.*?)\s+-n\s+\S+(.*)", m[2])
if args_match:
new_setup_line += args_match.group(1)
new_setup_line += args_match.group(2)
else:
new_setup_line += m[2]
if not self.package_config.upstream_package_name:
raise PackitException(
f'"upstream_package_name" is not set: unable to fix the spec file; please set it.'
)
new_setup_line += f" -n {self.package_config.upstream_package_name}-{version}"
logger.debug(
f"new {'%autosetup' if 'autosetup' in new_setup_line else '%setup'}"
f" line:\n{new_setup_line}"
)
prep[idx] = new_setup_line
self.specfile.spec_content.replace_section("%prep", prep)
self.specfile.write_spec_content()
msg = f"Development snapshot ({commit})"
self.specfile.set_spec_version(version=f"{version}", changelog_entry=msg)
def create_srpm(
self,
srpm_path: str = None,
source_dir: Union[str, Path] = None,
srpm_dir: str = None,
) -> Path:
"""
Create SRPM from the actual content of the repo
:param source_dir: path with the source files (defaults to dir with specfile)
:param srpm_path: path to the srpm
:param srpm_dir: path to the directory where the srpm is meant to be placed
:return: path to the srpm
"""
if self.running_in_service():
srpm_dir = "."
rpmbuild_dir = "."
else:
srpm_dir = srpm_dir or os.getcwd()
rpmbuild_dir = str(self.absolute_specfile_dir)
cmd = [
"rpmbuild",
"-bs",
"--define",
f"_sourcedir {source_dir or rpmbuild_dir}",
"--define",
f"_specdir {rpmbuild_dir}",
"--define",
f"_srcrpmdir {srpm_dir}",
# no idea about this one, but tests were failing in tox w/o it
"--define",
f"_topdir {rpmbuild_dir}",
# we also need these 3 so that rpmbuild won't create them
"--define",
f"_builddir {rpmbuild_dir}",
"--define",
f"_rpmdir {rpmbuild_dir}",
"--define",
f"_buildrootdir {rpmbuild_dir}",
self.package_config.specfile_path,
]
present_srpms = set(Path(srpm_dir).glob("*.src.rpm"))
logger.debug("present srpms = %s", present_srpms)
try:
out = self.command_handler.run_command(cmd, return_output=True).strip()
except PackitException as ex:
logger.error(f"Failed to create SRPM: {ex!r}")
raise FailedCreateSRPM("Failed to create SRPM.")
logger.debug(f"{out}")
# not doing 'Wrote: (.+)' since people can have different locales; hi Franto!
reg = r": (.+\.src\.rpm)$"
try:
the_srpm = re.findall(reg, out)[0]
except IndexError:
raise PackitException("SRPM cannot be found, something is wrong.")
logger.info("SRPM is %s", the_srpm)
if srpm_path:
shutil.move(the_srpm, srpm_path)
return Path(srpm_path)
if self.running_in_service():
return Path(self.local_project.working_dir).joinpath(the_srpm)
return Path(the_srpm)
|
the-stack_0_20243 | import torch
import torch.nn as nn
from dgl.nn.pytorch import RelGraphConv
from allennlp.commands.elmo import ElmoEmbedder
from hyperpara import *
import torch.nn.functional as F
import dgl
from functools import partial
class RGCN(nn.Module):
def __init__(self, num_nodes, gnn_h_dim, out_dim, num_rels,
num_gcn_hidden_layers, dropout=0,
use_self_loop=False, use_cuda=True):
super(RGCN, self).__init__()
self.num_nodes = num_nodes
self.gnn_h_dim = gnn_h_dim
self.out_dim = out_dim
self.num_rels = num_rels
self.num_gcn_hidden_layers = num_gcn_hidden_layers
self.dropout = dropout
self.use_self_loop = use_self_loop
self.use_cuda = use_cuda
# create rgcn layers
self.build_model()
def build_model(self):
# for query
self.elmo_embedder = self.build_elmo_embedder()
self.gcn_layers = nn.ModuleList()
# 1-layer FFN 256 dim for ElMo embedding
self.elmo_fnn = nn.Linear(1024 * 3, 256)
# 2-layer bi-LSTM for query embedding
# concatenation of q and node embeddings 256+256 = 512
# h2h 512 to 512
self.shared_gcn = RGCNLayer(self.gnn_h_dim, self.gnn_h_dim, self.num_rels)
# concatenation h_G with q: 512+128 = 768
# 3 layers FF [256, 128, 1]
# Global representation layer
self.output_fnn = nn.Linear(self.gnn_h_dim, self.out_dim)
def build_elmo_embedder(self):
# Setting for Elmo Embedder - CHANGE THE PATH
options_file = args.project_address+'mlp_project/src/elmo_2x4096_512_2048cnn_2xhighway_options.json'
weight_file = args.project_address+'mlp_project/src/elmo_2x4096_512_2048cnn_2xhighway_weights'
return ElmoEmbedder(
options_file=options_file,
weight_file=weight_file)
def forward(self, h, b_norm_adj):
# embedding the query
# self.elmo_embedder
# 1-layer FFN 256 dim for ElMo embedding
h = self.elmo_fnn(h)
h = F.relu(h, inplace=True)
# 2-layer bi-LSTM for query embedding: not implemented yet, a zero placeholder is used below
# concatenation of q and node embeddings 256+256 = 512
if self.use_cuda:
q = torch.zeros(h.shape).cuda()
else:
q = torch.zeros(h.shape)
h = torch.cat((h, q), dim=-1)
# print("h.shape:", h.shape) # 512
# print("h before gcn shape is :", h.shape) # bsx500x512
# print("Shared gcn check:", self.num_gcn_hidden_layers)
# print("Before reshape to batch:", len(g), h.shape, r.shape, norm.shape)
bs = h.shape[0]
# Reshape to batch
# h = h.view(-1, self.gnn_h_dim)
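# the same RGCN layer (shared weights) is applied num_gcn_hidden_layers times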
for i in range(self.num_gcn_hidden_layers):
h = self.shared_gcn(h, b_norm_adj)
# h = h.view(bs, -1, self.gnn_h_dim)
# print("Reshape back to batch:", h.shape)
# Average globall pool
# print("h after gcn shape is :", h.shape) # bsx500x256
h = torch.mean(h, dim = 1)
# print(h.shape)
# print(self.output_fnn)
out = self.output_fnn(h)
out = F.softmax(out, dim=-1)
return out
class RGCNLayer(nn.Module):
def __init__(self, in_feat, out_feat, num_rels, bias=None,
activation=None, is_input_layer=False):
super(RGCNLayer, self).__init__()
self.in_feat = in_feat
self.out_feat = out_feat
self.num_rels = num_rels
self.bias = bias
self.activation = activation
self.is_input_layer = is_input_layer
self.rel_weights = nn.ModuleList()
for _ in range(self.num_rels):
rel_W = nn.Linear(self.in_feat, self.out_feat)
self.rel_weights.append(rel_W)
self.weight_hid = nn.Linear(self.in_feat, self.out_feat)
self.weight_gate = nn.Linear(2 * self.out_feat, self.out_feat)
def forward(self, h, norm_adj):
# 1. message aggregation
# norm_adj: [batch_size, num_realtion, num_nodes, num_nodes]
# h: [batch_size, num_nodes, in_dim]
# copy along relational dimension
h_i = torch.stack([W_r(h) for W_r in self.rel_weights], dim=1) # (bs, rels, num_nodes, hdim)
# print(h_i.shape)
# msg: [batch_size * num_relation, num_nodes, in_dim]
# print("before matmul:",norm_adj.shape, h_i.shape)
msg = torch.matmul(norm_adj, h_i).sum(1) # bs, num_nodes, out_dim
# print("After and reduce sum at dim 1:",msg.shape)
update = msg + self.weight_hid(h) # bs, num_nodes, out_dim
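# gated residual update: a sigmoid gate interpolates between the transformed message and the previous node state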
gate = self.weight_gate(torch.cat((update, h), -1))
gate = torch.sigmoid(gate)
h = gate * torch.tanh(update) + (1 - gate) * h
return h |
the-stack_0_20245 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import Counter
from typing import TYPE_CHECKING
from sqlalchemy import func
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule as TR
if TYPE_CHECKING:
from sqlalchemy.orm import Session
from airflow.models.taskinstance import TaskInstance
class TriggerRuleDep(BaseTIDep):
"""
Determines if a task's upstream tasks are in a state that allows a given task instance
to run.
"""
NAME = "Trigger Rule"
IGNORABLE = True
IS_TASK_DEP = True
@staticmethod
def _get_states_count_upstream_ti(task, finished_tis):
"""
Return the counts of upstream task instance states for a specific task, used to determine
whether this task's instance can run in this iteration.
:param task: the task whose upstream task instance states we want to count
:param finished_tis: all the finished task instances of the dag_run
"""
counter = Counter(ti.state for ti in finished_tis if ti.task_id in task.upstream_task_ids)
return (
counter.get(State.SUCCESS, 0),
counter.get(State.SKIPPED, 0),
counter.get(State.FAILED, 0),
counter.get(State.UPSTREAM_FAILED, 0),
sum(counter.values()),
)
@provide_session
def _get_dep_statuses(self, ti, session, dep_context: DepContext):
# Checking that all upstream dependencies have succeeded
if not ti.task.upstream_list:
yield self._passing_status(reason="The task instance did not have any upstream tasks.")
return
if ti.task.trigger_rule == TR.ALWAYS:
yield self._passing_status(reason="The task had an 'always' trigger rule set.")
return
# see if the task name is in the task upstream for our task
successes, skipped, failed, upstream_failed, done = self._get_states_count_upstream_ti(
task=ti.task, finished_tis=dep_context.ensure_finished_tis(ti.get_dagrun(session), session)
)
yield from self._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=dep_context.flag_upstream_failed,
session=session,
)
@staticmethod
def _count_upstreams(ti: "TaskInstance", *, session: "Session"):
from airflow.models.taskinstance import TaskInstance
# Optimization: Don't need to hit the database if no upstreams are mapped.
upstream_task_ids = ti.task.upstream_task_ids
if ti.task.dag and not any(ti.task.dag.get_task(tid).is_mapped for tid in upstream_task_ids):
return len(upstream_task_ids)
# We don't naively count task instances because it is not guaranteed
# that all upstreams have been created in the database at this point.
# Instead, we look for already-expanded tasks, and add them to the raw
# task count without considering mapping.
mapped_tis_addition = (
session.query(func.count())
.filter(
TaskInstance.dag_id == ti.dag_id,
TaskInstance.run_id == ti.run_id,
TaskInstance.task_id.in_(upstream_task_ids),
TaskInstance.map_index > 0,
)
.scalar()
)
return len(upstream_task_ids) + mapped_tis_addition
@provide_session
def _evaluate_trigger_rule(
self,
ti,
successes,
skipped,
failed,
upstream_failed,
done,
flag_upstream_failed,
session: "Session" = NEW_SESSION,
):
"""
Yields a dependency status that indicate whether the given task instance's trigger
rule was met.
:param ti: the task instance to evaluate the trigger rule of
:param successes: Number of successful upstream tasks
:param skipped: Number of skipped upstream tasks
:param failed: Number of failed upstream tasks
:param upstream_failed: Number of upstream_failed upstream tasks
:param done: Number of completed upstream tasks
:param flag_upstream_failed: This is a hack to generate
the upstream_failed state creation while checking to see
whether the task instance is runnable. It was the shortest
path to add the feature
:param session: database session
"""
task = ti.task
upstream = self._count_upstreams(ti, session=session)
trigger_rule: TR = task.trigger_rule
upstream_done = done >= upstream
upstream_tasks_state = {
"total": upstream,
"successes": successes,
"skipped": skipped,
"failed": failed,
"upstream_failed": upstream_failed,
"done": done,
}
# TODO(aoen): Ideally each individual trigger rules would be its own class, but
# this isn't very feasible at the moment since the database queries need to be
# bundled together for efficiency.
# handling instant state assignment based on trigger rules
if flag_upstream_failed:
if trigger_rule == TR.ALL_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped:
ti.set_state(State.SKIPPED, session)
elif trigger_rule == TR.ALL_FAILED:
if successes or skipped:
ti.set_state(State.SKIPPED, session)
elif trigger_rule == TR.ONE_SUCCESS:
if upstream_done and done == skipped:
# if upstream is done and all are skipped mark as skipped
ti.set_state(State.SKIPPED, session)
elif upstream_done and successes <= 0:
# if upstream is done and there are no successes mark as upstream failed
ti.set_state(State.UPSTREAM_FAILED, session)
elif trigger_rule == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
ti.set_state(State.SKIPPED, session)
elif trigger_rule == TR.NONE_FAILED:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif trigger_rule == TR.NONE_FAILED_MIN_ONE_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped == upstream:
ti.set_state(State.SKIPPED, session)
elif trigger_rule == TR.NONE_SKIPPED:
if skipped:
ti.set_state(State.SKIPPED, session)
elif trigger_rule == TR.ALL_SKIPPED:
if successes or failed:
ti.set_state(State.SKIPPED, session)
if trigger_rule == TR.ONE_SUCCESS:
if successes <= 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires one upstream task success, "
f"but none were found. upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires one upstream task failure, "
f"but none were found. upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_SUCCESS:
num_failures = upstream - successes
if num_failures > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"succeeded, but found {num_failures} non-success(es). "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_FAILED:
num_successes = upstream - failed - upstream_failed
if num_successes > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have failed, "
f"but found {num_successes} non-failure(s). "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"completed, but found {upstream_done} task(s) that were not done. "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.NONE_FAILED:
num_failures = upstream - successes - skipped
if num_failures > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"succeeded or been skipped, but found {num_failures} non-success(es). "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.NONE_FAILED_MIN_ONE_SUCCESS:
num_failures = upstream - successes - skipped
if num_failures > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have "
f"succeeded or been skipped, but found {num_failures} non-success(es). "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.NONE_SKIPPED:
if not upstream_done or (skipped > 0):
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to not have been "
f"skipped, but found {skipped} task(s) skipped. "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
elif trigger_rule == TR.ALL_SKIPPED:
num_non_skipped = upstream - skipped
if num_non_skipped > 0:
yield self._failing_status(
reason=(
f"Task's trigger rule '{trigger_rule}' requires all upstream tasks to have been "
f"skipped, but found {num_non_skipped} task(s) in non skipped state. "
f"upstream_tasks_state={upstream_tasks_state}, "
f"upstream_task_ids={task.upstream_task_ids}"
)
)
else:
yield self._failing_status(reason=f"No strategy to evaluate trigger rule '{trigger_rule}'.")
|
the-stack_0_20246 | import os
import json
import copy
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from utils.tools import get_mask_from_lengths, pad
from .blocks import (
SwishBlock,
LinearNorm,
LConvBlock,
ConvBlock,
ConvNorm,
FFTBlock,
VariableLengthAttention,
MultiHeadAttention,
)
from text.symbols import symbols
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
""" Sinusoid position encoding table """
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array(
[get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.0
return torch.FloatTensor(sinusoid_table)
class TextEncoder(nn.Module):
""" Text Encoder """
def __init__(self, config):
super(TextEncoder, self).__init__()
n_position = config["max_seq_len"] + 1
n_src_vocab = len(symbols) + 1
d_word_vec = config["text_encoder"]["encoder_hidden"]
n_layers_conv = config["text_encoder"]["conv_layer"]
n_layers_trans = config["text_encoder"]["trans_layer"]
n_head = config["text_encoder"]["trans_head"]
d_k = d_v = (
config["text_encoder"]["encoder_hidden"]
// config["text_encoder"]["trans_head"]
)
d_encoder = config["text_encoder"]["encoder_hidden"]
d_inner = config["text_encoder"]["trans_filter_size"]
kernel_size_conv = config["text_encoder"]["conv_kernel_size"]
kernel_size_trans = config["text_encoder"]["trans_kernel_size"]
dropout = config["text_encoder"]["encoder_dropout"]
self.max_seq_len = config["max_seq_len"]
self.d_encoder = d_encoder
self.src_word_emb = nn.Embedding(
n_src_vocab, d_word_vec, padding_idx=0
)
self.position_enc = nn.Parameter(
get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
requires_grad=False,
)
self.convolution_stack = nn.ModuleList(
[
ConvBlock(
d_encoder, d_encoder, kernel_size_conv, dropout=dropout
)
for _ in range(n_layers_conv)
]
)
self.transformer_stack = nn.ModuleList(
[
FFTBlock(
d_encoder, n_head, d_k, d_v, d_inner, kernel_size_trans, dropout=dropout
)
for _ in range(n_layers_trans)
]
)
def forward(self, src_seq, mask):
batch_size, max_len = src_seq.shape[0], src_seq.shape[1]
slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
enc_output = self.src_word_emb(src_seq)
for enc_layer in self.convolution_stack:
enc_output = enc_layer(
enc_output, mask=mask
)
if not self.training and src_seq.shape[1] > self.max_seq_len:
enc_output = enc_output + get_sinusoid_encoding_table(
src_seq.shape[1], self.d_encoder
)[: src_seq.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(
src_seq.device
)
else:
enc_output = enc_output + self.position_enc[
:, :max_len, :
].expand(batch_size, -1, -1)
for enc_layer in self.transformer_stack:
enc_output, _ = enc_layer(
enc_output, mask=mask, slf_attn_mask=slf_attn_mask
)
return enc_output
class ResidualEncoder(nn.Module):
""" Residual Encoder """
def __init__(self, config):
super(ResidualEncoder, self).__init__()
n_position = config["max_seq_len"] + 1
n_mel_channels = config["n_mel_channels"]
d_encoder_t = config["text_encoder"]["encoder_hidden"]
d_encoder = config["residual_encoder"]["encoder_hidden"]
d_latent = config["residual_encoder"]["latent_dim"]
d_neck = config["residual_encoder"]["bottleneck_size"]
n_layers = config["residual_encoder"]["conv_layer"]
n_head = config["residual_encoder"]["conv_head"]
kernel_size = config["residual_encoder"]["conv_kernel_size"]
dropout = config["residual_encoder"]["conv_dropout"]
speaker_embed_size = config["speaker_embed_size"]
self.max_seq_len = config["max_seq_len"]
self.d_encoder = d_encoder
self.d_latent = d_latent
self.text_linear = LinearNorm(d_encoder_t, d_encoder)
self.layer_norm1 = nn.LayerNorm(d_encoder)
self.input_linear = nn.Sequential(
LinearNorm(
d_encoder + speaker_embed_size + n_mel_channels, d_encoder
),
nn.ReLU()
)
self.position_enc = nn.Parameter(
get_sinusoid_encoding_table(n_position, d_encoder).unsqueeze(0),
requires_grad=False,
)
self.convolution_stack = nn.ModuleList(
[
LConvBlock(
d_encoder, kernel_size, n_head, dropout=dropout
)
for _ in range(n_layers)
]
)
self.attention = VariableLengthAttention(
d_encoder, d_encoder, d_encoder_t, dropout=dropout
)
self.fc_mu = LinearNorm(d_encoder, d_latent)
self.fc_var = LinearNorm(d_encoder, d_latent)
self.output_linear = nn.Sequential(
LinearNorm(
d_latent + speaker_embed_size + d_encoder, d_neck
),
nn.Tanh()
)
self.layer_norm2 = nn.LayerNorm(d_neck)
def reparameterize(self, mu, logvar):
"""
:param mu: (Tensor) Mean of the latent Gaussian
        :param logvar: (Tensor) Log-variance of the latent Gaussian
        :return: (Tensor) Sample drawn from N(mu, exp(logvar)) via the reparameterization trick
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
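    # Added note (not in the original file): with this parameterization, the KL term of the VAE loss
    # that consumes the `mu` and `log_var` returned below is typically computed as
    #   KL = -0.5 * sum(1 + log_var - mu**2 - exp(log_var))
    # taken over the unmasked phoneme positions.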
def forward(self, mel, text_encoding, mel_mask, src_mask, max_mel_len, max_src_len, speaker_embedding):
batch_size = text_encoding.shape[0]
text_encoding = self.layer_norm1(self.text_linear(text_encoding))
if not self.training and mel is None:
mu = log_var = torch.zeros([batch_size, max_src_len, self.d_latent], device=device)
attn = None
else:
speaker_embedding_m = speaker_embedding.unsqueeze(1).expand(
-1, max_mel_len, -1
)
position_enc = self.position_enc[
:, :max_mel_len, :
].expand(batch_size, -1, -1)
enc_input = torch.cat([position_enc, speaker_embedding_m, mel], dim=-1)
enc_output = self.input_linear(enc_input)
for enc_layer in self.convolution_stack:
enc_output = enc_layer(
enc_output, mask=mel_mask
)
enc_output, attn = self.attention(enc_output, text_encoding, mel_mask, src_mask)
mu = self.fc_mu(enc_output).masked_fill(src_mask.unsqueeze(-1), 0)
log_var = self.fc_var(enc_output).masked_fill(src_mask.unsqueeze(-1), 0)
# Phoneme-Level Fine-Grained VAE
z = self.reparameterize(mu, log_var)
z = z.masked_fill(src_mask.unsqueeze(-1), 0)
speaker_embedding_t = speaker_embedding.unsqueeze(1).expand(
-1, max_src_len, -1
)
enc_output = torch.cat([z, speaker_embedding_t, text_encoding], dim=-1)
enc_output = self.layer_norm2(self.output_linear(enc_output))
enc_output = enc_output.masked_fill(src_mask.unsqueeze(-1), 0)
return enc_output, attn, mu, log_var
class DurationPredictor(nn.Module):
""" Duration Predictor """
def __init__(self, config):
super(DurationPredictor, self).__init__()
d_predictor = config["duration_predictor"]["predictor_hidden"]
d_encoder_t = config["text_encoder"]["encoder_hidden"]
d_encoder_r = config["residual_encoder"]["bottleneck_size"]
n_layers = config["duration_predictor"]["conv_layer"]
n_head = config["duration_predictor"]["conv_head"]
kernel_size = config["duration_predictor"]["conv_kernel_size"]
dropout = config["duration_predictor"]["conv_dropout"]
speaker_embed_size = config["speaker_embed_size"]
self.input_linear = LinearNorm(
d_encoder_t + d_encoder_r + speaker_embed_size, d_predictor
)
self.convolution_stack = nn.ModuleList(
[
LConvBlock(
d_predictor, kernel_size, n_head, dropout=dropout
)
for _ in range(n_layers)
]
)
self.projection = LinearNorm(d_predictor, 1)
def forward(self, x, mask=None):
V = self.input_linear(x)
for enc_layer in self.convolution_stack:
V = enc_layer(
V, mask=mask
)
duration = F.softplus(self.projection(V))
if mask is not None:
duration = duration.masked_fill(mask.unsqueeze(-1), 0)
duration = duration.squeeze(-1)
return duration, V
class LearnedUpsampling(nn.Module):
""" Learned Upsampling """
def __init__(self, config):
super(LearnedUpsampling, self).__init__()
d_predictor = config["duration_predictor"]["predictor_hidden"]
kernel_size = config["learned_upsampling"]["conv_kernel_size"]
dropout = config["learned_upsampling"]["conv_dropout"]
conv_output_size = config["learned_upsampling"]["conv_output_size"]
dim_w = config["learned_upsampling"]["linear_output_size_w"]
dim_c = config["learned_upsampling"]["linear_output_size_c"]
self.max_seq_len = config["max_seq_len"]
# Attention (W)
self.conv_w = ConvBlock(
d_predictor, conv_output_size, kernel_size, dropout=dropout, activation=nn.SiLU()
)
self.swish_w = SwishBlock(
conv_output_size+2, dim_w, dim_w
)
self.linear_w = LinearNorm(dim_w, 1, bias=True)
self.softmax_w = nn.Softmax(dim=2)
# Auxiliary Attention Context (C)
self.conv_c = ConvBlock(
d_predictor, conv_output_size, kernel_size, dropout=dropout, activation=nn.SiLU()
)
self.swish_c = SwishBlock(
conv_output_size+2, dim_c, dim_c
)
# Upsampled Representation (O)
self.linear_einsum = LinearNorm(dim_c, d_predictor) # A
self.layer_norm = nn.LayerNorm(d_predictor)
def forward(self, duration, V, src_len, src_mask, max_src_len):
batch_size = duration.shape[0]
# Duration Interpretation
mel_len = torch.round(duration.sum(-1)).type(torch.LongTensor).to(device)
mel_len = torch.clamp(mel_len, max=self.max_seq_len)
max_mel_len = mel_len.max().item()
mel_mask = get_mask_from_lengths(mel_len, max_mel_len)
# Prepare Attention Mask
src_mask_ = src_mask.float().unsqueeze(-1) # [B, src_len, 1]
mel_mask_ = mel_mask.float().unsqueeze(-1) # [B, tgt_len, 1]
attn_mask = torch.bmm(mel_mask_, src_mask_.transpose(-2, -1)).bool() # [B, tgt_len, src_len]
# Token Boundary Grid
e_k = torch.cumsum(duration, dim=1)
s_k = e_k - duration
e_k = e_k.unsqueeze(1).expand(batch_size, max_mel_len, -1)
s_k = s_k.unsqueeze(1).expand(batch_size, max_mel_len, -1)
t_arange = torch.arange(1, max_mel_len+1, device=device).unsqueeze(0).unsqueeze(-1).expand(
batch_size, -1, max_src_len
)
S, E = (t_arange - s_k).masked_fill(attn_mask, 0), (e_k - t_arange).masked_fill(attn_mask, 0)
# Attention (W)
W = self.swish_w(S, E, self.conv_w(V)) # [B, T, K, dim_w]
W = self.linear_w(W).squeeze(-1).masked_fill(attn_mask, -np.inf)
W = self.softmax_w(W)
# Auxiliary Attention Context (C)
C = self.swish_c(S, E, self.conv_c(V)) # [B, T, K, dim_c]
# Upsampled Representation (O)
upsampled_rep = torch.matmul(W, V) + self.linear_einsum(torch.einsum('btk,btkp->btp', W, C)) # [B, T, M]
upsampled_rep = self.layer_norm(upsampled_rep)
upsampled_rep = upsampled_rep.masked_fill(mel_mask.unsqueeze(-1), 0)
return upsampled_rep, mel_mask, mel_len, W
class Decoder(nn.Module):
""" Spectrogram Decoder With Iterative Mel Prediction """
def __init__(self, config):
super(Decoder, self).__init__()
n_position = config["max_seq_len"] + 1
n_mel_channels = config["n_mel_channels"]
d_decoder = config["decoder"]["decoder_hidden"]
n_layers = config["decoder"]["decoder_layer"]
n_head_conv = config["decoder"]["trans_head"]
n_head_trans = config["decoder"]["trans_head"]
d_k = d_v = (
config["decoder"]["decoder_hidden"]
// config["decoder"]["trans_head"]
)
kernel_size = config["decoder"]["conv_kernel_size"]
dropout = config["decoder"]["decoder_dropout"]
        self.n_layers = n_layers
        self.max_seq_len = config["max_seq_len"]
        self.d_decoder = d_decoder  # used by forward() when max_len exceeds max_seq_len at inference
self.position_enc = nn.Parameter(
get_sinusoid_encoding_table(n_position, d_decoder).unsqueeze(0),
requires_grad=False,
)
self.convolution_stack = nn.ModuleList(
[
LConvBlock(
d_decoder, kernel_size, n_head_conv, dropout=dropout
)
for _ in range(n_layers)
]
)
self.mel_projection = nn.ModuleList(
[
LinearNorm(
d_decoder, n_mel_channels
)
for _ in range(n_layers)
]
)
def forward(self, x, mask):
mel_iters = list()
batch_size, max_len = x.shape[0], x.shape[1]
dec_output = x
if not self.training and max_len > self.max_seq_len:
dec_output = dec_output + get_sinusoid_encoding_table(
max_len, self.d_decoder
)[: max_len, :].unsqueeze(0).expand(batch_size, -1, -1).to(
x.device
)
else:
max_len = min(max_len, self.max_seq_len)
dec_output = dec_output + self.position_enc[
:, :max_len, :
].expand(batch_size, -1, -1)
mask = mask[:, :max_len]
for i, (conv, linear) in enumerate(zip(self.convolution_stack, self.mel_projection)):
dec_output = dec_output.masked_fill(mask.unsqueeze(-1), 0)
dec_output = torch.tanh(conv(
dec_output, mask=mask
))
            if self.training or (not self.training and i == self.n_layers - 1):
mel_iters.append(
linear(dec_output).masked_fill(mask.unsqueeze(-1), 0)
)
return mel_iters, mask
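# End-to-end wiring sketch (added note; not part of the original file). The modules above are
# typically composed roughly as below, with `config` being the model section of the training YAML
# and `speaker_embedding_expanded` a hypothetical name for the speaker vector broadcast over the
# phoneme axis. Kept commented out so importing this module has no side effects.
#
#   text_encoder = TextEncoder(config)
#   residual_encoder = ResidualEncoder(config)
#   duration_predictor = DurationPredictor(config)
#   learned_upsampling = LearnedUpsampling(config)
#   decoder = Decoder(config)
#
#   h_text = text_encoder(src_seq, src_mask)
#   h_res, attn, mu, log_var = residual_encoder(
#       mel, h_text, mel_mask, src_mask, max_mel_len, max_src_len, speaker_embedding)
#   duration, V = duration_predictor(
#       torch.cat([h_text, h_res, speaker_embedding_expanded], dim=-1), mask=src_mask)
#   upsampled, mel_mask, mel_len, W = learned_upsampling(duration, V, src_len, src_mask, max_src_len)
#   mel_iters, mel_mask = decoder(upsampled, mel_mask)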
|
the-stack_0_20247 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import math
import dace
import polybench
M = dace.symbol('M')
N = dace.symbol('N')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64
# Dataset sizes
sizes = [{M: 20, N: 30}, {M: 60, N: 80}, {M: 200, N: 240}, {M: 1000, N: 1200}, {M: 2000, N: 2600}]
args = [
([M, M], datatype),
([M, N], datatype),
([1], datatype),
]
outputs = [(1, 'B')]
def init_array(A, B, alpha):
n = N.get()
m = M.get()
alpha[0] = datatype(1.5)
for i in range(m):
for j in range(i):
A[i, j] = datatype((i + j) % m) / m
A[i, i] = 1.0
for j in range(n):
B[i, j] = datatype((n + (i - j)) % n) / n
@dace.program(datatype[M, M], datatype[M, N], datatype[1])
def trmm(A, B, alpha):
@dace.mapscope
def compute(j: _[0:N]):
@dace.mapscope
def computecol(i: _[0:M]):
tmp = dace.define_local_scalar(datatype)
@dace.tasklet
def reset_tmp():
out >> tmp
out = 0
@dace.map
def compute_elem(k: _[i + 1:M]):
ia << A[k, i]
ib << B[k, j]
ob >> tmp(1, lambda a, b: a + b)
ob = ia * ib
@dace.tasklet
def mult():
ib << B[i, j]
ialpha << alpha
itmp << tmp
ob >> B[i, j]
ob = ialpha * (ib + itmp)
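# Added note (hedged): for each i, j the maps above compute
#   B[i, j] = alpha * (B[i, j] + sum_{k=i+1}^{M-1} A[k, i] * B[k, j])
# i.e. B <- alpha * A^T * B with A treated as unit lower triangular, matching Polybench's trmm kernel.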
if __name__ == '__main__':
polybench.main(sizes, args, outputs, init_array, trmm)
|
the-stack_0_20251 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The program detects cars inside detection frames and changes the frame colour.
This is the version where the frames are polygons:
everything except the frame is filled with black,
and the pictures are cropped to the size of the frame.
Known shortcomings: foreground extraction and background learning should be separated.
The former is needed for every frame, background learning much less often.
The current compromise is doing both at the same time, once every 100 ms.
Rework started 23.10.2017, goals:
- add frame types: presence, stop
- add detection of the movement direction inside a frame
6.10 - the direction for each frame is taken from polygones.dat
Each frame is divided into 4 parts. When 2 parts on one side are crossed simultaneously,
and then 2 parts on the opposite side simultaneously, a pass
in the required direction is considered counted.
This file is the former main.py, reworked to run under Flask with Python 3.
'''
import cv2
import numpy as np
import os,sys,time,json,math
import requests
from multiprocessing import cpu_count
import threading
from threading import Timer
showMode = 1 # mode that shows pictures in a GUI (does not work with autostart on linux)
# no longer used: linWinMode = 0 # linux =0 Windows =1, Main has auto-detection.
# test frames for checking; they are replaced with real ones as the program runs
testRamki = [
[
[61, 325],
[106, 277],
[296, 464],
[88, 539]
],
[
[344, 293],
[370, 236],
[698, 483],
[427, 555]
],
[
[462, 101],
[603, 150],
[656, 257],
[532, 247]
]
]
testMode = 0 # mode for working with test frames - in this mode nothing should be sent to the hub
dets = [] # array of detector class instances == frames
ramki = [] # frames that are read from the polygones.dat file
ramki4 = [] # array of frames where each of the polygones corresponds to 4 inner frames
ramkiModes = [] # frame operating modes: 0 - presence, 1 - stop
ramkiDirections = [] # directions in the frames, for each one [0,0,1,0] - 1 means active
ramkiEntrance =[] # array for recording the event of entering a frame
ramkiMonitor = [] # array for monitoring the status of the big frames (introduced after adding directions, to display the big frame status as text)
colorStatus = [] # frame colours
height = 300 # px size of the window in which the check is done, do not change!!! risky !!!
width = 400 # px ##
origWidth, origHeight = 800, 600 # browser window size used for rescaling
#detection_settings["frame_tresh"] = 20 # frame overlap, %
#detection_settings["frame_hyst"] = 10 # frame hysteresis, %
learningRate = 0.0001 #0.00001 0.001 - this is 13 seconds, 0.0001 - 113 seconds !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#detection_settings["move_tresh"] = 60 # threshold above which we consider that there is motion in the frame. eventually this should probably be loaded from a config or from a settings web page
#detection_move_hyst = 58 # hysteresis for triggering on motion
detection_settings = {"frame_tresh":20,"frame_hyst":10,"move_tresh":60,"move_hyst":58} # detector settings moved into a dictionary
adaptLearningRateInit = 0.005 #0.005 this is the start-up parameter for the background learning time
adaptLearningRate =0 # at start-up it is assigned the Init value and it is reduced while running.
#fixLearningRate = 0.005 # parameter for presence frames? not using it for now. using adaptLearningRateInit instead
""" this is old leftovers. the paths were moved to app.py
polygonesFilePath = 'polygones.dat'
tsNumberMinuteFilePath = 'minTSNumber.dat'
tsNumberHourFilePath = 'hourTSNumber.dat'
"""
statusFilePath = 'status.dat'
tsCalcTimeInterval = 5 # count vehicles once every this number of seconds; may be 1,2,3,4,5,6,10,15,20,30,60
maxNumberTS = 10000 # if the accumulated number of vehicles gets too large, reset it.
# linImagePath = '/dev/shm/mjpeg/cam.jpg' # - this is from the old version, a candidate for removal
#linImagePath = 'C:/WebServers/home/savepic/www/pic.jpg' do not bring this back!
# the class calls `function` with the arguments *args, **kwargs at an interval of `interval` seconds
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
#self.start()
def _run(self):
##self.is_running = False
self._start()
self.function(*self.args, **self.kwargs)
def _start(self):
self._timer = Timer(self.interval, self._run)
self._timer.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
if self._timer:
self._timer.cancel()
self.is_running = False
def isAlive(self):
return self.is_running
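# Usage sketch (added; `heartbeat` is a hypothetical callback, not defined in this module):
#   rt = RepeatedTimer(2, heartbeat)   # call heartbeat() every 2 seconds
#   rt.start()
#   ...
#   rt.stop()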
# the class creates the frames and the background; def getFgmask updates the background and computes the difference with the current frame
# updated 6.11 - to understand the direction of movement each frame is divided into 4, and all of the above is done for each of the 4,
# divided by the lines connecting the midpoints of opposite sides.
class detector():
def __init__(self, pict, frame, i):
self.frame = frame
self.pict=pict
self.borders = rectOverPolygon(frame)
##print ('self.borders =', self.borders) ####################################################################
        self.mask = np.zeros((self.pict.shape[0], self.pict.shape[1]),dtype=np.uint8) # black mask with the dimensions of the input picture
        adaptLearningRate = adaptLearningRateInit
        # self.bg = cv2.BackgroundSubtractorMOG2(500, 5, 0) # arguments (history, treshold, shadow); history is not active if we use learningRate
        self.bg = cv2.createBackgroundSubtractorMOG2(500, 5, 0) # arguments (history, treshold, shadow); history is not active if we use learningRate
        #### Converting the frame into the coordinates of the window cropped to it
        #print (i, 'frame =', frame)
        self.roi_corners = np.array([frame], dtype=np.int32) # frame vertices
        cv2.fillConvexPoly(self.mask, self.roi_corners, (255, 255, 255)) # turn the black mask into a black one with a white frame
        self.framedPict = cv2.bitwise_and(self.pict, self.mask)
        self.smallPict = self.framedPict[int(self.borders[1]):int(self.borders[3]), int(self.borders[0]):int(self.borders[2])]
        self.prev_smallPict = self.smallPict[:,:] # needed for the motion detector
        self.fgmask = self.bg.apply(self.smallPict, learningRate=adaptLearningRate)
        self.absMass = np.zeros((self.smallPict.shape[0], self.smallPict.shape[1]), np.uint8) # matrix of zeros
        self.frameTrigger = 0 # (0, 0, 0) trigger state from change detected by the background-comparison algorithm, ignoring direction
        self.frameMoveTrigger = [0,0,0,0] # trigger state from detecting motion in a particular direction
        self.frameMoveTriggerCommon =0 # overall motion-trigger state for the frame; it is 1 if at least one direction has triggered
        self.frameMoveValCalculated = [0,0,0,0] # movement direction measured by the detector in the frame - an analogue value, taken from the algorithm that computes the motion.
        self.cos_alfa_calculator()
        #print ('self.cos_alfax , self.cos_alfay = ',self.cos_alfax,self.cos_alfay)
        self.tss = 0 # timestamp involved in updating the background
        self.tsRamkiUpd = 0 # timestamp needed for the delay in updating the frame state
        self.noRamkiDirectionsFlag = 0 # flag that no directions are set for the frame; in that case it triggers for all directions
def getFgmask(self,pict,frame, adaptLearningRate):
self.pict = pict
self.frameArea = polygonAreaCalc(frame)
self.framedPict = cv2.bitwise_and(self.pict, self.mask)
self.smallPict = self.framedPict[int(self.borders[1]):int(self.borders[3]), int(self.borders[0]):int(self.borders[2])]
        if (self.tss>time.time()): # in case time.time wraps past 0 - not sure whether that can actually happen
            self.tss=0
            print ('tss over!!!!!!!!!!!!')
        if ((time.time()-self.tss)>0.1):# to increase speed, thin out the background update
self.fgmask = self.bg.apply(self.smallPict, learningRate=adaptLearningRate)
self.fgmask = cv2.erode(self.fgmask, None)
self.fgmask = cv2.dilate(self.fgmask, None)
self.tss = time.time()
#print ('tss', self.tss, time.time())
#print ('dif',time.time()-self.tss)
cv2.convertScaleAbs(self.fgmask, self.absMass)
self.nonZeros = cv2.countNonZero(self.absMass)
#print('self.frameArea',self.frameArea, self.nonZeros)
#print (self.nonZeros%frameArea* detection_settings["frame_tresh"])
if (self.nonZeros/float(self.frameArea) * 100 > detection_settings["frame_tresh"]):
self.frameTrigger = 1#(255, 255, 0)
        elif (self.nonZeros/float(self.frameArea) * 100 < detection_settings["frame_tresh"] - detection_settings["frame_hyst"]): # 10% hysteresis so the frame does not flicker
            self.frameTrigger = 0#(0, 0, 0)
        elif (self.nonZeros / float(self.frameArea) * 100 <= 0): # in case the threshold is set lower than the hysteresis
            self.frameTrigger = 0#(0, 0, 0)
    def cos_alfa_calculator(self): # the method computes the tilt angle (cosine of the angle) of the median - returns a tuple of 2 values - for x and for y
# return self.frame
x0 = self.frame[0][0]
y0 = self.frame[0][1]
x1 = self.frame[1][0]
y1 = self.frame[1][1]
x2 = self.frame[2][0]
y2 = self.frame[2][1]
x3 = self.frame[3][0]
y3 = self.frame[3][1]
x01 = int((x0 + x1) / 2) # x0+(x1-x0)/2
y01 = int((y0 + y1) / 2)
x12 = int((x1 + x2) / 2)
y12 = int((y1 + y2) / 2)
x23 = int((x2 + x3) / 2)
y23 = int((y2 + y3) / 2)
x30 = int((x3 + x0) / 2)
y30 = int((y3 + y0) / 2)
        if showMode: # not actually drawn, since this would have to be called continuously while running, not only in init
            cv2.line(self.pict, (x30, y30), (x12, y12), 255, 2, 1) # this is the x-median of the frame, white
            cv2.line(self.pict, (x01, y01), (x23, y23), 127, 2, 1) # this is the y-median of the frame, grey
        # median length
medianax = math.sqrt((y12 - y30)*(y12 - y30) + (x12 - x30)*(x12 - x30))
medianay = math.sqrt((x01 - x23)*(x01 - x23) + (y23 - y01)*(y23 - y01))
if medianax == 0: medianax=1
if medianay == 0: medianay=1
# print('medianax= ',medianax)
# print('medianay= ',medianay)
'''
if (y23 - y01)!=0 :
alfay = math.atan((x01 - x23)/(y23 - y01))
else:
alfay = 90/180*math.pi
if (x30-x12)!=0 :
alfax = math.atan((y30 - y12) / (x30 - x12))
else:
alfax = 0
'''
# print('x01 , x23 ', x01 , x23)
# print('y01 , y23 ', y01 , y23)
# print('x12 , x30 ', x12 , x30)
# print('y12 , y30 ', y12 , y30)
if medianax != 0:
self.cos_alfax = (x12-x30) / medianax
self.sin_alfax = (y12-y30) / medianax
else:
self.cos_alfax = 0
self.sin_alfax = 1
if medianay != 0:
self.cos_alfay = (y23-y01) / medianay
self.sin_alfay = (x01-x23) / medianay
else:
self.cos_alfay = 1
self.sin_alfay = 0
# print('self.cos_alfax ', self.cos_alfax)
# print('self.sin_alfax ', self.sin_alfax)
# print('self.cos_alfay ', self.cos_alfay)
# print('self.sin_alfay ', self.sin_alfay)
#return (math.cos(alfax),math.cos(alfay))
return (0,0)
    def directionCalc(self): # the method returns the motion for each of the directions in the ROI as 0 if there is no motion and 1 if there is
        flow = cv2.calcOpticalFlowFarneback(self.smallPict, self.prev_smallPict, None, 0.5, 1, 15, 1, 2, 1.2, 0)#cv2.OPTFLOW_FARNEBACK_GAUSSIAN) # returns an array of points, each holding a displacement, with the 3rd coordinate being x and y respectively
        flowx = flow[:,:,0] # flow=None, pyr_scale=0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.1, flags=0
        flowy = flow[:,:,1]
        self.prev_smallPict = self.smallPict # clarify why this is here!!!!!!!!
        # the technique is as follows: multiply the obtained motion along the axes by the cos of the angle
        # between the direction and the x-median for directions along the X coordinate, and between the direction and the y-median for directions along the y-median.
        # the x-median of the frame is the segment connecting the midpoints of the right and left sides of the frame. The y-median connects the midpoints of the top and bottom sides.
        # self.frameMoveValCalculated = [flowx.mean(),0,0,0] # temporary stub
self.frameMoveValCalculated[0] = int((flowy.mean() * self.cos_alfay - flowx.mean() * self.sin_alfay) * 100)
self.frameMoveValCalculated[1] = - int((flowx.mean() * self.cos_alfax + flowy.mean() * self.sin_alfax) * 100)
self.frameMoveValCalculated[2] = - self.frameMoveValCalculated[0]
self.frameMoveValCalculated[3] = - self.frameMoveValCalculated[1]
if showMode:
            self.indicator = np.zeros((100,100),dtype=np.uint8) # a temporary picture for displaying the numbers, since they interfere with computing motion on the main one
cv2.putText(self.indicator, str(abs(round(flowx.mean(), 1))), (5, 10), cv2.FONT_HERSHEY_PLAIN, 0.8, 255, 2)
cv2.putText(self.indicator, str(abs(round(flowy.mean(), 1))), (5, 20), cv2.FONT_HERSHEY_PLAIN, 0.8, 255, 2)
cv2.putText(self.indicator, str(self.frameMoveValCalculated[0]), (50, 10), cv2.FONT_HERSHEY_PLAIN, 0.8, 255, 2)
cv2.putText(self.indicator, str(self.frameMoveValCalculated[1]), (50, 20), cv2.FONT_HERSHEY_PLAIN, 0.8, 255, 2)
cv2.waitKey(1)
def draw_str(dst, x, y, s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=1)
# cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=1)
def writeFile(filePath,status):
with open(filePath,'w') as f:
string = f.write(str(status))
#print status,str(status)
return 1
def polygonAreaCalc(polygon):
    polygonArea = 0 # polygon area
polyLen = len(polygon)
# print ('n=',n)
for i in range(polyLen):
x = polygon[i][0]
if i == 0:
y = polygon[polyLen - 1][1]
y1 = polygon[i + 1][1]
elif i == polyLen - 1:
y = polygon[i - 1][1]
y1 = polygon[0][1]
else:
y = polygon[i - 1][1]
y1 = polygon[i + 1][1]
polygonArea += x * (y - y1)
#print (x * (y - y1))
return abs(polygonArea) / 2
def rectOverPolygon(polygon):
x1,y1=x2,y2=polygon[0]
for i in range (len(polygon)):
if polygon[i][0] < x1: x1 = polygon[i][0]
if polygon[i][1] < y1: y1 = polygon[i][1]
if polygon[i][0] > x2: x2 = polygon[i][0]
if polygon[i][1] > y2: y2 = polygon[i][1]
return x1,y1,x2,y2
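# Worked examples for the two helpers above (added; values follow directly from the formulas):
#   polygonAreaCalc([[0, 0], [4, 0], [4, 3], [0, 3]]) -> 12.0  (shoelace sum, abs()/2)
#   rectOverPolygon([[1, 2], [5, 1], [4, 6], [0, 3]]) -> (0, 1, 5, 6)  (bounding box x1, y1, x2, y2)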
def readPolyFile(polygonesFilePath): # reading the file with the polygons
    #time.sleep(0.1)
    #global origWidth, origHeight,ramki,ramkiModes,ramkiDirections,linPolygonesFilePath
    global origWidth, origHeight, testMode
    try:
        with open(polygonesFilePath, 'r') as f:
            jsRamki = json.load(f)
        ramki = jsRamki.get("polygones", testRamki)
        origWidth, origHeight = jsRamki.get("frame", (800, 600)) # get the picture size from the web interface
        ramkiModes = jsRamki.get("ramkiModes", [0 for i in range(len(ramki))]) # by default all frames are in presence mode
        ramkiDirections = jsRamki.get("ramkiDirections", [[0,0,0,0] for i in range(len(ramki))]) # default - no directions. the default should be reworked, since there may be other than 4 - a generator is needed
        testMode = 0
    except Exception as Error: # if the frames could not be read and the test ones were substituted, it works incorrectly - the frames get rescaled every time; I don't know yet how to fix that.
        print (u'failed to read the frames, had to substitute the test ones..', Error)
        ramki = testRamki # this seems to have been assigned above already... do not remove, otherwise it raises errors like ramki not defined when hitting the exception
        ramkiModes = [0 for i in range(len(ramki))]
        ramkiDirections = [[0,0,0,0] for i in range(len(ramki))]
        testMode = 1
    # rescale the frames
    #dets = [] # future instances of the detector class
    # in the loop we create the frames and pass them the frame data from the web interface
    for i in range(len(ramki)):
        xRate = round(origWidth/float(width)) # x ratio between the picture from the web and the picture in python
        yRate = round(origHeight/float(height)) # the same for y
        ###print (ramki[i] , 'i=',i, 'origWidth',origWidth, 'origHeight',origHeight)
        for j in range(len(ramki[i])):
            ramki[i][j][0] = ramki[i][j][0]/xRate # rescaling all frames
            ramki[i][j][1] = ramki[i][j][1]/yRate
return ramki,ramkiModes,ramkiDirections
# the function makes 4 frames out of one. the input argument, however, is the whole array of frames
def make4RamkiFrom1(ramki):
    # input ramki[i]=[[x1,y1],[x2,y2],[x3,y3],[x4,y4]]
    # output ramki4[i] = [[[x0,y0],[x1,y1],[x2,y2],[x3,y3]],[second inner frame],(third....] i.e. [i][inner frame 1-4][corner][x or y coordinate]
    ramki4=[[[[0,0] for k in range(4)] for j in range(4)] for i in range(len(ramki))] # fill the output with zeros at the start
for i in range(len(ramki)):
x0= ramki4[i][0][0][0]= ramki[i][0][0]
y0= ramki4[i][0][0][1]= ramki[i][0][1]
x1= ramki4[i][1][1][0]= ramki[i][1][0]
y1= ramki4[i][1][1][1]= ramki[i][1][1]
x2= ramki4[i][2][2][0]= ramki[i][2][0]
y2= ramki4[i][2][2][1]= ramki[i][2][1]
x3= ramki4[i][3][3][0]= ramki[i][3][0]
y3= ramki4[i][3][3][1]= ramki[i][3][1]
        # find the midpoints of the sides; they are also the corner coordinates of the inner frames
x01= ramki4[i][0][1][0]= ramki4[i][1][0][0]= (x0+x1)/2 #x0+(x1-x0)/2
y01= ramki4[i][0][1][1]= ramki4[i][1][0][1]= (y0+y1)/2
x12= ramki4[i][1][2][0]= ramki4[i][2][1][0]= (x1+x2)/2
y12= ramki4[i][1][2][1]= ramki4[i][2][1][1]= (y1+y2)/2
x23= ramki4[i][2][3][0]= ramki4[i][3][2][0]= (x2+x3)/2
y23= ramki4[i][2][3][1]= ramki4[i][3][2][1]= (y2+y3)/2
x30= ramki4[i][3][0][0]= ramki4[i][0][3][0]= (x3+x0)/2
y30= ramki4[i][3][0][1]= ramki4[i][0][3][1]= (y3+y0)/2
xm = ramki4[i][0][2][0]= ramki4[i][1][3][0]=ramki4[i][2][0][0]=ramki4[i][3][1][0]=(x01+x23)/2
ym = ramki4[i][0][2][1]= ramki4[i][1][3][1]=ramki4[i][2][0][1]=ramki4[i][3][1][1]=(y01+y23)/2
#print ("ramki[i]",ramki[i])
return ramki4
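# Example (added): a 4x4 square is split into four quadrant sub-frames; the first one is
#   make4RamkiFrom1([[[0, 0], [4, 0], [4, 4], [0, 4]]])[0][0]
#   -> [[0, 0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]]   (midpoint corners come out as floats)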
|
the-stack_0_20253 | # Django
from django.urls import include, path
# Django Rest Framework
from rest_framework import routers
# Views
from . import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet, basename='users')
router.register(r'meals', views.MealViewSet, basename='meals')
router.register(r'foods', views.FoodViewSet, basename='foods')
router.register(r'users-daily-meals', views.UserDailyMealViewSet, basename='users-daily-meals')
urlpatterns = [
path('', include(router.urls)),
] |
the-stack_0_20254 | #Title: Train.py
#Authors: Fischer ; Thimabru @ cyberlabs
#Date: 5/2021
#Usage: python3 train.py --cfg <config .yaml> [--exp_name <name>] [--logs_path <dir>] [--load_model <checkpoint>]
import torch
from tools.FASDataset import FASDataset
from tools.FASDataset_RAW import FASDataset_RAW
from trainer.FASTTrainer import FASTrainer
from utils.utils import read_cfg, save_cfg, get_optimizer, get_device, build_network
from torch.optim.lr_scheduler import StepLR
from torchvision import transforms
import os
import argparse
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import numpy as np
def training_data_RAW(cfg):
train_transform = transforms.Compose ([transforms.Resize(cfg['model']['input_size']),
transforms.ToTensor(),
transforms.Normalize(cfg['dataset']['mean'], cfg['dataset']['sigma'])])
trainset = FASDataset_RAW(root_dir=cfg['dataset']['root'],
# images_file=cfg['dataset']['train_images'],
images_file=cfg['dataset']['train_images'],
transform=train_transform,
smoothing=cfg['train']['smoothing'])
trainloader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=cfg['train']['batch_size'],
shuffle=True,
num_workers=2)
return trainloader
def training_data(cfg, max_d, pm):
train_transform = transforms.Compose ([transforms.Resize(cfg['model']['input_size']),
transforms.ToTensor(),
transforms.Normalize(cfg['dataset']['mean'], cfg['dataset']['sigma'])])
trainset = FASDataset(root_dir=cfg['dataset']['root'],
images_file=cfg['dataset']['train_images'],
transform=train_transform,
smoothing=cfg['train']['smoothing'],
max_d=max_d, pm=pm,
rot_crop=cfg['dataset']['augmentation']['rot_crop'])
print(f"Training on {len(trainset)} images")
trainloader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=cfg['train']['batch_size'],
shuffle=True,
num_workers=cfg['dataset']['num_workers'],
pin_memory=True)
return trainloader
def val_data(cfg, max_d, pm):
val_transform = transforms.Compose([transforms.Resize(cfg['model']['input_size']),
transforms.ToTensor(),
transforms.Normalize(cfg['dataset']['mean'], cfg['dataset']['sigma'])])
valset = FASDataset(root_dir=cfg['dataset']['root'],
images_file=cfg['dataset']['val_images'],
transform=val_transform,
smoothing=cfg['train']['smoothing'],
max_d=max_d, pm=pm,
rot_crop=False,
split_validation=0.4)
print(f"Validating on {len(valset)} images")
valloader = torch.utils.data.DataLoader(dataset=valset,
batch_size=cfg['val']['batch_size'],
shuffle=True,
num_workers=cfg['dataset']['num_workers'],
pin_memory=True)
return valloader
def find_mean(trainloader):
pbar = tqdm(trainloader, total=len(trainloader), dynamic_ncols=True)
x_max = []
x_min = []
y_max = []
y_min = []
z_max = []
z_min = []
for _, point_map, _ in pbar:
max_val = point_map.max(2)[0]
min_val = point_map.min(2)[0]
lx = max_val[:, 0]
sx = min_val[:, 0]
ly = max_val[:, 1]
sy = min_val[:, 1]
lz = max_val[:, 2]
sz = min_val[:, 2]
x_max.append(lx)
x_min.append(sx)
y_max.append(ly)
y_min.append(sy)
z_max.append(lz)
z_min.append(sz)
lx = torch.cat(x_max, axis=0).max()
sx = torch.cat(x_min, axis=0).min()
ly = torch.cat(y_max, axis=0).max()
sy = torch.cat(y_min, axis=0).min()
lz = torch.cat(z_max, axis=0).max()
sz = torch.cat(z_min, axis=0).min()
max_d = max([lx - sx, ly - sy, lz - sz])
pm = torch.tensor([((lx + sx)/2).item(), ((ly + sy)/2).item(),
((lz + sz)/2).item()])
return max_d, np.array(torch.unsqueeze(pm, axis=1))
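# Added note (assumption): max_d and pm are handed to FASDataset below, which presumably centers
# each 3D point map on pm and scales it by max_d, e.g. points_norm = (points - pm) / max_d, so that
# training and validation share the same normalization. This is inferred from the values computed
# here, not documented behavior.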
#=========================================
#=================MAIN====================
#=========================================
def main():
print("Starting training 3DPC_NET anti-spoofing")
print("Pytorch Version:" + torch.__version__)
print("Cuda Version:" + torch.version.cuda)
#parsing arguments----------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--load_model", type=str, help="Path where to load model to continue training")
parser.add_argument("--logs_path", type=str, default="experiments", help="Path where to save logs")
parser.add_argument("--exp_name", type=str, default="exp_1", help="Name of experiment folder to save logs")
parser.add_argument("--cfg", type=str, default="config/DPC3_NET_config.yaml", help="Path to config(.yaml) file")
args = parser.parse_args()
#---------------------------------------------------
print("Reading config file....")
cfg=read_cfg(cfg_file=args.cfg)
print ("ok")
device = get_device(cfg)
print('Using {} device'.format(device))
print("Load Network...")
network=build_network(cfg)
print("ok")
print("model:" + cfg['model']['base'])
print("Load Optimizer...")
optimizer=get_optimizer(cfg,network)
print("ok")
print("optimizer:" + cfg['train']['optimizer'])
print("Loss Funtion: Chamfer distance")
lr_scheduler = StepLR(optimizer, step_size=5, gamma=0.8)
print("Finding normalization values in training data...")
trainloader=training_data_RAW(cfg)
max_d, pm = find_mean(trainloader)
print("Ok")
print("========= Normalization values =========")
# TODO: Save normalization values on experiments folder
print(f"Maximum distance: {max_d}")
print(f"Medium point: {pm}")
print("Load training data...")
trainloader=training_data(cfg, max_d, pm)
print("Ok")
print("Load validation data...")
valloader=val_data(cfg, max_d, pm)
print("Ok")
logs_full_path = os.path.join(args.logs_path, args.exp_name)
if not os.path.exists(logs_full_path):
os.makedirs(logs_full_path)
save_cfg(cfg, os.path.join(logs_full_path, "train_config.yaml"))
print("Starting TensorBoard.....")
writer = SummaryWriter(f'{logs_full_path}/Tensoboard_logs')
print("Ok")
trainer= FASTrainer(cfg=cfg,
network=network,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
device=device,
trainloader=trainloader,
valloader=valloader,
writer=writer,
logs_path=logs_full_path,
model_path=args.load_model)
print("Starting training...")
trainer.train()
print("Finish Training")
writer.close()
#==============================================
if __name__=='__main__':
main()
|
the-stack_0_20255 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for filter commands """
from asyncio import sleep
from re import IGNORECASE, fullmatch
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP
from userbot.events import register
@register(incoming=True, disable_edited=True, disable_errors=True)
async def filter_incoming_handler(handler):
"""Checks if the incoming message contains handler of a filter"""
try:
if not (await handler.get_sender()).bot:
try:
from userbot.modules.sql_helper.filter_sql import get_filters
except AttributeError:
await handler.edit("`Running on Non-SQL mode!`")
return
name = handler.raw_text
filters = get_filters(handler.chat_id)
if not filters:
return
for trigger in filters:
pro = fullmatch(trigger.keyword, name, flags=IGNORECASE)
if pro:
if trigger.f_mesg_id:
msg_o = await handler.client.get_messages(
entity=BOTLOG_CHATID, ids=int(trigger.f_mesg_id)
)
await handler.reply(msg_o.message, file=msg_o.media)
elif trigger.reply:
await handler.reply(trigger.reply)
except AttributeError:
pass
@register(outgoing=True, pattern=r"^\.filter ((@)?\w*)")
async def add_new_filter(new_handler):
"""For .filter command, allows adding new filters in a chat"""
try:
from userbot.modules.sql_helper.filter_sql import add_filter
except AttributeError:
await new_handler.edit("`Running on Non-SQL mode!`")
return
keyword = new_handler.pattern_match.group(1)
string = new_handler.text.partition(keyword)[2]
msg = await new_handler.get_reply_message()
msg_id = None
if msg and msg.media and not string:
if BOTLOG_CHATID:
await new_handler.client.send_message(
BOTLOG_CHATID,
f"#FILTER\nCHAT ID: {new_handler.chat_id}\nTRIGGER: {keyword}"
"\n\nThe following message is saved as the filter's reply data for the chat, please do NOT delete it !!",
)
msg_o = await new_handler.client.forward_messages(
entity=BOTLOG_CHATID,
messages=msg,
from_peer=new_handler.chat_id,
silent=True,
)
msg_id = msg_o.id
else:
return await new_handler.edit(
"`Saving media as reply to the filter requires the BOTLOG_CHATID to be set.`"
)
elif new_handler.reply_to_msg_id and not string:
rep_msg = await new_handler.get_reply_message()
string = rep_msg.text
success = "`Filter` **{}** `{} successfully`"
if add_filter(str(new_handler.chat_id), keyword, string, msg_id) is True:
await new_handler.edit(success.format(keyword, "added"))
else:
await new_handler.edit(success.format(keyword, "updated"))
@register(outgoing=True, pattern=r"^\.stop ((@)?\w*)")
async def remove_a_filter(r_handler):
"""For .stop command, allows you to remove a filter from a chat."""
try:
from userbot.modules.sql_helper.filter_sql import remove_filter
except AttributeError:
return await r_handler.edit("`Running on Non-SQL mode!`")
filt = r_handler.pattern_match.group(1)
if not remove_filter(r_handler.chat_id, filt):
await r_handler.edit(f"`Filter` **{filt}** `doesn't exist.`")
else:
await r_handler.edit(f"`Filter` **{filt}** `was deleted successfully`")
@register(outgoing=True, pattern=r"^\.rmbotfilters (.*)")
async def kick_marie_filter(event):
""" For .rmfilters command, allows you to kick all \
Marie(or her clones) filters from a chat. """
bot_type = event.pattern_match.group(1).lower()
if bot_type not in ["marie", "rose"]:
return await event.edit("`That bot is not yet supported!`")
await event.edit("```Will be kicking away all Filters!```")
await sleep(3)
resp = await event.get_reply_message()
filters = resp.text.split("-")[1:]
for i in filters:
if bot_type.lower() == "marie":
await event.reply("/stop %s" % (i.strip()))
if bot_type.lower() == "rose":
i = i.replace("`", "")
await event.reply("/stop %s" % (i.strip()))
await sleep(0.3)
await event.respond("```Successfully purged bots filters yaay!```\n Gimme cookies!")
if BOTLOG:
await event.client.send_message(
BOTLOG_CHATID, "I cleaned all filters at " + str(event.chat_id)
)
@register(outgoing=True, pattern=r"^\.filters$")
async def filters_active(event):
"""For .filters command, lists all of the active filters in a chat."""
try:
from userbot.modules.sql_helper.filter_sql import get_filters
except AttributeError:
return await event.edit("`Running on Non-SQL mode!`")
transact = "`There are no filters in this chat.`"
filters = get_filters(event.chat_id)
for filt in filters:
if transact == "`There are no filters in this chat.`":
transact = "Active filters in this chat:\n"
transact += f"`{filt.keyword}`\n"
await event.edit(transact)
CMD_HELP.update(
{
"filter": ">`.filters`"
"\nUsage: Lists all active userbot filters in a chat."
"\n\n>`.filter <keyword> <reply text>` or reply to a message with >`.filter <keyword>`"
"\nUsage: Saves the replied message as a reply to the 'keyword'."
"\nThe bot will reply to the message whenever 'keyword' is mentioned."
"\nWorks with everything from files to stickers."
"\n\n>`.stop <filter>`"
"\nUsage: Stops the specified filter."
"\n\n>`.rmbotfilters <marie/rose>`"
"\nUsage: Removes all filters of admin bots (Currently supported: Marie, Rose and their clones.) in the chat."
}
)
|
the-stack_0_20256 | """This module is heavily inspired by
https://github.com/maximecb/gym-minigrid/blob/master/gym_minigrid/minigrid.py
This refactor adds GPU support
"""
from __future__ import annotations
import logging
from functools import wraps
from re import A
from typing import Any, Callable, Dict, List, Sequence, Tuple, Type, Union
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
from jax.numpy import ndarray as Array
from jax.random import KeyArray
from jax.tree_util import register_pytree_node_class
from . import rendering, tiles
from .tiles import AgentEast, AgentNorth, Empty, Floor, Goal, Tile
Point = Union[Tuple[float, float], Array, None]
ActionFunction = Callable[[Any], None]
ACTIONS: Dict[int, Callable[[Any], None]] = {}
def register_action(idx: int) -> Callable[[ActionFunction], ActionFunction]:
def register_action_ad_idx(action_fn: ActionFunction):
@wraps(action_fn)
def fn(grid: Any) -> None:
return action_fn(grid)
if idx in ACTIONS:
logging.warning(
f"Action with idx {idx} already register. Please use another idx."
)
return fn
ACTIONS[idx] = action_fn
return fn
return register_action_ad_idx
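# Example (added; `teleport` and index 11 are hypothetical and not part of this module):
#
#   @register_action(11)
#   def teleport(grid) -> None:
#       grid.place_agent(grid.free_pos())
#
# Once registered, the action becomes reachable through Grid.transition([11]).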
DIRECTIONS: Dict[int, Array] = {
# Pointing east
0: jnp.array((1, 0), dtype=int),
# Pointing south
1: jnp.array((0, 1)),
# Pointing west
2: jnp.array((-1, 0)),
# Pointing north
3: jnp.array((0, -1)),
}
@register_pytree_node_class
class Grid:
"""Square grid"""
_tiles_registry = tiles._registry
def __init__(
self,
size: int,
tiles: Array,
seed: int,
key: KeyArray,
):
assert size > 0, f"Size must be a positive number, got {size} instead"
assert size == len(
tiles
), f"Got incompatible size with tiles {size}, expected {len(tiles)}"
# static:
self.width: int = size
self.height: int = size
self.seed: int = seed
# dynamic:
self.tiles: Array = tiles
self.key: KeyArray = key
def tree_flatten(self):
return ((self.tiles, self.key), (self.seed,))
@classmethod
def tree_unflatten(cls, aux: Tuple[int], children: Tuple[Array, KeyArray]):
tiles, key = children
seed = aux[0]
return cls(len(tiles), tiles, seed, key)
@classmethod
def from_size(cls, size: int, seed: int = 0) -> Grid:
# `3` stands for floor
tiles: Array = jnp.ones((size, size), dtype=int) * 3
key: KeyArray = jax.random.PRNGKey(seed)
return cls(size, tiles, seed, key)
def transition(self, actions: List[int]):
for a in actions:
act = ACTIONS[int(a)]
act(self)
return
def next_key(self, n_keys: int = 1) -> KeyArray:
self.key, k = jax.random.split(self.key, n_keys + 1)
return k
def get(self, pos: Point) -> int:
if pos is None:
raise ValueError("Invalid position to inspect {}".format(pos))
obj = self.tiles[tuple(pos)]
return obj
def set(self, pos: Point, id: int):
if pos is None:
raise ValueError("Invalid position to inspect {}".format(pos))
pos = tuple(pos)
self.tiles = self.tiles.at[pos].set(id)
return
def is_free(self, pos: Point) -> bool:
if pos is None:
return False
return self.tiles[tuple(pos)] in (Empty.ID, Floor.ID)
def rand_pos(self) -> Array:
k = self.next_key()
return jax.random.randint(k, (2,), 1, self.tiles.shape[0] - 2)
def free_pos(self) -> Array:
pos, i = self.rand_pos(), 0
available_cells = ((self.tiles.shape[0] - 2) * 2) - 1
while i < available_cells:
i += 1
pos = self.rand_pos()
if not self.is_free(pos):
continue
return pos
raise ValueError(f"There are no positions availble on the grid")
def find_obj(self, obj: Type[Tile]) -> Sequence[Point]:
"""
Args:
obj Type[Tile]: The type of the tiles to search for
Returns:
A two-dimensional `Array` of shape (N, 2), containing
the N obj
"""
mask = self.tiles == obj.ID
return jnp.asarray(jnp.nonzero(mask)).T
def find_agent(self) -> Point:
mask = jnp.logical_and(AgentEast.ID <= self.tiles, self.tiles <= AgentNorth.ID)
return jnp.asarray(jnp.nonzero(mask, size=1, fill_value=0)).T[-1]
def place_obj(self, obj: Type[Tile], pos: Point = None, force: bool = False):
if 10 <= obj.ID <= 13:
return self.place_agent(pos, force=force)
# if no pos is specified, find a suitable one
if pos is None:
pos = self.free_pos()
# if position is not free, and we don't force placing
if not force and not self.is_free(pos):
pos = self.free_pos()
return self.set(pos, obj.ID)
def place_agent(self, pos: Point = None, direction: int = 0, force: bool = False):
if not (0 <= direction <= 3):
raise ValueError(
"agent's `direction` must be within `0` and `3`, got {} instaed".format(
direction
)
)
# if no pos is specified, find a suitable one
if pos is None or (not force and not self.is_free(pos)):
pos = self.free_pos()
# update agent's position
self.set(pos, AgentEast.ID + direction)
return
def visibility_mask(self, radius: int) -> Array:
a = self.find_agent()
shape = (self.width, self.height)
if a is None or radius == -1:
return jnp.ones(shape)
a_x, a_y = a
directions = {
0: (0, radius),
1: (radius, 0),
2: (a_x - 2 * radius, a_y - radius),
3: (a_x - radius, a_y - 2 * radius)
}
direction = self.tiles[tuple(a)] - AgentEast.ID
a_x, a_y = directions[direction]
b_x, b_y = (a_x + 2 * radius, a_y + 2 * radius)
x, y = jnp.mgrid[: self.width :, : self.height :]
mask = jnp.logical_and(x >= a_x, x <= b_x)
mask = jnp.logical_and(mask, y >= a_y)
mask = jnp.logical_and(mask, y <= b_y)
return mask
def as_rgb(self, visibility_radius: int = -1):
# TODO(ep): optimise this crap
mask = self.visibility_mask(visibility_radius).reshape(self.width**2)
tiles = self.tiles.reshape(self.width**2)
tiles = [
rendering.highlight(
self._tiles_registry[int(tile_idx)].as_rgb, mask[i] * 0.3
)
for i, tile_idx in enumerate(tiles)
]
images = jnp.concatenate(
[
jnp.concatenate(tiles[i * self.width : (i + 1) * self.width], axis=0)
for i in range(self.width)
],
axis=1,
)
return jnp.asarray(images, dtype=jnp.uint8)
def as_categorical(self):
return jnp.asarray(self.tiles, dtype=int)
@register_action(0)
def rotate_counterclockwise(self) -> None:
agent_pos = self.find_agent()
agent_id: int = self.tiles[agent_pos]
rotated_agent_id = (agent_id - 1 - 10) % 4 + 10
return self.set(agent_pos, rotated_agent_id)
@register_action(1)
def rotate_clockwise(self) -> None:
agent_pos = self.find_agent()
agent_id = self.tiles[agent_pos]
rotated_agent_id = (agent_id + 1 - 10) % 4 + 10
return self.set(agent_pos, rotated_agent_id)
@register_action(2)
def advance(self) -> None:
current_pos = self.find_agent()
agent_id = self.get(current_pos)
direction = agent_id - AgentEast.ID
dir_vector = DIRECTIONS[int(direction)]
new_pos = current_pos + dir_vector
tile_id = int(self.get(new_pos))
if not self._tiles_registry[tile_id].is_walkable:
return
self.place_agent(new_pos, direction)
self.place_obj(Floor, current_pos, True)
return
@register_action(3)
def pickup(self) -> None:
raise NotImplementedError
@register_action(4)
def drop(self) -> None:
raise NotImplementedError
@register_action(5)
def toggle(self) -> None:
raise NotImplementedError
@register_action(6)
def noop(self) -> None:
return
@register_action(7)
def rotate_north(self) -> None:
agent_pos = self.find_agent()
# east direction is categorical `3`
return self.place_agent(agent_pos, 3)
@register_action(8)
def rotate_east(self) -> None:
agent_pos = self.find_agent()
# east direction is categorical `0`
return self.place_agent(agent_pos, 0)
@register_action(9)
def rotate_south(self) -> None:
agent_pos = self.find_agent()
# east direction is categorical `1`
return self.place_agent(agent_pos, 1)
@register_action(10)
def rotate_west(self) -> None:
agent_pos = self.find_agent()
# east direction is categorical `2`
return self.place_agent(agent_pos, 2)
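# Minimal usage sketch (added; relies only on the methods and action indices defined above):
#
#   grid = Grid.from_size(8, seed=0)
#   grid.place_agent()                       # agent on a random free cell, facing east
#   grid.place_obj(Goal)
#   grid.transition([2, 1, 2])               # advance, rotate clockwise, advance
#   rgb = grid.as_rgb(visibility_radius=2)   # uint8 image with the visible area highlighted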
|
the-stack_0_20257 | #!/usr/bin/python3
"""
Unit Test for BaseModel Class
"""
from datetime import datetime
import inspect
import json
import models
from os import environ, stat
import pep8
import unittest
BaseModel = models.base_model.BaseModel
STORAGE_TYPE = environ.get('HBNB_TYPE_STORAGE')
class TestBaseModelDocs(unittest.TestCase):
"""Class for testing BaseModel docs"""
all_funcs = inspect.getmembers(BaseModel, inspect.isfunction)
@classmethod
def setUpClass(cls):
print('\n\n.................................')
print('..... Testing Documentation .....')
print('..... For BaseModel Class .....')
print('.................................\n\n')
def test_doc_file(self):
"""... documentation for the file"""
expected = '\nBaseModel Class of Models Module\n'
actual = models.base_model.__doc__
self.assertEqual(expected, actual)
def test_doc_class(self):
"""... documentation for the class"""
expected = ('\n attributes and functions for BaseModel class\n'
' ')
actual = BaseModel.__doc__
self.assertEqual(expected, actual)
def test_all_function_docs(self):
"""... tests for ALL DOCS for all functions in db_storage file"""
all_functions = TestBaseModelDocs.all_funcs
for function in all_functions:
self.assertIsNotNone(function[1].__doc__)
def test_pep8_base_model(self):
"""... base_model.py conforms to PEP8 Style"""
pep8style = pep8.StyleGuide(quiet=True)
errors = pep8style.check_files(['models/base_model.py'])
self.assertEqual(errors.total_errors, 0, errors.messages)
def test_file_is_executable(self):
"""... tests if file has correct permissions so user can execute"""
file_stat = stat('models/base_model.py')
permissions = str(oct(file_stat[0]))
actual = int(permissions[5:-2]) >= 5
self.assertTrue(actual)
@unittest.skipIf(STORAGE_TYPE == 'db', 'DB Storage does not store BaseModel')
class TestBaseModelInstances(unittest.TestCase):
"""testing for class instances"""
@classmethod
def setUpClass(cls):
print('\n\n.................................')
print('....... Testing Functions .......')
print('..... For BaseModel Class .....')
print('.................................\n\n')
def setUp(self):
"""initializes new BaseModel instance for testing"""
self.model = BaseModel()
def test_instantiation(self):
"""... checks if BaseModel is properly instantiated"""
self.assertIsInstance(self.model, BaseModel)
def test_to_string(self):
"""... checks if BaseModel is properly casted to string"""
my_str = str(self.model)
my_list = ['BaseModel', 'id', 'created_at']
actual = 0
for sub_str in my_list:
if sub_str in my_str:
actual += 1
self.assertTrue(3 == actual)
def test_instantiation_no_updated(self):
"""... should not have updated attribute"""
my_str = str(self.model)
actual = 0
if 'updated_at' in my_str:
actual += 1
self.assertTrue(0 == actual)
def test_save(self):
"""... save function should add updated_at attribute"""
self.model.save()
actual = type(self.model.updated_at)
expected = type(datetime.now())
self.assertEqual(expected, actual)
def test_to_json(self):
"""... to_json should return serializable dict object"""
my_model_json = self.model.to_json()
actual = 1
try:
serialized = json.dumps(my_model_json)
except:
actual = 0
self.assertTrue(1 == actual)
def test_json_class(self):
"""... to_json should include class key with value BaseModel"""
my_model_json = self.model.to_json()
actual = None
if my_model_json['__class__']:
actual = my_model_json['__class__']
expected = 'BaseModel'
self.assertEqual(expected, actual)
def test_name_attribute(self):
"""... add name attribute"""
self.model.name = "CodeSchool"
actual = self.model.name
expected = "CodeSchool"
self.assertEqual(expected, actual)
def test_number_attribute(self):
"""... add number attribute"""
self.model.number = 98
actual = self.model.number
self.assertTrue(98 == actual)
if __name__ == '__main__':
"""
MAIN TESTS
"""
    unittest.main()
|
the-stack_0_20258 | import errno
try:
import fcntl
except ImportError:
fcntl = None
from bare_locks.abstract import FileLockingMechanism
from bare_locks.abstract import RelativeTo
class FcntlMechanism(FileLockingMechanism):
"""
A file locking mechanism based on fcntl.
"""
available = fcntl is not None
can_share = True
can_block = True
can_switch = True
@staticmethod
def lock(handle,
exclusive: bool = True,
blocking: bool = False,
offset: int = 0,
length: int = 0,
relative_to: RelativeTo = 'start') -> bool:
"""Acquire a lock on the byte range of the file.
The byte range is computed as
[relative_to + offset, relative_to + offset + length - 1]
relative_to can be one of
[start of the file, current cursor position, end of the file]
offset is an integer such that
relative_to + offset >= 0
and length is a non-negative integer with 0 exceptionally meaning ∞.
Byte range can exceed the size of the file.
Acquiring a lock can fail if the file or range is already locked by
another process, or because of some unexpected issue. The former
(normal) failure is reported by the return value, the latter by an
exception.
Parameters
----------
handle:
File handle
exclusive:
Whether to acquire an exclusive or shared lock
blocking:
Whether to block until a lock is acquired
offset:
Offset (in bytes) of the byte range to lock (default=0)
length:
Length (in bytes) of the byte range to lock, with 0 being a special
value meaning "until infinity" (default=0)
relative_to:
File position relative to which the byte range offset is computed.
Can be either 'start' of the file, 'current' position or 'end' of
the file. (default='start')
Returns
-------
bool
Whether a lock was acquired
"""
flags = 0
if exclusive:
flags |= fcntl.LOCK_EX
if not exclusive:
flags |= fcntl.LOCK_SH
if not blocking:
flags |= fcntl.LOCK_NB
if relative_to == 'start':
whence = 0
elif relative_to == 'current':
whence = 1
elif relative_to == 'end':
whence = 2
else:
raise ValueError(f"relative_to should be 'start', 'current', or 'end', "
f"received {relative_to}!")
try:
fcntl.lockf(handle, flags, length, offset, whence)
return True
except OSError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
return False
else:
raise e
@staticmethod
def unlock(handle,
offset: int = 0,
length: int = 0,
relative_to: RelativeTo = 'start'):
"""Release a lock on the byte range of the file.
The byte range is computed as
[relative_to + offset, relative_to + offset + length - 1]
relative_to can be one of
[start of the file, current cursor position, end of the file]
offset is an integer such that
relative_to + offset >= 0
and length is a non-negative integer with 0 exceptionally meaning ∞.
Byte range can exceed the size of the file.
Releasing a byte range for which no lock was held does nothing.
Parameters
----------
handle:
File handle
offset:
Offset (in bytes) of the byte range to lock (default=0)
length:
Length (in bytes) of the byte range to lock, with 0 being a special
value meaning "until infinity" (default=0)
relative_to:
File position relative to which the byte range offset is computed.
Can be either 'start' of the file, 'current' position or 'end' of
the file. (default='start')
"""
if relative_to == 'start':
whence = 0
elif relative_to == 'current':
whence = 1
elif relative_to == 'end':
whence = 2
else:
raise ValueError(f"relative_to should be 'start', 'current', or 'end', "
f"received {relative_to}!")
fcntl.lockf(handle, fcntl.LOCK_UN, length, offset, whence)
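# Usage sketch (added; POSIX only, "data.bin" is a placeholder path):
#
#   with open("data.bin", "r+b") as f:
#       if FcntlMechanism.lock(f, exclusive=True, blocking=False, offset=0, length=4096):
#           try:
#               pass  # the first 4096 bytes are locked for this process
#           finally:
#               FcntlMechanism.unlock(f, offset=0, length=4096)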
|