{
"source": "jdhuang-csm/perovskite-featurizer",
"score": 3
}
#### File: perovskite-featurizer/perovskite_ml/feature_select.py
```python
import numpy as np
import pandas as pd
# from dcor_jh import distcorr_array
from sklearn.preprocessing import StandardScaler
#------------
# functions for SIS
# could be used to simplify rank_correlation func below
#------------
def xy_correlation_mag(X,y,standardize=True):
"get correlation magnitude of each column of X with y"
if standardize==True:
ss = StandardScaler()
X = ss.fit_transform(X)
return np.abs(np.dot(X.T,y))
def top_n_features(X,y,n,standardize=True):
"get top n features by correlation magnitude"
corr = xy_correlation_mag(X,y,standardize)
    # argsort is ascending; reverse it and take the first n indices for the top n features
    return np.argsort(corr)[::-1][:n]
######
def remove_invariant_cols(df):
std = df.std()
zero_var = std.index[std==0]
print('Removed invariant columns:',zero_var)
return df.drop(zero_var,axis=1)
def remove_invalid_cols(df):
    invalid = df.columns[df.isnull().any()]
print('Removed columns with invalid values:',invalid)
return df.drop(invalid,axis=1)
def rank_correlation(df,response_col, return_coef=False,corr_type='pearson'):
"""
Rank features by correlation to response
Args:
df: DataFrame with features and response
response_col: response column name
return_coef: if True, return ranked coefficients in addition to ranked feature names
corr_type: correlation coefficient calculation. Options: 'pearson','distance'
Returns:
feature_names: feature names sorted by descending correlation with response
        corr: correlation coefficient magnitudes, sorted in the same order (returned only if return_coef=True)
"""
response_idx = list(df.columns).index(response_col)
if corr_type=='pearson':
corr = np.corrcoef(df,rowvar=False)
#elif corr_type=='distance':
# corr = distcorr_array(df.values)
else:
raise ValueError(f'Invalid correlation type {corr_type}')
# get magnitude of each feature's correlation to the response
response_corr = np.abs(np.nan_to_num(corr,0))[response_idx]
sort_idx = np.argsort(response_corr)[::-1]
# remove the response column
sort_idx = sort_idx[sort_idx!=response_idx]
if return_coef is True:
return df.columns[sort_idx], response_corr[sort_idx]
else:
return df.columns[sort_idx]
def get_linked_groups(X, thresh, return_names=True):
"""
Get groups of directly and indirectly correlated columns
Args:
X: matrix or DataFrame. Rows are observations, columns are features
thresh: correlation coefficient threshold for linking
return_names: if True and X is DataFrame, return column names. Else return indexes
"""
corrcoeff = np.corrcoef(X,rowvar=False)
correlated_columns = np.where(np.abs(np.triu(np.nan_to_num(corrcoeff,0),1))>=thresh)
# corr_nodiag = corrcoeff - np.diag(np.diag(corrcoeff))
# correlated_columns = np.where(np.abs(np.nan_to_num(corr_nodiag,0))>=thresh)
groups = []
for num in np.unique(correlated_columns[0]):
in_set = [num in numset for numset in groups]
if max(in_set,default=False)==False:
#if not already in a set, get correlated var nums and check if they belong to an existing set
cnums = correlated_columns[1][np.where(correlated_columns[0]==num)]
numset = set([num] + list(cnums))
            #check if numset intersects an existing set
            intersect_idx = [i for i, group in enumerate(groups) if len(numset & group) > 0]
            #if numset intersects an existing set, merge it into that set in place
            if len(intersect_idx) > 0:
                groups[intersect_idx[0]] |= numset
                #print('case 1 existing group:', num, groups[intersect_idx[0]])
            #otherwise, make new set
            else:
                groups.append(numset)
                #print('new group:', num, cnums)
else:
#if already in a set, get correlated var nums and add to set
group = groups[in_set.index(True)]
cnums = correlated_columns[1][np.where(correlated_columns[0]==num)]
group |= set(cnums) #union
#print('case 2 existing group:', num, group)
#some links may not be captured. Ex: 1 -> {4,5}. 2 -> 3. 3 -> 4. Now groups are: {1,4,5}, {2,3,4} - need to combine
#safety net - combine groups that share common elements
for i,group1 in enumerate(groups):
for group2 in groups[i+1:]:
if len(group1 & group2) > 0:
group1 |= group2
groups.remove(group2)
if type(X)==pd.core.frame.DataFrame and return_names==True:
# return column names instead of indexes
groups = [set([X.columns[idx] for idx in g]) for g in groups]
return groups
def choose_independent_features(X,thresh,response_col=0,drop_invariant=True):
"""
Choose features that correlate best with the response and are not correlated with each other.
Identify correlation groups and keep the single feature with the strongest correlation to the response from each group.
Args:
X: matrix or dataframe. Rows are observations, columns are features
thresh: correlation coefficient threshold
response_col: column index (or name, if X is a DataFrame) for response. Default 0
drop_invariant: if True, drop columns with zero variance
"""
groups = get_linked_groups(X,thresh=thresh,return_names=False)
corrcoeff = np.corrcoef(X,rowvar=False)
#get list of all columns in linked groups
correlated = sum([list(group) for group in groups],[])
#get list of unlinked columns
independent = set(np.arange(corrcoeff.shape[0])) - set(correlated)
#for each linked group, keep the feature that correlates most strongly with the response
if type(response_col)==str:
# convert column name to index
response_col = list(X.columns).index(response_col)
keep = []
for group in groups:
max_idx = np.argmax(np.abs(corrcoeff[response_col,list(group)]))
keep.append(list(group)[max_idx])
#print(keep)
keep += list(independent)
#print(keep)
#check
check1 = (len(correlated) + len(independent) == corrcoeff.shape[0])
check2 = (len(correlated) + len(keep) - len(groups) == corrcoeff.shape[0])
if min(check1,check2)==False:
raise Exception('Number of correlated and independent features do not match total number')
if drop_invariant==True:
invariant = list(np.where(np.nan_to_num(corrcoeff,0)[response_col]==0)[0])
keep = list(set(keep) - set(invariant))
#print(invariant)
# don't keep the response
if response_col in keep:
keep.remove(response_col)
if type(X)==pd.core.frame.DataFrame:
# return column names instead of indexes
keep = [X.columns[k] for k in keep]
return keep
```
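The selection helpers above can be chained into a simple workflow. Below is a minimal usage sketch (not part of the repository) on synthetic data; it assumes the module is importable as `perovskite_ml.feature_select`, and the feature and response names are made up for illustration.
```python
import numpy as np
import pandas as pd

from perovskite_ml.feature_select import (
    top_n_features, rank_correlation, get_linked_groups, choose_independent_features
)

rng = np.random.default_rng(0)
n = 200
f1 = rng.normal(size=n)
f2 = 2 * f1 + rng.normal(scale=0.01, size=n)   # nearly collinear with f1
f3 = rng.normal(size=n)                        # independent feature
y = f1 + 0.5 * f3 + rng.normal(scale=0.1, size=n)
df = pd.DataFrame({'y': y, 'f1': f1, 'f2': f2, 'f3': f3})

# SIS-style screen: indices of the 2 columns most correlated with y
top2 = top_n_features(df[['f1', 'f2', 'f3']].values, y, n=2)

# all features ranked by |Pearson correlation| with the response
ranked = rank_correlation(df, 'y')

# groups of mutually correlated columns (here f1 and f2 should be linked)
groups = get_linked_groups(df, thresh=0.95)

# keep one representative per linked group plus all unlinked features
keep = choose_independent_features(df, thresh=0.95, response_col='y')
print(top2, ranked.tolist(), groups, keep, sep='\n')
```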
#### File: perovskite-featurizer/perovskite_ml/featurizers.py
```python
import numpy as np
import pandas as pd
import pymatgen as mg
import collections
from pymatgen.core.composition import Composition
from pymatgen.core.molecular_orbitals import MolecularOrbitals
from calc_chemfeat import Perovskite
from matminer.featurizers.conversions import StrToComposition
from matminer.featurizers.composition import ElementProperty, ValenceOrbital, CohesiveEnergy
from matminer.featurizers.base import BaseFeaturizer
from warnings import warn
print('loaded featurizers')
class AtomicOrbitalsMod(BaseFeaturizer):
"""
*Modified from matminer class to handle cases where LUMO is None*
Determine HOMO/LUMO features based on a composition.
The highest occupied molecular orbital (HOMO) and lowest unoccupied
    molecular orbital (LUMO) are estimated from the atomic orbital energies
of the composition. The atomic orbital energies are from NIST:
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
Warning:
For compositions with inter-species fractions greater than 10,000 (e.g.
dilute alloys such as FeC0.00001) the composition will be truncated (to Fe
in this example). In such extreme cases, the truncation likely reflects the
true physics of the situation (i.e. that the dilute element does not
significantly contribute orbital character to the band structure), but the
user should be aware of this behavior.
"""
def featurize(self, comp):
"""
Args:
comp: (Composition)
pymatgen Composition object
Returns:
HOMO_character: (str) orbital symbol ('s', 'p', 'd', or 'f')
HOMO_element: (str) symbol of element for HOMO
HOMO_energy: (float in eV) absolute energy of HOMO
LUMO_character: (str) orbital symbol ('s', 'p', 'd', or 'f')
LUMO_element: (str) symbol of element for LUMO
LUMO_energy: (float in eV) absolute energy of LUMO
gap_AO: (float in eV)
                the estimated bandgap from HOMO and LUMO energies
"""
integer_comp, factor = comp.get_integer_formula_and_factor()
# warning message if composition is dilute and truncated
if not (len(Composition(comp).elements) ==
len(Composition(integer_comp).elements)):
warn('AtomicOrbitals: {} truncated to {}'.format(comp,
integer_comp))
homo_lumo = MolecularOrbitals(integer_comp).band_edges
feat = collections.OrderedDict()
for edge in ['HOMO', 'LUMO']:
if homo_lumo[edge] is not None:
feat['{}_character'.format(edge)] = homo_lumo[edge][1][-1]
feat['{}_element'.format(edge)] = homo_lumo[edge][0]
feat['{}_energy'.format(edge)] = homo_lumo[edge][2]
else:
#if LUMO is None
feat['{}_character'.format(edge)] = 'na'
feat['{}_element'.format(edge)] = 'na'
#unclear what this value should be. Arbitrarily set to 0. Don't want NaN for modeling
feat['{}_energy'.format(edge)] = 0
feat['gap_AO'] = feat['LUMO_energy'] - feat['HOMO_energy']
return list(feat.values())
def feature_labels(self):
feat = []
for edge in ['HOMO', 'LUMO']:
feat.extend(['{}_character'.format(edge),
'{}_element'.format(edge),
'{}_energy'.format(edge)])
feat.append("gap_AO")
return feat
def citations(self):
return [
"@article{PhysRevA.55.191,"
"title = {Local-density-functional calculations of the energy of atoms},"
"author = {Kotochigova, <NAME>, <NAME>. and Shirley, "
"<NAME>. and Stiles, <NAME>. and Clark, <NAME>.},"
"journal = {Phys. Rev. A}, volume = {55}, issue = {1}, pages = {191--199},"
"year = {1997}, month = {Jan}, publisher = {American Physical Society},"
"doi = {10.1103/PhysRevA.55.191}, "
"url = {https://link.aps.org/doi/10.1103/PhysRevA.55.191}}"]
def implementors(self):
return ['<NAME>', '<NAME>']
class PerovskiteProperty(BaseFeaturizer):
"""
Class to calculate perovskite features. Includes custom features from the Perovskite class and generic features from ElementProperty,
AtomicOrbitals, ValenceOrbital, and CohesiveEnergy matminer featurizers.
Options for initializing:
ordered_formula_featurizer(): for featurizing ordered formulas
cation_site_featurizer(): for featurizing unordered formulas based on user-provided cation site assignments
from_preset(): load a preset
The class can also be called manually, but be aware that different parameter sets are required for an ordered formula featurizer instance than for a cation site featurizer instance.
Parameters:
-----------
cation_site: dict of site assignments for cations, i.e. {el:site}. Elements not in cation_site are assumed to be anions on X-site
site_ox_lim: dict of oxidation state limits for each site, i.e. {site:[min,max]}. Elements on sites are limited to oxidation states within these limits
site_base_ox: dict of base oxidation state for each site, i.e. {site:ox}. Used for determining aliovalent ions and acceptor/donor dopants
ordered_formulas: if True, determine cation site assignments from order
A_site_occupancy: Number of atoms on A site. Used when ordered_formulas is True
anions: list of anions. Used when ordered_formulas is True
Parameters for ordered formula featurizer: site_ox_lim, site_base_ox, A_site_occupancy, anions
Parameters for cation site featurizer: cation_site, site_ox_lim, site_base_ox
"""
def __init__(self, cation_site=None, site_ox_lim={'A':[0,10],'B':[0,10],'X':[-10,0]}, site_base_ox={'A':2,'B':4,'X':-2},
ordered_formulas=False,A_site_occupancy=1,anions=None):
if cation_site is None and ordered_formulas is False:
raise ValueError('Either cation sites must be assigned, or formulas must be ordered. Otherwise site assignments can not be determined')
self.cation_site = cation_site
self.site_ox_lim = site_ox_lim
self.site_base_ox = site_base_ox
self.ordered_formulas = ordered_formulas
self.A_site_occupancy = A_site_occupancy
self.anions = anions
#matminer featurizers
self.ValenceOrbital = ValenceOrbital()
self.AtomicOrbitals = AtomicOrbitalsMod()
self.CohesiveEnergy = CohesiveEnergy()
#custom ElementProperty featurizer
elemental_properties = ['BoilingT', 'MeltingT',
'BulkModulus', 'ShearModulus',
'Row', 'Column', 'Number', 'MendeleevNumber', 'SpaceGroupNumber',
'Density','MolarVolume',
'FusionEnthalpy','HeatVaporization',
'NsUnfilled','NpUnfilled','NdUnfilled','NfUnfilled',
'Polarizability',
'ThermalConductivity']
self.ElementProperty = ElementProperty(data_source='magpie',features=elemental_properties,
stats=["mean", "std_dev", "range"])
self.check_matminer_featurizers()
self.featurize_options = {}
@classmethod
def from_preset(cls, preset_name):
"""
Initialize from preset
Parameters:
-----------
preset_name: name of preset to load. Currently accepts 'BCFZY'
"""
if preset_name=='BCFZY':
#Ba(Co,Fe,Zr,Y)O_3-d system
cation_site={'Ba':'A','Co':'B','Fe':'B','Zr':'B','Y':'B'}
site_ox_lim={'A':[2,2],'B':[2,4],'X':[-2,-2]}
site_base_ox={'A':2,'B':4,'X':-2}
else:
raise ValueError("Invalid preset_name specified!")
return cls(cation_site,site_ox_lim, site_base_ox)
@classmethod
def ordered_formula_featurizer(cls,A_site_occupancy=1,anions=None,site_ox_lim={'A':[0,10],'B':[0,10],'X':[-10,0]}, site_base_ox={'A':2,'B':4,'X':-2}):
"""
Convenience method for instantiating a featurizer for ordered formulas
"""
return cls(cation_site=None,site_ox_lim=site_ox_lim,site_base_ox=site_base_ox,ordered_formulas=True,A_site_occupancy=A_site_occupancy,anions=anions)
@classmethod
def cation_site_featurizer(cls,cation_site, site_ox_lim={'A':[0,10],'B':[0,10],'X':[-10,0]}, site_base_ox={'A':2,'B':4,'X':-2}):
"""
Convenience method for instantiating a featurizer for unordered formulas, based on site assignments
"""
return cls(cation_site,site_ox_lim,site_base_ox)
@property
def ElementProperty_custom_labels(self):
"""
Generate custom labels for ElementProperty featurizer that follow same naming convention as Perovskite class
"""
elemental_property_label_map = {'BoilingT':'boil_temp','MeltingT':'melt_temp',
'BulkModulus':'bulk_mod','ShearModulus':'shear_mod',
'Row':'row','Column':'column','Number':'number','MendeleevNumber':'mendeleev','SpaceGroupNumber':'space_group',
'Density':'density','MolarVolume':'molar_vol',
'FusionEnthalpy':'H_fus','HeatVaporization':'H_vap',
'NsUnfilled':'valence_unfilled_s','NpUnfilled':'valence_unfilled_p','NdUnfilled':'valence_unfilled_d','NfUnfilled':'valence_unfilled_f',
'Polarizability':'polarizability',
'ThermalConductivity':'sigma_therm'}
element_property_labels = list(map(elemental_property_label_map.get,self.ElementProperty.features))
labels = []
for attr in element_property_labels:
for stat in self.ElementProperty.stats:
if stat=='std_dev':
stat = 'std'
labels.append(f'{attr}_{stat}')
return labels
@property
def ElementProperty_categories(self):
"""
Generate categories for ElementProperty featurizer
"""
elemental_property_category_map = {'BoilingT':'elemental','MeltingT':'elemental',
'BulkModulus':'elemental','ShearModulus':'elemental',
'Row':'periodic','Column':'periodic','Number':'periodic','MendeleevNumber':'periodic','SpaceGroupNumber':'periodic',
'Density':'elemental','MolarVolume':'elemental',
'FusionEnthalpy':'elemental','HeatVaporization':'elemental',
'NsUnfilled':'electronic','NpUnfilled':'electronic','NdUnfilled':'electronic','NfUnfilled':'electronic',
'Polarizability':'elemental',
'ThermalConductivity':'elemental'}
element_property_categories = list(map(elemental_property_category_map.get,self.ElementProperty.features))
categories = []
for ep_cat in element_property_categories:
for stat in self.ElementProperty.stats:
categories.append(ep_cat)
return categories
@property
def ElementProperty_units(self):
"""
Generate units for ElementProperty featurizer
"""
elemental_property_unit_map = {'BoilingT':'temp','MeltingT':'temp',
'BulkModulus':'pressure','ShearModulus':'pressure',
'Row':'none','Column':'none','Number':'none','MendeleevNumber':'none','SpaceGroupNumber':'none',
'Density':'density','MolarVolume':'volume',
'FusionEnthalpy':'energy','HeatVaporization':'energy',
'NsUnfilled':'none','NpUnfilled':'none','NdUnfilled':'none','NfUnfilled':'none',
'Polarizability':'polarizability', #complex units - doesn't matter
'ThermalConductivity':'therm'} #complex units - doesn't matter
element_property_units = list(map(elemental_property_unit_map.get,self.ElementProperty.features))
units = []
for ep_unit in element_property_units:
for stat in self.ElementProperty.stats:
units.append(ep_unit)
return units
def ElementProperty_label_check(self):
"""
Check that ElementProperty feature labels are as expected
If not, features may not align with feature labels
"""
#ElementProperty.feature_labels() code as of 2/17/19
labels = []
for attr in self.ElementProperty.features:
src = self.ElementProperty.data_source.__class__.__name__
for stat in self.ElementProperty.stats:
labels.append("{} {} {}".format(src, stat, attr))
if labels!=self.ElementProperty.feature_labels():
raise Exception('ElementProperty features or labels have changed')
def set_featurize_options(self,sites,ox_stats=['min','max','mean','median','std','range'],ep_stats=["mean", "std_dev", "range"],radius_type='ionic_radius',normalize_formula=True,silent=True,categories=None):
"""
Set options for featurization. Since these options should be the same for all compositions in a batch, set for the featurizer instance rather than passing as args to featurize()
so that they do not have to be duplicated in every row of a DataFrame when calling featurize_dataframe().
Since these options change the number and meaning of features returned, it's also safest to set for the whole instance for consistency.
Parameters:
-----------
sites: list or string of sites to featurize. Any combination of 'A', 'B', 'X', and/or 'comp' accepted.
Composition-level, oxidation-state-dependent features are always calculated by the Perovskite class. Passing '' or [] will return only these features.
Specifying 'A','B', and/or 'X' sites will calculate site-level features for these sites (oxidation-state independent and dependent features, and matminer features).
Including 'comp' will calculate oxidation-state-independent features and matminer features for the full composition.
ox_stats: list of aggregate functions to apply to oxidation state combinations for feature generation using Perovskite class.
Options: 'min','max','mean','median','std','range'
ep_stats: ElementProperty stats. Options: "minimum", "maximum", "range", "mean", "avg_dev", "mode"
radius_type: Shannon radius type to use in features. Accepts 'crystal_radius' or 'ionic_radius'
normalize_formula: if True, normalize formula such that higher occupancy cation site has one formula unit (applies to Perovskite class only)
        silent: if False, print informational messages from Perovskite class
categories: list of feature categories to return. If None, return all. Options: 'bonding','structure','charge','composition','electronic','elemental','periodic'
"""
feat_options = dict(sites=sites,ox_stats=ox_stats,radius_type=radius_type,normalize_formula=normalize_formula,silent=silent)
self.featurize_options.update(feat_options)
self.ElementProperty.stats = ep_stats
def featurize(self,formula):
"""
Calculate features
Parameters:
-----------
formula: chemical formula string
Returns: list of feature values
"""
if self.featurize_options=={}:
raise Exception('Featurize options have not been set. Use set_featurize_options before featurizing')
if self.ordered_formulas is True:
pvsk = Perovskite.from_ordered_formula(formula, self.A_site_occupancy, self.anions, site_ox_lim = self.site_ox_lim, site_base_ox = self.site_base_ox,
radius_type=self.featurize_options['radius_type'],silent=self.featurize_options['silent'])
elif self.ordered_formulas is False:
pvsk = Perovskite(formula, self.cation_site, self.site_ox_lim, self.site_base_ox,self.featurize_options['radius_type'],self.featurize_options['normalize_formula'],
self.featurize_options['silent'])
pvsk_features = pvsk.featurize(self.featurize_options['sites'],self.featurize_options['ox_stats'])
mm_features = []
for site in self.featurize_options['sites']:
vo_features = self.ValenceOrbital.featurize(pvsk.site_composition[site]) #avg and frac s, p , d, f electrons
vo_features += [sum(vo_features[0:3])] #avg total valence electrons
ao_features = self.AtomicOrbitals.featurize(pvsk.site_composition[site]) #HOMO and LUMO character and energy levels (from atomic orbitals)
ao_features = [ao_features[i] for i in range(len(ao_features)) if i not in (0,1,3,4)] #exclude HOMO_character,HOMO_element, LUMO_character, LUMO_element - categoricals
ce_features = self.CohesiveEnergy.featurize(pvsk.site_composition[site],formation_energy_per_atom=1e-10) #avg elemental cohesive energy
ep_features = self.ElementProperty.featurize(pvsk.site_composition[site]) #elemental property features
mm_features += vo_features + ao_features + ce_features + ep_features
features = list(pvsk_features) + mm_features
return features
@property
def matminer_labels(self):
"""
Feature labels for matminer-derived features
"""
labels = [
#ValenceOrbital labels
'valence_elec_s_mean',
'valence_elec_p_mean',
'valence_elec_d_mean',
'valence_elec_f_mean',
'valence_elec_s_frac',
'valence_elec_p_frac',
'valence_elec_d_frac',
'valence_elec_f_frac',
'valence_elec_tot_mean',
#AtomicOrbitals labels
#'HOMO_character',
'HOMO_energy',
#'LUMO_character',
'LUMO_energy',
'AO_gap',
#CohesiveEnergy labels
'cohesive_energy_mean']
#ElementProperty labels
labels += self.ElementProperty_custom_labels
return labels
@property
def matminer_categories(self):
"""
Feature categories for matminer-derived features
"""
categories = [
#ValenceOrbital categories
'electronic',
'electronic',
'electronic',
'electronic',
'electronic',
'electronic',
'electronic',
'electronic',
'electronic',
#AtomicOrbitals categories
#'HOMO_character',
'electronic',
#'LUMO_character',
'electronic',
'electronic',
#CohesiveEnergy categories
'bonding']
#ElementProperty categories
categories += self.ElementProperty_categories
return categories
@property
def matminer_units(self):
"""
Feature units for matminer-derived features
"""
units = [
#ValenceOrbital units
'none',
'none',
'none',
'none',
'none',
'none',
'none',
'none',
'none',
#AtomicOrbitals units
#'HOMO_character',
'energy',
#'LUMO_character',
'energy',
'energy',
#CohesiveEnergy units
'energy']
#ElementProperty units
units += self.ElementProperty_units
return units
def feature_labels(self):
"""
Get list of feature labels
"""
try:
pvsk_labels = Perovskite.from_preset('BaCoO3','BCFZY',silent=True).feature_labels(self.featurize_options['sites'],self.featurize_options['ox_stats'])
except KeyError:
raise Exception('Featurize options have not been set. Use set_featurize_options before accessing feature labels')
mm_labels = []
for site in self.featurize_options['sites']:
if site=='comp':
site_label = 'comp'
else:
site_label = f'{site}site'
mm_labels += [f'{site_label}_{label}' for label in self.matminer_labels]
return pvsk_labels + mm_labels
def feature_categories(self):
"""
Get list of feature categories. For quick filtering
"""
try:
pvsk_categories = Perovskite.from_preset('BaCoO3','BCFZY',silent=True).feature_categories(self.featurize_options['sites'],self.featurize_options['ox_stats'])
except KeyError:
raise Exception('Featurize options have not been set. Use set_featurize_options before accessing feature labels')
mm_categories = []
for site in self.featurize_options['sites']:
mm_categories += self.matminer_categories
return pvsk_categories + mm_categories
def feature_units(self):
"""
        Get list of feature units. For dimensional analysis
"""
try:
pvsk_units = Perovskite.from_preset('BaCoO3','BCFZY',silent=True).feature_units(self.featurize_options['sites'],self.featurize_options['ox_stats'])
except KeyError:
raise Exception('Featurize options have not been set. Use set_featurize_options before accessing feature labels')
mm_units = []
for site in self.featurize_options['sites']:
mm_units += self.matminer_units
return pvsk_units + mm_units
def check_matminer_featurizers(self):
"""
Check that features and feature order for matminer featurizers are as expected
If features or feature order have changed, featurize() may return unexpected features that do not align with feature_labels()
"""
#verify that matminer feature labels haven't changed
if self.ValenceOrbital.feature_labels() != ['avg s valence electrons',
'avg p valence electrons',
'avg d valence electrons',
'avg f valence electrons',
'frac s valence electrons',
'frac p valence electrons',
'frac d valence electrons',
'frac f valence electrons']:
raise Exception('ValenceOrbital features or labels have changed')
if self.AtomicOrbitals.feature_labels() != ['HOMO_character',
'HOMO_element',
'HOMO_energy',
'LUMO_character',
'LUMO_element',
'LUMO_energy',
'gap_AO']:
raise Exception('AtomicOrbitals features or labels have changed')
if self.CohesiveEnergy.feature_labels() != ['cohesive energy']:
raise Exception('CohesiveEnergy features or labels have changed')
self.ElementProperty_label_check()
```
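A minimal usage sketch for the class above (illustrative, not from the repository): it assumes the local `calc_chemfeat` module providing the `Perovskite` class and the matminer dependencies are installed, and the formula string is a made-up BCFZY-type composition. The site list follows the `set_featurize_options` docstring.
```python
from perovskite_ml.featurizers import PerovskiteProperty

# load the BCFZY preset (Ba on the A site; Co, Fe, Zr, Y on the B site)
pp = PerovskiteProperty.from_preset('BCFZY')

# featurize options must be set once per batch before featurize()/feature_labels()
pp.set_featurize_options(sites=['A', 'B', 'comp'])

formula = 'BaCo0.4Fe0.4Zr0.1Y0.1O3'  # hypothetical composition
values = pp.featurize(formula)
features = dict(zip(pp.feature_labels(), values))
```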
{
"source": "jdhuang-csm/Ru-BCA",
"score": 2
}
#### File: Ru-BCA/modules/BCA_featurizer.py
```python
import os
import numpy as np
import pymatgen as mg
from pymatgen.ext.matproj import MPRester
import collections
from matminer.featurizers.base import BaseFeaturizer
from matminer.featurizers.composition import ValenceOrbital, CohesiveEnergy, ElementProperty, BandCenter, MolecularOrbitals
from matminer.utils.data import MagpieData
import pandas as pd
import warnings
from math import gcd
from bca_plotting import get_coords_from_comp
"""Lookups for Ba, Ca, and Al"""
oxides = {'Ba':'BaO','Ca':'CaO','Al':'Al2O3','B':'B2O3','Mg':'MgO','Sr':'SrO'}
nitrides = {'Ba':'Ba3N2','Ca':'Ca3N2','Al':'AlN','B':'BN','Mg':'Mg3N2','Sr':'Sr3N2'}
hydrides = {'Ba':'BaH2','Ca':'CaH2','Al':'AlH3','B':'BH3','Mg':'MgH2','Sr':'SrH2'}
# Elemental work function (eV) - from https://public.wsu.edu/~pchemlab/documents/Work-functionvalues.pdf
# Al value is average of 100,110,111 planes; Ba, Ca, B, Mg, and Sr values are for polycrystalline
work_function = {'Ba':2.52,'Ca':2.87,'Al':4.17,'B':4.45,'Mg':3.66,'Sr':2.59}
"""Load elemental electrical conductivity data"""
elec_conductivity_df = pd.read_csv(os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/ElementalElectricalConductivity.txt'),sep='\t',skipfooter=1,engine='python')
elec_conductivity = dict(zip(elec_conductivity_df['Symbol'],elec_conductivity_df['Electrical Conductivity (S/cm)']))
class MatProjCalc:
def __init__(self,oxide_dict={}):
#dict to store MX bond energies after calculation. Avoid repeated lookups in MP
self.calc_MX_bond_energy = {}
#dict to store formation enthalpies after looking up
self.fH_dict = {
('Ce','gas','exp',''):(417.1,'Formation enthalpy for Ce in gas phase includes exp data from phases: gas') #correction to MP entry: fH for Ce gas is negative in MP
}
self.mp = MPRester(os.environ['MATPROJ_API_KEY'])
print("Created MatProjCalc instance")
@property
def common_anions(self):
"""List of common anions"""
return ['N','P','O','S','F','Cl','Br','I']
@property
def dissocation_energy(self):
"""
Bond dissociation energies for gases at 298K in kJ/mol
Source: https://labs.chem.ucsb.edu/zakarian/armen/11---bonddissociationenergy.pdf
"""
return dict(N=945.33,P=490,O=498.34,S=429,F=156.9,Cl=242.58,Br=193.87,I=152.549,H=436.002)
@property
def mn_combos(self):
"""
Possible m-n pairs (m,n up to 4)
"""
return [(1,1),(1,2),(1,3),(1,4),(2,1),(2,3),(3,1),(3,2),(3,4),(4,1)]
def possible_ionic_formulas(self,metal,anion,metal_ox_lim=None,anion_ox_state=None):
"""
Get possible binary ionic compound formulas for metal-anion pair
Parameters:
-----------
metal: metal element symbol
anion: anion element symbol
metal_ox_lim: tuple of metal oxidation state limits (min, max)
anion_ox_state: anion oxidation state. If None, will attempt to find the common oxidation state for the anion
"""
#get common oxidation state for anion
if anion_ox_state is None:
anion_ox_state = [ox for ox in mg.Element(anion).common_oxidation_states if ox < 0]
if len(anion_ox_state) > 1:
raise Exception(f"Multiple common oxidation states for {anion}. Please specify anion_ox_state")
else:
anion_ox_state = anion_ox_state[0]
if metal_ox_lim is None:
metal_ox_lim = [0,np.inf]
return [f'{metal}{m}{anion}{n}' for m,n in self.mn_combos if m/n <= -anion_ox_state and metal_ox_lim[0] <= -anion_ox_state*n/m <= metal_ox_lim[1]]
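    # Illustrative example (hedged): possible_ionic_formulas('Ba', 'O') assumes O is 2-,
    # so only (m, n) pairs with m/n <= 2 are kept, giving candidates such as
    # 'Ba1O1', 'Ba2O1', and 'Ba1O2', further filtered by metal_ox_lim on the implied Ba oxidation state.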
def get_fH(self,formula, phase='solid', data_type='exp',silent=True,exclude_phases=[]):
"""
Get average experimental formation enthalpy for formula and phase
Parameters:
-----------
formula: chemical formula string
phase: phase string. Can be 'solid', 'liquid', 'gas', or a specific solid phase (e.g. 'monoclinic'). If 'solid', returns average across all solid phases
"""
#first check for corrected/saved data in fH_dict
try:
fH,msg = self.fH_dict[(formula,phase,data_type,','.join(exclude_phases))]
if silent==False:
#print('already calculated')
print(msg)
#if no entry exists, look up in MP
except KeyError:
results = self.mp.get_data(formula,data_type=data_type)
if data_type=='exp':
#results = self.mp.get_exp_thermo_data(formula)
if phase=='solid':
phase_results = [r for r in results if r.type=='fH' and r.phaseinfo not in ['liquid','gas']+exclude_phases]
else:
phase_results = [r for r in results if r.type=='fH' and r.phaseinfo==phase]
phases = np.unique([r.phaseinfo for r in phase_results])
fH = [r.value for r in phase_results]
elif data_type=='vasp':
if phase in ('liquid','gas'):
raise ValueError('VASP data only valid for solid phases')
elif phase=='solid':
#get entry with lowest energy above hull
srt_results = sorted(results,key=lambda x: x['e_above_hull'])
phase_results = srt_results[0:1]
else:
phase_results = [r for r in results if r['spacegroup']['crystal_system']==phase]
phases = np.unique([r['spacegroup']['crystal_system'] for r in phase_results])
n_atoms = mg.Composition(formula).num_atoms
#DFT formation energies given in eV per atom - need to convert to kJ/mol
fH = [r['formation_energy_per_atom']*n_atoms*96.485 for r in phase_results]
if len(fH)==0:
raise LookupError('No {} data for {} in {} phase'.format(data_type,formula,phase))
maxdiff = np.max(fH) - np.min(fH)
if maxdiff > 15:
warnings.warn('Max discrepancy of {} in formation enthalpies for {} exceeds limit'.format(maxdiff,formula))
fH = np.mean(fH)
msg = 'Formation enthalpy for {} in {} phase includes {} data from phases: {}'.format(formula,phase,data_type,', '.join(phases))
if silent==False:
print(msg)
#store value and info message for future lookup
self.fH_dict[(formula,phase,data_type,','.join(exclude_phases))] = (fH,msg)
return fH
def ionic_formula_from_ox_state(self,metal,anion,metal_ox_state,anion_ox_state=None,return_mn=False):
"""
Get binary ionic compound formula with reduced integer units based on oxidation state
Parameters:
-----------
metal: metal element symbol
anion: anion element symbol
metal_ox_state: metal oxidation state
anion_ox_state: anion oxidation state. If None, will attempt to find the common oxidation state for the anion
return_mn: if True, return formula units for metal (m) and anion (n)
Returns: chemical formula string MmXn, and m, n if return_mn=True
"""
#get common oxidation state for anion
if anion_ox_state is None:
anion_ox_state = [ox for ox in mg.Element(anion).common_oxidation_states if ox < 0]
if len(anion_ox_state) > 1:
raise Exception(f"Multiple common oxidation states for {anion}. Please specify anion_ox_state")
else:
anion_ox_state = anion_ox_state[0]
#formula MmXn
deno = gcd(metal_ox_state,-anion_ox_state)
        m = -anion_ox_state//deno
        n = metal_ox_state//deno
formula = '{}{}{}{}'.format(metal,m,anion,n)
if return_mn==False:
return formula
else:
return formula, m, n
def ox_states_from_binary_formula(self,formula,anion=None,anion_ox_state=None):
"""
Determine oxidation states from binary formula.
Could also use mg.Composition.oxi_state_guesses(), but the logic used is more complex.
Args:
formula: chemical formula
anion: Element symbol of anion. If None, search for common anion
anion_ox_state: oxidation state of anion. If None, assume common oxidation state
"""
comp = mg.Composition(formula)
if len(comp.elements) != 2:
raise ValueError('Formula must be binary')
# determine anion
if anion is None:
anion = np.intersect1d([e.name for e in comp.elements],self.common_anions)
if len(anion) > 1:
raise ValueError('Found multiple possible anions in formula. Please specify anion')
elif len(anion)==0:
raise ValueError('No common anions found in formula. Please specify anion')
else:
anion = anion[0]
metal = np.setdiff1d(comp.elements,mg.Element(anion))[0].name
#get common oxidation state for anion
if anion_ox_state is None:
anion_ox_state = [ox for ox in mg.Element(anion).common_oxidation_states if ox < 0]
if len(anion_ox_state) > 1:
raise Exception(f"Multiple common oxidation states for {anion}. Please specify anion_ox_state")
else:
anion_ox_state = anion_ox_state[0]
metal_ox_state = -comp.get(anion)*anion_ox_state/comp.get(metal)
return {metal:metal_ox_state,anion:anion_ox_state}
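    # Illustrative example (hedged): ox_states_from_binary_formula('Al2O3') identifies O as the
    # common anion (assumed 2-) and returns {'Al': 3.0, 'O': -2}.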
def MX_bond_energy(self,formula,data_type='exp',ordered_formula=False,silent=True,exclude_phases=[]):
"""
Get metal-anion bond energy per mole of metal for binary ionic compound
Parameters:
-----------
formula: chemical formula string
        data_type: 'exp' for experimental thermo data or 'vasp' for DFT formation energies
        ordered_formula: if True, assume that first element in formula is metal, and second is anion (i.e. MmXn)
        silent: if False, print info messages describing the data used
        exclude_phases: phases to exclude from aggregate over all solid phases
"""
comp = mg.Composition(formula)
formula = comp.reduced_formula
try:
#look up compound if already calculated
abe,msg = self.calc_MX_bond_energy[(formula,data_type,','.join(exclude_phases))]
if silent==False:
#print('already calculated')
print(msg)
except KeyError:
if len(comp.elements) != 2:
raise Exception("Formula is not a binary compound")
if ordered_formula is False:
anions = [el.name for el in comp.elements if el.name in self.common_anions]
if len(anions) == 0:
raise Exception('No common anions found in formula. Use ordered formula to indicate metal and anion')
elif len(anions) > 1:
raise Exception('Multiple anions found in formula. Use ordered formula to indicate metal and anion')
else:
anion = anions[0]
metal = [el.name for el in comp.elements if el.name!=anion][0]
elif ordered_formula is True:
metal = comp.elements[0].name
anion = comp.elements[1].name
m = comp.get_el_amt_dict()[metal]
n = comp.get_el_amt_dict()[anion]
fH = self.get_fH(formula,data_type=data_type,silent=silent,exclude_phases=exclude_phases) #oxide formation enthalpy
H_sub = self.get_fH(metal, phase='gas',silent=silent,exclude_phases=[]) #metal sublimation enthalpy - must be exp data (no vasp data for gas)
#look up info messages from get_fH to store in dict
msg = self.fH_dict[formula,'solid',data_type,','.join(exclude_phases)][1] + '\n'
msg += self.fH_dict[metal,'gas','exp',''][1]
DX2 = self.dissocation_energy[anion] #anion dissociation energy
abe = (fH - m*H_sub - (n/2)*DX2)/m #M-O bond energy per mole of M
self.calc_MX_bond_energy[(formula,data_type,','.join(exclude_phases))] = (abe,msg)
return abe
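    # Illustrative reduction of the expression above: for a 1:1 oxide such as BaO (m = n = 1),
    # abe = fH(BaO, solid) - fH(Ba, gas) - 0.5*D(O2), i.e. the enthalpy of forming the solid
    # oxide from gaseous M and O atoms, per mole of metal.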
def citations(self):
"""Cite Materials Project, Materials API, and pymatgen"""
return [
"@article{Jain2013,"
"author = {<NAME>, <NAME> and Hautier, Geoffroy and <NAME> and Richards, <NAME> and <NAME> and <NAME> and Gunter, Dan and Skinner, David and Ceder, Gerbrand and Persson, <NAME>.},"
"doi = {10.1063/1.4812323},"
"issn = {2166532X},"
"journal = {APL Materials},"
"number = {1},"
"pages = {011002},"
"title = {{The Materials Project: A materials genome approach to accelerating materials innovation}},"
"url = {http://link.aip.org/link/AMPADS/v1/i1/p011002/s1\&Agg=doi},"
"volume = {1},"
"year = {2013}"
"}",
"@article{Ong_2015,"
"doi = {10.1016/j.commatsci.2014.10.037},"
"url = {http://dx.doi.org/10.1016/j.commatsci.2014.10.037},"
"year = 2015,"
"month = {feb},"
"publisher = {Elsevier {BV}},"
"volume = {97},"
"pages = {209--215},"
"author = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},"
"title = {The Materials Application Programming Interface ({API}): A simple, flexible and efficient {API} for materials data based on {REpresentational} State Transfer ({REST}) principles},"
"journal = {Computational Materials Science}"
"}",
"@article{Ong2012b,"
"author = {Ong, <NAME> and Richards, <NAME> and <NAME> and Hautier, Geoffroy and Kocher, Michael and Cholia, Shreyas and Gunter, Dan and Chevrier, <NAME>. and Persson, <NAME>. and Ceder, Gerbrand},"
"doi = {10.1016/j.commatsci.2012.10.028},"
"file = {:Users/shyue/Mendeley Desktop/Ong et al/Computational Materials Science/2013 - Ong et al. - Python Materials Genomics (pymatgen) A robust, open-source python library for materials analysis.pdf:pdf;:Users/shyue/Mendeley Desktop/Ong et al/Computational Materials Science/2013 - Ong et al. - Python Materials Genomics (pymatgen) A robust, open-source python library for materials analysis(2).pdf:pdf},"
"issn = {09270256},"
"journal = {Computational Materials Science},"
"month = feb,"
"pages = {314--319},"
"title = {{Python Materials Genomics (pymatgen): A robust, open-source python library for materials analysis}},"
"url = {http://linkinghub.elsevier.com/retrieve/pii/S0927025612006295},"
"volume = {68},"
"year = {2013}"
"}"
]
#create MatProjCalc instance to store fetched data/calculations
mpcalc = MatProjCalc()
# oxide formation enthalpies
oxide_Hf = {M:mpcalc.get_fH(MO,exclude_phases=['amorph']) for M,MO in oxides.items()}
#bond energies per mole of metal M
MO_bond_energy = {M:mpcalc.MX_bond_energy(MO,exclude_phases=['amorph']) for M,MO in oxides.items()}
# MN_bond_energy = {M:mpcalc.MX_bond_energy(MN,exclude_phases=['amorph']) for M,MN in nitrides.items()}
# MH_bond_energy = {M:mpcalc.MX_bond_energy(MH,ordered_formula=True,exclude_phases=['amorph']) for M,MH in hydrides.items()}
MN_bond_energy = {}
MH_bond_energy = {}
for M in oxides.keys():
# Use experimental data where possible, fill in with VASP data if needed
MN = nitrides[M]
try:
MN_bond_energy[M] = mpcalc.MX_bond_energy(MN,exclude_phases=['amorph'])
except LookupError:
MN_bond_energy[M] = mpcalc.MX_bond_energy(MN,exclude_phases=['amorph'],data_type='vasp')
warnings.warn(f'Using VASP data for formation enthalpy of {MN}')
MH = hydrides[M]
try:
MH_bond_energy[M] = mpcalc.MX_bond_energy(MH,ordered_formula=True,exclude_phases=['amorph'])
except LookupError:
MH_bond_energy[M] = mpcalc.MX_bond_energy(MH,ordered_formula=True,exclude_phases=['amorph'],data_type='vasp')
warnings.warn(f'Using VASP data for formation enthalpy of {MH}')
#bond energy delta per mole of M
ON_BondEnergyDelta = {M:MN_bond_energy[M] - MOBE for M, MOBE in MO_bond_energy.items()}
OH_BondEnergyDelta = {M:MH_bond_energy[M] - MOBE for M, MOBE in MO_bond_energy.items()}
NH_BondEnergyDelta = {M:MH_bond_energy[M] - MNBE for M, MNBE in MN_bond_energy.items()}
oxidation_state = {'Ba':2,'Ca':2,'Al':3,'B':3,'Mg':2,'Sr':2}
metal_lookups = {}
for m in ['Ba','Ca','Al','B','Mg','Sr']:
metal_lookups[m] = {'work_function':work_function[m],
'MO_BondEnergy':MO_bond_energy[m],
'MN_BondEnergy':MN_bond_energy[m],
'MH_BondEnergy':MH_bond_energy[m],
'oxidation_state':oxidation_state[m],
'ON_BondEnergyDelta':ON_BondEnergyDelta[m],
'OH_BondEnergyDelta':OH_BondEnergyDelta[m],
'NH_BondEnergyDelta':NH_BondEnergyDelta[m],
'oxide_Hf':oxide_Hf[m]
}
class AtomicOrbitalsMod(BaseFeaturizer):
"""
*Modified from matminer class to handle cases where LUMO is None*
Determine HOMO/LUMO features based on a composition.
The highest occupied molecular orbital (HOMO) and lowest unoccupied
    molecular orbital (LUMO) are estimated from the atomic orbital energies
of the composition. The atomic orbital energies are from NIST:
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
Warning:
For compositions with inter-species fractions greater than 10,000 (e.g.
dilute alloys such as FeC0.00001) the composition will be truncated (to Fe
in this example). In such extreme cases, the truncation likely reflects the
true physics of the situation (i.e. that the dilute element does not
significantly contribute orbital character to the band structure), but the
user should be aware of this behavior.
"""
def featurize(self, comp):
"""
Args:
comp: (Composition)
pymatgen Composition object
Returns:
HOMO_character: (str) orbital symbol ('s', 'p', 'd', or 'f')
HOMO_element: (str) symbol of element for HOMO
HOMO_energy: (float in eV) absolute energy of HOMO
LUMO_character: (str) orbital symbol ('s', 'p', 'd', or 'f')
LUMO_element: (str) symbol of element for LUMO
LUMO_energy: (float in eV) absolute energy of LUMO
gap_AO: (float in eV)
                the estimated bandgap from HOMO and LUMO energies
"""
integer_comp, factor = comp.get_integer_formula_and_factor()
# warning message if composition is dilute and truncated
if not (len(mg.Composition(comp).elements) ==
len(mg.Composition(integer_comp).elements)):
            warnings.warn('AtomicOrbitals: {} truncated to {}'.format(comp,
integer_comp))
homo_lumo = MolecularOrbitals(integer_comp).band_edges
feat = collections.OrderedDict()
for edge in ['HOMO', 'LUMO']:
if homo_lumo[edge] is not None:
feat['{}_character'.format(edge)] = homo_lumo[edge][1][-1]
feat['{}_element'.format(edge)] = homo_lumo[edge][0]
feat['{}_energy'.format(edge)] = homo_lumo[edge][2]
else:
#if LUMO is None
feat['{}_character'.format(edge)] = 'na'
feat['{}_element'.format(edge)] = 'na'
#unclear what this value should be. Arbitrarily set to 0. Don't want NaN for modeling
feat['{}_energy'.format(edge)] = 0
feat['gap_AO'] = feat['LUMO_energy'] - feat['HOMO_energy']
return list(feat.values())
def feature_labels(self):
feat = []
for edge in ['HOMO', 'LUMO']:
feat.extend(['{}_character'.format(edge),
'{}_element'.format(edge),
'{}_energy'.format(edge)])
feat.append("gap_AO")
return feat
def citations(self):
return [
"@article{PhysRevA.55.191,"
"title = {Local-density-functional calculations of the energy of atoms},"
"author = {Kotochigova, <NAME> Levine, <NAME>. and Shirley, "
"<NAME>. and <NAME>. and Clark, <NAME>.},"
"journal = {Phys. Rev. A}, volume = {55}, issue = {1}, pages = {191--199},"
"year = {1997}, month = {Jan}, publisher = {American Physical Society},"
"doi = {10.1103/PhysRevA.55.191}, "
"url = {https://link.aps.org/doi/10.1103/PhysRevA.55.191}}"]
def implementors(self):
return ['<NAME>', '<NAME>']
class ValenceOrbitalEnergy(BaseFeaturizer):
def __init__(self):
self.element_props = {}
self.MagpieData = MagpieData()
def get_element_props(self,el):
try:
props = self.element_props[el]
except KeyError:
subshells = 'spdf'
n_elec = {sub:self.MagpieData.get_elemental_property(el,f'N{sub}Valence') for sub in subshells}
orbitals = sorted(el.atomic_orbitals.keys())[::-1]
#look up valence orbital for subshell
orbital_func = lambda x: '{}{}'.format(max([orb[0] for orb in orbitals if orb[1]==x]),x)
#get valence orbital energy for subshell
energy_func = lambda x: el.atomic_orbitals[orbital_func(x)]
props = {x:{'n_elec':n_elec[x],'energy':energy_func(x),'shell':orbital_func(x)[0]} for x in subshells if n_elec[x]>0}
self.element_props[el] = props
return props
def featurize(self,comp):
tot_energy = 0
tot_elec = 0
for el in comp.elements:
props = self.get_element_props(el)
tot_energy += comp[el]*sum([v['energy']*v['n_elec'] for v in props.values()])
tot_elec += comp[el]*sum([v['n_elec'] for v in props.values()])
return [tot_energy/tot_elec]
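    # The returned value is the electron-count-weighted mean energy of the occupied valence
    # subshells over the composition (sketch of what the loop above computes):
    #   <E> = [sum_el x_el * sum_sub(E_sub * n_sub)] / [sum_el x_el * sum_sub(n_sub)]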
def feature_labels(self):
return ['MeanValenceEnergy']
def citations(self):
return [
"@article{Ward2016,"
"archivePrefix = {arXiv},"
"arxivId = {1606.09551},"
"author = {<NAME> and <NAME> <NAME> and <NAME>},"
"doi = {10.1038/npjcompumats.2016.28},"
"eprint = {1606.09551},"
"isbn = {2057-3960},"
"issn = {20573960},"
"journal = {npj Computational Materials},"
"number = {June},"
"pages = {1--7},"
"title = {{A general-purpose machine learning framework for predicting properties of inorganic materials}},"
"volume = {2},"
"year = {2016}"
"}"]
class BCA():
def __init__(self,composition,radius_type='ionic_radius',normalize_formula=False):
self.cations = [el.name for el in composition.elements if el.name!='O']
self.radius_type = radius_type
if normalize_formula==True:
#scale to single total unit of cations
tot_cat_amt = sum([composition[c] for c in self.cations])
composition = mg.Composition({el:amt/tot_cat_amt for el,amt in composition.get_el_amt_dict().items()})
self.composition = composition
self.metal_composition = mg.Composition({c:self.composition[c] for c in self.cations})
#checks
if len(self.cations)==0:
raise Exception('No cations in composition')
# if self.composition['O']!=self.composition['Ba'] + self.composition['Ca'] + self.composition['Al']*3/2:
# raise Exception('Oxygen amount does not match BaO, CaO, Al2O3 stoichiometry')
if self.radius_type not in ('crystal_radius','ionic_radius'):
raise Exception(f'Invalid radius type {self.radius_type}. Options are crystal_radius and ionic_radius')
@property
def tot_cat_amt(self):
return sum([self.composition[c] for c in self.cations])
def metal_mean_func(self,func):
"""
Weighted average of function func across metals in composition
"""
weights=list(self.metal_composition.get_el_amt_dict().values())
return np.average([func(el) for el in self.metal_composition.elements],weights=weights)
def metal_std_func(self,func,mean=None):
"""
Standard deviation of function func across metals in composition
Args:
func: function to average. Takes pymatgen Element as input
mean: mean of function if already known. If None, calculated
"""
if mean is None:
mean = self.metal_mean_func(func)
return self.metal_mean_func(lambda el: (func(el) - mean)**2)**0.5
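    # i.e. the composition-weighted standard deviation of func over the metal cations:
    #   std = sqrt(sum_i w_i*(f(el_i) - mean)**2 / sum_i w_i)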
# def is_in_phase_triangle(self):
# xc,xa,xb = get_coords_from_comp(self.composition)
# # One boundary specified in each line of if conditions: 1: BA to BCA723; 2: BA to C; 3: BCA723 to C
# if xb <= 0.5*(1+xc) and xa >= 0.5 - 1.5*xc \
# and xb >= 0.5*(1-xc) and xa <= 0.5*(1-xc) \
# and xb <= (7/12)*(1-(6/5)*(xc-1/6)) and xa >= 0.25*(1-(6/5)*(xc-1/6)):
# in_triangle = 1
# else:
# in_triangle = 0
# return in_triangle
def featurize(self):
features = {}
features['MO_ratio'] = self.tot_cat_amt/self.composition['O']
#metal mean/std functions and names
def radius(el):
ox_state = metal_lookups.get(el.name)['oxidation_state']
return el.data['Shannon radii'][f'{ox_state}']['VI'][''][self.radius_type]
def cation_X(el):
ox_state = metal_lookups.get(el.name)['oxidation_state']
r = radius(el)
return ox_state/r**2
metal_funcs = {'oxide_Hf': lambda el: metal_lookups.get(el.name)['oxide_Hf'],
'MO_BondEnergy':lambda el: metal_lookups.get(el.name)['MO_BondEnergy'],
'MN_BondEnergy':lambda el: metal_lookups.get(el.name)['MN_BondEnergy'],
'MH_BondEnergy':lambda el: metal_lookups.get(el.name)['MH_BondEnergy'],
'ON_BondEnergyDelta': lambda el: metal_lookups.get(el.name)['ON_BondEnergyDelta'],
'OH_BondEnergyDelta': lambda el: metal_lookups.get(el.name)['OH_BondEnergyDelta'],
'NH_BondEnergyDelta': lambda el: metal_lookups.get(el.name)['NH_BondEnergyDelta'],
'MO_BondIonicity': lambda el: 1 - np.exp(-0.25*(el.X - mg.Element('O').X)**2),
'M_X': lambda el: el.X,
'M_CationX': cation_X,
'M_radius': radius,
'M_WorkFunction': lambda el: metal_lookups.get(el.name)['work_function'],
'M_sigma_elec': lambda el: elec_conductivity.get(el.name)
}
for name,func in metal_funcs.items():
mean = self.metal_mean_func(func)
std = self.metal_std_func(func,mean=mean)
features[f'{name}_mean'] = mean
features[f'{name}_std'] = std
# phase diagram feature - in BA-CaO-B3A triangle?
# features['in_phase_triangle'] = self.is_in_phase_triangle()
return features
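    # Usage sketch (illustrative, not from the repo): the module-level Materials Project
    # lookups above require the MATPROJ_API_KEY environment variable to be set at import time.
    # The composition below is a hypothetical Ba-Ca-Al oxide.
    #   bca = BCA(mg.Composition('Ba3CaAl2O7'), radius_type='ionic_radius')
    #   feats = bca.featurize()   # dict mapping feature name -> value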
def feature_units(self):
        units = ['none',      # MO_ratio
                 'energy',    # oxide_Hf mean
                 'energy',    # oxide_Hf std
                 'energy',    # MO_BondEnergy mean
                 'energy',    # MO_BondEnergy std
                 'energy',    # MN_BondEnergy mean
                 'energy',    # MN_BondEnergy std
                 'energy',    # MH_BondEnergy mean
                 'energy',    # MH_BondEnergy std
                 'energy',    # ON_BondEnergyDelta mean
                 'energy',    # ON_BondEnergyDelta std
                 'energy',    # OH_BondEnergyDelta mean
                 'energy',    # OH_BondEnergyDelta std
                 'energy',    # NH_BondEnergyDelta mean
                 'energy',    # NH_BondEnergyDelta std
                 'none',      # MO_BondIonicity mean
                 'none',      # MO_BondIonicity std
                 'none',      # M_X mean
                 'none',      # M_X std
                 'none',      # M_CationX mean
                 'none',      # M_CationX std
                 'length',    # M_radius mean
                 'length',    # M_radius std
                 'energy',    # M_WorkFunction mean
                 'energy',    # M_WorkFunction std
                 'S/cm',      # M_sigma_elec mean
                 'S/cm']      # M_sigma_elec std
return units
def citations(self):
cite = [
# work function citations
"@Inbook{Holzl1979,"
"author={<NAME>. and <NAME>.},"
"editor={<NAME> and Schulte, <NAME>. and <NAME>},"
"title={Work function of metals},"
"bookTitle={Solid Surface Physics},"
"year={1979},"
"publisher={Springer Berlin Heidelberg},"
"address={Berlin, Heidelberg},"
"pages={1--150},"
"isbn={978-3-540-35253-2},"
"doi={10.1007/BFb0048919},"
"url={https://doi.org/10.1007/BFb0048919}"
"}",
"@Inbook{doi:10.1080/00222346908205102,"
"author={<NAME>.},"
"editor = {<NAME>},"
"title={Work Function: Measurement and Results}"
"bookTitle = {Solid State Surface Science, Volume 1},"
"year = {1969},"
"publisher = {Marcel Dekker},"
"address={New York, NY, USA},"
"pages={179},"
"}",
"@article{doi:10.1063/1.323539,"
"author = {Michaelson,<NAME>. },"
"title = {The work function of the elements and its periodicity},"
"journal = {Journal of Applied Physics},"
"volume = {48},"
"number = {11},"
"pages = {4729-4733},"
"year = {1977},"
"doi = {10.1063/1.323539},"
"URL = {https://doi.org/10.1063/1.323539}"
"}",
# elec conductivity citation
"@misc{AngstromSciences,"
"author={Angstrom Sciences},"
"title = {Elements Electrical Conductivity Reference Table},"
"URL= {https://www.angstromsciences.com/elements-electrical-conductivity},"
"note = {Accessed: 2019-02-27}"
"}",
# cation electronegativity citation
"@article{Zohourian2017,"
"author = {<NAME>. and <NAME>. and <NAME>.},"
"doi = {10.1016/j.ssi.2016.09.012},"
"issn = {01672738},"
"journal = {Solid State Ionics},"
"pages = {64--69},"
"publisher = {Elsevier B.V.},"
"title = {{Proton uptake into the protonic cathode material BaCo0.4Fe0.4Zr0.2O3-$\delta$and comparison to protonic electrolyte materials}},"
"url = {http://dx.doi.org/10.1016/j.ssi.2016.09.012},"
"volume = {299},"
"year = {2017}"
"}",
# BCA phase diagram citation
# "@article{Zhang2017,"
# "author = {<NAME> and <NAME> and <NAME>},"
# "doi = {10.1111/jace.14793},"
# "issn = {15512916},"
# "journal = {Journal of the American Ceramic Society},"
# "keywords = {glass-ceramics,phase equilibria,thermodynamics},"
# "number = {6},"
# "pages = {2722--2731},"
# "title = {{Phase equilibria study and thermodynamic description of the BaO-CaO-Al2O3 system}},"
# "volume = {100},"
# "year = {2017}"
# "}"
# pymatgen citation
"@article{Ong2012b,"
"author = {Ong, <NAME> and Richards, <NAME> and <NAME> and Hautier, Geoffroy and Kocher, Michael and Cholia, Shreyas and Gunter, Dan and Chevrier, <NAME>. and Persson, <NAME>. and Ceder, Gerbrand},"
"doi = {10.1016/j.commatsci.2012.10.028},"
"file = {:Users/shyue/Mendeley Desktop/Ong et al/Computational Materials Science/2013 - Ong et al. - Python Materials Genomics (pymatgen) A robust, open-source python library for materials analysis.pdf:pdf;:Users/shyue/Mendeley Desktop/Ong et al/Computational Materials Science/2013 - Ong et al. - Python Materials Genomics (pymatgen) A robust, open-source python library for materials analysis(2).pdf:pdf},"
"issn = {09270256},"
"journal = {Computational Materials Science},"
"month = feb,"
"pages = {314--319},"
"title = {{Python Materials Genomics (pymatgen): A robust, open-source python library for materials analysis}},"
"url = {http://linkinghub.elsevier.com/retrieve/pii/S0927025612006295},"
"volume = {68},"
"year = {2013}"
"}"
]
return list(np.unique(cite + mpcalc.citations()))
class BCA_Featurizer(BaseFeaturizer):
def __init__(self,radius_type='ionic_radius',normalize_formula=False):
self.radius_type = radius_type
self.normalize_formula = normalize_formula
self.ValenceOrbital = ValenceOrbital()
self.AtomicOrbitals = AtomicOrbitalsMod()
self.CohesiveEnergy = CohesiveEnergy()
self.BandCenter = BandCenter()
self.ValenceOrbitalEnergy = ValenceOrbitalEnergy()
#custom ElementProperty featurizer
elemental_properties = ['BoilingT', 'MeltingT',
'BulkModulus', 'ShearModulus',
'Row', 'Column', 'Number', 'MendeleevNumber', 'SpaceGroupNumber',
'Density','MolarVolume',
'FusionEnthalpy','HeatVaporization',
'Polarizability',
'ThermalConductivity']
self.ElementProperty = ElementProperty(data_source='magpie',features=elemental_properties,
stats=["mean", "std_dev"])
#check matminer featurizers
self.check_matminer_featurizers()
def featurize(self,composition):
bca = BCA(composition,self.radius_type,self.normalize_formula)
bca_features = bca.featurize()
vo_features = self.ValenceOrbital.featurize(bca.metal_composition) #avg and frac s, p , d, f electrons for metals
vo_features += [sum(vo_features[0:3])] #avg total valence electrons for metals
ao_features = self.AtomicOrbitals.featurize(bca.metal_composition) #HOMO and LUMO character and energy levels for metals from atomic orbitals)
ao_features = [ao_features[i] for i in range(len(ao_features)) if i not in (0,1,3,4)]#exclude HOMO_character,HOMO_element, LUMO_character, LUMO_element - categoricals
ce_features = self.CohesiveEnergy.featurize(bca.metal_composition,formation_energy_per_atom=1e-10) #avg metal elemental cohesive energy
bc_features = self.BandCenter.featurize(bca.metal_composition) + self.BandCenter.featurize(bca.composition)
ve_features = self.ValenceOrbitalEnergy.featurize(bca.metal_composition) + self.ValenceOrbitalEnergy.featurize(bca.composition)
ep_features = self.ElementProperty.featurize(bca.metal_composition) + self.ElementProperty.featurize(bca.composition)
mm_features = vo_features + ao_features + ce_features + bc_features + ve_features + ep_features
return list(bca_features.values()) + mm_features
@property
def ElementProperty_custom_labels(self):
"""
Generate custom labels for ElementProperty featurizer that follow same naming convention as Perovskite class
"""
elemental_property_label_map = {'BoilingT':'boil_temp','MeltingT':'melt_temp',
'BulkModulus':'bulk_mod','ShearModulus':'shear_mod',
'Row':'row','Column':'column','Number':'number','MendeleevNumber':'mendeleev','SpaceGroupNumber':'space_group',
'Density':'density','MolarVolume':'molar_vol',
'FusionEnthalpy':'H_fus','HeatVaporization':'H_vap',
'Polarizability':'polarizability',
'ThermalConductivity':'sigma_therm'}
element_property_labels = list(map(elemental_property_label_map.get,self.ElementProperty.features))
labels = []
for attr in element_property_labels:
for stat in self.ElementProperty.stats:
if stat=='std_dev':
stat = 'std'
labels.append(f'M_{attr}_{stat}')
for attr in element_property_labels:
for stat in self.ElementProperty.stats:
if stat=='std_dev':
stat = 'std'
labels.append(f'BCA_{attr}_{stat}')
return labels
@property
def ElementProperty_units(self):
"""
Generate units for ElementProperty featurizer that follow same naming convention as Perovskite class
"""
elemental_property_unit_map = {'BoilingT':'temperature','MeltingT':'temperature',
'BulkModulus':'pressure','ShearModulus':'pressure',
'Row':'none','Column':'none','Number':'none','MendeleevNumber':'none','SpaceGroupNumber':'none',
'Density':'density','MolarVolume':'volume',
'FusionEnthalpy':'energy','HeatVaporization':'energy',
'Polarizability':'polarizability',
'ThermalConductivity':'therm'}
element_property_units = list(map(elemental_property_unit_map.get,self.ElementProperty.features))
units = []
for ep_unit in element_property_units:
for stat in self.ElementProperty.stats:
units.append(ep_unit)
return units*2
def ElementProperty_label_check(self):
"""
Check that ElementProperty feature labels are as expected
If not, features may not align with feature labels
"""
#ElementProperty.feature_labels() code as of 1/24/20
labels = []
for attr in self.ElementProperty.features:
src = self.ElementProperty.data_source.__class__.__name__
for stat in self.ElementProperty.stats:
labels.append("{} {} {}".format(src, stat, attr))
if labels!=self.ElementProperty.feature_labels():
raise Exception('ElementProperty features or labels have changed')
@property
def matminer_labels(self):
"""
Feature labels for matminer-derived features
"""
labels = [
#ValenceOrbital labels
'M_ValenceElec_s_mean',
'M_ValenceElec_p_mean',
'M_ValenceElec_d_mean',
'M_ValenceElec_f_mean',
'M_ValenceElec_s_frac',
'M_ValenceElec_p_frac',
'M_ValenceElec_d_frac',
'M_ValenceElec_f_frac',
'M_ValenceElec_tot_mean',
#AtomicOrbitals labels
#'M_HOMO_character',
'M_HOMO_energy',
#'M_LUMO_character',
'M_LUMO_energy',
'M_AO_gap',
#CohesiveEnergy labels
'M_cohesive_energy_mean',
#BandCenter labels
'M_BandCenter',
'BCA_BandCenter',
#ValenceOrbitalEnergy labels
'M_ValenceEnergy_mean',
'BCA_ValenceEnergy_mean'
]
labels += self.ElementProperty_custom_labels
return labels
@property
def matminer_units(self):
"""
Feature units for matminer-derived features
"""
units = [
#ValenceOrbital units
'none',
'none',
'none',
'none',
'none',
'none',
'none',
'none',
'none',
#AtomicOrbitals units
#'M_HOMO_character',
'energy',
#'M_LUMO_character',
'energy',
'energy',
#CohesiveEnergy units
'energy',
#BandCenter units
'energy',
'energy',
#ValenceOrbitalEnergy units
'energy',
'energy'
]
units += self.ElementProperty_units
return units
def feature_labels(self):
bca_feature_labels = list(BCA(mg.Composition('BaO'),self.radius_type,self.normalize_formula).featurize().keys())
return bca_feature_labels + self.matminer_labels
def feature_units(self):
bca_units = BCA(mg.Composition('BaO')).feature_units()
return bca_units + self.matminer_units
def check_matminer_featurizers(self):
"""
Check that features and feature order for matminer featurizers are as expected
If features or feature order have changed, featurize() may return unexpected features that do not align with feature_labels()
"""
#verify that matminer feature labels haven't changed
if self.ValenceOrbital.feature_labels() != ['avg s valence electrons',
'avg p valence electrons',
'avg d valence electrons',
'avg f valence electrons',
'frac s valence electrons',
'frac p valence electrons',
'frac d valence electrons',
'frac f valence electrons']:
raise Exception('ValenceOrbital features or labels have changed')
if self.AtomicOrbitals.feature_labels() != ['HOMO_character',
'HOMO_element',
'HOMO_energy',
'LUMO_character',
'LUMO_element',
'LUMO_energy',
'gap_AO']:
raise Exception('AtomicOrbitals features or labels have changed')
if self.CohesiveEnergy.feature_labels() != ['cohesive energy']:
raise Exception('CohesiveEnergy features or labels have changed')
if self.BandCenter.feature_labels() != ['band center']:
raise Exception('BandCenter features or labels have changed')
self.ElementProperty_label_check()
def citations(self):
featurizers = [self.ValenceOrbital, self.AtomicOrbitals, self.CohesiveEnergy, self.BandCenter, self.ValenceOrbitalEnergy, BCA(mg.Composition('BaO'))]
return list(np.unique(sum([f.citations() for f in featurizers],[])))
class GenericFeaturizer(BaseFeaturizer):
"""
Featurizer to use generic properties available in matminer featurizers; no features from BCA class utilized
"""
def __init__(self,normalize_formula=False):
self.normalize_formula = normalize_formula
# don't need ValenceOrbital - valence counts etc. covered in ElementProperty.from_preset('magpie')
# self.ValenceOrbital = ValenceOrbital()
self.AtomicOrbitals = AtomicOrbitalsMod()
self.CohesiveEnergy = CohesiveEnergy()
self.BandCenter = BandCenter()
self.ValenceOrbitalEnergy = ValenceOrbitalEnergy()
# ElementProperty featurizer with magpie properties plus additional properties
self.ElementProperty = ElementProperty.from_preset('magpie')
self.ElementProperty.features += ['BoilingT',
'BulkModulus', 'ShearModulus',
'Density','MolarVolume',
'FusionEnthalpy','HeatVaporization',
'Polarizability',
'ThermalConductivity']
# range, min, max are irrelevant inside the ternary
# self.ElementProperty.stats = ['mean', 'avg_dev','mode']
# check matminer featurizers
self.check_matminer_featurizers()
def featurize(self,composition):
# use BCA just to get composition and metal_composition
bca = BCA(composition,'ionic_radius',self.normalize_formula)
ao_features = self.AtomicOrbitals.featurize(bca.metal_composition) # HOMO and LUMO character and energy levels for metals from atomic orbitals)
ao_features = [ao_features[i] for i in range(len(ao_features)) if i not in (0,1,3,4)] # exclude HOMO_character,HOMO_element, LUMO_character, LUMO_element - categoricals
ce_features = self.CohesiveEnergy.featurize(bca.metal_composition,formation_energy_per_atom=1e-10) # avg metal elemental cohesive energy
bc_features = self.BandCenter.featurize(bca.metal_composition) + self.BandCenter.featurize(bca.composition)
ve_features = self.ValenceOrbitalEnergy.featurize(bca.metal_composition) + self.ValenceOrbitalEnergy.featurize(bca.composition)
ep_features = self.ElementProperty.featurize(bca.metal_composition) + self.ElementProperty.featurize(bca.composition)
mm_features = ao_features + ce_features + bc_features + ve_features + ep_features
return mm_features
def feature_labels(self):
"""
Feature labels for matminer-derived features
"""
labels = [
#AtomicOrbitals labels
#'M_HOMO_character',
'M_HOMO_energy',
#'M_LUMO_character',
'M_LUMO_energy',
'M_AO_gap',
#CohesiveEnergy labels
'M_cohesive_energy_mean',
#BandCenter labels
'M_BandCenter',
'BCA_BandCenter',
#ValenceOrbitalEnergy labels
'M_ValenceEnergy_mean',
'BCA_ValenceEnergy_mean'
]
labels += [f'M {l}' for l in self.ElementProperty.feature_labels()]
labels += [f'BCA {l}' for l in self.ElementProperty.feature_labels()]
return labels
@property
def matminer_units(self):
"""
Feature units for matminer-derived features
"""
units = [
#ValenceOrbital units
'none',
'none',
'none',
'none',
'none',
'none',
'none',
'none',
'none',
#AtomicOrbitals units
#'M_HOMO_character',
'energy',
#'M_LUMO_character',
'energy',
'energy',
#CohesiveEnergy units
'energy',
#BandCenter units
'energy',
'energy',
#ValenceOrbitalEnergy units
'energy',
'energy'
]
units += self.ElementProperty_units
return units
def feature_units(self):
bca_units = BCA(mg.Composition('BaO')).feature_units()
return bca_units + self.matminer_units
def check_matminer_featurizers(self):
"""
Check that features and feature order for matminer featurizers are as expected
If features or feature order have changed, featurize() may return unexpected features that do not align with feature_labels()
"""
#verify that matminer feature labels haven't changed
if self.AtomicOrbitals.feature_labels() != ['HOMO_character',
'HOMO_element',
'HOMO_energy',
'LUMO_character',
'LUMO_element',
'LUMO_energy',
'gap_AO']:
raise Exception('AtomicOrbitals features or labels have changed')
if self.CohesiveEnergy.feature_labels() != ['cohesive energy']:
raise Exception('CohesiveEnergy features or labels have changed')
if self.BandCenter.feature_labels() != ['band center']:
raise Exception('BandCenter features or labels have changed')
def citations(self):
featurizers = [self.AtomicOrbitals, self.CohesiveEnergy, self.BandCenter, self.ValenceOrbitalEnergy]
citations = sum([f.citations() for f in featurizers],[])
# add pymatgen citation
citations += [
"@article{Ong2012b,"
"author = {Ong, <NAME> and Richards, <NAME> <NAME> Hautier, <NAME>, <NAME> Cholia, <NAME> Gunter, <NAME>, <NAME>. and Persson, <NAME>. and Ceder, Gerbrand},"
"doi = {10.1016/j.commatsci.2012.10.028},"
"file = {:Users/shyue/Mendeley Desktop/Ong et al/Computational Materials Science/2013 - Ong et al. - Python Materials Genomics (pymatgen) A robust, open-source python library for materials analysis.pdf:pdf;:Users/shyue/Mendeley Desktop/Ong et al/Computational Materials Science/2013 - Ong et al. - Python Materials Genomics (pymatgen) A robust, open-source python library for materials analysis(2).pdf:pdf},"
"issn = {09270256},"
"journal = {Computational Materials Science},"
"month = feb,"
"pages = {314--319},"
"title = {{Python Materials Genomics (pymatgen): A robust, open-source python library for materials analysis}},"
"url = {http://linkinghub.elsevier.com/retrieve/pii/S0927025612006295},"
"volume = {68},"
"year = {2013}"
"}"
]
return list(np.unique(citations))
```
#### File: Ru-BCA/modules/bca_plotting.py
```python
import numpy as np
import ternary
from ternary.helpers import simplex_iterator
import matplotlib.pyplot as plt
import pymatgen as mg
import pandas as pd
import re
from model_eval import predict_interval
def BCA_formula_from_str(BCA_str):
"""
Get chemical formula string from BCA string
Args:
BCA_str: BCA ratio string (e.g. 'B3C1A1')
"""
if len(BCA_str)==6 and BCA_str[:3]=='BCA':
# format: BCAxyz. suitable for single-digit integer x,y,z
funits = BCA_str[-3:]
else:
# format: BxCyAz. suitable for multi-digit or non-integer x,y,z
funits = re.split('[BCA]',BCA_str)
funits = [u for u in funits if len(u) > 0]
components = ['BaO','CaO','Al2O3']
formula = ''.join([f'({c}){n}' for c,n in zip(components, funits)])
return formula
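# Hedged usage sketch (illustrative ratio string, not part of the original module):
# shows the expected conversion from a BCA ratio string to an oxide formula that
# pymatgen can parse.
def _example_bca_formula_from_str():
    formula = BCA_formula_from_str('B3C1A1')  # -> '(BaO)3(CaO)1(Al2O3)1'
    return mg.Composition(formula)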
def get_coords_from_comp(comp,tern_axes=['Ca','Al','Ba']):
base_amt = {'Ba':1,'Ca':1,'Al':2}
coords = np.array([comp[el]/base_amt[el] for el in tern_axes])
coords = coords/np.sum(coords)
return coords
def get_comp_from_coords(coords,tern_axes=['Ca','Al','Ba'],scale=1):
if len(coords)==2:
a,b = coords
c = scale - a- b
coords = (a,b,c)
# else:
# a,b,c = coords
oxides = {'Ba':'BaO','Ca':'CaO','Al':'Al2O3','B':'B2O3','Mg':'MgO','Sr':'SrO'}
formula = ''.join(['({}){}'.format(oxides[m],amt) for m,amt in zip(tern_axes,coords)])
return mg.Composition(formula)
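# Hedged usage sketch (assumes an equimolar BaO/CaO/Al2O3 point): converts
# barycentric coordinates to a composition and back to normalized coordinates.
def _example_coord_composition_round_trip():
    comp = get_comp_from_coords((1 / 3, 1 / 3, 1 / 3))
    return get_coords_from_comp(comp)  # approximately (0.333, 0.333, 0.333)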
def add_colorbar(fig=None, cbrect=[0.9,0.15,0.02,0.75], label=None, tickformat=None,
cmap = None, vlim=None, norm=None,
tick_params={}, label_kwargs={},subplots_adjust={'left':0.05,'wspace':0.35, 'hspace':0.25, 'right':0.8},
**cb_kwargs):
#add a single colorbar
if fig is None:
fig = plt.gcf()
#make an axis for colorbar to control position/size
cbaxes = fig.add_axes(cbrect) #[left, bottom, width, height]
#code from ternary.colormapping.colorbar_hack
if vlim is not None:
vmin,vmax = vlim
else:
vmin = None
vmax = None
if norm==None:
# if logscale==True:
# norm = colors.LogNorm(vmin=vmin,vmax=vmax)
norm = plt.Normalize(vmin=vmin,vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm._A = []
cb = fig.colorbar(sm, cax=cbaxes, format=tickformat,**cb_kwargs)
cb.ax.tick_params(**tick_params)
if label is not None:
cb.set_label(label, **label_kwargs)
fig.subplots_adjust(**subplots_adjust)
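# Hedged usage sketch: attach a single colorbar to an existing figure of ternary
# plots; the label, value limits, and colormap below are placeholders.
def _example_add_colorbar(fig):
    add_colorbar(fig, label='NH$_3$ production rate', vlim=(0, 50), cmap='viridis')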
def plot_labeled_ternary(comps,values,ax=None,label_points=True,add_labeloffset=0,corner_labelsize=12,point_labelsize=11,point_labeloffset=[0,0.01,0],cmap=None,vlim=None,**scatter_kw):
tern_axes = ['Ca','Al','Ba']
if ax is None:
fig, ax = plt.subplots(figsize=(9,8))
else:
fig = ax.get_figure()
#tfig, tax = ternary.figure(scale=1,ax=ax)
tax = ternary.TernaryAxesSubplot(scale=1,ax=ax)
points = [get_coords_from_comp(c,tern_axes) for c in comps]
if vlim is None:
vmin=min(values)
vmax=max(values)
else:
vmin, vmax = vlim
tax.scatter(points,c=values,cmap=cmap,vmin=vmin,vmax=vmax,**scatter_kw)
tern_labels = ['CaO','Al$_2$O$_3$','BaO']
tax.right_corner_label(tern_labels[0],fontsize=corner_labelsize,va='center',offset=0.08+add_labeloffset)
tax.top_corner_label(tern_labels[1],fontsize=corner_labelsize,va='center',offset=0.05+add_labeloffset)
tax.left_corner_label(tern_labels[2],fontsize=corner_labelsize,va='center',offset=0.08+add_labeloffset)
tax.boundary(linewidth=1)
#tax.clear_matplotlib_ticks()
ax.axis('off')
if label_points==True:
for p,val in zip(points,values):
if pd.isnull(val):
disp = 'NA'
else:
disp = '{}'.format(int(round(val,0)))
tax.annotate(disp,p+point_labeloffset,size=point_labelsize,ha='center',va='bottom')
#add_colorbar(fig,label='NH3 Production Rate (mmol/g$\cdot$h)',vmin=min(values),vmax=max(values),cbrect=[0.9,0.2,0.03,0.67])
tax._redraw_labels()
return tax
def featurize_simplex(scale, featurizer, feature_cols=None, scaler=None,tern_axes=['Ca','Al','Ba']):
"""
Generate feature matrix for simplex (intended for making heatmaps)
Args:
scale: simplex scale. Determines point density
featurizer: matminer-like featurizer instance
feature_cols: subset of column names to use for features. If None, use all columns
scaler: fitted scaler instance. If None, feature matrix will not be scaled
tern_axes: ternary axes. Default ['Ca','Al','Ba']
Returns:
coords: list of simplex coordinates
X: feature matrix
"""
coords = [tup for tup in simplex_iterator(scale)]
comps = [get_comp_from_coords(c,tern_axes=tern_axes) for c in coords]
df = pd.DataFrame([[comp] for comp in comps],columns=['composition'])
featurizer.featurize_dataframe(df,col_id='composition',inplace=True)
if feature_cols is None:
X = df
else:
X = df.loc[:,feature_cols]
if scaler is not None:
X = pd.DataFrame(scaler.transform(X),columns=X.columns)
return coords, X
def predict_simplex(estimator, scale, featurizer=None, feature_cols=None, scaler=None,use_X=None,tern_axes=['Ca','Al','Ba'],metric='median'):
"""
Generate predictions for simplex (intended for making heatmaps)
Args:
estimator: fitted estimator
scale: simplex scale. Determines point density
featurizer: matminer-like featurizer instance
feature_cols: subset of column names to use for features. If None, use all columns
scaler: fitted scaler instance. If None, feature matrix will not be scaled
use_X: optional arg to provide feature matrix if already calculated.
If provided, featurizer, feature_cols, and scaler will be ignored
tern_axes: ternary axes. Default ['Ca','Al','Ba']
metric: if 'median', return point estimate. If 'iqr', return IQR of prediction
Returns:
coords: list of simplex coordinates
y: estimator predictions
"""
if use_X is None:
coords, X = featurize_simplex(scale,featurizer,feature_cols,scaler,tern_axes)
else:
coords = [tup for tup in simplex_iterator(scale)]
X = use_X
if type(X) == pd.core.frame.DataFrame:
X = X.values
# handle nans and infs
# find rows with any nan or inf values
bad_val_idx = np.max(np.array([np.max(np.isinf(X),axis=1),np.max(np.isnan(X),axis=1)]),axis=0)
if np.sum(bad_val_idx) > 0:
print('Warning: feature matrix contains nans or infs. Number of bad rows: {}'.format(np.sum(bad_val_idx)))
# set all features in bad rows to zero so that they don't break estimator.predict()
X[bad_val_idx] = 0
if metric=='median':
y = estimator.predict(X)
elif metric=='iqr':
lb,ub = predict_interval(estimator,X,0.682)
y = ub - lb
# set predictions for bad feature values to nan
y[bad_val_idx] = np.nan
return coords, y
def estimator_ternary_heatmap(scale, estimator, featurizer=None, feature_cols=None, scaler=None,use_X=None, style='triangular',
labelsize=11, add_labeloffset=0, cmap=None, ax=None,figsize=None, vlim=None,metric='median',
multiple=0.1, tick_kwargs={'tick_formats':'%.1f','offset':0.02},
tern_axes=['Ca','Al','Ba'],tern_labels = ['CaO','Al$_2$O$_3$','BaO']):
"""
Generate ternary heatmap of ML predictions
Args:
scale: simplex scale
estimator: sklearn estimator instance
featurizer: featurizer instance
feature_cols: subset of feature names used in model_eval
scaler: sklearn scaler instance
use_X: pre-calculated feature matrix; if passed, featurizer, feature_cols, and scaler are ignored
style: heatmap interpolation style
tern_axes: ternary axes. Only used for generating simplex compositions; ignored if use_X is supplied. Defaults to ['Ca','Al','Ba']
metric: if 'median', return point estimate. If 'iqr', return IQR of prediction
"""
coords, y = predict_simplex(estimator, scale, featurizer, feature_cols, scaler,use_X,tern_axes,metric)
if vlim is None:
vmin = min(y)
vmax = max(y)
else:
vmin,vmax = vlim
points = dict(zip([c[0:2] for c in coords],y))
if ax==None:
fig, ax = plt.subplots(figsize=figsize)
tfig, tax = ternary.figure(scale=scale,ax=ax)
else:
tax = ternary.TernaryAxesSubplot(scale=scale,ax=ax)
tax.heatmap(points,style=style,colorbar=False,cmap=cmap,vmin=vmin,vmax=vmax)
#rescale_ticks(tax,new_scale=axis_scale,multiple = multiple, **tick_kwargs)
tax.boundary()
tax.ax.axis('off')
tax.right_corner_label(tern_labels[0],fontsize=labelsize,va='center',offset=0.08+add_labeloffset)
tax.top_corner_label(tern_labels[1],fontsize=labelsize,va='center',offset=0.05+add_labeloffset)
tax.left_corner_label(tern_labels[2],fontsize=labelsize,va='center',offset=0.08+add_labeloffset)
tax._redraw_labels()
return tax
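# Hedged usage sketch: estimator, featurizer, and scaler are placeholders for a
# fitted sklearn regressor, a matminer-like featurizer, and a fitted scaler;
# scale=20 is an arbitrary point density chosen for illustration.
def _example_estimator_heatmap(estimator, featurizer, scaler):
    return estimator_ternary_heatmap(
        scale=20, estimator=estimator, featurizer=featurizer,
        feature_cols=featurizer.feature_labels(), scaler=scaler, cmap='viridis'
    )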
def feature_ternary_heatmap(scale, feature_name, featurizer=None, use_X=None, style='triangular',
labelsize=11, add_labeloffset=0, cmap=None, ax=None,figsize=None, vlim=None,
multiple=0.1, tick_kwargs={'tick_formats':'%.1f','offset':0.02},
tern_axes=['Ca','Al','Ba'],tern_labels = ['CaO','Al$_2$O$_3$','BaO']):
"""
"""
if use_X is None:
coords, X = featurize_simplex(scale,featurizer,feature_cols=featurizer.feature_labels(),tern_axes=tern_axes)
X = pd.DataFrame(X,columns=featurizer.feature_labels())
else:
coords = [tup for tup in simplex_iterator(scale)]
X = use_X
y = X.loc[:,feature_name]
if vlim is None:
vmin = min(y)
vmax = max(y)
else:
vmin,vmax = vlim
points = dict(zip([c[0:2] for c in coords],y))
if ax==None:
fig, ax = plt.subplots(figsize=figsize)
tfig, tax = ternary.figure(scale=scale,ax=ax)
else:
tax = ternary.TernaryAxesSubplot(scale=scale,ax=ax)
tax.heatmap(points,style=style,colorbar=False,cmap=cmap,vmin=vmin,vmax=vmax)
#rescale_ticks(tax,new_scale=axis_scale,multiple = multiple, **tick_kwargs)
tax.boundary()
tax.ax.axis('off')
tax.right_corner_label(tern_labels[0],fontsize=labelsize,va='center',offset=0.08+add_labeloffset)
tax.top_corner_label(tern_labels[1],fontsize=labelsize,va='center',offset=0.05+add_labeloffset)
tax.left_corner_label(tern_labels[2],fontsize=labelsize,va='center',offset=0.08+add_labeloffset)
tax._redraw_labels()
return tax
def ternary_scatter_vs_heatmap(scatter_comps,scatter_values, hmap_scale,hmap_estimator,vlim,cmap=None,
hmap_featurizer=None,hmap_feature_cols=None,hmap_scaler=None,hmap_use_X=None,
corner_labelsize=11,add_labeloffset=0,label_points=False):
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,5))
tax1 = plot_labeled_ternary(scatter_comps, scatter_values,ax=ax1,label_points=label_points,
corner_labelsize=corner_labelsize,add_labeloffset=add_labeloffset,vlim=vlim,cmap=cmap)
tax2 = estimator_ternary_heatmap(hmap_scale,hmap_estimator,hmap_featurizer,hmap_feature_cols,scaler=hmap_scaler,use_X=hmap_use_X,
ax=ax2,labelsize=corner_labelsize,add_labeloffset=add_labeloffset,vlim=vlim,cmap=cmap)
add_colorbar(fig,vlim=vlim,label='Max NH$_3$ Production Rate',cbrect=[0.9,0.2,0.02,0.7])
return tax1,tax2
def scatter_over_heatmap(scatter_comps, scatter_values,hmap_scale,hmap_estimator,vlim, ax=None, cmap=None,metric='median',
hmap_featurizer=None,hmap_feature_cols=None,hmap_scaler=None,hmap_use_X=None,point_labeloffset=[0,0.02,0],
corner_labelsize=11,add_labeloffset=0,marker='o',markersize=12,scatter_labels=None,scatter_labelsize=12):
"""
"""
if ax is None:
fig, ax = plt.subplots(figsize=(8,8))
tax = estimator_ternary_heatmap(hmap_scale,hmap_estimator,hmap_featurizer,hmap_feature_cols,scaler=hmap_scaler,use_X=hmap_use_X,
ax=ax,labelsize=corner_labelsize,add_labeloffset=add_labeloffset,vlim=vlim,metric=metric,cmap=cmap)
if scatter_labels is None:
# write blank labels
scatter_labels = ['']*len(scatter_comps)
points = [hmap_scale*get_coords_from_comp(c) for c in scatter_comps]
for label, point, color_val in zip(scatter_labels,points,scatter_values):
# must use tax.plot(); tax.scatter() does not work on top of heatmap
if cmap is None:
cmap = plt.get_cmap(plt.rcParams['image.cmap'])
color = cmap((color_val-vlim[0])/(vlim[1]-vlim[0]))
tax.plot([point],c=color,marker=marker,markersize=markersize,ls='',mec='white')
tax.annotate('{}'.format(label),point+np.array(point_labeloffset)*hmap_scale,size=scatter_labelsize,ha='center',va='bottom',color='white')
return tax
def draw_guidelines(tax,**line_kw):
"""
Add phase boundary lines and/or guidelines to ternary
Args:
tax: ternary axes
color: line color
which: which lines to draw. Options: 'all','phase','guide'
line_kw: kwargs for tax.line()
"""
ped_data = pd.read_csv('../data/BCA_PED_coords.csv',skipfooter=2,engine='python')
for col in ['start','end']:
ped_data[f'{col}_comp'] = ped_data[f'{col}'].map(lambda x: mg.Composition(BCA_formula_from_str(x)))
ped_data[f'{col}_coords'] = ped_data[f'{col}_comp'].map(lambda x:get_coords_from_comp(x)*tax.get_scale())
for i, row in ped_data[ped_data['draw_on_prodplot']==1].iterrows():
tax.line(row['start_coords'],row['end_coords'],**line_kw)
# ts = tax.get_scale()
# #set up points
# bca934 = np.array([0.19,0.25,0.56])*ts
# b3a = np.array([0,.25,.75])*ts
# c3a = np.array([.75,.25,0])*ts
# ba = np.array([0,1/2,1/2])*ts
# c = [ts,0,0]
# b = [0,0,ts]
# if which in ('all','guide'):
# #guidelines
# tax.line(b3a,c3a,ls=':',c=color,**line_kw)
# tax.line(b,bca934,ls=':',c=color,**line_kw)
# if which in ('all','phase'):
# #phase boundaries
# tax.line(c,ba,ls='--',c=color,**line_kw)
# tax.line(ba,bca934,ls='--',c=color,**line_kw)
# tax.line(bca934,c,ls='--',c=color,**line_kw)
```
#### File: Ru-BCA/modules/model_eval.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from copy import deepcopy
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score, mean_absolute_error
def multi_min(arrays):
"""
Return the minimum scalar value of multiple arrays
Args:
arrays: list of numpy arrays
"""
mins = []
for arr in arrays:
mins.append(np.min(arr))
return min(mins)
def multi_max(arrays):
"""
Return the maximum scalar value of multiple arrays
Args:
arrays: list of numpy arrays
"""
maxs = []
for arr in arrays:
maxs.append(np.max(arr))
return max(maxs)
class repeating_KFold():
"""
KFold splitter that performs multiple independent splits of the dataset. For use with sklearn and mlxtend functions/classes that take a splitter object
Intended for use with shuffle=True to reduce bias for one particular train-test split
Args:
repeat: int, number of times to repeat
n_splits: number of splits
shuffle: if True, shuffle dataset before splitting
random_state: specify a random state for shuffle
"""
def __init__(self,repeat,n_splits,shuffle=True,random_state=None):
self.repeat = repeat
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
        self.kf = KFold(n_splits, shuffle=shuffle)
# set seeds for consistency if random state specified
if self.random_state is not None:
r = np.random.RandomState(self.random_state)
self.seeds = r.choice(np.arange(0,repeat*10,1),self.repeat,replace=False)
else:
self.seeds = [None]*self.repeat
def split(self,X,y=None,groups=None):
for n,seed in zip(range(self.repeat),self.seeds):
self.kf.random_state = seed
for train,test in self.kf.split(X,y,groups):
yield train,test
def get_n_splits(self,X=None,y=None,groups=None):
return self.n_splits*self.repeat
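# Hedged usage sketch (not part of the original module): repeating_KFold can be
# passed anywhere sklearn accepts a cv splitter object. The estimator and data
# are placeholders supplied by the caller.
def _example_repeating_kfold(estimator, X, y):
    from sklearn.model_selection import cross_val_score
    cv = repeating_KFold(repeat=3, n_splits=5, shuffle=True, random_state=17)
    # 3 repeats x 5 folds = 15 scores
    return cross_val_score(estimator, X, y, cv=cv)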
def KFold_cv(estimator,X,y,sample_weight=None,n_splits=5,pipeline_learner_step='auto',pred_int=0,random_state=None):
"""
Perform k-fold cross-validation
Args:
estimator: sklearn estimator instance
X: data matrix (nxm)
y: response (n-vector)
sample_weight: weights for fitting data. If None, defaults to equal weights
n_splits: number of folds. Default 5
pipeline_learner_step: if estimator is a Pipeline instance, index of the learner step
pred_int: prediction interval to calculate (i.e., 0.5 indicates 50% interval). If 0, do not calculate prediction interval
random_state: random state for KFold shuffle
Returns:
        actual: actual y values for test folds
pred: predicted y values for test folds
train_scores: list of training r2 scores
test_scores: list of test r2 scores
pred_lb: lower bound of prediction interval. Vector of zeros if pred_int==0
pred_ub: upper bound of prediction interval. Vector of zeros if pred_int==0
"""
if random_state is not None:
kf = KFold(n_splits,shuffle=True,random_state=random_state)
else:
kf = KFold(n_splits,shuffle=True)
if len(X)!=len(y):
raise ValueError('X and y must have same first dimension')
# if y is pandas series, convert to array. No info required from Series object
if type(y)==pd.core.series.Series:
y = y.values
train_scores = np.empty(n_splits)
test_scores = np.empty(n_splits)
actual = np.zeros_like(y)
pred = np.zeros_like(y)
pred_lb = np.zeros_like(y)
pred_ub = np.zeros_like(y)
for i, (train_index,test_index) in enumerate(kf.split(X)):
if type(X)==pd.core.frame.DataFrame:
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
else:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
if sample_weight is not None:
w_train = sample_weight[train_index]
w_test = sample_weight[test_index]
if sample_weight is not None:
if type(estimator)==Pipeline:
#if estimator is a Pipeline, need to specify name of learning step in fit_params for sample_weight
if pipeline_learner_step=='auto':
# determine which step is the learner based on existence of _estimator_type attribute
step_objects = [step[1] for step in estimator.steps]
objdirs = [dir(obj) for obj in step_objects]
learner_idx = np.where(['_estimator_type' in d for d in objdirs])[0]
if len(learner_idx)==1:
pipeline_learner_step = learner_idx[0]
else:
raise Exception("Can''t determine pipeline_learner_step. Must specify manually")
est_name = estimator.steps[pipeline_learner_step][0]
estimator.fit(X_train,y_train,**{f'{est_name}__sample_weight':w_train})
else:
estimator.fit(X_train,y_train,sample_weight=w_train)
train_scores[i] = estimator.score(X_train,y_train,sample_weight=w_train)
test_scores[i] = estimator.score(X_test,y_test,sample_weight=w_test)
else:
# not all estimators' fit() methods accept sample_weight arg - can't just pass None
estimator.fit(X_train,y_train)
train_scores[i] = estimator.score(X_train,y_train)
test_scores[i] = estimator.score(X_test,y_test)
actual[test_index] = y_test
pred[test_index] = estimator.predict(X_test)
if pred_int > 0:
pred_lb[test_index],pred_ub[test_index] = predict_interval(estimator,X_test,pred_int)
return actual, pred, train_scores, test_scores, pred_lb, pred_ub
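# Hedged usage sketch: RandomForestRegressor is a placeholder learner, chosen
# because predict_interval (used when pred_int > 0) expects an ensemble with an
# estimators_ attribute; any such sklearn regressor should work the same way.
def _example_kfold_cv(X, y):
    from sklearn.ensemble import RandomForestRegressor
    est = RandomForestRegressor(n_estimators=100, random_state=0)
    actual, pred, train_scores, test_scores, lb, ub = KFold_cv(
        est, X, y, n_splits=5, pred_int=0.682, random_state=0
    )
    return r2_score(actual, pred)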
def repeated_KFold_cv(estimator,X,y,repeat,sample_weight=None,n_splits=5,pipeline_learner_step='auto',pred_int=0,random_state=None):
"""
Perform k-fold cross-validation with multiple random splits
Args:
estimator: sklearn estimator instance
X: data matrix (nxm)
y: response (n-vector)
repeat: number of times to repeat KFold CV
sample_weight: weights for fitting data. If None, defaults to equal weights
n_splits: number of folds. Default 5
pipeline_learner_step: if estimator is a Pipeline instance, index of the learner step
pred_int: prediction interval to calculate (i.e., 0.5 indicates 50% interval). If 0, do not calculate prediction interval
random_state: random state for KFold shuffle
Returns:
actuals: list of actual y vectors for all CV repetitions
preds: list of predicted y vectors for all CV repetitions
agg_test_scores: list of r2 scores for all CV repetitions
agg_test_maes: list of MAEs for all CV repetitions
pred_lbs: list of prediction interval lower bounds for all CV repetitions. All zeros if pred_int==0
pred_ubs: list of prediction interval upper bounds for all CV repetitions. All zeros if pred_int==0
"""
actuals = np.empty((repeat,len(y)))
preds = np.empty_like(actuals)
agg_test_scores = np.empty(repeat)
agg_test_maes = np.empty(repeat)
pred_lbs = np.empty_like(actuals)
pred_ubs = np.empty_like(actuals)
# set seeds for consistency if specified
if random_state is not None:
r = np.random.RandomState(random_state)
seeds = r.choice(np.arange(0,repeat*10,1),repeat,replace=False)
else:
seeds = [None]*repeat
for n in range(repeat):
act,pred,train,test,lb,ub = KFold_cv(estimator,X,y,sample_weight,n_splits,pipeline_learner_step,pred_int,random_state=seeds[n])
agg_test_score = r2_score(act,pred,sample_weight=sample_weight)
agg_mae = mean_absolute_error(act,pred,sample_weight=sample_weight)
actuals[n] = act
preds[n] = pred
agg_test_scores[n] = agg_test_score
agg_test_maes[n] = agg_mae
pred_lbs[n] = lb
pred_ubs[n] = ub
return actuals, preds, agg_test_scores, agg_test_maes, pred_lbs, pred_ubs
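# Hedged usage sketch (placeholder estimator): run 3 independent 5-fold CVs and
# summarize the spread of the aggregate r2 scores across repetitions.
def _example_repeated_kfold_cv(X, y):
    from sklearn.ensemble import RandomForestRegressor
    est = RandomForestRegressor(n_estimators=100, random_state=0)
    actuals, preds, scores, maes, lbs, ubs = repeated_KFold_cv(
        est, X, y, repeat=3, n_splits=5, random_state=0
    )
    return np.mean(scores), np.std(scores)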
def KFold_pva(estimator,X,y,sample_weight=None,n_splits=5,pipeline_learner_step='auto',random_state=None,ax=None,xerr=None,pred_int=0,show_metrics=['r2','mae'],text_kw={},s=10,line_kw={'zorder':0,'c':'#1f77b4'},**scatter_kw):
"""
Perform k-fold cross-validation and plot predicted vs. actual for test set
Args:
estimator: sklearn estimator instance
X: data matrix (nxm)
y: response (n-vector)
sample_weight: vector of sample weights. If None, equal weights assigned
n_splits: number of folds. Default 5
random_state: random state for KFold shuffle
ax: axis on which to plot
show_metrics: list of metrics to calculate and annotate on plot. Options: 'r2', 'mae'
text_kw: kwargs for metric text; passed to plt.text()
s: marker size
line_kw: kwargs for ideal x=y line
scatter_kw: kwargs to pass to plt.scatter()
Returns:
train_scores: k-array of train scores
test_scores: k-array of test scores
agg_test_score: overall test score (r2) considering all test folds together
"""
y, y_pred, train_scores, test_scores, pred_lb, pred_ub = KFold_cv(estimator,X,y,sample_weight,n_splits,pipeline_learner_step,pred_int,random_state)
agg_test_score = r2_score(y,y_pred,sample_weight=sample_weight)
if pred_int > 0:
yerr = np.array([y_pred-pred_lb,pred_ub-y_pred])
else:
yerr = None
ax = pred_v_act_plot(y,y_pred,sample_weight,ax,xerr,yerr,show_metrics,text_kw,s,line_kw,**scatter_kw)
return train_scores, test_scores, agg_test_score
def repeated_KFold_pva(estimator,X,y,repeat,plot_type='series',sample_weight=None,n_splits=5,pipeline_learner_step=1,random_state=None,
ax=None,xerr=None,pred_int=0,show_metrics=['r2','mae'],text_kw={},s=10,line_kw={'zorder':0,'c':'#1f77b4'},**scatter_kw):
"""
    Perform repeated k-fold cross-validation and plot predicted vs. actual for the test folds
Args:
estimator: sklearn estimator instance
X: data matrix (nxm)
y: response (n-vector)
repeat: number of times to repeat KFold CV
sample_weight: weights for fitting data. If None, defaults to equal weights
n_splits: number of folds. Default 5
pipeline_learner_step: if estimator is a Pipeline instance, index of the learner step
random_state: random state to determine random seeds for KFold shuffles
ax: axis on which to plot
show_metrics: list of metrics to calculate and annotate on plot. Options: 'r2', 'mae'
text_kw: kwargs for metric text; passed to plt.text()
s: marker size
scatter_kw: kwargs to pass to plt.scatter()
    Returns:
        actuals: list of actual y vectors for all CV repetitions
        preds: list of predicted y vectors for all CV repetitions
        agg_test_scores: list of r2 scores for all CV repetitions
        agg_test_maes: list of MAEs for all CV repetitions
        pred_lbs: list of prediction interval lower bounds for all CV repetitions. All zeros if pred_int==0
        pred_ubs: list of prediction interval upper bounds for all CV repetitions. All zeros if pred_int==0
"""
actuals, preds, agg_test_scores, agg_test_maes, pred_lbs, pred_ubs = repeated_KFold_cv(estimator,X,y,repeat,sample_weight,n_splits,pipeline_learner_step,pred_int,random_state)
if plot_type=='series':
# plot each repetition as a separate series
for y,y_pred,lb,ub in zip(actuals, preds,pred_lbs,pred_ubs):
if pred_int > 0:
yerr = np.array([y_pred-lb,ub-y_pred])
else:
                yerr = None
ax = pred_v_act_plot(y,y_pred,sample_weight,ax,xerr,yerr,show_metrics=None,text_kw=text_kw,s=s,line_kw=line_kw,**scatter_kw)
elif plot_type=='mean':
# average predicted values for each point across repetitions
y = np.mean(actuals,axis=0)
y_pred = np.mean(preds,axis=0)
if pred_int > 0:
pred_std = np.std(preds,axis=0)
lerr = np.mean(preds-pred_lbs,axis=0)
uerr = np.mean(pred_ubs-preds,axis=0)
# add the variance between CV repetitions to the prediction interval
yerr = np.array([(pred_std**2 + lerr**2)**0.5,(pred_std**2 + uerr**2)**0.5])
else:
yerr = None
ax = pred_v_act_plot(y,y_pred,sample_weight,ax,xerr,yerr,show_metrics=None,text_kw=text_kw,s=s,line_kw=line_kw,**scatter_kw)
# metrics need to be aggregated across repetitions
metric_txt = ''
for metric in show_metrics:
if metric=='r2':
metric_txt += '$r^2: \ {}$\n'.format(round(np.mean(agg_test_scores),3))
elif metric=='mae':
mae_scale = int(np.ceil(np.log10(np.mean(agg_test_maes))))
if mae_scale < 3:
mae_round = 3 - mae_scale
else:
mae_round = 0
metric_txt += 'MAE: {}\n'.format(round(np.mean(agg_test_maes),mae_round))
if len(metric_txt) > 0:
x = text_kw.pop('x',0.05)
y = text_kw.pop('y',0.95)
ax.text(x,y,metric_txt,transform=ax.transAxes,va='top',**text_kw)
return actuals, preds, agg_test_scores, agg_test_maes, pred_lbs, pred_ubs
def plot_pva(estimator,X,y,sample_weight=None,ax=None,xerr=None,pred_int=0,show_metrics=['r2','mae'],text_kw={},s=10,line_kw={'zorder':0,'c':'#1f77b4'},**scatter_kw):
"""
Plot predicted vs. actual for fitted estimator
Args:
estimator: fitted sklearn estimator instance
X: data matrix (nxm)
y: response (n-vector)
sample_weight: sample weights. Only used to calculate metrics (r2, mae)
ax: axis on which to plot
xerr: scalar or array of x (actual) errors/uncertainties
        pred_int: prediction interval to calculate (i.e., 0.5 indicates 50% interval). If 0, do not calculate prediction interval
show_metrics: list of metrics to calculate and annotate on plot. Options: 'r2', 'mae'
text_kw: kwargs for metric text; passed to plt.text()
s: marker size
"""
y_pred = estimator.predict(X)
if pred_int > 0:
lb,ub = predict_interval(estimator,X,pred_int)
yerr = np.array([y_pred-lb,ub-y_pred])
else:
yerr = None
ax = pred_v_act_plot(y,y_pred,sample_weight,ax,xerr,yerr,show_metrics,text_kw,s,line_kw,**scatter_kw)
def pred_v_act_plot(y,y_pred,sample_weight=None,ax=None,xerr=None,yerr=None,show_metrics=['r2','mae'],text_kw={},s=10,line_kw={'zorder':0,'c':'#1f77b4'},legend=True,**scatter_kw):
"""
Plot predicted vs. actual
Args:
y: actual values
y_pred: predictions
sample_weight: sample weights. Only used to calculate metrics (r2, mae)
ax: axis on which to plot
xerr: scalar or array of x (actual) errors/uncertainties
yerr: scalar or array of y (prediction) uncertainties
show_metrics: list of metrics to calculate and annotate on plot. Options: 'r2', 'mae'
text_kw: kwargs for metric text; passed to plt.text()
s: marker size
"""
if ax is None:
fig, ax = plt.subplots()
if xerr is None and yerr is None:
ax.scatter(y,y_pred,s=s,**scatter_kw)
else:
ax.errorbar(y,y_pred,xerr=xerr,yerr=yerr,ms=s,**scatter_kw)
axmin = multi_min([y,y_pred])
axmax = multi_max([y,y_pred])
ax.plot([axmin,axmax],[axmin,axmax],**line_kw,label='Ideal')
metric_txt = ''
if show_metrics is not None:
for metric in show_metrics:
if metric=='r2':
r2 = r2_score(y,y_pred,sample_weight=sample_weight)
metric_txt += '$r^2: \ {}$\n'.format(round(r2,3))
elif metric=='mae':
test_mae = mean_absolute_error(y,y_pred,sample_weight=sample_weight)
mae_scale = int(np.ceil(np.log10(test_mae)))
if mae_scale < 3:
mae_round = 3 - mae_scale
else:
mae_round = 0
metric_txt += 'MAE: {}\n'.format(round(test_mae,mae_round))
if len(metric_txt) > 0:
x = text_kw.pop('x',0.05)
y = text_kw.pop('y',0.95)
ax.text(x,y,metric_txt,transform=ax.transAxes,va='top',**text_kw)
ax.set_xlabel('Actual')
ax.set_ylabel('Predicted')
if legend:
ax.legend(loc='lower right')
return ax
def predict_interval(model, X, percent=0.682):
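    """
    Estimate a prediction interval from the spread of individual ensemble members.
    Note: assumes model is a fitted ensemble (e.g. random forest) that exposes an
    estimators_ attribute; the interval is taken as percentiles of the member predictions.
    Args:
        model: fitted ensemble estimator with an estimators_ attribute
        X: data matrix (nxm)
        percent: interval width as a fraction (default 0.682, i.e. roughly +/- 1 sigma)
    Returns:
        lb: lower bound of prediction interval
        ub: upper bound of prediction interval
    """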
if type(X)==pd.core.frame.DataFrame:
X = X.values
y_pred = np.array([tree.predict(X) for tree in model.estimators_])
lper = 100*(0.5 - percent/2)
uper = 100*(0.5 + percent/2)
lb = np.percentile(y_pred,lper,axis=0)
ub = np.percentile(y_pred,uper,axis=0)
# y_pred = model.predict(X)
# lb = y_pred - 10
# ub = y_pred + 10
return lb,ub
class GridSearchRepeatedCV():
def __init__(self,estimator,param_grid):
self.estimator = deepcopy(estimator)
self.param_grid = param_grid
def fit(self,X,y,repeat,sample_weight=None,n_splits=5,pipeline_learner_step='auto',random_state=None):
meshgrid = np.meshgrid(*self.param_grid.values())
self.param_meshgrid_ = dict(zip(self.param_grid.keys(),meshgrid))
self.grid_scores_ = np.zeros_like(meshgrid[0],dtype='float')
self.grid_params_ = np.empty_like(meshgrid[0],dtype='object')
# iterate over parameter combinations
for idx, tmpvalue in np.ndenumerate(meshgrid[0]):
# get parameter values and set
params = {}
for param_name, param_array in zip(self.param_grid.keys(),meshgrid):
params[param_name] = param_array[idx]
self.estimator.set_params(**params)
# perform cv
y_act, y_pred, agg_scores, agg_maes,lbs,ubs = repeated_KFold_cv(self.estimator,X,y,repeat,sample_weight,n_splits,pipeline_learner_step,pred_int=0,random_state=random_state)
self.grid_scores_[idx] = np.mean(agg_scores)
self.grid_params_[idx] = params
# get best index
self.best_index_ = np.argmax(self.grid_scores_)
self.best_params_ = self.grid_params_.ravel()[self.best_index_]
self.best_score_ = np.max(self.grid_scores_)
@property
def grid_results_(self):
scores = self.grid_scores_.ravel()
params = self.grid_params_.ravel()
results = [(p,s) for p,s in zip(params,scores)]
return results
@property
def ranked_results(self):
results = self.grid_results_
ranked_results = sorted(results,key=lambda x: x[1],reverse=True)
ranked_results = [(i+1,p,s) for i,(p,s) in enumerate(ranked_results)]
return ranked_results
@property
def result_df(self):
scores = self.grid_scores_.ravel()
params = self.grid_params_.ravel()
df = pd.DataFrame(list(params))
df['score'] = scores
return df
def plot_grid_results(self,ax=None,fixed_params={},mark_best=True,colorbar=True,**scatter_kw):
filter_meshgrid = self.param_meshgrid_.copy()
filter_param = self.grid_params_.copy()
filter_score = self.grid_scores_.copy()
for param, value in fixed_params.items():
idx = np.where(filter_meshgrid[param]==value)
for p in filter_meshgrid.keys():
filter_meshgrid[p] = filter_meshgrid[p][idx]
filter_param = filter_param[idx]
filter_score = filter_score[idx]
plot_params = [p for p in self.param_grid.keys() if p not in fixed_params.keys()]
if len(plot_params) > 3:
raise Exception('Too many free parameters to plot')
param_arrays = []
for param in plot_params:
param_arrays.append([gp[param] for gp in filter_param.ravel()])
scores = filter_score.ravel()
if ax is None and len(plot_params) < 3:
fig, ax = plt.subplots()
elif ax is None and len(plot_params)==3:
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
else:
fig = ax.get_figure()
if len(plot_params)==1:
ax.scatter(param_arrays[0],scores,**scatter_kw)
ax.set_xlabel(plot_params[0])
ax.set_ylabel('CV Score')
if mark_best:
# mark the best parameter value with a vertical line
best_param = param_arrays[0][np.argmax(scores)]
ax.axvline(best_param,c='k',lw=1)
elif len(plot_params)==2:
p = ax.scatter(param_arrays[0],param_arrays[1],c=scores,**scatter_kw)
ax.set_xlabel(plot_params[0])
ax.set_ylabel(plot_params[1])
if colorbar:
fig.colorbar(p,ax=ax,label='CV Score')
if mark_best:
                # outline the best point in black
best_idx = np.argmax(scores)
ax.scatter(param_arrays[0][best_idx],param_arrays[1][best_idx],facecolors='none',edgecolors='k',**scatter_kw)
elif len(plot_params)==3:
p = ax.scatter(param_arrays[0],param_arrays[1],param_arrays[2],c=scores,**scatter_kw)
ax.set_xlabel(plot_params[0])
ax.set_ylabel(plot_params[1])
ax.set_zlabel(plot_params[2])
if colorbar:
fig.colorbar(p,ax=ax,label='CV Score')
if mark_best:
best_idx = np.argmax(scores)
ax.scatter(param_arrays[0][best_idx],param_arrays[1][best_idx],param_arrays[2][best_idx],facecolors='none',edgecolors='k',**scatter_kw)
return ax
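# Hedged usage sketch: the parameter grid below is an arbitrary placeholder for
# any estimator hyperparameters; scoring follows repeated_KFold_cv (mean r2
# across repetitions).
def _example_grid_search(X, y):
    from sklearn.ensemble import RandomForestRegressor
    grid = GridSearchRepeatedCV(
        RandomForestRegressor(random_state=0),
        param_grid={'n_estimators': [50, 100], 'max_depth': [3, 5]},
    )
    grid.fit(X, y, repeat=3, n_splits=5, random_state=0)
    return grid.best_params_, grid.best_score_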
```
|
{
"source": "jdhughes-usgs/mf6bmipaper-saghen",
"score": 3
}
|
#### File: sagehen-mf6/common/figspecs.py
```python
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
class USGSFigure:
def __init__(
self, figure_type="map", family="Arial Narrow", font_path=None,
verbose=False
):
"""Create a USGSFigure object
Parameters
----------
figure_type : str
figure type ("map", "graph")
family : str
font family name (default is Arial Narrow)
verbose : bool
            boolean that defines if debug information should be written
"""
# initialize members
self.family = None
self.figure_type = None
self.verbose = verbose
self.set_font_family(family=family, font_path=font_path)
self.set_specifications(figure_type=figure_type)
def set_specifications(self, figure_type="map"):
"""Set matplotlib parameters
Parameters
----------
figure_type : str
figure type ("map", "graph")
Returns
-------
"""
self.figure_type = self._validate_figure_type(figure_type)
def set_font_family(self, family="Arial Narrow", font_path=None):
"""Set font family
Parameters
----------
family : str
font family (default is Arial Narrow)
font_path : str
path to fonts not available to matplotlib (not implemented)
Returns
-------
"""
if font_path is not None:
errmsg = "specification of font_path is not implemented"
            raise NotImplementedError(errmsg)
self.family = self._set_fontfamily(family)
def graph_legend(self, ax=None, handles=None, labels=None, **kwargs):
"""Add a USGS-style legend to a matplotlib axis object
Parameters
----------
ax : axis object
matplotlib axis object (default is None)
handles : list
list of legend handles
labels : list
list of labels for legend handles
kwargs : kwargs
matplotlib legend kwargs
Returns
-------
leg : object
matplotlib legend object
"""
if ax is None:
ax = plt.gca()
font = self._set_fontspec(bold=True, italic=False)
if handles is None or labels is None:
handles, labels = ax.get_legend_handles_labels()
leg = ax.legend(handles, labels, prop=font, **kwargs)
# add title to legend
if "title" in kwargs:
title = kwargs.pop("title")
else:
title = None
leg = self.graph_legend_title(leg, title=title)
return leg
def graph_legend_title(self, leg, title=None):
"""Set the legend title for a matplotlib legend object
Parameters
----------
leg : legend object
matplotlib legend object
title : str
title for legend
Returns
-------
leg : object
matplotlib legend object
"""
if title is None:
title = "EXPLANATION"
elif title.lower() == "none":
title = None
font = self._set_fontspec(bold=True, italic=False)
leg.set_title(title, prop=font)
return leg
def heading(self, ax=None, letter=None, heading=None, x=0.00, y=1.01):
"""Add a USGS-style heading to a matplotlib axis object
Parameters
----------
ax : axis object
matplotlib axis object (default is None)
letter : str
string that defines the subplot (A, B, C, etc.)
heading : str
text string
x : float
location of the heading in the x-direction in normalized plot dimensions
ranging from 0 to 1 (default is 0.00)
y : float
location of the heading in the y-direction in normalized plot dimensions
ranging from 0 to 1 (default is 1.01)
Returns
-------
text : object
matplotlib text object
"""
if ax is None:
ax = plt.gca()
text = None
if letter is not None:
font = self._set_fontspec(bold=True, italic=True)
if heading is None:
letter = letter.replace(".", "")
else:
letter = letter.rstrip()
if letter[-1] != ".":
letter += "."
letter += " "
ax.text(
x,
y,
letter,
va="bottom",
ha="left",
fontdict=font,
transform=ax.transAxes,
)
bbox = ax.get_window_extent().transformed(
plt.gcf().dpi_scale_trans.inverted()
)
width = bbox.width * 25.4 # inches to mm
x += len(letter) * 1.0 / width
if heading is not None:
font = self._set_fontspec(bold=True, italic=False)
text = ax.text(
x,
y,
heading,
va="bottom",
ha="left",
fontdict=font,
transform=ax.transAxes,
)
return text
def add_text(
self,
ax=None,
text="",
x=0.0,
y=0.0,
transform=True,
bold=True,
italic=True,
fontsize=9,
ha="left",
va="bottom",
**kwargs
):
"""Add USGS-style text to a axis object
Parameters
----------
ax : axis object
matplotlib axis object (default is None)
text : str
text string
x : float
x-location of text string (default is 0.)
y : float
y-location of text string (default is 0.)
transform : bool
boolean that determines if a transformed (True) or data (False) coordinate
system is used to define the (x, y) location of the text string
(default is True)
bold : bool
boolean indicating if bold font (default is True)
italic : bool
boolean indicating if italic font (default is True)
fontsize : int
font size (default is 9 points)
ha : str
matplotlib horizontal alignment keyword (default is left)
va : str
matplotlib vertical alignment keyword (default is bottom)
kwargs : dict
dictionary with valid matplotlib text object keywords
Returns
-------
text_obj : object
matplotlib text object
"""
if ax is None:
ax = plt.gca()
if transform:
transform = ax.transAxes
else:
transform = ax.transData
font = self._set_fontspec(bold=bold, italic=italic, fontsize=fontsize)
text_obj = ax.text(
x, y, text, va=va, ha=ha, fontdict=font, transform=transform, **kwargs
)
return text_obj
def add_annotation(
self,
ax=None,
text="",
xy=None,
xytext=None,
bold=True,
italic=True,
fontsize=9,
ha="left",
va="bottom",
**kwargs
):
"""Add an annotation to a axis object
Parameters
----------
ax : axis object
matplotlib axis object (default is None)
text : str
text string
xy : tuple
tuple with the location of the annotation (default is None)
xytext : tuple
tuple with the location of the text
bold : bool
boolean indicating if bold font (default is True)
italic : bool
boolean indicating if italic font (default is True)
fontsize : int
font size (default is 9 points)
ha : str
matplotlib horizontal alignment keyword (default is left)
va : str
matplotlib vertical alignment keyword (default is bottom)
kwargs : dict
dictionary with valid matplotlib annotation object keywords
Returns
-------
ann_obj : object
matplotlib annotation object
"""
if ax is None:
ax = plt.gca()
if xy is None:
xy = (0.0, 0.0)
if xytext is None:
xytext = (0.0, 0.0)
font = self._set_fontspec(bold=bold, italic=italic, fontsize=fontsize)
# add font information to kwargs
if kwargs is None:
kwargs = font
else:
for key, value in font.items():
kwargs[key] = value
# create annotation
ann_obj = ax.annotate(text, xy, xytext, va=va, ha=ha, **kwargs)
return ann_obj
def remove_edge_ticks(self, ax=None):
"""Remove unnecessary ticks on the edges of the plot
Parameters
----------
ax : axis object
matplotlib axis object (default is None)
Returns
-------
ax : axis object
matplotlib axis object
"""
if ax is None:
ax = plt.gca()
# update tick objects
plt.draw()
# get min and max value and ticks
ymin, ymax = ax.get_ylim()
# check for condition where y-axis values are reversed
if ymax < ymin:
y = ymin
ymin = ymax
ymax = y
yticks = ax.get_yticks()
if self.verbose:
print("y-axis: ", ymin, ymax)
print(yticks)
# remove edge ticks on y-axis
ticks = ax.yaxis.majorTicks
for iloc in [0, -1]:
if np.allclose(float(yticks[iloc]), ymin):
                ticks[iloc].tick1line.set_visible(False)
                ticks[iloc].tick2line.set_visible(False)
if np.allclose(float(yticks[iloc]), ymax):
                ticks[iloc].tick1line.set_visible(False)
                ticks[iloc].tick2line.set_visible(False)
# get min and max value and ticks
xmin, xmax = ax.get_xlim()
# check for condition where x-axis values are reversed
if xmax < xmin:
x = xmin
xmin = xmax
xmax = x
xticks = ax.get_xticks()
if self.verbose:
print("x-axis: ", xmin, xmax)
print(xticks)
# remove edge ticks on y-axis
ticks = ax.xaxis.majorTicks
for iloc in [0, -1]:
if np.allclose(float(xticks[iloc]), xmin):
                ticks[iloc].tick1line.set_visible(False)
                ticks[iloc].tick2line.set_visible(False)
if np.allclose(float(xticks[iloc]), xmax):
                ticks[iloc].tick1line.set_visible(False)
                ticks[iloc].tick2line.set_visible(False)
return ax
# protected methods
# protected method
def _validate_figure_type(self, figure_type):
"""Set figure type after validation of specified figure type
Parameters
----------
figure_type : str
figure type ("map", "graph")
Returns
-------
figure_type : str
validated figure_type
"""
# validate figure type
valid_types = ("map", "graph")
if figure_type not in valid_types:
errmsg = "invalid figure_type specified ({}) ".format(
figure_type
) + "valid types are '{}'.".format(", ".join(valid_types))
raise ValueError(errmsg)
# set figure_type
if figure_type == "map":
self._set_map_specifications()
elif figure_type == "graph":
self._set_map_specifications()
return figure_type
# protected method
def _set_graph_specifications(self):
"""Set matplotlib rcparams to USGS-style specifications for graphs
Returns
-------
"""
rc_dict = {
"font.family": self.family,
"font.size": 7,
"axes.labelsize": 9,
"axes.titlesize": 9,
"axes.linewidth": 0.5,
"xtick.labelsize": 8,
"xtick.top": True,
"xtick.bottom": True,
"xtick.major.size": 7.2,
"xtick.minor.size": 3.6,
"xtick.major.width": 0.5,
"xtick.minor.width": 0.5,
"xtick.direction": "in",
"ytick.labelsize": 8,
"ytick.left": True,
"ytick.right": True,
"ytick.major.size": 7.2,
"ytick.minor.size": 3.6,
"ytick.major.width": 0.5,
"ytick.minor.width": 0.5,
"ytick.direction": "in",
"pdf.fonttype": 42,
"savefig.dpi": 300,
"savefig.transparent": True,
"legend.fontsize": 9,
"legend.frameon": False,
"legend.markerscale": 1.0,
}
mpl.rcParams.update(rc_dict)
# protected method
def _set_map_specifications(self):
"""Set matplotlib rcparams to USGS-style specifications for maps
Returns
-------
"""
rc_dict = {
"font.family": self.family,
"font.size": 7,
"axes.labelsize": 9,
"axes.titlesize": 9,
"axes.linewidth": 0.5,
"xtick.labelsize": 7,
"xtick.top": True,
"xtick.bottom": True,
"xtick.major.size": 7.2,
"xtick.minor.size": 3.6,
"xtick.major.width": 0.5,
"xtick.minor.width": 0.5,
"xtick.direction": "in",
"ytick.labelsize": 7,
"ytick.left": True,
"ytick.right": True,
"ytick.major.size": 7.2,
"ytick.minor.size": 3.6,
"ytick.major.width": 0.5,
"ytick.minor.width": 0.5,
"ytick.direction": "in",
"pdf.fonttype": 42,
"savefig.dpi": 300,
"savefig.transparent": True,
"legend.fontsize": 9,
"legend.frameon": False,
"legend.markerscale": 1.0,
}
mpl.rcParams.update(rc_dict)
# protected method
def _set_fontspec(self, bold=True, italic=True, fontsize=9):
"""Create fontspec dictionary for matplotlib pyplot objects
Parameters
----------
bold : bool
boolean indicating if font is bold (default is True)
italic : bool
boolean indicating if font is italic (default is True)
fontsize : int
font size (default is 9 point)
Returns
-------
"""
if "Univers" in self.family:
reset_family = True
else:
reset_family = False
family = self.family
if bold:
weight = "bold"
if reset_family:
family = "Univers 67"
else:
weight = "normal"
if reset_family:
family = "Univers 57"
if italic:
if reset_family:
family += " Condensed Oblique"
style = "oblique"
else:
style = "italic"
else:
if reset_family:
family += " Condensed"
style = "normal"
# define fontspec dictionary
fontspec = {
"family": family,
"size": fontsize,
"weight": weight,
"style": style,
}
if self.verbose:
sys.stdout.write("font specifications:\n ")
for key, value in fontspec.items():
sys.stdout.write("{}={} ".format(key, value))
sys.stdout.write("\n")
return fontspec
def _set_fontfamily(self, family):
"""Set font family to Liberation Sans Narrow on linux if default Arial Narrow
is being used
Parameters
----------
family : str
font family name (default is Arial Narrow)
Returns
-------
family : str
font family name
"""
if sys.platform.lower() in ("linux",):
if family == "Arial Narrow":
family = "Liberation Sans Narrow"
return family
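# Hedged usage sketch (not part of the original module): typical pattern for
# applying the USGS figure specifications to a simple matplotlib graph.
def _example_usgs_figure():
    fs = USGSFigure(figure_type="graph", family="Arial Narrow")
    fig, ax = plt.subplots(figsize=(4, 3))
    ax.plot([0, 1], [0, 1], label="example series")
    fs.heading(ax=ax, letter="A", heading="Example graph")
    fs.graph_legend(ax=ax, title="EXPLANATION")
    fs.remove_edge_ticks(ax=ax)
    return fig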
```
|
{
"source": "jdhughes-usgs/pymake",
"score": 2
}
|
#### File: pymake/autotest/ci_setup.py
```python
import os
import shutil
import pymake
temp_pth = "temp"
if not os.path.exists(temp_pth):
os.makedirs(temp_pth)
mf6_exdir = os.path.join(temp_pth, "mf6examples")
def download_mf6_examples(verbose=False):
"""Download mf6 examples and return location of folder"""
target = "mf6"
pm = pymake.Pymake(verbose=True)
pm.target = target
# download the modflow 6 release
pm.download_target(target, download_path=temp_pth)
assert pm.download, "could not download {} distribution".format(target)
# get program dictionary
prog_dict = pymake.usgs_program_data.get_target(target)
# set path to example
temp_download_dir = os.path.join(temp_pth, prog_dict.dirname)
temp_dir = os.path.join(temp_download_dir, "examples")
print("copying files to...{}".format(mf6_exdir))
shutil.copytree(temp_dir, mf6_exdir)
print("removing...{} directory".format(temp_download_dir))
shutil.rmtree(temp_download_dir)
return os.path.abspath(mf6_exdir)
def examples_list(verbose=False):
"""Build list of examples
Returns
-------
"""
exclude_models = ("lnf",)
exclude_examples = (
"sagehen",
"ex-gwt-keating",
)
src_folders = []
for dirName, subdirList, fileList in os.walk(mf6_exdir):
useModel = True
for exclude in exclude_models:
if exclude in dirName:
useModel = False
break
if useModel:
for exclude in exclude_examples:
if exclude in dirName:
useModel = False
break
if useModel:
for file_name in fileList:
if file_name.lower() == "mfsim.nam":
if verbose:
print("Found directory: {}".format(dirName))
src_folders.append(dirName)
src_folders = sorted(src_folders)
fpth = os.path.join(mf6_exdir, "mf6examples.txt")
f = open(fpth, "w")
for idx, folder in enumerate(src_folders):
if verbose:
if idx == 0:
print("\n\nMODFLOW 6 examples:\n{}".format(78 * "-"))
print("{:>3d}: {}".format(idx + 1, folder))
f.write("{}\n".format(os.path.abspath(folder)))
f.close()
return
if __name__ == "__main__":
mf6pth = download_mf6_examples(verbose=True)
examples_list(verbose=True)
```
#### File: pymake/autotest/t002_test.py
```python
import os
import sys
import shutil
import pymake
import flopy
import pytest
# determine if running on a continuous integration server
is_CI = "CI" in os.environ
# define program data
target = "swtv4"
if sys.platform.lower() == "win32":
target += ".exe"
# get program dictionary
prog_dict = pymake.usgs_program_data.get_target(target)
# set up paths
dstpth = os.path.join("temp")
if not os.path.exists(dstpth):
os.makedirs(dstpth)
swtpth = os.path.join(dstpth, prog_dict.dirname)
expth = os.path.join(swtpth, "examples")
deppth = os.path.join(swtpth, "dependencies")
srcpth = os.path.join(swtpth, prog_dict.srcdir)
epth = os.path.abspath(os.path.join(dstpth, target))
name_files = sorted(
[
"4_hydrocoin/seawat.nam",
"5_saltlake/seawat.nam",
"2_henry/1_classic_case1/seawat.nam",
"2_henry/4_VDF_uncpl_Trans/seawat.nam",
"2_henry/5_VDF_DualD_Trans/seawat.nam",
"2_henry/6_age_simulation/henry_mod.nam",
"2_henry/2_classic_case2/seawat.nam",
"2_henry/3_VDF_no_Trans/seawat.nam",
"1_box/case1/seawat.nam",
"1_box/case2/seawat.nam",
"3_elder/seawat.nam",
]
)
# add path to name_files
for idx, namefile in enumerate(name_files):
name_files[idx] = os.path.join(expth, namefile)
pm = pymake.Pymake(verbose=True)
pm.target = target
pm.appdir = dstpth
pm.double = True
def edit_namefile(namefile):
# read existing namefile
f = open(namefile, "r")
lines = f.read().splitlines()
f.close()
# remove global line
f = open(namefile, "w")
for line in lines:
if "global" in line.lower():
continue
f.write("{}\n".format(line))
f.close()
def clean_up():
print("Removing temporary build directories")
dirs_temp = [os.path.join("obj_temp"), os.path.join("mod_temp")]
for d in dirs_temp:
if os.path.isdir(d):
shutil.rmtree(d)
# finalize pymake object
pm.finalize()
if os.path.isfile(epth):
print("Removing " + target)
os.remove(epth)
return
def run_seawat(fn):
# edit the name files
edit_namefile(fn)
# run the models
success, buff = flopy.run_model(
epth, os.path.basename(fn), model_ws=os.path.dirname(fn), silent=False
)
errmsg = "could not run...{}".format(os.path.basename(fn))
assert success, errmsg
return
def build_seawat_dependency_graphs():
success = True
build_graphs = True
if is_CI:
if "linux" not in sys.platform.lower():
build_graphs = False
if build_graphs:
if os.path.exists(epth):
# build dependencies output directory
if not os.path.exists(deppth):
os.makedirs(deppth)
# build dependency graphs
print("building dependency graphs")
pymake.make_plots(srcpth, deppth, verbose=True)
# test that the dependency figure for the SEAWAT main exists
findf = os.path.join(deppth, "swt_v4.f.png")
success = os.path.isfile(findf)
assert success, "could not find {}".format(findf)
assert success, "could not build dependency graphs"
return
@pytest.mark.base
@pytest.mark.regression
def test_download():
# Remove the existing seawat directory if it exists
if os.path.isdir(swtpth):
shutil.rmtree(swtpth)
# download the target
pm.download_target(target, download_path=dstpth)
assert pm.download, "could not download {}".format(target)
@pytest.mark.base
@pytest.mark.regression
def test_compile():
assert pm.build() == 0, "could not compile {}".format(target)
@pytest.mark.regression
@pytest.mark.parametrize("fn", name_files)
def test_seawat(fn):
run_seawat(fn)
@pytest.mark.regression
def test_dependency_graphs():
build_seawat_dependency_graphs()
@pytest.mark.base
@pytest.mark.regression
def test_clean_up():
clean_up()
if __name__ == "__main__":
test_download()
test_compile()
for fn in name_files:
run_seawat(fn)
test_dependency_graphs()
test_clean_up()
```
#### File: pymake/autotest/t010_test.py
```python
import os
import sys
import shutil
import subprocess
import pymake
import pytest
# use the line below to set fortran compiler using environmental variables
# if sys.platform.lower() == "win32":
# os.environ["CC"] = "icl"
# else:
# os.environ["CC"] = "icc"
# define program data
target = "gridgen"
if sys.platform.lower() == "win32":
target += ".exe"
# get program dictionary
prog_dict = pymake.usgs_program_data.get_target(target)
# set up paths
dstpth = os.path.join("temp", "t010")
if not os.path.exists(dstpth):
os.makedirs(dstpth)
ver = prog_dict.version
pth = os.path.join(dstpth, prog_dict.dirname)
expth = os.path.join(pth, "examples", "biscayne")
exe_name = os.path.join(dstpth, target)
pm = pymake.Pymake(verbose=True)
pm.target = target
pm.appdir = dstpth
env_var = os.environ.get("CC")
if env_var is not None:
pm.cc = env_var
else:
pm.cc = "g++"
pm.fc = None
pm.inplace = True
pm.makeclean = True
biscayne_cmds = [
"buildqtg action01_buildqtg.dfn",
"grid02qtg-to-usgdata action02_writeusgdata.dfn",
"grid01mfg-to-polyshapefile action03_shapefile.dfn",
"grid02qtg-to-polyshapefile action03_shapefile.dfn",
"grid01mfg-to-pointshapefile action03_shapefile.dfn",
"grid02qtg-to-pointshapefile action03_shapefile.dfn",
"canal_grid02qtg_lay1_intersect action04_intersect.dfn",
"chd_grid02qtg_lay1_intersect action04_intersect.dfn",
"grid01mfg-to-vtkfile action05_vtkfile.dfn",
"grid02qtg-to-vtkfile action05_vtkfile.dfn",
"grid02qtg-to-vtkfilesv action05_vtkfile.dfn",
]
def clean_up():
# clean up
print("Removing folder " + pth)
if os.path.isdir(pth):
shutil.rmtree(pth)
# finalize pymake object
pm.finalize()
if os.path.isfile(exe_name):
print("Removing " + target)
os.remove(exe_name)
return
def run_command(cmdlist, cwd):
p = subprocess.Popen(
cmdlist,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
)
for line in p.stdout.readlines():
print(line.decode().strip())
retval = p.wait()
return retval
def run_gridgen(cmd):
success = False
prog = os.path.abspath(exe_name)
if os.path.exists(prog):
testpth = os.path.abspath(expth)
cmdlist = [prog] + cmd.split()
print("running {}".format(" ".join(cmdlist)))
retcode = run_command(cmdlist, testpth)
if retcode == 0:
success = True
return success
@pytest.mark.base
@pytest.mark.regression
def test_download():
# Remove the existing target download directory if it exists
if os.path.isdir(dstpth):
shutil.rmtree(dstpth)
# download the target
pm.download_target(target, download_path=dstpth)
assert pm.download, "could not download {} distribution".format(target)
@pytest.mark.base
@pytest.mark.regression
def test_compile():
assert pm.build() == 0, "could not compile {}".format(target)
@pytest.mark.regression
@pytest.mark.parametrize("cmd", biscayne_cmds)
def test_gridgen(cmd):
assert run_gridgen(cmd), "could not run {}".format(cmd)
@pytest.mark.base
@pytest.mark.regression
def test_clean_up():
clean_up()
if __name__ == "__main__":
test_download()
test_compile()
# for cmd in biscayne_cmds:
# run_gridgen(cmd)
test_clean_up()
```
#### File: pymake/examples/make_swtv4.py
```python
import pymake
def make_swtv4():
pymake.build_apps("swtv4")
return
if __name__ == "__main__":
make_swtv4()
```
#### File: pymake/examples/make_triangle.py
```python
import pymake
def make_triangle():
pymake.build_apps("triangle")
if __name__ == "__main__":
make_triangle()
```
#### File: pymake/autotest/autotest.py
```python
import os
import shutil
import textwrap
import numpy as np
ignore_ext = (
".hds",
".hed",
".bud",
".cbb",
".cbc",
".ddn",
".ucn",
".glo",
".lst",
".list",
".gwv",
".mv",
".out",
)
def setup(namefile, dst, remove_existing=True, extrafiles=None):
"""Setup MODFLOW-based model files for autotests.
Parameters
----------
namefile : str
MODFLOW-based model name file.
dst : str
destination path for comparison model or file(s)
remove_existing : bool
        boolean indicating if an existing comparison model or file(s) should
        be replaced (default is True)
    extrafiles : str or list of str
        list of extra files to include in the comparison
Returns
-------
"""
# Construct src pth from namefile or lgr file
src = os.path.dirname(namefile)
# Create the destination folder, if required
create_dir = False
if os.path.exists(dst):
if remove_existing:
print("Removing folder " + dst)
shutil.rmtree(dst)
create_dir = True
else:
create_dir = True
if create_dir:
os.mkdir(dst)
# determine if a namefile is a lgr control file - get individual
# name files out of the lgr control file
namefiles = [namefile]
ext = os.path.splitext(namefile)[1]
if ".lgr" in ext.lower():
lines = [line.rstrip("\n") for line in open(namefile)]
for line in lines:
if len(line) < 1:
continue
if line[0] == "#":
continue
t = line.split()
if ".nam" in t[0].lower():
fpth = os.path.join(src, t[0])
namefiles.append(fpth)
# Make list of files to copy
files2copy = []
for fpth in namefiles:
files2copy.append(os.path.basename(fpth))
ext = os.path.splitext(fpth)[1]
# copy additional files contained in the name file and
# associated package files
if ext.lower() == ".nam":
fname = os.path.abspath(fpth)
files2copy = files2copy + get_input_files(fname)
if extrafiles is not None:
if isinstance(extrafiles, str):
extrafiles = [extrafiles]
for fl in extrafiles:
files2copy.append(os.path.basename(fl))
# Copy the files
for f in files2copy:
srcf = os.path.join(src, f)
dstf = os.path.join(dst, f)
# Check to see if dstf is going into a subfolder, and create that
# subfolder if it doesn't exist
sf = os.path.dirname(dstf)
if not os.path.isdir(sf):
os.makedirs(sf)
# Now copy the file
if os.path.exists(srcf):
print("Copy file '" + srcf + "' -> '" + dstf + "'")
shutil.copy(srcf, dstf)
else:
print(srcf + " does not exist")
return
def setup_comparison(namefile, dst, remove_existing=True):
"""Setup a comparison model or comparision file(s) for a MODFLOW-based
model.
Parameters
----------
namefile : str
MODFLOW-based model name file.
dst : str
destination path for comparison model or file(s)
remove_existing : bool
        boolean indicating if an existing comparison model or file(s) should
be replaced (default is True)
Returns
-------
"""
# Construct src pth from namefile
src = os.path.dirname(namefile)
action = None
for root, dirs, files in os.walk(src):
dl = [d.lower() for d in dirs]
if any(".cmp" in s for s in dl):
idx = None
for jdx, d in enumerate(dl):
if ".cmp" in d:
idx = jdx
break
if idx is not None:
if "mf2005.cmp" in dl[idx] or "mf2005" in dl[idx]:
action = dirs[idx]
elif "mfnwt.cmp" in dl[idx] or "mfnwt" in dl[idx]:
action = dirs[idx]
elif "mfusg.cmp" in dl[idx] or "mfusg" in dl[idx]:
action = dirs[idx]
elif "mf6.cmp" in dl[idx] or "mf6" in dl[idx]:
action = dirs[idx]
elif "libmf6.cmp" in dl[idx] or "libmf6" in dl[idx]:
action = dirs[idx]
else:
action = dirs[idx]
break
if action is not None:
dst = os.path.join(dst, "{}".format(action))
if not os.path.isdir(dst):
try:
os.mkdir(dst)
except:
print("Could not make " + dst)
# clean directory
else:
print("cleaning...{}".format(dst))
for root, dirs, files in os.walk(dst):
for f in files:
tpth = os.path.join(root, f)
print(" removing...{}".format(tpth))
os.remove(tpth)
for d in dirs:
tdir = os.path.join(root, d)
print(" removing...{}".format(tdir))
shutil.rmtree(tdir)
# copy files
cmppth = os.path.join(src, action)
files = os.listdir(cmppth)
files2copy = []
if action.lower() == ".cmp":
for file in files:
if ".cmp" in os.path.splitext(file)[1].lower():
files2copy.append(os.path.join(cmppth, file))
for srcf in files2copy:
f = os.path.basename(srcf)
dstf = os.path.join(dst, f)
# Now copy the file
if os.path.exists(srcf):
print("Copy file '" + srcf + "' -> '" + dstf + "'")
shutil.copy(srcf, dstf)
else:
print(srcf + " does not exist")
else:
for file in files:
if ".nam" in os.path.splitext(file)[1].lower():
files2copy.append(
os.path.join(cmppth, os.path.basename(file))
)
nf = os.path.join(src, action, os.path.basename(file))
setup(nf, dst, remove_existing=remove_existing)
break
return action
def teardown(src):
"""Teardown a autotest directory.
Parameters
----------
src : str
autotest directory to teardown
Returns
-------
"""
if os.path.exists(src):
print("Removing folder " + src)
shutil.rmtree(src)
return
def get_input_files(namefile):
"""Return a list of all the input files in this model.
Parameters
----------
namefile : str
path to a MODFLOW-based model name file
Returns
-------
filelist : list
list of MODFLOW-based model input files
"""
srcdir = os.path.dirname(namefile)
filelist = []
fname = os.path.join(srcdir, namefile)
with open(fname, "r") as f:
lines = f.readlines()
for line in lines:
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
ext = os.path.splitext(ll[2])[1]
if ext.lower() not in ignore_ext:
if len(ll) > 3:
if "replace" in ll[3].lower():
continue
filelist.append(ll[2])
# Now go through every file and look for other files to copy,
# such as 'OPEN/CLOSE'. If found, then add that file to the
# list of files to copy.
otherfiles = []
for fname in filelist:
fname = os.path.join(srcdir, fname)
try:
f = open(fname, "r")
for line in f:
# Skip invalid lines
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
if "OPEN/CLOSE" in line.upper():
for i, s in enumerate(ll):
if "OPEN/CLOSE" in s.upper():
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
otherfiles.append(stmp)
break
except:
print(fname + " does not exist")
filelist = filelist + otherfiles
return filelist
def get_namefiles(pth, exclude=None):
"""Search through a path (pth) for all .nam files.
Parameters
----------
pth : str
path to model files
    exclude : str or list
        File or list of files to exclude from the search (default is None)
    Returns
    -------
    namefiles : list
List of namefiles with paths
"""
namefiles = []
for root, _, files in os.walk(pth):
namefiles += [
os.path.join(root, file) for file in files if file.endswith(".nam")
]
if exclude is not None:
if isinstance(exclude, str):
exclude = [exclude]
exclude = [e.lower() for e in exclude]
pop_list = []
for namefile in namefiles:
for e in exclude:
if e in namefile.lower():
pop_list.append(namefile)
for e in pop_list:
namefiles.remove(e)
return namefiles
def get_entries_from_namefile(namefile, ftype=None, unit=None, extension=None):
"""Get entries from a namefile. Can select using FTYPE, UNIT, or file
extension.
Parameters
----------
namefile : str
path to a MODFLOW-based model name file
ftype : str
package type
unit : int
file unit number
extension : str
file extension
Returns
-------
entries : list of tuples
list of tuples containing FTYPE, UNIT, FNAME, STATUS for each
namefile entry that meets a user-specified value.
"""
entries = []
f = open(namefile, "r")
for line in f:
if line.strip() == "":
continue
if line[0] == "#":
continue
ll = line.strip().split()
if len(ll) < 3:
continue
status = "UNKNOWN"
if len(ll) > 3:
status = ll[3].upper()
if ftype is not None:
if ftype.upper() == ll[0].upper():
filename = os.path.join(os.path.split(namefile)[0], ll[2])
entries.append((filename, ll[0], ll[1], status))
elif unit is not None:
if int(unit) == int(ll[1]):
filename = os.path.join(os.path.split(namefile)[0], ll[2])
entries.append((filename, ll[0], ll[1], status))
elif extension is not None:
filename = os.path.join(os.path.split(namefile)[0], ll[2])
ext = os.path.splitext(filename)[1]
if len(ext) > 0:
if ext[0] == ".":
ext = ext[1:]
if extension.lower() == ext.lower():
entries.append((filename, ll[0], ll[1], status))
f.close()
if len(entries) < 1:
entries.append((None, None, None, None))
return entries
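# Usage sketch (illustrative only; "model.nam" is an assumed path): select
# name file entries by package type or by file extension.
#
#   lst_entry = get_entries_from_namefile("model.nam", ftype="LIST")[0]
#   hds_entries = get_entries_from_namefile("model.nam", extension="hds")
#   for fname, pkg, unit, status in hds_entries:
#       print(fname, pkg, unit, status)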
def get_sim_name(namefiles, rootpth=None):
"""Get simulation name.
Parameters
----------
namefiles : str or list of strings
path(s) to MODFLOW-based model name files
rootpth : str
optional root directory path (default is None)
Returns
-------
simname : list
list of namefiles without the file extension
"""
if isinstance(namefiles, str):
namefiles = [namefiles]
sim_name = []
for namefile in namefiles:
t = namefile.split(os.sep)
if rootpth is None:
idx = -1
else:
idx = t.index(os.path.split(rootpth)[1])
# build dst with everything after the rootpth and before
# the namefile file name.
dst = ""
if idx < len(t):
for d in t[idx + 1 : -1]:
dst += "{}_".format(d)
# add namefile basename without extension
dst += t[-1].replace(".nam", "")
sim_name.append(dst)
return sim_name
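# Illustrative example (assumed paths): with rootpth="tests" and a name file
# at "tests/ex01/flow/model.nam", the path pieces after rootpth are joined
# with underscores and the ".nam" extension is dropped:
#
#   get_sim_name(["tests/ex01/flow/model.nam"], rootpth="tests")
#   # -> ["ex01_flow_model"]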
# modflow 6 readers and copiers
def setup_mf6(
src, dst, mfnamefile="mfsim.nam", extrafiles=None, remove_existing=True
):
"""Copy all of the MODFLOW 6 input files from the src directory to the dst
directory.
Parameters
----------
    src : str
directory path with original MODFLOW 6 input files
dst : str
directory path that original MODFLOW 6 input files will be copied to
mfnamefile : str
optional MODFLOW 6 simulation name file (default is mfsim.nam)
    extrafiles : list of str
        list of extra files to include in the copy (default is None)
remove_existing : bool
boolean indicating if existing file in dst should be removed (default
is True)
Returns
-------
mf6inp : list
list of MODFLOW 6 input files
mf6outp : list
list of MODFLOW 6 output files
"""
# Create the destination folder
create_dir = False
if os.path.exists(dst):
if remove_existing:
print("Removing folder " + dst)
shutil.rmtree(dst)
create_dir = True
else:
create_dir = True
if create_dir:
os.makedirs(dst)
# Make list of files to copy
fname = os.path.join(src, mfnamefile)
fname = os.path.abspath(fname)
mf6inp, mf6outp = get_mf6_files(fname)
files2copy = [mfnamefile] + mf6inp
# determine if there are any .ex files
exinp = []
for f in mf6outp:
ext = os.path.splitext(f)[1]
if ext.lower() == ".hds":
pth = os.path.join(src, f + ".ex")
if os.path.isfile(pth):
exinp.append(f + ".ex")
if len(exinp) > 0:
files2copy += exinp
if extrafiles is not None:
files2copy += extrafiles
# Copy the files
for f in files2copy:
srcf = os.path.join(src, f)
dstf = os.path.join(dst, f)
# Check to see if dstf is going into a subfolder, and create that
# subfolder if it doesn't exist
sf = os.path.dirname(dstf)
if not os.path.isdir(sf):
try:
os.mkdir(sf)
except:
print("Could not make " + sf)
# Now copy the file
if os.path.exists(srcf):
print("Copy file '" + srcf + "' -> '" + dstf + "'")
shutil.copy(srcf, dstf)
else:
print(srcf + " does not exist")
return mf6inp, mf6outp
def get_mf6_comparison(src):
"""Determine comparison type for MODFLOW 6 simulation.
Parameters
----------
src : str
directory path to search for comparison types
Returns
-------
action : str
comparison type
"""
action = None
# Possible comparison - the order matters
optcomp = (
"compare",
".cmp",
"mf2005",
"mf2005.cmp",
"mfnwt",
"mfnwt.cmp",
"mfusg",
"mfusg.cmp",
"mflgr",
"mflgr.cmp",
"libmf6",
"libmf6.cmp",
"mf6",
"mf6.cmp",
)
# Construct src pth from namefile
action = None
for _, dirs, _ in os.walk(src):
dl = [d.lower() for d in dirs]
for oc in optcomp:
if any(oc in s for s in dl):
action = oc
break
return action
def setup_mf6_comparison(src, dst, remove_existing=True):
"""Setup comparision for MODFLOW 6 simulation.
Parameters
----------
    src : str
directory path with original MODFLOW 6 input files
dst : str
directory path that original MODFLOW 6 input files will be copied to
remove_existing : bool
boolean indicating if existing file in dst should be removed (default
is True)
Returns
-------
action : str
comparison type
"""
# get the type of comparison to use (compare, mf2005, etc.)
action = get_mf6_comparison(src)
if action is not None:
dst = os.path.join(dst, "{}".format(action))
if not os.path.isdir(dst):
try:
os.mkdir(dst)
except:
print("Could not make " + dst)
# clean directory
else:
print("cleaning...{}".format(dst))
for root, dirs, files in os.walk(dst):
for f in files:
tpth = os.path.join(root, f)
print(" removing...{}".format(tpth))
os.remove(tpth)
for d in dirs:
tdir = os.path.join(root, d)
print(" removing...{}".format(tdir))
shutil.rmtree(tdir)
# copy files
cmppth = os.path.join(src, action)
files = os.listdir(cmppth)
files2copy = []
if action.lower() == "compare" or action.lower() == ".cmp":
for file in files:
if ".cmp" in os.path.splitext(file)[1].lower():
files2copy.append(os.path.join(cmppth, file))
for srcf in files2copy:
f = os.path.basename(srcf)
dstf = os.path.join(dst, f)
# Now copy the file
if os.path.exists(srcf):
print("Copy file '" + srcf + "' -> '" + dstf + "'")
shutil.copy(srcf, dstf)
else:
print(srcf + " does not exist")
else:
if "mf6" in action.lower():
for file in files:
if "mfsim.nam" in file.lower():
srcf = os.path.join(cmppth, os.path.basename(file))
files2copy.append(srcf)
srcdir = os.path.join(src, action)
setup_mf6(srcdir, dst, remove_existing=remove_existing)
break
else:
for file in files:
if ".nam" in os.path.splitext(file)[1].lower():
srcf = os.path.join(cmppth, os.path.basename(file))
files2copy.append(srcf)
nf = os.path.join(src, action, os.path.basename(file))
setup(nf, dst, remove_existing=remove_existing)
break
return action
def get_mf6_nper(tdisfile):
"""Return the number of stress periods in the MODFLOW 6 model.
Parameters
----------
tdisfile : str
path to the TDIS file
Returns
-------
nper : int
number of stress periods in the simulation
"""
with open(tdisfile, "r") as f:
lines = f.readlines()
line = [line for line in lines if "NPER" in line.upper()][0]
    # the token after NPER is the stress period count; cast to int to match
    # the documented return type
    nper = int(line.strip().split()[1])
    return nper
def get_mf6_mshape(disfile):
"""Return the shape of the MODFLOW 6 model.
Parameters
----------
disfile : str
path to a MODFLOW 6 discretization file
Returns
-------
mshape : tuple
tuple with the shape of the MODFLOW 6 model.
"""
with open(disfile, "r") as f:
lines = f.readlines()
d = {}
for line in lines:
# Skip over blank and commented lines
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
for key in ["NODES", "NCPL", "NLAY", "NROW", "NCOL"]:
if ll[0].upper() in key:
d[key] = int(ll[1])
if "NODES" in d:
mshape = (d["NODES"],)
elif "NCPL" in d:
mshape = (d["NLAY"], d["NCPL"])
elif "NLAY" in d:
mshape = (d["NLAY"], d["NROW"], d["NCOL"])
else:
print(d)
raise Exception("Could not determine model shape")
return mshape
def get_mf6_files(mfnamefile):
"""Return a list of all the MODFLOW 6 input and output files in this model.
Parameters
----------
mfnamefile : str
path to the MODFLOW 6 simulation name file
Returns
-------
filelist : list
list of MODFLOW 6 input files in a simulation
outplist : list
list of MODFLOW 6 output files in a simulation
"""
srcdir = os.path.dirname(mfnamefile)
filelist = []
outplist = []
filekeys = ["TDIS6", "GWF6", "GWT", "<KEY>", "GWF-GWT", "IMS6"]
namefilekeys = ["GWF6", "GWT"]
namefiles = []
with open(mfnamefile) as f:
# Read line and skip comments
lines = f.readlines()
for line in lines:
# Skip over blank and commented lines
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
for key in filekeys:
if key in ll[0].upper():
fname = ll[1]
filelist.append(fname)
for key in namefilekeys:
if key in ll[0].upper():
fname = ll[1]
namefiles.append(fname)
# Go through name files and get files
for namefile in namefiles:
fname = os.path.join(srcdir, namefile)
with open(fname, "r") as f:
lines = f.readlines()
insideblock = False
for line in lines:
ll = line.upper().strip().split()
if len(ll) < 2:
continue
if ll[0] in "BEGIN" and ll[1] in "PACKAGES":
insideblock = True
continue
if ll[0] in "END" and ll[1] in "PACKAGES":
insideblock = False
if insideblock:
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
filelist.append(ll[1])
# Recursively go through every file and look for other files to copy,
# such as 'OPEN/CLOSE' and 'TIMESERIESFILE'. If found, then
# add that file to the list of files to copy.
flist = filelist
# olist = outplist
while True:
olist = []
flist, olist = _get_mf6_external_files(srcdir, olist, flist)
# add to filelist
if len(flist) > 0:
filelist = filelist + flist
# add to outplist
if len(olist) > 0:
outplist = outplist + olist
# terminate loop if no additional files
# if len(flist) < 1 and len(olist) < 1:
if len(flist) < 1:
break
return filelist, outplist
def _get_mf6_external_files(srcdir, outplist, files):
"""Get list of external files in a MODFLOW 6 simulation.
Parameters
----------
srcdir : str
path to a directory containing a MODFLOW 6 simulation
outplist : list
list of output files in a MODFLOW 6 simulation
files : list
list of MODFLOW 6 name files
Returns
-------
"""
extfiles = []
for fname in files:
fname = os.path.join(srcdir, fname)
try:
f = open(fname, "r")
for line in f:
# Skip invalid lines
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
if "OPEN/CLOSE" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "OPEN/CLOSE":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
extfiles.append(stmp)
break
if "TS6" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "FILEIN":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
extfiles.append(stmp)
break
if "TAS6" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "FILEIN":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
extfiles.append(stmp)
break
if "OBS6" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "FILEIN":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
extfiles.append(stmp)
break
if "EXTERNAL" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "EXTERNAL":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
extfiles.append(stmp)
break
if "FILE" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "FILEIN":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
extfiles.append(stmp)
break
if "FILE" in line.upper():
for i, s in enumerate(ll):
if s.upper() == "FILEOUT":
stmp = ll[i + 1]
stmp = stmp.replace('"', "")
stmp = stmp.replace("'", "")
outplist.append(stmp)
break
except:
print("could not get a list of external mf6 files")
return extfiles, outplist
def get_mf6_ftypes(namefile, ftypekeys):
"""Return a list of FTYPES that are in the name file and in ftypekeys.
Parameters
----------
namefile : str
path to a MODFLOW 6 name file
ftypekeys : list
list of desired FTYPEs
Returns
-------
ftypes : list
list of FTYPES that match ftypekeys in namefile
"""
with open(namefile, "r") as f:
lines = f.readlines()
ftypes = []
for line in lines:
# Skip over blank and commented lines
ll = line.strip().split()
if len(ll) < 2:
continue
if line.strip()[0] in ["#", "!"]:
continue
for key in ftypekeys:
if ll[0].upper() in key:
ftypes.append(ll[0])
return ftypes
def get_mf6_blockdata(f, blockstr):
"""Return list with all non comments between start and end of block
specified by blockstr.
Parameters
----------
f : file object
open file object
blockstr : str
name of block to search
Returns
-------
data : list
list of data in specified block
"""
data = []
# find beginning of block
for line in f:
if line[0] != "#":
t = line.split()
if t[0].lower() == "begin" and t[1].lower() == blockstr.lower():
break
for line in f:
if line[0] != "#":
t = line.split()
if t[0].lower() == "end" and t[1].lower() == blockstr.lower():
break
else:
data.append(line.rstrip())
return data
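# Usage sketch (hypothetical file name): read the OPTIONS block from an open
# MODFLOW 6 package file.
#
#   with open("model.dis", "r") as fobj:
#       options = get_mf6_blockdata(fobj, "options")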
# compare functions
def compare_budget(
namefile1,
namefile2,
max_cumpd=0.01,
max_incpd=0.01,
outfile=None,
files1=None,
files2=None,
):
"""Compare the budget results from two simulations.
Parameters
----------
namefile1 : str
namefile path for base model
namefile2 : str
namefile path for comparison model
max_cumpd : float
maximum percent discrepancy allowed for cumulative budget terms
(default is 0.01)
max_incpd : float
maximum percent discrepancy allowed for incremental budget terms
(default is 0.01)
outfile : str
budget comparison output file name. If outfile is None, no
comparison output is saved. (default is None)
files1 : str
base model output file. If files1 is not None, results
will be extracted from files1 and namefile1 will not be used.
(default is None)
files2 : str
comparison model output file. If files2 is not None, results
will be extracted from files2 and namefile2 will not be used.
(default is None)
Returns
-------
success : bool
        boolean indicating if the differences between budgets are less
than max_cumpd and max_incpd
"""
try:
import flopy
except:
msg = "flopy not available - cannot use compare_budget"
raise ValueError(msg)
# headers
headers = ("INCREMENTAL", "CUMULATIVE")
direction = ("IN", "OUT")
# Get name of list files
lst_file1 = None
if files1 is None:
lst_file = get_entries_from_namefile(namefile1, "list")
lst_file1 = lst_file[0][0]
else:
if isinstance(files1, str):
files1 = [files1]
for file in files1:
if (
"list" in os.path.basename(file).lower()
or "lst" in os.path.basename(file).lower()
):
lst_file1 = file
break
lst_file2 = None
if files2 is None:
lst_file = get_entries_from_namefile(namefile2, "list")
lst_file2 = lst_file[0][0]
else:
if isinstance(files2, str):
files2 = [files2]
for file in files2:
if (
"list" in os.path.basename(file).lower()
or "lst" in os.path.basename(file).lower()
):
lst_file2 = file
break
# Determine if there are two files to compare
if lst_file1 is None or lst_file2 is None:
print("lst_file1 or lst_file2 is None")
print("lst_file1: {}".format(lst_file1))
print("lst_file2: {}".format(lst_file2))
return True
# Open output file
if outfile is not None:
f = open(outfile, "w")
f.write("Created by pymake.autotest.compare\n")
# Initialize SWR budget objects
lst1obj = flopy.utils.MfusgListBudget(lst_file1)
lst2obj = flopy.utils.MfusgListBudget(lst_file2)
# Determine if there any SWR entries in the budget file
if not lst1obj.isvalid() or not lst2obj.isvalid():
return True
# Get numpy budget tables for lst_file1
lst1 = []
lst1.append(lst1obj.get_incremental())
lst1.append(lst1obj.get_cumulative())
# Get numpy budget tables for lst_file2
lst2 = []
lst2.append(lst2obj.get_incremental())
lst2.append(lst2obj.get_cumulative())
icnt = 0
v0 = np.zeros(2, dtype=float)
v1 = np.zeros(2, dtype=float)
err = np.zeros(2, dtype=float)
# Process cumulative and incremental
for idx in range(2):
if idx > 0:
max_pd = max_cumpd
else:
max_pd = max_incpd
kper = lst1[idx]["stress_period"]
kstp = lst1[idx]["time_step"]
# Process each time step
for jdx in range(kper.shape[0]):
err[:] = 0.0
t0 = lst1[idx][jdx]
t1 = lst2[idx][jdx]
if outfile is not None:
maxcolname = 0
for colname in t0.dtype.names:
maxcolname = max(maxcolname, len(colname))
s = 2 * "\n"
s += "STRESS PERIOD: {} TIME STEP: {}".format(
kper[jdx] + 1, kstp[jdx] + 1
)
f.write(s)
if idx == 0:
f.write("\nINCREMENTAL BUDGET\n")
else:
f.write("\nCUMULATIVE BUDGET\n")
for i, colname in enumerate(t0.dtype.names):
if i == 0:
s = "{:<21} {:>21} {:>21} {:>21}\n".format(
"Budget Entry", "Model 1", "Model 2", "Difference"
)
f.write(s)
s = 87 * "-" + "\n"
f.write(s)
diff = t0[colname] - t1[colname]
s = "{:<21} {:>21} {:>21} {:>21}\n".format(
colname, t0[colname], t1[colname], diff
)
f.write(s)
v0[0] = t0["TOTAL_IN"]
v1[0] = t1["TOTAL_IN"]
if v0[0] > 0.0:
err[0] = 100.0 * (v1[0] - v0[0]) / v0[0]
v0[1] = t0["TOTAL_OUT"]
v1[1] = t1["TOTAL_OUT"]
if v0[1] > 0.0:
err[1] = 100.0 * (v1[1] - v0[1]) / v0[1]
for kdx, t in enumerate(err):
if abs(t) > max_pd:
icnt += 1
if outfile is not None:
e = (
'"{} {}" percent difference ({})'.format(
headers[idx], direction[kdx], t
)
+ " for stress period {} and time step {} > {}.".format(
kper[jdx] + 1, kstp[jdx] + 1, max_pd
)
+ " Reference value = {}. Simulated value = {}.".format(
v0[kdx], v1[kdx]
)
)
e = textwrap.fill(
e,
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
f.write("{}\n".format(e))
f.write("\n")
# Close output file
if outfile is not None:
f.close()
# test for failure
success = True
if icnt > 0:
success = False
return success
def compare_swrbudget(
namefile1,
namefile2,
max_cumpd=0.01,
max_incpd=0.01,
outfile=None,
files1=None,
files2=None,
):
"""Compare the SWR budget results from two simulations.
Parameters
----------
namefile1 : str
namefile path for base model
namefile2 : str
namefile path for comparison model
max_cumpd : float
maximum percent discrepancy allowed for cumulative budget terms
(default is 0.01)
max_incpd : float
maximum percent discrepancy allowed for incremental budget terms
(default is 0.01)
outfile : str
budget comparison output file name. If outfile is None, no
comparison output is saved. (default is None)
files1 : str
base model output file. If files1 is not None, results
will be extracted from files1 and namefile1 will not be used.
(default is None)
files2 : str
comparison model output file. If files2 is not None, results
will be extracted from files2 and namefile2 will not be used.
(default is None)
Returns
-------
success : bool
        boolean indicating if the differences between budgets are less
than max_cumpd and max_incpd
"""
try:
import flopy
except:
msg = "flopy not available - cannot use compare_swrbudget"
raise ValueError(msg)
# headers
headers = ("INCREMENTAL", "CUMULATIVE")
direction = ("IN", "OUT")
# Get name of list files
list1 = None
if files1 is None:
lst = get_entries_from_namefile(namefile1, "list")
list1 = lst[0][0]
else:
for file in files1:
if (
"list" in os.path.basename(file).lower()
or "lst" in os.path.basename(file).lower()
):
list1 = file
break
list2 = None
if files2 is None:
lst = get_entries_from_namefile(namefile2, "list")
list2 = lst[0][0]
else:
for file in files2:
if (
"list" in os.path.basename(file).lower()
or "lst" in os.path.basename(file).lower()
):
list2 = file
break
# Determine if there are two files to compare
if list1 is None or list2 is None:
return True
# Initialize SWR budget objects
lst1obj = flopy.utils.SwrListBudget(list1)
lst2obj = flopy.utils.SwrListBudget(list2)
# Determine if there any SWR entries in the budget file
if not lst1obj.isvalid() or not lst2obj.isvalid():
return True
# Get numpy budget tables for list1
lst1 = []
lst1.append(lst1obj.get_incremental())
lst1.append(lst1obj.get_cumulative())
# Get numpy budget tables for list2
lst2 = []
lst2.append(lst2obj.get_incremental())
lst2.append(lst2obj.get_cumulative())
icnt = 0
v0 = np.zeros(2, dtype=float)
v1 = np.zeros(2, dtype=float)
err = np.zeros(2, dtype=float)
# Open output file
if outfile is not None:
f = open(outfile, "w")
f.write("Created by pymake.autotest.compare\n")
# Process cumulative and incremental
for idx in range(2):
if idx > 0:
max_pd = max_cumpd
else:
max_pd = max_incpd
kper = lst1[idx]["stress_period"]
kstp = lst1[idx]["time_step"]
# Process each time step
for jdx in range(kper.shape[0]):
err[:] = 0.0
t0 = lst1[idx][jdx]
t1 = lst2[idx][jdx]
if outfile is not None:
maxcolname = 0
for colname in t0.dtype.names:
maxcolname = max(maxcolname, len(colname))
s = 2 * "\n"
s += "STRESS PERIOD: {} TIME STEP: {}".format(
kper[jdx] + 1, kstp[jdx] + 1
)
f.write(s)
if idx == 0:
f.write("\nINCREMENTAL BUDGET\n")
else:
f.write("\nCUMULATIVE BUDGET\n")
for i, colname in enumerate(t0.dtype.names):
if i == 0:
s = "{:<21} {:>21} {:>21} {:>21}\n".format(
"Budget Entry", "Model 1", "Model 2", "Difference"
)
f.write(s)
s = 87 * "-" + "\n"
f.write(s)
diff = t0[colname] - t1[colname]
s = "{:<21} {:>21} {:>21} {:>21}\n".format(
colname, t0[colname], t1[colname], diff
)
f.write(s)
v0[0] = t0["TOTAL_IN"]
v1[0] = t1["TOTAL_IN"]
if v0[0] > 0.0:
err[0] = 100.0 * (v1[0] - v0[0]) / v0[0]
v0[1] = t0["TOTAL_OUT"]
v1[1] = t1["TOTAL_OUT"]
if v0[1] > 0.0:
err[1] = 100.0 * (v1[1] - v0[1]) / v0[1]
for kdx, t in enumerate(err):
if abs(t) > max_pd:
icnt += 1
e = (
'"{} {}" percent difference ({})'.format(
headers[idx], direction[kdx], t
)
+ " for stress period {} and time step {} > {}.".format(
kper[jdx] + 1, kstp[jdx] + 1, max_pd
)
+ " Reference value = {}. Simulated value = {}.".format(
v0[kdx], v1[kdx]
)
)
e = textwrap.fill(
e,
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
f.write("{}\n".format(e))
f.write("\n")
# Close output file
if outfile is not None:
f.close()
# test for failure
success = True
if icnt > 0:
success = False
return success
def compare_heads(
namefile1,
namefile2,
precision="auto",
text="head",
text2=None,
htol=0.001,
outfile=None,
files1=None,
files2=None,
difftol=False,
verbose=False,
exfile=None,
exarr=None,
maxerr=None,
):
"""Compare the head results from two simulations.
Parameters
----------
namefile1 : str
namefile path for base model
namefile2 : str
namefile path for comparison model
precision : str
precision for binary head file ("auto", "single", or "double")
default is "auto"
htol : float
maximum allowed head difference (default is 0.001)
outfile : str
head comparison output file name. If outfile is None, no
comparison output is saved. (default is None)
files1 : str
base model output file. If files1 is not None, results
will be extracted from files1 and namefile1 will not be used.
(default is None)
files2 : str
comparison model output file. If files2 is not None, results
will be extracted from files2 and namefile2 will not be used.
(default is None)
difftol : bool
boolean determining if the absolute value of the head
difference greater than htol should be evaluated (default is False)
verbose : bool
boolean indicating if verbose output should be written to the
terminal (default is False)
exfile : str
path to a file with exclusion array data. Head differences will not
be evaluated where exclusion array values are greater than zero.
(default is None)
    exarr : numpy.ndarray
exclusion array. Head differences will not be evaluated where
exclusion array values are greater than zero. (default is None).
maxerr : int
        maximum number of head differences greater than htol that should be
        reported. If maxerr is None, all head differences greater than htol
will be reported. (default is None)
Returns
-------
success : bool
boolean indicating if the head differences are less than htol.
"""
try:
import flopy
except:
msg = "flopy not available - cannot use compare_heads"
raise ValueError(msg)
if text2 is None:
text2 = text
dbs = "DATA(BINARY)"
# Get head info for namefile1
hfpth1 = None
status1 = dbs
if files1 is None:
# Get oc info, and return if OC not included in models
ocf1 = get_entries_from_namefile(namefile1, "OC")
if ocf1[0][0] is None:
return True
hu1, hfpth1, du1, _ = flopy.modflow.ModflowOc.get_ocoutput_units(
ocf1[0][0]
)
if text.lower() == "head":
iut = hu1
elif text.lower() == "drawdown":
iut = du1
if iut != 0:
entries = get_entries_from_namefile(namefile1, unit=abs(iut))
hfpth1, status1 = entries[0][0], entries[0][1]
else:
if isinstance(files1, str):
files1 = [files1]
for file in files1:
if text.lower() == "head":
if (
"hds" in os.path.basename(file).lower()
or "hed" in os.path.basename(file).lower()
):
hfpth1 = file
break
elif text.lower() == "drawdown":
if "ddn" in os.path.basename(file).lower():
hfpth1 = file
break
elif text.lower() == "concentration":
if "ucn" in os.path.basename(file).lower():
hfpth1 = file
break
else:
hfpth1 = file
break
# Get head info for namefile2
hfpth2 = None
status2 = dbs
if files2 is None:
# Get oc info, and return if OC not included in models
ocf2 = get_entries_from_namefile(namefile2, "OC")
if ocf2[0][0] is None:
return True
hu2, hfpth2, du2, dfpth2 = flopy.modflow.ModflowOc.get_ocoutput_units(
ocf2[0][0]
)
if text.lower() == "head":
iut = hu2
elif text.lower() == "drawdown":
iut = du2
if iut != 0:
entries = get_entries_from_namefile(namefile2, unit=abs(iut))
hfpth2, status2 = entries[0][0], entries[0][1]
else:
if isinstance(files2, str):
files2 = [files2]
for file in files2:
if text2.lower() == "head":
if (
"hds" in os.path.basename(file).lower()
or "hed" in os.path.basename(file).lower()
):
hfpth2 = file
break
elif text2.lower() == "drawdown":
if "ddn" in os.path.basename(file).lower():
hfpth2 = file
break
elif text2.lower() == "concentration":
if "ucn" in os.path.basename(file).lower():
hfpth2 = file
break
else:
hfpth2 = file
break
# confirm that there are two files to compare
if hfpth1 is None or hfpth2 is None:
print("hfpth1 or hfpth2 is None")
print("hfpth1: {}".format(hfpth1))
print("hfpth2: {}".format(hfpth2))
return True
# make sure the file paths exist
if not os.path.isfile(hfpth1) or not os.path.isfile(hfpth2):
print("hfpth1 or hfpth2 is not a file")
print("hfpth1 isfile: {}".format(os.path.isfile(hfpth1)))
print("hfpth2 isfile: {}".format(os.path.isfile(hfpth2)))
return False
# Open output file
if outfile is not None:
f = open(outfile, "w")
f.write("Created by pymake.autotest.compare\n")
f.write(
"Performing {} to {} comparison\n".format(
text.upper(), text2.upper()
)
)
if exfile is not None:
f.write("Using exclusion file {}\n".format(exfile))
if exarr is not None:
f.write("Using exclusion array\n")
msg = "{} is a ".format(hfpth1)
if status1 == dbs:
msg += "binary file."
else:
msg += "ascii file."
f.write(msg + "\n")
msg = "{} is a ".format(hfpth2)
if status2 == dbs:
msg += "binary file."
else:
msg += "ascii file."
f.write(msg + "\n")
# Process exclusion data
exd = None
# get data from exclusion file
if exfile is not None:
e = None
if isinstance(exfile, str):
try:
exd = np.genfromtxt(exfile).flatten()
except:
e = "Could not read exclusion " + "file {}".format(
os.path.basename(exfile)
)
print(e)
return False
else:
e = "exfile is not a valid file path"
print(e)
return False
# process exclusion array
if exarr is not None:
e = None
if isinstance(exarr, np.ndarray):
if exd is None:
exd = exarr.flatten()
else:
exd += exarr.flatten()
else:
e = "exarr is not a numpy array"
print(e)
return False
# Get head objects
status1 = status1.upper()
unstructured1 = False
if status1 == dbs:
headobj1 = flopy.utils.HeadFile(
hfpth1, precision=precision, verbose=verbose, text=text
)
txt = headobj1.recordarray["text"][0]
if isinstance(txt, bytes):
txt = txt.decode("utf-8")
if "HEADU" in txt:
unstructured1 = True
headobj1 = flopy.utils.HeadUFile(
hfpth1, precision=precision, verbose=verbose
)
else:
headobj1 = flopy.utils.FormattedHeadFile(
hfpth1, verbose=verbose, text=text
)
status2 = status2.upper()
unstructured2 = False
if status2 == dbs:
headobj2 = flopy.utils.HeadFile(
hfpth2, precision=precision, verbose=verbose, text=text2
)
txt = headobj2.recordarray["text"][0]
if isinstance(txt, bytes):
txt = txt.decode("utf-8")
if "HEADU" in txt:
unstructured2 = True
headobj2 = flopy.utils.HeadUFile(
hfpth2, precision=precision, verbose=verbose
)
else:
headobj2 = flopy.utils.FormattedHeadFile(
hfpth2, verbose=verbose, text=text2
)
# get times
times1 = headobj1.get_times()
times2 = headobj2.get_times()
for (t1, t2) in zip(times1, times2):
if not np.allclose([t1], [t2]):
msg = "times in two head files are not " + "equal ({},{})".format(
t1, t2
)
raise ValueError(msg)
kstpkper = headobj1.get_kstpkper()
header = (
"{:>15s} {:>15s} {:>15s} {:>15s}\n".format(
" ", " ", "MAXIMUM", "EXCEEDS"
)
+ "{:>15s} {:>15s} {:>15s} {:>15s}\n".format(
"STRESS PERIOD", "TIME STEP", "HEAD DIFFERENCE", "CRITERIA"
)
+ "{0:>15s} {0:>15s} {0:>15s} {0:>15s}\n".format(15 * "-")
)
if verbose:
print("Comparing results for {} times".format(len(times1)))
icnt = 0
# Process cumulative and incremental
for idx, (t1, t2) in enumerate(zip(times1, times2)):
h1 = headobj1.get_data(totim=t1)
if unstructured1:
temp = np.array([])
for a in h1:
temp = np.hstack((temp, a))
h1 = temp
h2 = headobj2.get_data(totim=t2)
if unstructured2:
temp = np.array([])
for a in h2:
temp = np.hstack((temp, a))
h2 = temp
if exd is not None:
# reshape exd to the shape of the head arrays
if idx == 0:
e = (
"shape of exclusion data ({})".format(exd.shape)
+ "can not be reshaped to the size of the "
+ "head arrays ({})".format(h1.shape)
)
if h1.flatten().shape != exd.shape:
raise ValueError(e)
exd = exd.reshape(h1.shape)
iexd = exd > 0
# reset h1 and h2 to the same value in the excluded area
h1[iexd] = 0.0
h2[iexd] = 0.0
if difftol:
diffmax, indices = _calculate_difftol(h1, h2, htol)
else:
diffmax, indices = _calculate_diffmax(h1, h2)
if outfile is not None:
if idx < 1:
f.write(header)
if diffmax > htol:
sexceed = "*"
else:
sexceed = ""
kk1 = kstpkper[idx][1] + 1
kk0 = kstpkper[idx][0] + 1
f.write(
"{:15d} {:15d} {:15.6g} {:15s}\n".format(
kk1, kk0, diffmax, sexceed
)
)
if diffmax >= htol:
icnt += 1
if outfile is not None:
if difftol:
ee = (
"Maximum absolute head difference "
+ "({}) -- ".format(diffmax)
+ "{} tolerance exceeded at ".format(htol)
+ "{} node location(s)".format(indices[0].shape[0])
)
else:
ee = (
"Maximum absolute head difference "
+ "({}) exceeded ".format(diffmax)
+ "at {} node location(s)".format(indices[0].shape[0])
)
e = textwrap.fill(
ee + ":",
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
if verbose:
f.write("{}\n".format(ee))
print(ee + " at time {}".format(t1))
e = ""
ncells = h1.flatten().shape[0]
fmtn = "{:" + "{}".format(len(str(ncells))) + "d}"
for itupe in indices:
for jdx, ind in enumerate(itupe):
iv = np.unravel_index(ind, h1.shape)
iv = tuple(i + 1 for i in iv)
v1 = h1.flatten()[ind]
v2 = h2.flatten()[ind]
d12 = v1 - v2
# e += ' ' + fmtn.format(jdx + 1) + ' node: '
# e += fmtn.format(ind + 1) # convert to one-based
e += " " + fmtn.format(jdx + 1)
e += " {}".format(iv)
e += " -- "
e += "h1: {:20} ".format(v1)
e += "h2: {:20} ".format(v2)
e += "diff: {:20}\n".format(d12)
if isinstance(maxerr, int):
if jdx + 1 >= maxerr:
break
if verbose:
f.write("{}\n".format(e))
# Write header again, unless it is the last record
if verbose:
if idx + 1 < len(times1):
f.write("\n{}".format(header))
# Close output file
if outfile is not None:
f.close()
# test for failure
success = True
if icnt > 0:
success = False
return success
def compare_concs(
namefile1,
namefile2,
precision="auto",
ctol=0.001,
outfile=None,
files1=None,
files2=None,
difftol=False,
verbose=False,
):
"""Compare the mt3dms and mt3dusgs concentration results from two
simulations.
Parameters
----------
namefile1 : str
namefile path for base model
namefile2 : str
namefile path for comparison model
precision : str
precision for binary head file ("auto", "single", or "double")
default is "auto"
ctol : float
maximum allowed concentration difference (default is 0.001)
outfile : str
concentration comparison output file name. If outfile is None, no
comparison output is saved. (default is None)
files1 : str
base model output file. If files1 is not None, results
will be extracted from files1 and namefile1 will not be used.
(default is None)
files2 : str
comparison model output file. If files2 is not None, results
will be extracted from files2 and namefile2 will not be used.
(default is None)
difftol : bool
boolean determining if the absolute value of the concentration
difference greater than ctol should be evaluated (default is False)
verbose : bool
boolean indicating if verbose output should be written to the
terminal (default is False)
Returns
-------
success : bool
boolean indicating if the concentration differences are less than
ctol.
"""
try:
import flopy
except:
msg = "flopy not available - cannot use compare_concs"
raise ValueError(msg)
# list of valid extensions
valid_ext = ["ucn"]
# Get info for first ucn file
ufpth1 = None
if files1 is None:
for ext in valid_ext:
ucn = get_entries_from_namefile(namefile1, extension=ext)
ufpth = ucn[0][0]
if ufpth is not None:
ufpth1 = ufpth
break
if ufpth1 is None:
ufpth1 = os.path.join(os.path.dirname(namefile1), "MT3D001.UCN")
else:
if isinstance(files1, str):
files1 = [files1]
for file in files1:
for ext in valid_ext:
if ext in os.path.basename(file).lower():
ufpth1 = file
break
# Get info for second ucn file
ufpth2 = None
if files2 is None:
for ext in valid_ext:
ucn = get_entries_from_namefile(namefile2, extension=ext)
ufpth = ucn[0][0]
if ufpth is not None:
ufpth2 = ufpth
break
if ufpth2 is None:
ufpth2 = os.path.join(os.path.dirname(namefile2), "MT3D001.UCN")
else:
if isinstance(files2, str):
files2 = [files2]
for file in files2:
for ext in valid_ext:
if ext in os.path.basename(file).lower():
ufpth2 = file
break
# confirm that there are two files to compare
if ufpth1 is None or ufpth2 is None:
if ufpth1 is None:
print(" UCN file 1 not set")
if ufpth2 is None:
print(" UCN file 2 not set")
return True
if not os.path.isfile(ufpth1) or not os.path.isfile(ufpth2):
if not os.path.isfile(ufpth1):
print(" {} does not exist".format(ufpth1))
if not os.path.isfile(ufpth2):
print(" {} does not exist".format(ufpth2))
return True
# Open output file
if outfile is not None:
f = open(outfile, "w")
f.write("Created by pymake.autotest.compare_concs\n")
# Get stage objects
uobj1 = flopy.utils.UcnFile(ufpth1, precision=precision, verbose=verbose)
uobj2 = flopy.utils.UcnFile(ufpth2, precision=precision, verbose=verbose)
# get times
times1 = uobj1.get_times()
times2 = uobj2.get_times()
nt1 = len(times1)
nt2 = len(times2)
nt = min(nt1, nt2)
for (t1, t2) in zip(times1, times2):
assert np.allclose(
[t1], [t2]
), "times in two ucn files are not " + "equal ({},{})".format(t1, t2)
if nt == nt1:
kstpkper = uobj1.get_kstpkper()
else:
kstpkper = uobj2.get_kstpkper()
header = (
"{:>15s} {:>15s} {:>15s}\n".format(" ", " ", "MAXIMUM")
+ "{:>15s} {:>15s} {:>15s}\n".format(
"STRESS PERIOD", "TIME STEP", "CONC DIFFERENCE"
)
+ "{0:>15s} {0:>15s} {0:>15s}\n".format(15 * "-")
)
if verbose:
print("Comparing results for {} times".format(len(times1)))
icnt = 0
# Process cumulative and incremental
for idx, time in enumerate(times1[0:nt]):
try:
u1 = uobj1.get_data(totim=time)
u2 = uobj2.get_data(totim=time)
if difftol:
diffmax, indices = _calculate_difftol(u1, u2, ctol)
else:
diffmax, indices = _calculate_diffmax(u1, u2)
if outfile is not None:
if idx < 1:
f.write(header)
f.write(
"{:15d} {:15d} {:15.6g}\n".format(
kstpkper[idx][1] + 1, kstpkper[idx][0] + 1, diffmax
)
)
if diffmax >= ctol:
icnt += 1
if outfile is not None:
if difftol:
ee = (
"Maximum concentration difference ({})".format(
diffmax
)
+ " -- {} tolerance exceeded at ".format(ctol)
+ "{} node location(s)".format(indices[0].shape[0])
)
else:
ee = (
"Maximum concentration difference "
+ "({}) exceeded ".format(diffmax)
+ "at {} node location(s)".format(
indices[0].shape[0]
)
)
e = textwrap.fill(
ee + ":",
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
f.write("{}\n".format(e))
if verbose:
print(ee + " at time {}".format(time))
e = ""
for itupe in indices:
for ind in itupe:
e += "{} ".format(ind + 1) # convert to one-based
e = textwrap.fill(
e,
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
f.write("{}\n".format(e))
# Write header again, unless it is the last record
if idx + 1 < len(times1):
f.write("\n{}".format(header))
except:
print(" could not process time={}".format(time))
print(" terminating ucn processing...")
break
# Close output file
if outfile is not None:
f.close()
# test for failure
success = True
if icnt > 0:
success = False
return success
def compare_stages(
namefile1=None,
namefile2=None,
files1=None,
files2=None,
htol=0.001,
outfile=None,
difftol=False,
verbose=False,
):
"""Compare SWR process stage results from two simulations.
Parameters
----------
namefile1 : str
namefile path for base model
namefile2 : str
namefile path for comparison model
htol : float
maximum allowed stage difference (default is 0.001)
outfile : str
head comparison output file name. If outfile is None, no
comparison output is saved. (default is None)
files1 : str
base model output file. If files1 is not None, results
will be extracted from files1 and namefile1 will not be used.
(default is None)
files2 : str
comparison model output file. If files2 is not None, results
will be extracted from files2 and namefile2 will not be used.
(default is None)
difftol : bool
boolean determining if the absolute value of the stage
difference greater than htol should be evaluated (default is False)
verbose : bool
boolean indicating if verbose output should be written to the
terminal (default is False)
Returns
-------
success : bool
boolean indicating if the stage differences are less than htol.
"""
try:
import flopy
except:
msg = "flopy not available - cannot use compare_stages"
raise ValueError(msg)
# list of valid extensions
valid_ext = ["stg"]
# Get info for first stage file
sfpth1 = None
if namefile1 is not None:
for ext in valid_ext:
stg = get_entries_from_namefile(namefile1, extension=ext)
sfpth = stg[0][0]
if sfpth is not None:
sfpth1 = sfpth
break
elif files1 is not None:
if isinstance(files1, str):
files1 = [files1]
for file in files1:
for ext in valid_ext:
if ext in os.path.basename(file).lower():
sfpth1 = file
break
# Get info for second stage file
sfpth2 = None
if namefile2 is not None:
for ext in valid_ext:
stg = get_entries_from_namefile(namefile2, extension=ext)
sfpth = stg[0][0]
if sfpth is not None:
sfpth2 = sfpth
break
elif files2 is not None:
if isinstance(files2, str):
files2 = [files2]
for file in files2:
for ext in valid_ext:
if ext in os.path.basename(file).lower():
sfpth2 = file
break
# confirm that there are two files to compare
if sfpth1 is None or sfpth2 is None:
print("spth1 or spth2 is None")
print("spth1: {}".format(sfpth1))
print("spth2: {}".format(sfpth2))
return False
if not os.path.isfile(sfpth1) or not os.path.isfile(sfpth2):
print("spth1 or spth2 is not a file")
print("spth1 isfile: {}".format(os.path.isfile(sfpth1)))
print("spth2 isfile: {}".format(os.path.isfile(sfpth2)))
return False
# Open output file
if outfile is not None:
f = open(outfile, "w")
f.write("Created by pymake.autotest.compare_stages\n")
# Get stage objects
sobj1 = flopy.utils.SwrStage(sfpth1, verbose=verbose)
sobj2 = flopy.utils.SwrStage(sfpth2, verbose=verbose)
# get totim
times1 = sobj1.get_times()
# get kswr, kstp, and kper
kk = sobj1.get_kswrkstpkper()
header = (
"{:>15s} {:>15s} {:>15s} {:>15s}\n".format(" ", " ", " ", "MAXIMUM")
+ "{:>15s} {:>15s} {:>15s} {:>15s}\n".format(
"STRESS PERIOD", "TIME STEP", "SWR TIME STEP", "STAGE DIFFERENCE"
)
+ "{0:>15s} {0:>15s} {0:>15s} {0:>15s}\n".format(15 * "-")
)
if verbose:
print("Comparing results for {} times".format(len(times1)))
icnt = 0
# Process stage data
for idx, (kon, time) in enumerate(zip(kk, times1)):
s1 = sobj1.get_data(totim=time)
s2 = sobj2.get_data(totim=time)
if s1 is None or s2 is None:
continue
s1 = s1["stage"]
s2 = s2["stage"]
if difftol:
diffmax, indices = _calculate_difftol(s1, s2, htol)
else:
diffmax, indices = _calculate_diffmax(s1, s2)
if outfile is not None:
if idx < 1:
f.write(header)
f.write(
"{:15d} {:15d} {:15d} {:15.6g}\n".format(
kon[2] + 1, kon[1] + 1, kon[0] + 1, diffmax
)
)
if diffmax >= htol:
icnt += 1
if outfile is not None:
if difftol:
ee = (
"Maximum head difference ({}) -- ".format(diffmax)
+ "{} tolerance exceeded at ".format(htol)
+ "{} node location(s)".format(indices[0].shape[0])
)
else:
ee = (
"Maximum head difference "
+ "({}) exceeded ".format(diffmax)
+ "at {} node location(s):".format(indices[0].shape[0])
)
e = textwrap.fill(
ee + ":",
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
f.write("{}\n".format(e))
if verbose:
print(ee + " at time {}".format(time))
e = ""
for itupe in indices:
for ind in itupe:
e += "{} ".format(ind + 1) # convert to one-based
e = textwrap.fill(
e,
width=70,
initial_indent=" ",
subsequent_indent=" ",
)
f.write("{}\n".format(e))
# Write header again, unless it is the last record
if idx + 1 < len(times1):
f.write("\n{}".format(header))
# Close output file
if outfile is not None:
f.close()
# test for failure
success = True
if icnt > 0:
success = False
return success
def _calculate_diffmax(v1, v2):
"""Calculate the maximum difference between two vectors.
Parameters
----------
v1 : numpy.ndarray
array of base model results
v2 : numpy.ndarray
array of comparison model results
Returns
-------
diffmax : float
absolute value of the maximum difference in v1 and v2 array values
    indices : numpy.ndarray
indices where the absolute value of the difference is equal to the
absolute value of the maximum difference.
"""
if v1.ndim > 1 or v2.ndim > 1:
v1 = v1.flatten()
v2 = v2.flatten()
if v1.size != v2.size:
err = "Error: calculate_difference v1 size ({}) ".format(
v1.size
) + "is not equal to v2 size ({})".format(v2.size)
raise Exception(err)
diff = abs(v1 - v2)
diffmax = diff.max()
return diffmax, np.where(diff == diffmax)
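# Worked example (illustrative): for v1 = [1.0, 2.0, 3.0] and
# v2 = [1.0, 2.5, 3.0], abs(v1 - v2) is [0.0, 0.5, 0.0], so diffmax is 0.5
# and the returned indices point at position 1:
#
#   diffmax, idx = _calculate_diffmax(np.array([1.0, 2.0, 3.0]),
#                                     np.array([1.0, 2.5, 3.0]))
#   # diffmax == 0.5, idx == (array([1]),)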
def _calculate_difftol(v1, v2, tol):
"""Calculate the difference between two arrays relative to a tolerance.
Parameters
----------
v1 : numpy.ndarray
array of base model results
v2 : numpy.ndarray
array of comparison model results
tol : float
tolerance used to evaluate base and comparison models
Returns
-------
diffmax : float
absolute value of the maximum difference in v1 and v2 array values
    indices : numpy.ndarray
indices where the absolute value of the difference exceed the
specified tolerance.
"""
if v1.ndim > 1 or v2.ndim > 1:
v1 = v1.flatten()
v2 = v2.flatten()
if v1.size != v2.size:
err = "Error: calculate_difference v1 size ({}) ".format(
v1.size
) + "is not equal to v2 size ({})".format(v2.size)
raise Exception(err)
diff = abs(v1 - v2)
return diff.max(), np.where(diff > tol)
def compare(
namefile1,
namefile2,
precision="auto",
max_cumpd=0.01,
max_incpd=0.01,
htol=0.001,
outfile1=None,
outfile2=None,
files1=None,
files2=None,
):
"""Compare the budget and head results for two MODFLOW-based model
simulations.
Parameters
----------
namefile1 : str
namefile path for base model
namefile2 : str
namefile path for comparison model
precision : str
precision for binary head file ("auto", "single", or "double")
default is "auto"
max_cumpd : float
maximum percent discrepancy allowed for cumulative budget terms
(default is 0.01)
max_incpd : float
maximum percent discrepancy allowed for incremental budget terms
(default is 0.01)
htol : float
maximum allowed head difference (default is 0.001)
outfile1 : str
budget comparison output file name. If outfile1 is None, no budget
comparison output is saved. (default is None)
outfile2 : str
head comparison output file name. If outfile2 is None, no head
comparison output is saved. (default is None)
files1 : str
base model output file. If files1 is not None, results
will be extracted from files1 and namefile1 will not be used.
(default is None)
files2 : str
comparison model output file. If files2 is not None, results
will be extracted from files2 and namefile2 will not be used.
(default is None)
Returns
-------
success : bool
boolean indicating if the budget and head differences are less than
max_cumpd, max_incpd, and htol.
"""
# Compare budgets from the list files in namefile1 and namefile2
success1 = compare_budget(
namefile1,
namefile2,
max_cumpd=max_cumpd,
max_incpd=max_incpd,
outfile=outfile1,
files1=files1,
files2=files2,
)
success2 = compare_heads(
namefile1,
namefile2,
precision=precision,
htol=htol,
outfile=outfile2,
files1=files1,
files2=files2,
)
success = False
if success1 and success2:
success = True
return success
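# Usage sketch (assumed paths, shown for illustration only): compare a base
# and a candidate simulation and write the budget and head reports to files.
#
#   ok = compare(
#       "base/model.nam",
#       "candidate/model.nam",
#       outfile1="budget.cmp.txt",
#       outfile2="head.cmp.txt",
#   )
#   assert ok, "budget or head comparison failed"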
```
#### File: pymake/utils/_compiler_language_files.py
```python
import os
from ._dag import _order_f_source_files, _order_c_source_files
def _get_fortran_files(srcfiles, extensions=False):
"""Return a list of fortran files or unique fortran file extensions.
Parameters
-------
srcfiles : list
list of source file names
extensions : bool
flag controls return of either a list of fortran files or
a list of unique fortran file extensions
Returns
-------
files_out : list
list of fortran files or unique fortran file extensions
"""
files_out = []
for srcfile in srcfiles:
ext = os.path.splitext(srcfile)[1]
if ext.lower() in (
".f",
".for",
".f90",
".fpp",
):
if extensions:
# save unique extension
if ext not in files_out:
files_out.append(ext)
else:
files_out.append(srcfile)
if len(files_out) < 1:
files_out = None
return files_out
def _get_c_files(srcfiles, extensions=False):
"""Return a list of c and cpp files or unique c and cpp file extensions.
Parameters
-------
srcfiles : list
list of source file names
extensions : bool
flag controls return of either a list of c and cpp files or
a list of unique c and cpp file extensions
Returns
-------
files_out : list
        list of c or cpp files or unique c and cpp file extensions
"""
files_out = []
for srcfile in srcfiles:
ext = os.path.splitext(srcfile)[1]
if ext.lower() in (
".c",
".cpp",
):
if extensions:
if ext not in files_out:
files_out.append(ext)
else:
files_out.append(srcfile)
if len(files_out) < 1:
files_out = None
return files_out
def _get_iso_c(srcfiles):
"""Determine if iso_c_binding is used so that the correct c/c++ compiler
flags can be set. All fortran files are scanned.
Parameters
----------
srcfiles : list
list of fortran source files
Returns
-------
iso_c : bool
flag indicating if iso_c_binding is used in any fortran file
"""
iso_c = False
for srcfile in srcfiles:
if os.path.exists(srcfile):
            # read and decode the file; the with-block closes the handle
            with open(srcfile, "rb") as f:
                lines = f.read().decode("ascii", "replace").splitlines()
# develop a list of modules in the file
for line in lines:
linelist = line.strip().split()
if len(linelist) == 0:
continue
if linelist[0].upper() == "USE":
modulename = linelist[1].split(",")[0].upper()
if "ISO_C_BINDING" == modulename:
iso_c = True
break
# terminate file content search if iso_c is True
if iso_c:
break
else:
msg = "get_iso_c: could not " + "open {}".format(
os.path.basename(srcfile)
)
raise FileNotFoundError(msg)
return iso_c
def _preprocess_file(srcfiles):
"""Determine if the file should be preprocessed.
Parameters
----------
srcfiles : str or list
source file path or list of source file paths
Returns
-------
preprocess : bool
flag indicating if the file should be preprocessed
"""
if isinstance(srcfiles, str):
srcfiles = [srcfiles]
preprocess = False
for srcfile in srcfiles:
if os.path.exists(srcfile):
            # read and decode the file; the with-block closes the handle
            with open(srcfile, "rb") as f:
                lines = f.read().decode("ascii", "replace").splitlines()
# develop a list of modules in the file
for line in lines:
linelist = line.strip().split()
if len(linelist) == 0:
continue
if linelist[0].lower() in (
"#define",
"#undef",
"#ifdef",
"#ifndef",
"#if",
"#error",
):
preprocess = True
break
# terminate file content search if preprocess is True
if preprocess:
break
else:
msg = "_preprocess_file: could not " + "open {}".format(
os.path.basename(srcfile)
)
raise FileNotFoundError(msg)
return preprocess
def _get_srcfiles(srcdir, include_subdir):
"""Get a list of source files in source file directory srcdir
Parameters
----------
srcdir : str
path for directory containing source files
    include_subdir : bool
boolean indicating source files in srcdir subdirectories should be
included in the build
Returns
-------
srcfiles : list
list of fortran and c/c++ file in srcdir
"""
# create a list of all c(pp), f and f90 source files
templist = []
for path, _, files in os.walk(srcdir):
for file in files:
if not include_subdir:
if path != srcdir:
continue
            file = os.path.join(path, file)
templist.append(file)
srcfiles = []
for file in templist:
if (
file.lower().endswith(".f")
or file.lower().endswith(".f90")
or file.lower().endswith(".for")
or file.lower().endswith(".fpp")
or file.lower().endswith(".c")
or file.lower().endswith(".cpp")
):
srcfiles.append(os.path.relpath(file, os.getcwd()))
return sorted(srcfiles)
def _get_ordered_srcfiles(all_srcfiles, networkx):
"""Create a list of ordered source files (both fortran and c). Ordering is
build using a directed acyclic graph to determine module dependencies.
Parameters
----------
all_srcfiles : list
list of all fortran and c/c++ source files
networkx : bool
boolean indicating if the NetworkX python package should be used
to determine the DAG.
Returns
-------
ordered_srcfiles : list
list of ordered source files
"""
cfiles = []
ffiles = []
for file in all_srcfiles:
if (
file.lower().endswith(".f")
or file.lower().endswith(".f90")
or file.lower().endswith(".for")
or file.lower().endswith(".fpp")
):
ffiles.append(file)
elif file.lower().endswith(".c") or file.lower().endswith(".cpp"):
cfiles.append(file)
# order the source files using the directed acyclic graph in _dag.py
ordered_srcfiles = []
if ffiles:
ordered_srcfiles += _order_f_source_files(ffiles, networkx)
if cfiles:
ordered_srcfiles += _order_c_source_files(cfiles, networkx)
return ordered_srcfiles
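# Illustrative sketch only (not the actual _dag.py implementation): how a small
# module-dependency map could be ordered with networkx. The file names and the
# "dependencies" mapping below are hypothetical.
def _example_topological_order():
    import networkx as nx
    dependencies = {
        "main.f90": ["utils.f90", "constants.f90"],
        "utils.f90": ["constants.f90"],
        "constants.f90": [],
    }
    graph = nx.DiGraph()
    for target, sources in dependencies.items():
        graph.add_node(target)
        for source in sources:
            # edge from dependency to dependent so the sort yields a valid build order
            graph.add_edge(source, target)
    return list(nx.topological_sort(graph))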
```
|
{
"source": "jdhunterae/sim-venture",
"score": 3
}
|
#### File: sim-venture/py_v/effects.py
```python
class Effect(object):
def __init__(self, action):
self.action = action
def cause(self, targets):
pass
class NoEffect(Effect):
def __init__(self, action):
super(NoEffect, self).__init__(action)
def cause(self, targets):
print("[error] no effect declared for '%s'" % (type(self.action).__name__))
class DamageEffect(Effect):
def __init__(self, action):
super(DamageEffect, self).__init__(action)
def cause(self, targets):
if targets is not None:
for target in targets:
target.health = max(0, target.health - self.action.actor.attack_p)
print("%s attacks %s for %d damage..." % (self.action.actor.name, target.name, self.action.actor.attack_p))
print(" %s: %d/%d" % (target.name, target.health, target.health_max))
else:
print("[error]: %s can't find anything to attack..." % (self.action.actor.name))
class MissEffect(Effect):
def __init__(self, action):
super(MissEffect, self).__init__(action)
def cause(self, targets):
if targets is not None:
for target in targets:
print("%s swings at %s...and misses" % (self.action.actor.name, target.name))
else:
print("[error]: %s can't find anyone to miss..." % (self.action.actor.name))
class RunAwayEffect(Effect):
def __init__(self, action):
super(RunAwayEffect, self).__init__(action)
def cause(self, targets):
if targets is not None and self.action.actor in targets:
print("%s tries to run..." % (self.action.actor.name))
targets.remove(self.action.actor)
print(" %s has escaped the battle." % (self.action.actor.name))
else:
print("[error]: %s can't run away from nothing..." % (self.action.actor.name))
class BlockedEffect(Effect):
def __init__(self, action):
super(BlockedEffect, self).__init__(action)
def cause(self, targets):
if targets is not None and self.action.actor in targets:
print("%s tries to run...but can't escape" % (self.action.actor.name))
else:
print("[error]: %s can't run away from nothing..." % (self.action.actor.name))
class MagicDamageEffect(Effect):
def __init__(self, action):
super(MagicDamageEffect, self).__init__(action)
def cause(self, targets):
if targets is not None:
for target in targets:
target.health = max(0, target.health - self.action.actor.attack_m)
print("%s launches a barrage of magical missles at %s for %d damage..." % (self.action.actor.name, target.name, self.action.actor.attack_m))
print(" %s: %d/%d" % (target.name, target.health, target.health_max))
else:
print("[error]: %s can't find anyone to focus on..." % (self.action.actor.name))
class MagicMissEffect(Effect):
def __init__(self, action):
super(MagicMissEffect, self).__init__(action)
def cause(self, targets):
if targets is not None:
for target in targets:
print("%s summons up energies...and they fizzle" % (self.action.actor.name))
else:
print("[error]: %s can't find anyone to focus on..." % (self.action.actor.name))
```
|
{
"source": "jdhxyy/dcom-python",
"score": 2
}
|
#### File: dcom-python/dcompy/block_tx.py
```python
from dcompy.tx import *
from dcompy.common import *
from dcompy.config import *
import asyncio
import crcmodbus
import threading
class _Item:
def __init__(self):
self.protocol = 0
self.pipe = 0
self.dst_ia = 0
self.code = 0
self.rid = 0
self.token = 0
        # the first frame needs retransmission control
self.is_first_frame = False
self.first_frame_retry_time = 0
self.first_frame_retry_num = 0
self.last_rx_ack_time = 0
self.crc16 = 0
self.data = bytearray()
_items = list()
_lock = threading.Lock()
async def block_tx_run():
"""
    Run the block transfer send module
:return:
"""
while True:
_lock.acquire()
for item in _items:
_check_timeout_and_retry_send_first_frame(item)
_lock.release()
await asyncio.sleep(INTERVAL)
def _check_timeout_and_retry_send_first_frame(item: _Item):
now = get_time()
load_param = get_load_param()
if not item.is_first_frame:
if now - item.last_rx_ack_time > load_param.block_retry_interval * load_param.block_retry_max_num * 1000:
log.warn('block tx timeout!remove task.token:%d', item.token)
_items.remove(item)
return
    # first frame handling
if now - item.first_frame_retry_time < load_param.block_retry_interval * 1000:
return
if item.first_frame_retry_num >= load_param.block_retry_max_num:
log.warn('block tx timeout!first frame send retry too many.token:%d', item.token)
_items.remove(item)
else:
item.first_frame_retry_num += 1
item.first_frame_retry_time = now
log.info("block tx send first frame.token:%d retry num:%d", item.token, item.first_frame_retry_num)
_block_tx_send_frame(item, 0)
def _block_tx_send_frame(item: _Item, offset: int):
log.info('block tx send.token:%d offset:%d', item.token, offset)
delta = len(item.data) - offset
payload_len = SINGLE_FRAME_SIZE_MAX - BLOCK_HEADER_LEN
if payload_len > delta:
payload_len = delta
frame = BlockFrame()
frame.control_word.code = item.code
frame.control_word.block_flag = 1
frame.control_word.rid = item.rid
frame.control_word.token = item.token
frame.control_word.payload_len = BLOCK_HEADER_LEN + payload_len
frame.block_header.crc16 = item.crc16
frame.block_header.total = len(item.data)
frame.block_header.offset = offset
frame.payload.extend(item.data[offset:offset + payload_len])
block_send(item.protocol, item.pipe, item.dst_ia, frame)
def block_tx(protocol: int, pipe: int, dst_ia: int, code: int, rid: int, token: int, data: bytearray):
"""
    Block transfer send
"""
if len(data) <= SINGLE_FRAME_SIZE_MAX:
return
_lock.acquire()
if _is_item_exist(protocol, pipe, dst_ia, code, rid, token):
_lock.release()
return
log.info('block tx new task.token:%d dst ia:0x%x code:%d rid:%d', token, dst_ia, code, rid)
item = _create_item(protocol, pipe, dst_ia, code, rid, token, data)
_block_tx_send_frame(item, 0)
item.first_frame_retry_num += 1
item.first_frame_retry_time = get_time()
_items.append(item)
_lock.release()
def _is_item_exist(protocol: int, pipe: int, dst_ia: int, code: int, rid: int, token: int) -> bool:
for item in _items:
if item.protocol == protocol and item.pipe == pipe and item.dst_ia == dst_ia and item.code == code \
and item.rid == rid and item.token == token:
return True
return False
def _create_item(protocol: int, pipe: int, dst_ia: int, code: int, rid: int, token: int, data: bytearray) -> _Item:
item = _Item()
item.protocol = protocol
item.pipe = pipe
item.dst_ia = dst_ia
item.code = code
item.rid = rid
item.token = token
item.data.extend(data)
item.crc16 = crcmodbus.checksum(data)
item.is_first_frame = True
item.first_frame_retry_num = 0
now = get_time()
item.first_frame_retry_time = now
item.last_rx_ack_time = now
return item
def block_rx_back_frame(protocol: int, pipe: int, src_ia: int, frame: Frame):
"""
    Handler called when a BACK frame is received
"""
if frame.control_word.code != CODE_BACK:
return
_lock.acquire()
for item in _items:
if _check_item_and_deal_back_frame(protocol, pipe, src_ia, frame, item):
break
_lock.release()
def _check_item_and_deal_back_frame(protocol: int, pipe: int, src_ia: int, frame: Frame, item: _Item) -> bool:
"""
    checkNodeAndDealBackFrame: check whether the item matches this frame and, if so, handle the BACK frame
    :return: True if the item matched
"""
if item.protocol != protocol or item.pipe != pipe or item.dst_ia != src_ia or \
item.rid != frame.control_word.rid or item.token != frame.control_word.token:
return False
log.info('block tx receive back.token:%d', item.token)
if frame.control_word.payload_len != 2:
log.warn('block rx receive back deal failed!token:%d payload len is wrong:%d', item.token,
frame.control_word.payload_len)
return False
start_offset = (frame.payload[0] << 8) + frame.payload[1]
if start_offset >= len(item.data):
        # transmission finished
log.info('block tx end.receive back token:%d start offset:%d >= data len:%d"', item.token, start_offset,
len(item.data))
_items.remove(item)
return True
if item.is_first_frame:
item.is_first_frame = False
item.last_rx_ack_time = get_time()
_block_tx_send_frame(item, start_offset)
return True
def block_tx_deal_rst_frame(protocol: int, pipe: int, src_ia: int, frame: Frame):
"""
    Handle reset (RST) connection frames in the block transfer send module
"""
_lock.acquire()
for item in _items:
if item.protocol == protocol and item.pipe == pipe and item.dst_ia == src_ia \
and item.rid == frame.control_word.rid and item.token == frame.control_word.token:
log.warn('block tx receive rst.token:%d', item.token)
_items.remove(item)
break
_lock.release()
def block_remove(protocol: int, pipe: int, dst_ia: int, code: int, rid: int, token: int):
"""块传输发送移除任务"""
_lock.acquire()
for item in _items:
if item.protocol == protocol and item.pipe == pipe and item.dst_ia == dst_ia and item.code == code and \
item.rid == rid and item.token == token:
log.warn('block tx remove task.token:%d', item.token)
_items.remove(item)
break
_lock.release()
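# Illustrative sketch of how a payload larger than one frame is covered by
# (offset, payload_len) chunks. The frame and header sizes are assumed example
# values, not the real SINGLE_FRAME_SIZE_MAX / BLOCK_HEADER_LEN constants.
def _example_block_offsets(total_len, frame_size=128, header_len=8):
    payload_max = frame_size - header_len
    offset = 0
    while offset < total_len:
        payload_len = min(payload_max, total_len - offset)
        yield offset, payload_len
        offset += payload_len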
```
#### File: dcom-python/dcompy/callback.py
```python
import dcompy.log as log
from dcompy.system_error import *
_services = dict()
def register(protocol: int, rid: int, callback):
"""
注册DCOM服务回调函数
:param protocol: 协议号
:param rid: 服务号
:param callback: 回调函数.格式: func(pipe: int, src_ia: int, req: bytearray) (bytearray, int)
:return: 返回值是应答和错误码.错误码为0表示回调成功,否则是错误码
"""
log.info('register.protocol:%d rid:%d', protocol, rid)
rid += protocol << 16
_services[rid] = callback
def service_callback(protocol: int, pipe: int, src_ia: int, rid: int, req: bytearray) -> (bytearray, int):
"""
回调资源号rid对应的函数
"""
log.info('service callback.rid:%d', rid)
rid += protocol << 16
if rid not in _services:
log.warn('service callback failed!can not find new rid:%d', rid)
return None, SYSTEM_ERROR_INVALID_RID
return _services[rid](pipe, src_ia, req)
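# Illustrative sketch of the composite key used above: the protocol number is
# packed into the upper bits and the resource id stays in the lower 16 bits
# (assuming rid fits in 16 bits).
def _example_pack_key(protocol: int, rid: int) -> int:
    return (protocol << 16) + rid
def _example_unpack_key(key: int) -> (int, int):
    return key >> 16, key & 0xFFFF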
```
#### File: dcom-python/dcompy/rx_con.py
```python
from dcompy.callback import *
from dcompy.block_tx import *
def rx_con(protocol: int, pipe: int, src_ia: int, frame: Frame):
"""
    Handler called when a connection frame is received
"""
log.info('rx con.token:%d', frame.control_word.token)
resp, err = service_callback(protocol, pipe, src_ia, frame.control_word.rid, frame.payload)
    # NON frames require no acknowledgement
if frame.control_word.code == CODE_NON:
return
if err != SYSTEM_OK:
log.info('service send err:0x%x token:%d', err, frame.control_word.token)
send_rst_frame(protocol, pipe, src_ia, err, frame.control_word.rid, frame.control_word.token)
return
if resp and len(resp) > SINGLE_FRAME_SIZE_MAX:
        # response too long for a single frame; start block transfer
log.info('service send too long:%d.start block tx.token:%d', len(resp), frame.control_word.token)
block_tx(protocol, pipe, src_ia, CODE_ACK, frame.control_word.rid, frame.control_word.token, resp)
return
ack_frame = Frame()
ack_frame.control_word.code = CODE_ACK
ack_frame.control_word.block_flag = 0
ack_frame.control_word.rid = frame.control_word.rid
ack_frame.control_word.token = frame.control_word.token
if resp:
ack_frame.control_word.payload_len = len(resp)
ack_frame.payload.extend(resp)
else:
ack_frame.control_word.payload_len = 0
send(protocol, pipe, src_ia, ack_frame)
```
#### File: dcom-python/dcompy/waitlist.py
```python
import dcompy.log as log
from dcompy.block_tx import *
from dcompy.system_error import *
from typing import Callable
import threading
class _Item:
def __init__(self):
self.protocol = 0
self.pipe = 0
self.timeout = 0
self.req = bytearray()
self.resp = bytearray()
        # start time in microseconds; used to check against the overall timeout
self.start_time = 0
        # callback function; if set, the call is asynchronous
self.ack_callback = None
self.dst_ia = 0
self.rid = 0
self.token = 0
self.is_rx_ack = False
self.result = SYSTEM_OK
        # timestamp of the last send in microseconds; used for retransmission
self.last_retry_timestamp = 0
self.retry_num = 0
self.code = 0
_items = list()
_lock = threading.Lock()
async def waitlist_run():
"""
    Run the module: check the wait list for retransmissions, timeouts, etc.
"""
while True:
_check_wait_items()
await asyncio.sleep(INTERVAL)
def _check_wait_items():
_lock.acquire()
for item in _items:
_retry_send(item)
_lock.release()
def _retry_send(item: _Item):
t = get_time()
if t - item.start_time > item.timeout:
log.warn('wait ack timeout!task failed!token:%d', item.token)
_items.remove(item)
if len(item.req) > SINGLE_FRAME_SIZE_MAX:
block_remove(item.protocol, item.pipe, item.dst_ia, item.code, item.rid, item.token)
if item.ack_callback:
            # callback mode
item.ack_callback(bytearray(), SYSTEM_ERROR_RX_TIMEOUT)
else:
            # synchronous call
item.is_rx_ack = True
item.result = SYSTEM_ERROR_RX_TIMEOUT
return
    # block transfers are not retried here; the block transfer module handles its own retries
if len(item.req) > SINGLE_FRAME_SIZE_MAX:
return
load_param = get_load_param()
if t - item.last_retry_timestamp < load_param.block_retry_interval * 1000:
return
    # retransmit
item.retry_num += 1
if item.retry_num >= load_param.block_retry_max_num:
log.warn('retry too many!task failed!token:%d', item.token)
_items.remove(item)
if item.ack_callback:
            # callback mode
item.ack_callback(bytearray(), SYSTEM_ERROR_RX_TIMEOUT)
else:
            # synchronous call
item.is_rx_ack = True
item.result = SYSTEM_ERROR_RX_TIMEOUT
return
item.last_retry_timestamp = t
log.warn('retry send.token:%d retry num:%d', item.token, item.retry_num)
_send_frame(item.protocol, item.pipe, item.dst_ia, item.code, item.rid, item.token, item.req)
def call(protocol: int, pipe: int, dst_ia: int, rid: int, timeout: int, req: bytearray) -> (bytearray, int):
"""
    Synchronous RPC call
    :param protocol: protocol number
    :param pipe: communication pipe
    :param dst_ia: destination ia address
    :param rid: resource (service) id
    :param timeout: timeout in ms. 0 means no acknowledgement is required
    :param req: request data. Use bytearray() or None when there is no payload
    :return: response byte stream and error code. A code other than SYSTEM_OK means the call failed
"""
log.info('call.protocol:%d pipe:0x%x dst ia:0x%x rid:%d timeout:%d', protocol, pipe, dst_ia, rid, timeout)
code = CODE_CON if timeout > 0 else CODE_NON
if not req:
req = bytearray()
token = get_token()
_send_frame(protocol, pipe, dst_ia, code, rid, token, req)
if code == CODE_NON:
return bytearray(), SYSTEM_OK
item = _Item()
item.protocol = protocol
item.pipe = pipe
item.timeout = timeout * 1000
item.req = req
item.start_time = get_time()
item.dst_ia = dst_ia
item.rid = rid
item.token = token
item.code = code
item.retry_num = 0
item.last_retry_timestamp = get_time()
_lock.acquire()
_items.append(item)
_lock.release()
while True:
if item.is_rx_ack:
break
log.info('call resp.result:%d len:%d', item.result, len(item.resp))
return item.resp, item.result
def call_async(protocol: int, pipe: int, dst_ia: int, rid: int, timeout: int, req: bytearray,
ack_callback: Callable[[bytearray, int], None]):
"""
    Asynchronous RPC call
    :param protocol: protocol number
    :param pipe: communication pipe
    :param dst_ia: destination ia address
    :param rid: resource (service) id
    :param timeout: timeout in ms. 0 means no acknowledgement is required
    :param req: request data. Use bytearray() or None when there is no payload
    :param ack_callback: callback function. Signature: func(resp: bytearray, error: int). The arguments are the response byte stream and the error code; a code other than SYSTEM_OK means the call failed
"""
code = CODE_CON
if timeout == 0 or not callable(ack_callback):
code = CODE_NON
if not req:
req = bytearray()
token = get_token()
log.info('call async.token:%d protocol:%d pipe:0x%x dst ia:0x%x rid:%d timeout:%d', token, protocol, pipe, dst_ia,
rid, timeout)
_send_frame(protocol, pipe, dst_ia, code, rid, token, req)
if code == CODE_NON:
return
item = _Item()
item.ack_callback = ack_callback
item.protocol = protocol
item.pipe = pipe
item.timeout = timeout * 1000
item.req = req
item.start_time = get_time()
item.dst_ia = dst_ia
item.rid = rid
    item.token = token
item.code = code
item.retry_num = 0
item.last_retry_timestamp = get_time()
_lock.acquire()
_items.append(item)
_lock.release()
def _send_frame(protocol: int, pipe: int, dst_ia: int, code: int, rid: int, token: int, data: bytearray):
if len(data) > SINGLE_FRAME_SIZE_MAX:
block_tx(protocol, pipe, dst_ia, code, rid, token, data)
return
frame = Frame()
frame.control_word.code = code
frame.control_word.block_flag = 0
frame.control_word.rid = rid
frame.control_word.token = token
frame.control_word.payload_len = len(data)
frame.payload.extend(data)
log.info('send frame.token:%d', token)
send(protocol, pipe, dst_ia, frame)
def rx_ack_frame(protocol: int, pipe: int, src_ia: int, frame: Frame):
"""
    Handler called when an ACK frame is received
"""
_lock.acquire()
log.info('rx ack frame.src ia:0x%x', src_ia)
for item in _items:
if _check_item_and_deal_ack_frame(protocol, pipe, src_ia, frame, item):
break
_lock.release()
def _check_item_and_deal_ack_frame(protocol: int, pipe: int, src_ia: int, frame: Frame, item: _Item) -> bool:
if item.protocol != protocol or item.pipe != pipe or item.dst_ia != src_ia or item.rid != frame.control_word.rid \
or item.token != frame.control_word.token:
return False
log.info('deal ack frame.token:%d', item.token)
_items.remove(item)
if item.ack_callback:
        # callback mode
item.ack_callback(frame.payload, SYSTEM_OK)
else:
        # synchronous call
item.is_rx_ack = True
item.result = SYSTEM_OK
item.resp = frame.payload
return True
def rx_rst_frame(protocol: int, pipe: int, src_ia: int, frame: Frame):
"""
    Handler called when an RST frame is received
"""
_lock.acquire()
log.warn('rx rst frame.src ia:0x%x', src_ia)
for item in _items:
_deal_rst_frame(protocol, pipe, src_ia, frame, item)
_lock.release()
def _deal_rst_frame(protocol: int, pipe: int, src_ia: int, frame: Frame, item: _Item):
if item.protocol != protocol or item.pipe != pipe or item.dst_ia != src_ia or item.rid != frame.control_word.rid \
or item.token != frame.control_word.token:
return False
result = frame.payload[0]
log.warn('deal rst frame.token:%d result:0x%x', item.token, result)
_items.remove(item)
if item.ack_callback:
        # callback mode
item.ack_callback(bytearray(), result)
else:
        # synchronous call
item.is_rx_ack = True
item.result = result
return True
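# Illustrative sketch of an ack callback with the signature call_async expects:
# func(resp: bytearray, error: int). The body is only a logging placeholder.
def _example_ack_callback(resp: bytearray, error: int):
    if error != SYSTEM_OK:
        log.warn('example call failed.error:0x%x', error)
        return
    log.info('example call ok.resp len:%d', len(resp))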
```
|
{
"source": "jdhxyy/ssd1306py-micropython",
"score": 3
}
|
#### File: ssd1306py-micropython/ssd1306py/ascii32.py
```python
import sys
_file = None
def _get_ch(ch):
global _file
if _file is None:
_file = open(sys.path[1] + '/ssd1306py/ascii32.txt', 'r')
_file.seek(ord(ch) * 329)
get_line1 = _file.readline()
get_line2 = _file.readline()
data = []
n = 0
for v in get_line1.split(','):
data.append(int(v))
n += 1
if n == 32:
break
n = 0
for v in get_line2.split(','):
data.append(int(v))
n += 1
if n == 32:
break
return data
def display(oled, string, x_axis, y_axis):
offset = 0
for k in string:
byte_data = _get_ch(k)
for y in range(0, 32):
a = bin(byte_data[y]).replace('0b', '')
while len(a) < 8:
a = '0' + a
b = bin(byte_data[y + 32]).replace('0b', '')
while len(b) < 8:
b = '0' + b
for x in range(0, 8):
oled.pixel(x_axis + offset + x, y + y_axis, int(a[x]))
oled.pixel(x_axis + offset + x + 8, y + y_axis, int(b[x]))
offset += 16
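# Illustrative sketch of the bit unpacking used in display() above: each font byte
# becomes an 8-character bit string (most significant bit first) and every '1'
# lights one pixel. The byte value in the comment is an arbitrary example.
def _example_byte_to_bits(value):
    bits = bin(value).replace('0b', '')
    while len(bits) < 8:
        bits = '0' + bits
    return bits  # e.g. _example_byte_to_bits(0x3C) returns '00111100'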
```
|
{
"source": "jdhxyy/tziot-micropython",
"score": 2
}
|
#### File: tziot-micropython/tziot/parsecmp.py
```python
import tziot.standardlayer as standardlayer
import tziot.config as config
import knocky as knock
import utzpy as utz
import lagan
def init():
standardlayer.register_rx_observer(_deal_rx)
def _deal_rx(data: bytearray, standard_header: utz.StandardHeader, pipe: int):
if standard_header.dst_ia != config.local_ia or standard_header.next_head != utz.HEADER_CMP:
return
payload = utz.flp_frame_to_bytes(data)
if payload is None:
lagan.warn(config.TAG, "parse cmp failed.flp frame to bytes failed")
return
if len(payload) == 0:
lagan.warn(config.TAG, "parse cmp failed.payload len is wrong:%d", len(payload))
return
knock.call(utz.HEADER_CMP, payload[0], payload[1:])
```
|
{
"source": "jdiasn/McRadar",
"score": 2
}
|
#### File: McRadar/mcradar/fullRadarOperator.py
```python
import xarray as xr
from mcradar import *
def fullRadar(dicSettings, mcTable):
"""
Calculates the radar variables over the entire range
Parameters
----------
dicSettings: a dictionary with all settings output from loadSettings()
mcTable: McSnow data output from getMcSnowTable()
Returns
-------
specXR: xarray dataset with the spectra(range, vel) and KDP(range)
"""
specXR = xr.Dataset()
#specXR_turb = xr.Dataset()
counts = np.ones_like(dicSettings['heightRange'])*np.nan
vol = dicSettings['gridBaseArea'] * dicSettings['heightRes']
for i, heightEdge0 in enumerate(dicSettings['heightRange']):
heightEdge1 = heightEdge0 + dicSettings['heightRes']
print('Range: from {0} to {1}'.format(heightEdge0, heightEdge1))
mcTableTmp = mcTable[(mcTable['sHeight']>heightEdge0) &
(mcTable['sHeight']<=heightEdge1)].copy()
#mcTableTmp = mcTableTmp[(mcTableTmp['sPhi']<=4)]
if (dicSettings['scatSet']['mode'] == 'full') or (dicSettings['scatSet']['mode'] == 'table') or (dicSettings['scatSet']['mode'] == 'wisdom') :
mcTableTmp = mcTableTmp[(mcTableTmp['sPhi']>=0.01)]
mcTableTmp = calcParticleZe(dicSettings['wl'], dicSettings['elv'],
mcTableTmp, ndgs=dicSettings['ndgsVal'],
scatSet=dicSettings['scatSet'])
tmpSpecXR = getMultFrecSpec(dicSettings['wl'], mcTableTmp, dicSettings['velBins'],
dicSettings['velCenterBin'], heightEdge1,dicSettings['convolute'],dicSettings['nave'],dicSettings['noise_pow'],
dicSettings['eps_diss'], dicSettings['uwind'], dicSettings['time_int'], dicSettings['theta']/2./180.*np.pi, scatSet=dicSettings['scatSet'] )
#volume normalization
tmpSpecXR = tmpSpecXR/vol
specXR = xr.merge([specXR, tmpSpecXR])
if (dicSettings['scatSet']['mode'] == 'full') or (dicSettings['scatSet']['mode'] == 'table') or (dicSettings['scatSet']['mode'] == 'wisdom') :
#calculating the integrated kdp
tmpKdpXR = getIntKdp(dicSettings['wl'], mcTableTmp, heightEdge1)
#volume normalization
tmpKdpXR = tmpKdpXR/vol
specXR = xr.merge([specXR, tmpKdpXR])
counts[i] = len(mcTableTmp.vel.values)
return specXR
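# Illustrative sketch of the height-gate selection used in the loop above, with a
# hypothetical table: a particle belongs to a gate if heightEdge0 < sHeight <= heightEdge1.
def _example_height_selection():
    import pandas as pd
    table = pd.DataFrame({'sHeight': [50.0, 120.0, 180.0, 260.0]})
    heightEdge0, heightRes = 100.0, 100.0
    heightEdge1 = heightEdge0 + heightRes
    return table[(table['sHeight'] > heightEdge0) & (table['sHeight'] <= heightEdge1)]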
```
#### File: mcradar/radarOperator/spectraOperator.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def getVelIntSpec(mcTable, mcTable_binned, variable):
"""
Calculates the integrated reflectivity for each velocity bin
Parameters
----------
mcTable: McSnow output returned from calcParticleZe()
mcTable_binned: McSnow table output binned for a given velocity bin
    variable: name of the column variable which will be integrated over a velocity bin
Returns
-------
mcTableVelIntegrated: table with the integrated reflectivity for each velocity bin
"""
mcTableVelIntegrated = mcTable.groupby(mcTable_binned)[variable].agg(['sum'])
return mcTableVelIntegrated
def getMultFrecSpec(wls, mcTable, velBins, velCenterBins , centerHeight, convolute,nave,noise_pow,eps_diss,uwind,time_int,theta,scatSet={'mode':'full', 'safeTmatrix':False}):
"""
Calculation of the multi-frequency spectrograms
Parameters
----------
    wls: wavelength (iterable) [mm]
mcTable: McSnow output returned from calcParticleZe()
velBins: velocity bins for the calculation of the spectrogram (array) [m/s]
velCenterBins: center of the velocity bins (array) [m/s]
centerHeight: center height of each range gate (array) [m]
Returns
-------
xarray dataset with the multi-frequency spectrograms
xarray dims = (range, vel)
"""
mcTable_binned = pd.cut(mcTable['vel'], velBins)
tmpDataDic = {}
for wl in wls:
wlStr = '{:.2e}'.format(wl)
if (scatSet['mode'] == 'SSRGA') or (scatSet['mode'] == 'Rayleigh') or (scatSet['mode'] == 'SSRGA-Rayleigh'):
mcTable['sZeMultH_{0}'.format(wlStr)] = mcTable['sZeH_{0}'.format(wlStr)] * mcTable['sMult']
#print(mcTable['sMult'])
#plt.plot(mcTable['sZeMultH_{0}'.format(wlStr)],mcTable['radii_mm'],label='Mult')
#plt.plot(mcTable['sZeH_{0}'.format(wlStr)],mcTable['radii_mm'],label='sZe')
#plt.legend()
#plt.show()
intSpecH = getVelIntSpec(mcTable, mcTable_binned,'sZeMultH_{0}'.format(wlStr))
if convolute == True:
intSpecH = convoluteSpec(intSpecH,wl,velCenterBins,eps_diss,noise_pow,nave,theta,uwind,time_int,centerHeight)
tmpDataDic['spec_H_{0}'.format(wlStr)] = intSpecH.values[:,0]
else:
mcTable['sZeMultH_{0}'.format(wlStr)] = mcTable['sZeH_{0}'.format(wlStr)] * mcTable['sMult']
mcTable['sZeMultV_{0}'.format(wlStr)] = mcTable['sZeV_{0}'.format(wlStr)] * mcTable['sMult']
intSpecH = getVelIntSpec(mcTable, mcTable_binned,'sZeMultH_{0}'.format(wlStr))
intSpecV = getVelIntSpec(mcTable, mcTable_binned, 'sZeMultV_{0}'.format(wlStr))
if convolute == True:
intSpecH = convoluteSpec(intSpecH,wl,velCenterBins,eps_diss,noise_pow,nave,theta,uwind,time_int,centerHeight)
intSpecV = convoluteSpec(intSpecV,wl,velCenterBins,eps_diss,noise_pow,nave,theta,uwind,time_int,centerHeight)
tmpDataDic['spec_H_{0}'.format(wlStr)] = intSpecH.values[:,0]
tmpDataDic['spec_V_{0}'.format(wlStr)] = intSpecV.values[:,0]
#converting to XR
specTable = pd.DataFrame(data=tmpDataDic, index=velCenterBins)
specTable = specTable.to_xarray()
specTable = specTable.expand_dims(dim='range').assign_coords(range=[centerHeight])
specTable = specTable.rename_dims(dims_dict={'index':'vel'}).rename(name_dict={'index':'vel'})
return specTable
def convoluteSpec(spec,wl,vel,eps,noise_pow,nave,theta,u_wind,time_avg,height):
"""
    This function convolves the spectrum with turbulence broadening and optionally adds random noise.
    Parameters
    ----------
    spec: spectral data (pd.DataFrame) [mm^6/m^3]
    wl: wavelength [mm]
    vel: velocity bin centers (array) [m/s]
    eps: turbulence eddy dissipation rate
    noise_pow: radar noise power [mm^6/m^3]
    nave: number of spectral averages
    theta: half beam width [rad]
    u_wind: horizontal wind speed [m/s]
    time_avg: averaging (integration) time [s]
    height: height of the range gate [m]
Returns
-------
convoluted and noisy spectrum as pd.dataframe with the index of the input spec
"""
L_s = u_wind*time_avg + 2*height*np.sin(theta)
L_lam = wl/2
    sigma_t2 = 3/4*(eps/(2*np.pi))**(2/3)*(L_s**(2/3) - L_lam**(2/3))
spec_turb = np.zeros(len(vel))#spec.copy()*np.NaN
dv = np.diff(vel)[0]
prefactor_turb = 1.0 / (np.sqrt(2.0 * np.pi) * np.sqrt(sigma_t2))
#- turbulence convolution:
for i in range(len(vel)):
integral = 0
for ii in range(len(vel)):
exp_arg = (-1.0*(vel[i]-vel[ii])**2)/(2.0*sigma_t2)
if exp_arg >= -100:
integral = integral + (spec.values[ii]*np.exp(exp_arg)*dv)
spec_turb[i] = prefactor_turb * integral
#- add random noise
Ni = noise_pow / (len(vel) * dv)
random_numbers = np.random.uniform(size=len(vel)*nave)
S_bin_noise = np.zeros(len(vel))
for iave in range(nave):
S_bin_noise = S_bin_noise + (-np.log(random_numbers[iave * (len(vel)) : ((iave+1) * len(vel))]) * (spec_turb + np.ones(len(vel))*Ni ))
spectrum = S_bin_noise / nave
return pd.DataFrame(data=spectrum,index=spec.index)
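# Illustrative worked example of the broadening term used above, with hypothetical
# values: eps = 1e-4 m^2 s^-3, 5 m/s wind, 2 s integration, 1000 m height,
# 0.5 deg beam width and an 8.6 mm (Ka-band) wavelength.
def _example_sigma_t2():
    eps, u_wind, time_avg, height = 1e-4, 5.0, 2.0, 1000.0
    theta = 0.5 / 2.0 / 180.0 * np.pi
    wl = 8.6
    L_s = u_wind * time_avg + 2 * height * np.sin(theta)
    L_lam = wl / 2
    return 3 / 4 * (eps / (2 * np.pi)) ** (2 / 3) * (L_s ** (2 / 3) - L_lam ** (2 / 3))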
```
#### File: mcradar/radarOperator/zeOperator.py
```python
import subprocess
import numpy as np
import xarray as xr
from glob import glob
from pytmatrix.tmatrix import Scatterer
from pytmatrix import psd, orientation, radar
from pytmatrix import refractive, tmatrix_aux
from scipy import constants
from mcradar.tableOperator import creatRadarCols
import matplotlib.pyplot as plt
# TODO: this function should deal with the LUTs
def calcScatPropOneFreq(wl, radii, as_ratio,
rho, elv, ndgs=30,
canting=False, cantingStd=1,
meanAngle=0, safeTmatrix=False):
"""
Calculates the Ze at H and V polarization, Kdp for one wavelength
TODO: LDR???
Parameters
----------
wl: wavelength [mm] (single value)
radii: radius [mm] of the particle (array[n])
as_ratio: aspect ratio of the super particle (array[n])
    rho: density [g/mm^3] of the super particle (array[n])
elv: elevation angle [°]
ndgs: division points used to integrate over the particle surface
canting: boolean (default = False)
cantingStd: standard deviation of the canting angle [°] (default = 1)
meanAngle: mean value of the canting angle [°] (default = 0)
Returns
-------
reflect_h: super particle horizontal reflectivity[mm^6/m^3] (array[n])
reflect_v: super particle vertical reflectivity[mm^6/m^3] (array[n])
refIndex: refractive index from each super particle (array[n])
kdp: calculated kdp from each particle (array[n])
"""
#---pyTmatrix setup
# initialize a scatterer object
scatterer = Scatterer(wavelength=wl)
scatterer.radius_type = Scatterer.RADIUS_MAXIMUM
scatterer.ndgs = ndgs
scatterer.ddelta = 1e-6
if canting==True:
scatterer.or_pdf = orientation.gaussian_pdf(std=cantingStd, mean=meanAngle)
# scatterer.orient = orientation.orient_averaged_adaptive
scatterer.orient = orientation.orient_averaged_fixed
# geometric parameters - incident direction
scatterer.thet0 = 90. - elv
scatterer.phi0 = 0.
# parameters for backscattering
refIndex = np.ones_like(radii, np.complex128)*np.nan
reflect_h = np.ones_like(radii)*np.nan
reflect_v = np.ones_like(radii)*np.nan
# S matrix for Kdp
sMat = np.ones_like(radii)*np.nan
    for i, radius in enumerate(radii):
# A quick function to save the distribution of values used in the test
#with open('/home/dori/table_McRadar.txt', 'a') as f:
# f.write('{0:f} {1:f} {2:f} {3:f} {4:f} {5:f} {6:f}\n'.format(wl, elv,
# meanAngle,
# cantingStd,
# radius,
# rho[i],
# as_ratio[i]))
# scattering geometry backward
# radius = 100.0 # just a test to force nans
scatterer.thet = 180. - scatterer.thet0
scatterer.phi = (180. + scatterer.phi0) % 360.
scatterer.radius = radius
scatterer.axis_ratio = 1./as_ratio[i]
scatterer.m = refractive.mi(wl, rho[i])
refIndex[i] = refractive.mi(wl, rho[i])
if safeTmatrix:
inputs = [str(scatterer.radius),
str(scatterer.wavelength),
str(scatterer.m),
str(scatterer.axis_ratio),
str(int(canting)),
str(cantingStd),
str(meanAngle),
str(ndgs),
str(scatterer.thet0),
str(scatterer.phi0)]
arguments = ' '.join(inputs)
a = subprocess.run(['spheroidMcRadar'] + inputs, # this script should be installed by McRadar
capture_output=True)
# print(str(a))
try:
back_hh, back_vv, sMatrix, _ = str(a.stdout).split('Results ')[-1].split()
back_hh = float(back_hh)
back_vv = float(back_vv)
sMatrix = float(sMatrix)
except:
back_hh = np.nan
back_vv = np.nan
sMatrix = np.nan
# print(back_hh, radar.radar_xsect(scatterer, True))
# print(back_vv, radar.radar_xsect(scatterer, False))
reflect_h[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * back_hh # radar.radar_xsect(scatterer, True) # Kwsqrt is not correct by default at every frequency
reflect_v[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * back_vv # radar.radar_xsect(scatterer, False)
# scattering geometry forward
# scatterer.thet = scatterer.thet0
# scatterer.phi = (scatterer.phi0) % 360. #KDP geometry
# S = scatterer.get_S()
sMat[i] = sMatrix # (S[1,1]-S[0,0]).real
# print(sMatrix, sMat[i])
# print(sMatrix)
else:
reflect_h[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * radar.radar_xsect(scatterer, True) # Kwsqrt is not correct by default at every frequency
reflect_v[i] = scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * radar.radar_xsect(scatterer, False)
# scattering geometry forward
scatterer.thet = scatterer.thet0
scatterer.phi = (scatterer.phi0) % 360. #KDP geometry
S = scatterer.get_S()
sMat[i] = (S[1,1]-S[0,0]).real
kdp = 1e-3* (180.0/np.pi)*scatterer.wavelength*sMat
del scatterer # TODO: Evaluate the chance to have one Scatterer object already initiated instead of having it locally
return reflect_h, reflect_v, refIndex, kdp
def radarScat(sp, wl, K2=0.93):
"""
Calculates the single scattering radar quantities from the matrix values
Parameters
----------
sp: dataArray [n] superparticles containing backscattering matrix
and forward amplitude matrix information needed to compute
spectral radar quantities
wl: wavelength [mm]
K2: Rayleigh dielectric factor |(m^2-1)/(m^2+2)|^2
Returns
-------
reflect_h: super particle horizontal reflectivity[mm^6/m^3] (array[n])
reflect_v: super particle vertical reflectivity[mm^6/m^3] (array[n])
kdp: calculated kdp from each particle (array[n])
ldr_h: linear depolarization ratio horizontal (array[n])
rho_hv: correlation coefficient (array[n])
"""
prefactor = 2*np.pi*wl**4/(np.pi**5*K2)
#print(sp.Z11.values)
#quit()
reflect_hh = prefactor*(sp.Z11 - sp.Z12 - sp.Z21 + sp.Z22).values
reflect_vv = prefactor*(sp.Z11 + sp.Z12 + sp.Z21 + sp.Z22).values
kdp = 1e-3*(180.0/np.pi)*wl*sp.S22r_S11r.values
reflect_hv = prefactor*(sp.Z11 - sp.Z12 + sp.Z21 - sp.Z22).values
#reflect_vh = prefactor*(sp.Z11 + sp.Z12 - sp.Z21 - sp.Z22).values
ldr_h = reflect_hv/reflect_hh
# delta_hv np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])
#a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2
#b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])
#c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
#rho_hv np.sqrt(a / (b*c))
rho_hv = np.nan*np.ones_like(reflect_hh) # disable rho_hv for now
#Ah = 4.343e-3 * 2 * scatterer.wavelength * sp.S22i.values # attenuation horizontal polarization
#Av = 4.343e-3 * 2 * scatterer.wavelength * sp.S11i.values # attenuation vertical polarization
return reflect_hh, reflect_vv, kdp, ldr_h, rho_hv
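# Illustrative usage sketch of radarScat() with a tiny synthetic scattering-matrix
# dataset; the Z and S values below are made up and only demonstrate the expected
# variable names and shapes.
def _example_radarScat():
    sp = xr.Dataset({'Z11': ('points', np.array([1.0e-2, 2.0e-2])),
                     'Z12': ('points', np.array([1.0e-4, 2.0e-4])),
                     'Z21': ('points', np.array([1.0e-4, 2.0e-4])),
                     'Z22': ('points', np.array([9.0e-3, 1.8e-2])),
                     'S22r_S11r': ('points', np.array([1.0e-3, 2.0e-3]))})
    return radarScat(sp, wl=8.6)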
def calcParticleZe(wls, elv, mcTable, ndgs=30,
scatSet={'mode':'full', 'safeTmatrix':False}, K2=0.93):#zeOperator
"""
Calculates the horizontal and vertical reflectivity of
each superparticle from a given distribution of super
    particles. In this version monomers are meant to be handled with the DDA LUT while aggregates use T-matrix, depending on scatSet['mode'].
Parameters
----------
wls: wavelength [mm] (iterable)
elv: elevation angle [°] # TODO: maybe also this can become iterable
mcTable: McSnow table returned from getMcSnowTable()
ndgs: division points used to integrate over the particle surface
scatSet: type of scattering calculations to use, choose between full, table, wisdom, SSRGA, Rayleigh or SSRGA-Rayleigh
Returns
-------
mcTable including the horizontal and vertical reflectivity
of each super particle calculated for X, Ka and W band. The
    calculation is made separately for aspect ratio < 1 and >=1.
Kdp is also included. TODO spectral ldr and rho_hv
"""
#calling the function to create output columns
mcTable = creatRadarCols(mcTable, wls)
#print('mcTable has ', len(mcTable))
if scatSet['mode'] == 'full':
print('Full mode Tmatrix calculation')
##calculation of the reflectivity for AR < 1
tmpTable = mcTable[mcTable['sPhi']<1].copy()
#particle properties
canting = True
meanAngle=0
cantingStd=1
radii_M1 = tmpTable['radii_mm'].values #[mm]
as_ratio_M1 = tmpTable['sPhi'].values
rho_M1 = tmpTable['sRho'].values #[g/cm^3]
for wl in wls:
singleScat = calcScatPropOneFreq(wl, radii_M1, as_ratio_M1,
rho_M1, elv, canting=canting,
cantingStd=cantingStd,
meanAngle=meanAngle, ndgs=ndgs,
safeTmatrix=scatSet['safeTmatrix'])
reflect_h, reflect_v, refInd, kdp_M1 = singleScat
wlStr = '{:.2e}'.format(wl)
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sPhi']<1] = reflect_h
mcTable['sZeV_{0}'.format(wlStr)].values[mcTable['sPhi']<1] = reflect_v
mcTable['sKDP_{0}'.format(wlStr)].values[mcTable['sPhi']<1] = kdp_M1
##calculation of the reflectivity for AR >= 1
tmpTable = mcTable[mcTable['sPhi']>=1].copy()
#particle properties
canting=True
meanAngle=90
cantingStd=1
radii_M1 = (tmpTable['radii_mm']).values #[mm]
as_ratio_M1 = tmpTable['sPhi'].values
rho_M1 = tmpTable['sRho'].values #[g/cm^3]
for wl in wls:
singleScat = calcScatPropOneFreq(wl, radii_M1, as_ratio_M1,
rho_M1, elv, canting=canting,
cantingStd=cantingStd,
meanAngle=meanAngle, ndgs=ndgs,
safeTmatrix=scatSet['safeTmatrix'])
reflect_h, reflect_v, refInd, kdp_M1 = singleScat
wlStr = '{:.2e}'.format(wl)
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = reflect_h
mcTable['sZeV_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = reflect_v
mcTable['sKDP_{0}'.format(wlStr)].values[mcTable['sPhi']>=1] = kdp_M1
elif scatSet['mode'] == 'SSRGA':
print('using SSRGA scattering table for all particles, elevation is set to 90')
lut = xr.open_dataset(scatSet['lutFile'])
for wl in wls:
freq = (constants.c / wl*1e3)
#print(freq)
#quit()
mcTableAgg = mcTable[(mcTable['sNmono']>1)].copy()
points = lut.sel(frequency=freq, temperature=270.0, #elevation=90.0, # sofar: elevation can only be 90, we need more SSRGA calculations for other elevation
size = xr.DataArray(mcTableAgg['dia'].values, dims='points'),
method='nearest')
#points = lut.sel(wavelength=wl*1e-3, elevation=90.0, # sofar: elevation can only be 90, we need more SSRGA calculations for other elevation
# size = xr.DataArray(mcTable['dia'].values, dims='points'),
# method='nearest')
ssCbck = points.Cbck.values*1e6 # Tmatrix output is in mm, so here we also have to use ssCbck in mm
prefactor = wl**4/(np.pi**5*K2) # other prefactor: 2*pi*...
wlStr = '{:.2e}'.format(wl)
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sNmono']>1] = prefactor*ssCbck
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sNmono']==1] = np.nan
elif scatSet['mode'] == 'Rayleigh':
print('using Rayleigh approximation for all particles, only elevation 90 so far')
for wl in wls:
'''
# rayleigh approximation taken from Stefans ssrg_general folder
# calculate equivalent radius from equivalent mass
re = ((3 * mcTable['mTot']) / (4 * mcTable['sRhoIce'] * np.pi)) ** (1/3) *1e3
X = 4*np.pi*re/wl # calculate size parameter
qbck_h = 4*X**4*K2 # calculate backscattering efficiency
cbck_h = qbck_h * re**2 * np.pi/((2*np.pi)**2) #need to divide by (2*pi)^2 to get same as ssrga
'''
wlStr = '{:.2e}'.format(wl)
prefactor = wl**4/(np.pi**5*K2)
# rayleigh approximation according to Jussi Leinonens diss:
k = 2 * np.pi / (wl*1e-3)
sigma = 4*np.pi*np.abs(3*k**2/(4*np.pi)*np.sqrt(K2)*(mcTable['mTot']/mcTable['sRho_tot']))**2 *1e6 /np.pi #need to divide by pi to get same as ssrga, need to multiply by 1e6 to get mm^3
mcTable['sZeH_{0}'.format(wlStr)] = prefactor * sigma
elif scatSet['mode'] == 'SSRGA-Rayleigh':
print('using SSRGA for aggregates and Rayleigh approximation for crystals, only elevation 90')
mcTableAgg = mcTable[(mcTable['sNmono']>1)].copy() # only aggregates
mcTableCry = mcTable[(mcTable['sNmono']==1)].copy() # only monomers
lut = xr.open_dataset(scatSet['lutFile']) # SSRGA LUT
for wl in wls:
wlStr = '{:.2e}'.format(wl)
prefactor = wl**4/(np.pi**5*K2)
# rayleigh approximation for crystal:
k = 2 * np.pi / (wl*1e-3)
if len(mcTableCry['mTot']) > 0:
ssCbck = 4*np.pi*np.abs(3*k**2/(4*np.pi)*np.sqrt(K2)*(mcTableCry['mTot']/mcTableCry['sRho_tot']))**2 *1e6 /np.pi #need to divide by pi to get same as ssrga, need to multiply by 1e6 to get mm^3
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sNmono']==1] = prefactor * ssCbck
else:
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sNmono']==1] = np.nan
# ssrga for aggregates:
#points = lut.sel(wavelength=wl*1e-3, elevation=90.0, # sofar: elevation can only be 90, we need more SSRGA calculations for other elevation
# size = xr.DataArray(mcTableAgg['radii_mm'].values, dims='points'),
# method='nearest')
freq = (constants.c / wl*1e3)
points = lut.sel(frequency=freq, temperature=270.0, #elevation=90.0, # sofar: elevation can only be 90, we need more SSRGA calculations for other elevation
size = xr.DataArray(mcTableAgg['dia'].values, dims='points'),
method='nearest')
if len(points.Cbck)>0: # only if we have aggregates this works. Otherwise we need to write nan here
ssCbck = points.Cbck.values*1e6 # in mm^3
prefactor = wl**4/(np.pi**5*K2)
wlStr = '{:.2e}'.format(wl)
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sNmono']>1] = prefactor*ssCbck
else:
mcTable['sZeH_{0}'.format(wlStr)].values[mcTable['sNmono']>1] = np.nan
elif len(mcTable): # interpolation fails if no selection is possible
elvSel = scatSet['lutElev'][np.argmin(np.abs(np.array(scatSet['lutElev'])-elv))]
print('elevation ', elv,'lut elevation ', elvSel)
for wl in wls:
f = 299792458e3/wl
freSel = scatSet['lutFreq'][np.argmin(np.abs(np.array(scatSet['lutFreq'])-f))]
print('frequency ', f/1.e9, 'lut frequency ', freSel/1.e9)
dataset_filename = scatSet['lutPath'] + 'testLUT_{:3.1f}e9Hz_{:d}.nc'.format(freSel/1e9, int(elvSel))
lut = xr.open_dataset(dataset_filename)#.sel(wavelength=wl,
# elevation=elv,
# canting=1.0,
# method='nearest')
points = lut.sel(wavelength=wl, elevation=elv, canting=1.0,
size=xr.DataArray(mcTable['radii_mm'].values, dims='points'),
aspect=xr.DataArray(mcTable['sPhi'].values, dims='points'),
density=xr.DataArray(mcTable['sRho'].values, dims='points'),
method='nearest')
reflect_h, reflect_v, kdp_M1, ldr, rho_hv = radarScat(points, wl)
wlStr = '{:.2e}'.format(wl)
mcTable['sZeH_{0}'.format(wlStr)].values = reflect_h
mcTable['sZeV_{0}'.format(wlStr)].values = reflect_v
mcTable['sKDP_{0}'.format(wlStr)].values = kdp_M1
if scatSet['mode'] == 'table':
print('fast LUT mode')
elif scatSet['mode'] == 'wisdom':
print('less fast cache adaptive mode')
return mcTable
```
|
{
"source": "jdiasn/raincoat",
"score": 2
}
|
#### File: raincoat/archive/parsivel_log_nc_convert.py
```python
import numpy as np
import datetime
import calendar
import matplotlib.mlab
matplotlib.use('Agg')
from netCDF4 import Dataset
import io
import collections
def time2unix(datestring):
try:
f = datetime.datetime.strptime(datestring,"%Y%m%d%H%M%S.%f")
unix = calendar.timegm(f.timetuple())
except ValueError:
unix = np.nan
return unix
def count_file_lines(fname, site):
if site == 'jue':
f = open(fname, 'r')
elif site == 'nya':
f = io.open(fname, 'r', encoding='ISO-8859-1')
line_total = sum(1 for line in f)
f.close()
return line_total
def readASCII_old(logfile): #valid for reading in .logs from Aug.2013 until April 17th,2015
#read .log-file:
dic = {}
colnames = ['unixtime',\
'rr','r_accum','wawa','z','vis','interval','amp','nmb','T_sensor',\
'serial_no','version',\
'curr_heating','volt_sensor',\
'status_sensor','station_name',\
'r_amount',\
'error_code',\
'n', 'v' ]
#0: datetime string, 1-9:float, 10,11:string, 12,13: float, 14,15: string, 16:float, 17:string
#check for bad lines to skip:
iline = 0
    filelen = count_file_lines(logfile, 'jue')  # site assumed here: old-format logs are read with the plain ASCII path
rowlen = 570. # default for files!
#set keys where strings will be put in, to string arrays:
for k,key in enumerate(colnames):
if k == 10 or k == 11 or k == 14 or k == 15 or k == 17:
dic[key] = np.empty(filelen,dtype = 'S20')
elif k == 18 or k == 19:
dic[key] = np.zeros([32,filelen])
else:
dic[key] = np.nan * np.ones(filelen)
#read file:
f = open(logfile,'r')
for line in f: # for each line split up string, put value into corresponding array if rowlen normal.
line = line.strip()
cols = line.split(';')
#1/0
for i,cname in enumerate(colnames):
if len(line) == rowlen:
if i == 0:
#datetime = cols[i]
dic[cname][iline] = time2unix(cols[i])
elif i == 10 or i == 11 or i == 14 or i == 15 or i == 17: #all columns containing strings
dic[cname][iline] = str(cols[i])
elif i == 18:
for aa in range(32):
dic[cname][aa,iline] = float(cols[i+aa])
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
elif i == 19:
for aa in range(32):
dic[cname][aa,iline] = float(cols[50+aa])
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
else: dic[cname][iline] = float(cols[i])
dic['rr'][:] = dic['rr'][:]*60. #convert from mm/min to mm/h
iline += 1
f.close()
return dic
################################################################################
##############################################################################
def readASCII(logfile, site): #valid for reading in .logs later than April 17th,2015
#read .log-file:
dic = {}
colnames = ['unixtime',\
'rr','r_accum','wawa','z','vis','interval','amp','nmb','T_sensor',\
'serial_no','version',\
'curr_heating','volt_sensor',\
'status_sensor','station_name',\
'r_amount',\
'error_code',\
'n', 'v',
'M']
#0: datetime string, 1-9:float, 10,11:string, 12,13: float, 14,15: string, 16:float, 17:string, 18,19: array(32,filelen), 20: array(32,32,filelen)
#check for bad lines to skip:
iline = 0
filelen = count_file_lines(logfile, site)
# if site == 'jue':
# if int(logfile[-12:-4]) > 20160625 :
# rowlen = 4662.0 # Station name JOYCE
# elif 20151016 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151020 :
# rowlen = 4665.
# elif 20151001 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151015 :
# rowlen = 4660.
# else:
# rowlen = 4666.0 # Station name Parsivel4
#
# elif site == 'nya':
# rowlen = 4660.0
#set keys where strings will be put in, to string arrays:
for k,key in enumerate(colnames):
if k == 10 or k == 11 or k == 14 or k == 15 or k == 17:
dic[key] = np.empty(filelen,dtype = 'S20')
elif k == 18 or k == 19:
dic[key] = np.zeros([32,filelen])
elif k == 20:
dic[key] = np.zeros([32,32,filelen])
else:
dic[key] = np.nan * np.ones(filelen)
#read file:
if site == 'jue':
f = open(logfile,'r')
elif site == 'nya':
f = io.open(logfile,'r', encoding='ISO-8859-1')
for line in f.readlines(): # for each line split up string, put value into corresponding array if rowlen normal.
line = line.strip()
cols = line.split(';')
if 20150917 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151017 :
cols = [s.replace('<', '') for s in cols]
cols = [s.replace('>', '') for s in cols]
#1/0
#print 'len(line)', len(line), rowlen, len(line) == rowlen, 'len(cols)', len(cols), len(cols) == 1107
for i,cname in enumerate(colnames): # loop through columns
#if len(line) == rowlen :# and cols[14] < 2: # check status of parsivel: if 0 or 1: sensor usable, if 2 or 3: not usable.
if 1 == 1:
try:
test = float(cols[0][0:4])
except: continue
if test < 2000: # time stamp missing or in the wrong place
continue
if len(cols) == 1106:
tempcols = collections.deque(cols)
tempcols.extendleft([cols[0][0:18]])
tempcols[1] = tempcols[1][18:-1]
cols = list(tempcols)
elif len(cols) != 1107:
continue
if i == 0:
dic[cname][iline] = time2unix(cols[i])
elif i == 10 or i == 11 or i == 14 or i == 15 or i == 17: #all columns containing strings
dic[cname][iline] = str(cols[i])
elif i == 18:
for aa in range(32):
try:
dic[cname][aa,iline] = float(cols[i+aa]) #cols 18 upto 49 (32 values)
except ValueError:
dic[cname][aa,iline] = np.nan
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
elif i == 19:
for aa in range(32):
try:
dic[cname][aa,iline] = float(cols[50+aa]) #cols 50 upto 81 (32 values)
except ValueError:
dic[cname][aa,iline] = np.nan
if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
elif i == 20:
for bb in range(32): #loop through falling velocities, ie rows in matrix
for aa in range(32): #loop through sizes, ie columns
try:
dic[cname][aa,bb,iline] = float(cols[82+32*aa+bb])
if float(cols[82+32*aa+bb]) < 1000000: dic[cname][aa,bb,iline] = np.nan
except ValueError:
dic[cname][aa,bb,iline] = np.nan
else:
#if i == 1: 1/0
if len(cols) == 1107: # RG 5.8.2016: if some different lenght, something wrong with this line (e.g. time stamp missing)
try:
dic[cname][iline] = float(cols[i])
except ValueError:
dic[cname][iline] = np.nan
else :
dic[cname][iline] = np.nan
#if iline == 1: 1/0
iline += 1
f.close()
return dic
################################################################################################
################################################################################################
def writeNC_old(logfile,ncname): #valid for data Aug2013-Apr17,2015
    #read .log-file into dictionary:
data = readASCII_old(logfile)
#get number of lines in file ie length of data columns
filelen = len(data['unixtime'])
#open .nc outfile.
ncout = Dataset(ncname,'w',format='NETCDF4')
# define dimensions:
dim = ncout.createDimension('dim', filelen) #filelen, set='none' if unlimited dimension
ndim = ncout.createDimension('ndim',32)
stri = ncout.createDimension('stri',None)
#read variables:
time = ncout.createVariable('time','i8',('dim',)) #time in double-precision...
time.units = 'seconds since 1/1/1970 00:00:00'
time[:] = data['unixtime']
rain_rate = ncout.createVariable('rain_rate','f',('dim',))
rain_rate.units = 'mm/h'
rain_rate[:] = data['rr']
rain_accum = ncout.createVariable('rain_accum','f',('dim',))
rain_accum.units = 'mm'
rain_accum[:] = data['r_accum']
wawa = ncout.createVariable('wawa','f',('dim',))
wawa.units = 'weather code'
wawa[:] = data['wawa']
zeff = ncout.createVariable('Z','f',('dim',))
zeff.units = 'dB'
zeff[:] = data['z']
vis = ncout.createVariable('MOR_visibility','f',('dim',))
vis.units = 'm'
vis[:] = data['vis']
interval = ncout.createVariable('sample_interval','f',('dim',))
interval.units = 's'
interval[:] = data['interval']
ampli = ncout.createVariable('signal_amplitude','f',('dim',))
ampli.units = ''
ampli[:] = data['amp']
n_part = ncout.createVariable('n_particles','f',('dim',))
n_part.units = '#'
n_part.description = 'number of detected particles'
n_part[:] = data['nmb']
temp_sens = ncout.createVariable('T_sensor','f',('dim',))
temp_sens.units = 'deg C'
temp_sens[:] = data['T_sensor']
serial_no = ncout.createVariable('serial_no','S',('stri',))
serial_no[:] = data['serial_no']
version = ncout.createVariable('version','S',('stri',))
version.description = 'IOP firmware version'
version[:] = data['version']
curr_heating = ncout.createVariable('curr_heating','f',('dim',))
curr_heating.units = 'A'
curr_heating.description = 'Current heating system'
curr_heating[:] = data['curr_heating']
volt_sensor = ncout.createVariable('volt_sensor','f',('dim',))
volt_sensor.units = 'V'
volt_sensor.description = 'Power supply voltage in the sensor'
volt_sensor[:] = data['volt_sensor']
status_sensor = ncout.createVariable('status_sensor','S',('stri',))
status_sensor[:] = data['status_sensor']
station_name = ncout.createVariable('station_name','S',('stri',))
station_name[:] = data['station_name']
rain_am = ncout.createVariable('rain_am','f',('dim',))
rain_am.units = 'mm'
rain_am.description = 'rain amount absolute'
rain_am[:] = data['r_amount']
error_code = ncout.createVariable('error_code','S',('stri',))
error_code[:] = data['error_code']
N = ncout.createVariable('N','f',('ndim','dim'))
N.units = '1/m3'
    N.description = 'particle number concentration per volume equivalent diameter class'
N[:,:] = data['n']
v = ncout.createVariable('v','f',('ndim','dim'))
v.units = 'm/s'
v.description = 'mean falling speed per preci class'
v[:,:] = data['v']
#close .nc-file:
ncout.close()
return
##################################################################################################
##################################################################################################
def writeNC(logfile,ncname, site):
    #read .log-file into dictionary:
data = readASCII(logfile, site)
#get number of lines in file ie length of data columns
filelen = len(data['unixtime'])
#open .nc outfile.
ncout = Dataset(ncname,'w',format='NETCDF4')
# define dimensions:
dim = ncout.createDimension('dim', filelen) #filelen, set='none' if unlimited dimension
ndim = ncout.createDimension('ndim',32)
stri = ncout.createDimension('stri',None)
#read variables:
time = ncout.createVariable('time','i8',('dim',)) #time in double-precision...
time.units = 'seconds since 1/1/1970 00:00:00'
time[:] = data['unixtime']
rain_rate = ncout.createVariable('rain_rate','f',('dim',))
rain_rate.units = 'mm/h'
rain_rate[:] = data['rr']
rain_accum = ncout.createVariable('rain_accum','f',('dim',))
rain_accum.units = 'mm'
rain_accum[:] = data['r_accum']
wawa = ncout.createVariable('wawa','f',('dim',))
wawa.units = 'weather code'
wawa[:] = data['wawa']
zeff = ncout.createVariable('Z','f',('dim',))
zeff.units = 'dB'
zeff[:] = data['z']
vis = ncout.createVariable('MOR_visibility','f',('dim',))
vis.units = 'm'
vis[:] = data['vis']
interval = ncout.createVariable('sample_interval','f',('dim',))
interval.units = 's'
interval[:] = data['interval']
ampli = ncout.createVariable('signal_amplitude','f',('dim',))
ampli.units = ''
ampli[:] = data['amp']
n_part = ncout.createVariable('n_particles','f',('dim',))
n_part.units = '#'
n_part.description = 'number of detected particles'
n_part[:] = data['nmb']
temp_sens = ncout.createVariable('T_sensor','f',('dim',))
temp_sens.units = 'deg C'
temp_sens[:] = data['T_sensor']
serial_no = ncout.createVariable('serial_no','S6',('stri',))
serial_no[:] = data['serial_no']
version = ncout.createVariable('version','S5',('stri',))
version.description = 'IOP firmware version'
version[:] = data['version']
curr_heating = ncout.createVariable('curr_heating','f',('dim',))
curr_heating.units = 'A'
curr_heating.description = 'Current heating system'
curr_heating[:] = data['curr_heating']
volt_sensor = ncout.createVariable('volt_sensor','f',('dim',))
volt_sensor.units = 'V'
volt_sensor.description = 'Power supply voltage in the sensor'
volt_sensor[:] = data['volt_sensor']
status_sensor = ncout.createVariable('status_sensor','S2',('stri',))
status_sensor[:] = data['status_sensor']
station_name = ncout.createVariable('station_name','S5',('stri',))
station_name[:] = data['station_name']
rain_am = ncout.createVariable('rain_am','f',('dim',))
rain_am.units = 'mm'
rain_am.description = 'rain amount absolute'
rain_am[:] = data['r_amount']
error_code = ncout.createVariable('error_code','S3',('stri',))
error_code[:] = data['error_code']
N = ncout.createVariable('N','f',('ndim','dim'))
N.units = '1/m3'
    N.description = 'particle number concentration per volume equivalent diameter class'
N[:,:] = data['n']
v = ncout.createVariable('v','f',('ndim','dim'))
v.units = 'm/s'
v.description = 'mean falling velocity per preci class'
v[:,:] = data['v']
M = ncout.createVariable('M','f',('ndim','ndim','dim'))
M.units = ''
M.description = 'raw data matrix. number of particles per volume diameter and fall velocity'
M[:,:,:] = data['M']
#close .nc-file:
ncout.close()
return
```
#### File: raincoat/disdrometer/pars_class.py
```python
import numpy as np
#******************************************************************************
# Creation of Parsivel class boundaries (class center and class width) and bin edges for PSD calculation
#******************************************************************************
def pars_class():
""" This function generates the volume equivalent diameter class center and widths
Arguments
---------
Returns
-------
pars_class : 2dim array, volume equivalent diameter class center & width
    bin_edges : values of volume equivalent diameter class edges
"""
pars_class = np.zeros(shape=(32,2))
bin_edges = np.zeros(shape=(33,1))
#pars_class[:,0] : Center of Class [mm]
#pars_class[:,1] : Width of Class [mm]
pars_class[0:10,1] = 0.125
pars_class[10:15,1] = 0.250
pars_class[15:20,1] = 0.500
pars_class[20:25,1] = 1.
pars_class[25:30,1] = 2.
pars_class[30:32,1] = 3.
j = 0
pars_class[0,0] = 0.0625 # changed to avoid bin width mismatch, anyway better to have a mismatch in the lower part, first two classes should also be 0 because smaller than parsivel sensitivity
for i in range(1,32):
pars_class[i,0] = pars_class[i-1,0] + 0.5*(pars_class[i-1,1] + pars_class[i,1])
# if i < 10 or (i > 10 and i < 15) or (i > 15 and i < 20) or (i > 20 and i < 25) or (i > 25 and i < 30) or (i > 30):
# pars_class[i,0] = pars_class[i-1,0] + pars_class[i,1]
# const = [0.188, 0.375, 0.75, 1.5, 2.5]
# if i == 10 or i == 15 or i == 20 or i == 25 or i == 30:
# pars_class[i,0] = pars_class[i-1,0] + const[j]
# j = j + 1
#print pars_class[i,0]
#bin_edges[i+1,0] = pars_class[i,0] + pars_class[i,1]/2
bin_edges[0,0] = 0.
bin_edges[1:,0] = pars_class[:,0] + pars_class[:,1]*0.5
return pars_class, bin_edges
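# Illustrative usage sketch: print the first few Parsivel class centers, widths and
# bin edges produced above.
if __name__ == '__main__':
    classes, edges = pars_class()
    for center, width in classes[:5]:
        print('center %.4f mm, width %.3f mm' % (center, width))
    print('first bin edges [mm]:', edges[:5, 0])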
```
|
{
"source": "JDiaz11/BlackBeanControl",
"score": 2
}
|
#### File: JDiaz11/BlackBeanControl/samsung.py
```python
import Settings
import copy
import string
import sys, getopt
import time, binascii
import netaddr
import broadlink, configparser
from os import path
from Crypto.Cipher import AES
def eq(cnst, val):
return ((cnst * 1.2 > val) and ((cnst * 0.8) < val))
def BinDecode(sbin):
if len(sbin) <> 48:
print('bin length error. len=' + str(len(sbin)))
return False
xbin = 'x' + sbin
BinArray = [xbin[i * 8: (i - 1) * 8: -1] for i in xrange(1, 7)]
# BinArray = [sbin[7: -49: -1], sbin[15: 7: -1], sbin[23: 15: -1], sbin[31: 23: -1], sbin[39: 31: -1], sbin[47: 39: -1]] !!!
# BinArray = [sbin[7: : -1], sbin[15: 7: -1], sbin[23: 15: -1], sbin[31: 23: -1], sbin[39: 31: -1], sbin[47: 39: -1]]
# BinArray = [sbin[: 7: 1], sbin[7: 15: 1], sbin[15: 23: 1], sbin[23: 31: 1], sbin[31: 39: 1], sbin[39: 47: 1]]
HexArray = ['%02X' % int(x, 2) for x in BinArray]
print('bin cmd' + "".join(' %s' % s for s in BinArray))
print('hex cmd' + "".join(' %s' % s for s in HexArray))
print('dec cmd' + ''.join(' %03d' % int(b, 2) for b in BinArray))
return True;
SentCommand = ''
SettingsFile = configparser.ConfigParser()
SettingsFile.optionxform = str
SettingsFile.read(Settings.BlackBeanControlSettings)
try:
Options, args = getopt.getopt(sys.argv[1:], 'd:e:p:n:h', ['command=', 'command=', 'command=', 'neccmd=', 'help'])
except getopt.GetoptError:
print('Options error. Try -d for decode or -h for help. ')
sys.exit(2)
MainCmd = '';
for Option, Argument in Options:
if Option in ('-h', '--help'):
print('Samsung48 IR protocol (NEC48-2) decoder/encoder.')
print('Print command from ini file:')
print(' samsung.py -p <Command name>')
print('Decode:')
print(' samsung.py -d <Command name>')
print('Export as timing list:')
print(' samsung.py -e <Command name>')
print('Encode Samsung IR command:')
print(' samsung.py -n <Samsung command in HEX>')
sys.exit()
elif Option in ('-p', '--print'):
SentCommand = Argument
MainCmd = 'p';
elif Option in ('-d', '--decode'):
SentCommand = Argument
MainCmd = 'd';
elif Option in ('-e', '--export'):
SentCommand = Argument
MainCmd = 'e';
elif Option in ('-n', '--encode'):
SentCommand = Argument
MainCmd = 'n';
if SentCommand.strip() == '':
print('Command name parameter is mandatory')
sys.exit(2)
if SettingsFile.has_option('Commands', SentCommand):
CommandFromSettings = SettingsFile.get('Commands', SentCommand)
else:
CommandFromSettings = ''
if (MainCmd in ['d', 'e', 'p']) and (CommandFromSettings.strip() != ''):
DecodedCommand = CommandFromSettings.decode('hex')
# print command
if MainCmd == 'p':
print(CommandFromSettings)
sys.exit(0)
#'d','e' commands
Intv = '';
LenArray = [];
for s in DecodedCommand:
Intv = Intv + s;
# EOF
if Intv == '\x00\x0d\x05':
# print("eof")
break;
# 1-byte or 3-byte values. 3-byte values starts with 0x00
        if (Intv[0] != '\x00') or (Intv[0] == '\x00' and len(Intv) >= 3):
IntI = int(Intv.encode('hex'), 16) * 1000 * 269 / 8192
LenArray.append(IntI)
# print(IntI)
Intv = ''
print('Array length ' + str(len(LenArray)))
if MainCmd == 'e':
print(''.join((' +' if (ind % 2 == 0) else ' -') + '%d' % i for ind,i in enumerate(LenArray)))
sys.exit(0);
DecodeState = ['init', '1bwait', 'pulse', 'pause', 'stop']
decoding = DecodeState[0]
BinStr = '';
for indx in range(0, len(LenArray) - 1):
if ((indx < len(LenArray) - 3) and eq(4500, LenArray[indx]) and eq(4500, LenArray[indx + 1]) and (300 < LenArray[indx + 2] < 1000)):
            if BinStr != '':
BinDecode(BinStr)
BinStr = ''
decoding = '1bwait'
print('--------------------------------------------')
print('sequence started at indx:' + str(indx) + ' ' + str(LenArray[indx]))
continue
if decoding == '1bwait':
decoding = 'pulse'
continue
# 550-550 = 0; 550-1450 = 1 in microseconds
# decoder setiings - 0.3-1ms pulse and '0', 1-2ms - '1', else - error
if decoding == 'pulse':
if 300 < LenArray[indx] < 900:
decoding = 'pause'
continue
else:
print('error bit1 length=' + str(LenArray[indx]) + ' indx=' + str(indx))
decoding = 'init'
continue
if decoding == 'pause':
if 300 < LenArray[indx] < 1000:
BinStr = BinStr + '0'
elif 1000 < LenArray[indx] < 2000:
BinStr = BinStr + '1'
else:
print('error bit2 length=' + str(LenArray[indx]) + ' indx=' + str(indx))
decoding = 'init'
continue
decoding = 'pulse'
        if (decoding == 'init' and BinStr != '') or (indx == len(LenArray) - 2 and BinStr != ''):
print('decoded ' + BinStr)
BinDecode(BinStr)
BinStr = ''
    if BinStr != '':
BinDecode(BinStr)
sys.exit(0)
if (MainCmd in ['n']) and (SentCommand.strip() != ''):
    if (len(SentCommand) != 12) or (not all(c in string.hexdigits for c in SentCommand)):
print('Command must be 6-byte hex number.')
sys.exit(2)
print('hex command=' + SentCommand)
BinArray = [bin(int(SentCommand[i: i + 2], 16))[2:].zfill(8)[::-1] for i in xrange(0, 11, 2)];
BinStr = "".join('%s' % s for s in BinArray)
print ('bin command=' + BinStr)
# start sequence + Samsung start + data + end pulse + (here the copy)Samsung start + data + end pulse with long pause + end sequence
EncodedBinStr = "".join(('1434' if (c == '1') else '1414') for c in BinStr)
Cmd = '2600ca00' + '9494' + EncodedBinStr + '1494' + '9494' + EncodedBinStr + '1400072a' + '000d05'
print(Cmd)
sys.exit(0)
```
|
{
"source": "jdiaz4302/streamstats",
"score": 3
}
|
#### File: streamstats/streamstats/watershed.py
```python
from streamstats import utils
class Watershed():
"""Watershed covering a spatial region, with associated information.
The USGS StreamStats API is built around watersheds as organizational
units. Watersheds in the 50 U.S. states can be found using lat/lon
lookups, along with information about the watershed including its HUC code
and a GeoJSON representation of the polygon of a watershed. Basin
characteristics and flow statistics can also be extracted from watersheds.
"""
base_url = "https://streamstats.usgs.gov/streamstatsservices/"
def __init__(self, lat, lon):
"""Initialize a Watershed object
:param lon: Longitude of point in decimal degrees.
:type lon: float
:param lat: Latitude of point in decimal degrees.
:type lat: float
:param simplify: Whether to simplify the polygon representation.
:type simplify: bool
"""
self.lat, self.lon = lat, lon
self.address = utils.find_address(lat=lat, lon=lon)
self.state = utils.find_state(self.address)
self.data = self._delineate()
self.workspace = self.data['workspaceID']
self.flowstats = None
def _delineate(self):
"""Find the watershed that contains a point.
Implements a Delineate Watershed by Location query from
https://streamstats.usgs.gov/docs/streamstatsservices/#/
:rtype dict containing watershed data
"""
payload = {
'rcode': self.state,
'xlocation': self.lon,
'ylocation': self.lat,
'crs': 4326,
'includeparameters': True,
'includeflowtypes': False,
'includefeatures': True,
'simplify': False
}
url = "".join((self.base_url, "watershed.geojson"))
response = utils.requests_retry_session().get(url, params=payload)
response.raise_for_status() # raises errors early
return response.json()
def __repr__(self):
"""Get the string representation of a watershed."""
huc = self.get_huc()
huc_message = 'Watershed object with HUC%s: %s' % (len(huc), huc)
coord_message = 'containing lat/lon: (%s, %s)' % (self.lat, self.lon)
return ', '.join((huc_message, coord_message))
def get_huc(self):
"""Find the Hydrologic Unit Code (HUC) of the watershed."""
watershed_point = self.data['featurecollection'][0]['feature']
huc = watershed_point['features'][0]['properties']['HUCID']
return huc
def get_boundary(self):
"""Return the full watershed GeoJSON as a dictionary.
:rtype dict containing GeoJSON watershed boundary
"""
for dictionary in self.data['featurecollection']:
if dictionary.get('name', '') == 'globalwatershed':
return dictionary['feature']
raise LookupError('Could not find "globalwatershed" in the feature'
'collection.')
def available_characteristics(self):
"""List the available watershed characteristics."""
raise NotImplementedError()
def get_characteristics(self):
"""Get watershed characteristic data values."""
raise NotImplementedError()
def available_flow_stats(self):
"""List the available flow statistics
:rtype list of available flow statistics
"""
if not self.flowstats:
self.get_flow_stats()
avail_stats = [item['StatisticGroupName'] for item in self.flowstats]
return avail_stats
def get_flow_stats(self):
"""Get watershed flow statistics data values.
:rtype dict containing flow statistics data for a watershed
"""
pars = {
'rcode': self.state,
'workspaceID': self.workspace,
'includeflowtypes': True
}
flow_url = "".join((self.base_url, 'flowstatistics.json'))
response = utils.requests_retry_session().get(flow_url, params=pars)
self.flowstats = response.json()
return self.flowstats
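# Minimal usage sketch (requires network access to the USGS StreamStats API;
# the coordinates below are illustrative):
#   ws = Watershed(lat=43.94, lon=-74.52)
#   ws.get_huc()                  # HUC code of the containing watershed
#   ws.available_flow_stats()     # names of available flow statistic groups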
```
|
{
"source": "jdiaz/gitpal",
"score": 4
}
|
#### File: jdiaz/gitpal/gitpal.py
```python
import github
LICENSES = {
1: 'Apache 2.0',
    2: 'BSD 3-Clause "New" or "Revised" license',
3: 'GNU General Public License (GPL)',
4: 'GNU Library or "Lesser" General Public License (LGPL)',
5: 'MIT license',
6: 'Mozilla Public License 2.0',
7: 'Common Development and Distribution License',
8: 'Eclipse Public License'
}
def make_project(name, description, license):
"""Creates a new github project with the information provided
Args:
name: The name to use when creating the github Project
        description: A description of the idea. The contents will be
            written to the NOTES file.
license: The license to include in the new github project.
Returns:
        Whether the new project was successfully created.
"""
# TODO: Implement
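    # A possible sketch (not this project's implementation; assumes a PyGithub
    # access token is available, e.g. in a GITHUB_TOKEN environment variable):
    #   user = github.Github(os.environ["GITHUB_TOKEN"]).get_user()
    #   repo = user.create_repo(name, description=description)
    #   return repo is not None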
def banner():
"""Creates the banner for the CLI prompt.
Returns:
        A string containing the ASCII art to draw
"""
    with open('resources/banner.txt', mode='rt', encoding='utf-8') as handle:
        return handle.read()
def prompt(banner, options):
"""Displays a CLI prompt that asks the user for information on the side
project idea.
Args:
        banner: A string containing the ASCII art banner to show at run time.
options: A dict of license options to choose from indexed by number.
Returns:
Nothing
"""
print(banner)
print('Booting...\n')
git = github.Github()
print("Let's make sure you don't forget that idea!\n")
correct = False
cli_prompt = '>> '
while not correct:
print('Please enter project name:')
name = input(cli_prompt)
print('Project description:')
description = input(cli_prompt)
print('Choose a license (Enter the number):')
print(options_menu(options))
license = input(cli_prompt) #TODO: Validate
        print("Let's review your input.\n")
print(' Name: {}'.format(name))
print(' Description: {}'.format(description))
print(' License: {}'.format(options[int(license)]))
print('Is everything correct? (yes, no)')
ok = input(cli_prompt).lower()
if ok == 'yes' or ok == 'y':
correct = True
git.make_project(name, description, license)
print('Great, visit your github profile https://www.github.com/')
else:
            print("Whoops, let's go over the information again!")
def options_menu(options):
"""Creates string representation of the licenses support by gitpal.
Args:
options: A dictionary of numbers to license name
Returns:
A string showing the options menu
"""
buf = []
for key, value in options.items():
buf.append(' {}. {}\n'.format(key, value))
return ''.join(buf)
def main():
prompt(banner(), LICENSES)
if __name__ == '__main__':
main()
```
|
{
"source": "jdiaz/invst",
"score": 4
}
|
#### File: jdiaz/invst/investment_analysis.py
```python
import math
class InvestmentCalculator:
def set_stock_price(self, stock_price):
self.stock_price = stock_price
return self
def set_payout_period(self, time_period = 4):
self.payout_period = time_period
return self
def set_payout_ratio(self, payout_ratio):
self.payout_ratio = payout_ratio
return self
def set_investment_per_payout_period(self, payment):
self.investment_per_payout_period = payment
return self
def set_shares(self, shares):
self.total_shares = shares
return self
def set_years(self, yrs):
self.years = yrs
return self
def set_year_percentage_return(self, ret):
self.year_return = ret
return self
def should_reinvest_dividends(self, reinvest):
self.reinvest_dividends = reinvest
return self
def __format_money(self, money):
return '${:,.2f}'.format(money)
def __stock_value(self, stock_price, number_of_stocks):
return self.__format_money(number_of_stocks * stock_price)
def __dividend_payout(self, shares, payout_ratio):
frac_shares, whole_shares = math.modf(shares) # separate decimal portion of shares you own
return whole_shares * payout_ratio + frac_shares * payout_ratio
def __purchasable_stock_amount(self, stock_price, money):
if money == 0:
return 0
return money / stock_price
def compute(self):
print('=======================================================================')
print('Investment analysis:')
print('\tFor ({}) years'.format(self.years))
print(
'\tStarting shares: {}, {}'.format(
self.total_shares,
self.__format_money(self.total_shares * self.stock_price),
),
)
print(
'\tStarting stock price: {}'.format(self.__format_money(self.stock_price)),
)
print(
'\tInvesting {} per pay period ({}) times a year'.format(
self.__format_money(self.investment_per_payout_period), self.payout_period
),
)
total_money_from_divided_payout = 0
for _ in range(self.years):
for _ in range(self.payout_period):
money_made_from_dividend = self.__dividend_payout(
self.total_shares,
self.payout_ratio,
)
total_money_from_divided_payout += money_made_from_dividend
if self.reinvest_dividends:
stock_purchased_from_dividends = self.__purchasable_stock_amount(
self.stock_price,
money_made_from_dividend,
)
self.total_shares += stock_purchased_from_dividends
self.total_shares += self.__purchasable_stock_amount(
self.stock_price,
self.investment_per_payout_period,
)
# Increase stock price by Year Return %; once every year
self.stock_price += (self.stock_price * self.year_return)
print('\n\tResults:')
print(
'\tTotal stock: {}, {}'.format(
self.total_shares, self.__stock_value(self.stock_price, self.total_shares),
),
)
print('\tNew stock price: {}'.format(self.__format_money(self.stock_price)))
print(
'\tTotal made from dividends: {}, Was it reinvested? {}'.format(
self.__format_money(total_money_from_divided_payout), self.reinvest_dividends
),
)
print(
'\tGoing forward per quarter you will make: {}'.format(
self.__format_money(self.total_shares * self.payout_ratio),
),
)
global_years = 5
global_investment_per_period = 3000
def calculateATTStock():
"""
Calculates stock investment in At&t stock
"""
(InvestmentCalculator()) \
.set_stock_price(31.07) \
.set_shares(117) \
.set_payout_period(4) \
.set_investment_per_payout_period(3000) \
.set_years(3) \
.set_payout_ratio(0.51) \
.set_year_percentage_return(0.0) \
.should_reinvest_dividends(True) \
.compute()
def calculateIVVFund():
"""
Calculates stock investment in IVV ETF
"""
(InvestmentCalculator()) \
.set_stock_price(288.56) \
.set_shares(30) \
.set_payout_period(4) \
.set_investment_per_payout_period(500) \
.set_years(15) \
.set_payout_ratio(1.14) \
.set_year_percentage_return(.1012) \
.should_reinvest_dividends(True) \
.compute()
if __name__ == '__main__':
calculateATTStock()
#calculateIVVFund()
```
|
{
"source": "jdiazmx/WBU",
"score": 3
}
|
#### File: jdiazmx/WBU/Funtions.py
```python
import socket
import os
import random
def showBanner():
strBanner1="""
____ _ _ _ _
| _ \ | | | | | | |
| |_) | __ _ __| | | | |___| |__
| _ < / _` |/ _` | | | / __| '_ \
| |_) | (_| | (_| | |__| \__ \ |_) |
|____/ \__,_|\__,_|\____/|___/_.__/
____ _____ _ _
/ __ \ |_ _| | | | |
| | | |_ _____ _ __ | | _ __ | |_ ___ _ __ _ __ ___| |_
| | | \ \ / / _ \ '__| | | | '_ \| __/ _ \ '__| '_ \ / _ \ __|
| |__| |\ V / __/ | _| |_| | | | || __/ | | | | | __/ |_
\____/ \_/ \___|_| |_____|_| |_|\__\___|_| |_| |_|\___|\__|
v0.1 by <NAME>
"""
strBanner2="""
888 888 888888b. 888 888
888 o 888 888 "88b 888 888
888 d8b 888 888 .88P 888 888
888 d888b 888 8888888K. 888 888
888d88888b888 888 "Y88b 888 888
88888P Y88888 888 888 888 888
8888P Y8888 888 d88P Y88b. .d88P
888P Y888 8888888P" AD "Y88888P" SB
v0.1 by <NAME>
"""
strBanner3="""
01010111 01000010 01010101
v0.1 by <NAME>
"""
    num = random.randint(1, 3)
if num==1:
print(strBanner1)
elif num ==2:
print(strBanner2)
else:
print(strBanner3)
return
def showMenu():
strMenu="""
1. Modificar Script
2. Ver Script
3. Enviar Script
4. Modo Interactivo
5. Salir
"""
print(strMenu)
return
def readScript(file):
file = open("Script.txt", "r")
print("--------------------------------------------")
print(file.read())
print("--------------------------------------------")
input("[?]Enter para continuar..")
file.close()
return
def sendScript(clientsocket,file):
file = open("Script.txt", "r")
clientsocket.send(file.read().encode('ascii'))
print("[+]Envianado...")
"""respuesta=clientsocket.recv(1024)
if (respuesta=="O"):
print("[+]Script recibido por el BadUsb")
else:
print("[!]Error al enviar el script")
input("[?]Enter para continuar..")
return
input("[?]Enter para continuar..")
"""
return
def moidifyScript():
print("/n"+"[+]Modifica el Script y Guardalo!...")
os.system('gedit script.txt')
input("[?]Enter para continuar..")
return
```
|
{
"source": "jdiazrdgz/BabeNotifier-Backend",
"score": 2
}
|
#### File: application/resources/Ping.py
```python
from flask_restful import Resource
class Ping(Resource):
def get(self):
return \
{
"success": True,
"message": "BabeNotifier - V0.0.1",
"data": {}
}, 200
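# Usage sketch (route path is illustrative): after registering the resource with
# flask_restful, e.g. api.add_resource(Ping, "/ping"), a GET on that path
# returns the JSON payload above with HTTP status 200.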
```
|
{
"source": "jdiazromeral/django-ddd-quizs-demo",
"score": 2
}
|
#### File: service/application/quiz_creator.py
```python
from service.domain.quiz import Quiz
from service.domain.quiz_repository import QuizRepository
class QuizCreator:
def __init__(self, quiz_respository: QuizRepository) -> None:
self.__quiz_respository = quiz_respository
def create(self, quiz: Quiz) -> None:
self.__quiz_respository.save(quiz)
```
#### File: infrastructure/repositories/mongo_quiz_repository.py
```python
from typing import Optional, List
from urllib.parse import quote_plus
from uuid import UUID
from pymongo import MongoClient
from service.domain.quiz import Quiz
from service.domain.quiz_repository import QuizRepository
from service.infrastructure.builders.quiz_builder import QuizBuilder
from service.infrastructure.serializers.quiz_serializer import QuizSerializer
class MongoQuizRepository(QuizRepository):
def __init__(
self,
quiz_serializer: QuizSerializer,
quizs_builder: QuizBuilder,
):
self.__quiz_serializer = quiz_serializer
self.__quiz_builder = quizs_builder
self.__client = MongoClient(f"mongodb://quiz:{quote_plus('quiz-secret')}@localhost:27017/quizdb")
self.__quizdb = self.__client.quizdb
self.__quiz_collection = self.__quizdb.quizs
def save(self, quiz: Quiz) -> None:
serialized_quiz = self.__quiz_serializer.serialize(quiz)
self.__quiz_collection.insert_one(serialized_quiz)
def find_all(self) -> List[Quiz]:
quizs = []
        mongo_quizs = self.__quiz_collection.find({})
for quiz in mongo_quizs:
quizs.append(self.__quiz_builder.build(quiz))
return quizs
def find_by_id(self, quiz_id: UUID) -> Optional[Quiz]:
pass
```
|
{
"source": "jdiaz/snorkel",
"score": 2
}
|
#### File: src/components/user_button.py
```python
import flask_security
from .components import UIComponent
import pudgy
class LoginButton(UIComponent, pudgy.MustacheComponent):
pass
class UserButton(UIComponent, pudgy.MustacheComponent):
def __prepare__(self):
self.context.user = flask_security.core.current_user
class UserModal(UIComponent, pudgy.JinjaComponent,
pudgy.ServerBridge, pudgy.SassComponent):
def __prepare__(self):
self.context.user = flask_security.core.current_user
```
#### File: snorkel/src/__init__.py
```python
from __future__ import absolute_import
from __future__ import print_function
from . import main
from . import cli
import os
import sys
from subprocess import Popen, PIPE
SNORKEL_DIR=os.path.expanduser("~/.local/share/snorkel")
def shared_mode():
print("SWITCHING TO SHARED DIR", SNORKEL_DIR, file=sys.stderr)
try:
os.makedirs(SNORKEL_DIR)
except:
pass
os.chdir(SNORKEL_DIR)
DEBUG="DEBUG" in os.environ
def run_command(cmd_args, stdin=b""):
cmd_args = list(map(lambda w: w.decode("utf-8"), cmd_args))
print("RUNNING COMMAND", " ".join(cmd_args), file=sys.stderr)
if isinstance(stdin, str):
stdin = stdin.encode("utf-8")
p = Popen(cmd_args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(stdin)
print(stderr)
return stdout.decode("utf-8")
```
#### File: snorkel/src/pages.py
```python
from __future__ import print_function
import pudgy
import flask
import time
from .views import get_view_by_name
from .components import QuerySidebar, UserButton, UserModal
from .auth import rpc_login_required
from . import backend, results, presenter, rbac
from .components import UIComponent, Selector
import werkzeug
import os
from collections import defaultdict
import flask_security
from .query_spec import QuerySpec
from .util import time_to_seconds, string_dict
try:
from urllib import unquote_plus
except:
from urllib.parse import unquote_plus
from . import fastjson as json
class ViewArea(UIComponent, pudgy.JinjaComponent, pudgy.BackboneComponent, pudgy.ClientBridge):
pass
@pudgy.Virtual
class Page(pudgy.FlaskPage):
NAMESPACE = "pages"
class HomePage(Page, pudgy.SassComponent):
pass
class UserPage(Page):
pass
class DatasetsPage(Page, pudgy.BackboneComponent, pudgy.SassComponent):
def __prepare__(self):
bs = backend.SybilBackend()
self.context.tables = filter(lambda t: rbac.check("query", t), bs.list_tables())
self.context.tables = list(self.context.tables)
self.context.tables.sort()
groups = defaultdict(list)
self.context.table_groups = groups
groupings = []
visited = {}
for t in self.context.tables:
if t in visited:
continue
visited[t] = 1
tokens = t.split("@")
if len(tokens) > 1:
superset = tokens[0]
dataset = " ".join(tokens[1:])
groups[superset].append(dataset)
else:
groups[t].append("")
self.context.groups = list(sorted(groups.keys()))
self.context.user_button = UserButton()
self.context.user_modal = UserModal()
def read_filters(query, i='query'):
import ast
filters = []
try:
filters = query.get('filters')
if type(filters) == str:
filters = ast.literal_eval(filters)
filters = filters[i]
except Exception as e:
print("FILTER ERR", e)
return filters
import datetime
def epoch(d):
return (d - datetime.datetime(1970, 1, 1)).total_seconds()
class QueryPage(Page, pudgy.SassComponent, pudgy.BackboneComponent, pudgy.ServerBridge):
def __prepare__(self):
# locate the potential views
query = QuerySpec(flask.request.args)
self.context.error = None
has_saved = False
if "saved" in self.context and self.context.saved:
query = QuerySpec(self.context.saved.parsed)
has_saved = True
table = self.context.table
view = query.get('view', 'table')
pr = presenter.GetPresenter(table)
bs = backend.SybilBackend()
bs.clear_cache(table=table)
try:
table_info = bs.get_table_info(table)
tables = filter(lambda t: rbac.check("query", t), bs.list_tables())
except Exception as e:
self.context.error = "Couldn't read table info for table %s" % (table)
return
table_selector = Selector(name="table", selected=table, options=tables)
VwClass = get_view_by_name(view)
view = VwClass()
view.context.update(metadata=table_info, presenter=pr, query=query)
# its up to a view to decide on marshalling its data to client,
# but we auto marshal the table metadata and query for every view
view.marshal(metadata=table_info, query=query)
viewarea = ViewArea()
if self.context.saved:
sq = self.context.saved
if not sq.results:
viewarea.call("no_results", {
"errmsg" : "No results found for this query"
});
view.context.update(sq.parsed, results=sq.results, compare=sq.compare)
view.marshal(query=sq.parsed, results=sq.results, compare=sq.compare,
parsed=sq.parsed, created=epoch(sq.created))
viewarea.call("set_view", view)
viewarea.context.update(view=view)
filters = read_filters(query)
compare_filters = read_filters(query, 'compare')
qs = QuerySidebar(info=table_info, view=view, filters=filters or [],
compare_filters=compare_filters, metadata=table_info)
if compare_filters:
qs.call('show_compare_filters')
qs.call("supports_compare_filters", view.SUPPORT_COMPARE_QUERIES)
if has_saved:
qs.call("show_results")
qs.set_ref("sidebar")
qs.marshal(table=table, viewarea=viewarea, metadata=table_info)
self.set_ref("querypage")
user_button = UserButton()
user_modal = UserModal(show_queries=True)
self.marshal(sidebar=qs, table=table, user_modal=user_modal)
self.context.update(
table=table,
sidebar=qs,
user_button=user_button,
user_modal=user_modal,
viewarea=viewarea,
table_selector=table_selector)
# TODO: place these on QueryPage?
@QuerySidebar.api
@rpc_login_required
def run_query(cls, table=None, query=None, viewarea=None, filters=[]):
if not rbac.check("query", table):
return {
"error" : "You don't have access to this table"
}
user = flask_security.core.current_user.email
# this is a name/value encoded array, unfortunately
query = QuerySpec(query)
query.add('table', table)
query.add('filters', string_dict(filters))
d = query.__makedict__()
bs = backend.SybilBackend()
ti = bs.get_table_info(table)
view = query.get('view')
VwClass = get_view_by_name(view)
# this lets a class modify the parameters to a query using
# its own custom params
VwClass.add_custom_params(query)
query.set('viewbase', VwClass.BASE)
res = bs.run_query(table, query, ti)
bs.log_query(user, query)
cmp = None
against = query.get('against', '')
filters = query.get('filters')
compare_filters = filters['compare']
if against or len(compare_filters):
compare_spec = QuerySpec(query.md)
if against:
now = time_to_seconds('now')
compare_delta = time_to_seconds(against) - now
# compare_delta is in ms
query.set('compare_delta', compare_delta*1000)
startms = query.get('start_ms')
endms = query.get('end_ms')
startms += compare_delta * 1000
endms += compare_delta * 1000
compare_spec.set('start_ms', startms)
compare_spec.set('end_ms', endms)
compare_spec.set('filters', { "query" : compare_filters })
cmp = bs.run_query(table, compare_spec, ti)
query.set('compare_mode', 1)
bs.log_query(user, compare_spec)
sq = results.save_for_user(flask_security.core.current_user, query, res, cmp)
d["h"] = sq.hashid
v = VwClass()
v.context.update(query=sq.parsed, results=sq.results, metadata=ti, compare=sq.compare)
v.marshal(query=sq.parsed, results=sq.results, compare=sq.compare, metadata=ti, parsed=epoch(sq.created))
if viewarea:
if not res:
viewarea.call("no_results", {
"errmsg" : "No results found for this query"
});
viewarea.html(v.render())
viewarea.call("set_view", v)
d['filters'] = json.dumps(d['filters'])
queryUrl = unquote_plus(flask.url_for('get_view', **d))
return {
"queryUrl": queryUrl,
"res" : res,
"cmp" : cmp,
"query" : d
}
@QuerySidebar.api
@rpc_login_required
def update_controls(cls, table=None, view=None, query=None, viewarea=None, filters=[]):
pr = presenter.GetPresenter(table)
bs = backend.SybilBackend()
ti = bs.get_table_info(table)
query = QuerySpec(query)
VwClass = get_view_by_name(view)
v = VwClass()
v.context.update(metadata=ti, presenter=pr, query=query)
query_filters= filters['query']
qs = QuerySidebar(view=v, presenter=pr, query=query, filters=query_filters, metadata=ti)
qs.__prepare__()
qs.nomarshal()
has_compare_filters = 'compare' in filters and len(filters['compare']) > 0
if v.SUPPORT_COMPARE_QUERIES and has_compare_filters:
cls.call("show_compare_filters")
else:
cls.call("hide_compare_filters")
cls.call("supports_compare_filters", v.SUPPORT_COMPARE_QUERIES)
# we undelegate our events because we are about to replace ourself
# with the same component
cls.html(qs.context.querycontrols.render(), selector=".querycontrols")
@QueryPage.api
@rpc_login_required
def get_saved_queries(cls, table=None):
user = flask_security.core.current_user
if table:
recent_queries = results.get_for_user(user, table)
return {
"recent" : recent_queries,
"table" : table
}
```
#### File: plugins/snorkel_advanced_views/timeline.py
```python
import pudgy
import os
from snorkel.components import Selector, ControlRow
from snorkel.views import ViewBase
def make_dict(arr):
return dict([(w,w) for w in arr])
class TimelineView(ViewBase, pudgy.JSComponent, pudgy.SassComponent):
NAME="timeline"
BASE="samples"
DISPLAY_NAME="Timeline View"
BASE_DIR=os.path.dirname(__file__)
def add_timeline_controls(self, controls):
groups = make_dict(self.context.metadata["columns"]["strs"])
event_field = Selector(
name="event_field",
options=groups,
selected=self.context.query.get("event_field"))
controls.append(ControlRow("event_field", "Event Field", event_field))
def get_controls(self):
controls = []
self.add_time_controls(controls)
# self.add_time_comparison(controls)
self.add_timeline_controls(controls)
self.add_groupby_selector(controls)
self.add_limit_selector(controls)
self.add_go_button(controls)
return controls
```
#### File: plugins/snorkel_basic_views/dist.py
```python
import os
import pudgy
from snorkel.views import ViewBase
class DistView(ViewBase, pudgy.JSComponent, pudgy.SassComponent):
NAME="dist"
BASE="dist"
DISPLAY_NAME="Dist View"
BASE_DIR=os.path.dirname(__file__)
SUPPORT_COMPARE_QUERIES = True
def get_controls(self):
controls = []
self.add_time_controls(controls)
self.add_field_selector(controls)
self.add_go_button(controls)
return controls
```
#### File: plugins/snorkel_basic_views/scatter.py
```python
import pudgy
import os
from snorkel.components import Selector, ControlRow
from snorkel.views import ViewBase
def make_dict(arr):
return dict([(w,w) for w in arr])
class ScatterView(ViewBase, pudgy.JSComponent):
NAME="scatter"
BASE="samples"
DISPLAY_NAME="Scatter View"
BASE_DIR=os.path.dirname(__file__)
def add_scatter_controls(self, controls):
fields = make_dict(self.context.metadata["columns"]["ints"])
field = Selector(
name="field",
options=fields,
selected=self.context.query.get('field'))
field2 = Selector(
name="field_two",
options=fields,
selected=self.context.query.get('field_two'))
reverse_axis = Selector(
name="reverse_axis",
options={
"Reverse Both " : "reverse_b",
"Reverse Y Axis" : "reverse_y",
"Reverse X Axis" : "reverse_x",
"Normal" : "none",
},
selected=self.context.query.get('reverse_axis', "none"))
controls.append(ControlRow("field", "Field", field))
controls.append(ControlRow("field_two", "Field (2)", field2))
controls.append(ControlRow("reverse_axis", "Reverse Axis", reverse_axis))
def get_controls(self):
controls = []
self.add_time_controls(controls)
# self.add_time_comparison(controls)
self.add_scatter_controls(controls)
self.add_groupby_selector(controls)
self.add_limit_selector(controls)
self.add_go_button(controls)
return controls
```
#### File: snorkel/src/query_spec.py
```python
import werkzeug
try:
import dotmap
except:
dotmap = None
try:
import addict
except:
addict = None
class QuerySpec(object):
def __init__(self, query):
# TODO: list all attributes of a query spec up front so others know what to expect
md = werkzeug.MultiDict()
for q in query:
if type(q) == dict:
md.add(q['name'], q['value'].strip())
elif type(q) == list or type(q) == tuple:
md.add(q[0], q[1].strip())
else:
md.add(q, query[q])
self.ismultidict = False
self.isdotmap = False
if isinstance(query, werkzeug.MultiDict):
self.ismultidict = True
elif addict and isinstance(query, addict.Dict):
self.isdotmap = True
elif dotmap and isinstance(query, dotmap.DotMap):
self.isdotmap = True
elif isinstance(query, list):
self.ismultidict = True
else:
raise Exception("Unknown entry for query spec")
self.md = md
# we will need to put together an exported interface
self.fields = self.get_fields()
self.groupby = self.get_groupby()
def __makedict__(self):
ret = {
}
for f in self.md:
if f.endswith("[]"):
if self.ismultidict:
ret[f] = self.md.getlist(f)
else:
ret[f] = self.md.get(f)
else:
ret[f] = self.md.get(f)
return ret
def __json__(self):
return self.__makedict__()
def setlist(self, k, v):
self.md.setlist(k, v)
def set(self, k, v):
if k in self.md:
self.md.pop(k)
self.md.add(k,v)
def add(self, k, v):
self.md.add(k, v)
def getlist(self, k, d=[]):
if self.ismultidict:
return self.md.getlist(k)
return self.md.get(k) or []
def get(self, k, d=None):
return self.md.get(k, d)
def get_metric(self):
op = self.md.get('metric')
if not op:
op = self.md.get('agg', '')
op = op.lstrip("$")
return op
def get_groupby(self):
g = self.getlist('groupby[]')
if not g:
g = self.getlist('group_by')
return g
def get_fields(self):
g = self.getlist('fields[]')
if not g:
g = self.getlist('fieldset')
return g
def get_custom_fields(self):
g = self.getlist('custom_fields[]')
if not g:
g = self.getlist('custom_fields')
return g
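# Usage sketch (field names are illustrative):
#   spec = QuerySpec([("table", "logs"), ("groupby[]", "status")])
#   spec.get("table")     # -> "logs"
#   spec.get_groupby()    # -> ["status"]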
```
#### File: snorkel/src/results.py
```python
from .models import SavedQuery
from pudgy.util import getrandhash
from . import fastjson as json
from . import presenter
def save_for_user(user, params, results, compare={}):
table = params.get('table')
table = presenter.GetRealTable(table)
params = params.__json__()
hashid = getrandhash(json.dumps(params))
hashid = hashid[:10]
sq = SavedQuery.create(results=results, user=user.id, parsed=params, hashid=hashid,
table=table, compare=compare or {})
return sq
def get_for_user(user, table, limit=30):
table = presenter.GetRealTable(table)
res = SavedQuery \
.select(
SavedQuery.created,
SavedQuery.parsed,
SavedQuery.hashid,
SavedQuery.user
).limit(limit) \
.order_by(-SavedQuery.created) \
.where(SavedQuery.user == user.id, SavedQuery.table == table)
res = res.dicts()
return list(reversed(res))
def zip_results():
while True:
ret = list(SavedQuery.select().where(SavedQuery.zipped == False).limit(10))
if not ret:
break
for r in ret:
r.zipped = True
print "ZIPPING", r.id
r.save()
def get_by_hashid(hashid):
return list(SavedQuery.select().where(SavedQuery.hashid == hashid).dicts())
if __name__ == "__main__":
zip_results()
```
#### File: snorkel/src/util.py
```python
import subprocess
from . import fastjson as json
# time translation command is:
# `date -d "<str>" +%s`
def time_to_seconds(timestr):
cmd_args = ["date", "-d", timestr, "+%s"]
try:
output = subprocess.check_output(cmd_args)
except:
raise Exception("Unknown time string: ", timestr)
return int(output)
def time_delta_to_seconds(timedelta):
now = time_to_seconds("now")
then = time_to_seconds(timedelta)
return now - then
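# Examples (require GNU date on the PATH; results are approximate):
#   time_to_seconds("now")            # current epoch seconds
#   time_delta_to_seconds("-1 week")  # ~604800 (seconds in a week)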
def return_json(d):
return json.dumps(d), 200, {"ContentType" : "application/json"}
def add_cache_headers(r):
r.cache_control.max_age = 60 * 60 * 24 * 7 # one week cache
return r
# from https://stackoverflow.com/questions/1254454/fastest-way-to-convert-a-dicts-keys-values-from-unicode-to-str
import collections
try:
basestring
except NameError:
basestring = str
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
string_dict = convert
```
|
{
"source": "jdibiccari/pomem-cli",
"score": 3
}
|
#### File: jdibiccari/pomem-cli/add_poem.py
```python
from slugify import slugify
from multiline_input import multiline_input
import yaml
def add_new():
# Gather poem meta data
prompts = [
"author",
"title"
]
poem_data = {}
body = ""
for prompt in prompts:
response = raw_input(prompt + ': ')
poem_data[prompt] = response
# Gather poem body
stopword = "@@"
print "Copy and paste the body of the poem. Type '{}' and enter when you are finished.".format(stopword)
body = multiline_input(stopword)
poem_data['body'] = body
slug = slugify(poem_data['title'])
with open('data/' + slug + '.yml', 'w') as yaml_file:
yaml_file.write(yaml.dump(poem_data, allow_unicode=True, default_flow_style=False))
```
#### File: jdibiccari/pomem-cli/bleeping.py
```python
import re
import random
from slugify import slugify
from string import punctuation as PUNCTUATION
import yaml
import nltk
from nltk.corpus import wordnet as wn
POS = ['NN+', 'VB+', 'JJ+', 'RB+']
DIFFICULTY_LEVELS = [1, 2, 3, 4, 5]
BLEEP_CHARACTER = "."
POEM_DIR = 'data/'
def load_yaml_from_file(input):
file = 'data/{input}.yml'.format(input=input)
with open (file, 'r') as f:
doc = yaml.load(f)
poem = doc['body'].splitlines(True)
return poem
def tokens(line):
return nltk.word_tokenize(line)
def tagged_tokens(tokens):
return nltk.pos_tag(tokens)
def pos_filter(tagged_tokens):
pos_reg = '|'.join(POS)
match_words = []
nonmatch_words = []
for token in tagged_tokens:
if re.match(pos_reg, token[1]):
match_words.append(token[0])
else:
if not token[0] in PUNCTUATION:
nonmatch_words.append(token[0])
return [match_words, nonmatch_words]
def pos_tag_line(line):
return pos_filter(tagged_tokens(tokens(line)))
def select_random_from_list(removal_candidates, qty=1):
import itertools
for word_set in removal_candidates:
random.shuffle(word_set)
return list(itertools.chain.from_iterable(removal_candidates))[:qty]
# def get_choices(words):
# output = ''
# for i, word in enumerate(words):
# output += '{0}. {1} '.format(i + 1, word)
# return output
def get_similar_words(word):
output = [word]
for ss in wn.synsets(word):
for lemma in ss.lemma_names():
output.append(lemma)
return output
def bleep_line(line, level=1, bleep_character=BLEEP_CHARACTER):
pos_tokens = pos_tag_line(line)
to_remove = select_random_from_list(pos_tokens, level)
new_line = line
for word in to_remove:
replacement = bleep_character * len(word)
new_line = new_line.replace(word, replacement, 1)
return new_line, to_remove
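# Example (output varies because the hidden word is chosen at random; `level`
# controls how many words are bleeped):
#   bleep_line("The quick brown fox jumps", level=1)
#   # might return ("The quick brown ... jumps", ["fox"])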
def bleep(lines, level=1, bleep_character=BLEEP_CHARACTER):
# Per each line there is a set number of possibilites, could store eventually.
return [bleep_line(line, level, bleep_character) for line in lines]
```
|
{
"source": "jdickins21/proj8-Gcal",
"score": 4
}
|
#### File: jdickins21/proj8-Gcal/agenda.py
```python
import datetime
import arrow
class Appt:
"""
A single appointment, starting on a particular
date and time, and ending at a later time the same day.
"""
def __init__(self, begin, end, desc):
"""Create an appointment.
Arguments:
begin: An arrow date and time object. When the appointment starts.
end: An arrow date and time object, after begin. When the appointments ends.
desc: A string describing the appointment
Raises:
ValueError if appointment ends before it begins
"""
self.begin = begin
self.end = end
if begin >= end :
raise ValueError("Appointment end must be after begin")
self.desc = desc
return
@classmethod
def from_dict(cls, event_dict): #from an apt in the form of an item in busy_list or free_list; both lists are same form
"""
This function takes in a dictionary representation of an Appt and returns an Appt object
for it. The dictionary representation has a "desc", "begin" and "end" term.
"""
desc = event_dict["desc"]
begin = arrow.get(event_dict["begin"])
end = arrow.get(event_dict["end"])
return Appt(begin, end, desc)
def to_dict(self):
"""
This function takes in an Appt object and returns a dictionary representation of it where
the begin and end times of the appointment are stored in isoformat.
Returns a dictionary in the form {"desc": description_of_apt, "begin": isoformat_begin_date_time_of_apt,
"end": isoformat_end_date_time_of_apt}
"""
dict_rep = {}
dict_rep["desc"] = self.desc
dict_rep["begin"] = self.begin.isoformat()
dict_rep["end"] = self.end.isoformat()
return dict_rep
@classmethod
def from_string(cls, txt):
"""
This function takes in a string representation of an Appt and returns an Appt object.
"""
fields = txt.split("|")
if len(fields) != 2:
raise ValueError("Appt literal requires exactly one '|' before description")
timespec = fields[0].strip()
desc = fields[1].strip()
fields = timespec.split("-")
if len(fields) != 2:
raise ValueError("Appt literal must start with date and date seperated by blanks")
appt_begin_text = fields[0]
appt_end_text = fields[1]
begin = arrow.get(appt_begin_text, "MM/DD/YYYY h:mm A")
end = arrow.get(appt_end_text, "MM/DD/YYYY h:mm A")
result = Appt(begin, end, desc)
return result
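    # Example: Appt.from_string("10/31/2012 4:00 PM-10/31/2012 9:00 PM|Long dinner")
    # yields an Appt from 4:00 PM to 9:00 PM described as "Long dinner".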
def __lt__(self, other):
"""Does this appointment finish before other begins?
Arguments:
other: another Appt
Returns:
True iff this Appt is done by the time other begins.
"""
return self.end <= other.begin
def __gt__(self, other):
"""Does other appointment finish before this begins?
Arguments:
other: another Appt
Returns:
True iff other is done by the time this Appt begins
"""
return other < self
def overlaps(self, other):
"""Is there a non-zero overlap between this appointment
and the other appointment?
Arguments:
other is an Appt
Returns:
True iff there exists some duration (greater than zero)
between this Appt and other.
"""
return not (self < other or other < self)
def intersect(self, other, desc=""):
"""Return an appointment representing the period in
common between this appointment and another.
Requires self.overlaps(other).
Arguments:
other: Another Appt
desc: (optional) description text for this appointment.
Returns:
An appointment representing the time period in common
between self and other. Description of returned Appt
is copied from this (self), unless a non-null string is
provided as desc.
"""
if desc=="":
desc = self.desc
assert(self.overlaps(other))
# We know the day must be the same.
# Find overlap of times:
# Later of two begin times, earlier of two end times
new_begin = max(self.begin, other.begin)
new_end = min(self.end, other.end)
return Appt(new_begin, new_end, desc)
def union(self, other, desc=""):
"""Return an appointment representing the combined period in
common between this appointment and another.
Requires self.overlaps(other).
Arguments:
other: Another Appt
desc: (optional) description text for this appointment.
Returns:
An appointment representing the time period spanning
both self and other. Description of returned Appt
is concatenation of two unless a non-null string is
provided as desc.
"""
if desc=="":
desc = self.desc + " " + other.desc
assert(self.overlaps(other))
# We know the day must be the same.
# Find overlap of times:
# Earlier of two begin times, later of two end times
begin = min(self.begin, other.begin)
end = max(self.end, other.end)
return Appt(begin, end, desc)
def __str__(self):
"""Returns a string representation of appointment object.
Example:
"10/31/2012 4:00 PM-10/31/2012 9:00 PM|Long dinner"
"""
begstr = self.begin.format("MM/DD/YYYY h:mm A")
endstr = self.end.format("MM/DD/YYYY h:mm A")
return begstr + "-" + endstr + "|" + self.desc
class Agenda:
"""An Agenda is essentially a list of appointments,
with some agenda-specific methods.
"""
def __init__(self):
"""An empty agenda."""
self.appts = [ ]
@classmethod
def from_list(cls, apt_list): #list of lists
"""
Converts a list of dictionaries representing an agenda of appts into an agenda object
holding Appt objects.
"""
total_agenda = Agenda()
for apt in apt_list:
apt_obj = Appt.from_dict(apt)
total_agenda.append(apt_obj)
return total_agenda
def to_list(self):
"""
Takes an agenda object and converts to a list of dictionaries.
"""
apt_list = []
for apt in self:
dict_rep = apt.to_dict()
apt_list.append(dict_rep)
return apt_list
@classmethod
def from_file(cls, f):
"""Factory: Read an agenda from a file
Arguments:
f: A file object (as returned by io.open) or
an object that emulates a file (like stringio).
returns:
An Agenda object
"""
agenda = cls()
for line in f:
line = line.strip()
if line == "" or line.startswith("#"):
# Skip blank lines and comments
pass
else:
try:
agenda.append(Appt.from_string(line))
except ValueError as err:
print("Failed on line: ", line)
print(err)
return agenda
def append(self,appt):
"""Add an Appt to the agenda."""
self.appts.append(appt)
def intersect(self,other,desc=""):
"""Return a new agenda containing appointments
that are overlaps between appointments in this agenda
and appointments in the other agenda.
Titles of appointments in the resulting agenda are
taken from this agenda, unless they are overridden with
the "desc" argument.
Arguments:
other: Another Agenda, to be intersected with this one
desc: If provided, this string becomes the title of
all the appointments in the result.
"""
default_desc = (desc == "")
result = Agenda()
for thisappt in self.appts:
if default_desc:
desc = thisappt.desc
for otherappt in other.appts:
if thisappt.overlaps(otherappt):
result.append(thisappt.intersect(otherappt,desc))
return result
def normalize(self):
"""Merge overlapping events in an agenda. For example, if
the first appointment is from 1pm to 3pm, and the second is
from 2pm to 4pm, these two are merged into an appt from
1pm to 4pm, with a combination description.
After normalize, the agenda is in order by date and time,
with no overlapping appointments.
"""
if len(self.appts) == 0:
return
ordering = lambda ap: ap.begin #sort by begin date
self.appts.sort(key=ordering)
normalized = [ ]
# print("Starting normalization")
cur = self.appts[0]
for appt in self.appts[1:]:
if appt > cur:
# Not overlapping
# print("Gap - emitting ", cur)
normalized.append(cur)
cur = appt
else:
# Overlapping
# print("Merging ", cur, "\n"+
# "with ", appt)
cur = cur.union(appt)
# print("New cur: ", cur)
# print("Last appt: ", cur)
normalized.append(cur)
self.appts = normalized
def normalized(self):
"""
A non-destructive normalize
(like "sorted(l)" vs "l.sort()").
Returns a normalized copy of this agenda.
"""
copy = Agenda()
copy.appts = self.appts
copy.normalize()
return copy
def complement(self, freeblock):
"""Produce the complement of an agenda
within the span of a timeblock represented by
an appointment. For example,
if this agenda is a set of appointments, produce a
new agenda of the times *not* in appointments in
a given time period.
Args:
freeblock: Looking for time blocks in this period
that are not conflicting with appointments in
this agenda.
Returns:
A new agenda containing exactly the times that
are within the period of freeblock and
not within appointments in this agenda. The
description of the resulting appointments comes
from freeblock.desc.
"""
copy = self.normalized()
comp = Agenda()
desc = freeblock.desc
cur_time = freeblock.begin #arrow date and time
for appt in copy.appts:
if appt < freeblock:
continue
if appt > freeblock:
if cur_time < freeblock.end:
comp.append(Appt(cur_time,freeblock.end, desc))
cur_time = freeblock.end
break
if cur_time < appt.begin:
# print("Creating free time from", cur_time, "to", appt.begin)
comp.append(Appt(cur_time, appt.begin, desc))
cur_time = max(appt.end,cur_time)
if cur_time < freeblock.end:
# print("Creating final free time from", cur_time, "to", freeblock.end)
comp.append(Appt(cur_time, freeblock.end, desc))
return comp
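    # Usage sketch (times are illustrative):
    #   busy = Agenda()
    #   busy.append(Appt.from_string("10/31/2012 1:00 PM-10/31/2012 3:00 PM|Meeting"))
    #   day = Appt.from_string("10/31/2012 9:00 AM-10/31/2012 5:00 PM|Available")
    #   busy.complement(day)  # two "Available" blocks: 9:00-1:00 and 3:00-5:00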
def complementTimeSpan(self, begin_date, end_date, begin_time, end_time):
"""
Calculate the complement of an agenda within a date and time span. This method
calls the complement method per day in the date span.
"""
total_free = Agenda()
date = begin_date.date()
end_date = end_date.date()
while(date <= end_date):
fb_year = date.year
fb_month = date.month
fb_day = date.day
fb_begin = begin_time.replace(year=fb_year, month=fb_month, day=fb_day)
fb_end = end_time.replace(year=fb_year, month=fb_month, day=fb_day)
freeblock = Appt(fb_begin, fb_end, "Available")
free_agenda = self.complement(freeblock)
for apt in free_agenda:
total_free.append(apt)
begin_date = begin_date.replace(days=+1)
date = begin_date.date()
return total_free
def __len__(self):
"""Number of appointments, callable as built-in len() function"""
return len(self.appts)
def __iter__(self):
"""An iterator through the appointments in this agenda."""
return self.appts.__iter__()
def __str__(self):
"""String representation of a whole agenda"""
rep = ""
for appt in self.appts:
rep += str(appt) + "\n"
return rep[:-1]
def __eq__(self,other):
"""Equality, ignoring descriptions --- just equal blocks of time"""
if len(self.appts) != len(other.appts):
return False
for i in range(len(self.appts)):
mine = self.appts[i]
theirs = other.appts[i]
if not (mine.begin == theirs.begin and
mine.end == theirs.end):
return False
return True
```
|
{
"source": "jdidion/docparse",
"score": 2
}
|
#### File: docparse/docparse/google.py
```python
from functools import partial
import re
from typing import Tuple, Dict, Sequence, Optional, cast
from docparse import DocString, DocStyle, Paragraphs, Typed, Field, parser
SECTION_RE = re.compile(r"^(\w[\w ]+):$")
DIRECTIVE_RE = re.compile(r"^.. ([\w ]+):$")
XREF_RE = re.compile(r"(:(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)")
SINGLE_COLON_RE = re.compile(r"(?<!:):(?!:)")
INDENT_RE = re.compile(r"^(\s*)(.*)$")
TYPED_PARAM_RE = re.compile(r"\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)")
KEYWORD_RE = re.compile(r"[^\w]")
NAME_RE = re.compile(
r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`| (?P<name2>[a-zA-Z0-9_.-]+))\s*",
re.X
)
SECTION_PARSERS = {}
ALIASES = {}
@parser(DocStyle.GOOGLE)
def parse_google_docstring(docstring, allow_directives=True) -> DocString:
lines = docstring.splitlines()
num_lines = len(lines)
if num_lines == 0:
return DocString()
def check_section_header(_line) -> Tuple[Optional[str], bool]:
match = SECTION_RE.match(_line)
if match:
name = match.group(1)
if name in ALIASES:
name = ALIASES[name]
if name in SECTION_PARSERS:
return name, False
if allow_directives:
match = DIRECTIVE_RE.match(_line)
if match:
return match.group(1), True
return None, False
def add_section(_name, _is_directive, _lines, _sections, _directives):
if _is_directive:
_directives[_name] = _parse_generic_section(_lines)
else:
parser_func = SECTION_PARSERS.get(_name, _parse_generic_section)
_sections[KEYWORD_RE.sub("_", _name.lower())] = parser_func(_lines)
sections = {}
directives = {}
cur_section = "Description"
cur_directive = False
cur_lines = []
for line in lines:
section, is_directive = check_section_header(line)
if section:
add_section(cur_section, cur_directive, cur_lines, sections, directives)
cur_section = section
cur_directive = is_directive
cur_lines = []
else:
cur_lines.append(line)
if cur_lines:
add_section(cur_section, cur_directive, cur_lines, sections, directives)
return DocString(directives=directives, **sections)
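# Example (sketch): parsing a small Google-style docstring.
#   doc = parse_google_docstring(
#       "Do a thing.\n\nArgs:\n    x (int): The value.\n\nReturns:\n    bool: Success."
#   )
#   doc.parameters["x"].datatype   # -> "int"
#   doc.returns.datatype           # -> "bool"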
def add_aliases(name: str, *aliases: str):
for alias in aliases:
ALIASES[alias] = name
add_aliases("Examples", "Example")
add_aliases("Keyword Arguments", "Keyword Args")
add_aliases("Notes", "Note")
add_aliases("Parameters", "Args", "Arguments")
add_aliases("Returns", "Return")
add_aliases("Warning", "Warnings")
add_aliases("Yields", "Yield")
def section_parser(*sections: str):
"""Decorator that registers a function as a section paraser.
"""
def decorator(f):
for section in sections:
if isinstance(section, str):
SECTION_PARSERS[section] = f
else:
name, kwargs = cast(Tuple[str, dict], section)
SECTION_PARSERS[name] = partial(f, **kwargs)
return f
return decorator
@section_parser(
"Attention",
"Caution",
"Danger",
"Error",
"Hint",
"Important",
"Note",
"References",
"See also",
"Tip",
"Todo",
"Warning"
)
def _parse_generic_section(lines: Sequence[str]) -> Paragraphs:
"""Combine lines and remove indents. Lines separated by blank lines are
grouped into paragraphs.
Args:
lines: A sequence of line strings.
Returns:
A tuple of paragraph strings.
"""
return Paragraphs.from_lines(tuple(line.strip() for line in lines))
@section_parser("Examples")
def _parse_verbatim_section(lines: Sequence[str]) -> Sequence[str]:
return _dedent(lines)
@section_parser(
("Parameters", dict(parse_type=True)),
("Keyword Arguments", dict(parse_type=True)),
("Other Parameters", dict(parse_type=True)),
"Methods",
"Warns"
)
def _parse_fields_section(
lines: Sequence[str], parse_type: bool = False, prefer_type=False
) -> Dict[str, Field]:
cur_indent = None
fields = []
for line in lines:
indent, line = INDENT_RE.match(line).groups()
indent_size = len(indent)
if line and (cur_indent is None or indent_size <= cur_indent):
# New parameter
cur_indent = indent_size
before, colon, after = _partition_field_on_colon(line)
field_type = None
if parse_type:
field_name, field_type = _parse_parameter_type(before)
else:
field_name = before
if prefer_type and not field_type:
field_type, field_name = field_name, field_type
fields.append((
_escape_args_and_kwargs(field_name),
field_type,
[after.lstrip()]
))
elif fields:
# Add additional lines to current parameter
fields[-1][2].append(line.lstrip())
else:
raise ValueError(f"Unexpected line in Args block: {line}")
return dict(
(field[0], Field(field[0], field[1], Paragraphs.from_lines(field[2])))
for field in fields
)
@section_parser(
"Returns",
"Yields"
)
def _parse_returns_section(lines: Sequence[str]) -> Typed:
before, colon, after = _partition_field_on_colon(lines[0])
return_type = None
if colon:
if after:
return_desc = [after] + list(lines[1:])
else:
return_desc = lines[1:]
return_type = before
else:
return_desc = lines
return Typed(return_type, Paragraphs.from_lines(return_desc))
@section_parser("Raises")
def _parse_raises_section(lines: Sequence[str]) -> Dict[str, Field]:
fields = _parse_fields_section(lines, prefer_type=True)
for field in fields.values():
match = NAME_RE.match(field.datatype).groupdict()
if match["role"]:
field.datatype = match["name"]
return fields
def _partition_field_on_colon(line: str) -> Tuple[str, str, str]:
before_colon = []
after_colon = []
colon = ""
found_colon = False
for i, source in enumerate(XREF_RE.split(line)):
if found_colon:
after_colon.append(source)
else:
m = SINGLE_COLON_RE.search(source)
if (i % 2) == 0 and m:
found_colon = True
colon = source[m.start(): m.end()]
before_colon.append(source[:m.start()])
after_colon.append(source[m.end():])
else:
before_colon.append(source)
return (
"".join(before_colon).strip(),
colon,
"".join(after_colon).strip()
)
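# e.g. _partition_field_on_colon("param1 (int): The first parameter.")
#      -> ("param1 (int)", ":", "The first parameter.")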
def _parse_parameter_type(name_type_str: str) -> Tuple[str, Optional[str]]:
match = TYPED_PARAM_RE.match(name_type_str) # type: ignore
if match:
return match.group(1), match.group(2)
else:
return name_type_str, None
def _escape_args_and_kwargs(name: str) -> str:
if name.startswith("**"):
return r"\*\*" + name[2:]
elif name.startswith("*"):
return r"\*" + name[1:]
else:
return name
def _dedent(lines: Sequence[str]) -> Sequence[str]:
lens = [
len(INDENT_RE.match(line).group(1))
for line in lines if line
]
min_indent = min(lens) if lens else 0
return [line[min_indent:] for line in lines]
```
#### File: docparse/tests/test_google.py
```python
from docparse import Paragraphs, get_docstring
from docparse.google import parse_google_docstring
from . import google_style
def test_functions():
docs = parse_google_docstring(
get_docstring(google_style.function_with_types_in_docstring)
)
assert docs.summary == "Example function with types documented in the docstring."
assert tuple(docs.description.paragraphs) == (
"Example function with types documented in the docstring.",
"`PEP 484`_ type annotations are supported. If attribute, parameter, and "
"return types are annotated according to `PEP 484`_, they do not need to be "
"included in the docstring:"
)
assert len(docs.parameters) == 2
assert "param1" in docs.parameters
param1 = docs.parameters["param1"]
assert param1.name == "param1"
assert param1.datatype == "int"
assert str(param1.description) == "The first parameter."
assert "param2" in docs.parameters
param2 = docs.parameters["param2"]
assert param2.datatype == "str"
assert str(param2.description) == "The second parameter."
returns = docs.returns
assert returns.datatype == "bool"
assert str(returns.description) == \
"The return value. True for success, False otherwise."
assert docs.directives == {
"_PEP 484": Paragraphs(["https://www.python.org/dev/peps/pep-0484/"])
}
```
|
{
"source": "jdidion/hon",
"score": 3
}
|
#### File: hon/hon/utils.py
```python
from contextlib import contextmanager
import os
from pathlib import Path
from shlex import quote
import subprocess
import sys
from typing import IO, List, Union
import toml
def read_toml(path: Path):
with open(path, "rt") as inp:
        return toml.load(inp)
@contextmanager
def chdir(target: Path):
curwd = Path.cwd()
try:
os.chdir(target)
yield
finally:
os.chdir(curwd)
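# e.g. (target path is illustrative):
#   with chdir(Path("/tmp")):
#       ...  # work in /tmp; the previous working directory is restored on exit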
def run_cmd(
cmd: List[str],
stdout: Union[bool, IO, None] = None,
stderr: Union[bool, IO, None] = None,
shell: bool = False,
**kwargs
):
"""
Execute a command in a subprocess.
Args:
cmd: List of command arguments.
stdout: If True, capture stdout and return it. If None, forward stdout to
sys.stdout. If a file-like object, write stdout to the file.
stderr: If True, capture stderr and return it. If None, forward stderr to
sys.stdout. If a file-like object, write stderr to the file.
shell: Whether to execute the command using a shell.
**kwargs: Additional kwargs to `subprocess.check_output`.
Returns:
A tuple (stdout, stderr). Each will be None unless their respective
parameters were set to True.
"""
    if stdout is None:
        kwargs["stdout"] = sys.stdout
        proc_fn = subprocess.check_call
    elif stdout is True:
        # check_output captures stdout itself; passing a stdout= kwarg to it
        # raises ValueError, so nothing extra is set here
        proc_fn = subprocess.check_output
    else:
        # stdout is a file-like object: write the child's stdout to it
        kwargs["stdout"] = stdout
        proc_fn = subprocess.check_call
    if stderr is None:
        kwargs["stderr"] = sys.stderr
    elif stderr is True:
        kwargs["stderr"] = subprocess.PIPE
    else:
        # stderr is a file-like object: write the child's stderr to it
        kwargs["stderr"] = stderr
if shell:
cmd_str = " ".join(quote(arg) for arg in cmd)
if "executable" not in kwargs:
kwargs["executable"] = "/bin/bash"
return proc_fn(cmd_str, shell=True, **kwargs)
else:
return proc_fn(cmd, shell=False, **kwargs)
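# Usage sketch (commands are illustrative):
#   out = run_cmd(["echo", "hello"], stdout=True)  # captured output as bytes
#   run_cmd(["ls", "-l"], shell=True)              # args quoted and run via /bin/bash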
```
|
{
"source": "jdidion/htsget-server",
"score": 2
}
|
#### File: htsget-server/htsgetserver/__init__.py
```python
from abc import ABCMeta, abstractmethod
from http import HTTPStatus
import json
from threading import Lock
from typing import Sequence
from urllib.parse import ParseResult, parse_qs
from htsgetserver.server import (
Router,
RoutingServer,
RoutingHttpRequestHandler,
HttpError,
NotFoundHttpError,
)
from htsgetserver.store import DataStore, DefaultDataStore
from htsgetserver.utils import Runnable, run_interruptible
HTSGET_VERSION = (1, 1, 0)
OK_RESPONSE_CONTENT_TYPE = \
    f"application/vnd.ga4gh.htsget.v{'.'.join(str(v) for v in HTSGET_VERSION)}+json; charset=utf-8"
ERROR_RESPONSE_CONTENT_TYPE = "application/json"
DEFAULT_BLOCK_SIZE = 2 ** 30 # 1 GB
DEFAULT_PORT = 80
# TODO:
# TLS 1.2
# CORS
# urllib.urlencode data block URLs
# Content-Length header
# chunked transfer encoding
class UnsupportedMediaTypeHttpError(HttpError):
def __init__(self, media_type):
super().__init__(
HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
f"The requested media type is unsupported: {media_type}"
)
class RouteHandler(metaclass=ABCMeta):
@abstractmethod
def __call__(
self, sub_route: Sequence[str], parsed_url: ParseResult,
http_request_handler: "HtsgetHttpRequestHandler"
):
pass
class ApiRouteHandler(RouteHandler, metaclass=ABCMeta):
def __init__(self, data_store: DataStore, block_size: int):
self.data_store = data_store
self.block_size = block_size
self._cache = {}
self._cache_lock = Lock()
def __call__(
self, sub_route: Sequence[str], parsed_url: ParseResult,
http_request_handler: "HtsgetHttpRequestHandler"
):
if len(sub_route) != 0:
raise NotFoundHttpError(parsed_url.path)
query = parse_qs(parsed_url.query, True, True)
self.handle(sub_route, query, http_request_handler)
@property
@abstractmethod
def default_format(self) -> str:
pass
@property
@abstractmethod
def index_format(self) -> str:
pass
def handle(
self, record_id: Sequence[str], query: dict,
http_request_handler: "HtsgetHttpRequestHandler"
):
"""Handle a query. By default:
* Assumes route_id as a relative path to a file in the data store.
* Uses the format specified in the query to determine the file extension.
* Attempts to resolve the associated index, and, if it doesn't exist, requests
that it be built.
* Generates the ticket response.
Args:
record_id: ID of the record for which to generate a ticket.
query: Dict of query parameters.
http_request_handler: The HTTPRequestHandler that called this
ApiRouteHandler.
"""
data_format = query.get('format', self.default_format)
data_resource, index_resource = self.data_store.resolve(
record_id, data_format, self.index_format
)
if data_resource in self._cache:
ticket_str = self._cache[data_resource]
else:
if not index_resource.exists:
self.create_index(data_resource, index_resource)
self.data_store.add_resource(index_resource)
ticket_urls = self.create_ticket_urls(
data_format, query, data_resource, index_resource
)
ticket = dict(
htsget=dict(
format=data_format.upper(),
urls=ticket_urls
)
)
ticket_str = json.dumps(ticket)
try:
self._cache_lock.acquire()
if data_resource not in self._cache:
self._cache[data_resource] = ticket_str
finally:
self._cache_lock.release()
http_request_handler.send_response(HTTPStatus.OK)
http_request_handler.send_header("Content-Type", OK_RESPONSE_CONTENT_TYPE)
http_request_handler.end_headers()
        http_request_handler.wfile.write(ticket_str.encode("utf-8"))
@abstractmethod
def create_index(self, data_resource, index_resource) -> None:
pass
def create_ticket_urls(
self, data_format, query: dict, data_resource, index_resource
) -> Sequence[dict]:
pass
class ReadsApiRouteHandler(ApiRouteHandler):
@property
def default_format(self) -> str:
return "BAM"
@property
def index_format(self) -> str:
return "BAI"
def create_index(self, data_resource, index_resource) -> None:
pass
def handle(self, record_id, query, http_request_handler):
pass
class VariantsApiRouteHandler(ApiRouteHandler):
@property
def default_format(self) -> str:
return "VCF"
@property
def index_format(self) -> str:
return "TBI"
def create_index(self, data_resource, index_resource) -> None:
pass
def handle(self, record_id, query, http_request_handler):
pass
class BlockRouteHandler(RouteHandler):
def __call__(
self, sub_route: Sequence[str], parsed_url: ParseResult,
http_request_handler: "HtsgetHttpRequestHandler"
):
pass
class HtsgetHttpRequestHandler(RoutingHttpRequestHandler):
def check_headers(self):
if "Accept" in self.headers:
accept = self.headers["Accept"]
if not accept.startswith("application/"):
raise UnsupportedMediaTypeHttpError(accept)
accept_app = accept[12:].lower()
if accept_app == "json":
pass
elif (
accept_app.startswith("vnd.ga4gh.htsget.v") and
accept_app.endswith("+json")
):
try:
                    version = tuple(int(v) for v in accept_app[18:-5].split("."))
                except ValueError:
raise UnsupportedMediaTypeHttpError(accept)
if (
# TODO: support backwards compatibility
version < HTSGET_VERSION or
# Assume forward compatibility if the major version is the same
version[0] > HTSGET_VERSION[0]
):
raise UnsupportedMediaTypeHttpError(accept)
else:
raise UnsupportedMediaTypeHttpError(accept)
def handle_error(self, err: HttpError):
self.send_error(err.status)
self.send_header("Content-type", ERROR_RESPONSE_CONTENT_TYPE)
self.end_headers()
error_dict = dict(
htsget=dict(
error=err.error_type,
message=err.message
)
)
self.wfile.write(json.dumps(error_dict))
class HtsgetHttpServer(RoutingServer):
def __init__(self, **kwargs):
super().__init__(request_handler_class=HtsgetHttpRequestHandler, **kwargs)
class HtsgetServerRunner(Runnable):
def __init__(self):
self.server = None
def run(self, **kwargs):
self.server = HtsgetHttpServer(**kwargs)
def stop(self):
self.server.server_close()
def create_default_router(data_store, block_size=DEFAULT_BLOCK_SIZE):
router = Router()
router.add_route(['reads'], ReadsApiRouteHandler(data_store, block_size))
router.add_route(['variants'], VariantsApiRouteHandler(data_store, block_size))
router.add_route(['block'], BlockRouteHandler())
return router
def run(server_address=('localhost', DEFAULT_PORT), router=None):
if router is None:
router = create_default_router(DefaultDataStore())
run_interruptible(
HtsgetServerRunner(), server_address=server_address, router=router
)
```
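A minimal launch sketch for the module above. The host and port are illustrative; with no `router` argument, `run()` falls back to `create_default_router(DefaultDataStore())` exactly as defined above.
```python
# Hypothetical launch script; "localhost"/8080 are placeholder values.
from htsgetserver import run

if __name__ == "__main__":
    # Serves the default reads/variants/block routes backed by DefaultDataStore.
    run(server_address=("localhost", 8080))
```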
|
{
"source": "jdidion/pytest-wdl",
"score": 2
}
|
#### File: pytest-wdl/tests/test_core.py
```python
import gzip
import json
from typing import cast
from unittest.mock import Mock
import pytest
from pytest_wdl.config import UserConfiguration
from pytest_wdl.core import (
DefaultDataFile, DataDirs, DataManager, DataResolver, create_data_file
)
from pytest_wdl.localizers import LinkLocalizer, UrlLocalizer
from pytest_wdl.utils import tempdir
from . import GOOD_URL, setenv
def test_data_file():
with tempdir() as d:
foo = d / "foo.txt"
with pytest.raises(ValueError):
DefaultDataFile(foo, None)
bar = d / "bar.txt"
with open(foo, "wt") as out:
out.write("foo\nbar")
df = DefaultDataFile(bar, LinkLocalizer(foo))
assert str(df) == str(bar)
baz = d / "baz.txt"
with open(baz, "wt") as out:
out.write("foo\nbar")
df.assert_contents_equal(baz)
df.assert_contents_equal(str(baz))
df.assert_contents_equal(DefaultDataFile(baz))
blorf = d / "blorf.txt"
with open(blorf, "wt") as out:
out.write("foo\nblorf\nbork")
with pytest.raises(AssertionError):
df.assert_contents_equal(blorf)
df.compare_opts["allowed_diff_lines"] = 1
with pytest.raises(AssertionError):
df.assert_contents_equal(blorf)
df.compare_opts["allowed_diff_lines"] = 2
df.assert_contents_equal(blorf)
def test_data_file_gz():
with tempdir() as d:
foo = d / "foo.txt.gz"
with gzip.open(foo, "wt") as out:
out.write("foo\nbar")
df = DefaultDataFile(foo, allowed_diff_lines=0)
# Compare identical files
bar = d / "bar.txt.gz"
with gzip.open(bar, "wt") as out:
out.write("foo\nbar")
df.assert_contents_equal(bar)
df.assert_contents_equal(str(bar))
df.assert_contents_equal(DefaultDataFile(bar))
# Compare differing files
df.set_compare_opts(allowed_diff_lines=1)
baz = d / "baz.txt.gz"
with gzip.open(baz, "wt") as out:
out.write("foo\nbaz")
df.assert_contents_equal(bar)
df.assert_contents_equal(str(bar))
df.assert_contents_equal(DefaultDataFile(bar))
def test_data_file_dict_type():
with tempdir() as d:
foo = d / "foo.txt.gz"
with gzip.open(foo, "wt") as out:
out.write("foo\nbar")
df = create_data_file(
user_config=UserConfiguration(),
path=foo,
type={
"name": "default",
"allowed_diff_lines": 1
}
)
bar = d / "bar.txt.gz"
with gzip.open(bar, "wt") as out:
out.write("foo\nbaz")
df.assert_contents_equal(bar)
df.assert_contents_equal(str(bar))
df.assert_contents_equal(DefaultDataFile(bar))
def test_data_file_class():
dd = DataResolver(data_descriptors={
"foo": {
"class": "bar",
"value": 1
}
}, user_config=UserConfiguration())
assert dd.resolve("foo") == 1
def test_data_file_json_contents():
with tempdir() as d:
foo = d / "foo.json"
df = create_data_file(
user_config=UserConfiguration(),
path=foo,
contents={
"a": 1,
"b": "foo"
}
)
with open(df.path, "rt") as inp:
assert json.load(inp) == {
"a": 1,
"b": "foo"
}
def test_data_dirs():
with tempdir() as d:
mod = Mock()
mod.__name__ = "foo.bar"
cls = Mock()
cls.__name__ = "baz"
fun = Mock()
fun.__name__ = "blorf"
mod_cls_fun = d / "foo" / "bar" / "baz" / "blorf"
mod_cls_fun.mkdir(parents=True)
data_mod_cls_fun = d / "data" / "foo" / "bar" / "baz" / "blorf"
data_mod_cls_fun.mkdir(parents=True)
with pytest.raises(RuntimeError):
DataDirs(d, mod, fun, cls)
dd = DataDirs(d / "foo", mod, fun, cls)
assert dd.paths == [
mod_cls_fun,
d / "foo" / "bar" / "baz",
d / "foo" / "bar",
data_mod_cls_fun,
d / "data" / "foo" / "bar" / "baz",
d / "data" / "foo" / "bar",
d / "data"
]
mod_cls_fun = d / "foo" / "bar" / "blorf"
mod_cls_fun.mkdir(parents=True)
data_mod_cls_fun = d / "data" / "foo" / "bar" / "blorf"
data_mod_cls_fun.mkdir(parents=True)
dd = DataDirs(d / "foo", mod, fun)
assert dd.paths == [
mod_cls_fun,
d / "foo" / "bar",
data_mod_cls_fun,
d / "data" / "foo" / "bar",
d / "data"
]
def test_data_resolver():
with tempdir() as d:
test_data = {
"foo": {
"name": "foo.txt"
},
"bar": 1
}
foo_txt = d / "data" / "foo.txt"
foo_txt.parent.mkdir()
with open(foo_txt, "wt") as out:
out.write("bar")
mod = Mock()
mod.__name__ = ""
fun = Mock()
fun.__name__ = "test_foo"
dd = DataDirs(d, mod, fun)
resolver = DataResolver(test_data, UserConfiguration(None, cache_dir=d))
with pytest.raises(FileNotFoundError):
resolver.resolve("bork", dd)
assert resolver.resolve("foo", dd).path == foo_txt
assert resolver.resolve("bar", dd) == 1
def test_data_resolver_env():
with tempdir() as d:
path = d / "foo.txt"
with open(path, "wt") as out:
out.write("foo")
with setenv({"FOO": str(path)}):
resolver = DataResolver({
"foo": {
"env": "FOO"
}
}, UserConfiguration(None, cache_dir=d))
assert resolver.resolve("foo").path == path
bar = d / "bar.txt"
resolver = DataResolver({
"foo": {
"env": "FOO",
"path": bar
}
}, UserConfiguration(None, cache_dir=d))
assert resolver.resolve("foo").path == bar
def test_data_resolver_local_path():
with tempdir() as d:
path = d / "foo.txt"
with open(path, "wt") as out:
out.write("foo")
resolver = DataResolver({
"foo": {
"path": "foo.txt"
}
}, UserConfiguration(None, cache_dir=d))
assert resolver.resolve("foo").path == path
with setenv({"MYPATH": str(d)}):
resolver = DataResolver({
"foo": {
"path": "${MYPATH}/foo.txt"
}
}, UserConfiguration(None, cache_dir=d))
assert resolver.resolve("foo").path == path
def test_data_resolver_create_from_contents():
with tempdir() as d:
resolver = DataResolver({
"foo": {
"path": "dir1/dir2/foo.txt",
"contents": "foo"
}
}, UserConfiguration(None, cache_dir=d))
parent = d / "dir1" / "dir2"
foo = resolver.resolve("foo")
assert foo.path == parent / "foo.txt"
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
with tempdir() as d:
resolver = DataResolver({
"foo": {
"name": "foo.txt",
"contents": "foo"
}
}, UserConfiguration(None, cache_dir=d))
foo = resolver.resolve("foo")
assert foo.path == d / "foo.txt"
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
with tempdir() as d:
resolver = DataResolver({
"foo": {
"contents": "foo"
}
}, UserConfiguration(None, cache_dir=d))
foo = resolver.resolve("foo")
assert foo.path.parent == d
assert foo.path.exists()
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
def test_data_resolver_create_from_url():
with tempdir() as d:
resolver = DataResolver({
"foo": {
"url": GOOD_URL,
"path": "dir1/dir2/sample.vcf"
}
}, UserConfiguration(None, cache_dir=d))
foo = resolver.resolve("foo")
assert foo.path == d / "dir1" / "dir2" / "sample.vcf"
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
with tempdir() as d:
resolver = DataResolver({
"foo": {
"url": GOOD_URL,
"name": "sample.vcf"
}
}, UserConfiguration(None, cache_dir=d))
foo = resolver.resolve("foo")
assert foo.path == d / "sample.vcf"
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
with tempdir() as d:
resolver = DataResolver({
"foo": {
"url": GOOD_URL
}
}, UserConfiguration(None, cache_dir=d))
foo = resolver.resolve("foo")
assert foo.path == d / "test_file"
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
def test_data_resolver_create_from_datadir():
with tempdir() as d, tempdir() as d1:
mod = Mock()
mod.__name__ = "foo.bar"
cls = Mock()
cls.__name__ = "baz"
fun = Mock()
fun.__name__ = "blorf"
mod_cls_fun = d / "foo" / "bar" / "baz" / "blorf"
mod_cls_fun.mkdir(parents=True)
data_mod_cls_fun = d / "data" / "foo" / "bar" / "baz" / "blorf"
data_mod_cls_fun.mkdir(parents=True)
dd = DataDirs(d / "foo", mod, fun, cls)
resolver = DataResolver({
"boink": {
"name": "boink.txt",
},
"bobble": {
"name": "bobble.txt"
},
"burp": {
"name": "burp.txt",
"path": "burp.txt"
}
}, UserConfiguration(None, cache_dir=d1))
boink = d / "foo" / "bar" / "boink.txt"
with open(boink, "wt") as out:
out.write("boink")
assert boink == resolver.resolve("boink", dd).path
with pytest.raises(FileNotFoundError):
resolver.resolve("bobble", dd)
burp = d / "foo" / "bar" / "burp.txt"
with open(burp, "wt") as out:
out.write("burp")
burp_resolved = resolver.resolve("burp", dd).path
assert burp_resolved == d1 / "burp.txt"
assert burp_resolved.is_symlink()
with pytest.raises(FileNotFoundError):
resolver.resolve("bobble")
def test_data_manager():
dm = DataManager(
data_resolver=DataResolver(
{
"foo": {
"class": "x",
"value": 1
},
"bar": {
"class": "x",
"value": 2
}
}, UserConfiguration()
),
datadirs=None
)
assert [1, 2] == dm.get_list("foo", "bar")
assert {"foo": 1, "bork": 2} == dm.get_dict("foo", bork="bar")
def test_http_header_set_in_workflow_data():
"""
    Test that a workflow data file can define HTTP headers. This matters
    because the referenced URLs can point to different hosts that require
    different headers, so setting them at the data-file level allows that
    fine-grained control.
"""
with tempdir() as d:
config = UserConfiguration(cache_dir=d)
assert not config.default_http_headers
resolver = DataResolver({
"foo": {
"url": GOOD_URL,
"path": "sample.vcf",
"http_headers": {
"Auth-Header-Token": "TOKEN"
}
}
}, config)
foo = resolver.resolve("foo")
assert foo.path == d / "sample.vcf"
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
with setenv({"TOKEN": "this_is_the_token"}), tempdir() as d:
config = UserConfiguration(cache_dir=d)
assert not config.default_http_headers
resolver = DataResolver({
"foo": {
"url": GOOD_URL,
"path": "sample.vcf",
"http_headers": {
"Auth-Header-Token": "TOKEN"
}
}
}, config)
foo = resolver.resolve("foo")
assert foo.path == d / "sample.vcf"
assert isinstance(foo.localizer, UrlLocalizer)
assert cast(UrlLocalizer, foo.localizer).http_headers == {
"Auth-Header-Token": "<PASSWORD>"
}
with open(foo.path, "rt") as inp:
assert inp.read() == "foo"
```
#### File: pytest-wdl/tests/test_executors.py
```python
import json
from typing import cast
import pytest
from pytest_wdl.core import EXECUTORS, DefaultDataFile, create_executor
from pytest_wdl.executors import (
ExecutionFailedError, get_workflow_inputs, make_serializable, validate_outputs
)
from pytest_wdl.utils import tempdir
@pytest.mark.integration
@pytest.mark.parametrize("executor", EXECUTORS.keys())
def test_executors(workflow_data, workflow_runner, executor):
inputs = {
"in_txt": workflow_data["in_txt"],
"in_int": 1
}
outputs = {
"out_txt": workflow_data["out_txt"],
"out_int": 1
}
workflow_runner(
"test.wdl",
inputs,
outputs,
executors=[executor]
)
# Test with the old workflow_runner signature
workflow_runner(
"test.wdl",
"cat_file",
inputs,
outputs,
executors=[executor]
)
@pytest.mark.integration
def test_multiple_executors(workflow_data, workflow_runner):
inputs = {
"in_txt": workflow_data["in_txt"],
"in_int": 1
}
outputs = {
"out_txt": workflow_data["out_txt"],
"out_int": 1
}
workflow_runner(
"test.wdl",
inputs,
outputs,
executors=EXECUTORS.keys()
)
# TODO: figure out how to override default_executors
# def test_workflow_runner_error(workflow_data, workflow_runner):
# inputs = {
# "in_txt": workflow_data["in_txt"],
# "in_int": 1
# }
# outputs = {
# "out_txt": workflow_data["out_txt"],
# "out_int": 1
# }
#
# with pytest.raises(RuntimeError):
# workflow_runner(
# "test.wdl",
# inputs,
# outputs,
# executors=[]
# )
@pytest.mark.integration
@pytest.mark.parametrize("executor", ["miniwdl"])
def test_task(workflow_data, workflow_runner, executor):
inputs = {
"in_txt": workflow_data["in_txt"],
}
outputs = {
"out_txt": workflow_data["out_txt"],
}
workflow_runner(
"test.wdl",
inputs,
outputs,
executors=[executor],
task_name="cat"
)
@pytest.mark.integration
@pytest.mark.parametrize("executor", EXECUTORS.keys())
def test_execution_failure(workflow_data, workflow_runner, executor):
inputs = {
"in_txt": workflow_data["in_txt"],
"in_int": 1,
"fail": True
}
outputs = {
"out_txt": workflow_data["out_txt"],
"out_int": 1
}
with pytest.raises(ExecutionFailedError) as exc_info:
workflow_runner(
"test.wdl",
inputs,
outputs,
executors=[executor]
)
err = cast(ExecutionFailedError, exc_info.value)
assert "foo_fail" in err.failed_task
assert err.failed_task_exit_status == 1
def test_get_workflow_inputs():
actual_inputs_dict, inputs_path = get_workflow_inputs(
{"bar": 1}, namespace="foo"
)
assert inputs_path.exists()
with open(inputs_path, "rt") as inp:
assert json.load(inp) == actual_inputs_dict
assert actual_inputs_dict == {
"foo.bar": 1
}
with tempdir() as d:
inputs_file = d / "inputs.json"
actual_inputs_dict, inputs_path = get_workflow_inputs(
{"bar": 1}, inputs_file, "foo"
)
assert inputs_file == inputs_path
assert inputs_path.exists()
with open(inputs_path, "rt") as inp:
assert json.load(inp) == actual_inputs_dict
assert actual_inputs_dict == {
"foo.bar": 1
}
with tempdir() as d:
inputs_file = d / "inputs.json"
inputs_dict = {"foo.bar": 1}
with open(inputs_file, "wt") as out:
json.dump(inputs_dict, out)
actual_inputs_dict, inputs_path = get_workflow_inputs(
inputs_file=inputs_file, namespace="foo"
)
assert inputs_file == inputs_path
assert inputs_path.exists()
with open(inputs_path, "rt") as inp:
assert json.load(inp) == actual_inputs_dict
assert actual_inputs_dict == inputs_dict
def test_make_serializable():
assert make_serializable(1) == 1
assert make_serializable("foo") == "foo"
assert make_serializable((1.1, 2.2)) == [1.1, 2.2]
with tempdir() as d:
foo = d / "foo"
with open(foo, "wt") as out:
out.write("foo")
df = DefaultDataFile(foo)
assert make_serializable(df) == foo
assert make_serializable([df]) == [foo]
assert make_serializable({"a": df}) == {"a": foo}
class Obj:
def __init__(self, a: str, b: int):
self.a = a
self.b = b
def as_dict(self):
return {
"a": self.a,
"b": self.b
}
assert make_serializable(Obj("hi", 1)) == {"a": "hi", "b": 1}
def test_create_executor():
with pytest.raises(RuntimeError):
create_executor("foo", [], None)
def test_validate_outputs():
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": 1}, {"bar": 1}, ""
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": 1}, {"baz": 1}, "foo"
)
validate_outputs(
{"foo.bar": None}, {"bar": None}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": None}, {"bar": 1}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": 1}, {"bar": None}, "foo"
)
validate_outputs(
{"foo.bar": [1, 2, 3]}, {"bar": [1, 2, 3]}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": [1, 2, 3]}, {"bar": [1, 2]}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": [1, 2, 3]}, {"bar": [3, 2, 1]}, "foo"
)
validate_outputs(
{"foo.bar": {"a": 1}}, {"bar": {"a": 1}}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": {"a": 1}}, {"bar": {"b": 1}}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": {"a": 1}}, {"bar": {"a": 1, "b": 2}}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": {"a": 1}}, {"bar": {"a": 2}}, "foo"
)
with pytest.raises(AssertionError):
validate_outputs(
{"foo.bar": 1}, {"bar": 2}, "foo"
)
```
|
{
"source": "jdie00/COTest",
"score": 3
}
|
#### File: tool/tools/file_tools.py
```python
import os
import tools.command_tools as ct
def delete_if_exists(path):
if path == '' or path == '/' or path == '/*':
return
if os.path.exists(path):
ct.execmd('rm -rf ' + path)
def create_dir_if_not_exist(path):
if not os.path.exists(path):
ct.execmd('mkdir ' + path)
def get_file_content(path):
    with open(path, 'r') as f:
        content = f.read()
    return content
def get_file_lines(path):
c = get_file_content(path)
if c == '':
return ''
if c[-1] == '\n':
return c[:-1].split('\n')
else:
return c.split('\n')
def put_file_content(path, content):
    with open(path, 'a+') as f:
        f.write(content)
```
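A small usage sketch of the helpers above; the file name is arbitrary. Note that `put_file_content` opens in append mode, so repeated calls accumulate content rather than overwrite it.
```python
# Illustrative round trip; assumes the functions above are in scope and
# "example.txt" is a scratch file.
put_file_content('example.txt', 'a\nb\n')
assert get_file_content('example.txt') == 'a\nb\n'
assert get_file_lines('example.txt') == ['a', 'b']  # trailing newline dropped
```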
#### File: tool/tools/list_tools.py
```python
def extract(l, s):
res = []
for elem in l:
if s in elem:
res.append(elem)
return res
def strip(l):
res = []
for elem in l:
res.append(elem.strip())
return res
def get_first_word(l):
res = []
for elem in l:
res.append(elem[:elem.find(' ')])
return res
def get_diff(l1, l2):
diff1 = []
diff2 = []
common = []
for s1 in l1:
if s1 not in l2:
diff1.append(s1)
for s2 in l2:
if s2 not in l1:
diff2.append(s2)
for s1 in l1:
if s1 in l2:
common.append(s1)
return [diff1, common, diff2]
def trim_empty_strings(l):
res = []
for string in l:
if string != '':
res.append(string)
return res
def shuffle(l):
import random
res = l[:]
for i in range(len(res)):
inx = random.randint(0, len(res) - 1)
tmp = res[i]
res[i] = res[inx]
res[inx] = tmp
return res
```
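A worked example of `get_diff` from the module above, to make the return layout explicit: items only in the first list, common items, then items only in the second list.
```python
# Grounded in the definitions above; order within each sub-list follows the inputs.
assert get_diff([1, 2, 3], [2, 3, 4]) == [[1], [2, 3], [4]]
assert trim_empty_strings(['a', '', 'b']) == ['a', 'b']
```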
|
{
"source": "jdiego/coveragepy",
"score": 2
}
|
#### File: coveragepy/coverage/xmlreport.py
```python
import os
import sys
import time
import xml.dom.minidom
from coverage import env
from coverage import __url__, __version__, files
from coverage.backward import iitems
from coverage.misc import isolate_module
from coverage.report import get_analysis_to_report
from coverage.backward import SimpleNamespace
os = isolate_module(os)
DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
def rate(hit, num):
"""Return the fraction of `hit`/`num`, as a string."""
if num == 0:
return "1"
else:
return "%.4g" % (float(hit) / num)
def convert_to_dict(tup):
di = {}
for a, b in tup:
di.setdefault(a, []).append(b)
return di
class XmlReporter(object):
"""
A reporter for writing Cobertura-style XML coverage results.
"""
EMPTY = "(empty)"
def __init__(self, coverage, report_name=None):
self.coverage = coverage
self.config = self.coverage.config
#
self.report_name = report_name
#
self.source_paths = set()
if self.config.source:
for src in self.config.source:
if os.path.exists(src):
if not self.config.relative_files:
src = files.canonical_filename(src)
self.source_paths.add(src)
self.packages = {}
self.xml_out = None
self.is_class_level = False
def report(self, morfs=None, outfile=None):
"""
Generate a Cobertura-compatible XML report for `morfs`.
`morfs` is a list of modules or file names.
`outfile` is a file object to write the XML to.
"""
# Initial setup.
outfile = outfile or sys.stdout
has_arcs = self.coverage.get_data().has_arcs()
# Create the DOM that will store the data.
impl = xml.dom.minidom.getDOMImplementation()
self.xml_out = impl.createDocument(None, "coverage", None)
# Write header stuff.
xcoverage = self.xml_out.documentElement
xcoverage.setAttribute("version", __version__)
xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
xcoverage.appendChild(self.xml_out.createComment(" Generated by coverage.py: %s " % __url__))
xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL))
# Call xml_file for each file in the data.
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.xml_file(fr, analysis, has_arcs)
xsources = self.xml_out.createElement("sources")
xcoverage.appendChild(xsources)
# Populate the XML DOM with the source info.
for path in sorted(self.source_paths):
xsource = self.xml_out.createElement("source")
xsources.appendChild(xsource)
txt = self.xml_out.createTextNode(path)
xsource.appendChild(txt)
lnum_tot, lhits_tot = 0, 0
bnum_tot, bhits_tot = 0, 0
xpackages = self.xml_out.createElement("packages")
xcoverage.appendChild(xpackages)
# Populate the XML DOM with the package info.
for pkg_name, pkg_data in sorted(iitems(self.packages)):
modules_elts, lhits, lnum, bhits, bnum = pkg_data
xpackage = self.xml_out.createElement("package")
xpackages.appendChild(xpackage)
xclasses = self.xml_out.createElement("classes")
xpackage.appendChild(xclasses)
#
for _, (class_elts, fn_elts) in sorted(iitems(modules_elts)):
for class_elt in class_elts:
xclasses.appendChild(class_elt)
#
for fn in fn_elts:
xpackage.appendChild(fn)
xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
xpackage.setAttribute("line-rate", rate(lhits, lnum))
xpackage.setAttribute("hits", str(lhits))
xpackage.setAttribute("lines", str(lnum))
branch_rate = rate(bhits, bnum) if has_arcs else "0"
xpackage.setAttribute("branch-rate", branch_rate)
xpackage.setAttribute("complexity", "0")
lnum_tot += lnum
lhits_tot += lhits
bnum_tot += bnum
bhits_tot += bhits
xcoverage.setAttribute("lines-valid", str(lnum_tot))
xcoverage.setAttribute("lines-covered", str(lhits_tot))
xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
if has_arcs:
xcoverage.setAttribute("branches-valid", str(bnum_tot))
xcoverage.setAttribute("branches-covered", str(bhits_tot))
xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
else:
xcoverage.setAttribute("branches-covered", "0")
xcoverage.setAttribute("branches-valid", "0")
xcoverage.setAttribute("branch-rate", "0")
xcoverage.setAttribute("complexity", "0")
#
if self.report_name:
xcoverage.setAttribute("name", self.report_name)
# Write the output file.
outfile.write(serialize_xml(self.xml_out))
# Return the total percentage.
denom = lnum_tot + bnum_tot
pct = 0.0 if denom == 0 else 100.0 * (lhits_tot + bhits_tot) / denom
return pct
def is_property_tag(self, tokens_list):
tokens = convert_to_dict(tokens_list)
key = tokens.get('op', [''])[0]
nam = tokens.get('nam', [''])[0]
#
is_tag = key == '@'
if is_tag:
if nam in ['staticmethod', 'classmethod']:
self.is_class_level = True
#
return is_tag
def is_member_fn(self, tokens):
for token, value in tokens:
if token == 'nam' and (value in ['self', 'cls']):
return True
return False
def process_tokens(self, tokens_list, tag):
tokens = convert_to_dict(tokens_list)
key = tokens.get('key', [''])[0]
name = tokens.get('nam', [''])[0]
if key == tag:
return True, name
return False, None
def extract_names(self, fr):
filename = fr.filename.replace("\\", "/")
for source_path in self.source_paths:
source_path = files.canonical_filename(source_path)
if filename.startswith(source_path.replace("\\", "/") + "/"):
rel_name = filename[len(source_path)+1:]
break
else:
rel_name = fr.relative_filename()
self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
dirname = os.path.dirname(rel_name) or u"."
dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
return dirname, rel_name
def create_class(self, name, rel_name, lineno):
xclass = self.xml_out.createElement("class")
xclass.setAttribute("name", name)
xclass.setAttribute("filename", rel_name.replace("\\", "/"))
xclass.setAttribute("complexity", "0")
xclass.first_line = lineno
return xclass
def set_class_stats(self, xclass, end_line, analysis):
first_line = xclass.first_line
filtered = [smt for smt in analysis.statements if smt >=first_line and smt <=end_line]
class_lines = len(filtered)
xclass.setAttribute("lines", str(class_lines))
class_hits = 0
filtered_missing = [smt for smt in analysis.missing if smt >=first_line and smt <=end_line]
#
class_hits = class_lines - len(filtered_missing)
xclass.setAttribute("hits", str(class_hits))
# Finalize the statistics that are collected in the XML DOM.
xclass.setAttribute("line-rate", rate(class_hits, class_lines))
def set_method_stats(self, xmethod):
method_hits = 0
method_misses = 0
for child in xmethod.childNodes:
if child.getAttribute('hits') == "1":
method_hits += 1
else:
method_misses += 1
#
method_lines = len(xmethod.childNodes)
xmethod.setAttribute("lines", str(method_lines))
xmethod.setAttribute("hits", str(method_hits))
xmethod.setAttribute("misses", str(method_misses))
def process_class(self, rel_name, lineno, tokens, xclass, xmethod, analysis):
found, name = self.process_tokens(tokens, "class")
if found:
#
if xclass:
if xmethod:
xclass.appendChild(xmethod)
#
last_line = lineno
for smt in analysis.statements:
if smt < lineno:
last_line = smt
#
self.set_class_stats(xclass, last_line, analysis)
#
xclass = self.create_class(name, rel_name, lineno)
return True, xclass
#
return False, xclass
def process_method(self, xmethod, tokens, xclass, free_fn, rel_name):
found, method_name = self.process_tokens(tokens, "def")
if found:
#
if xmethod:
self.set_method_stats(xmethod)
#
xmethod = self.xml_out.createElement("method")
xmethod.setAttribute("name", method_name)
if xclass and (self.is_member_fn(tokens) or self.is_class_level):
xclass.appendChild(xmethod)
self.is_class_level = False
else:
filename = rel_name.replace("\\", "/")
xmethod.setAttribute("filename", filename)
free_fn.append(xmethod)
return True, xmethod
return False, xmethod
def mount_package(self, dirname):
package_name = dirname.replace("/", ".")
package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
return package
def process_line(self, line, has_arcs, branch_stats, missing_branch_arcs, analysis):
# Processing Line
xline = self.xml_out.createElement("line")
xline.setAttribute("number", str(line))
# Q: can we get info about the number of times a statement is
# executed? If so, that should be recorded here.
is_hit = (line not in analysis.missing)
xline.setAttribute("hits", str(int(is_hit)))
if has_arcs:
if line in branch_stats:
total, taken = branch_stats[line]
xline.setAttribute("branch", "true")
xline.setAttribute(
"condition-coverage",
"%d%% (%d/%d)" % (100*taken//total, taken, total)
)
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
xline.setAttribute("missing-branches", ",".join(annlines))
return xline
def xml_file(self, fr, analysis, has_arcs):
"""Add to the XML report for a single file."""
if self.config.skip_empty and analysis.numbers.n_statements == 0:
return
#
dirname, rel_name = self.extract_names(fr)
package = self.mount_package(dirname)
# Free functions
free_fn = []
#
xclasses = []
xclass, xmethod = None, None
branch_stats = analysis.branch_stats()
missing_branch_arcs = analysis.missing_branch_arcs()
line = 1
self.is_class_level = False
for line, tokens in enumerate(fr.source_token_lines(), start=1):
if line not in analysis.statements:
continue
if tokens:
is_tag = self.is_property_tag(tokens)
if is_tag:
continue
# We found a new class definition?
created, xclass = self.process_class(rel_name, line, tokens, xclass, xmethod, analysis)
if created:
xclasses.append(xclass)
continue
#
created, xmethod = self.process_method(xmethod, tokens, xclass, free_fn, rel_name)
if created:
if not self.is_class_level and xclass:
self.set_class_stats(xclass, line, analysis)
continue
#
# Processing a line
xline = self.process_line(line, has_arcs, branch_stats, missing_branch_arcs, analysis)
if xmethod:
xmethod.appendChild(xline)
elif xclass:
xclass.appendChild(xline)
#
if xclass and self.is_class_level:
self.set_class_stats(xclass, line, analysis)
if xmethod:
self.set_method_stats(xmethod)
# Rename
package[0][rel_name] = (xclasses, free_fn)
if has_arcs:
classes_branches = sum(t for t, k in branch_stats.values())
missing_branches = sum(t - k for t, k in branch_stats.values())
classes_br_hits = classes_branches - missing_branches
else:
classes_branches = 0.0
classes_br_hits = 0.0
#
classes_lines = len(analysis.statements)
classes_hits = classes_lines - len(analysis.missing)
#
package[1] += classes_hits
package[2] += classes_lines
package[3] += classes_br_hits
package[4] += classes_branches
def serialize_xml(dom):
"""Serialize a minidom node to XML."""
out = dom.toprettyxml()
if env.PY2:
out = out.encode("utf8")
return out
```
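Two worked examples of the small helpers the reporter above leans on: `rate` reports full coverage when there is nothing to cover, and `convert_to_dict` groups token tuples by kind.
```python
# Helper behaviour, grounded in the definitions above.
assert rate(3, 4) == "0.75"
assert rate(0, 0) == "1"  # nothing to cover counts as fully covered
assert convert_to_dict([("op", "@"), ("nam", "staticmethod")]) == {
    "op": ["@"],
    "nam": ["staticmethod"],
}
```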
|
{
"source": "jdiegoh3/distributed_computing",
"score": 3
}
|
#### File: distributed_computing/exercise11/server_interm_layer.py
```python
from xmlrpc.server import SimpleXMLRPCRequestHandler
import lib.ProtocolUtils as protocolUtils
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
class RegisteredFunctions:
@staticmethod
def add(num1, num2):
result = add_server.call_function(num1, num2)
return result
@staticmethod
def mult(num1, num2):
result = mul_server.call_function(num1, num2)
return result
@staticmethod
def div(num1, num2):
result = div_server.call_function(num1, num2)
return result
@staticmethod
def root(num1, num2):
result = root_server.call_function(num1, num2)
return result
@staticmethod
def pow(num1, num2):
result = pow_server.call_function(num1, num2)
return result
@staticmethod
def sub(num1, num2):
result = sub_server.call_function(num1, num2)
return result
@staticmethod
def log(num1, num2):
result = log_server.call_function(num1, num2)
return result
def main():
# Create server
server = protocolUtils.ServerThread("localhost", 8060)
server.register_class_functions(RegisteredFunctions())
server.start()
if __name__ == "__main__":
# Create the rpc to the specifics servers
add_server = protocolUtils.ClientThread('http://localhost:9001')
add_server.start()
sub_server = protocolUtils.ClientThread('http://localhost:9002')
sub_server.start()
div_server = protocolUtils.ClientThread('http://localhost:9003')
div_server.start()
mul_server = protocolUtils.ClientThread('http://localhost:9004')
mul_server.start()
pow_server = protocolUtils.ClientThread('http://localhost:9005')
pow_server.start()
root_server = protocolUtils.ClientThread('http://localhost:9006')
root_server.start()
log_server = protocolUtils.ClientThread('http://localhost:9007')
log_server.start()
main()
```
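A hedged client sketch for the intermediate layer above. It assumes `protocolUtils.ServerThread` serves the registered methods over standard XML-RPC on port 8060 at the `/RPC2` path restricted by `RequestHandler`.
```python
# Hypothetical client; the ServerThread/XML-RPC assumption is noted above.
import xmlrpc.client

proxy = xmlrpc.client.ServerProxy("http://localhost:8060/RPC2")
print(proxy.add(2, 3))   # forwarded to the add server on port 9001
print(proxy.mult(2, 3))  # forwarded to the mul server on port 9004
```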
#### File: distributed_computing/exercise12/pow_server.py
```python
import math
import lib.ProtocolUtils as protocolUtils
class RegisteredFunctions:
@staticmethod
def function(num1, num2):
result = math.pow(float(num1), float(num2))
return result
def main():
# Create server
server = protocolUtils.ServerThread("localhost", 9005)
server.register_class_functions(RegisteredFunctions())
server.start()
print("Running pow server...")
if __name__ == '__main__':
main()
```
#### File: distributed_computing/exercise13/server.py
```python
import socket
import sys
import threading as thread
import lib.ProtocolUtils as protocolUtils
def message_handler(conn, address):
while True:
raw_data = conn.recv(1024)
received_data = protocolUtils.MessageHandler(raw_data).message_loads()
if received_data[0] == "list_groups":
print("New list_groups operation from ", address)
conn.send(process_group_instance.get_all_groups())
elif received_data[0] == "create_group":
print("New group created by ", address)
process_group_instance.add_process(address)
conn.send("Group already created.".encode())
elif received_data[0] == "join_group":
error = False
try:
group_id = int(received_data[2])
except ValueError as e:
error = True
conn.send("Incorrect group id.".encode())
print("New request to join into a group from ", address, " to the group ", group_id)
result = process_group_instance.add_process(address, group_id)
if result:
conn.send("You're now into the group".encode())
else:
conn.send("Incorrect group id.".encode())
elif received_data[0] == "send_message":
error = False
try:
group_id = int(received_data[2])
except ValueError as e:
error = True
conn.send("Incorrect group id.".encode())
list_members = process_group_instance.get_group(group_id)
print("New message from ", address, " to the group ", group_id)
if list_members:
send_messages = thread.Thread(target=thread_broadcast_group, args=(list_members, received_data[1],))
send_messages.start()
else:
error = True
conn.send("Incorrect group id or the group doesn't have members.".encode())
if not error:
conn.send("Message sent.".encode())
else:
conn.send("yesss".encode())
# Close the thread to save hardware.
# sys.exit()
def thread_broadcast_group(list_of_members, message):
for member in list_of_members:
try:
socket_temp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_temp.connect((member[0], member[1]))
socket_temp.send(message.encode())
except Exception as e:
print("Disconnected client.")
# while True:
# socketInsa = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# socketInsa.connect((address[0], address[1]))
# socketInsa.send("Yupale".encode())
if __name__ == "__main__":
process_group_instance = protocolUtils.ProcessGroup()
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('', 9999))
socket_instance.listen(10)
threads_list = []
print("Server running ...")
while True:
conn, address = socket_instance.accept()
print("New connection entry from ", address)
temp_thread = thread.Thread(target=message_handler, args=(conn, address,))
threads_list.append(temp_thread)
temp_thread.start()
```
#### File: distributed_computing/exercise3/sub_server.py
```python
import socketserver
import lib.ProtocolUtils as protocolUtils
class MessageHandler(socketserver.BaseRequestHandler):
@staticmethod
def operation(a, b):
print("New operation in queue ", a, " ", b)
try:
return float(a)-float(b)
except ValueError:
return "The operands requires be numbers"
def handle(self):
protocol_instance = protocolUtils.MessageHandler(self.request.recv(1024))
array_operands = protocol_instance.message_loads()
if array_operands[1] == "-":
result = self.operation(array_operands[0], array_operands[2])
self.request.send(str(result).encode())
else:
self.request.send("Bad Request")
def main():
server = socketserver.TCPServer(("localhost", 9992), MessageHandler)
print("Server sub running ...")
server.serve_forever()
if __name__ == "__main__":
main()
```
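A hedged client sketch for the subtraction server above. It assumes `lib.ProtocolUtils.MessageHandler` uses the same pipe-delimited framing as the `MyUtils.MessageHandler` shown later in this repository, i.e. `operand|operator|operand`.
```python
# Hypothetical client; the "5|-|2" framing is an assumption about ProtocolUtils.
import socket

with socket.create_connection(("localhost", 9992)) as sock:
    sock.sendall("5|-|2".encode())
    print(sock.recv(1024).decode())  # expected: "3.0"
```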
#### File: distributed_computing/exercise9/server_of_names.py
```python
import socket
import sys
import lib.ProtocolUtils as protocolUtils
import threading as thread
def switch_operations(operation):
switcher = {
"+": ["localhost", 9991],
"-": ["localhost", 9992],
"*": ["localhost", 9993],
"/": ["localhost", 9994],
"^": ["localhost", 9995],
"log": ["localhost", 9996],
"root": ["localhost", 9997],
}
return switcher.get(operation, None)
def message_handler(conn, addr):
raw_data = conn.recv(1024)
data = protocolUtils.MessageHandler(raw_data).message_loads()
server_interface = switch_operations(data[1])
result = protocolUtils.MessageBuilder(server_interface[0], server_interface[1], False).message_builder()
conn.send(result.encode())
# Close the thread to save hardware.
# sys.exit()
if __name__ == "__main__":
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('', 9999))
socket_instance.listen(10)
threads_list = []
print("Server running ...")
while True:
conn, addr = socket_instance.accept()
temp_thread = thread.Thread(target=message_handler, args=(conn, addr,))
threads_list.append(temp_thread)
temp_thread.start()
```
#### File: distributed_computing/exercise9/sub_server.py
```python
import socket
import sys
import lib.ProtocolUtils as protocolUtils
import threading as thread
def message_handler(conn, addr):
data = protocolUtils.MessageHandler(conn.recv(1024)).message_loads()
print("New operation in queue ", data[0], " ", data[2])
try:
result = str(float(data[0]) - float(data[2]))
except ValueError:
result = "The operands requires be numbers"
conn.send(result.encode())
# Close the thread to save hardware.
# sys.exit()
if __name__ == "__main__":
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('', 9992))
socket_instance.listen(10)
threads_list = []
print("Add Server running ...")
while True:
conn, addr = socket_instance.accept()
temp_thread = thread.Thread(target=message_handler, args=(conn, addr,))
threads_list.append(temp_thread)
temp_thread.start()
```
#### File: Projects/project1/server.py
```python
import lib.MyUtils as MyUtils
import socket
import random
import threading
def client_handler(conn, address):
while True:
try:
raw_data = conn.recv(1024)
split_data = MyUtils.MessageHandler(raw_data).message_loads()
print(split_data)
operation = split_data[0]
if operation == "not_working":
try:
processor = split_data[1]
ram = split_data[2]
except Exception as e:
# Return
message = MyUtils.MessageBuilder([0], "400")
conn.send(message.get_message())
identifier = str(address[0]) + str(address[1])
body = {
"ip": address[0],
"port": address[1],
"cpu": processor,
"ram": ram
}
unclassified_clients.remove_element(identifier)
occupied_devices.remove_element(identifier)
free_devices.add_element(identifier, body)
print(free_devices.list_elements())
message = MyUtils.MessageBuilder([0], "received")
conn.send(message.get_message())
elif operation == "occupied":
try:
processor = split_data[1]
ram = split_data[2]
except Exception as e:
# Return
message = MyUtils.MessageBuilder([0], "400")
conn.send(message.get_message())
identifier = str(address[0]) + str(address[1])
body = {
"ip": address[0],
"port": address[1],
"cpu": processor,
"ram": ram
}
unclassified_clients.remove_element(identifier)
free_devices.remove_element(identifier)
occupied_devices.add_element(identifier, body)
print(occupied_devices.list_elements())
message = MyUtils.MessageBuilder([0], "received")
conn.send(message.get_message())
elif operation == "get_resources":
try:
processor = split_data[1]
ram = split_data[2]
except Exception as e:
# Return
message = MyUtils.MessageBuilder([0], "400")
conn.send(message.get_message())
message = MyUtils.MessageBuilder([0], "400")
if processor and ram:
device_list = free_devices.list_elements()
for device in device_list:
if device_list[device].get("cpu", None) >= processor and device_list[device].get("ram") >= ram:
message = MyUtils.MessageBuilder([device_list[device].get("ip"), device_list[device].get("port")], "not_working")
else:
device_list = free_devices.list_elements()
rand = random.randint(0, len(device_list)-1)
device = free_devices.list_elements()[list(free_devices.list_elements())[rand]]
message = MyUtils.MessageBuilder([device.get("ip"), device.get("port")], "not_working")
conn.send(message.get_message())
except ConnectionResetError as e:
identifier = str(address[0]) + str(address[1])
unclassified_clients.remove_element(identifier)
free_devices.remove_element(identifier)
occupied_devices.remove_element(identifier)
except Exception as e:
print("Error:", e)
if __name__ == '__main__':
free_devices = MyUtils.FreeDevices()
occupied_devices = MyUtils.OccupiedDevices()
unclassified_clients = MyUtils.UnClassifiedClients()
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('192.168.0.19', 9999))
socket_instance.listen(10)
threads_list = []
print("Server running ...")
while True:
conn, address = socket_instance.accept()
print("New connection entry from ", address)
identifier = str(address[0]) + str(address[1])
body = {
"ip": address[0],
"port": address[1],
"cpu": None,
"ram": None
}
unclassified_clients.add_element(identifier, body)
temp_thread = threading.Thread(target=client_handler, args=(conn, address,))
threads_list.append(temp_thread)
temp_thread.start()
```
#### File: project2/lib/MyUtilsClient.py
```python
import threading
import socket
import MyUtils as MyUtils
class Client(object):
identifier = ""
server_host = "LocalHost"
server_port = 9999
my_host = ""
my_port_to_listen = 0
socket_to_server = None
listener_socket = None
my_pages_numbers = []
def __init__(self, server_host, server_port, identifier=""):
print("Client it's running ...")
self.identifier = identifier
# Server variable instances
self.server_host = server_host
self.server_port = server_port
self.socket_to_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket_to_server.connect((self.server_host, self.server_port))
# Listener variable
self.listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.my_host = self.listener_socket.getsockname()[0]
self.my_port_to_listen = self.listener_socket.getsockname()[1]
print("my_host: "+self.my_host+", my_port_to_listen: "+str(self.my_port_to_listen))
# Notify state to server
self.notify_status_to_server()
listen_thread = threading.Thread(target=self.start_listening)
listen_thread.start()
def start_listening(self):
self.listener_socket.listen(10)
while 1:
sc, addr = self.listener_socket.accept()
print("connection IP: " + str(addr[0]) + " port: " + str(addr[1]))
instance = threading.Thread(target=self.read_message, args=(sc, addr))
instance.start()
def read_message(self, sc, addr):
message = sc.recv(1024)
split_message = MyUtils.MessageHandler(message).message_loads()
if split_message[0] == 'need_page':
pass
elif split_message[0] == 'change_page':
pass
else:
sc.send(MyUtils.MessageBuilder(['No exist function: '+split_message[0]], 'error').get_message())
sc.close()
def notify_status_to_server(self):
if self.identifier == "":
message = MyUtils.MessageBuilder([self.my_port_to_listen], 'new_client').get_message()
else:
message = MyUtils.MessageBuilder([self.my_port_to_listen, self.identifier], 'new_client').get_message()
print("Sending state to server: " + message.decode())
self.socket_to_server.send(message)
result = self.socket_to_server.recv(1024)
split_message = MyUtils.MessageHandler(result).message_loads()
print(result.decode())
if split_message[0] != 'error':
self.identifier = split_message[0]
i = 1
while i < len(split_message):
self.add_page_number(split_message[i])
i=i+1
else:
print("error: "+split_message[1])
exit()
def add_page_number(self, number):
self.my_pages_numbers.append(number)
    def have_page_number(self, number):
        return number in self.my_pages_numbers
def send_page(socket_to_send, page_number):
file = open("page"+page_number+".txt", "r")
send_file(socket_to_send, file)
def send_file(socket_to_send, file):
while (True):
print "Sending..."
data = file.read(1024)
socket_to_send.send(data)
if len(data) < 1024:
break
file.close()
return True
def receive_page(socket_to_receive, page_number):
file = open("page"+page_number+".txt", "w+")
receive_file(socket_to_receive, file)
def receive_file(socket_to_receive, file):
while (True):
print "Receiving..."
info = socket_to_receive.recv(1024)
file.write(info)
if len(info) < 1024:
break
file.close()
return True
```
#### File: project2/lib/MyUtils.py
```python
import socket
class Elements(object):
def __init__(self, elements):
self.elements = elements
def add_element(self, id, process_info):
self.elements[id] = process_info
def remove_element(self, id):
try:
del self.elements[id]
except KeyError as error:
pass
def list_elements(self):
return self.elements
class FreeDevices(Elements):
elements = {}
def __init__(self):
super().__init__(self.elements)
pass
class OccupiedDevices(Elements):
elements = {}
def __init__(self):
super().__init__(self.elements)
pass
class UnClassifiedClients(Elements):
elements = {}
def __init__(self):
super().__init__(self.elements)
pass
class MessageHandler(object):
body = None
def __init__(self, message):
if not isinstance(message, str):
message = message.decode("utf-8")
self.body = message
def message_loads(self):
if self.body:
result = self.body.split("|")
return result
def send_message(host, port, message):
temporal_socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temporal_socket_instance.connect((host, port))
temporal_socket_instance.send(message)
result = temporal_socket_instance.recv(1024)
return result
class MessageBuilder(object):
message = ""
operation = None
    def __init__(self, message_elements, op=None):
        if op is not None:
            self.message += op + "|"
for string in message_elements:
self.message += str(string) + "|"
def get_message(self):
return self.message.encode()
```
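A worked round trip of the message helpers above, assuming they are in scope: the builder appends a trailing separator, so `message_loads` yields a final empty string.
```python
# Grounded in MessageBuilder/MessageHandler as defined above.
raw = MessageBuilder(["a", 1], "ping").get_message()
assert raw == b"ping|a|1|"
assert MessageHandler(raw).message_loads() == ["ping", "a", "1", ""]
```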
#### File: Projects/project2/server.py
```python
import utils.server_lib as library
from utils.general_utils import PPrint, MessageHandler, MessageBuilder
import socket
import sys
import random
import threading
server_address = "192.168.0.5"
server_port = 9999
connected_clients = library.ConnectedClients()
queued_clients = library.QueuedClients()
def client_handler(connection, address):
while True:
try:
raw_data = connection.recv(1024)
handler = MessageHandler(raw_data).message_loads()
if handler[0] == "valid_client":
PPrint.show("{}{}".format("New valid client connected ", address), "green")
client_info = {
"address": handler[1],
"port": int(handler[2]),
"page_space": library.page_space,
"busy": False,
}
connected_clients.add_element("{}/{}".format(address[0], address[1]), client_info)
library.page_space += 1
                connection.send(MessageBuilder(
                    (str(client_info.get("page_space", "")),),
                    "assigned_space").get_message())
elif handler[0] == "request_pages":
clients_list = connected_clients.list_elements()
result = None
client_instance = None
for client in clients_list:
client_info = clients_list.get(client)
if client_info.get("page_space", None) == int(handler[1]):
client_instance = client_info
result = (client_info.get("address", None), client_info.get("port"))
if result:
if client_instance.get("busy", None):
message = MessageBuilder((), "queued")
listener_info = clients_list.get("{}/{}".format(address[0], address[1]))
listener_bone = (listener_info.get("address", None), listener_info.get("port", None))
list_wait = queued_clients.list_elements()
element = list_wait.get(int(handler[1]), None)
if element:
element.append(listener_bone)
else:
queued_clients.add_element(int(handler[1]), [listener_bone])
print(list_wait)
else:
client_instance["busy"] = True
message = MessageBuilder(result, "call_him")
else:
message = MessageBuilder((), "no_exist")
connection.send(message.get_message())
elif handler[0] == "request_list_pages":
PPrint.show("{}{}".format("New request to list space of pages from: ", address), "green")
clients_list = connected_clients.list_elements()
result = []
for client in clients_list:
client_info = clients_list.get(client, None)
result.append(client_info.get("page_space", None))
message = MessageBuilder(result, "list_pages")
connection.send(message.get_message())
elif handler[0] == "free_resource":
PPrint.show("{}{}".format("New request to free a resource from: ", address), "green")
clients_list = connected_clients.list_elements()
client_owner = None
for client in clients_list:
client_info = clients_list.get(client, None)
if client_info.get("page_space", None) == int(handler[1]):
client_owner = client_info
client_info["busy"] = False
message = MessageBuilder((), "request_ok")
list_wait = queued_clients.list_elements()
element = list_wait.get(int(handler[1]), None)
client_to_call = None
if element:
client_to_call = element.pop()
if client_to_call and client_owner:
request_file = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
request_file.connect(client_to_call)
message_use_res = MessageBuilder((client_owner.get("address", None), client_owner.get("port", None), client_owner.get("page_space", None)), "call_resource")
request_file.send(message_use_res.get_message())
connection.send(message.get_message())
else:
connection.send("Roger".encode())
except Exception as e:
if isinstance(e, ConnectionAbortedError):
PPrint.show("{}{}".format("Connection lost or aborted with the client ", address), "yellow")
else:
PPrint.show("{}{}".format("Connection lost or aborted with the client ", address), "red")
sys.exit(0)
if __name__ == '__main__':
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind((server_address, server_port))
socket_instance.listen(10)
PPrint.show("{}{}".format("Server running on ", socket.gethostbyname(socket.gethostname())), "green")
while True:
connection, address = socket_instance.accept()
PPrint.show("{}{}".format("New connection entry from ", address), "green")
temp_thread = threading.Thread(target=client_handler, args=(connection, address,))
temp_thread.start()
```
|
{
"source": "jdieguezbean/kickstarterScraping",
"score": 3
}
|
#### File: core/singlenton/app_path.py
```python
import os
from selenium import webdriver
class AppPath:
class __AppPath:
def __init__(self):
self.path = os.path.abspath(os.getcwd())
path = None
def __new__(cls):
if not AppPath.path:
AppPath.path = AppPath.__AppPath().path
return AppPath.path
```
#### File: core/singlenton/logger.py
```python
import logging
try:
from cStringIO import StringIO # Python 2
except ImportError:
from io import StringIO
class Logger:
class __Logger:
def __init__(self):
logging.basicConfig(filename='downloader.log', level=logging.DEBUG, format='%(asctime)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logging.info(msg='Opening downloader')
def critical(self, msg):
logging.critical(msg)
def error(self, msg):
logging.error(msg)
def warn(self, msg):
logging.warning(msg)
def info(self, msg):
logging.info(msg)
def debug(self, msg):
logging.debug(msg)
instance = None
def __new__(cls):
if not Logger.instance:
Logger.instance = Logger.__Logger()
return Logger.instance
def __getattr__(self, name):
return getattr(self.instance, name)
    def __setattr__(self, name, value):
        return setattr(self.instance, name, value)
```
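A minimal usage sketch of the singleton above: every `Logger()` call returns the same wrapped `__Logger`, so all messages land in the same `downloader.log`.
```python
# Both names refer to the same underlying logger instance.
log_a = Logger()
log_b = Logger()
assert log_a is log_b
log_a.info("scraper started")
log_b.warn("rate limit approaching")
```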
|
{
"source": "jdieter31/pytorch-maml",
"score": 2
}
|
#### File: pytorch-maml/maml/expm.py
```python
import torch
#%%
def torch_expm(A):
""" """
n_A = A.shape[0]
A_fro = torch.sqrt(A.abs().pow(2).sum(dim=(1,2), keepdim=True))
# Scaling step
maxnorm = torch.Tensor([5.371920351148152]).type(A.dtype).to(A.device)
zero = torch.Tensor([0.0]).type(A.dtype).to(A.device)
n_squarings = torch.max(zero, torch.ceil(torch_log2(A_fro / maxnorm)))
Ascaled = A / 2.0**n_squarings
n_squarings = n_squarings.flatten().type(torch.int32)
# Pade 13 approximation
U, V = torch_pade13(Ascaled)
P = U + V
Q = -U + V
R, _ = torch.solve(P, Q) # solve P = Q*R
# Unsquaring step
expmA = [ ]
for i in range(n_A):
l = [R[i]]
for _ in range(n_squarings[i]):
l.append(l[-1].mm(l[-1]))
expmA.append(l[-1])
return torch.stack(expmA)
#%%
def torch_log2(x):
return torch.log(x) / torch.log(torch.Tensor([2.0])).type(x.dtype).to(x.device)
#%%
def torch_pade13(A):
b = torch.Tensor([64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800.,
960960., 16380., 182., 1.]).type(A.dtype).to(A.device)
ident = torch.eye(A.shape[1], dtype=A.dtype).to(A.device)
A2 = torch.matmul(A,A)
A4 = torch.matmul(A2,A2)
A6 = torch.matmul(A4,A2)
U = torch.matmul(A, torch.matmul(A6, b[13]*A6 + b[11]*A4 + b[9]*A2) + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = torch.matmul(A6, b[12]*A6 + b[10]*A4 + b[8]*A2) + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
return U, V
#%%
if __name__ == '__main__':
from scipy.linalg import expm
import numpy as np
n = 10
A = torch.randn(n,3,3)
A[:,2,:] = 0
expm_scipy = np.zeros_like(A)
for i in range(n):
expm_scipy[i] = expm(A[i].numpy())
expm_torch = torch_expm(A)
print('Difference: ', np.linalg.norm(expm_scipy - expm_torch))
```
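For reference, the identity `torch_expm` above exploits is scaling and squaring: the matrix is scaled by a power of two chosen from its Frobenius norm, approximated with the degree-13 Padé rational (the `b` coefficients in `torch_pade13`), and the result is repeatedly squared back up.
```latex
\exp(A) = \bigl(\exp(A/2^{s})\bigr)^{2^{s}},
\qquad s = \max\bigl(0,\ \lceil \log_2(\|A\|_F / 5.3719\ldots) \rceil\bigr)

\exp(A/2^{s}) \approx Q^{-1}P,
\qquad P = U + V,\quad Q = -U + V
```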
#### File: pytorch-maml/maml/utils.py
```python
import torch
from collections import OrderedDict
from torchmeta.modules import MetaModule
from .model import BatchParameter
from .transformer_metric import TransformerMetric
from .constant_metric import ConstantMetric
from .expm import torch_expm as expm
def compute_accuracy(logits, targets):
"""Compute the accuracy"""
with torch.no_grad():
if logits.dim() == 2:
_, predictions = torch.max(logits, dim=1)
accuracy = torch.mean(predictions.eq(targets).float())
else:
_, predictions = torch.max(logits, dim=2)
accuracy = torch.mean(predictions.eq(targets).float(), dim=-1)
return accuracy.detach().cpu().numpy()
def tensors_to_device(tensors, device=torch.device('cpu')):
"""Place a collection of tensors in a specific device"""
if isinstance(tensors, torch.Tensor):
return tensors.to(device=device)
elif isinstance(tensors, (list, tuple)):
return type(tensors)(tensors_to_device(tensor, device=device)
for tensor in tensors)
elif isinstance(tensors, (dict, OrderedDict)):
return type(tensors)([(name, tensors_to_device(tensor, device=device))
for (name, tensor) in tensors.items()])
else:
raise NotImplementedError()
class ToTensor1D(object):
"""Convert a `numpy.ndarray` to tensor. Unlike `ToTensor` from torchvision,
this converts numpy arrays regardless of the number of dimensions.
Converts automatically the array to `float32`.
"""
def __call__(self, array):
return torch.from_numpy(array.astype('float32'))
def __repr__(self):
return self.__class__.__name__ + '()'
def make_warp_model(model, constant=False):
metric_params = []
for parameter in model.parameters():
if isinstance(parameter, BatchParameter):
metric_params.append(parameter)
"""
for layer in model.modules():
if isinstance(layer, BatchLinear):
metric_params.append(layer.weight)
"""
if constant:
return ConstantMetric(metric_params)
else:
return TransformerMetric(metric_params)
def kronecker_warp(grad, kronecker_matrices) -> torch.Tensor:
"""
Function for doing Kronecker based warping of gradient batches of an
m x n matrix parameter
Params:
        grad (torch.Tensor): gradient batch of shape [meta_batch_size, batch_size, m, n]
kronecker_matrices (Tuple[torch.Tensor, torch.Tensor]): kronecker
matrices to do the warping. First element of tuple is of shape
            [meta_batch_size, batch_size, n, n]; the second is of shape
[meta_batch_size, batch_size, m, m]
"""
input_matrices = kronecker_matrices[0]
output_matrices = kronecker_matrices[1]
all_matrices = input_matrices + output_matrices
grad = grad.sum(dim=-3)
grad_size = grad.size()
first_matrix = all_matrices[0]
first_matrix = first_matrix.view(-1, first_matrix.size(-2), first_matrix.size(-1))
temp = grad.view(-1, all_matrices[1].size(-1), first_matrix.size(-1))
first_matrix = first_matrix.unsqueeze(1).expand(
first_matrix.size(0), temp.size(0) // first_matrix.size(0), *first_matrix.size()[1:]
).reshape(-1, first_matrix.size(-2), first_matrix.size(-1))
temp = torch.bmm(temp, first_matrix)
right_size = first_matrix.size(-1)
for i, matrix in enumerate(all_matrices[1:]):
matrix = matrix.view(-1, matrix.size(-2), matrix.size(-1))
matrix = matrix.unsqueeze(1).expand(
matrix.size(0), temp.size(0) // matrix.size(0), *matrix.size()[1:]
).reshape(-1, matrix.size(-2), matrix.size(-1))
temp = torch.bmm(matrix, temp)
if i < len(all_matrices) - 2:
right_size *= matrix.size(-1)
temp = temp.view(-1, all_matrices[i + 2].size(-1), right_size)
return temp.view(grad_size)
def gradient_update_parameters_warp(model,
loss,
params=None,
warp_model=None,
step_size=0.5,
first_order=False,
state=None):
"""Update of the meta-parameters with one step of gradient descent on the
loss function.
Parameters
----------
model : `torchmeta.modules.MetaModule` instance
The model.
loss : `torch.Tensor` instance
The value of the inner-loss. This is the result of the training dataset
through the loss function.
params : `collections.OrderedDict` instance, optional
Dictionary containing the meta-parameters of the model. If `None`, then
the values stored in `model.meta_named_parameters()` are used. This is
useful for running multiple steps of gradient descent as the inner-loop.
step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5)
The step size in the gradient update. If an `OrderedDict`, then the
keys must match the keys in `params`.
first_order : bool (default: `False`)
If `True`, then the first order approximation of MAML is used.
Returns
-------
updated_params : `collections.OrderedDict` instance
Dictionary containing the updated meta-parameters of the model, with one
gradient update wrt. the inner-loss.
"""
if not isinstance(model, MetaModule):
raise ValueError('The model must be an instance of `torchmeta.modules.'
'MetaModule`, got `{0}`'.format(type(model)))
if params is None:
params = OrderedDict(model.meta_named_parameters())
param_jacobs_lst = [[] for _ in range(len(params))]
for i in range(loss.size(0)):
grads = torch.autograd.grad(loss[i], params.values(), retain_graph=True, create_graph=not first_order)
for j, grad in enumerate(grads):
param_jacobs_lst[j].append(grad)
param_jacobs = [torch.stack(param_jacob, dim=1) for param_jacob in param_jacobs_lst]
if warp_model is not None:
warp_model_input = []
for param in warp_model.warp_parameters:
if param.collect_input:
warp_model_input.append([param.input_data, param.grad_data])
kronecker_matrix_logs = warp_model(warp_model_input)
kronecker_matrices = []
for kronecker_matrix_list in kronecker_matrix_logs:
input_matrices = kronecker_matrix_list[0]
output_matrices = kronecker_matrix_list[1]
exp_input_matrices = []
for matrix in input_matrices:
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1))))
#exp_matrix = exp_matrix.reshape(matrix.size())
#exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))
#exp_matrix = torch.bmm(exp_matrix, exp_matrix)
#exp_matrix = exp_matrix.reshape(matrix.size())
exp_input_matrices.append(matrix)
exp_output_matrices = []
for matrix in output_matrices:
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1))))
#exp_matrix = exp_matrix.reshape(matrix.size())
#exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))
#exp_matrix = torch.bmm(exp_matrix, exp_matrix)
#exp_matrix = exp_matrix.reshape(matrix.size())
exp_output_matrices.append(matrix)
kronecker_matrices.append([exp_input_matrices, exp_output_matrices])
updated_params = OrderedDict()
if isinstance(step_size, (dict, OrderedDict)):
for i, ((name, param), grad) in enumerate(zip(params.items(), param_jacobs)):
if warp_model is not None:
grad = kronecker_warp(grad, kronecker_matrices[i])
updated_params[name] = param - step_size[name] * grad
else:
for i, ((name, param), grad) in enumerate(zip(params.items(), param_jacobs)):
if warp_model is not None:
grad = kronecker_warp(grad, kronecker_matrices[i])
updated_params[name] = param - step_size * grad
return updated_params
```
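A minimal inner-loop sketch for the utilities above (illustrative only: `model` is assumed to be a `torchmeta` `MetaModule` with `BatchParameter`-based warp layers, and `compute_inner_losses` / `compute_outer_loss` are hypothetical helpers returning a per-task loss vector and a scalar outer loss):
```python
import torch

warp_model = make_warp_model(model, constant=False)
# Assumes the warp/metric model exposes parameters like an nn.Module.
meta_optimizer = torch.optim.Adam(
    list(model.parameters()) + list(warp_model.parameters()), lr=1e-3)

for batch in task_dataloader:                              # hypothetical task dataloader
    inner_losses = compute_inner_losses(model, batch)      # shape: [meta_batch_size]
    adapted_params = gradient_update_parameters_warp(
        model, inner_losses,
        warp_model=warp_model,
        step_size=0.5,
        first_order=False)
    outer_loss = compute_outer_loss(model, batch, params=adapted_params)

    meta_optimizer.zero_grad()
    outer_loss.backward()
    meta_optimizer.step()
```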
|
{
"source": "jdieter31/riemannian-nlp",
"score": 3
}
|
#### File: riemann/config/manifold_config.py
```python
import re
from typing import Dict
from .config import ConfigDict
from ..manifolds.manifold import RiemannianManifold
class ManifoldConfig(ConfigDict):
"""
ConfigDict to specify a manifold and its properties
"""
name: str = "EuclideanManifold"
dimension: int = 0
params: dict = {}
def get_manifold_instance(self) -> RiemannianManifold:
"""
Gets an instance of the manifold specified in this config
"""
return RiemannianManifold.from_name_params(self.name, self.params)
@classmethod
def from_string(cls, spec) -> 'ManifoldConfig':
pattern = re.compile(r"([ESH])([0-9]+)")
short_forms = {
"E": "EuclideanManifold",
"S": "SphericalManifold",
"H": "PoincareBall",
}
if "x" in spec:
submanifolds, total_dim = [], 0
for subspec in spec.split("x"):
match = pattern.match(subspec)
assert match is not None, f"Invalid spec {spec}"
typ, dim = match.groups()
submanifolds.append({
"name": short_forms[typ],
"dimension": int(dim)
})
total_dim += int(dim)
return cls(name="ProductManifold", dimension=total_dim,
params={"submanifolds": submanifolds})
else:
match = pattern.match(spec)
assert match is not None, f"Invalid spec {spec}"
typ, dim = match.groups()
return cls(name=short_forms[typ], dimension=int(dim), params={})
```
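A usage sketch for the spec parser above (assuming the package root is `riemann` and the named manifold classes are registered with `RiemannianManifold.from_name_params`):
```python
from riemann.config.manifold_config import ManifoldConfig

single = ManifoldConfig.from_string("H10")
# -> name="PoincareBall", dimension=10

product = ManifoldConfig.from_string("S2xE3xH5")
# -> name="ProductManifold", dimension=10, params={"submanifolds": [
#      {"name": "SphericalManifold", "dimension": 2},
#      {"name": "EuclideanManifold", "dimension": 3},
#      {"name": "PoincareBall", "dimension": 5}]}

manifold = product.get_manifold_instance()   # delegates to RiemannianManifold.from_name_params
```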
#### File: riemann/data/data_loader.py
```python
from .graph_dataset import GraphDataset
from .graph_loader_utils import load_csv_edge_list, load_adjacency_matrix
from ..config.config_loader import get_config
training_data: GraphDataset = None
eval_data: GraphDataset = None
def get_training_data() -> GraphDataset:
"""
Loads the training data or fetches it if already loaded
"""
_load_data_if_needed()
return training_data
def get_eval_data() -> GraphDataset:
"""
Loads the eval data or fetches it if already loaded. Will return None if no
train eval split is configured
"""
_load_data_if_needed()
return eval_data
def _load_data_if_needed():
global training_data
global eval_data
if training_data is None:
data_config = get_config().data
if data_config.graph_data_type == "edge":
if data_config.generate_eval_split:
path = data_config.full_path
else:
path = data_config.train_path
idx, objects, weights = load_csv_edge_list(path, data_config.symmetrize,
delimiter=data_config.delimiter)
else:
# TODO This needs to be updated for handling train eval splits
idx, objects, weights \
= load_adjacency_matrix(data_config.train_path, data_config.graph_data_format,
data_config.symmetrize)
if data_config.generate_eval_split:
training_data, eval_data = \
GraphDataset.make_train_eval_split(data_config.dataset_name, idx,
objects, weights)
else:
if data_config.eval_path is not None:
eval_idx, eval_objects, eval_weights = \
load_csv_edge_list(data_config.eval_path,
data_config.symmetrize,
delimiter=data_config.delimiter)
# Correct indexing of objects in eval_idx
for edge in eval_idx:
for i in range(2):
if eval_objects[edge[i]] not in objects:
objects.append(eval_objects[edge[i]])
edge[i] = len(objects) - 1
else:
edge[i] = objects.index(eval_objects[edge[i]])
training_data = GraphDataset(f"{data_config.dataset_name}_train",
idx, objects, weights)
eval_data = GraphDataset(f"{data_config.dataset_name}_eval",
eval_idx, objects, eval_weights)
else:
training_data = GraphDataset(data_config.dataset_name, idx, objects,
weights)
```
#### File: riemannian-nlp/riemann/device_manager.py
```python
import torch
from .config.config_loader import get_config
def get_device():
"""
Returns the main device that experiments are to be ran on. Note that some
components can and should still run on cpu for memory reasons so this
should only be used to retrieve devices for things that would potentially
run on the gpu.
"""
general_config = get_config().general
gpu = general_config.gpu
device = torch.device(f'cuda:{gpu}' if gpu >= 0 else 'cpu')
return device
```
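A small usage sketch (assumes a config with `general.gpu` has already been loaded via the package's config loader):
```python
import torch

# get_device() reads general.gpu from the loaded config: >= 0 selects that CUDA
# device, anything else falls back to CPU.
device = get_device()
model = torch.nn.Linear(8, 2).to(device)
x = torch.randn(4, 8, device=device)
y = model(x)
```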
#### File: embedding/conceptnet/__init__.py
```python
import re
import numpy as np
import pandas as pd
from sklearn.preprocessing import normalize
from .nodes import standardized_concept_uri
from .uri import uri_to_label
DOUBLE_DIGIT_RE = re.compile(r'[0-9][0-9]')
DIGIT_RE = re.compile(r'[0-9]')
CONCEPT_RE = re.compile(r'/c/[a-z]{2,3}/.+')
def replace_numbers(s):
"""
Replace digits with # in any term where a sequence of two digits appears.
This operation is applied to text that passes through word2vec, so we
should match it.
"""
if DOUBLE_DIGIT_RE.search(s):
return DIGIT_RE.sub('#', s)
else:
return s
def standardized_uri(language, term):
"""
Get a URI that is suitable to label a row of a vector space, by making sure
that both ConceptNet's and word2vec's normalizations are applied to it.
If the term already looks like a ConceptNet URI, it will only have its
sequences of digits replaced by #. Otherwise, it will be turned into a
ConceptNet URI in the given language, and then have its sequences of digits
replaced.
"""
if not CONCEPT_RE.match(term):
term = standardized_concept_uri(language, term)
return replace_numbers(term)
def get_vector(frame, label, language=None):
"""
Returns the row of a vector-space DataFrame `frame` corresponding
    to the given `label`. If `language` is set, this can take in plain text
and normalize it to ConceptNet form. Either way, it can also take in
a label that is already in ConceptNet form.
"""
if frame.index[1].startswith('/c/'): # This frame has URIs in its index
if not label.startswith('/'):
label = standardized_uri(language, label)
try:
return frame.loc[label]
except KeyError:
return pd.Series(index=frame.columns)
else:
if label.startswith('/'):
label = uri_to_label(label)
try:
return frame.loc[replace_numbers(label)]
except KeyError:
# Return a vector of all NaNs
return pd.Series(index=frame.columns)
def normalize_vec(vec):
"""
L2-normalize a single vector, as a 1-D ndarray or a Series.
"""
if isinstance(vec, pd.Series):
return normalize(vec.fillna(0).values.reshape(1, -1))[0]
elif isinstance(vec, np.ndarray):
return normalize(vec.reshape(1, -1))[0]
else:
raise TypeError(vec)
def cosine_similarity(vec1, vec2):
"""
Get the cosine similarity between two vectors -- the cosine of the angle
between them, ranging from -1 for anti-parallel vectors to 1 for parallel
vectors.
"""
return normalize_vec(vec1).dot(normalize_vec(vec2))
def similar_to_vec(frame, vec, limit=50):
# TODO: document the assumptions here
# - frame and vec should be normalized
# - frame should not be made of 8-bit ints
if vec.dot(vec) == 0.:
return pd.Series(data=[], index=[], dtype='f')
similarity = frame.dot(vec)
return similarity.dropna().nlargest(limit)
def weighted_average(frame, weight_series):
if isinstance(weight_series, list):
weight_dict = dict(weight_series)
weight_series = pd.Series(weight_dict)
vec = np.zeros(frame.shape[1], dtype='f')
for i, label in enumerate(weight_series.index):
if label in frame.index:
val = weight_series[i]
vec += val * frame.loc[label].values
return pd.Series(data=vec, index=frame.columns, dtype='f')
```
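A small sketch of the helpers above (the URI normalization relies on the ConceptNet `nodes`/`uri` modules; the exact URI shown is indicative):
```python
import numpy as np

from riemann.embedding.conceptnet import (cosine_similarity, replace_numbers,
                                          standardized_uri)

print(standardized_uri("en", "Apple pie"))   # roughly "/c/en/apple_pie"
print(replace_numbers("route 66"))           # "route ##" (a two-digit run triggers masking)
print(replace_numbers("route 6"))            # "route 6"  (single digits are left alone)

sim = cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0]))
print(round(float(sim), 3))                  # 0.707
```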
#### File: riemann/embedding/sentence_embedder.py
```python
from abc import ABC, abstractmethod
from typing import Set
import numpy as np
from .core_nlp import SimpleSentence
from .term_frequencies import TermFrequencies
from .word_embedding import Glove
class SentenceEmbedder(ABC):
@abstractmethod
def embed(self, sentence: SimpleSentence, **kwargs) -> np.ndarray:
pass
@property
@abstractmethod
def dim(self) -> int:
pass
class GloveSentenceEmbedder(SentenceEmbedder):
POS_WHITELIST = {
"JJ",
"JJR",
"JJS",
"NN",
"NNS",
"NNP",
"NNPS",
# "PRP",
# "PRP$",
# "RB",
# "RBR",
# "RBS",
"VB",
"VBD",
"VBG",
"VBN",
"VBP",
"VBZ",
# "WDT",
# "WP",
# "WRB",
}
LEMMA_BLACKLIST = {"there", "do", "be", "have", "use", "need", "want", "like", "such"}
def __init__(
self,
glove: Glove,
pos_whitelist: Set[str] = None,
lemma_blacklist: Set[str] = None,
term_frequencies: TermFrequencies = None,
lemmatize: bool = True,
):
self.glove = glove
self.pos_whitelist = pos_whitelist if pos_whitelist is not None else GloveSentenceEmbedder.POS_WHITELIST
self.lemma_blacklist = lemma_blacklist if lemma_blacklist is not None else GloveSentenceEmbedder.LEMMA_BLACKLIST
self.lemmatize = lemmatize
self.term_frequencies = term_frequencies
@classmethod
def canonical(cls) -> 'GloveSentenceEmbedder':
return cls(
glove=Glove.canonical(),
term_frequencies=TermFrequencies.canonical()
)
@property
def dim(self):
return self.glove.embedding_dim
def weight(self, token):
"""
SIF reweighting as described in A Simple But Tough-to-Beat Baseline for Sentence Embeddings (Arora 2017)
:param token:
:return:
"""
a = 0.001
if self.term_frequencies is not None and token in self.term_frequencies:
return a / (a + self.term_frequencies[token] / float(self.term_frequencies.total_count))
else:
return 1.0
def include_in_embedding(self, lemma: str = None, pos: str = None):
should_include = True
should_include &= ((self.lemma_blacklist is None) or (lemma not in self.lemma_blacklist))
should_include &= ((self.pos_whitelist is None) or (pos is None) or pos == '' or (
pos in self.pos_whitelist))
return should_include
def embed(
self,
sentence: SimpleSentence,
use_filter: bool = True,
l2_normalize: bool = True,
ignore_case: bool = True,
verbose: bool = False,
username: str = None,
knol_id: str = None,
**kwargs
) -> np.ndarray:
"""
Embeds a sentence using its tokens, lemmas, and parts of speech
:param sentence: A sentence loaded from corenlp
:param use_filter: If we should filter out certain parts of speech or lemmas
:param l2_normalize: If true, we will return only unit vectors (unless it is all 0)
:param ignore_case: If true, we will lower case every token and lemma before finding the embedding vector
:param verbose: If verbose, log warnings.
:param username: The username of the user adding the question.
This is purely for debugging, and is safe to ignore.
:param knol_id: The ID of the Knol we're adding. This is purely for debugging, and is safe to ignore.
:return: A vector corresponding to the embedded question.
"""
vec = np.zeros(self.glove.embedding_dim, dtype=np.float32)
valid_tokens = 0
for token, lemma, pos in zip(sentence.original_texts(), sentence.lemmas(), sentence.pos()):
if ignore_case:
lemma = lemma.lower()
token = token.lower()
if lemma == "" or pos == "":
if verbose:
print(
"Got an empty POS tag and/or Lemma. Setting lemma to be token to compensate. username=" +
str(username) + "; knol_id=" + str(knol_id))
lemma = token
if not use_filter or self.include_in_embedding(lemma, pos):
idx = self.glove.lookup_word(lemma if self.lemmatize else token)
if idx >= self.glove.token_mapper.mapped_output_size():
valid_tokens += 1
weight = self.weight(lemma if self.lemmatize else token)
vec += weight * self.glove.get_embedding_at_index(idx)
if valid_tokens > 0:
if l2_normalize:
return self._normalize(vec)
else:
return vec / valid_tokens
elif use_filter:
# back off to a non filtered embedding if no valid tokens were used and we have a 0 vector
return self.embed(sentence, use_filter=False, l2_normalize=l2_normalize,
ignore_case=ignore_case,
verbose=verbose, username=username, knol_id=knol_id)
else:
return vec
@classmethod
def _normalize(cls, vec: np.ndarray, epsilon=1e-12) -> np.ndarray:
"""
Normalize a vector
:param vec: A one dimensional array
:return:
"""
length = np.power(np.sum(np.power(vec, 2)), 0.5)
return vec / (length + epsilon)
@classmethod
def mock(cls):
return cls(glove=Glove.mock(), term_frequencies=TermFrequencies.mock())
__all__ = ['SentenceEmbedder', 'GloveSentenceEmbedder']
```
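A usage sketch (the `mock()` constructor keeps it dependency-light; real use would call `GloveSentenceEmbedder.canonical()`, which loads the GloVe and term-frequency resources, and `SimpleSentence.from_text` is assumed to behave as it does in the featurizer code elsewhere in this repo):
```python
import numpy as np

from riemann.embedding.core_nlp import SimpleSentence
from riemann.embedding.sentence_embedder import GloveSentenceEmbedder

embedder = GloveSentenceEmbedder.mock()
sentence = SimpleSentence.from_text("elephants drink water at the river")
vec = embedder.embed(sentence)                # SIF-weighted mean of token embeddings
print(vec.shape, float(np.linalg.norm(vec)))  # (embedder.dim,), ~1.0 unless the vector is all zeros
```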
#### File: riemann/embedding/word_piecer.py
```python
import json
import sys
from typing import Tuple, Dict, List, Counter
from zipfile import ZipFile
class WordPiecer:
EOW = "</w>" # How to pretty print EOWs
UNK = "<?>" # How to pretty print unknown characters.
UNK_EOW = "<?></w>" # How to pretty print unknown characters.
def __init__(self, pieces: List[str], merges: List[Tuple[str, str]]):
"""
:param pieces: A sequence of word pieces.
:param merges: Combinations of word pieces that can be merged with the compression algorithm.
"""
# We look up indices a lot, so interning strings is natural.
self._pieces = [sys.intern(p) for p in pieces]
self._indices = {p: i for i, p in enumerate(pieces)}
self._merges = [(self._indices[x], self._indices[y], self._indices[x + y]) for x, y in
merges]
self._merge_indices = {(x, y): i for i, (x, y, _) in enumerate(self._merges)}
if self.UNK not in self._pieces:
self._indices[self.UNK] = len(self._pieces)
self._pieces.append(self.UNK)
if self.UNK_EOW not in self._pieces:
self._indices[self.UNK_EOW] = len(self._pieces)
self._pieces.append(self.UNK_EOW)
def __len__(self):
"""
:return: Size of the vocabulary produced by WordPiecer.
"""
return len(self._pieces)
def __repr__(self):
return f"<WordPiecer: {len(self._pieces)} pieces and {len(self._merges)} merges>"
def _compress(self, indices: List[int]) -> bool:
"""
Attempt to compress the given set of indices in place.
:param indices:
:return: true if we succeeded at compressing the indices
"""
# Find the top-ranked merge to complete:
NONE = len(self._merges) + 1
merge_idx = min((self._merge_indices.get((indices[i], indices[i + 1]), NONE) for i in
range(len(indices) - 1)),
default=NONE)
if merge_idx == NONE:
return False
(x, y, z) = self._merges[merge_idx]
# Apply the chosen merge by deleting anything that matches its values
for i, _ in enumerate(indices):
if i == len(indices) - 1:
break
if indices[i] == x and indices[i + 1] == y:
# pop i twice
indices.pop(i)
indices.pop(i)
indices.insert(i, z)
return True
def encode_string(self, part: str, append_eow: bool = True) -> List[int]:
"""
Encodes a single part using the merge algorithm.
:param part: An arbitrary string.
:param append_eow: if true, append '</w>' to the last token.
:return: A sequence of piece indices
"""
# 0. Handle empty string.
if not part:
return []
# 1. Map part into a sequence of primary pieces indices that will be compressed below.
indices: List[int] = []
for char in part[:-1]:
indices.append(self._indices.get(char, self._indices[self.UNK]))
char = part[-1]
if append_eow:
indices.append(self._indices.get(char + self.EOW, self._indices[self.UNK_EOW]))
else:
unk_eow = self.UNK_EOW if char.endswith(self.EOW) else self.UNK
indices.append(self._indices.get(char, self._indices[unk_eow]))
# 2. Compress the indices in order of the merge indices
while self._compress(indices):
pass
return indices
def encode(self, tokens: List[str]) -> List[int]:
ret = []
for token in tokens:
ret.extend(self.encode_string(token))
return ret
def decode(self, indices: List[int], strip_eow: bool = True) -> List[str]:
"""
Convert given word piece indices into a set of tokens
:param indices: a list of word piece indices
:param strip_eow: remove the </w> tags
:return: a list of tokens that correspond to @indices
"""
ret = []
word = ""
for idx in indices:
piece = self._pieces[idx]
if piece.endswith(self.EOW):
word += piece[:-len(self.EOW)] if strip_eow else piece
ret.append(word)
word = ""
else:
word += piece
# NOTE: this is potentially incorrect because we might add a new word even without the </w> tag.
if word:
ret.append(word)
return ret
def to_string(self, indices: List[int]) -> str:
"""
Converts a sequence of indices in to a readable string.
:param indices:
:return: A string corresponding to @indices.
Example: 1 2 3 -> the
"""
return " ".join(self.decode(indices, strip_eow=False))
@classmethod
def learn(cls, tokens_: List[str], character_threshold: int = 5,
max_vocab: int = 100) -> 'WordPiecer':
"""
Learns a word piecer from a sequence of tokens.
:param tokens_: a sequence of strings to learn from. By default, all tokens are appended with EOW tags.
:param character_threshold: the threshold on the number of times a character should have been seen to be added.
a positive number has a regularizing effect.
:param max_vocab: The maximum number of elements to add to the vocabulary before quitting.
:return: A new word piecer that can be used for these purposes.
"""
pieces: List[str] = [cls.UNK, cls.UNK_EOW]
indices: Dict[str, int] = {p: i for i, p in enumerate(pieces)}
merges: List[Tuple[str, str]] = []
# 0. Minify the number of tokens actually considered by compressing this list of tokens by count.
tokens = Counter[str](tokens_)
# 1. Start by building a list of character to use.
# 1.1 Compute character statistics to replace rare characters with UNK tokens.
piece_stats = Counter[str]()
# Start by adding characters as pieces.
for token, freq in tokens.items():
for char in token[:-1]:
piece_stats[char] += freq
piece_stats[token[-1] + cls.EOW] += freq
# 1.2 Add these characters to the pieces
for char, freq in piece_stats.most_common():
if freq > character_threshold:
indices[char] = len(pieces)
pieces.append(char)
assert len(
pieces) < max_vocab, f"max_vocab ({max_vocab}) is too small to fit the {len(pieces)} characters"
# 1.3 Replace sequences with integers indices.
data: List[Tuple[List[int], int]] = []
UNK_IDX, UNK_EOW_IDX = indices[cls.UNK], indices[cls.UNK_EOW]
for token, freq in tokens.items():
datum = [indices.get(char, UNK_IDX) for char in token[:-1]]
datum.append(indices.get(token[-1] + cls.EOW, UNK_EOW_IDX))
data.append((datum, freq))
# 2. Merge bigrams to construct a vocabulary.
merge_stats = Counter[Tuple[int, int]]()
# 2.1 Build statistics for merges.
for datum, freq in data:
for i in range(len(datum) - 1):
merge_stats[datum[i], datum[i + 1]] += freq
# 2.2 While the vocabulary size can take it, keep merging tokens.
while len(pieces) < max_vocab and merge_stats:
# Get the most common merge
((x, y), _), = merge_stats.most_common(1)
# Add this merge
merges.append((pieces[x], pieces[y]))
if pieces[x] + pieces[y] not in indices:
indices[pieces[x] + pieces[y]] = len(indices)
pieces.append(pieces[x] + pieces[y])
z = indices[pieces[x] + pieces[y]]
# Apply this merge to all of the data and update its statistics.
for datum, freq in data:
for i, _ in enumerate(datum):
# Handle corner case.
if i == len(datum) - 1:
break
# Aha, this is a candidate merge
if datum[i] == x and datum[i + 1] == y:
datum.pop(i)
datum.pop(i)
datum.insert(i, z)
# Update statistics
if i < len(datum) - 1:
merge_stats[y, datum[i + 1]] -= freq
merge_stats[z, datum[i + 1]] += freq
# We will never see x, y again so remove it.
merge_stats.pop((x, y))
return cls(pieces, merges)
def to_file(self, zf: ZipFile):
zf.writestr("encoder/pieces.txt", "\n".join(self._pieces))
zf.writestr("encoder/merges.txt", "\n".join([
f"{self._pieces[x]}\t{self._pieces[y]}" for (x, y, _) in self._merges
]))
__instance = None
@classmethod
def canonical(cls) -> 'WordPiecer':
if cls.__instance is None:
with ZipFile(props.auto.WORD_PIECER_PATH) as zf:
cls.__instance = cls.from_file(zf)
return cls.__instance
@classmethod
def from_file(cls, zf: ZipFile) -> 'WordPiecer':
def as_tuple(ls: List[str]):
return ls[0], ls[1]
pieces = zf.read("encoder/pieces.txt").decode("utf-8").split("\n")
merges = [as_tuple(line.split("\t")) for line in
zf.read("encoder/merges.txt").decode("utf-8").split("\n")]
return cls(pieces, merges)
@classmethod
def from_openai(cls, encoder_path: str, merges_path: str) -> 'WordPiecer':
with open(encoder_path) as f:
encoder = json.load(f)
pieces, idxs = zip(*sorted(encoder.items(), key=lambda kv: kv[-1]))
# Make sure they're in sorted order.
assert idxs[0] == 0 and idxs[-1] == len(pieces) - 1
with open(merges_path) as f:
            merges = [tuple(line.strip().split()) for line in f
                      if line.strip() and not line.startswith("#")]
return cls(pieces, merges)
__all__ = ['WordPiecer']
```
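A self-contained sketch of the BPE-style learn/encode/decode cycle (corpus and sizes are arbitrary; the import path assumes the package root is `riemann`):
```python
from riemann.embedding.word_piecer import WordPiecer

corpus = ["low", "lower", "lowest", "low", "newer", "wider"]
wp = WordPiecer.learn(corpus, character_threshold=0, max_vocab=50)

ids = wp.encode(["lower", "newest"])
print(wp.to_string(ids))   # learned pieces, with explicit </w> end-of-word markers
print(wp.decode(ids))      # ["lower", "newest"]; characters never seen in training decode as "<?>"
```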
#### File: riemann/featurizers/text_featurizer.py
```python
from abc import ABC, abstractmethod
from typing import List, Optional
import torch
from ..config.manifold_config import ManifoldConfig
from ..manifolds import RiemannianManifold
class TextFeaturizer(ABC):
"""
Abstract class for any type of model that produces embeddings of
text
"""
@abstractmethod
def embed_text(self, data: List[str]) -> List[Optional[torch.Tensor]]:
"""
Produces embedding of text
Args:
data (List[str]): List of text objects
Returns:
embedding (List[Optional[tensor]]): embedding of text objects (if
they can be embedded as represented by the optional)
"""
raise NotImplementedError
def get_manifold(self) -> RiemannianManifold:
"""
Returns the manifold that this GraphEmbedder embeds nodes into.
Defaults to Euclidean if this method is not overwritten
"""
return ManifoldConfig().get_manifold_instance()
```
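A minimal concrete `TextFeaturizer` as an illustration of the interface (not part of the package; the hashing scheme is a toy):
```python
from typing import List, Optional

import torch

class HashTextFeaturizer(TextFeaturizer):
    """Toy featurizer that hashes characters into a fixed-size bag-of-chars vector."""

    def __init__(self, dim: int = 64):
        self._dim = dim

    def embed_text(self, data: List[str]) -> List[Optional[torch.Tensor]]:
        out = []
        for text in data:
            if not text:
                out.append(None)            # un-embeddable items are represented as None
                continue
            vec = torch.zeros(self._dim)
            for ch in text:
                vec[ord(ch) % self._dim] += 1.0
            out.append(vec)
        return out
```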
#### File: riemann/featurizers/wordnet_featurizer.py
```python
from ..embedding.sentence_embedder import GloveSentenceEmbedder
from ..embedding.word_embedding import Glove
from ..embedding.core_nlp import SimpleSentence
from ..data.graph_dataset import GraphDataset
from ..manifolds import SphericalManifold
import torch
import numpy as np
from tqdm import tqdm
def get_wordnet_featurizer(graph_dataset: GraphDataset, eval_dataset:
GraphDataset=None):
sentence_embedder = GloveSentenceEmbedder.canonical()
embeddings = []
bad_nodes = []
for i, o_id in enumerate(graph_dataset.object_ids):
sentence = ' '.join(o_id.split('.')[0].split('_'))
ssentence = SimpleSentence.from_text(sentence)
embedding = sentence_embedder.embed(ssentence)
if np.any(embedding):
embeddings.append(embedding)
else:
bad_nodes.append(i)
graph_dataset.collapse_nodes(bad_nodes)
if eval_dataset is not None:
eval_dataset.collapse_nodes(bad_nodes)
"""
deleted = 0
for bad_node in tqdm(bad_nodes, desc="Collapsing nodes w/o features",
dynamic_ncols=True):
graph_dataset.collapse_node(i - deleted)
if eval_dataset is not None:
eval_dataset.collapse_node(i - deleted)
deleted += 1
"""
vectors = torch.tensor(np.array(embeddings), dtype=torch.float, device=torch.device('cpu'))
def featurize(object_ids, node_ids):
return vectors[node_ids]
return featurize, sentence_embedder.dim, SphericalManifold()
```
#### File: riemannian-nlp/riemann/manifold_nns.py
```python
from math import ceil, sqrt
import faiss
import numpy as np
import torch
from tqdm import tqdm
from .config.config_loader import get_config
from .manifolds import RiemannianManifold
class ManifoldNNS:
def __init__(self, data_points: torch.Tensor, manifold: RiemannianManifold,
samples_for_pole: int = 10000):
self.manifold = manifold
self.compute_index(data_points, samples_for_pole)
def compute_index(self, data_points: torch.Tensor, samples_for_pole: int = 10000):
data_points = data_points.cpu()
if samples_for_pole == 0:
samples_for_pole = data_points.size(0)
perm = torch.randperm(data_points.size(0))
idx = perm[:min(samples_for_pole, perm.size(0))]
self.pole = compute_pole(data_points[idx], self.manifold)
tqdm.write("Creating nns index")
ivf_size = 2 ** (ceil(4 * sqrt(data_points.size(0)) - 1)).bit_length()
index_flat = faiss.index_factory(data_points.size(-1),
"Flat") #f"PCAR16,IVF{ivf_size},SQ4")
general_config = get_config().general
_use_gpu: bool = general_config.gpu >= 0
# make it into a gpu index
if _use_gpu:
res = faiss.StandardGpuResources()
# make it into a gpu index
self.index = faiss.index_cpu_to_gpu(res, 0, index_flat)
params = faiss.GpuParameterSpace()
else:
self.index = index_flat
params = faiss.ParameterSpace()
# params.set_index_parameter(self.index, 'nprobe', 100)
params.initialize(self.index)
num_blocks = 200
block_size = ceil(data_points.size(0) / num_blocks)
num_blocks = ceil(data_points.size(0) / block_size)
self.data_embedding = data_points
pole_batch = self.pole.unsqueeze(0).expand_as(data_points[:block_size])
for i in tqdm(range(num_blocks), desc="Euclidean Project",
dynamic_ncols=True):
start_index = i * block_size
end_index = min((i + 1) * block_size, data_points.size(0))
self.data_embedding[start_index:end_index] = self.manifold.log(
pole_batch[0: end_index - start_index], data_points[start_index:end_index])
tqdm.write("Training Index")
train_size = int(20 * sqrt(data_points.size(0)))
perm = torch.randperm(data_points.size(0))
train_points = \
self.data_embedding.cpu().detach()[perm[:train_size]].numpy()
self.index.train(train_points)
tqdm.write("Adding Vectors to Index")
self.index.add(self.data_embedding.cpu().detach().numpy())
def knn_query_batch_vectors(self, data, k=10, log_space=False):
pole_batch = self.pole.unsqueeze(0).expand_as(data)
if log_space:
data_embedding = data.cpu().detach().numpy()
else:
data_embedding = self.manifold.log(pole_batch, data).cpu().detach().numpy()
return self.index.search(data_embedding, k)
def add_vectors(self, data):
pole_batch = self.pole.unsqueeze(0).expand_as(data)
data_embedding = self.manifold.log(pole_batch, data).cpu().detach().numpy()
self.index.add(data_embedding)
def knn_query_batch_indices(self, indices, k=10):
return self.knn_query_batch_vectors(self.data_embedding[indices], k, log_space=True)
def knn_query_all(self, k=10):
block_size = self.data_embedding.size()[0] // 3
num_blocks = ceil(self.data_embedding.size()[0] / block_size)
dists, nns = None, None
for i in tqdm(range(num_blocks), desc="knn_query", dynamic_ncols=True):
start_index = i * block_size
end_index = min((i + 1) * block_size, self.data_embedding.size()[0])
block_dists, block_nns = self.knn_query_batch_indices(
torch.arange(start_index, end_index,
dtype=torch.long, device=self.data_embedding.device), k)
if dists is None:
dists, nns = block_dists, block_nns
else:
dists = np.concatenate((dists, block_dists))
nns = np.concatenate((nns, block_nns))
return dists, nns
def compute_pole(data_samples: torch.Tensor, manifold: RiemannianManifold):
running_pole = data_samples[0].clone()
for i in range(data_samples.size()[0] - 1):
log_mu_x = manifold.log(running_pole, data_samples[i + 1])
weighted = log_mu_x / (i + 2)
running_pole = manifold.exp(running_pole, weighted)
return running_pole
def compute_pole_batch(data: torch.Tensor, manifold: RiemannianManifold, samples_per_pole=1000,
num_poles=15):
permuted_data = data.new_empty(
[num_poles, min(samples_per_pole, data.size(0)), data.size()[-1]])
for i in range(num_poles):
perm = torch.randperm(data.size(0))
idx = perm[:min(samples_per_pole, perm.size(0))]
permuted_data[i] = data[idx]
running_poles = permuted_data[:, 0, :].clone()
for i in range(permuted_data.size()[1] - 1):
log_mu_x = manifold.log(running_poles, permuted_data[:, i + 1, :])
weighted = log_mu_x / (i + 2)
running_poles = manifold.exp(running_poles, weighted)
return running_poles
```
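A sketch of building and querying the index (assumes faiss is installed, the package root is `riemann`, and a config with `general.gpu` has already been loaded; `general.gpu = -1` keeps the index on CPU):
```python
import torch

from riemann.manifold_nns import ManifoldNNS
from riemann.manifolds import SphericalManifold

points = torch.nn.functional.normalize(torch.randn(1000, 3), dim=-1)  # unit vectors on S^2
nns = ManifoldNNS(points, SphericalManifold())

dists, idx = nns.knn_query_all(k=10)                        # (1000, 10) distances and neighbour ids
query_d, query_i = nns.knn_query_batch_vectors(points[:5], k=3)
```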
#### File: riemannian-nlp/riemann/train_old.py
```python
import random
import timeit

import numpy as np
import torch
from tqdm import tqdm

# NOTE: this legacy training script also relies on project-internal helpers
# (MemReporter, embed_eval, write_tensorboard, save_model,
# manifold_dist_loss_relu_sum, metric_loss) that are imported elsewhere.
QUICK_EVAL_FREQUENCY = 0
QUICK_EVAL_PERCENT = 0.05
QUICK_EVAL_TRAIN_PERCENT = 0.025
def train(
device,
model,
manifold,
dimension,
data,
optimizer,
loss_params,
n_epochs,
eval_every,
sample_neighbors_every,
lr_scheduler,
shared_params,
thread_number,
feature_manifold,
conformal_loss_params,
tensorboard_watch={},
eval_data=None
):
batch_num = 0
reporter = MemReporter()
for epoch in range(1, n_epochs + 1):
batch_losses = []
if conformal_loss_params is not None:
batch_conf_losses = []
t_start = timeit.default_timer()
if (epoch - 1) % sample_neighbors_every == 0 and thread_number == 0:
optimizer.zero_grad()
inputs = None
graph_dists = None
conf_loss = None
loss = None
# import gc; gc.collect()
# torch.cuda.empty_cache()
with torch.no_grad():
model.to(device)
nns = data.refresh_manifold_nn(model.get_embedding_matrix(), manifold,
return_nns=True)
if eval_data is not None:
eval_data.refresh_manifold_nn(model.get_embedding_matrix(), manifold,
manifold_nns=nns)
if epoch > 1:
syn_acc, sem_acc = embed_eval.eval_analogy(model, manifold, nns)
write_tensorboard('add_scalar', ['syn_acc', syn_acc, epoch - 1])
write_tensorboard('add_scalar', ['sem_acc', sem_acc, epoch - 1])
# import gc; gc.collect()
# torch.cuda.empty_cache()
data_iterator = tqdm(data) if thread_number == 0 else data
for batch in data_iterator:
if batch_num % eval_every == 0 and thread_number == 0:
mean_loss = 0 # float(np.mean(batch_losses)) use to eval every batch setting this to zero as its only used for printing output
savable_model = model.get_savable_model()
save_data = {
'epoch': epoch
}
if data.features is not None:
save_data["features"] = data.features
if model.get_additional_embeddings() is not None:
save_data[
"additional_embeddings_state_dict"] = model.get_additional_embeddings().state_dict()
if hasattr(model, "main_deltas"):
save_data["main_deltas_state_dict"] = model.main_deltas.state_dict()
if hasattr(model, "additional_deltas"):
save_data[
"additional_deltas_state_dict"] = model.additional_deltas.state_dict()
save_data["deltas"] = model.deltas
save_data.update(shared_params)
path = save_model(savable_model, save_data)
elapsed = 0 # Used to eval every batch setting this to zero as its only used for printing output
# embed_eval.eval_wordsim_benchmarks(model, manifold, device=device, iteration=batch_num)
if eval_data is not None:
with torch.no_grad():
hitsat10 = 0
rank_sum = 0
rec_rank_sum = 0
num_ranks = 0
if QUICK_EVAL_FREQUENCY > 0 and batch_num % (
eval_every * QUICK_EVAL_FREQUENCY) == 0:
eval_data.data_fraction = 1
total_eval = True
else:
eval_data.data_fraction = QUICK_EVAL_PERCENT
total_eval = False
eval_data.compute_train_ranks = False
for eval_batch in tqdm(eval_data):
inputs, graph_dists = eval_batch
inputs = inputs.to(device)
graph_dists = graph_dists.to(device)
input_embeddings = model(inputs)
sample_vertices = input_embeddings.narrow(1, 1,
input_embeddings.size(1) - 1)
main_vertices = input_embeddings.narrow(1, 0, 1).expand_as(
sample_vertices)
manifold_dists = manifold.dist(main_vertices, sample_vertices)
sorted_indices = manifold_dists.argsort(dim=-1)
manifold_dists_sorted = torch.gather(manifold_dists, -1, sorted_indices)
n_neighbors = (graph_dists < 2).sum(dim=-1)
batch_nums, neighbor_ranks = (
sorted_indices < n_neighbors.unsqueeze(1)).nonzero(
as_tuple=True)
neighbor_ranks += 1
adjust_indices = torch.arange(n_neighbors.max())
neighbor_adjustements = torch.cat(
[adjust_indices[:n_neighbors[i]] for i in
range(n_neighbors.size(0))])
neighbor_ranks -= neighbor_adjustements.to(device)
neighbor_ranks = neighbor_ranks.float()
rec_ranks = 1 / neighbor_ranks
hitsat10 += (neighbor_ranks <= 10).sum().cpu().numpy()
rank_sum += neighbor_ranks.sum().cpu().numpy()
rec_rank_sum += rec_ranks.sum().cpu().numpy()
num_ranks += neighbor_ranks.size(0)
mean_rank = rank_sum / num_ranks
mean_rec_rank = rec_rank_sum / num_ranks
hitsat10 = hitsat10 / num_ranks
postfix = "_approx"
if total_eval:
postfix = ""
write_tensorboard('add_scalar',
[f'mean_rank{postfix}', mean_rank, batch_num])
write_tensorboard('add_scalar',
[f'mean_rec_rank{postfix}', mean_rec_rank, batch_num])
write_tensorboard('add_scalar', [f'hitsat10{postfix}', hitsat10, batch_num])
if eval_data.is_eval:
hitsat10 = 0
rank_sum = 0
rec_rank_sum = 0
num_ranks = 0
eval_data.data_fraction = QUICK_EVAL_TRAIN_PERCENT
eval_data.compute_train_ranks = True
for eval_batch in tqdm(eval_data):
inputs, graph_dists = eval_batch
inputs = inputs.to(device)
graph_dists = graph_dists.to(device)
input_embeddings = model(inputs)
sample_vertices = input_embeddings.narrow(1, 1,
input_embeddings.size(
1) - 1)
main_vertices = input_embeddings.narrow(1, 0, 1).expand_as(
sample_vertices)
manifold_dists = manifold.dist(main_vertices, sample_vertices)
sorted_indices = manifold_dists.argsort(dim=-1)
manifold_dists_sorted = torch.gather(manifold_dists, -1,
sorted_indices)
n_neighbors = (graph_dists < 2).sum(dim=-1)
batch_nums, neighbor_ranks = (
sorted_indices < n_neighbors.unsqueeze(1)).nonzero(
as_tuple=True)
neighbor_ranks += 1
adjust_indices = torch.arange(n_neighbors.max())
neighbor_adjustements = torch.cat(
[adjust_indices[:n_neighbors[i]] for i in
range(n_neighbors.size(0))])
neighbor_ranks -= neighbor_adjustements.to(device)
neighbor_ranks = neighbor_ranks.float()
rec_ranks = 1 / neighbor_ranks
hitsat10 += (neighbor_ranks <= 10).sum().cpu().numpy()
rank_sum += neighbor_ranks.sum().cpu().numpy()
rec_rank_sum += rec_ranks.sum().cpu().numpy()
num_ranks += neighbor_ranks.size(0)
mean_rank = rank_sum / num_ranks
mean_rec_rank = rec_rank_sum / num_ranks
hitsat10 = hitsat10 / num_ranks
write_tensorboard('add_scalar',
[f'mean_rank_train', mean_rank, batch_num])
write_tensorboard('add_scalar',
[f'mean_rec_rank_train', mean_rec_rank, batch_num])
write_tensorboard('add_scalar',
[f'hitsat10_train', hitsat10, batch_num])
del manifold_dists, manifold_dists_sorted, sample_vertices, main_vertices, input_embeddings
# import gc;gc.collect()
# torch.cuda.empty_cache()
print(model)
reporter.report()
conf_loss = None
delta_loss = None
inputs, graph_dists = batch
inputs = inputs.to(device)
graph_dists = graph_dists.to(device)
optimizer.zero_grad()
rand_val = random.random()
optimizing_model = False
if hasattr(model, "get_additional_embeddings"):
if rand_val > .6:
optimizing_model = False
optimizing_deltas = False
# model.deltas = True
for p in model.parameters():
p.requires_grad = False
for p in model.get_additional_embeddings().parameters():
p.requires_grad = True
if model.deltas:
for p in model.main_deltas.parameters():
p.requires_grad = False
if hasattr(model, "additional_deltas"):
for p in model.additional_deltas.parameters():
p.requires_grad = False
# elif rand_val > 0.3:
elif rand_val > 0:
optimizing_model = True
optimizing_deltas = False
# model.deltas = True
for p in model.parameters():
p.requires_grad = True
for p in model.get_additional_embeddings().parameters():
p.requires_grad = False
if model.deltas:
for p in model.main_deltas.parameters():
p.requires_grad = False
if hasattr(model, "additional_deltas"):
for p in model.additional_deltas.parameters():
p.requires_grad = False
else:
optimizing_model = False
optimizing_deltas = True
model.deltas = True
for p in model.parameters():
p.requires_grad = False
for p in model.get_additional_embeddings().parameters():
p.requires_grad = False
if model.deltas:
for p in model.main_deltas.parameters():
p.requires_grad = True
if hasattr(model, "additional_deltas"):
for p in model.additional_deltas.parameters():
p.requires_grad = True
loss = 0.01 * manifold_dist_loss_relu_sum(model, inputs, graph_dists, manifold,
**loss_params)
loss.backward()
loss_grad_norm = optimizer.step()
batch_losses.append(loss.cpu().detach().numpy())
del loss
# import gc;gc.collect()
# torch.cuda.empty_cache()
if optimizing_model and hasattr(model,
'embedding_model') and conformal_loss_params is not None and epoch % \
conformal_loss_params["update_every"] == 0:
optimizer.zero_grad()
main_inputs = inputs.narrow(1, 0, 1).squeeze(1).clone().detach()
perm = torch.randperm(main_inputs.size(0))
idx = perm[:conformal_loss_params["num_samples"]]
main_inputs = main_inputs[idx]
# model.deltas = False
conf_loss = 0.5 * metric_loss(model, main_inputs, feature_manifold, manifold,
dimension,
isometric=conformal_loss_params["isometric"],
random_samples=conformal_loss_params[
"random_samples"],
random_init=conformal_loss_params["random_init"])
conf_loss.backward()
conf_loss_grad_norm = optimizer.step()
batch_conf_losses.append(conf_loss.cpu().detach().numpy())
if thread_number == 0:
write_tensorboard('add_scalar',
['minibatch_conf_loss', float(batch_conf_losses[-1]),
batch_num])
write_tensorboard('add_scalar',
['conf_loss_gradient_norm', conf_loss_grad_norm, batch_num])
del conf_loss
# import gc;gc.collect()
# torch.cuda.empty_cache()
# model.deltas = True
if hasattr(model, 'main_deltas') and optimizing_deltas:
main_inputs = inputs.view(inputs.shape[0], -1)
vals = model.main_deltas(
model.index_map[main_inputs][model.index_map[main_inputs] >= 0])
mean_deltas = torch.mean(torch.norm(vals, dim=-1))
delta_loss = 0.03 * torch.sum(torch.norm(vals, dim=-1) ** 2)
'''
total_loss = None
if conformal_loss_params is not None and conf_loss is not None:
total_loss = (1 - conformal_loss_params["weight"]) * loss + conformal_loss_params["weight"] * conf_loss
if delta_loss is not None:
# total_loss += delta_loss
pass
total_loss.backward()
else:
if conformal_loss_params is not None:
scaled_loss = (1 - conformal_loss_params["weight"]) * loss
else:
scaled_loss = loss
if delta_loss is not None:
scaled_loss += delta_loss
scaled_loss.backward()
'''
if thread_number == 0:
write_tensorboard('add_scalar',
['minibatch_loss', float(batch_losses[-1]), batch_num])
write_tensorboard('add_scalar', ['gradient_norm', loss_grad_norm, batch_num])
'''
if total_loss is not None:
write_tensorboard('add_scalar', ['minibatch_total_loss', total_loss.cpu().detach().numpy(), batch_num])
'''
if delta_loss is not None:
write_tensorboard('add_scalar',
['minibatch_delta_loss', delta_loss.cpu().detach().numpy(),
batch_num])
write_tensorboard('add_scalar',
['minibatch_delta_mean', mean_deltas.cpu().detach().numpy(),
batch_num])
for name, value in tensorboard_watch.items():
write_tensorboard('add_scalar', [name, value.cpu().detach().numpy(), batch_num])
elapsed = timeit.default_timer() - t_start
batch_num += 1
mean_loss = float(np.mean(batch_losses))
if thread_number == 0:
if conformal_loss_params is not None and len(batch_conf_losses) > 0:
mean_conf_loss = float(np.mean(batch_conf_losses))
metric_loss_type = "isometric" if conformal_loss_params[
"isometric"] else "conformal"
write_tensorboard('add_scalar',
[f'batch_{metric_loss_type}_loss', mean_conf_loss, epoch])
write_tensorboard('add_scalar', ['batch_loss', mean_loss, epoch])
write_tensorboard('add_scalar', ['learning_rate', lr_scheduler.get_lr()[0], epoch])
lr_scheduler.step()
```
#### File: riemann/utils/spell.py
```python
import logging
# Spell is only imported in the Spell python environment. We use it to log metrics to Spell.
import os
from datetime import datetime
from typing import Union, Optional, Any, Dict
from torch.utils.tensorboard import SummaryWriter
logger = logging.getLogger(__name__)
try:
from spell.metrics import send_metric as send_spell_metric # type: ignore
logger.info("Logging metrics with spell")
except ImportError:
def send_spell_metric(name: str, value: Union[str, int, float], index: Optional[int] = None):
pass
class ProxyWriter(SummaryWriter):
"""
Proxies `torch.utils.tensorboard.SummaryWriter` to also send metrics to Spell.
See `torch.utils.tensorboard.SummaryWriter` for documentation.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#: The last time we sent a log to Spell for a tag. Used to prevent it from throttling us
self._last_logged: Dict[str, datetime] = {}
def add_scalar(self, tag: Any, scalar_value: Any, global_step: Optional[Any] = None,
walltime: Optional[Any] = None) -> None:
# Inherits documentation from SummaryWriter
super().add_scalar(tag, scalar_value, global_step, walltime)
# Prepend tags with log_dir to differentiate train and eval metrics
log_dir = os.path.basename(self.get_logdir())
tag_ = f"{log_dir}/{tag}"
# Only send logs to Spell every second to prevent throttling
        last = self._last_logged.get(tag_)
        if last is None or (datetime.now() - last).total_seconds() > 1:
            send_spell_metric(tag_, scalar_value, global_step)
            self._last_logged[tag_] = datetime.now()
```
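A usage sketch: `ProxyWriter` is a drop-in replacement for `SummaryWriter`, so standard TensorBoard calls work unchanged; scalars are additionally forwarded to Spell (when its client is importable), at most roughly once per second per tag:
```python
from riemann.utils.spell import ProxyWriter

writer = ProxyWriter(log_dir="runs/train")   # log_dir is illustrative
for step in range(100):
    writer.add_scalar("loss", 1.0 / (step + 1), global_step=step)
writer.close()
```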
#### File: riemann/utils/visualization.py
```python
from typing import List
import numpy as np
from numpy import sin, cos
from . import svg3d
# region: common parametric surfaces
from .svg3d import parametric_surface
from .. import RiemannianManifold, EuclideanManifold, ProductManifold
from ..config.manifold_config import ManifoldConfig
def sphere(u, v):
x = sin(u) * cos(v)
y = cos(u)
z = -sin(u) * sin(v)
return x, y, z
def torus(u, v):
x = 3 * (1.5 + cos(v)) * cos(2 * u)
y = 3 * (1.5 + cos(v)) * sin(2 * u)
z = 3 * sin(v)
return x, y, z
def plane(u, v):
x = 3 * 2 * u - np.pi
y = 3 * v - np.pi
z = 3 * 0
return x, y, z
def cylinder(u, v, r=1):
x = 3 * r * sin(2 * u)
y = 3 * r * cos(2 * u)
z = 2 * v - np.pi
return x, y, z
def manifold_scatter(manifold: RiemannianManifold, x: np.ndarray,
colors: List[str] = None, labels: List[str] = None) -> svg3d.Drawing:
"""
Draw a scatter plot of points `x` on the manifold `manifold`.
:param manifold:
:param x:
:param colors:
:param labels:
:return:
"""
assert len(x.shape) == 2 and (x.shape[1] == 2 or x.shape[1] == 3)
n, d = x.shape
def face_shader(index, winding):
ret = dict(
fill="white",
fill_opacity="0.50",
stroke="black",
stroke_linejoin="round",
stroke_width="0.001",
)
if winding < 0:
ret["stroke_dasharray"] = "0.01"
return ret
def point_shader(index, winding):
return {
"fill": colors[index] if index < len(colors) else "black",
"fill_opacity": "0.95",
"stroke": "black",
"stroke_linejoin": "round",
"stroke_width": "0.002",
"radius": "0.005",
}
def point_annotator(index):
if index >= len(labels):
return None
return {
"text": str(labels[index]),
"style": "font: 0.05px bold serif",
}
if isinstance(manifold, EuclideanManifold):
if d == 2:
points = np.hstack((x, np.zeros((n, 1)))).reshape((n, 1, 3))
surface = svg3d.Mesh(3.0 * parametric_surface(8, 8, plane), face_shader)
points = svg3d.Mesh(3.0 * points, point_shader, annotator=point_annotator)
scene = svg3d.Scene([svg3d.Group([surface, points])])
camera = svg3d.Camera.create(eye=(10, 0, 40))
else:
pass
elif isinstance(manifold, ProductManifold):
spec = str(manifold)
if spec == "S1xS1":
points = np.array([
torus((u + 1) * np.pi / 2, (v + 1) * np.pi) for u, v in x]).reshape(n, 1, 3)
surface = svg3d.Mesh(3.0 * parametric_surface(44, 44, torus), face_shader)
points = svg3d.Mesh(3.0 * points, point_shader, annotator=point_annotator)
scene = svg3d.Scene([svg3d.Group([surface, points])])
camera = svg3d.Camera.create(eye=(50, 120, 80))
elif spec == "S1xE1":
assert abs(x[:, 0]).max() <= 1.0
# Normalize the euclidean dimension by its largest value.
x[:, 1] /= x[:, 1].max()
# Project them into cylindrical coordinates
points = np.array([
cylinder((u + 1) * np.pi / 2, (v + 1) * np.pi) for u, v in x]).reshape(n, 1, 3)
surface = svg3d.Mesh(3.0 * parametric_surface(24, 24, cylinder), face_shader)
points = svg3d.Mesh(3.0 * points, point_shader, annotator=point_annotator)
scene = svg3d.Scene([svg3d.Group([surface, points])])
camera = svg3d.Camera.create(eye=(50, 120, 80))
elif spec == "S1xE2":
assert abs(x[:, 0]).max() <= 1.0
# Normalize the euclidean dimensions by its largest value.
x[:, 1] /= x[:, 1].max()
x[:, 2] /= x[:, 2].max()
# Project them into cylindrical coordinates
points = np.array([
cylinder((u + 1) * np.pi / 2, (v + 1) * np.pi, r) for u, v, r in x]).reshape(n, 1,
3)
surface = svg3d.Mesh(3.0 * parametric_surface(24, 24, cylinder), face_shader)
points = svg3d.Mesh(3.0 * points, point_shader, annotator=point_annotator)
scene = svg3d.Scene([svg3d.Group([surface, points])])
camera = svg3d.Camera.create(eye=(50, 120, 80))
else:
            raise NotImplementedError
else:
        raise NotImplementedError
view_box = "-0.5 -0.5 1.0 1.0"
return svg3d.Engine([
svg3d.View(camera, scene,
svg3d.Viewport.from_string(view_box))
]).render(size=(512, 512), view_box=view_box)
def test_manifold_scatter():
# S1 x E1 x H1
# S1 -> (x, y): |(x, y)| == 1 -> theta
# S2 -> (x, y, z): |(x, y, z)| == 1 --> theta, phi
# E1 -> (x in -inf, inf)
# H2 -> (x, y) |x, y| < 1?
# H3 -> (x, y, z) |x, y, z| < 1?
# SxH2 -> (sin(θ), cos(θ), x, y) |x, y| < 3?
drawing = manifold_scatter(ManifoldConfig.from_string("S1xE1").get_manifold_instance(),
np.hstack(
(2 * np.random.rand(100, 1) - 1, np.random.rand(100, 1) * 3)),
sum(([color] * 20 for color in
"red blue green black yellow".split()), []),
list(range(5)))
drawing.saveas("S1xE1.svg")
drawing = manifold_scatter(ManifoldConfig.from_string("S1xE2").get_manifold_instance(),
np.hstack((
2 * np.random.rand(100, 1) - 1, np.random.rand(100, 1) * 3,
np.random.rand(100, 1) * 3)),
sum(([color] * 20 for color in
"red blue green black yellow".split()), []),
list(range(5)))
drawing.saveas("S1xE2.svg")
drawing = manifold_scatter(ManifoldConfig.from_string("S1xE1").get_manifold_instance(),
2 * np.random.rand(100, 2) - 1,
sum(([color] * 20 for color in
"red blue green black yellow".split()), []),
list(range(5)))
drawing.saveas("S1xS1.svg")
drawing = manifold_scatter(ManifoldConfig.from_string("E2").get_manifold_instance(),
np.random.randn(100, 2),
sum(([color] * 20 for color in
"red blue green black yellow".split()), []),
list(range(5)))
drawing.saveas("E2.svg")
```
|
{
"source": "jdietz02/PlexTracktSync",
"score": 2
}
|
#### File: plextraktsync/commands/sync.py
```python
from typing import List
import click
from tqdm import tqdm
from plextraktsync.commands.login import ensure_login
from plextraktsync.decorators.measure_time import measure_time
from plextraktsync.factory import factory
from plextraktsync.logging import logger
from plextraktsync.version import version
def sync(
sync_option: str,
library: str,
show: str,
movie: str,
ids: List[str],
batch_size: int,
dry_run: bool,
no_progress_bar: bool,
):
"""
Perform sync between Plex and Trakt
"""
logger.info(f"PlexTraktSync [{version()}]")
ensure_login()
movies = sync_option in ["all", "movies"]
shows = sync_option in ["all", "tv", "shows"]
config = factory.run_config().update(
batch_size=batch_size, dry_run=dry_run, progressbar=not no_progress_bar
)
wc = factory.walk_config().update(movies=movies, shows=shows)
w = factory.walker()
if ids:
for id in ids:
wc.add_id(id)
if library:
wc.add_library(library)
if show:
wc.add_show(show)
if movie:
wc.add_movie(movie)
if not wc.is_valid():
click.echo("Nothing to sync, this is likely due conflicting options given.")
return
w.print_plan(print=tqdm.write)
if dry_run:
print("Enabled dry-run mode: not making actual changes")
with measure_time("Completed full sync"):
runner = factory.sync()
runner.sync(walker=w, dry_run=config.dry_run)
```
#### File: PlexTracktSync/plextraktsync/plex_server.py
```python
from functools import partial
import plexapi.server
from plextraktsync.config import PLEX_PLATFORM
from plextraktsync.decorators.nocache import nocache
from plextraktsync.factory import factory
from plextraktsync.logging import logger
class PlexServerConnection:
@staticmethod
@nocache
def connect():
return _get_plex_server()
def get_plex_server():
return PlexServerConnection().connect()
def _get_plex_server():
CONFIG = factory.config()
plex_token = CONFIG["PLEX_TOKEN"]
plex_baseurl = CONFIG["PLEX_BASEURL"]
plex_localurl = CONFIG["PLEX_LOCALURL"]
if plex_token == "-":
plex_token = ""
server = None
plexapi.X_PLEX_PLATFORM = PLEX_PLATFORM
plexapi.BASE_HEADERS["X-Plex-Platform"] = plexapi.X_PLEX_PLATFORM
session = factory.session()
PlexServer = partial(plexapi.server.PlexServer, session=session)
# if connection fails, it will try :
# 1. url expected by new ssl certificate
# 2. url without ssl
# 3. local url (localhost)
try:
server = PlexServer(token=plex_token, baseurl=plex_baseurl)
except plexapi.server.requests.exceptions.SSLError as e:
m = "Plex connection error: {}, local url {} didn't respond either.".format(
str(e), plex_localurl
)
excep_msg = str(e.__context__)
if "doesn't match '*." in excep_msg:
hash_pos = excep_msg.find("*.") + 2
new_hash = excep_msg[hash_pos:hash_pos + 32]
end_pos = plex_baseurl.find(".plex.direct")
new_plex_baseurl = (
plex_baseurl[: end_pos - 32] + new_hash + plex_baseurl[end_pos:]
)
try: # 1
server = PlexServer(token=plex_token, baseurl=new_plex_baseurl)
# save new url to .env
CONFIG["PLEX_TOKEN"] = plex_token
CONFIG["PLEX_BASEURL"] = new_plex_baseurl
CONFIG["PLEX_LOCALURL"] = plex_localurl
CONFIG.save()
logger.info("Plex server url changed to {}".format(new_plex_baseurl))
except Exception:
pass
if server is None and plex_baseurl[:5] == "https":
new_plex_baseurl = plex_baseurl.replace("https", "http")
try: # 2
server = PlexServer(token=<PASSWORD>, baseurl=new_plex_baseurl)
logger.warning(
"Switched to Plex unsecure connection because of SSLError."
)
except Exception:
pass
except Exception as e:
m = "Plex connection error: {}, local url {} didn't respond either. Check PLEX_LOCALURL in .env file.".format(
str(e), plex_localurl
)
if server is None:
try: # 3
server = PlexServer(token=<PASSWORD>, baseurl=plex_localurl)
logger.warning(
"No response from {}, connection using local url {}".format(
plex_baseurl, plex_localurl
)
)
except Exception:
logger.error(m)
print(m)
exit(1)
return server
```
#### File: PlexTracktSync/tests/test_config.py
```python
from os import environ
from os.path import join
from plextraktsync.config import Config
from plextraktsync.factory import factory
from plextraktsync.sync import SyncConfig
def test_config_merge():
config = factory.config()
override = {"root": {"key1": "value1"}}
config.merge(override, config)
override = {"root": {"key2": "value2"}}
config.merge(override, config)
assert config["root"]["key1"] == "value1"
assert config["root"]["key2"] == "value2"
def test_config_merge_real():
config = Config()
from tests.conftest import MOCK_DATA_DIR
config.config_file = join(MOCK_DATA_DIR, "673-config.json")
assert config["sync"]["plex_to_trakt"]["collection"] is False
def test_sync_config():
config = Config()
from tests.conftest import MOCK_DATA_DIR
config.config_file = join(MOCK_DATA_DIR, "673-config.json")
sync_config = SyncConfig(config)
assert sync_config.plex_to_trakt["collection"] is False
def test_config():
config = factory.config()
config.save()
config.initialized = False
assert config["PLEX_TOKEN"] is None
config.save()
assert config["PLEX_TOKEN"] is None
environ["PLEX_TOKEN"] = "Foo"
config.initialized = False
assert config["PLEX_TOKEN"] == "Foo"
try:
del environ["PLEX_TOKEN"]
except KeyError:
pass
config.initialized = False
assert config["PLEX_TOKEN"] is None
environ["PLEX_TOKEN"] = "-"
config.initialized = False
assert config["PLEX_TOKEN"] is None
environ["PLEX_TOKEN"] = "None"
config.initialized = False
assert config["PLEX_TOKEN"] is None
```
#### File: PlexTracktSync/tests/test_new_agent.py
```python
from plextraktsync.plex_api import PlexLibraryItem
from tests.conftest import factory, make
trakt = factory.trakt_api()
def test_tv_lookup():
m = PlexLibraryItem(
make(
cls="plexapi.video.Show",
guid="plex://show/5d9c085ae98e47001eb0d74f",
guids=[
make(id="imdb://tt2661044"),
make(id="tmdb://48866"),
make(id="tvdb://268592"),
],
type="show",
)
)
guid = m.guids[0]
assert guid.provider == "tmdb"
assert guid.id == "48866"
assert m.type == "show"
def test_tv_lookup_none():
m = PlexLibraryItem(
make(
cls="plexapi.video.Show",
guid="tv.plex.agents.none://68178",
guids=[],
type="show",
)
)
guid = m.guids[0]
assert guid.provider == "none"
assert guid.id == "68178"
assert m.type == "show"
```
|
{
"source": "jdilger/african-elephants",
"score": 2
}
|
#### File: jdilger/african-elephants/main.py
```python
import ee
from utils import *
import sun_angles
import view_angles
class base(object):
def __init__(self):
self.TEST = True
# self.date = 123
# self.studyArea = ee.FeatureCollection('somehwrere').geometry()
# Vegetation
self.ecoregions = ee.FeatureCollection('RESOLVE/ECOREGIONS/2017')
# Fire
self.modisFireAqua = ee.ImageCollection('MODIS/006/MYD14A2').select([0])
self.modisFireTerra = ee.ImageCollection('MODIS/006/MOD14A2').select([0])
self.startDate = "" #ee.Date('2019-01-01')
# self.startDate = ee.Date('2019-04-01')
self.endDate = "" #ee.Date('2019-06-01')
self.studyArea = ""#ee.Geometry.Polygon([[[22.58124445939474, -18.13023785466269],
# [22.58124445939474, -18.203308698548458],
# [22.68012141251974, -18.203308698548458],
# [22.68012141251974, -18.13023785466269]]])
self.metadataCloudCoverMax = 80
self.maskSR = True
self.cloudMask = True
self.brdfCorrect = False
self.shadowMasking = False
self.cloudScoreThresh = 20
# toa = false sr = true
self.toaOrSR = True
def exportMapToAsset(self, img, desc, region, assetbase, **kwargs):
scale = kwargs.get('scale', 20)
task_ordered = ee.batch.Export.image.toAsset(image=img,
description=desc,
assetId='%s/%s' % (assetbase,desc),
region=region.getInfo()['coordinates'],
maxPixels=1e13,
# crs=self.epsg,
scale=scale)
task_ordered.start()
def exportMapToCloud(self, img, desc, region,bucket,**kwargs):
scale = kwargs.get('scale', 20)
prefix = kwargs.get('prefix',None)
if prefix:
fileNamePrefix = "%s/%s" % (prefix,desc)
else:
fileNamePrefix = desc
task_ordered = ee.batch.Export.image.toCloudStorage(image=img,
description=desc,
bucket=bucket,
fileNamePrefix=fileNamePrefix,
region=region.getInfo()['coordinates'],
maxPixels=1e13,
# crs=self.epsg,
scale=scale)
task_ordered.start()
class Fire(base):
def __init__(self):
super(Fire, self).__init__()
def reclassify(self, img):
""" reclassifies MODIS Thermal Anomalies & Fire 8-Day Global 1km FireMask band.
1: Not processed (obsolete; not used since Collection 1) ->0
2: Not processed (other reason) -> 0
3: Non-fire water pixel -> 0
4: Cloud (land or water) -> 1
5: Non-fire land pixel -> 1
6: Unknown (land or water) -> 1
7: Fire (low confidence, land or water) -> 2
8: Fire (nominal confidence, land or water) -> 3
9: Fire (high confidence, land or water) -> 4
        Creates a binary fire image for pixels rated low to high confidence,
        plus year, month, day, and unix-time bands for later use.
Args:
img: The FireMask band of an MODIS Thermal Anomalies image
Returns:
image - original image, binary mask, day, year, month, and unix time
"""
remapped = img.remap([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 0, 1, 1, 1, 1, 2, 3, 4]).rename(['confidence'])
d = ee.Date(img.get('system:time_start'))
y = ee.Image(d.get('year')).int16().rename(['year'])
m = ee.Image(d.get('month')).int16().rename(['month'])
unix = ee.Image(d.millis()).long().rename('unix')
day = ee.Image(d.get('day')).int16().rename(['day']);
binary = remapped.select('confidence').gte(2).rename('binary')
out = remapped.addBands(y).addBands(m).addBands(day).addBands(binary).addBands(unix)
out = out.updateMask(remapped.gte(2));
return out
def burnOut(self, sd, step, unit):
"""takes in a startdate as ee.Date() -maybe change to string later step as an interger
unit of time as string (e.g. 'day','month','year') """
cM = sd.get('month')
cY = sd.get('year')
currentFires = self.getFire(cY, cM)
pD = sd.advance(step, unit)
pM = pD.get('month')
pY = pD.get('year')
pastFires = self.getFire(pY, pM)
mask = pastFires.select('binary')
newFires = currentFires.where(mask.eq(1), 0).selfMask()
allCurrentFires = currentFires.select('binary').rename('allFires')
kernel = ee.Kernel.euclidean(100000, 'meters')
distance = allCurrentFires.select('allFires').distance(kernel, False).rename('distance10k')
return newFires.addBands(allCurrentFires).addBands(pastFires.select(['binary'], ['past'])).addBands(distance)
def historyFire(self,startDate,endDate):
modisFire = self.modisFireTerra.merge(self.modisFireAqua)
modisFire = modisFire.filterDate(startDate,endDate)
fire = modisFire.map(self.reclassify).select('unix').reduce(ee.Reducer.max())
lastDate = endDate.millis()
lastDateImg = ee.Image(lastDate).long().subtract(fire).divide(604800000).int16()
return lastDateImg.set('system:time_start',endDate,'system:time_end',startDate).rename('historyFire')
def getFire(self, targetYear, targetMonth):
# Bring in MYD14/MOD14
modisFire = self.modisFireTerra.merge(self.modisFireAqua)
singleMonth = modisFire.filter(ee.Filter.calendarRange(targetYear, targetYear, 'year')).filter(
ee.Filter.calendarRange(targetMonth, targetMonth, 'month'))
assert singleMonth.size().getInfo(), "No fires found. Try expanding date range."
        # Recode it, and find the year, month, and day, then add it to the map
singleMonth = singleMonth.map(self.reclassify);
sum_denisty = singleMonth.select('binary').sum().rename('denisty')
return singleMonth.mosaic().addBands(sum_denisty)
class Vegetation(base):
def __init__(self):
super(Vegetation, self).__init__()
def monthlyNDVI(self, m, y, ic, geometry, dur):
"""
Monthly NDVI(NDVIi, m, y) for each Monitoring Unit (MU) i
in month m and year y is obtained by averaging the 3 dekadal values in each month
full text: https://www.frontiersin.org/articles/10.3389/fenvs.2019.00187/full
        Args:
            m: month as integer
            y: year as integer
            ic: NDVI image collection
            geometry: ee geometry used for the regional statistics
            dur: baseline duration as (length, calendarRange field), e.g. (3, 'year')
        Returns:
            image - Four band image with the monthly NDVI mean, anomaly (AANDVI), standardized anomaly (SANDVI), and Vegetation Condition Index (VCI)
"""
ic = ic.filter(ee.Filter.calendarRange(m, m, 'month'))
month_i_ic = ic.filter(ee.Filter.calendarRange(y, y, 'year'))
print('Veg ic month m size',month_i_ic.size().getInfo())
month_i_mean = month_i_ic.mean().rename('NDVI_mean')
month_i_mean_std = month_i_mean.reduceRegion(
**{'reducer': ee.Reducer.stdDev(), 'geometry': geometry, 'scale': 30, 'bestEffort': True,
'maxPixels': 1e13}).get('NDVI_mean')
baseline_ic = ic.filter(ee.Filter.calendarRange(y-dur[0], y, dur[1]))
baseline_mean = baseline_ic.mean()
aandvi = month_i_mean.subtract(baseline_mean).float().rename('AANDVI')
sandvi = aandvi.divide(ee.Image.constant(month_i_mean_std).float()).rename('SANDVI')
vci_min = baseline_mean.reduceRegion(
**{'reducer': ee.Reducer.min(), 'geometry': geometry, 'scale': 30, 'bestEffort': True, 'maxPixels': 1e13})
vci_max = baseline_mean.reduceRegion(
**{'reducer': ee.Reducer.max(), 'geometry': geometry, 'scale': 30, 'bestEffort': True, 'maxPixels': 1e13})
vci_min = ee.Image.constant(vci_min.get('nd')).float()
vci_max = ee.Image.constant(vci_max.get('nd')).float()
vci = month_i_mean.subtract(vci_min).divide(vci_max.subtract(vci_min)).rename('VCI')
return ee.Image.cat([month_i_mean, aandvi, sandvi, vci])
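    # Hedged usage sketch (assumption, not in the original source): with an NDVI
    # collection `ic`, a geometry `geom`, and a baseline duration such as (3, 'year'),
    # a single month could be summarized as
    #   ndvi4 = Vegetation().monthlyNDVI(3, 2019, ic, geom, (3, 'year'))
    # which returns the NDVI_mean, AANDVI, SANDVI and VCI bands described above.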
def byRegion(self, m, y, ic, region, dur):
eco = self.ecoregions.filterBounds(region)
biomes = ee.List(eco.aggregate_array('BIOME_NUM')).distinct()
def monthByRegion(b):
a = eco.filter(ee.Filter.eq('BIOME_NUM', ee.Number(b)))
c = ee.Feature(region).difference(ee.Feature(a.union(1).first()))
return self.monthlyNDVI(m, y, ic, c.geometry(), dur)
ndviByBiome = biomes.map(monthByRegion)
# mosaic together
out = ee.ImageCollection(ndviByBiome).mosaic()
return out
class Water(base):
def __init__(self):
super(Water, self).__init__()
def wlc(self, collection, studyArea, **kwargs):
""" creates a the image used for the weighted linear combination (or weighted sum) of water stress indicators.
Adds bands for NOAA/GFS0P25, NOAA/CFSV2/FOR6H, NASA_USDA/HSL/SMAP_soil_moisture, and JAXA/GPM_L3/GSMaP/v6/operational.
Args:
collection: A Sentinel-2 collection
studyArea: A ee geometry
            startDate : optional; defines the start date for additional image collections. Defaults to self.startDate
            endDate : optional; defines the end date for additional image collections. Defaults to self.endDate
Returns:
image - Multiband image for wlcexpression
"""
valid_kwagrs = ['startDate', 'endDate']
if len(kwargs.keys()) > 0 and len([i for i in valid_kwagrs if i in kwargs]) != len(kwargs.keys()):
            return print('Only valid arguments are startDate and endDate; check that all keywords are correct: {}'.format(
kwargs.keys()))
sd = kwargs.get('startDate', self.startDate)
ed = kwargs.get('endDate', self.endDate)
def forecastscleaning(img):
chour = ee.Date(img.get('creation_time')).get('hour')
fhour = ee.Date(img.get('forecast_time')).get('hour')
fday = ee.Date(img.get('forecast_time')).get('day')
cday = ee.Date(img.get('creation_time')).get('day')
eq = cday.eq(fday)
return img.set('fhour', fhour, 'chour', chour, 'fday', fday, 'cday', cday, 'test', eq)
gfs = ee.ImageCollection("NOAA/GFS0P25").select(['temperature_2m_above_ground']).filterDate(sd, ed) \
.filter(ee.Filter.calendarRange(12, 12, 'hour')).map(forecastscleaning) \
.filter(ee.Filter.eq('fhour', 12)) \
.filter(ee.Filter.eq('chour', 12)) \
.filter(ee.Filter.eq('test', 1)).median()
cfs = ee.ImageCollection('NOAA/CFSV2/FOR6H').select(['Precipitation_rate_surface_6_Hour_Average'],
['precip']).filterDate(sd, ed).filterBounds(
studyArea).sum()
smap = ee.ImageCollection("NASA_USDA/HSL/SMAP_soil_moisture").select('ssm').filterDate(sd, ed).sum()
gsmap = ee.ImageCollection("JAXA/GPM_L3/GSMaP/v6/operational").filter(ee.Filter.eq('status','permanent')).select('hourlyPrecipRateGC')
chirps = gsmap.filterBounds(studyArea).filterDate(ee.Date(sd), ee.Date(ed)).sum().rename('percip')
chirps_spi = self.spi(gsmap,sd,ed).rename('chirps_spi')
img = collection.map(self.waterindicies).median()
img = ee.Image.cat([img, gfs, chirps, cfs, smap, chirps_spi]).set('system:time_start', sd, 'sd', sd, 'ed', ed)
return img
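    # Hedged usage sketch (assumption): `t` is a preprocessed Sentinel-2 collection with
    # renamed bands (blue..swir2), as produced by sentinel2().preprocess() elsewhere in this module.
    #   stack = Water().wlc(t, region, startDate=ee.Date('2018-01-01'), endDate=ee.Date('2018-03-01'))
    # The resulting multiband image is the expected input for wlcexpression().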
def wlcexpression(self, img, region):
""" The weighted linear combination expression computes the final output of water stress
Args:
img: image output from wlc
region: A ee geometry
Returns:
image - single band image water stress"""
img = img.select(['tcw', 'chirps_spi', 'ssm', 'mndwi', 'nwi', 'ndmi', 'temperature_2m_above_ground', 'ndwi'])
img = self.normalizeBands(img, region)
exout = img.expression("(b1*f1) +(b2 * f2) + (b3 *f3) + (b4 * f4)+ (b5 * f5)+ (b6 * f6)+ (b7 * f7)+ (b8 * f8)",
{'b1': img.select(0), 'b2': img.select(1),
'b3': img.select(2), 'b4': img.select(3),
'b5': img.select(4), 'b6': img.select(5),
'b7': ee.Image(1).subtract(img.select(6)),
                                    # temperature has an inverse relation with water availability
'b8': img.select(7),
'f1': 7, 'f2': 6, 'f3': 3,
'f4': 4, 'f5': 5, 'f6': 5,
'f7': 4, 'f8': 7})
return exout
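    # Editorial note (not part of the original module): the factors f1..f8 above sum to 41,
    # so the expression is an unnormalized weighted sum of the min-max scaled indicator bands.
    # A 0-1 scaled score could optionally be obtained by dividing by the weight total, e.g.
    #   scaled = exout.divide(ee.Image.constant(41))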
def waterindicies(self, image):
ndwi = image.normalizedDifference(['green', 'nir']).rename('ndwi')
ndmi = image.normalizedDifference(['nir', 'swir1']).rename('ndmi')
mndwi = image.normalizedDifference(['green', 'swir1']).rename('mndwi')
nwi = image.expression('((b-(n+s+w))/(b+(n+s+w))*100)', {
'b': image.select('blue'),
'n': image.select('nir'),
's': image.select('swir1'),
'w': image.select('swir2')}).rename('nwi')
        # add tasseled cap wetness
tcw = image.expression('0.1511*B1 + 0.1973*B2 + 0.3283*B3 + 0.3407*B4 + -0.7117*B5 + -0.4559*B7', {
'B1': image.select('blue'),
'B2': image.select('green'),
'B3': image.select('red'),
'B4': image.select('nir'),
'B5': image.select('swir1'),
'B7': image.select('swir2'),
}).rename('tcw')
# var factors = ee.ImageCollection.fromImages([waterlss2.select('mndwi'),chirps_spi,smap,mndwi,nwi,ndmi,gfs,ndwi]).map(function(f){ return n(f,aoi)})
imglist = [ndwi, ndmi, mndwi, nwi, tcw]
return ee.Image.cat(imglist)
def spi(self, col, sd, ed):
nsd = sd.advance(-6, 'month')
spi6 = col.filterDate(nsd, sd)
mean = spi6.mean()
std = spi6.reduce(ee.Reducer.stdDev())
img = col.filterDate(sd, ed).median()
img = img.subtract(mean).divide(std).set('system:time_start', sd)
return ee.Image(img)
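    # Hedged sketch: spi() approximates a standardized precipitation index,
    # z = (P_current - mean_prev6) / std_prev6, where the baseline is the 6 months before
    # startDate and P_current is the median over [startDate, endDate]. Example call under
    # that assumption:
    #   spi_img = Water().spi(ee.ImageCollection("JAXA/GPM_L3/GSMaP/v6/operational"), sd, ed)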
def normalizeBands(self, img,region):
def mm(plist):
plist = ee.List(plist)
            # get band name
fbn = ee.String(plist.get(0)).slice(0, -4)
imgMin = ee.Image.constant(mmvalues.get(plist.get(0)))
imgMax = ee.Image.constant(mmvalues.get(plist.get(1)))
normImg = img.select(fbn).subtract(imgMin).divide(imgMax.subtract(imgMin));
return normImg.toFloat()
def rename(i):
imin = ee.String(i).cat(preMin)
imax = ee.String(i).cat(preMax)
return ee.List([imin, imax])
mmvalues = img.reduceRegion(reducer=ee.Reducer.minMax(), scale=300, maxPixels=1e13, bestEffort=True, \
geometry=region, tileScale=6)
bnog = img.bandNames()
preMax = '_max'
preMin = '_min'
bn = bnog.map(rename)
return ee.ImageCollection(bn.map(mm)).toBands().rename(bnog)
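    # Editorial note: normalizeBands() applies per-band min-max scaling,
    # x_norm = (x - min) / (max - min), with min/max taken from reduceRegion over `region`
    # at 300 m. A minimal standalone sketch of the same idea (unitScale is assumed here as
    # an equivalent convenience; the method above builds the result manually):
    #   stats = img.reduceRegion(ee.Reducer.minMax(), geometry=region, scale=300, bestEffort=True)
    #   norm = img.select('ndwi').unitScale(ee.Number(stats.get('ndwi_min')), ee.Number(stats.get('ndwi_max')))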
class landsat(base):
def __init__(self):
super(landsat, self).__init__()
self.divideBands = ee.List(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
self.bandNamesLandsat = ee.List(
['blue', 'green', 'red', 'nir', 'swir1', 'thermal', 'swir2', 'sr_atmos_opacity', 'pixel_qa'])
self.sensorBandDictLandsatSR = ee.Dictionary({'L8': ee.List([1, 2, 3, 4, 5, 7, 6, 9, 10]), \
'L7': ee.List([0, 1, 2, 3, 4, 5, 6, 7, 9]), \
'L5': ee.List([0, 1, 2, 3, 4, 5, 6, 7, 9]), \
'L4': ee.List([0, 1, 2, 3, 4, 5, 6, 7, 9])})
def loadls(self, startDate, endDate, studyArea):
landsat8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR').filterDate(startDate,
endDate).filterBounds(studyArea)
landsat8 = landsat8.filterMetadata('CLOUD_COVER', 'less_than', self.metadataCloudCoverMax)
landsat8 = landsat8.select(self.sensorBandDictLandsatSR.get('L8'), self.bandNamesLandsat)
landsat5 = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR').filterDate(startDate,
endDate).filterBounds(studyArea)
landsat5 = landsat5.filterMetadata('CLOUD_COVER', 'less_than', self.metadataCloudCoverMax)
landsat5 = landsat5.select(self.sensorBandDictLandsatSR.get('L5'), self.bandNamesLandsat).map(
self.defringe)
landsat7 = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR').filterDate(startDate,
endDate).filterBounds(studyArea)
landsat7 = landsat7.filterMetadata('CLOUD_COVER', 'less_than', self.metadataCloudCoverMax)
landsat7 = landsat7.select(self.sensorBandDictLandsatSR.get('L7'), self.bandNamesLandsat)
return landsat5.merge(landsat7).merge(landsat8)
def preprocess(self, **kwargs):
valid_kwagrs = ['startDate', 'endDate','studyArea']
if len(kwargs.keys()) > 0 and len([i for i in valid_kwagrs if i in kwargs]) != len(kwargs.keys()):
            return print('Only valid arguments are startDate, studyArea, and endDate; check that all keywords are correct: {}'.format(
kwargs.keys()))
startDate = kwargs.get('startDate', self.startDate)
endDate = kwargs.get('endDate', self.endDate)
studyArea = kwargs.get('studyArea',self.studyArea)
landsat = self.loadls(startDate=startDate, endDate=endDate, studyArea=studyArea)
if landsat.size().getInfo() > 0:
if self.maskSR == True:
print("removing clouds")
landsat = landsat.map(self.CloudMaskSRL8)
landsat = landsat.map(self.scaleLandsat)
if self.cloudMask == True:
print("removing some more clouds")
landsat = landsat.map(self.maskClouds)
if self.brdfCorrect == True:
landsat = landsat.map(self.brdf)
return landsat
def defringe(self, img):
# threshold for defringing landsat5 and 7
fringeCountThreshold = 279
k = ee.Kernel.fixed(41, 41,
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
m = ee.Image(img).mask().reduce(ee.Reducer.min())
sum = m.reduceNeighborhood(ee.Reducer.sum(), k, 'kernel')
mask = sum.gte(fringeCountThreshold)
return img.updateMask(mask)
def CloudMaskSRL8(self, img):
"""apply cf-mask Landsat"""
QA = img.select("pixel_qa")
shadow = QA.bitwiseAnd(8).neq(0)
cloud = QA.bitwiseAnd(32).neq(0)
return img.updateMask(shadow.Not()).updateMask(cloud.Not()).copyProperties(img)
def maskClouds(self, img):
"""
        Computes spectral indices of cloudiness and takes the minimum of them.
        Each spectral index is fairly lenient because the group minimum
        is a somewhat stringent comparison policy. side note -> this seems like a job for machine learning :)
        originally written by <NAME> for Landsat imagery, adapted to Sentinel by <NAME> and <NAME>
"""
score = ee.Image(1.0);
# Clouds are reasonably bright in the blue band.
blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))
score = score.min(blue_rescale);
# Clouds are reasonably bright in all visible bands.
visible = img.select('red').add(img.select('green')).add(img.select('blue'))
visible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))
score = score.min(visible_rescale);
# Clouds are reasonably bright in all infrared bands.
infrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))
infrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))
score = score.min(infrared_rescale);
# Clouds are reasonably cool in temperature.
temp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))
score = score.min(temp_rescale);
# However, clouds are not snow.
ndsi = img.normalizedDifference(['green', 'swir1']);
ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))
score = score.min(ndsi_rescale).multiply(100).byte().rename('cloudScore');
mask = score.lt(self.cloudScoreThresh).rename(['cloudMask']);
img = img.updateMask(mask).addBands([mask]).addBands(score);
return img;
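    # Editorial note: each *_rescale above is a linear stretch of the form
    # (x - lo) / (hi - lo), so cloudScore is the minimum of several 0-1 scores scaled to
    # 0-100. A hedged sketch of the same helper pattern used elsewhere in this module:
    #   def rescale(img, lo, hi):
    #       return img.subtract(ee.Number(lo)).divide(ee.Number(hi).subtract(ee.Number(lo)))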
def brdf(self, img):
# import sun_angles
# import view_angles
def _apply(image, kvol, kvol0):
blue = _correct_band(image, 'blue', kvol, kvol0, f_iso=0.0774, f_geo=0.0079, f_vol=0.0372)
green = _correct_band(image, 'green', kvol, kvol0, f_iso=0.1306, f_geo=0.0178, f_vol=0.0580)
red = _correct_band(image, 'red', kvol, kvol0, f_iso=0.1690, f_geo=0.0227, f_vol=0.0574)
nir = _correct_band(image, 'nir', kvol, kvol0, f_iso=0.3093, f_geo=0.0330, f_vol=0.1535)
swir1 = _correct_band(image, 'swir1', kvol, kvol0, f_iso=0.3430, f_geo=0.0453, f_vol=0.1154)
swir2 = _correct_band(image, 'swir2', kvol, kvol0, f_iso=0.2658, f_geo=0.0387, f_vol=0.0639)
return replace_bands(image, [blue, green, red, nir, swir1, swir2])
def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):
"""fiso + fvol * kvol + fgeo * kgeo"""
iso = ee.Image(f_iso)
geo = ee.Image(f_geo)
vol = ee.Image(f_vol)
pred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])
pred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])
cfac = pred0.divide(pred).rename(['cfac'])
corr = image.select(band_name).multiply(cfac).rename([band_name])
return corr
def _kvol(sunAz, sunZen, viewAz, viewZen):
"""Calculate kvol kernel.
From Lucht et al. 2000
Phase angle = cos(solar zenith) cos(view zenith) + sin(solar zenith) sin(view zenith) cos(relative azimuth)"""
relative_azimuth = sunAz.subtract(viewAz).rename(['relAz'])
pa1 = viewZen.cos() \
.multiply(sunZen.cos())
pa2 = viewZen.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle1 = pa1.add(pa2)
phase_angle = phase_angle1.acos()
p1 = ee.Image(PI().divide(2)).subtract(phase_angle)
p2 = p1.multiply(phase_angle1)
p3 = p2.add(phase_angle.sin())
p4 = sunZen.cos().add(viewZen.cos())
p5 = ee.Image(PI().divide(4))
kvol = p3.divide(p4).subtract(p5).rename(['kvol'])
viewZen0 = ee.Image(0)
pa10 = viewZen0.cos() \
.multiply(sunZen.cos())
pa20 = viewZen0.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle10 = pa10.add(pa20)
phase_angle0 = phase_angle10.acos()
p10 = ee.Image(PI().divide(2)).subtract(phase_angle0)
p20 = p10.multiply(phase_angle10)
p30 = p20.add(phase_angle0.sin())
p40 = sunZen.cos().add(viewZen0.cos())
p50 = ee.Image(PI().divide(4))
kvol0 = p30.divide(p40).subtract(p50).rename(['kvol0'])
return (kvol, kvol0)
date = img.date()
footprint = determine_footprint(img)
        print(footprint.getInfo(), 'footprint')
(sunAz, sunZen) = sun_angles.create(date, footprint)
(viewAz, viewZen) = view_angles.create(footprint)
(kvol, kvol0) = _kvol(sunAz, sunZen, viewAz, viewZen)
return _apply(img, kvol.multiply(PI()), kvol0.multiply(PI()))
def scaleLandsat(self, img):
"""Landast is scaled by factor 0.0001 """
thermal = img.select(ee.List(['thermal'])).multiply(0.1)
scaled = ee.Image(img).select(self.divideBands).multiply(ee.Number(0.0001))
return img.select(['pixel_qa']).addBands(scaled).addBands(thermal)
class sentinel2(base):
def __init__(self):
super(sentinel2, self).__init__()
self.s2BandsIn = ee.List(
['QA60', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B8A', 'B9', 'B10', 'B11', 'B12'])
self.s2BandsOut = ee.List(
['QA60', 'cb', 'blue', 'green', 'red', 're1', 're2', 're3', 'nir', 're4', 'waterVapor', 'cirrus', 'swir1',
'swir2'])
self.divideBands = ee.List(
['blue', 'green', 'red', 're1', 're2', 're3', 'nir', 're4', 'cb', 'cirrus', 'swir1', 'swir2', 'waterVapor'])
# contractPixels: The radius of the number of pixels to contract (negative buffer) clouds and cloud shadows by. Intended to eliminate smaller cloud
# patches that are likely errors (1.5 results in a -1 pixel buffer)(0.5 results in a -0 pixel buffer)
# (1.5 or 2.5 generally is sufficient)
self.contractPixels = 1.5
# dilatePixels: The radius of the number of pixels to dilate (buffer) clouds
# and cloud shadows by. Intended to include edges of clouds/cloud shadows
# that are often missed (1.5 results in a 1 pixel buffer)(0.5 results in a 0 pixel buffer)
# (2.5 or 3.5 generally is sufficient)
self.dilatePixels = 1.5
self.zScoreThresh = -0.8
self.shadowSumThresh = 30
def loads2(self, start, end, studyArea):
collection = 'COPERNICUS/S2'
if self.toaOrSR: collection = 'COPERNICUS/S2_SR'
s2s = ee.ImageCollection(collection).filterDate(start, end) \
.filterBounds(studyArea) \
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', self.metadataCloudCoverMax)) \
# .filter(ee.Filter.lt('CLOUD_COVERAGE_ASSESSMENT', self.metadataCloudCoverMax)) \
return s2s
def preprocess(self, **kwargs):
valid_kwagrs = ['startDate', 'endDate', 'studyArea']
if len(kwargs.keys()) > 0 and len([i for i in valid_kwagrs if i in kwargs]) != len(kwargs.keys()):
            return print('Only valid arguments are studyArea, startDate and endDate; check that all keywords are correct: {}'.format(
kwargs.keys()))
startDate = kwargs.get('startDate', self.startDate)
endDate = kwargs.get('endDate', self.endDate)
studyArea = kwargs.get('studyArea', self.studyArea)
s2 = self.loads2(startDate, endDate, studyArea)
if self.toaOrSR:
self.s2BandsIn = self.s2BandsIn.remove('B10')
self.s2BandsOut = self.s2BandsOut.remove('cirrus')
self.divideBands = self.s2BandsOut.remove('cirrus')
self.cloudMask = False
if s2.size().getInfo() > 0:
s2 = s2.map(self.scaleS2)
if self.shadowMasking == True:
s2 = self.maskShadows(s2, studyArea)
s2 = s2.select(self.s2BandsIn, self.s2BandsOut)
if self.maskSR == True:
print("use QA band for cloud Masking")
s2 = s2.map(self.QAMaskCloud)
if self.cloudMask == True:
print("sentinel cloud score...")
s2 = s2.map(self.sentinelCloudScore)
s2 = self.cloudMasking(s2)
if self.brdfCorrect == True:
print("apply brdf correction..")
s2 = s2.map(self.brdf)
return s2
def scaleS2(self, img):
divideBands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B8A', 'B9', 'B11', 'B12']
bandNames = img.bandNames()
otherBands = bandNames.removeAll(divideBands)
others = img.select(otherBands)
out = img.select(divideBands).divide(10000)
return out.addBands(others).copyProperties(img,
['system:time_start', 'system:footprint', 'MEAN_SOLAR_ZENITH_ANGLE',
'MEAN_SOLAR_AZIMUTH_ANGLE']).set("centroid",
img.geometry().centroid())
def maskShadows(self, collection, studyArea):
def TDOM(image):
zScore = image.select(shadowSumBands).subtract(irMean).divide(irStdDev)
irSum = image.select(shadowSumBands).reduce(ee.Reducer.sum())
TDOMMask = zScore.lt(self.zScoreThresh).reduce(ee.Reducer.sum()).eq(2) \
.And(irSum.lt(self.shadowSumThresh)).Not()
TDOMMask = TDOMMask.focal_min(self.dilatePixels)
return image.addBands(TDOMMask.rename(['TDOMMask']))
def mask(image):
outimg = image.updateMask(image.select(['TDOMMask']))
return outimg
shadowSumBands = ['B8', 'B11']
allCollection = ee.ImageCollection('COPERNICUS/S2').filterBounds(studyArea).filter(
ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 30))
# Get some pixel-wise stats for the time series
irStdDev = allCollection.select(shadowSumBands).reduce(ee.Reducer.stdDev())
irMean = allCollection.select(shadowSumBands).reduce(ee.Reducer.mean())
        # Mask out dark outliers
collection_tdom = collection.map(TDOM)
return collection_tdom.map(mask)
def sentinelCloudScore(self, img):
"""
        Computes spectral indices of cloudiness and takes the minimum of them.
        Each spectral index is fairly lenient because the group minimum
        is a somewhat stringent comparison policy. side note -> this seems like a job for machine learning :)
        originally written by <NAME> for Landsat imagery
        adapted to Sentinel by <NAME> and <NAME>
"""
def rescale(img, thresholds):
"""
Linear stretch of image between two threshold values.
"""
return img.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
# cloud until proven otherwise
score = ee.Image(1)
blueCirrusScore = ee.Image(0)
# clouds are reasonably bright
blueCirrusScore = blueCirrusScore.max(rescale(img.select(['blue']), [0.1, 0.5]))
blueCirrusScore = blueCirrusScore.max(rescale(img.select(['cb']), [0.1, 0.5]))
blueCirrusScore = blueCirrusScore.max(rescale(img.select(['cirrus']), [0.1, 0.3]))
score = score.min(blueCirrusScore)
score = score.min(rescale(img.select(['red']).add(img.select(['green'])).add(img.select('blue')), [0.2, 0.8]))
score = score.min(rescale(img.select(['nir']).add(img.select(['swir1'])).add(img.select('swir2')), [0.3, 0.8]))
# clouds are moist
ndsi = img.normalizedDifference(['green', 'swir1'])
score = score.min(rescale(ndsi, [0.8, 0.6]))
score = score.multiply(100).byte();
score = score.clamp(0, 100);
return img.addBands(score.rename(['cloudScore']))
def QAMaskCloud(self, img):
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.divideBands)
others = img.select(otherBands)
qa = img.select('QA60').int16();
img = img.select(self.divideBands)
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = int(math.pow(2, 10));
cirrusBitMask = int(math.pow(2, 11));
# Both flags should be set to zero, indicating clear conditions.
mask = qa.bitwiseAnd(cloudBitMask).eq(0).And(qa.bitwiseAnd(cirrusBitMask).eq(0));
img = img.updateMask(mask).addBands(others)
# Return the masked and scaled data.
return img
def cloudMasking(self, collection):
def maskClouds(img):
cloudMask = img.select(['cloudScore']).lt(self.cloudScoreThresh) \
.focal_min(self.dilatePixels) \
.focal_max(self.contractPixels) \
.rename(['cloudMask'])
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.divideBands)
others = img.select(otherBands)
img = img.select(self.divideBands).updateMask(cloudMask)
return img.addBands(cloudMask).addBands(others);
        # Find low cloud score pctl for each pixel to avoid commission errors
# minCloudScore = collection.select(['cloudScore']).reduce(ee.Reducer.percentile([self.cloudScorePctl]));
collection = collection.map(maskClouds)
return collection
def brdf(self, img):
def _apply(image, kvol, kvol0):
blue = _correct_band(image, 'blue', kvol, kvol0, f_iso=0.0774, f_geo=0.0079, f_vol=0.0372)
green = _correct_band(image, 'green', kvol, kvol0, f_iso=0.1306, f_geo=0.0178, f_vol=0.0580)
red = _correct_band(image, 'red', kvol, kvol0, f_iso=0.1690, f_geo=0.0227, f_vol=0.0574)
re1 = _correct_band(image, 're1', kvol, kvol0, f_iso=0.2085, f_geo=0.0256, f_vol=0.0845)
re2 = _correct_band(image, 're2', kvol, kvol0, f_iso=0.2316, f_geo=0.0273, f_vol=0.1003)
re3 = _correct_band(image, 're3', kvol, kvol0, f_iso=0.2599, f_geo=0.0294, f_vol=0.1197)
nir = _correct_band(image, 'nir', kvol, kvol0, f_iso=0.3093, f_geo=0.0330, f_vol=0.1535)
re4 = _correct_band(image, 're4', kvol, kvol0, f_iso=0.2907, f_geo=0.0410, f_vol=0.1611)
swir1 = _correct_band(image, 'swir1', kvol, kvol0, f_iso=0.3430, f_geo=0.0453, f_vol=0.1154)
swir2 = _correct_band(image, 'swir2', kvol, kvol0, f_iso=0.2658, f_geo=0.0387, f_vol=0.0639)
return replace_bands(image, [blue, green, red, re1, re2, re3, nir, re4, swir1, swir2])
def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):
"""fiso + fvol * kvol + fgeo * kgeo"""
iso = ee.Image(f_iso)
geo = ee.Image(f_geo)
vol = ee.Image(f_vol)
pred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])
pred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])
cfac = pred0.divide(pred).rename(['cfac'])
corr = image.select(band_name).multiply(cfac).rename([band_name])
return corr
def _kvol(sunAz, sunZen, viewAz, viewZen):
"""Calculate kvol kernel.
From Lucht et al. 2000
Phase angle = cos(solar zenith) cos(view zenith) + sin(solar zenith) sin(view zenith) cos(relative azimuth)"""
relative_azimuth = sunAz.subtract(viewAz).rename(['relAz'])
pa1 = viewZen.cos() \
.multiply(sunZen.cos())
pa2 = viewZen.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle1 = pa1.add(pa2)
phase_angle = phase_angle1.acos()
p1 = ee.Image(PI().divide(2)).subtract(phase_angle)
p2 = p1.multiply(phase_angle1)
p3 = p2.add(phase_angle.sin())
p4 = sunZen.cos().add(viewZen.cos())
p5 = ee.Image(PI().divide(4))
kvol = p3.divide(p4).subtract(p5).rename(['kvol'])
viewZen0 = ee.Image(0)
pa10 = viewZen0.cos() \
.multiply(sunZen.cos())
pa20 = viewZen0.sin() \
.multiply(sunZen.sin()) \
.multiply(relative_azimuth.cos())
phase_angle10 = pa10.add(pa20)
phase_angle0 = phase_angle10.acos()
p10 = ee.Image(PI().divide(2)).subtract(phase_angle0)
p20 = p10.multiply(phase_angle10)
p30 = p20.add(phase_angle0.sin())
p40 = sunZen.cos().add(viewZen0.cos())
p50 = ee.Image(PI().divide(4))
kvol0 = p30.divide(p40).subtract(p50).rename(['kvol0'])
return (kvol, kvol0)
date = img.date()
footprint = ee.List(img.geometry().bounds().bounds().coordinates().get(0));
(sunAz, sunZen) = sun_angles.create(date, footprint)
(viewAz, viewZen) = view_angles.create(footprint)
(kvol, kvol0) = _kvol(sunAz, sunZen, viewAz, viewZen)
bandNames = img.bandNames()
otherBands = bandNames.removeAll(self.divideBands)
others = img.select(otherBands)
img = ee.Image(_apply(img, kvol.multiply(PI()), kvol0.multiply(PI())))
return img
if __name__ == "__main__":
ee.Initialize()
# tests
ndvi_tests = False
fire_test = 1
collection_test = False
water_tests = True
region = ee.Geometry.Polygon([[[22.37055462536107, -19.69234130304949],
[23.161822166438526, -19.675148989974225],
[23.519800725057106, -18.180985057390387],
[21.87293615648901, -17.80809895124535],
[21.43371056179063, -19.463056253246073]]])
if water_tests:
t = sentinel2().preprocess().select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
# g = Water().wlc(t)
# wtf = Water().linearScale(g.select([0]), base().studyArea)
# print('ok')
# b = Water().wlcexpression(g, base().studyArea)
try:
img = Water().wlc(t)
b = Water().wlcexpression(img, region)
print(b.bandNames().getInfo())
except Exception as e:
print(e.args)
print('fml')
try:
str == Water().wlc(t, supDate=111)
print('passes wrong kwargs')
except:
print('fails wrong kwargs')
try:
print(Water().wlc(t).bandNames().getInfo())
print('passes wlc with defaults')
except:
print('wlc fails with defaults')
try:
sd = ee.Date('2018-01-01')
ed = ee.Date('2018-03-01')
img = Water().wlc(t, startDate=sd, endDate=ed)
sd.getInfo()['value'] == img.get('sd').getInfo()['value']
            print('passes using custom date')
except:
print('failed to use correct date')
try:
# test scaling bands
b = t.first()
scale = Water().normalizeBands(b,region)
if isinstance(scale,ee.Image) == 0:
raise BaseException()
print('scaling all bands returns image')
except Exception as e:
            print('failed scaling bands test')
            print(e.args)
try:
img = Water().wlc(t)
b = Water().wlcexpression(img, region)
base().exportMap(b,'wlctest20_20170401_20170601_toa',region)
except Exception as e:
print('failed exporting')
print(e.args)
if collection_test:
startDate = ee.Date('2017-04-01')
endDate = ee.Date('2017-05-01')
t = sentinel2().loads2(start=startDate, end=endDate, studyArea=region).select(
['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
# brdf = sentinel2().brdf(t.first())
# print(brdf)
try:
t = sentinel2().preprocess().select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
t2 = landsat().preprocess().select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
t2.first().bandNames().getInfo() == t.first().bandNames().getInfo()
print('passed loading collections')
except:
print('failed loading collections')
try:
t = sentinel2().preprocess().select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
t2 = landsat().preprocess().select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
merged_img = t.merge(t2).min()
print('passed merging collections')
except:
print('failed merging collections')
try:
sentinel2().maskShadows(t, region)
print(t.size().getInfo())
print(t.first().bandNames().getInfo())
except:
print('no')
if fire_test:
modisFire = Fire().modisFireTerra.merge(Fire().modisFireAqua)
singleMonth = modisFire.filter(ee.Filter.calendarRange(2020, 2023, 'year')).filter(
ee.Filter.calendarRange(3, 3, 'month'))
print(singleMonth.size())
assert singleMonth.size().getInfo(), "No fires found. Try expanding date range."
if ndvi_tests:
ic = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR")
m = 2
y = 2019
def maskL8sr(image):
cloudShadowBitMask = 1 << 3;
cloudsBitMask = 1 << 5;
qa = image.select('pixel_qa');
mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And(qa.bitwiseAnd(cloudsBitMask).eq(0))
img = image.updateMask(mask).divide(10000).select("B[0-9]*")
out = img.normalizedDifference(['B5', 'B4']).copyProperties(image, ["system:time_start"])
return out
ic = ic.map(maskL8sr).filterBounds(region)
print(ic.size().getInfo())
a = Vegetation().byRegion(m, y, ic, region)
print(a.bandNames().getInfo())
try:
t2 = landsat().preprocess().select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2'])
v = Vegetation().byRegion(m,y,t2,region)
print(v.bandNames().getInfo())
except Exception as e:
print('failed using ls collection')
print(e.args)
```
|
{
"source": "jdilger/ASF_GEE",
"score": 2
}
|
#### File: jdilger/ASF_GEE/createmanifestDEv.py
```python
import time
import calendar
import glob, os
import json
import zipfile
from string import Template
# create base manifest
baseFolder = r'C:\Users\johnj\Documents\SIG\15.goldmining\scripts\dummyData\S1B_IW_GRDH_1SDV_20200110T101421_20200110T101446_019753_025598_C902-PREDORB-10m-power-filt-rtc-gamma'
bucket = 'goldminehack'
def asfMetadata(baseFolder):
""" Parses filename for generating metadata """
baseFile = baseFolder.split('/')[-1].split('_')
baseMeta = ["mission","beanMode","productTypeRes","processingLvlClass","startDate","endDate","absOrbit","mdtID","uniqID"]
a = dict(zip(baseMeta,baseFile))
startTime = parseTime(a['startDate'])
return json.dumps(a), startTime
def parseTime(dateStr):
""" parses date string from ASF naming convetion:
YYYMMDDTHHMMSS """
# Parse to time object
d_str = time.strptime(dateStr,"%Y%m%dT%H%M%S")
# Convert to seconds from epoch
d_ep = calendar.timegm(d_str)
return d_ep
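# Hedged example (assumes the ASF 'YYYYMMDDTHHMMSS' convention used here):
#   parseTime('20200110T101421')  ->  1578651261  (seconds since the Unix epoch)
# i.e. time.strptime + calendar.timegm treat the timestamp as UTC.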
def prepTileSet(baseFolder):
""" returns tifs in tileset template as list of stings """
bands = glob.glob(baseFolder+'/*.tif')
tileSets = Template("""{"id": "$iD","sources":[{"uris": ["$img"]}]}""")
bandsBase = Template(""" {"id":"$bandId", "tileset_id":"$tilesetId"} """)
tList = []
bList = []
for i in bands:
try:
i = i.split('/')[-1]
img = 'gs://{}/{}'.format(bucket,i)
iD = i.split('_')
if iD[-1].split('.')[0] == 'map':
iD = iD[-2].split('-')[1]
else:
iD= iD[-1].split('.')[0]
tileSetsObj = tileSets.substitute(iD=iD,img=img)
bandsObj = bandsBase.substitute(bandId=iD,tilesetId=iD)
tList.append(tileSetsObj)
bList.append(bandsObj)
except:
print("Image format doesn't match expected pattern")
return tList, bList
def makeManifest(baseFolder):
""" creates general manifest file for each ASF scene """
manifestBase = \
""" {"name": "projects/earthengine-legacy/assets/$assPath",
"tilesets": [$tilesets],
"bands": [$bands],
"start_time": {"seconds": $startTime},
"properties": $properties
}"""
properties, startTime = asfMetadata(baseFolder)
tiles, bands = prepTileSet(baseFolder)
tiles = ','.join(tiles)
bands = ','.join(bands)
outname = baseFolder.split('/')[-1]
print(outname)
fullManifest = Template(manifestBase).substitute(assPath='projects/ACCA-SERVIR/Goldmining/ASF/'+outname,tilesets=tiles,bands=bands,startTime=startTime,properties=properties)
with open('upload/'+outname+'.json','a') as something:
something.write(fullManifest)
# makeManifest(baseFolder)
if __name__ == "__main__":
zipList = glob.glob('*.zip')
print(zipList)
print(os.getcwd())
os.makedirs(os.path.dirname('upload/'), exist_ok=True)
for i in zipList:
with zipfile.ZipFile(i,"r") as zip_ref:
zip_ref.extractall('tmp')
targetdir = i[:-4]
er = os.getcwd()+'/tmp/'+targetdir
makeManifest(er)
for j in glob.glob(er+"/*.tif"):
os.rename(j,"{}{}{}".format(os.getcwd(),'/upload/',j.split('/')[-1]))
```
|
{
"source": "jdilger/dash-examples",
"score": 3
}
|
#### File: jdilger/dash-examples/app_slider_map.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
from urllib.request import urlopen
import json
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
import plotly.graph_objects as go
# df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')
df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv',
dtype={"fips": str}).sort_values(by='date',ascending=True)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash()
# test add fig
selected_datet = 74
datest = df['date'].unique()
datest = datest[selected_datet]
filtered_df = df[df.date == datest]
figt = go.Figure(go.Choroplethmapbox(geojson=counties, locations=filtered_df.fips, z=filtered_df.cases,
colorscale="Inferno", zmin=0, zmax=2000,
marker_opacity=0.95, marker_line_width=0,))#colorbar={'dtick':'log_10(5)'}
figt.update_layout(mapbox_style="carto-positron",
mapbox_zoom=3, mapbox_center = {"lat": 37.0902, "lon": -95.7129}, margin={"r":0,"t":0,"l":0,"b":0})
# end test fig
app.layout = html.Div([
dcc.Graph(id='graph-with-slider', figure=figt),
dcc.Slider(
id='year-slider',
min=0,
max=10,
value=1,
marks={0:{'label':'day 1'},
5:{'label':'day 5'},
7:{'label':'day7'},
10:{'label':'day10'}},#{str(date): str(date) for date in df['date'].unique()},
step=None
),
dcc.Graph(figure=figt)
])
@app.callback(
Output('graph-with-slider', 'figure'),
[Input('year-slider', 'value')])
def update_map(selected_date):
dates = df['date'].unique()
dates = dates[selected_date]
filtered_df = df[df.date == dates]
print(filtered_df)
fig = go.Figure(go.Choroplethmapbox(geojson=counties, locations=filtered_df.fips, z=filtered_df.cases,
colorscale="Inferno", zmin=0, zmax=2000,
marker_opacity=0.95, marker_line_width=0,))#colorbar={'dtick':'log_10(5)'}
fig.update_layout(mapbox_style="carto-positron",
mapbox_zoom=3, mapbox_center = {"lat": 37.0902, "lon": -95.7129})
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
# print(fig)
return fig
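# Editorial note (hedged): the callback maps the slider's `value` (an index into
# df['date'].unique()) to a new Choroplethmapbox figure. Any extra output, e.g. a
# date label, would need its own Output wired the same way, such as
#   @app.callback(Output('date-label', 'children'), [Input('year-slider', 'value')])
# where 'date-label' is a hypothetical html.Div id not present in this app.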
if __name__ == '__main__':
app.run_server(debug=True)
```
|
{
"source": "jdilger/restoration_planning_module",
"score": 2
}
|
#### File: component/widget/constraint.py
```python
from traitlets import HasTraits, Any, observe, dlink
from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
import ee
from component.message import cm
from component import parameter as cp
ee.Initialize()
class Constraint(sw.SepalWidget, v.Row):
custom_v_model = Any(-1).tag(sync=True)
def __init__(self, widget, name="name", header="header", id_="id", **kwargs):
# default
self.id = id_
self.header = header
self.name = name
self.class_ = "ma-5"
self.widget = widget
self.align_center = True
        # create a pencil btn
self.btn = v.Icon(children=["mdi-pencil"], _metadata={"layer": id_})
# create the row
super().__init__(**kwargs)
self.children = [
v.Flex(align_center=True, xs1=True, children=[self.btn]),
v.Flex(align_center=True, xs11=True, children=[self.widget]),
]
# js behaviour
self.widget.observe(self._on_change, "v_model")
def _on_change(self, change):
# update the custom v_model
# if the widget is displayed on the questionnaire
if self.viz:
self.custom_v_model = change["new"]
return
def disable(self):
# update the custom v_model
self.custom_v_model = -1
# hide the component
self.hide()
return self
def unable(self):
# update the custom v_model
self.custom_v_model = self.widget.v_model
# show the component
self.show()
return self
class Binary(Constraint):
def __init__(self, name, header, id_, **kwargs):
widget = v.Switch(
# readonly = True,
persistent_hint=True,
v_model=True,
label=name,
**kwargs
)
super().__init__(widget, name=name, header=header, id_=id_)
class Dropdown(Constraint):
def __init__(self, name, items, header, **kwargs):
widget = v.Select(
label=name,
persistent_hint=True,
items=items,
v_model=int(items[0]["value"]),
**kwargs
)
super().__init__(widget, name=name, header=header)
class Range(Constraint):
LABEL = ["low", "medium", "high"]
def __init__(self, name, header, id_, **kwargs):
widget = v.RangeSlider(
label=name, max=1, step=0.1, v_model=[0, 1], thumb_label=True, **kwargs
)
super().__init__(widget, name=name, header=header, id_=id_)
def set_values(self, geometry, layer):
# compute the min and the max for the specific geometry and layer
ee_image = ee.Image(layer).select(0)
# get min
min_ = ee_image.reduceRegion(
reducer=ee.Reducer.min(), geometry=geometry, scale=250, bestEffort=True
)
min_ = list(min_.getInfo().values())[0]
# get max
max_ = ee_image.reduceRegion(
reducer=ee.Reducer.max(), geometry=geometry, scale=250, bestEffort=True
)
max_ = list(max_.getInfo().values())[0]
# if noneType it means that my AOI is out of bounds with respect to my constraint
# as it won't be usable I need to add a hint to the end user
if min_ is None or max_ is None:
self.widget.error_messages = "The aoi is out of the bounds of your constraint layer, use a custom one."
self.widget.min = 0
self.widget.max = 1
self.widget.step = 0.1
self.widget.tick_labels = []
self.widget.v_model = [0, 1]
else:
# remove the error state
self.widget.error_messages = []
# set the min max
self.widget.min = round(min_, 2)
self.widget.max = round(max_, 2)
            # set the number of steps by setting the step parameter (100 steps)
self.widget.step = round((self.widget.max - self.widget.min) / 100, 2)
# display ticks label with low medium and high values
self.widget.tick_labels = [
self.LABEL[i // 25 - 1] if i in [25, 50, 75] else "" for i in range(101)
]
# set the v_model on the "min - max" value to select the whole image by default
self.widget.v_model = [self.widget.min, self.widget.max]
return self
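    # Editorial sketch (not from the original source): the tick_labels comprehension above
    # yields 101 entries that are empty except at the quartile positions,
    #   i = 25 -> 'low', i = 50 -> 'medium', i = 75 -> 'high'
    # since i // 25 - 1 maps 25, 50, 75 to LABEL indices 0, 1, 2.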
class CustomPanel(v.ExpansionPanel, sw.SepalWidget):
def __init__(self, category, criterias):
# save title name
self.title = category
        # create a header; as nothing is selected by default it should only display the title
self.header = v.ExpansionPanelHeader(children=[cp.criteria_types[category]])
# link the criterias to the select
self.criterias = [c.disable() for c in criterias if c.header == category]
self.select = v.Select(
disabled=True, # disabled until the aoi is selected
class_="mt-5",
small_chips=True,
v_model=None,
items=[c.name for c in self.criterias],
label=cm.constraints.criteria_lbl,
multiple=True,
deletable_chips=True,
persistent_hint=True,
hint="select an AOI first",
)
# create the content, nothing is selected by default so Select should be empty and criterias hidden
criteria_flex = [v.Flex(xs12=True, children=[c]) for c in self.criterias]
self.content = v.ExpansionPanelContent(
children=[v.Layout(row=True, children=[self.select] + criteria_flex)]
)
# create the actual panel
super().__init__(children=[self.header, self.content])
# link the js behaviour
self.select.observe(self._show_crit, "v_model")
def _show_crit(self, change):
for c in self.criterias:
if c.name in change["new"]:
c.unable()
else:
c.disable()
return self
def expand(self):
"""when the custom panel expand I want to display only the title"""
self.header.children = [cp.criteria_types[self.title]]
return self
def shrunk(self):
"""when shrunked I want to display the chips int the header along the title"""
# get the title
title = cp.criteria_types[self.title]
# get the chips
chips = v.Flex(
children=[
v.Chip(class_="ml-1 mr-1", small=True, children=[c.name])
for c in self.criterias
if c.viz
]
)
# write the new header content
self.header.children = [title, chips]
return self
```
#### File: component/widget/edit_dialog.py
```python
from traitlets import Unicode
import json
from pathlib import Path
from traitlets import Int
from sepal_ui import sepalwidgets as sw
from sepal_ui import mapping as sm
from sepal_ui import color
import ipyvuetify as v
import pandas as pd
import ee
from ipyleaflet import WidgetControl
from component import parameter as cp
from component.message import cm
ee.Initialize()
class EditDialog(sw.SepalWidget, v.Dialog):
updated = Int(0).tag(sync=True) # the update traitlets
def __init__(self, aoi_vew, model):
# save the model
self.model = model
# listen to the aoi_vew to update the map
self.view = aoi_vew
self.init_layer = ""
self.id = ""
self.index = None
# add all the standard placeholder, they will be replaced when a layer will be selected
self.title = v.CardTitle(children=["Layer name"])
self.text = v.CardText(children=[""])
self.layer = v.TextField(
class_="ma-5", v_model=None, color="warning", outlined=True, label="Layer"
)
self.unit = v.TextField(
class_="ma-5", v_model=None, color="warning", outlined=True, label="Unit"
)
# add a map to display the layers
self.m = sm.SepalMap()
self.m.layout.height = "40vh"
self.m.layout.margin = "2em"
# two button will be placed at the bottom of the panel
self.cancel = sw.Btn(cm.dial.cancel, color="primary", outlined=True)
self.save = sw.Btn(cm.dial.save, color="primary")
# create the init card
self.card = v.Card(
children=[
self.title,
self.text,
self.layer,
self.unit,
self.m,
v.CardActions(class_="ma-5", children=[self.cancel, self.save]),
]
)
# init the dialog
super().__init__(
persistent=True, value=False, max_width="50vw", children=[self.card]
)
# js behaviours
self.layer.on_event("blur", self._on_layer_change)
self.cancel.on_event("click", self._cancel_click)
self.save.on_event("click", self._save_click)
self.view.observe(self._update_aoi, "updated")
def _on_layer_change(self, widget, event, data):
# do nothing if it's no_layer
if widget.v_model == "no Layer":
return self
# replace the v_model by the init one
if not widget.v_model:
widget.v_model = self.init_layer
# if the layer is different than the init one
elif widget.v_model != self.init_layer:
# display it on the map
geometry = self.view.model.feature_collection
image = Path(widget.v_model)
# if the map cannot be displayed then return to init
try:
self.display_on_map(image, geometry)
except Exception as e:
widget.v_model = self.init_layer
return self
def _cancel_click(self, widget, data, event):
# close without doing anything
self.value = False
return
def _save_click(self, widget, data, event):
# load the btn
widget.toggle_loading()
        # change the model according to the selected information
self.model.layer_list[self.index].update(
layer=self.layer.v_model, unit=self.unit.v_model
)
# modify update to tell the rest of the app that value have been changed
self.updated += 1
# close
self.value = False
# free the btn once the widget is closed
widget.toggle_loading()
return
def _update_aoi(self, change):
# get the aoi
aoi_ee = self.view.model.feature_collection
# draw an outline
outline = ee.Image().byte().paint(featureCollection=aoi_ee, color=1, width=3)
# update the map
self.m.addLayer(outline, {"palette": color.accent}, "aoi")
self.m.zoom_ee_object(aoi_ee.geometry())
return
def set_dialog(self, layer_id=None):
# remove the images
for l in self.m.layers:
if not (l.name in ["aoi", "CartoDB.DarkMatter"]):
self.m.remove_layer(l)
# if data are empty
if not layer_id:
# default variables
self.id = ""
self.index = None
self.init_layer = ""
# default title
self.title.children = [cm.dial.no_layer]
# default text
self.text.children = [cm.dial.disc]
# mute all the widgets
self.layer.v_model = "no Layer"
self.layer.disabled = True
self.unit.v_model = "no unit"
self.unit.disabled = True
# disable save
self.save.disabled = True
else:
# find the index of the item to modify in the model
self.index = next(
(i, l)
for i, l in enumerate(self.model.layer_list)
if l["id"] == layer_id
)[0]
self.id = layer_id
# change title
self.title.children = [self.model.layer_list[self.index]["name"]]
# get the layer list pd dataframe
layer_list = pd.read_csv(cp.layer_list).fillna("")
# change text
layer_df_line = layer_list[layer_list.layer_id == layer_id].iloc[0]
self.text.children = [layer_df_line.layer_info]
# enable textFields
self.layer.disabled = False
self.layer.v_model = self.model.layer_list[self.index]["layer"]
self.unit.disabled = False
self.unit.v_model = self.model.layer_list[self.index]["unit"]
# change default layer name
self.init_layer = layer_df_line.gee_asset
# add the custom layer if existing
geometry = self.view.model.feature_collection
if self.layer.v_model != self.init_layer:
custom_img = Path(self.layer.v_model)
self.display_on_map(custom_img, geometry)
else:
default_img = Path(self.init_layer)
self.display_on_map(default_img, geometry)
# enable save
self.save.disabled = False
# show the dialog
self.value = True
return
def display_on_map(self, image, geometry):
# clip image
ee_image = ee.Image(str(image)).clip(geometry)
# get min
min_ = ee_image.reduceRegion(
reducer=ee.Reducer.min(), geometry=geometry, scale=250, bestEffort=True
)
min_ = list(min_.getInfo().values())[0]
# get max
max_ = ee_image.reduceRegion(
reducer=ee.Reducer.max(), geometry=geometry, scale=250, bestEffort=True
)
max_ = list(max_.getInfo().values())[0]
min_ = min_ if min_ else 0
max_ = max_ if max_ else 1
        # update viz_params accordingly
viz_params = cp.plt_viz["viridis"]
viz_params.update(min=min_, max=max_)
# create a colorbar
for c in self.m.controls:
if type(c) == WidgetControl:
self.m.remove_control(c)
self.m.add_colorbar(
colors=cp.plt_viz["viridis"]["palette"],
vmin=round(min_, 2),
vmax=round(max_, 2),
)
        # display on map
self.m.addLayer(ee_image, viz_params, image.stem)
return self
```
|
{
"source": "jdilger/useful_ee_scripts",
"score": 2
}
|
#### File: useful_ee_scripts/batch_uploads/mosaic_manifest.py
```python
import os
import json
import re
import logging
from datetime import datetime
import subprocess
logging.basicConfig(filename='info.log',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
logging.info("Running Batch Manifest Uploader")
logger = logging.getLogger('mosaic_manifest')
class mosaic_manifest(object):
def __init__(self,bucket,ee_dest, sub_dir=None):
self.bucket = bucket
self.ee_dest = ee_dest
self.sub_dir = sub_dir
self.MANIFEST = None
self.FULL_PATH = None
self.check_bucket()
def get_cloud_files(self):
if self.sub_dir is None:
cloud_files = os.popen(f"gsutil ls {self.bucket}/*.tif").read().split("\n")[:-1]
else:
cloud_files = os.popen(f"gsutil ls {self.bucket}/{self.sub_dir}/*.tif").read().split("\n")[:-1]
return cloud_files
def check_json_name(self,file_name):
search = re.search('.json$', file_name)
if search is None:
return f"{file_name}.json"
else:
return file_name
def save(self,location,file_name=None):
assert self.MANIFEST is not None, "Manifest has not been created. Try running mosaic_manifest(bucket,ee_dest).make()"
if file_name is None:
file_name = "manifest.json"
else:
file_name = self.check_json_name(file_name)
full_path = f'{location}/{file_name}'
self.FULL_PATH = full_path
with open(full_path, 'w') as f:
json.dump(self.MANIFEST, f, indent=2)
def check_bucket(self):
bucket_check = self.bucket.split('/')
if len(bucket_check) > 3:
self.sub_dir = "/".join(bucket_check[3:])
self.bucket = "/".join(bucket_check[:3])
def parse_time_from_folder(self):
# todo: make this optional and add in sd/ed as cli inputs
date_dir = self.sub_dir.split("/")[-1]
search = re.search('^(\d{4}-\d{2}-\d{2})-(\d{4}-\d{2}-\d{2})$', date_dir)
groups = search.groups()
start_time = f"{datetime.strptime(groups[0],'%Y-%m-%d').isoformat()}Z"
end_time = f"{datetime.strptime(groups[1],'%Y-%m-%d').isoformat()}Z"
return start_time, end_time
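    # Hedged example (assumes sub_dir ends in a 'YYYY-MM-DD-YYYY-MM-DD' folder name):
    #   sub_dir = 'planet/2020-01-01-2020-02-01'
    #   parse_time_from_folder() -> ('2020-01-01T00:00:00Z', '2020-02-01T00:00:00Z')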
def make(self):
# Get the list of source URIs from the gsutil output.
cloud_files = self.get_cloud_files()
sources_uris = [{'uris': [f]} for f in cloud_files]
asset_name = self.ee_dest
start_time, end_time = self.parse_time_from_folder()
logger.info(f"asset name: {asset_name}")
logger.info(f"start date:{start_time} end date: {end_time}")
# The enclosing object for the asset.
asset = {
'name': asset_name,
'tilesets': [
{
'id': 'dunno',
'sources': sources_uris
}
],
'bands': [
{
'id': 'blue',
'tileset_band_index':0,
'tileset_id': 'dunno'
},
{
'id':'green',
'tileset_band_index':1,
'tileset_id':'dunno'
},
{
'id':'red',
'tileset_band_index':2,
'tileset_id':'dunno'
},
{
'id':'nir',
'tileset_band_index':3,
'tileset_id':'dunno'
},
{
'id':'alpha',
'tileset_band_index':4,
'tileset_id':'dunno'
},
],
'start_time':start_time,
'end_time':end_time
}
self.MANIFEST = asset
return asset
def upload(self, manifest=None):
if manifest is None:
manifest = self.FULL_PATH
logger.info(f"manifest saved to: {self.FULL_PATH}")
result = subprocess.check_output(f"earthengine upload image --manifest {manifest}", shell=True)
task_id = re.search(r"(b'Started upload task with ID:) ([\w{1:24}\d{1:24}]*)(\\r\\n')",str(result))
print(str(result))
logger.info(f"task id:{task_id.groups()[1]}")
if __name__ == "__main__":
import argparse
import textwrap
desc = """CLI for making and uploading Planet 4-Band quad imagery stored in a Google Storage Bucket
    e.g. making a mosaic and using the current path to save the manifest.json
mosaic_manifest.py gs://bucket/subdir projects/ee/imagecollection/newImageName .
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(desc))
parser.add_argument("bucket",help='-The URI where images are stored (e.g. gs://myBucket or gs://myBucket/subDir )')
parser.add_argument("ee_dst",help='-The earthengine location to save the mosaic')
parser.add_argument("manifest_dst",help='-The location to locally save the manifest.json')
args = parser.parse_args()
m = mosaic_manifest(args.bucket, args.ee_dst)
m.make()
m.save(args.manifest_dst)
m.upload()
```
|
{
"source": "jdillard/sphinxcontrib-drawio",
"score": 2
}
|
#### File: sphinxcontrib-drawio/tests/test_update.py
```python
import shutil
from pathlib import Path
import pytest
from sphinx.application import Sphinx
SIMPLE_EXPORTED_FNAME = "drawio-bf0f85b68784bab0e62bf5902f5a46b65d71ee70.png"
# deprecated drawio directive test
@pytest.mark.sphinx("html", testroot="simple", srcdir="notchanged")
def test_notchanged(content: Sphinx, make_app_with_local_user_config):
exported = content.outdir / "_images" / SIMPLE_EXPORTED_FNAME
exported_timestamp = exported.stat().st_mtime
app = make_app_with_local_user_config(srcdir=content.srcdir)
app.build()
assert exported.stat().st_mtime == exported_timestamp
# deprecated drawio directive test
@pytest.mark.sphinx("html", testroot="simple", srcdir="changed")
def test_changed(content: Sphinx, make_app_with_local_user_config):
source = Path(content.srcdir / "box.drawio")
exported = content.outdir / "_images" / SIMPLE_EXPORTED_FNAME
exported_timestamp = exported.stat().st_mtime
source.touch()
app = make_app_with_local_user_config(srcdir=content.srcdir)
app.build()
assert exported.stat().st_mtime > exported_timestamp
@pytest.mark.sphinx("html", testroot="image", srcdir="image_notchanged")
def test_image_notchanged(content: Sphinx, make_app_with_local_user_config):
exported = content.outdir / "_images" / "box.svg"
exported_timestamp = exported.stat().st_mtime
app = make_app_with_local_user_config(srcdir=content.srcdir)
app.build()
assert exported.stat().st_mtime == exported_timestamp
@pytest.mark.sphinx("html", testroot="image", srcdir="image_changed")
def test_image_changed(content: Sphinx, make_app_with_local_user_config):
box = Path(content.srcdir / "box.drawio")
exported = content.outdir / "_images" / "box.svg"
exported_timestamp = exported.stat().st_mtime
circle = Path(content.srcdir / "circle.drawio")
shutil.copy(circle, box)
app = make_app_with_local_user_config(srcdir=content.srcdir)
app.build()
assert exported.stat().st_mtime > exported_timestamp
```
|
{
"source": "jdills26/cse6242-project-code",
"score": 3
}
|
#### File: Scripts/schools/greatschoolsratings.py
```python
import requests
import pandas as pd
from io import StringIO, BytesIO
from lxml import etree as et
API_KEY = '<GREATSCHOOLS.ORG API KEY GOES HERE>'
def generate_file(name, response):
    rows = []
    tree = et.fromstring(response.content)
    for child in tree:
        # one dict of tag -> text per school element, so fields never carry over between rows
        rows.append({str(children.tag): str(children.text) for children in child})
    # build the DataFrame in one pass; DataFrame.append was removed in pandas >= 2.0
    df = pd.DataFrame(rows)
    df.to_csv(name + '.csv', sep=',')
if __name__ == "__main__":
elem_url = 'http://api.greatschools.org/schools/DC/Washington/public/elementary-schools?limit=-1&key={}'.format(API_KEY)
middle_url = 'http://api.greatschools.org/schools/DC/Washington/public/middle-schools?limit=-1&key={}'.format(API_KEY)
high_url = 'http://api.greatschools.org/schools/DC/Washington/public/high-schools?limit=-1&key={}'.format(API_KEY)
elem_schools = requests.get(elem_url)
middle_schools = requests.get(middle_url)
high_schools = requests.get(high_url)
generate_file('elementary', elem_schools)
generate_file('middle', middle_schools)
generate_file('high', high_schools)
```
|
{
"source": "jdimaria3/HearthstoneCS",
"score": 3
}
|
#### File: HearthstoneCS/server/controller.py
```python
import cherrypy
import re, json
from flask_restful import reqparse, abort, Api, Resource
class OptionsController(Resource):
def __init__(self):
print("starting Options Controller")
def OPTIONS(self, *args, **kargs):
return ""
class CardsController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self):
output=dict()
dbfIdList=self.cdb.get_cards()
dictList=list()
output["result"]= "success"
for dbfId in dbfIdList:
dictList.append(self.cdb.get_card(dbfId))
output["cards"]=dictList
return output
def post(self):
# Get json text
the_body= cherrypy.request.body.read().decode()
the_body= json.loads(the_body)
# Determine new dbfId
newID=max(self.cdb.cards.keys())
newID=newID+1
# Create a new list of attributes
myList=list()
        # Determine what type of card is being added.
myList.append(the_body["type"])
myList.append(the_body["name"])
myList.append(the_body["cost"])
myList.append(the_body["rarity"])
myList.append(the_body["class"])
if(the_body["type"]=="MINION"):
myList.append(the_body["attack"])
myList.append(the_body["health"])
myList.append(the_body["url"])
# Set the card given dbfId and list
self.cdb.set_card(newID, myList)
output={'result':'success', "dbfId" : newID}
return output
    def delete(self):
        self.cdb.cards = dict()
output={'result': 'success'}
return output
class CardsKeyController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self, dbfId):
output=dict()
output["card"]=self.cdb.get_card(int(dbfId))
output["result"]="success"
return output
def put(self, dbfId):
# Get json text
the_body= cherrypy.request.body.read().decode()
the_body= json.loads(the_body)
# Create a new list of attributes
myList=list()
        # Determine what type of card is being added.
myList.append(the_body["type"])
myList.append(the_body["name"])
myList.append(the_body["cost"])
myList.append(the_body["rarity"])
myList.append(the_body["class"])
if(the_body["type"]=="MINION"):
myList.append(the_body["attack"])
myList.append(the_body["health"])
myList.append(the_body["url"])
self.cdb.set_card(int(dbfId), myList)
output={'result':'success'}
return output
def delete(self, dbfId):
output=self.cdb.delete_card(int(dbfId))
return output
class MinionsController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self):
output=dict()
output["result"]= "success"
output["minions"]=self.cdb.get_minions()
return output
class MinionsAttackController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["minions"]=self.cdb.get_minions_attackRange(int(low),int(high))
return output
class MinionsHealthController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["minions"]=self.cdb.get_minions_healthRange(int(low),int(high))
return output
class SpellsController(Resource):
def __init__(self, cdb=None):
self.cdb = cdb
def get(self):
output=dict()
output["result"]= "success"
output["spells"]=self.cdb.get_spells()
return output
class CostController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,RANGE):
output=dict()
output["result"]= "success"
low=str(RANGE[0]) + str(RANGE[1])
high=str(RANGE[2]) + str(RANGE[3])
output["cards"]=self.cdb.get_cards_costRange(int(low),int(high))
return output
class NameController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,NAME):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_name(NAME)
return output
class ClassController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self,CLASS):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_class(CLASS)
return output
class RarityController(Resource):
def __init__(self, cdb=None):
self.cdb= cdb
def get(self, RARITY):
output=dict()
output["result"]= "success"
output["cards"]=self.cdb.get_cards_rarity(RARITY)
return output
```
|
{
"source": "jdimatteo/pytype",
"score": 2
}
|
#### File: pytype/pytype/context.py
```python
import contextlib
from typing import Dict, List, Tuple
from pytype import annotation_utils
from pytype import attribute
from pytype import config
from pytype import convert
from pytype import errors
from pytype import load_pytd
from pytype import matcher
from pytype import output
from pytype import special_builtins
from pytype import tracer_vm
from pytype.abstract import abstract
from pytype.typegraph import cfg
from pytype.typegraph import cfg_utils
class Context:
"""An abstract context."""
def __init__(
self,
errorlog: errors.ErrorLog,
options: config.Options,
loader: load_pytd.Loader,
generate_unknowns=False,
store_all_calls=False,
):
# Inputs
self.errorlog = errorlog
self.options = options
self.python_version: Tuple[int, int] = self.options.python_version
self.loader = loader
self.generate_unknowns = generate_unknowns
self.store_all_calls = store_all_calls
# Typegraph
self.program = cfg.Program()
self.root_node: cfg.CFGNode = self.program.NewCFGNode("root")
self.program.entrypoint = self.root_node
# Represents the program exit. Needs to be set before analyze_types.
self.exitpoint: cfg.CFGNode = None
# Helper classes/modules
self.vm = tracer_vm.CallTracer(self)
self.annotation_utils = annotation_utils.AnnotationUtils(self)
self.attribute_handler = attribute.AbstractAttributeHandler(self)
self.converter_minimally_initialized = False
self.convert = convert.Converter(self)
self.pytd_convert = output.Converter(self)
self.program.default_data = self.convert.unsolvable
# Other context
self.callself_stack: List[cfg.Variable] = []
# Map from builtin names to canonical objects.
self.special_builtins: Dict[str, abstract.BaseValue] = {
# The super() function.
"super": self.convert.super_type,
# The object type.
"object": self.convert.object_type,
# for more pretty branching tests.
"__random__": self.convert.primitive_class_instances[bool],
# for debugging
"reveal_type": special_builtins.RevealType(self),
# boolean values.
"True": self.convert.true,
"False": self.convert.false,
# builtin classes
"property": special_builtins.Property(self),
"staticmethod": special_builtins.StaticMethod(self),
"classmethod": special_builtins.ClassMethod(self),
}
# builtin functions
for cls in (
special_builtins.Abs,
special_builtins.AssertType,
special_builtins.HasAttr,
special_builtins.IsCallable,
special_builtins.IsInstance,
special_builtins.IsSubclass,
special_builtins.Next,
):
self.special_builtins[cls.name] = cls.make(self)
# If set, allow construction of recursive values, setting the
# self-referential field to Any
self.recursion_allowed = False
def matcher(self, node):
return matcher.AbstractMatcher(node, self)
@contextlib.contextmanager
def allow_recursive_convert(self):
old = self.recursion_allowed
self.recursion_allowed = True
try:
yield
finally:
self.recursion_allowed = old
def new_unsolvable(self, node):
"""Create a new unsolvable variable at node."""
return self.convert.unsolvable.to_variable(node)
def join_cfg_nodes(self, nodes):
"""Get a new node to which the given nodes have been joined."""
assert nodes
if len(nodes) == 1:
return nodes[0]
else:
ret = self.program.NewCFGNode(self.vm.frame and
self.vm.frame.current_opcode and
self.vm.frame.current_opcode.line)
for node in nodes:
node.ConnectTo(ret)
return ret
def join_variables(self, node, variables):
return cfg_utils.merge_variables(self.program, node, variables)
def join_bindings(self, node, bindings):
return cfg_utils.merge_bindings(self.program, node, bindings)
```
|
{
"source": "jdimol/Rawfie-IoT2Edge-Integration",
"score": 3
}
|
#### File: Rawfie-IoT2Edge-Integration/aeroloop_experiment/main.py
```python
import requests, avro_consumer, ngsi_translator
# The below fields have to be defined...
# Bootstrap server and schema registry urls (Avro Producer)
# urls = {
# 'bootstrap.servers': 'url:port',
# 'group.id': 'groupid',
# 'schema.registry.url': 'url:port'}
# Zookeaper topics
# topics = [] # ['topic_1', 'topic_2', 'topic_3', ..., 'topic_n']
# uav_name = '' # The name of the UAV
# POST NGSI data
def posttoOrion(data):
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
url = 'url' # Url has to be defined.
r = requests.post(url, json=data, headers=headers)
print(r.status_code)
return r
# Main Section of the experiment
ms = avro_consumer(urls, topics, uav_name)
data = to_ngsi(ms)
response = posttoOrion(data)
```
|
{
"source": "jdinh1/mbed-5570",
"score": 3
}
|
#### File: tools/test_configs/__init__.py
```python
from os.path import dirname, abspath, join
from tools.utils import json_file_to_dict
from tools.targets import TARGET_MAP
CONFIG_DIR = dirname(abspath(__file__))
CONFIG_MAP = json_file_to_dict(join(CONFIG_DIR, "config_paths.json"))
TARGET_CONFIGS = json_file_to_dict(join(CONFIG_DIR, "target_configs.json"))
def get_valid_configs(target_name):
if target_name in TARGET_CONFIGS:
target_config = TARGET_CONFIGS[target_name]
elif (target_name in TARGET_MAP and 'LWIP' in TARGET_MAP[target_name].features):
target_config = { "default_test_configuration": "ETHERNET", "test_configurations": ["ETHERNET"] }
else:
return {}
config_dict = {}
for attr in CONFIG_MAP:
if attr in target_config['test_configurations']:
config_dict[attr] = CONFIG_MAP[attr]
return config_dict
def get_config_path(conf_name, target_name):
configs = get_valid_configs(target_name)
if configs and conf_name.upper() in configs:
return join(CONFIG_DIR, configs[conf_name.upper()])
else:
return None
def get_default_config(target_name):
if target_name in TARGET_CONFIGS:
config_name = TARGET_CONFIGS[target_name]['default_test_configuration']
if config_name == "NONE":
return None
return join(CONFIG_DIR, CONFIG_MAP[config_name])
elif (target_name in TARGET_MAP and 'LWIP' in TARGET_MAP[target_name].features):
return join(CONFIG_DIR, CONFIG_MAP["ETHERNET"])
else:
return None
```
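A minimal sketch of how these helpers might be called; the target name `K64F` is an assumption for illustration and may not appear in `target_configs.json`:

```python
# Hypothetical usage; the target name is made up.
from tools.test_configs import get_valid_configs, get_config_path, get_default_config

configs = get_valid_configs("K64F")          # e.g. {"ETHERNET": "..."} or {}
path = get_config_path("ethernet", "K64F")   # name is upper-cased for lookup; None if absent
default = get_default_config("K64F")         # None when the default is "NONE"
```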
|
{
"source": "jdisanti/aws-sdk-rust",
"score": 3
}
|
#### File: tools/ci/crate-range.py
```python
import argparse
import json
import math
import os
import shlex
import subprocess
import sys
import unittest
COLOR_YELLOW = "\033[93m"
COLOR_RESET = "\033[0m"
class Crate:
def __init__(self, path, loc=None):
self.path = path
self.loc = loc
# Counts the lines of code on this crate and caches it
def count_lines(self):
if self.loc is None:
self.loc = int(get_cmd_output(
f"find {self.path} -name '*.rs' -exec wc -l {{}} \\; | \
cut -d' ' -f1 | \
paste -sd+ | \
bc",
shell=True
))
def __repr__(self):
return "Crate" + str((self.loc, self.path))
def __eq__(self, other):
return isinstance(other, Crate) and self.path == other.path
# Given the total number of jobs and batch count, this calculates the actual batch ranges
def calculate_batches(total_jobs, batch_count):
batch_size = math.ceil(total_jobs / batch_count)
return list(map(lambda i: (i, min(total_jobs, i + batch_size)), range(0, total_jobs, batch_size)))
# Splits a list into N lists
def split_list(list, n):
result = []
for batch in calculate_batches(len(list), n):
result.append(list[batch[0]:batch[1]])
return result
# Optimistically sorts the crate list in such a way that the total work per batch is more balanced.
# Accomplishes this by sorting by most lines of code to least lines of code, and then striping that
# list across batches. Finally, it combines those batches back into one list.
#
# IMPORTANT: This must be deterministic and return the same list for the same files every time
def organize_crate_list(batch_count, crates):
crates = sorted(crates, key=lambda c: c.loc, reverse=True)
    batches = [[] for _ in range(batch_count)]  # a plain `[[]] * n` would alias one shared inner list
for index, crate in enumerate(crates):
batches[index % len(batches)].append(crate)
result = []
for batch in batches:
result.extend(batch)
return result
# Lists all SDK crates including examples
def list_crates(repository_root):
to_examine = []
for path in ["sdk", "examples"]:
to_examine.extend(list(map(lambda p: f"{repository_root}/{path}/{p}", os.listdir(f"{repository_root}/{path}"))))
crates = []
for path in to_examine:
if os.path.isfile(f"{path}/Cargo.toml"):
crates.append(Crate(path))
return crates
# Entry point for the `generate-matrix` sub-command
def subcommand_generate_matrix(repository_root, batch_count, rust_versions):
crates = list_crates(repository_root)
batches = calculate_batches(len(crates), batch_count)
output = {
"crate_range": list(map(lambda b: f"-b {batch_count} -s {b[0]} -e {b[1]}", batches)),
"rust_version": rust_versions
}
print(json.dumps(output))
return 0
# Entry point for the `run` sub-command
def subcommand_run(repository_root, batch_count, start_inclusive, end_exclusive, command):
print(f"{COLOR_YELLOW}Determining crate list...{COLOR_RESET}")
crates = list_crates(repository_root)
if end_exclusive <= start_inclusive or end_exclusive < 0 or start_inclusive < 0:
print("Invalid range")
return 1
if start_inclusive >= len(crates):
print("Range start is invalid")
return 1
if end_exclusive > len(crates):
print("Range end is invalid")
return 1
for crate in crates:
crate.count_lines()
crates = organize_crate_list(batch_count, crates)
crates = crates[start_inclusive:end_exclusive]
print(f"{COLOR_YELLOW}Crates to run against:{COLOR_RESET}")
for crate in crates:
print(f"{COLOR_YELLOW} {crate.loc}\t{crate.path}{COLOR_RESET}")
completed = 0
for crate in crates:
print(f"{COLOR_YELLOW}Current crate: {crate.path}, loc: {crate.loc}, completed: {completed}, "
f"remaining: {len(crates) - completed}{COLOR_RESET}")
os.chdir(crate.path)
subprocess.run(command, check=True)
completed += 1
return 0
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(required=True, dest="subcommand")
subparsers.add_parser("self-test", help="Run unit tests for this script")
subparser_generate_matrix = subparsers.add_parser("generate-matrix", help="Generate a test matrix")
subparser_generate_matrix.add_argument("-b", type=int, dest="batches", required=True, help="Number of batches")
subparser_generate_matrix.add_argument("rust_versions", type=str, nargs=argparse.ONE_OR_MORE)
subparser_run = subparsers.add_parser("run", help="Run command on crate range")
subparser_run.add_argument("-b", required=True, type=int, dest="batches", help="Number of batches")
subparser_run.add_argument("-s", required=True, type=int, dest="start_inclusive", help="Range start inclusive")
subparser_run.add_argument("-e", required=True, type=int, dest="end_exclusive", help="Range end exclusive")
subparser_run.add_argument("cmd", type=str, nargs=argparse.ONE_OR_MORE)
args = parser.parse_args()
repository_root = get_cmd_output("git rev-parse --show-toplevel")
if args.subcommand == "self-test":
sys.argv.pop()
unittest.main()
return 0
elif args.subcommand == "generate-matrix":
return subcommand_generate_matrix(repository_root, args.batches, args.rust_versions)
elif args.subcommand == "run":
return subcommand_run(repository_root, args.batches, args.start_inclusive, args.end_exclusive, args.cmd)
def get_cmd_output(command, shell=False):
if not shell:
command = shlex.split(command)
result = subprocess.run(command, shell=shell, capture_output=True, check=True)
return result.stdout.decode("utf-8").strip()
class SelfTest(unittest.TestCase):
def test_split_list(self):
self.assertEqual([[1, 2], [3, 4], [5, 6]], split_list([1, 2, 3, 4, 5, 6], 3))
self.assertEqual([[1, 2], [3, 4], [5]], split_list([1, 2, 3, 4, 5], 3))
def test_calculate_batches(self):
self.assertEqual([(0, 2), (2, 4), (4, 5)], calculate_batches(5, 3))
self.assertEqual([(0, 2), (2, 4), (4, 6)], calculate_batches(6, 3))
self.assertEqual([(0, 56), (56, 111)], calculate_batches(111, 2))
self.assertEqual([(0, 111)], calculate_batches(111, 1))
self.assertEqual([(0, 1), (1, 2)], calculate_batches(2, 10))
def test_organize_crate_list(self):
self.assertEqual(
[
# batch 1
Crate("A", 3000),
Crate("C", 1000),
Crate("E", 500),
Crate("G", 200),
Crate("I", 50),
# batch 2
Crate("B", 2000),
Crate("D", 1000),
Crate("F", 300),
Crate("H", 100),
],
organize_crate_list(2, [
Crate("A", 3000),
Crate("B", 2000),
Crate("C", 1000),
Crate("D", 1000),
Crate("E", 500),
Crate("F", 300),
Crate("G", 200),
Crate("H", 100),
Crate("I", 50),
])
)
self.assertEqual(
[
# batch 1
Crate("A", 3000),
Crate("D", 1000),
Crate("G", 200),
# batch 2
Crate("B", 2000),
Crate("E", 500),
Crate("H", 100),
# batch 3
Crate("C", 1000),
Crate("F", 300),
Crate("I", 50),
],
organize_crate_list(3, [
Crate("A", 3000),
Crate("B", 2000),
Crate("C", 1000),
Crate("D", 1000),
Crate("E", 500),
Crate("F", 300),
Crate("G", 200),
Crate("H", 100),
Crate("I", 50),
])
)
if __name__ == "__main__":
sys.exit(main())
```
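A quick illustration of how the batching pieces compose (crate counts chosen arbitrarily, mirroring the `SelfTest` cases): with 5 crates and 3 batches, `calculate_batches` yields the half-open ranges that `generate-matrix` turns into `run` arguments.

```python
# Illustrative only; assumes the functions above are in scope.
ranges = calculate_batches(5, 3)                    # [(0, 2), (2, 4), (4, 5)]
args = [f"-b 3 -s {s} -e {e}" for s, e in ranges]   # ['-b 3 -s 0 -e 2', '-b 3 -s 2 -e 4', '-b 3 -s 4 -e 5']
```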
|
{
"source": "jdi-testing/jdi-python",
"score": 3
}
|
#### File: core/logger/jdi_logger.py
```python
import json
import logging
import logging.config
from JDI.core.settings.jdi_settings import PropertyPath
class JDILogger:
def __init__(self, filename="logging.json"):
self.config_filename = PropertyPath(filename).get_property_file()
with open(self.config_filename, "r") as fd:
logging.config.dictConfig(json.load(fd))
self.logger = logging.getLogger(__name__)
def __getattr__(self, name):
return getattr(self.logger, name)
```
#### File: web/os_action/jdi_win32.py
```python
import logging
import time
try:
import win32com.client
except ModuleNotFoundError as e:
logging.exception(e)
class jdi_win32:
@staticmethod
def paste_text(text):
shell = win32com.client.Dispatch("WScript.Shell")
time.sleep(3)
shell.Sendkeys(text)
shell.Sendkeys("~")
```
#### File: driver/utils/web_driver_by_utils.py
```python
class WebDriverByUtils:
@staticmethod
def contains_root(by):
        return by is not None and ": *root*" in str(by)
@staticmethod
def trim_root(by):
by[1] = by[1].replace("*root*", "").strip()
return by
@staticmethod
def fill_by_template(by, args):
by_locator = WebDriverByUtils.get_by_locator(by)
if "%" not in by_locator:
raise RuntimeError(WebDriverByUtils.get_bad_locator_msg(by_locator, args))
try:
by_locator = by_locator % args
except Exception as ex:
raise RuntimeError(ex, WebDriverByUtils.get_bad_locator_msg(by_locator, args))
return tuple([by[0], by_locator])
@staticmethod
def get_by_locator(by):
return by[1]
@staticmethod
def get_bad_locator_msg(by_locator, args):
return "Bad locator template '" + by_locator + "'. Args: " + "; ".join(args) + "."
```
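A small sketch of `fill_by_template` with a plain locator tuple; the locator string and argument below are made up for illustration:

```python
from JDI.web.selenium.driver.utils.web_driver_by_utils import WebDriverByUtils

# Hypothetical locator tuple; only the "%s" placeholder matters here.
by = ("id", "row-%s")
filled = WebDriverByUtils.fill_by_template(by, "7")
# filled == ("id", "row-7"); a template without "%" raises RuntimeError
```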
#### File: elements/base/base_element.py
```python
from JDI.web.selenium.elements.api_interact.get_element_module import \
GetElementModule
class BaseElement:
name = None
parent = None
avatar = None
def __init__(self, by_locator=None):
self.avatar = GetElementModule(by_locator, self)
def get_driver(self):
return self.avatar.get_driver()
def _get_type_name(self):
return self.__class__.__name__
def get_name(self):
return self.name if self.name is not None else self._get_type_name()
def get_parent(self):
return self.parent
def init(self, parent, avatar):
from JDI.web.selenium.elements.cascade_init import WebCascadeInit
WebCascadeInit.init_elements(self)
self.set_avatar(avatar)
self.set_parent(parent)
return self
def set_avatar(self, avatar):
self.avatar = avatar
return self
def set_parent(self, parent):
self.parent = parent
def get_locator(self):
return self.avatar.by_locator
def __str__(self):
s = "Name " + self.__class__.__name__
if "by_locator" in dir(self.avatar):
if self.avatar.by_locator is not None:
s += "; Locator: %s:'%s'" % (self.avatar.by_locator[0], self.avatar.by_locator[1])
if self.parent is not None:
if "avatar" in dir(self.parent):
if self.parent.avatar.by_locator is not None:
s += "; Parent: %s:'%s'" % (self.parent.avatar.by_locator[0], self.parent.avatar.by_locator[1])
return s
def has_locator(self):
return self.avatar.has_locator()
```
#### File: elements/base/clickable.py
```python
from JDI.core.utils.decorators import scenario
from JDI.web.selenium.elements.base.element import Element
class Clickable(Element):
def __init__(self, by_locator=None, web_element=None):
if by_locator is not None:
super(Clickable, self).__init__(by_locator=by_locator)
elif web_element is not None:
super(Clickable, self).__init__(web_element=web_element)
else:
super(Clickable, self).__init__()
@scenario(action_name="Click on Element for")
def click(self):
self.click_action()
def click_action(self):
self.get_element().click()
```
#### File: elements/base/clickable_text.py
```python
from JDI.web.selenium.elements.base.clickable import Clickable
from JDI.web.selenium.elements.common.text import Text
class ClickableText(Clickable, Text):
def __init__(self, by_locator=None, web_element=None):
if by_locator is not None:
super(ClickableText, self).__init__(by_locator=by_locator)
elif web_element is not None:
super(ClickableText, self).__init__(web_element=web_element)
else:
super(ClickableText, self).__init__()
```
#### File: elements/common/check_box.py
```python
from JDI.core.utils.decorators import scenario
from JDI.web.selenium.elements.base.clickable import Clickable
class CheckBox(Clickable):
def __init__(self, by_locator=None):
super(CheckBox, self).__init__(by_locator)
@scenario("Check element")
def check(self):
self.check_action()
def check_action(self):
if not self.is_check_action():
self.click()
@scenario("Uncheck element")
def uncheck(self):
self.uncheck_action()
def uncheck_action(self):
if self.is_check_action():
self.click()
@scenario(action_name="Get is checked")
def is_checked(self):
return self.is_check_action()
@scenario(action_name="Set value to the element")
def set_value(self, value):
self.set_value_action(value)
def set_value_action(self, value):
if str(value).lower() in ["true", "check", "1"]:
self.check()
elif str(value).lower() in ["false", "uncheck", "0"]:
self.uncheck()
def is_check_action(self):
return self.get_element().is_selected()
```
#### File: elements/common/file_input.py
```python
from JDI.web.selenium.elements.common.text_field import TextField
class FileInput(TextField):
def __init__(self, by_locator=None):
super(FileInput, self).__init__(by_locator)
def set_value_action(self, value):
self.input(value)
```
#### File: elements/common/image.py
```python
from JDI.core.utils.decorators import scenario
from JDI.web.selenium.elements.base.clickable import Clickable
class Image(Clickable):
def __init__(self, by_locator=None):
super(Image, self).__init__(by_locator)
@scenario(action_name="Get image source for Element")
def get_source(self):
return self.get_web_element().get_attribute("src")
@scenario(action_name="Get image title for Element")
def get_alt(self):
return self.get_web_element().get_attribute("alt")
```
#### File: elements/complex/menu.py
```python
from enum import Enum
from selenium.webdriver import ActionChains
from JDI.core.settings.jdi_settings import JDISettings
from JDI.core.utils.decorators import scenario
from JDI.web.selenium.elements.complex.selector import Selector
import logging
logger = logging.getLogger(__name__)
class Menu(Selector):
menu_levels_locators = list()
separator = "|"
parametrized_class = None
def __init__(self, by_options_name_locator_template=None,
by_all_options_names_locator=None,
by_menu_levels_locators=None,
parametrized_class=None):
super(Menu, self).__init__(by_option_locator_template=by_options_name_locator_template,
by_option_locator_all=by_all_options_names_locator)
if by_all_options_names_locator is not None:
self.menu_levels_locators.append(by_all_options_names_locator)
if by_menu_levels_locators is not None:
if not isinstance(by_menu_levels_locators, list):
raise TypeError("Please supply 'by_menu_levels_locators' param as list")
self.menu_levels_locators = by_menu_levels_locators
if parametrized_class is not None:
self.parametrized_class = parametrized_class
@scenario(action_name="Select elements '%s'", values_list={"value_from_function"})
def hover_and_click(self, names):
self.hover_and_click_action(names)
def hover_and_click_action(self, names):
if names is None or len(names) == 0:
return
split = names.split("|")
if len(split) > len(self.menu_levels_locators):
msg = "Can't hover and click on element ({0}) by value: {1}. Amount of locators ({2}) " \
"less than select path length ({3})".format(str(self), names,
len(self.menu_levels_locators),
len(split))
logger.error(msg)
raise Exception(msg)
self.hover(split[0:-1])
selector = Selector(list(self.menu_levels_locators)[-1])
selector.set_parent(self.get_parent())
selector.select(split[-1])
@scenario(action_name="Hover '%s'", values_list={"name"})
def hover(self, names):
if names is None or len(names) == 0:
return
self.hover_action(names)
def hover_action(self, names):
driver = JDISettings.get_driver_factory().get_driver()
self._choose_item_action(names, lambda el: ActionChains(driver).move_to_element(el).click_and_hold().perform())
def _choose_item_action(self, names, action):
if len(self.menu_levels_locators) == 0 and self.has_locator():
self.menu_levels_locators.append(self.get_locator())
if len(self.menu_levels_locators) < len(names): return
for i in range(0, len(names)):
els = Selector(list(self.menu_levels_locators)[i]).get_web_elements()
el = list(filter(lambda x: x.text == names[i], els))[0]
action(el)
def select_action(self, name):
if isinstance(name, Enum):
self.hover_and_click(name.value)
else:
self.hover_and_click(name)
```
#### File: complex/table/cell.py
```python
from JDI.web.selenium.driver.utils.web_driver_by_utils import WebDriverByUtils
from JDI.web.selenium.elements.api_interact.find_element_by import By
from JDI.web.selenium.elements.base.select_element import SelectElement
class Cell(SelectElement):
row_index = None
column_index = None
table = None
column_num = None
    row_num = None
web_element = None
column_name = None
row_name = None
cell_locator_template = By.xpath(".//tr[{1}]/td[{0}]")
def __init__(self, column_num, row_num, col_name, row_name, table,
cell_locator_template=None, web_element=None, column_index=None, row_index=None):
if web_element is not None:
self.web_element = web_element
if cell_locator_template is not None:
self.cell_locator_template = cell_locator_template
if column_index is not None:
self.column_index = column_index + 1 if table.rows.has_header and table.rows.by_line_template is None else column_index
if row_index is not None:
self.row_index = row_index
self.column_num = column_num
self.row_num = row_num
self.col_name = col_name
self.row_name = row_name
self.table = table
def set_web_element(self, web_element):
self.web_element = web_element
def update_data(self, col_name, row_name):
        if (self.column_name is None or self.column_name == "") and not (col_name is None or col_name == ""):
            self.column_name = col_name
        if (self.row_name is None or self.row_name == "") and not (row_name is None or row_name == ""):
            self.row_name = row_name
return self
def get_text_action(self):
return self.get().get_text()
def get(self):
cell = SelectElement(web_element=self.web_element) if self.web_element is not None else \
SelectElement(
WebDriverByUtils.fill_by_template(self.cell_locator_template, [self.column_index, self.row_index]))
cell.init(parent=self.table, avatar=cell.avatar)
return cell
```
#### File: complex/table/column.py
```python
from JDI.web.selenium.elements.complex.table.row_column import RowColumn
class Column(RowColumn):
def __init__(self, val):
RowColumn.__init__(self, val)
@staticmethod
def column(val):
return Column(val)
```
#### File: selenium/preconditions/web_preconditions.py
```python
from re import match
from JDI.web.selenium.settings.web_settings import WebSettings
class WebPreconditions:
@staticmethod
def check_url(url):
        return match(".*/" + str(url) + "(\\?.*)?", WebSettings.get_driver_factory().get_driver().current_url) is not None
@staticmethod
def open_uri(uri):
WebSettings.get_driver_factory().get_driver().get(uri)
```
#### File: selenium/settings/web_settings.py
```python
from selenium.webdriver.remote.command import Command
from JDI.core.logger.jdi_logger import JDILogger
from JDI.core.settings.jdi_settings import JDISettings
from JDI.web.selenium.driver.selenium_driver_factory import \
SeleniumDriverFactory
class WebSettings(JDISettings):
logger = JDILogger()
domain = JDISettings.get_domain()
@staticmethod
def get_driver_factory():
return JDISettings.get_driver_factory()
@staticmethod
def set_driver_factory(driver_factory):
JDISettings._driver_factory = driver_factory
@staticmethod
def use_driver(options=None, capabilities=None, executor=None):
driver_name = JDISettings.get_setting_by_name("driver")
JDISettings._driver_factory = SeleniumDriverFactory()
WebSettings.set_driver_factory(JDISettings._driver_factory)
return JDISettings._driver_factory.register_driver(driver_name, options, capabilities, executor)
@staticmethod
def quit_browser():
driver = WebSettings.get_driver_factory().get_driver()
driver.quit()
try:
driver.execute(Command.CLOSE)
except Exception:
pass
@staticmethod
def get_driver():
return WebSettings.get_driver_factory().get_driver()
```
#### File: page_objects/pages/home_page.py
```python
from JDI.web.selenium.elements.api_interact.find_element_by import By
from JDI.web.selenium.elements.common.image import Image
from JDI.web.selenium.elements.common.link import Link
from JDI.web.selenium.elements.common.text import Text
from JDI.web.selenium.elements.composite.web_page import WebPage
class HomePage(WebPage):
def __init__(self, url, title):
super(HomePage, self).__init__(url=url, title=title)
about = Link(By.link_text("About"))
logo_image = Image(By.css(".epam-logo img"))
text_item = Text(By.css(".main-txt"))
```
#### File: page_objects/sections/contact_form.py
```python
from JDI.web.selenium.elements.api_interact.find_element_by import By
from JDI.web.selenium.elements.common.button import Button
from JDI.web.selenium.elements.common.text_area import TextArea
from JDI.web.selenium.elements.common.text_field import TextField
from JDI.web.selenium.elements.composite.form import Form
class ContactForm(Form):
def __init__(self, by_locator=None):
super(ContactForm, self).__init__(by_locator)
first_name = TextField(By.id("Name"))
last_name = TextField(By.id("LastName"))
description = TextArea(By.id("Description"))
submit = Button(By.xpath("//*[text()='Submit']"))
def get_form_value(self):
return {self.first_name.get_text(), self.last_name.get_text(), self.description.get_text()}
class ContactFormTwoButtons(Form):
def __init__(self, by_locator=None):
super(ContactFormTwoButtons, self).__init__(by_locator)
first_name = TextField(By.id("Name"))
last_name = TextField(By.id("LastName"))
description = TextArea(By.id("Description"))
submit = Button(By.xpath("//*[text()='Submit']"))
calculate = Button(By.xpath("//*[text()='Calculate']"))
def get_form_value(self):
return {self.first_name.get_text(), self.last_name.get_text(), self.description.get_text()}
```
#### File: test/common/label_test.py
```python
import pytest
from JDI.jdi_assert.testing.assertion import Assert
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
from tests.jdi_uitests_webtests.test.init_tests import InitTests
@pytest.mark.web
class LabelTests(InitTests):
label = EpamJDISite.metals_colors_page.calculate_label
def setUp(self):
super(LabelTests, self).setUp(self.id().split(".")[-1])
Preconditions.METALS_AND_COLORS_PAGE.is_in_state()
def test_click(self):
self.label.click()
Assert.assert_element_test(self.label, "CALCULATE")
```
#### File: test/common/link_test.py
```python
import pytest
from JDI.core.settings.jdi_settings import JDISettings
from JDI.jdi_assert.testing.assertion import Assert
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
from tests.jdi_uitests_webtests.test.init_tests import InitTests
@pytest.mark.web
class LinkTests(InitTests):
link = EpamJDISite.footer.about_link
def setUp(self):
super(LinkTests, self).setUp(self.id().split(".")[-1])
Preconditions.HOME_PAGE.is_in_state()
def test_click(self):
self.link.click()
EpamJDISite.support_page.check_opened()
def test_get_reference(self):
Assert.assert_equal(self.link.get_reference(), EpamJDISite.support_page.url)
def test_wait_reference_test(self):
Preconditions.SUPPORT_PAGE.is_in_state()
EpamJDISite.home_page.open()
Assert.wait_assert_equal(lambda: self.link.get_reference(), EpamJDISite.support_page.url)
```
#### File: complex/table/search_rows_columns_test.py
```python
import pytest
from JDI.jdi_assert.testing.assertion import Assert
from JDI.web.selenium.elements.complex.table.column import Column
from JDI.web.selenium.elements.complex.table.row import Row
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
from tests.jdi_uitests_webtests.test.init_tests import InitTests
@pytest.mark.web
class SearchRowsColumnsTests(InitTests):
expected_column = (
"1:Selenium, Custom "
"2:TestNG, JUnit, Custom "
"3:TestNG, JUnit, Custom "
"4:Log4J, TestNG log, Custom "
"5:Jenkins, Allure, Custom "
"6:Custom "
)
expected_row = "Type:Test Runner " + "Now:TestNG, JUnit, Custom " + "Plans:MSTest, NUnit, Epam "
table = EpamJDISite.support_page.support_table
def setUp(self):
Preconditions.SUPPORT_PAGE.is_in_state()
def test_column_by_num(self):
actual_column = "".join(
list(map(lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()), self.table.column(2)))
)
Assert.assert_equal(actual_column, self.expected_column)
def test_column_by_name(self):
actual_column = "".join(
list(map(lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()), self.table.column("Now")))
)
Assert.assert_equal(actual_column, self.expected_column)
def test_row_by_num(self):
actual_column = "".join(list(map(lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()), self.table.row(2))))
Assert.assert_equal(actual_column, self.expected_row)
def test_row_by_name(self):
actual_column = "".join(list(map(lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()), self.table.row("2"))))
Assert.assert_equal(actual_column, self.expected_row)
def test_column_by_criteria_int(self):
actual_column = "".join(
list(
map(
lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()),
self.table.column("TestNG, JUnit, Custom", Row.row(3)),
)
)
)
Assert.assert_equal(actual_column, self.expected_column)
def test_column_by_criteria_name(self):
actual_column = "".join(
list(
map(
lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()),
self.table.column("TestNG, JUnit, Custom", Row.row("3")),
)
)
)
Assert.assert_equal(actual_column, self.expected_column)
def test_row_by_criteria_int(self):
actual_column = "".join(
list(
map(
lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()),
self.table.row("MSTest, NUnit, Epam", Column.column(3)),
)
)
)
Assert.assert_equal(actual_column, self.expected_row)
def test_row_by_criteria_string(self):
actual_column = "".join(
list(
map(
lambda x: "{0}:{1} ".format(str(x[0]), x[1].get_text()),
self.table.row("MSTest, NUnit, Epam", Column.column("Plans")),
)
)
)
Assert.assert_equal(actual_column, self.expected_row)
```
#### File: test/composite/form_two_buttons_test.py
```python
import pytest
from JDI.jdi_assert.testing.assertion import Assert
from tests.jdi_uitests_webtests.main.entities.contact import Contact
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
from tests.jdi_uitests_webtests.test.init_tests import InitTests
@pytest.mark.web
class FormTwoButtonsTests(InitTests):
form = EpamJDISite.contact_form_page.contact_form_two_buttons
contact = Contact("Ivan", "Ivanov", "Smart Man")
def setUp(self):
super(FormTwoButtonsTests, self).setUp(self.id().split(".")[-1])
Preconditions.CONTACT_PAGE.is_in_state()
def test_submit_spec_button_string(self):
self.form.submit_form(self.contact, "calculate")
Assert.wait_assert_equal(lambda: EpamJDISite.contact_form_page.result.get_text(), str(self.contact))
```
#### File: test/composite/page_test.py
```python
import pickle
import unittest
import pytest
from JDI.core.settings.jdi_settings import JDISettings
from JDI.jdi_assert.testing.assertion import Assert
from JDI.web.selenium.elements.composite.web_site import WebSite
from JDI.web.selenium.settings.web_settings import WebSettings
from tests.jdi_uitests_webtests.main.entities.user import User
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
import logging
logger = logging.getLogger(__name__)
@pytest.mark.web
class PageTests(unittest.TestCase):
def setUp(self):
logger.info("Run Test %s" % self.id().split(".")[-1])
WebSite.init(EpamJDISite)
logger.info("Run Tests")
EpamJDISite.home_page.open()
EpamJDISite.login_page.submit(User.default())
Preconditions.CONTACT_PAGE.is_in_state()
def test_refresh(self):
EpamJDISite.contact_form_page.contact_form.submit.click()
Assert.assert_equal(EpamJDISite.contact_form_page.result.get_text(), "Summary: 3")
EpamJDISite.contact_form_page.refresh()
Assert.assert_equal(EpamJDISite.contact_form_page.result.get_text(), "")
EpamJDISite.contact_form_page.check_opened()
def test_back(self):
EpamJDISite.home_page.open()
EpamJDISite.home_page.check_opened()
EpamJDISite.home_page.back()
EpamJDISite.contact_form_page.check_opened()
def test_forward(self):
EpamJDISite.home_page.open()
EpamJDISite.home_page.back()
EpamJDISite.contact_form_page.check_opened()
EpamJDISite.contact_form_page.forward()
EpamJDISite.home_page.check_opened()
def test_add_cookie(self):
cookie = {"name": "key", "value": "value"}
self.get_driver().delete_all_cookies()
Assert.assert_true(not len(self.get_driver().get_cookies()))
EpamJDISite.contact_form_page.add_cookie(cookie)
Assert.assert_equal(self.get_driver().get_cookie(cookie["name"])["value"], cookie["value"])
def test_clear_cache(self):
cookie = {"name": "key", "value": "value"}
EpamJDISite.contact_form_page.add_cookie(cookie)
Assert.assert_false(not len(self.get_driver().get_cookies()))
EpamJDISite.contact_form_page.clear_cache()
Assert.assert_true(not len(self.get_driver().get_cookies()))
def tearDown(self):
WebSettings.quit_browser()
def get_driver(self):
return JDISettings.get_driver_factory().get_driver()
```
#### File: test/composite/search_test.py
```python
import pytest
from tests.jdi_uitests_webtests.main.enums.preconditions import Preconditions
from tests.jdi_uitests_webtests.main.page_objects.epam_jdi_site import EpamJDISite
from tests.jdi_uitests_webtests.test.init_tests import InitTests
@pytest.mark.web
class SearchTests(InitTests):
def setUp(self):
super(SearchTests, self).setUp(self.id().split(".")[-1])
Preconditions.HOME_PAGE.is_in_state()
def test_fill(self):
EpamJDISite.header.search_section.find("something")
EpamJDISite.support_page.check_opened()
```
#### File: main/entities/entities.py
```python
from tests.jdi_uitest_web_examples.main.enums.enums import JobCategories, Locations
class JobSearchFilter:
def __init__(self):
self.keyword = "QA"
self.category = JobCategories.QA
self.location = Locations.MOSCOW
class Attendee:
def __init__(self):
self.filter = JobSearchFilter()
self.first_name = "Roman"
self.last_name = "Iovlev"
self.email = "<EMAIL>"
self.country = "Russian Federation"
self.city = "Moscow"
self.ca = ""
self.comment = "I WANT TO WORK IN EPAM!!!"
def __str__(self):
        return self.first_name + " " + self.last_name
class Job:
def __init__(self, name, category, location):
self.name = name
self.category = category
self.location = location
class User:
DEFAULT = {"UserTest", "Test Password"}
name = None
password = None
```
#### File: epam/custom_elements/tree_dropdown.py
```python
from enum import Enum
from JDI.web.selenium.elements.complex.base_selector import BaseSelector
from JDI.web.selenium.elements.complex.dropdown import Dropdown
class TreeDropdown(Dropdown):
tree_locators = list()
def __init__(self, by_select_locator, by_tree_locators=None):
super(TreeDropdown, self).__init__(
by_select_locator=by_select_locator, by_option_locator_template=by_select_locator
)
self.tree_locators = by_tree_locators
def select_action(self, names):
self.expand()
if isinstance(names, Enum):
split = list(map(lambda x: x.strip(), names.value.split(">")))
elif isinstance(names, str):
split = list(map(lambda x: x.strip(), names.split(">")))
else:
            raise TypeError("unsupported 'names' format; expected str or Enum")
for i in range(0, len(split)):
el = BaseSelector(by_option_locator_all=self.tree_locators[i])
el.set_parent(self)
web_el = el.get_element(split[i])
if "dropdown-invisible-group" not in web_el.find_element_by_xpath("..").get_attribute("class"):
web_el.click()
```
|
{
"source": "jdix531/stelligent-nu",
"score": 2
}
|
#### File: 04-vpcs/lab-4.1.8/cfn_generator.py
```python
import os
# prevents bools showing up as strings
os.environ['TROPO_REAL_BOOL']='true'
from troposphere import Export, GetAtt, Ref, Template, Tags, Sub, Select, GetAZs, Parameter, Output, ImportValue
import troposphere.ec2 as ec2
resource_tags=Tags(
Name=Sub("${AWS::StackName}"),
user="josh.dix.labs",
stelligent_u_lesson='lesson-4-1',
stelligent_u_lab='lab-1'
)
# "vpc stack"
def dump_base_yaml(cfn_file):
template=Template()
vpc_cidr_param=template.add_parameter(Parameter(
"vpcCidrParam",
Description="string of vpc cidr block to use",
Type="String",
))
subnet_cidr_param=template.add_parameter(Parameter(
"subnetCidrParam",
Description="string of subnet cidr block to use",
Type="String",
))
igw=template.add_resource(
ec2.InternetGateway(
"Igw",
Tags=resource_tags,
)
)
vpc=template.add_resource(
ec2.VPC(
"Vpc",
CidrBlock=Ref(vpc_cidr_param),
EnableDnsSupport=True,
EnableDnsHostnames=True,
InstanceTenancy="default",
Tags=resource_tags,
)
)
igwa=template.add_resource(
ec2.VPCGatewayAttachment(
"IgwA",
VpcId=Ref(vpc),
InternetGatewayId=Ref(igw),
)
)
route_tbl=template.add_resource(
ec2.RouteTable(
"RouteTable",
VpcId=Ref(vpc),
Tags=resource_tags,
)
)
default_route=template.add_resource(
ec2.Route(
"defaultRoute",
DestinationCidrBlock="0.0.0.0/0",
GatewayId=Ref(igw),
RouteTableId=Ref(route_tbl)
)
)
subnet=template.add_resource(
ec2.Subnet(
"Subnet",
VpcId=Ref(vpc),
CidrBlock=Ref(subnet_cidr_param),
MapPublicIpOnLaunch=True,
AvailabilityZone=Select(0, GetAZs()),
Tags=resource_tags,
)
)
route_tbl_asoc=template.add_resource(
ec2.SubnetRouteTableAssociation(
"RouteTblSubnetAsoc",
RouteTableId=Ref(route_tbl),
SubnetId=Ref(subnet)
)
)
priv_route_tbl=template.add_resource(
ec2.RouteTable(
"PrivRouteTable",
VpcId=Ref(vpc),
Tags=resource_tags,
)
)
priv_subnet=template.add_resource(
ec2.Subnet(
"PrivSubnet",
VpcId=Ref(vpc),
CidrBlock="10.10.1.0/24",
MapPublicIpOnLaunch=False,
AvailabilityZone=Select(0, GetAZs()),
Tags=resource_tags,
)
)
route_tbl_asoc=template.add_resource(
ec2.SubnetRouteTableAssociation(
"RouteTblPrivSubnetAsoc",
RouteTableId=Ref(priv_route_tbl),
SubnetId=Ref(priv_subnet)
)
)
ngw_elastic_ip = template.add_resource(
ec2.EIP(
"MyNgwEip",
Tags=resource_tags,
)
)
nat_gateway = template.add_resource(
ec2.NatGateway(
"MyNatGateway",
AllocationId=GetAtt(ngw_elastic_ip, "AllocationId"),
SubnetId=Ref(subnet),
)
)
private_out_route=template.add_resource(
ec2.Route(
"privateOutRoute",
DestinationCidrBlock="0.0.0.0/0",
NatGatewayId=Ref(nat_gateway),
RouteTableId=Ref(priv_route_tbl)
)
)
first_network_acl = template.add_resource(
ec2.NetworkAcl(
"MyFirstNetAcl",
Tags=resource_tags,
VpcId=Ref(vpc),
)
)
network_out_second_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPrivOutNetAclEntry",
NetworkAclId=Ref(first_network_acl),
CidrBlock="0.0.0.0/0",
Protocol=-1,
Egress=True,
RuleAction="allow",
RuleNumber=100,
)
)
network_inbound_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyInNetAclEntry",
NetworkAclId=Ref(first_network_acl),
CidrBlock="172.16.58.3/32",
Protocol=6,
RuleAction="allow",
RuleNumber=100,
PortRange=ec2.PortRange(From=22, To=22)
)
)
private_to_public_client_ports_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPriv2PubClientPortsNetAclEntry",
NetworkAclId=Ref(first_network_acl),
CidrBlock="10.10.1.0/24",
Protocol=6,
RuleAction="allow",
RuleNumber=101,
PortRange=ec2.PortRange(From=1024, To=65535)
)
)
public_to_internet_client_ports_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPub2DefaultClientPortsNetAclEntry",
NetworkAclId=Ref(first_network_acl),
CidrBlock="0.0.0.0/0",
Protocol=6,
RuleAction="allow",
RuleNumber=102,
PortRange=ec2.PortRange(From=1024, To=65535)
)
)
public_to_private_icmpv4_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPubIcmpv4NetAclEntry",
NetworkAclId=Ref(first_network_acl),
CidrBlock="10.10.1.0/24",
Protocol=1,
Icmp=ec2.ICMP(Code=-1, Type=-1),
RuleAction="allow",
RuleNumber=103
)
)
second_network_acl = template.add_resource(
ec2.NetworkAcl(
"MySecondNetAcl",
Tags=resource_tags,
VpcId=Ref(vpc),
)
)
network_out_second_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPriv2InternetClientPortsNetAclEntry",
NetworkAclId=Ref(second_network_acl),
CidrBlock="0.0.0.0/0",
Protocol=6,
RuleAction="allow",
RuleNumber=100,
PortRange=ec2.PortRange(From=1024, To=65535)
)
)
public_to_private_ssh_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPrivSshNetAclEntry",
NetworkAclId=Ref(second_network_acl),
CidrBlock="10.10.0.0/24",
Protocol=6,
RuleAction="allow",
RuleNumber=101,
PortRange=ec2.PortRange(From=22, To=22)
)
)
public_to_private_http_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPrivHttpNetAclEntry",
NetworkAclId=Ref(second_network_acl),
CidrBlock="10.10.0.0/24",
Protocol=6,
RuleAction="allow",
RuleNumber=102,
PortRange=ec2.PortRange(From=80, To=80)
)
)
private_to_public_icmpv4_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPrivIcmpv4NetAclEntry",
NetworkAclId=Ref(second_network_acl),
CidrBlock="10.10.0.0/24",
Protocol=1,
Icmp=ec2.ICMP(Code=-1, Type=-1),
RuleAction="allow",
RuleNumber=103
)
)
network_out_second_acl_entry = template.add_resource(
ec2.NetworkAclEntry(
"MyPubOutNetAclEntry",
NetworkAclId=Ref(second_network_acl),
CidrBlock="0.0.0.0/0",
Protocol=-1,
Egress=True,
RuleAction="allow",
RuleNumber=100,
)
)
subnet_nacl_asociation = template.add_resource(
ec2.SubnetNetworkAclAssociation(
"subNaclAsoc",
NetworkAclId=Ref(first_network_acl),
SubnetId=Ref(subnet)
)
)
priv_subnet_nacl_asociation = template.add_resource(
ec2.SubnetNetworkAclAssociation(
"privSubNaclAsoc",
NetworkAclId=Ref(second_network_acl),
SubnetId=Ref(priv_subnet)
)
)
template.add_output([
Output(
"VpcId",
Description="InstanceId of the newly created EC2 instance",
Value=Ref(vpc),
Export=Export("VpcId-jdix"),
),
Output(
"SubnetId",
Description="InstanceId of the newly created EC2 instance",
Value=Ref(subnet),
Export=Export("SubnetId-jdix"),
),
Output(
"PrivSubnetId",
Description="InstanceId of the newly created EC2 instance",
Value=Ref(priv_subnet),
Export=Export("PrivSubnetId-jdix"),
),
])
template_out_yaml(cfn_file, template)
# "instance stack"
def dump_lab_yaml(cfn_file):
template=Template()
key_name_param=template.add_parameter(Parameter(
"keyName",
Description="string of vpc cidr block to use",
Type="String",
))
ami_id_param=template.add_parameter(Parameter(
"AmiId",
Description="string of vpc cidr block to use",
Type="AWS::EC2::Image::Id"
))
instance_type_param=template.add_parameter(Parameter(
"InstanceType",
Description="string of vpc cidr block to use",
Type="String",
))
sg = template.add_resource(
ec2.SecurityGroup(
"MySg",
GroupDescription="who cares",
VpcId=ImportValue("VpcId-jdix"),
Tags=resource_tags,
)
)
sshIn = template.add_resource(
ec2.SecurityGroupIngress(
"MySshIn",
CidrIp="0.0.0.0/0",
IpProtocol="tcp",
FromPort=22,
ToPort=22,
GroupId=Ref(sg)
)
)
pingIn = template.add_resource(
ec2.SecurityGroupIngress(
"MyPingIn",
CidrIp="0.0.0.0/0",
IpProtocol="icmp",
FromPort=-1,
ToPort=-1,
GroupId=Ref(sg)
)
)
instance = template.add_resource(
ec2.Instance(
"MyInstance",
ImageId=Ref(ami_id_param),
SubnetId=ImportValue("SubnetId-jdix"),
InstanceType=Ref(instance_type_param),
KeyName=Ref(key_name_param),
Tags=resource_tags,
SecurityGroupIds=[Ref(sg)],
)
)
priv_instance = template.add_resource(
ec2.Instance(
"MyPrivInstance",
ImageId=Ref(ami_id_param),
SubnetId=ImportValue("PrivSubnetId-jdix"),
InstanceType=Ref(instance_type_param),
KeyName=Ref(key_name_param),
Tags=resource_tags,
SecurityGroupIds=[Ref(sg)],
)
)
instance_elastic_ip = template.add_resource(
ec2.EIP(
"MyEip",
InstanceId=Ref(instance),
Tags=resource_tags,
)
)
template.add_output([
Output(
"InstanceId",
Description="InstanceId of the newly created EC2 instance",
Value=Ref(instance),
Export=Export("InstanceId-jdix"),
),
Output(
"InstancePrivateIP",
Description="InstanceId of the newly created EC2 instance",
Value=GetAtt(instance, "PrivateIp"),
Export=Export("InstancePrivateIP-jdix"),
)
])
template_out_yaml(cfn_file, template)
def template_out_yaml(cfn_file, template):
with open(cfn_file, 'w') as f:
f.write(template.to_yaml())
```
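The module only defines the two generator functions, so a minimal driver might look like this (the output file names are arbitrary):

```python
# Hypothetical driver script; writes the two CloudFormation templates to disk.
from cfn_generator import dump_base_yaml, dump_lab_yaml

dump_base_yaml("vpc-stack.yaml")       # VPC, subnets, NAT gateway, NACLs
dump_lab_yaml("instance-stack.yaml")   # security group, two instances, EIP
```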
#### File: 09-lambda/lab-9.2.3/dynoquery.py
```python
import boto3
import json
from boto3.dynamodb.conditions import Key
def handler(event, context):
query = json.loads(event['body'])['query']
dynamo = boto3.client('dynamodb', region_name='us-east-2')
table_scan = dynamo.scan(TableName='MyDynamoTableName')
matched_results = []
for item in table_scan['Items']:
        for log_event in item['logEvents']['L']:
            if log_event['M']['message']['S'].find("\"bucketName\":\"" + query + "\"") != -1:
                matched_results.append(log_event['M']['message']['S'])
return {
'statusCode': 200,
'body': json.dumps(matched_results)
}
```
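The handler expects an API-Gateway-style event whose `body` is a JSON string containing a `query` field. A hedged local invocation might look like this (the bucket name is invented, and an actual run needs AWS credentials plus the DynamoDB table):

```python
# Illustrative local call; "my-data-bucket" is a made-up bucket name.
import json
from dynoquery import handler  # assumes the module is importable as dynoquery

event = {"body": json.dumps({"query": "my-data-bucket"})}
response = handler(event, context=None)
print(response["statusCode"], response["body"])
```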
|
{
"source": "jdixosnd/jsonl-to-conll",
"score": 3
}
|
#### File: jsonl-to-conll/jsonl_to_conll/io.py
```python
import json
def json_to_text(jsons, output_filename):
with open(output_filename, "w") as f:
for each_json in jsons:
for line in each_json:
f.writelines(" ".join(line) + "\n")
def read_jsonl(filename):
result = []
with open(filename, "r") as f:
for line in f.readlines():
result.append(json.loads(line))
return result
```
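For reference, `json_to_text` treats each entry as a list of token rows (lists of strings), so a small round-trip looks like this (the tags are invented for illustration):

```python
from jsonl_to_conll.io import json_to_text

# Illustrative only: two tokens with made-up CoNLL tags.
rows = [[["John", "B-PER"], ["lives", "O"]]]
json_to_text(rows, "example.conll")
# example.conll now contains:
# John B-PER
# lives O
```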
|
{
"source": "jdj2261/joystick-test",
"score": 3
}
|
#### File: src/ums_serial/ums_serial.py
```python
import time
import sys, os
import serial, serial.tools.list_ports
from serial.serialutil import SerialException
dir_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.dirname(dir_path))
class UmsSerial:
def __init__(self, port: str, baudrate: int, timeout: float):
self.port = port
self.device = None
self._serial = serial.Serial(baudrate=baudrate, timeout=timeout)
def __repr__(self) -> str:
return "<{cls}>".format(cls=self.__class__.__name__)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.disconnect()
def connect(self, test_mode) -> bool:
if test_mode:
return True
while not self._serial.isOpen():
ports = self.find_comports()
for port in ports:
if self.port == port.device:
self._serial.port = self.port
try:
self._serial.open()
except SerialException as e:
print(e)
time.sleep(1)
if self._serial.isOpen():
return True
return False
def disconnect(self) -> bool:
try:
self._serial.close()
except AttributeError:
print("open_comport has not been called yet!")
return False
else:
print("Closing...")
return True
@staticmethod
def find_comports() -> list:
# Make a list of all available ports on the system
available_ports = list(serial.tools.list_ports.comports())
ports = [port for port in available_ports]
return ports
def isOpen(self):
return self._serial.isOpen()
def write(self, data):
if not self.isOpen():
raise PortNotOpenError()
self._serial.write(serial.to_bytes(data))
    def read(self, length):
        return self._serial.read(length)
@property
def readline(self):
return self._serial.readline()
```
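A minimal sketch of driving `UmsSerial` as a context manager; the port name and baud rate are assumptions:

```python
from ums_serial import UmsSerial  # import path may differ depending on packaging

# Hypothetical port/baud; connect() blocks until the named port appears unless test_mode is True.
with UmsSerial("/dev/ttyUSB0", baudrate=115200, timeout=0.1) as ums:
    if ums.connect(test_mode=False):
        ums.write([0x53, 0x54, 0x58])  # raw bytes, converted via serial.to_bytes
```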
#### File: src/ums_xbox/protocol.py
```python
class Packet:
ESTOP = {'ESTOP_OFF' : 0x00 , 'ESTOP_ON' : 0x01}
GEAR = {'GEAR_D' : 0x00, 'GEAR_N' : 0x01, 'GEAR_R' : 0x02}
WHEEL = {'WHEEL_FRONT' : 0x00, 'WHEEL_ALL' : 0x01, 'WHEEL_REAR' : 0x02}
def __init__(self):
self.packet = [0 for _ in range(18)]
self.s = 0x53
self.t = 0x54
self.x = 0x58
self.accel_data = [0x00, 0x00]
self.brake_data = [0x00, 0x00]
self.steer_data = [0x00, 0x00, 0x00, 0x00]
self.alive = 0x00
self.checksum = 0x00
self.etx0 = 0x0D
self.etx1 = 0x0A
def makepacket(self, estop='ESTOP_OFF', gear='GEAR_N', wheel='WHEEL_FRONT') -> list:
self.packet[0] = self.s
self.packet[1] = self.t
self.packet[2] = self.x
self.packet[3] = self.ESTOP.get(estop)
self.packet[4] = self.GEAR.get(gear)
self.packet[5] = self.WHEEL.get(wheel)
self.packet[6] = self.accel_data[0]
self.packet[7] = self.accel_data[1]
self.packet[8] = self.brake_data[0]
self.packet[9] = self.brake_data[1]
self.packet[10] = self.steer_data[0]
self.packet[11] = self.steer_data[1]
self.packet[12] = self.steer_data[2]
self.packet[13] = self.steer_data[3]
self.packet[14] = self.alive
self.packet[15] = self.calc_checksum(self.packet[3:14])
self.packet[16] = self.etx0
self.packet[17] = self.etx1
return self.packet
@staticmethod
def calc_checksum(datas: list) -> int:
checksum = sum(datas) & 0xFF
return checksum
```
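A short example of building one frame; the E-stop/gear/wheel choices are arbitrary:

```python
from protocol import Packet  # import path may differ depending on packaging

# Illustrative frame: E-stop off, drive gear, front-wheel steering.
pkt = Packet().makepacket(estop='ESTOP_OFF', gear='GEAR_D', wheel='WHEEL_FRONT')
assert len(pkt) == 18 and pkt[:3] == [0x53, 0x54, 0x58]
assert pkt[15] == Packet.calc_checksum(pkt[3:14])
```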
|
{
"source": "jdj2261/lets-do-mujoco",
"score": 3
}
|
#### File: models/grippers/panda_gripper.py
```python
import numpy as np
from utils.mjcf_utils import xml_path_completion
from models.grippers.gripper import Gripper
class PandaGripperBase(Gripper):
"""
Gripper for Franka's Panda (has two fingers).
"""
def __init__(self):
super().__init__(xml_path_completion("grippers/panda_gripper.xml"))
def format_action(self, action):
return action
@property
def init_qpos(self):
return np.array([0.020833, -0.020833])
@property
def joints(self):
return ["finger_joint1", "finger_joint2"]
@property
def actuators(self):
return ["gripper_finger_joint1", "gripper_finger_joint2"]
@property
def dof(self):
return 2
@property
def visualization_sites(self):
return ["grip_site", "grip_site_cylinder"]
@property
def contact_geoms(self):
return ["hand_collision", "finger1_collision", "finger2_collision", "finger1_tip_collision", "finger2_tip_collision"]
@property
def left_finger_geoms(self):
return ["finger1_tip_collision"]
@property
def right_finger_geoms(self):
return ["finger2_tip_collision"]
class PandaGripper(PandaGripperBase):
"""
Modifies PandaGripperBase to only take one action.
"""
def format_action(self, action):
"""
1 => closed, -1 => open
"""
assert len(action) == 1
return np.array([-1 * action[0], 1 * action[0]])
@property
def dof(self):
return 1
```
#### File: models/grippers/robotiq_85_gripper.py
```python
import numpy as np
from utils.mjcf_utils import xml_path_completion
from models.grippers.gripper import Gripper
class Robotiq85GripperBase(Gripper):
"""
6-DoF Robotiq gripper.
"""
def __init__(self):
super().__init__(xml_path_completion("grippers/robotiq_gripper_85.xml"))
def format_action(self, action):
return action
@property
def init_qpos(self):
return [3.3161, 0., 0., 0., 0., 0.]
@property
def joints(self):
return ["finger_joint", "left_inner_finger_joint",
"left_inner_knuckle_joint", "right_outer_knuckle_joint",
"right_inner_finger_joint", "right_inner_knuckle_joint"]
@property
def actuators(self):
return ["finger_joint", "right_outer_knuckle_joint"]
@property
def dof(self):
return 6
@property
def visualization_sites(self):
return ["grip_site", "grip_site_cylinder"]
@property
def contact_geoms(self):
return [
"hand_collision",
"left_outer_knuckle_collision",
"left_outer_finger_collision",
"left_inner_finger_collision",
"left_fingertip_collision",
"left_inner_knuckle_collision",
"right_outer_knuckle_collision",
"right_outer_finger_collision",
"right_inner_finger_collision",
"right_fingertip_collision",
"right_inner_knuckle_collision",
]
@property
def left_finger_geoms(self):
return [
"left_outer_finger_collision",
"left_inner_finger_collision",
"left_fingertip_collision"
]
@property
def right_finger_geoms(self):
return [
"right_outer_finger_collision",
"right_inner_finger_collision",
"right_fingertip_collision"
]
class Robotiq85Gripper(Robotiq85GripperBase):
"""
1-DoF variant of RobotiqGripperBase.
"""
def format_action(self, action):
"""
Maps continuous action into binary output
-1 => open, 1 => closed
Args:
action (np.array): gripper-specific action
Raises:
AssertionError: [Invalid action dimension size]
"""
assert len(action) == 1
self.current_action = np.clip(self.current_action + self.speed * np.sign(action), -1.0, 1.0)
return self.current_action
@property
def speed(self):
return 0.01
@property
def dof(self):
return 1
```
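A standalone sketch of the incremental closing behaviour in `Robotiq85Gripper.format_action`: each call nudges the stored command by `speed` in the sign of the action and clips it to [-1, 1]. The loop and values are illustrative only and avoid loading the gripper XML.
```python
import numpy as np

current_action = np.zeros(1)  # stands in for self.current_action
speed = 0.01                  # matches the speed property above

def robotiq_step(action, current_action, speed):
    # Same clipped accumulation as the method above.
    assert len(action) == 1
    return np.clip(current_action + speed * np.sign(action), -1.0, 1.0)

for _ in range(5):
    current_action = robotiq_step(np.array([1.0]), current_action, speed)
print(current_action)  # ~[0.05]: the gripper closes gradually rather than snapping shut
```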
|
{
"source": "jdj2261/pykin",
"score": 2
}
|
#### File: pykin/models/urdf_link.py
```python
from pykin.utils.kin_utils import convert_string_to_narray, LINK_TYPES
class URDF_Link:
"""
Class of parsing link info described in URDF
"""
@staticmethod
def set_visual(elem_link, link_frame):
"""
Set link visual
"""
for elem_visual in elem_link.findall('visual'):
URDF_Link.set_visual_origin(elem_visual, link_frame)
URDF_Link.set_visual_geometry(elem_visual, link_frame)
URDF_Link.set_visual_color(elem_visual, link_frame)
@staticmethod
def set_collision(elem_link, link_frame):
"""
Set link collision
"""
for elem_collision in elem_link.findall('collision'):
URDF_Link.set_collision_origin(elem_collision, link_frame)
URDF_Link.set_collision_geometry(elem_collision, link_frame)
URDF_Link.set_collision_color(elem_collision, link_frame)
@staticmethod
def set_visual_origin(elem_visual, frame):
"""
Set link visual's origin
"""
for elem_origin in elem_visual.findall('origin'):
frame.link.visual.offset.pos = convert_string_to_narray(elem_origin.attrib.get('xyz'))
frame.link.visual.offset.rot = convert_string_to_narray(elem_origin.attrib.get('rpy'))
@staticmethod
def set_visual_geometry(elem_visual, frame):
"""
Set link visual's geometry
"""
def _set_link_visual_geom(shapes, frame):
"""
Set link visual's geometry
"""
if shapes.tag == "box":
frame.link.visual.gtype = shapes.tag
frame.link.visual.gparam = {"size" : convert_string_to_narray(shapes.attrib.get('size', None))}
elif shapes.tag == "cylinder":
frame.link.visual.gtype = shapes.tag
frame.link.visual.gparam = {"length" : shapes.attrib.get('length', 0),
"radius" : shapes.attrib.get('radius', 0)}
elif shapes.tag == "sphere":
frame.link.visual.gtype = shapes.tag
frame.link.visual.gparam = {"radius" : shapes.attrib.get('radius', 0)}
elif shapes.tag == "mesh":
frame.link.visual.gtype = shapes.tag
frame.link.visual.gparam = {"filename" : shapes.attrib.get('filename', None)}
else:
frame.link.visual.gtype = None
frame.link.visual.gparam = None
for elem_geometry in elem_visual.findall('geometry'):
for shape_type in LINK_TYPES:
for shapes in elem_geometry.findall(shape_type):
_set_link_visual_geom(shapes, frame)
@staticmethod
def set_visual_color(elem_visual, frame):
"""
Set link visual's color
"""
for elem_matrial in elem_visual.findall('material'):
for elem_color in elem_matrial.findall('color'):
rgba = convert_string_to_narray(elem_color.attrib.get('rgba'))
frame.link.visual.gparam['color'] = {elem_matrial.get('name') : rgba}
@staticmethod
def set_collision_origin(elem_collision, frame):
"""
Set link collision's origin
"""
for elem_origin in elem_collision.findall('origin'):
frame.link.collision.offset.pos = convert_string_to_narray(elem_origin.attrib.get('xyz'))
frame.link.collision.offset.rot = convert_string_to_narray(elem_origin.attrib.get('rpy'))
@staticmethod
def set_collision_geometry(elem_collision, frame):
"""
Set link collision's geometry
"""
def _set_link_collision_geom(shapes, frame):
if shapes.tag == "box":
frame.link.collision.gtype = shapes.tag
frame.link.collision.gparam = {"size" : convert_string_to_narray(shapes.attrib.get('size', None))}
elif shapes.tag == "cylinder":
frame.link.collision.gtype = shapes.tag
frame.link.collision.gparam = {"length" : shapes.attrib.get('length', 0),
"radius" : shapes.attrib.get('radius', 0)}
elif shapes.tag == "sphere":
frame.link.collision.gtype = shapes.tag
frame.link.collision.gparam = {"radius" : shapes.attrib.get('radius', 0)}
elif shapes.tag == "mesh":
frame.link.collision.gtype = shapes.tag
frame.link.collision.gparam = {"filename" : shapes.attrib.get('filename', None)}
else:
frame.link.collision.gtype = None
frame.link.collision.gparam = None
elem_geometry = elem_collision.find('geometry')
for shape_type in LINK_TYPES:
for shapes in elem_geometry.findall(shape_type):
_set_link_collision_geom(shapes, frame)
@staticmethod
def set_collision_color(elem_collision, frame):
"""
Set link visual's color
"""
for elem_matrial in elem_collision.findall('material'):
for elem_color in elem_matrial.findall('color'):
rgba = convert_string_to_narray(elem_color.attrib.get('rgba'))
frame.link.collision.gparam['color'] = {elem_matrial.get('name') : rgba}
```
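A small sketch of the XML shape that `URDF_Link` consumes, parsed with the standard library so it runs without pykin's frame objects; the element and attribute names mirror the `findall()` calls above, while the sample values are invented.
```python
import xml.etree.ElementTree as ET

visual_xml = """
<visual>
  <origin xyz="0 0 0.05" rpy="0 0 0"/>
  <geometry>
    <box size="0.1 0.2 0.3"/>
  </geometry>
  <material name="grey"><color rgba="0.5 0.5 0.5 1"/></material>
</visual>
"""
elem_visual = ET.fromstring(visual_xml)
for elem_origin in elem_visual.findall('origin'):
    print(elem_origin.attrib.get('xyz'), elem_origin.attrib.get('rpy'))  # 0 0 0.05 0 0 0
for elem_geometry in elem_visual.findall('geometry'):
    for shapes in elem_geometry.findall('box'):
        print(shapes.tag, shapes.attrib.get('size'))  # box 0.1 0.2 0.3
```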
#### File: pykin/planners/cartesian_planner.py
```python
import numpy as np
import pykin.utils.transform_utils as t_utils
import pykin.utils.kin_utils as k_utils
import pykin.kinematics.jacobian as jac
from pykin.planners.planner import Planner
from pykin.utils.error_utils import OriValueError, CollisionError
from pykin.utils.kin_utils import ShellColors as sc, logging_time
from pykin.utils.log_utils import create_logger
from pykin.utils.transform_utils import get_linear_interpoation, get_quaternion_slerp
logger = create_logger('Cartesian Planner', "debug",)
class CartesianPlanner(Planner):
"""
path planner in Cartesian space
Args:
robot(SingleArm or Bimanual): The manipulator robot type is SingleArm or Bimanual
self_collision_manager: CollisionManager for robot's self collision check
object_collision_manager: CollisionManager for collision check between robot and object
n_step(int): Number of waypoints
dimension(int): robot arm's dof
waypoint_type(str): Type of waypoint ex) "Linear", "Cubic", "Circular"
"""
def __init__(
self,
robot,
self_collision_manager=None,
object_collision_manager=None,
n_step=500,
dimension=7,
waypoint_type="Linear"
):
super(CartesianPlanner, self).__init__(
robot,
self_collision_manager,
object_collision_manager,
dimension)
self.n_step = n_step
self.waypoint_type = waypoint_type
self.eef_name = self.robot.eef_name
self.arm = None
self._dimension = dimension
super()._setup_q_limits()
super()._setup_eef_name()
def __repr__(self):
return 'pykin.planners.cartesian_planner.{}()'.format(type(self).__name__)
@logging_time
def get_path_in_joinst_space(
self,
current_q=None,
goal_pose=None,
waypoints=None,
resolution=1,
damping=0.5,
epsilon=1e-12,
pos_sensitivity=0.03,
is_slerp=False
):
self._cur_qpos = super()._change_types(current_q)
self._goal_pose = super()._change_types(goal_pose)
init_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
self._cur_pose = self.robot.get_eef_pose(init_fk)
self._resolution = resolution
self._damping = damping
self._pos_sensitivity = pos_sensitivity
self._is_slerp = is_slerp
if waypoints is None:
waypoints = self.generate_waypoints(is_slerp)
paths, target_positions = self._compute_path_and_target_pose(waypoints, epsilon)
return paths, target_positions
def _compute_path_and_target_pose(self, waypoints, epsilon):
cnt = 0
total_cnt = 10
while True:
cnt += 1
collision_pose = {}
cur_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
current_transform = cur_fk[self.eef_name].h_mat
eef_position = cur_fk[self.eef_name].pos
paths = [self._cur_qpos]
target_positions = [eef_position]
for step, (pos, ori) in enumerate(waypoints):
target_transform = t_utils.get_h_mat(pos, ori)
err_pose = k_utils.calc_pose_error(target_transform, current_transform, epsilon)
J = jac.calc_jacobian(self.robot.desired_frames, cur_fk, self._dimension)
J_dls = np.dot(J.T, np.linalg.inv(np.dot(J, J.T) + self._damping**2 * np.identity(6)))
dq = np.dot(J_dls, err_pose)
self._cur_qpos = np.array([(self._cur_qpos[i] + dq[i]) for i in range(self._dimension)]).reshape(self._dimension,)
is_collision_free = self._collision_free(self._cur_qpos)
if not is_collision_free:
_, name = self.self_c_manager.in_collision_other(other_manager=self.object_c_manager, return_names=True)
collision_pose[step] = (name, np.round(target_transform[:3,3], 6))
continue
if not self._check_q_in_limits(self._cur_qpos):
continue
cur_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
current_transform = cur_fk[self.robot.eef_name].h_mat
if step % (1/self._resolution) == 0 or step == len(waypoints)-1:
paths.append(self._cur_qpos)
target_positions.append(pos)
err = t_utils.compute_pose_error(self._goal_pose[:3], cur_fk[self.eef_name].pos)
if collision_pose.keys():
logger.error(f"Failed Generate Path.. Collision may occur.")
for name, pose in collision_pose.values():
logger.warning(f"\n\tCollision Names : {name} \n\tCollision Position : {pose}")
# logger.warning(f"Collision Position : {pose}")
raise CollisionError("Conflict confirmed. Check the object position!")
if err < self._pos_sensitivity:
logger.info(f"Generate Path Successfully!! Error is {err:6f}")
break
if cnt > total_cnt:
logger.error(f"Failed Generate Path.. The number of retries of {cnt} exceeded")
paths, target_positions = None, None
break
logger.error(f"Failed Generate Path.. Position Error is {err:6f}")
print(f"{sc.BOLD}Retry Generate Path, the number of retries is {cnt}/{total_cnt} {sc.ENDC}\n")
return paths, target_positions
# TODO
# generate cubic, circular waypoints
    def generate_waypoints(self, is_slerp):
        # Default to an empty list so unimplemented waypoint types do not raise UnboundLocalError.
        waypoints = []
        if self.waypoint_type == "Linear":
            waypoints = [path for path in self._get_linear_path(self._cur_pose, self._goal_pose, is_slerp)]
        if self.waypoint_type == "Cubic":
            pass
        if self.waypoint_type == "Circular":
            pass
        return waypoints
def get_waypoints(self):
return self.waypoints
def _change_pose_type(self, pose):
ret = np.zeros(7)
ret[:3] = pose[:3]
if isinstance(pose, (list, tuple)):
pose = np.asarray(pose)
ori = pose[3:]
if ori.shape == (3,):
ori = t_utils.get_quaternion_from_rpy(ori)
ret[3:] = ori
elif ori.shape == (4,):
ret[3:] = ori
else:
raise OriValueError(ori.shape)
return ret
def _get_linear_path(self, init_pose, goal_pose, is_slerp):
for step in range(1, self.n_step + 1):
delta_t = step / self.n_step
pos = get_linear_interpoation(init_pose[:3], goal_pose[:3], delta_t)
ori = init_pose[3:]
if is_slerp:
ori = get_quaternion_slerp(init_pose[3:], goal_pose[3:], delta_t)
yield (pos, ori)
def _get_cubic_path(self):
pass
def _get_cicular_path(self):
pass
@property
def resolution(self):
return self._resolution
@resolution.setter
def resolution(self, resolution):
self._resolution = resolution
@property
def damping(self):
return self._damping
@damping.setter
def damping(self, damping):
self._damping = damping
@property
def pos_sensitivity(self):
return self._pos_sensitivity
@pos_sensitivity.setter
def pos_sensitivity(self, pos_sensitivity):
self._pos_sensitivity = pos_sensitivity
@property
def is_slerp(self):
return self._is_slerp
@is_slerp.setter
def is_slerp(self, is_slerp):
self._is_slerp = is_slerp
```
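A standalone sketch of the damped least-squares update performed inside `_compute_path_and_target_pose`: dq = J^T (J J^T + damping^2 I)^-1 err. The Jacobian and pose error below are random placeholders rather than values computed from a robot model.
```python
import numpy as np

rng = np.random.default_rng(0)
J = rng.normal(size=(6, 7))          # 6D task-space error, 7-DoF arm
err_pose = rng.normal(size=(6, 1))   # stacked position + orientation error
damping = 0.5                        # same default as the planner above

J_dls = np.dot(J.T, np.linalg.inv(np.dot(J, J.T) + damping**2 * np.identity(6)))
dq = np.dot(J_dls, err_pose)
print(dq.shape)  # (7, 1): joint increment added to the current configuration each step
```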
#### File: pykin/planners/planner.py
```python
import numpy as np
from abc import abstractmethod, ABCMeta
from pykin.utils.log_utils import create_logger
from pykin.utils.error_utils import CollisionError, NotFoundError
logger = create_logger('Cartesian Planner', "debug",)
class Planner(metaclass=ABCMeta):
"""
Base Planner class
Args:
robot (SingleArm or Bimanual): The manipulator robot type is SingleArm or Bimanual
self_collision_manager: CollisionManager for robot's self collision check
object_collision_manager: CollisionManager for collision check between robot and object
dimension(int): robot arm's dof
"""
def __init__(
self,
robot,
self_collision_manager,
object_collision_manager,
dimension
):
self.robot = robot
self._dimension = dimension
if self_collision_manager is None:
logger.warning(f"This Planner does not do collision checking")
self.self_c_manager = None
else:
self.self_c_manager = self_collision_manager
check_collision = self.self_c_manager.in_collision_internal()
if check_collision:
raise CollisionError("Conflict confirmed. Check the joint settings again")
self.object_c_manager = object_collision_manager
def __repr__(self) -> str:
return 'pykin.planners.planner.{}()'.format(type(self).__name__)
@staticmethod
def _change_types(datas):
"""
"""
if not isinstance(datas, (np.ndarray)):
datas = np.array(datas)
if datas.size == 0:
raise NotFoundError("Make sure set current or goal joints..")
return datas
def _setup_q_limits(self):
"""
Setup joint limits (lower and upper)
"""
if self.arm is not None:
self.q_limits_lower = self.robot.joint_limits_lower[self.arm]
self.q_limits_upper = self.robot.joint_limits_upper[self.arm]
else:
self.q_limits_lower = self.robot.joint_limits_lower
self.q_limits_upper = self.robot.joint_limits_upper
def _check_q_in_limits(self, q_in):
"""
check q_in within joint limits
If q_in is in joint limits, return True
otherwise, return False
Returns:
bool(True or False)
"""
return np.all([q_in >= self.q_limits_lower, q_in <= self.q_limits_upper])
def _setup_eef_name(self):
"""
Setup end-effector name
"""
if self.arm is not None:
self.eef_name = self.robot.eef_name[self.arm]
def _collision_free(self, new_q, is_attached=False):
"""
Check collision free between robot and objects
Args:
new_q(np.array): new joint angles
Returns:
result(bool): If collision free, return True
names(set of 2-tup): The set of pairwise collisions.
"""
if self.self_c_manager is None:
return True
transformations = self._get_transformations(new_q)
if is_attached:
grasp_pose = transformations[self.robot.eef_name].h_mat
obj_pose = np.dot(grasp_pose, self.T_between_gripper_and_obj)
self.self_c_manager.set_transform(self.obj_info["name"], obj_pose)
        for link, transformation in transformations.items():
            if link in self.self_c_manager._objs:
                transform = transformation.h_mat
A2B = np.dot(transform, self.robot.links[link].visual.offset.h_mat)
self.self_c_manager.set_transform(name=link, transform=A2B)
is_self_collision = self.self_c_manager.in_collision_internal(return_names=False, return_data=False)
is_object_collision = self.self_c_manager.in_collision_other(other_manager=self.object_c_manager, return_names=False)
if is_self_collision or is_object_collision:
return False
return True
    @abstractmethod
def get_path_in_joinst_space(self):
"""
write planner algorithm you want
"""
raise NotImplementedError
    @abstractmethod
def _get_linear_path(self, init_pose, goal_pose):
raise NotImplementedError
def _get_transformations(self, q_in):
"""
Get transformations corresponding to q_in
Args:
q_in(np.array): joint angles
Returns:
transformations(OrderedDict)
"""
if self.robot.robot_name == "sawyer":
q_in = np.concatenate((np.zeros(1), q_in))
if self.arm is not None:
transformations = self.robot.forward_kin(q_in, self.robot.desired_frames[self.arm])
else:
transformations = self.robot.forward_kin(q_in)
return transformations
@property
def dimension(self):
return self._dimension
@dimension.setter
    def dimension(self, dimension):
        self._dimension = dimension
@property
def cur_qpos(self):
return self._cur_qpos
@cur_qpos.setter
def cur_qpos(self, cur_qpos):
self._cur_qpos = cur_qpos
@property
def goal_pose(self):
return self._goal_pose
@goal_pose.setter
def goal_pose(self, goal_pose):
self._goal_pose = goal_pose
```
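A quick, self-contained check of the joint-limit test that `_check_q_in_limits` relies on: every entry of `q_in` must fall between its lower and upper bound. The limits below are invented for illustration.
```python
import numpy as np

q_limits_lower = np.array([-2.9, -1.7, -2.9])
q_limits_upper = np.array([ 2.9,  1.7,  2.9])

def check_q_in_limits(q_in):
    # Same expression as the method above, with the limits captured from module scope.
    return np.all([q_in >= q_limits_lower, q_in <= q_limits_upper])

print(check_q_in_limits(np.array([0.0, 1.0, -2.0])))  # True
print(check_q_in_limits(np.array([0.0, 2.0, -2.0])))  # False: second joint exceeds 1.7
```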
#### File: pykin/robots/bimanual.py
```python
import numpy as np
from pykin.robots.robot import Robot
from pykin.utils.error_utils import NotFoundError
class Bimanual(Robot):
"""
Initializes a bimanual robot simulation object.
Args:
fname (str): path to the urdf file.
offset (Transform): robot init offset
"""
def __init__(
self,
fname: str,
offset=None
):
super(Bimanual, self).__init__(fname, offset)
self._setup_input2dict()
self._set_joint_limits_upper_and_lower()
def _setup_input2dict(self):
"""
Setup dictionary name
"""
self._base_name = self._input2dict("")
self._eef_name = {}
self.desired_base_frame = self._input2dict(None)
self.desired_frames = self._input2dict(None)
self._frames = self._input2dict(None)
self._revolute_joint_names = self._input2dict(None)
self._target_pose = self._input2dict(None)
self.joint_limits_lower = self._input2dict(None)
self.joint_limits_upper = self._input2dict(None)
def _input2dict(self, inp):
"""
Helper function that converts an input that is either a single value or a list into a dict with keys for
each arm: "right", "left"
Args:
inp (str or list or None): Input value to be converted to dict
:Note: If inp is a list, then assumes format is [right, left]
Returns:
dict: Inputs mapped for each robot arm
"""
# First, convert to list if necessary
if not isinstance(inp, list):
inp = [inp for _ in range(2)]
# Now, convert list to dict and return
return {key: value for key, value in zip(self._arms, inp)}
def _set_joint_limits_upper_and_lower(self):
"""
Set joint limits upper and lower
"""
limits_lower = []
limits_upper = []
for joint, (limit_lower, limit_upper) in self.joint_limits.items():
limits_lower.append((joint, limit_lower))
limits_upper.append((joint, limit_upper))
for arm in self._arms:
self.joint_limits_lower[arm] = [
limit_lower for joint, limit_lower in limits_lower if arm in joint]
self.joint_limits_upper[arm] = [
limit_upper for joint, limit_upper in limits_upper if arm in joint]
def setup_link_name(self, base_name="", eef_name=None):
"""
Sets robot's link name
Args:
base_name (str): reference link name
eef_name (str): end effector name
"""
if "right" in eef_name:
self._base_name["right"] = base_name
self._eef_name["right"] = eef_name
self._set_desired_base_frame("right")
self._set_desired_frame("right")
if "left" in eef_name:
self._base_name["left"] = base_name
self._eef_name["left"] = eef_name
self._set_desired_base_frame("left")
self._set_desired_frame("left")
def _set_desired_base_frame(self, arm):
"""
Sets robot's desired base frame
Args:
arm (str): robot arm (right or left)
"""
if self.base_name[arm] == "":
self.desired_base_frame[arm] = self.root
else:
self.desired_base_frame[arm] = super().find_frame(self.base_name[arm] + "_frame")
def _set_desired_frame(self, arm):
"""
Sets robot's desired frame
Args:
arm (str): robot arm (right or left)
"""
self.desired_frames[arm] = super().generate_desired_frame_recursive(
self.desired_base_frame[arm],
self.eef_name[arm])
self._frames[arm] = self.desired_frames[arm]
self._revolute_joint_names[arm] = super().get_revolute_joint_names(self._frames[arm])
self._target_pose[arm] = np.zeros(len(self._revolute_joint_names[arm]))
def inverse_kin(self, current_joints, target_pose, method="LM", maxIter=1000):
"""
Returns joint angles obtained by computing IK
Args:
current_joints (sequence of float): input joint angles
target_pose (np.array): goal pose to achieve
method (str): two methods to calculate IK (LM: Levenberg-marquardt, NR: Newton-raphson)
maxIter (int): Maximum number of calculation iterations
Returns:
joints (np.array): target joint angles
"""
if not isinstance(target_pose, dict):
raise TypeError("Be sure to input the target pose in dictionary form.")
joints = {}
self._frames = self._input2dict(None)
self._revolute_joint_names = self._input2dict(None)
for arm in target_pose.keys():
if self.eef_name[arm]:
self._set_desired_frame(arm)
self._target_pose[arm] = self._convert_target_pose_type_to_npy(target_pose[arm])
joints[arm] = self.kin.inverse_kinematics(
self._frames[arm],
current_joints,
self._target_pose[arm],
method,
maxIter)
return joints
def _convert_target_pose_type_to_npy(self, value):
"""
convert input type to numpy array
Args:
value(list or tupe)
Returns:
np.array
"""
if isinstance(value, (list, tuple)):
value = np.array(value)
return value.flatten()
def get_eef_pose(self, transformations):
"""
Compute end effector's pose
Args:
transformations(OrderedDict)
Returns:
vals(dict)
"""
vals = {}
for arm in self.arm_type:
if self.eef_name[arm]:
vals[arm] = np.concatenate((transformations[self.eef_name[arm]].pos, transformations[self.eef_name[arm]].rot))
return vals
@property
def _arms(self):
"""
Returns name of arms used as naming convention throughout this module
Returns:
2-tuple: ('right', 'left')
"""
return ("right", "left")
@property
def arm_type(self):
"""
Return arm type
        If the number of eef_name entries is two, return the tuple ("right", "left"),
        otherwise return a single-element list (["right"] or ["left"])
Returns:
arm types (tuple or list)
"""
if len(self._eef_name.keys()) == 2:
return self._arms
elif "right" in self.eef_name.keys():
return ["right"]
elif "left" in self.eef_name.keys():
return ["left"]
else:
raise NotFoundError("Can not find robot's arm type")
@property
def base_name(self):
return self._base_name
@property
def eef_name(self):
return self._eef_name
@property
def active_joint_names(self):
return self._revolute_joint_names
```
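A standalone sketch of the `_input2dict` convention used throughout `Bimanual`: a scalar is duplicated for both arms, while a two-element list is read as `[right, left]`. The link names below are placeholders.
```python
def input2dict(inp, arms=("right", "left")):
    # Mirrors the helper above without needing a parsed URDF.
    if not isinstance(inp, list):
        inp = [inp for _ in range(2)]
    return {key: value for key, value in zip(arms, inp)}

print(input2dict("base"))                       # {'right': 'base', 'left': 'base'}
print(input2dict(["right_hand", "left_hand"]))  # {'right': 'right_hand', 'left': 'left_hand'}
```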
#### File: pykin/utils/error_utils.py
```python
class NotFoundError(Exception):
"""
Class of custom Exception about Not Found
Args:
data (all types): input data
"""
def __init__(self, data):
self.data = data
def __str__(self):
return f"Not Found {self.data}, please check the name again"
class CollisionError(Exception):
"""
Class of custom Exception about Collision
Args:
data (all types): input data
"""
def __init__(self, data):
self.data = data
def __str__(self):
return f"Check the collision.. {self.data}, please check settings again"
class LimitJointError(Exception):
"""
Class of custom Exception about Collision
Args:
data (all types): input data
"""
def __init__(self, *data):
self.data = data
def __str__(self):
return f"Check the joints.. {self.data}, please check current joints setting again"
class OriValueError(Exception):
"""
Class of custom Exception about Orientation Value
Args:
data (all types): input data
"""
def __init__(self, data):
self.data = data
def __str__(self):
        return "Expecting the shape of the orientation to be (3,), (3,3), or (4,), instead got: {}".format(self.data)
```
#### File: pykin/utils/kin_utils.py
```python
import numpy as np
import time
from pykin.kinematics.transform import Transform
JOINT_TYPE_MAP = {'revolute' : 'revolute',
'fixed' : 'fixed',
'prismatic' : 'prismatic'}
LINK_TYPE_MAP = {'cylinder' : 'cylinder',
'sphere' : 'sphere',
'box' : 'box',
'mesh' : 'mesh'}
LINK_TYPES = ['box', 'cylinder', 'sphere', 'capsule', 'mesh']
class ShellColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Baxter:
left_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
left_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])
right_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
right_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])
@staticmethod
def add_visual_link(link_transforms, f):
if "left_lower_shoulder" in f.link.name:
link_transforms["left_upper_elbow_visual"] = np.dot(link_transforms["left_lower_shoulder"],
Baxter.left_e0_fixed_offset)
if "left_lower_elbow" in f.link.name:
link_transforms["left_upper_forearm_visual"] = np.dot(link_transforms["left_lower_elbow"],
Baxter.left_w0_fixed_offset)
if "right_lower_shoulder" in f.link.name:
link_transforms["right_upper_elbow_visual"] = np.dot(link_transforms["right_lower_shoulder"],
Baxter.right_e0_fixed_offset)
if "right_lower_elbow" in f.link.name:
link_transforms["right_upper_forearm_visual"] = np.dot(link_transforms["right_lower_elbow"],
Baxter.right_w0_fixed_offset)
def convert_thetas_to_dict(active_joint_names, thetas):
"""
    Convert a sequence of joint angles into a dict keyed by the actuated joint names.
Args:
active_joint_names (list): actuated joint names
thetas (sequence of float): If not dict, convert to dict ex. {joint names : thetas}
Returns:
thetas (dict): Dictionary of actuated joint angles
"""
if not isinstance(thetas, dict):
        assert len(active_joint_names) == len(thetas), (
            f"the number of robot joint angles is {len(active_joint_names)}, "
            f"but the number of input joint angles is {len(thetas)}")
thetas = dict((j, thetas[i]) for i, j in enumerate(active_joint_names))
return thetas
def logging_time(original_fn):
"""
Decorator to check time of function
"""
def wrapper_fn(*args, **kwargs):
start_time = time.time()
result = original_fn(*args, **kwargs)
end_time = time.time()
print(f"WorkingTime[{original_fn.__name__}]: {end_time-start_time:.4f} sec\n")
return result
return wrapper_fn
def convert_transform(origin):
"""
Args:
origin (None or Transform): offset of object
Returns:
Transform: Returns Transform if origin is None
"""
if origin is None:
return Transform()
else:
return Transform(rot=origin.rot, pos=origin.pos)
def convert_string_to_narray(str_input):
"""
Args:
str_input (str): string
Returns:
np.array: Returns string to np.array
"""
if str_input is not None:
return np.array([float(data) for data in str_input.split()])
def calc_pose_error(tar_pose, cur_pose, EPS):
"""
Args:
tar_pos (np.array): target pose
cur_pos (np.array): current pose
EPS (float): epsilon
Returns:
np.array: Returns pose error
"""
pos_err = np.array([tar_pose[:3, -1] - cur_pose[:3, -1]])
rot_err = np.dot(cur_pose[:3, :3].T, tar_pose[:3, :3])
w_err = np.dot(cur_pose[:3, :3], rot_to_omega(rot_err, EPS))
return np.vstack((pos_err.T, w_err))
def rot_to_omega(R, EPS):
# referred p36
el = np.array(
[[R[2, 1] - R[1, 2]],
[R[0, 2] - R[2, 0]],
[R[1, 0] - R[0, 1]]]
)
norm_el = np.linalg.norm(el)
if norm_el > EPS:
w = np.dot(np.arctan2(norm_el, np.trace(R) - 1) / norm_el, el)
elif (R[0, 0] > 0 and R[1, 1] > 0 and R[2, 2] > 0):
w = np.zeros((3, 1))
else:
w = np.dot(np.pi/2, np.array([[R[0, 0] + 1], [R[1, 1] + 1], [R[2, 2] + 1]]))
return w
def limit_joints(joint_angles, lower, upper):
"""
Set joint angle limit
Args:
joint_angles (sequence of float): joint angles
lower (sequence of float): lower limit
upper (sequence of float): upper limit
Returns:
joint_angles (sequence of float): Returns limited joint angle
"""
if lower is not None and upper is not None:
for i in range(len(joint_angles)):
if joint_angles[i] < lower[i]:
joint_angles[i] = lower[i]
if joint_angles[i] > upper[i]:
joint_angles[i] = upper[i]
return joint_angles
```
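A numpy-only sanity check of the `rot_to_omega` math above: for a pure rotation about z by `theta`, the recovered angular term should be approximately `[0, 0, theta]`. The rotation matrix is constructed by hand, so no pykin objects are required.
```python
import numpy as np

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])

el = np.array([[R[2, 1] - R[1, 2]],
               [R[0, 2] - R[2, 0]],
               [R[1, 0] - R[0, 1]]])
norm_el = np.linalg.norm(el)
w = np.dot(np.arctan2(norm_el, np.trace(R) - 1) / norm_el, el)  # norm_el > EPS branch of rot_to_omega
print(w.ravel())  # ~[0.  0.  0.3]
```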
|
{
"source": "jdj2261/robosuite",
"score": 2
}
|
#### File: demo/99_etc/joint_control.py
```python
import numpy as np
import robosuite as suite
def relative2absolute_joint_pos_commands(goal_joint_pos, robot, kp, kd):
assert len(goal_joint_pos) == robot.dof
action = [0 for _ in range(robot.dof)]
curr_joint_pos = robot._joint_positions
curr_joint_vel = robot._joint_velocities
for i in range(robot.dof):
        action[i] = (goal_joint_pos[i] - curr_joint_pos[i]) * kp - curr_joint_vel[i] * kd
return action
result_qpos = np.array([np.pi / 2, 0, 0, 0, 0, 0])
def robosuite_simulation_controller_test(env, sim_time):
# Reset the env
env.reset()
robot = env.robots[0]
kp = 2
kd = 1.2
cnt = 0
is_reached = False
while True:
# if env.done:
# break
env.render()
action = relative2absolute_joint_pos_commands(
result_qpos, robot, kp, kd
)
pose_error = np.array([abs(result_qpos[i] - robot._joint_positions[i]) for i in range(robot.dof)])
# print(pose_error)
        if np.all(pose_error < 1e-2):
cnt += 1
print("is_reached")
is_reached = True
if is_reached:
if cnt%2 == 1:
result_qpos[0] = 0
elif cnt%2 == 0:
result_qpos[0] = np.pi/2
observation, reward, done, info = env.step(action)
# close window
env.close()
env = suite.make(
"Lift",
robots="UR5e",
gripper_types=None,
has_renderer=True,
has_offscreen_renderer=False,
use_camera_obs=False,
use_object_obs=False,
control_freq=50,
render_camera=None,
horizon=2000,
)
robosuite_simulation_controller_test(env, env.horizon)
```
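A standalone sketch of the PD law inside `relative2absolute_joint_pos_commands`: command = position error * kp - joint velocity * kd. The joint states below are placeholders rather than values read from a robosuite robot.
```python
import numpy as np

goal_joint_pos = np.array([np.pi / 2, 0.0, 0.0])
curr_joint_pos = np.array([1.0, 0.1, -0.2])
curr_joint_vel = np.array([0.05, 0.0, 0.0])
kp, kd = 2.0, 1.2  # same gains as the demo above

action = (goal_joint_pos - curr_joint_pos) * kp - curr_joint_vel * kd
print(action)  # larger position errors and smaller velocities give larger corrective commands
```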
|
{
"source": "jdjame/pyspark-boilerplate-mehdio",
"score": 2
}
|
#### File: jdjame/pyspark-boilerplate-mehdio/setup.py
```python
import os
import shutil
import zipfile
from setuptools import setup, find_packages
from distutils.cmd import Command
from distutils.command.install import install
from pipenv.project import Project
from pipenv.utils import convert_deps_to_pip
import subprocess
import os
PACKAGE_NAME = 'pyspark_boilerplate_mehdio'
SRC_FOLDER_NAME = 'src'
VERSION = '0.2'
ARTIFACTORY_USER = os.environ['ARTIFACTORY_USER']
ARTIFACTORY_API_KEY = os.environ['ARTIFACTORY_API_KEY']
ARTI_URI = "https://my.jfrog.artifactory/artifactory"
REPOSITORY = "my-repo-name"
pfile = Project(chdir=False).parsed_pipfile
requirements = convert_deps_to_pip(pfile['packages'], r=False)
test_requirements = convert_deps_to_pip(pfile['dev-packages'], r=False)
def zip_dir(directory, zipname, delete_flag=False):
"""
Compress a directory (ZIP file).
"""
if os.path.exists(directory):
outZipFile = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
# The root directory within the ZIP file.
rootdir = os.path.basename(directory)
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
# Write the file named filename to the archive,
# giving it the archive name 'arcname'.
filepath = os.path.join(dirpath, filename)
parentpath = os.path.relpath(filepath, directory)
arcname = os.path.join(rootdir, parentpath)
outZipFile.write(filepath, arcname)
if delete_flag is True :
print("deleting dir")
print(directory)
shutil.rmtree(directory)
outZipFile.close()
class BdistSpark(Command):
description = "create deps and project distribution files for spark_submit"
user_options = [
('requirement=', 'r', 'Install from the given requirements file. [default: requirements.txt]'),
('wheel-dir=', 'w', 'Build deps into dir. [default: spark_dist]')
]
def initialize_options(self):
self.requirement = 'Pipfile'
self.wheel_dir = PACKAGE_NAME+'-'+VERSION+'_spark_dist'
def finalize_options(self):
assert os.path.exists(self.requirement), (
"requirements file '{}' does not exist.".format(self.requirement))
def run(self):
if os.path.exists(self.wheel_dir):
shutil.rmtree(self.wheel_dir)
temp_dir = os.path.join(self.wheel_dir, '.temp')
os.makedirs(temp_dir)
# Install deps from pipfile to spark dist folder
print("Copying python deps as zip file")
deps_name_folder = self.wheel_dir+"/"+PACKAGE_NAME+"-"+VERSION+"-deps"
deps_install_cmd = "pipenv run pip install -r <(pipenv lock -r) --target "+deps_name_folder
subprocess.call(deps_install_cmd, shell=True, executable='/bin/bash')
zip_dir(deps_name_folder, deps_name_folder+".zip", True)
# Copying Source files
cmd = self.reinitialize_command('bdist_wheel')
cmd.dist_dir = temp_dir
self.run_command('bdist_wheel')
# make final rearrangements
for dirname, _, files in os.walk(self.wheel_dir):
for fname in files:
if not fname.startswith(PACKAGE_NAME):
os.remove(os.path.join(self.wheel_dir, fname))
else:
if fname.endswith('whl'):
os.renames(os.path.join(temp_dir, fname),
os.path.join(self.wheel_dir, '{}-{}.zip'.format(PACKAGE_NAME, VERSION)))
# Copy the main.py file
main_src_file = SRC_FOLDER_NAME+"/main.py"
main_dest_file = self.wheel_dir+"/main.py"
print("Copying main.py entry file in spark_dist from "+main_src_file+ " to "+main_dest_file)
shutil.copyfile(main_src_file, main_dest_file)
# Copy the jars
jar_src = "jars"
print("Copying jars... from /jars folder")
subprocess.call("cp -r "+jar_src+" "+self.wheel_dir+"/", shell=True, executable='/bin/bash')
zip_dir(self.wheel_dir, self.wheel_dir+".zip", False)
# Removing cache and build folder
print("Removing cache and build folders...")
if os.path.exists("build"):
shutil.rmtree("build")
if os.path.exists(PACKAGE_NAME+".egg-info"):
shutil.rmtree(PACKAGE_NAME+".egg-info")
print("*** Spark build is available at : "+self.wheel_dir+ " and also as single zip file ! ***")
class PublishArtifact(install):
description = "Publish Artifact"
user_options = install.user_options + [
("foo=", None, "jfrog user token as <EMAIL>:mytoken"),
]
def run(self):
print("Publishing Artifact ...")
wheel_dir_zip = PACKAGE_NAME+'-'+VERSION+'_spark_dist.zip'
pre_path_build = PACKAGE_NAME+"/"+VERSION
publish_cmd = '''
curl -u {artifactory_user}:{artifactory_api_key} \
-i -X PUT \
-T {wheel_dir_zip} \
{arti_uri}/{repository}/{pre_path_build}/{wheel_dir_zip}'''.format(artifactory_user=ARTIFACTORY_USER, artifactory_api_key=ARTIFACTORY_API_KEY, arti_uri=ARTI_URI, repository=REPOSITORY, pre_path_build=pre_path_build, wheel_dir_zip=wheel_dir_zip)
subprocess.call(publish_cmd, shell=True, executable='/bin/bash')
print("Publishing Artifact successful on {arti_uri}/{wheel_dir_zip}!".format(wheel_dir_zip=wheel_dir_zip, arti_uri=ARTI_URI))
setup(
name=PACKAGE_NAME,
version=VERSION,
description='an example project that shows how to build spark_submit deps',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(include=[SRC_FOLDER_NAME, SRC_FOLDER_NAME+'.*'],
exclude=['*.test.*', '*.test']),
install_requires=requirements,
tests_require=test_requirements,
package_data={
PACKAGE_NAME: ['../Pipfile']
},
cmdclass={
"bdist_spark": BdistSpark,
"publish_artifact": PublishArtifact
}
)
```
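A minimal, throwaway check of the `zip_dir` helper defined above, assuming it is importable (or pasted) into the current session; everything is written to a temporary directory.
```python
import os
import tempfile
import zipfile

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, "payload")
    os.makedirs(src)
    with open(os.path.join(src, "hello.txt"), "w") as f:
        f.write("hi")
    archive = os.path.join(tmp, "payload.zip")
    zip_dir(src, archive, delete_flag=False)  # helper from the setup.py above
    with zipfile.ZipFile(archive) as zf:
        print(zf.namelist())  # entries are rooted at the source directory name, e.g. 'payload/hello.txt'
```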
#### File: src/helpers/logging.py
```python
import os
import logging
import sys
class Log4j(object):
"""Wrapper class for Log4j JVM object.
:param spark: SparkSession object.
"""
def __init__(self, spark):
# get spark app details with which to prefix all messages
app_id = spark.sparkContext.getConf().get('spark.app.id')
app_name = spark.sparkContext.getConf().get('spark.app.name')
log4j = spark._jvm.org.apache.log4j
message_prefix = '<' + app_name + ' ' + app_id + '>'
self.logger = log4j.LogManager.getLogger(message_prefix)
def error(self, message):
"""Log an error.
:param: Error message to write to log
:return: None
"""
self.logger.error(message)
return None
def warn(self, message):
"""Log an warning.
:param: Error message to write to log
:return: None
"""
self.logger.warn(message)
return None
def info(self, message):
"""Log information.
:param: Information message to write to log
:return: None
"""
self.logger.info(message)
return None
class YarnLogger:
@staticmethod
def setup_logger():
if not 'LOG_DIRS' in os.environ:
sys.stderr.write('Missing LOG_DIRS environment variable, pyspark logging disabled')
return
file = os.environ['LOG_DIRS'].split(',')[0] + '/pyspark.log'
logging.basicConfig(filename=file, level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s')
def __getattr__(self, key):
return getattr(logging, key)
```
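A minimal sketch of wiring up `YarnLogger` outside a real YARN container: the `LOG_DIRS` environment variable the class expects is faked with a temporary directory. Assumes the class above is importable from the helpers module.
```python
import os
import tempfile

os.environ.setdefault('LOG_DIRS', tempfile.mkdtemp())  # stand-in for the YARN-provided log dir
YarnLogger.setup_logger()
logger = YarnLogger()
logger.info("pyspark worker logging is configured")  # attribute access is delegated to the logging module
```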
|
{
"source": "jdjebi/PassChecker",
"score": 3
}
|
#### File: jdjebi/PassChecker/passchecker.py
```python
import os
import sys
import re
import subprocess
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
def run(cmd):
return subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,shell=True)
def get_profiles():
get_profiles_name = "netsh wlan show profiles"
get_profile_pass = 'netsh wlan show profile name="{}" key=clear'
output = run(get_profiles_name) # Exécute la commande get_profiles_name_cmd
results = re.findall(r"(\?)?: (?P<name>.*)", output.stdout.read()) # Récupération du nom des profiles dans output.stdout
Profiles = [] #Liste des profiles Nom + Mot de passe
max_ = None
for i, result in enumerate(results):
profile_name = result[1]
if profile_name != '':
output = run(get_profile_pass.format(profile_name)) # Exécution de la commande get_profile_pass
lines = output.stdout.readlines()
key_line = lines[-11:-10][0] # Extraction de la ligne du mot de passe
key = re.search(r": (?P<pass>.*)",key_line) # Extraction du mot de passe
Profiles.append((profile_name,key.group('pass'))) # Ajout du couple (nom,mot de passe) à la liste des profiles
if max_ and i >= max_:
break
return Profiles
class Win(Tk):
def __init__(self):
super().__init__()
self.title("Mointi PassChecker")
self.container = Frame(self)
self.table = ttk.Treeview(self.container, columns=('name','pass'))
self.current_name = StringVar()
self.current_pass = StringVar()
self.initUI()
self.update()
        self.build_content()
def initUI(self):
table = self.table
container = self.container
img_wifi = Image.open("icons/wifi.png")
img_key = Image.open("icons/key.png")
self.wifi = ImageTk.PhotoImage(img_wifi)
self.key = ImageTk.PhotoImage(img_key)
# Colonnes
table.column("#0", width=60, stretch=NO)
table.heading('#0', text='n°')
table.heading('name', text='Nom du profil',image=self.wifi)
table.heading('pass', text='Mot de passe', image=self.key)
#TConfiguration des tags
table.tag_configure("bg1", background="#E8E8E8")
table.tag_configure("bg2", background="#fff")
table.tag_bind('item', '<ButtonRelease-1>', self.selectItem)
vsb = ttk.Scrollbar(container,orient="vertical",command=self.table.yview)
self.table.configure(yscrollcommand=vsb.set)
#Footer
Footer = Frame(self, height=30, highlightbackground="gray", highlightthickness=1, padx=10, pady=10)
profile_label = Label(Footer, text="Nom du profil:")
pass_label = Label(Footer, text="Mot de passe:")
profile_entry = ttk.Entry(Footer,textvariable=self.current_name)
pass_entry = ttk.Entry(Footer,textvariable=self.current_pass)
#Placement
container.pack(side=TOP, fill=BOTH, expand=1)
table.pack(side=LEFT, fill=BOTH, expand=1)
vsb.pack(side=RIGHT, fill=Y)
Footer.pack(side=BOTTOM, fill=X)
profile_label.grid(row=0, column=0, sticky=W)
profile_entry.grid(row=1, column=0, padx=5)
pass_label.grid(row=0, column=1, sticky=W)
pass_entry.grid(row=1, column=1)
def selectItem(self,e):
curItem = self.table.focus()
val = self.table.item(curItem)['values']
self.current_name.set(val[0])
self.current_pass.set(val[1])
    def build_content(self):
for i, profile in enumerate(get_profiles()):
tag = "bg1"
name = profile[0]
pass_ = profile[1]
if i % 2 :
tag = "bg2"
if pass_ == '1':
pass_ = "<PASSWORD>"
self.table.insert('', 'end', text=str(i+1), values=(name,pass_), tags=('item',tag))
def main():
app = Win()
app.mainloop()
if __name__ == "__main__":
main()
```
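A sketch of the parsing step inside `get_profiles()`, run against canned `netsh` output instead of invoking the command; the profile names are fictitious.
```python
import re

sample_output = """
Profiles on interface Wi-Fi:
    All User Profile     : HomeNetwork
    All User Profile     : CoffeeShop
"""
results = re.findall(r"(\?)?: (?P<name>.*)", sample_output)  # same pattern as get_profiles()
names = [name.strip() for _, name in results if name.strip()]
print(names)  # ['HomeNetwork', 'CoffeeShop']
```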
|
{
"source": "JDJGInc/Da-Discord-Bot",
"score": 3
}
|
#### File: JDJGInc/Da-Discord-Bot/ClientConfig.py
```python
import discord
import re
from discord.ext import commands
async def get_prefix(client,message):
extras = ["ddb*"]
comp = re.compile("^(" + "|".join(map(re.escape, extras)) + ").*", flags=re.I)
match = comp.match(message.content)
if match is not None:
extras.append(match.group(1))
return commands.when_mentioned_or(*extras)(client, message)
client = commands.Bot(command_prefix=(get_prefix),intents = discord.Intents.all())
```
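A standalone check of the prefix regex built in `get_prefix()`: messages starting with `ddb*` (case-insensitively) are matched, and the prefix the user actually typed is added back to the list.
```python
import re

extras = ["ddb*"]
comp = re.compile("^(" + "|".join(map(re.escape, extras)) + ").*", flags=re.I)
for content in ("ddb*help", "DDB*ping", "hello there"):
    match = comp.match(content)
    print(content, "->", match.group(1) if match else None)
# ddb*help -> ddb*, DDB*ping -> DDB*, hello there -> None
```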
|
{
"source": "JDJGInc/DeveloperGlobalChat",
"score": 2
}
|
#### File: DeveloperGlobalChat/cogs/global.py
```python
from discord.ext import commands
import utils
import cool_utils
import discord, re, random, asyncio
from better_profanity import profanity
import traceback
class GlobalChat(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._cd = commands.CooldownMapping.from_cooldown(3.0, 15.0, commands.BucketType.user)
async def cog_command_error(self, ctx, error):
if ctx.command and not ctx.command.has_error_handler():
await ctx.send(error)
import traceback
traceback.print_exc()
#I need to fix all cog_command_error
async def message_converter(self, message : discord.Message):
args = message.content
args = args or "Test Content"
try:
            for x in re.findall(r'<@!?([0-9]{15,20})>', args):
                user = await self.bot.try_user(int(x))
                # re.match only anchors at the start of the string, so substitute every
                # occurrence of this mention instead of relying on a possibly-None match.
                args = re.sub(rf'<@!?{x}>', f"@{user}", args)
except Exception as e:
traceback.print_exc()
            print(f"error occurred as {e}.")
ctx = await self.bot.get_context(message)
args = await commands.clean_content().convert(ctx, args)
args = profanity.censor(args, censor_char = "#")
args = cool_utils.Links.censor(content=args, censor="#")
return args
@commands.Cog.listener()
async def on_message(self, message):
#find out how to edit edited messages or who deleted them to enable syncing.
ctx = await self.bot.get_context(message)
if message.channel.id in self.bot.linked_channels and not message.author.bot and not ctx.valid and not ctx.prefix:
bucket = self._cd.get_bucket(message)
retry_after = bucket.update_rate_limit()
if retry_after:
await asyncio.sleep(15.0)
#slows down spam, now it just well wait 15 seconds if cooldown is triggered.
args = await self.message_converter(message)
if len(args) >= 6000:
                args = "Message Too Big, Author will be notified"
await ctx.send(f"{ctx.author.mention}, please use content less than 6000, either using a pastebin or something else, thanks")
embed = discord.Embed(title = f"{message.guild}",
description = f"{args}", color = 15428885, timestamp = message.created_at)
embed.set_author(name=f"{message.author}", icon_url = message.author.display_avatar.url)
if message.guild: embed.set_thumbnail(url = message.guild.icon.url if message.guild.icon else "https://i.imgur.com/3ZUrjUP.png")
for c in self.bot.linked_channels:
channel = self.bot.get_channel(c)
if c == message.channel.id:
continue
await channel.send(embed = embed)
@commands.has_permissions(manage_messages = True)
@commands.command(brief = "Adds yourself to the global chat with other developers", aliases = ["addlink"])
async def add_link(self, ctx):
if not ctx.guild:
            return await ctx.send("this is not a guild apparently, if it is report the problem to the developer thanks :D at JDJG Inc. Official#3493")
if not isinstance(ctx.channel, discord.TextChannel):
            return await ctx.send("you must use this in a text channel")
view = utils.BasicButtons(ctx, timeout = 30.0)
msg = await ctx.send("This adds a link to the current channel. Do you want to do this?", view = view)
await view.wait()
if view.value is None:
            return await msg.edit(content = "you didn't respond quickly enough")
        if not view.value:
            return await msg.edit(content = "Not linking your channel to the global chat.")
        await msg.edit(content = "I can now link your channel. Linking....")
row = await self.bot.db.fetchrow("SELECT * FROM linked_chat WHERE server_id = $1", ctx.guild.id)
if row:
await ctx.send("you already linked a channel, we'll update it right now.")
await self.bot.db.execute("UPDATE linked_chat SET channel_id = $1 WHERE server_id = $2", ctx.channel.id, ctx.guild.id)
self.bot.linked_channels.remove(row.get("channel_id"))
if not row:
await self.bot.db.execute("INSERT INTO linked_chat values ($1, $2)", ctx.guild.id, ctx.channel.id)
self.bot.linked_channels.append(ctx.channel.id)
        await msg.edit(content = "Linked channel :D")
@commands.has_permissions(manage_messages = True)
@commands.command(brief = "Adds yourself to the global chat with other developers", aliases = ["removelink"])
async def remove_link(self, ctx):
if not isinstance(ctx.channel, discord.TextChannel):
            return await ctx.send("you must use this in a text channel")
view = utils.BasicButtons(ctx, timeout = 30.0)
        msg = await ctx.send("This removes the link to the current channel. Do you want to do this?", view = view)
await view.wait()
if view.value is None:
            return await msg.edit(content = "you didn't respond quickly enough")
        if not view.value:
            return await msg.edit(content = "Not unlinking your channel to the global chat.")
        await msg.edit(content = "I can now unlink your channel, unlinking....")
row = await self.bot.db.fetchrow("SELECT * FROM linked_chat WHERE server_id = $1", ctx.guild.id)
if not row:
            return await ctx.send("Can't unlink from a channel that doesn't exist.")
        self.bot.linked_channels.remove(row.get("channel_id"))
        await self.bot.db.execute("DELETE FROM linked_chat WHERE server_id = $1", ctx.guild.id)
        await msg.edit(content = "Unlinked channel....")
@commands.command(brief = "gives you an invite to invite the bot", aliases = ["inv"])
async def invite(self, ctx):
minimial_invite = discord.utils.oauth_url(self.bot.user.id, permissions = discord.Permissions(permissions = 70635073))
embed = discord.Embed(title = "Invite link:", color = random.randint(0, 16777215))
embed.add_field(name = "Minimial permisions", value = f"{ minimial_invite}")
embed.set_thumbnail(url = self.bot.user.display_avatar.url)
embed.set_footer(text = f"not all features may work if you invite with minimal perms, if you invite with 0 make sure these permissions are in a Bots/Bot role.")
view = discord.ui.View()
view.add_item(discord.ui.Button(label = f"{self.bot.user.name}'s Minimial Permisions Invite", url = minimial_invite, style = discord.ButtonStyle.link))
await ctx.send(embed = embed, view = view)
@commands.command(brief = "rules")
async def rules(self, ctx):
await ctx.send("Please ask JDJG what the rules are.")
#move the rules into here.
@commands.command()
async def credits(self, ctx):
await ctx.send("DB provided by and ran by FrostiiWeeb#0400 \nAJTHATKID#0001 for his PFP \nJDJG Inc. Official#3493 as the owner and manager and programmer of the bot as well as FrostiiWeeb#0400 for also programming the bot. \nEndlessVortex#4547 and BenitzCoding#1317 Thank You!")
@commands.command(brief = "gives a link to the source")
async def source(self, ctx):
embed = discord.Embed(title = "Project at:\nhttps://github.com/GlobalChatDev/DeveloperGlobalChat !", description="you can also contact the owner if you want more info(by using the owner command) you can see who owns the bot. Please don't just copy the source code, cause this may cause issues with you or the user instead ask if you want to use my code or learn from my code and look to see if that's a valid command a.ka ask me first, then discord.py about the bot! Thanks :D", color = random.randint(0, 16777215))
embed.set_author(name = f"{self.bot.user}'s source code:", icon_url = self.bot.user.display_avatar.url)
await ctx.send(embed = embed)
def setup(bot):
bot.add_cog(GlobalChat(bot))
```
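A standalone sketch of the mention clean-up done in `message_converter`: each raw user mention is swapped for a plain `@Name` string. A dict stands in for the `bot.try_user` lookup, and the id below is fabricated.
```python
import re

fake_users = {"123456789012345678": "ExampleUser#0001"}  # stand-in for bot.try_user()
args = "hello <@123456789012345678>, welcome!"
for x in re.findall(r'<@!?([0-9]{15,20})>', args):
    user = fake_users.get(x, "unknown user")
    args = re.sub(rf'<@!?{x}>', f"@{user}", args)
print(args)  # hello @ExampleUser#0001, welcome!
```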
|
{
"source": "JDJGInc/JDBot",
"score": 2
}
|
#### File: JDBot/cogs/global.py
```python
from discord.ext import commands
import discord, re
from better_profanity import profanity
class Global(commands.Cog):
"Global Chat Commands"
def __init__(self, bot):
self.bot = bot
@commands.command(brief = "wait for it to release")
async def global_wip(self, ctx):
await ctx.send("currently global chat is WIP for JDBot.")
@commands.command(brief = "makes a global chat example message from your message", aliases = ["test_gc", "generate_message"])
async def test_globalchat(self, ctx, *, args = None):
args = args or "Test Content"
        for x in re.findall(r'<@!?([0-9]{15,20})>', args):
            user = await self.bot.try_user(int(x))
            # re.match only anchors at the start of the string, so substitute every
            # occurrence of this mention instead of relying on a possibly-None match.
            args = re.sub(rf'<@!?{x}>', f"@{user}", args)
args = await commands.clean_content().convert(ctx, args)
args = profanity.censor(args, censor_char = "#")
embed = discord.Embed(title=f"{ctx.guild}",
description = f"{args}", color = 15428885, timestamp = ctx.message.created_at)
embed.set_author(name=f"{ctx.author}", icon_url = ctx.author.display_avatar.url)
if ctx.guild: embed.set_thumbnail(url = ctx.guild.icon.url if ctx.guild.icon else "https://i.imgur.com/3ZUrjUP.png")
if not ctx.guild: embed.set_thumbnail(url = "https://i.imgur.com/3ZUrjUP.png")
await ctx.send(f"Here's what it would look like in Global Chat!", embed = embed)
def setup(bot):
bot.add_cog(Global(bot))
```
#### File: JDBot/cogs/help.py
```python
from discord.ext import commands
import utils
import itertools
class JDBotHelp(commands.MinimalHelpCommand):
async def send_pages(self):
menu = utils.SendHelp(self.paginator.pages, ctx = self.context, delete_message_after = True)
await menu.send(self.context.channel)
async def send_bot_help(self, mapping):
ctx = self.context
bot = ctx.bot
if bot.description:
self.paginator.add_line(bot.description, empty = True)
note = self.get_opening_note()
if note:
self.paginator.add_line(note, empty=True)
no_category = f'\u200b{self.no_category}'
def get_category(command, *, no_category=no_category):
cog = command.cog
return f"__**{cog.qualified_name}:**__ \n{cog.description}" if cog is not None else no_category
filtered = await self.filter_commands(bot.commands, sort = True, key=get_category)
to_iterate = itertools.groupby(filtered, key=get_category)
for category, Commands in to_iterate:
self.paginator.add_line(category)
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
class Help(commands.Cog):
"The Help Menu Cog"
def __init__(self, bot):
self.bot = bot
self._original_help_command = bot.help_command
self.bot.help_command = JDBotHelp()
self.bot.help_command.cog = self
def cog_unload(self):
self.help_command = self._original_help_command
def setup(bot):
bot.add_cog(Help(bot))
```
#### File: JDBot/cogs/jsk.py
```python
from discord.ext import commands
from jishaku.cog import STANDARD_FEATURES, OPTIONAL_FEATURES
from jishaku.features.baseclass import Feature
from jishaku.codeblocks import codeblock_converter
from jishaku.exception_handling import ReplResponseReactor
from jishaku.repl import AsyncCodeExecutor, get_var_dict_from_ctx
from jishaku.functools import AsyncSender
#look into making more jishaku commands: https://jishaku.readthedocs.io/en/latest/cog.html
class Jishaku(*OPTIONAL_FEATURES, *STANDARD_FEATURES):
@Feature.Command(parent="jsk", name="py", aliases=["python"])
async def jsk_python(self, ctx: commands.Context, *, argument: codeblock_converter):
arg_dict = get_var_dict_from_ctx(ctx, '')
arg_dict.update(get_var_dict_from_ctx(ctx, '_'))
arg_dict["_"] = self.last_result
scope = self.scope
try:
async with ReplResponseReactor(ctx.message):
with self.submit(ctx):
executor = AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict)
async for send, result in AsyncSender(executor):
if result is None:
continue
self.last_result = result
send(await self.jsk_python_result_handling(ctx, result))
finally:
scope.clear_intersection(arg_dict)
def setup(bot: commands.Bot):
bot.add_cog(Jishaku(bot = bot))
```
#### File: JDBot/cogs/webhook.py
```python
from discord.ext import commands
import discord, re, random, aiohttp
class Webhook(commands.Cog):
"Commands dealing with webhooks"
def __init__(self, bot):
self.bot = bot
@commands.command(brief="a way to send stuff to webhooks.",help = "this uses webhook urls, and sends stuff to them")
async def webhook(self, ctx, *, args=None):
if args is None:
await ctx.send("You didn't send anything")
if args:
check=re.match(r"https://discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-<KEY>]{60,68})",args)
if check:
args = args.replace(f"{check.group()} ","")
if args == check.group():
args = "No Content"
session = self.bot.session
response=await session.get(check.group())
if response.status == 200:
webhook=discord.Webhook.from_url(check.group(), session=session)
embed = discord.Embed(title=f"Webhook {webhook.name}'s Message", color=random.randint(0, 16777215),timestamp=(ctx.message.created_at))
                    embed.add_field(name="Content:", value=args)
await webhook.send(embed=embed)
await ctx.send(f"Message was sent to the desired webhook channel.")
if response.status != 200:
await ctx.send("Not a valid link or an error occured")
if isinstance(ctx.channel, discord.TextChannel):
try:
await ctx.message.delete()
except:
await ctx.send("deleting the webhook failed, delete asap")
if not check:
await ctx.send("not a proper webhook url.")
@commands.command(brief="a way to create webhooks",help="make commands with this.")
async def webhook_create(self, ctx, arg = None, *, args = None):
if isinstance(ctx.channel, discord.TextChannel):
if ctx.author.guild_permissions.manage_webhooks:
if arg:
if args is None:
try:
webhook=await ctx.channel.create_webhook(name = arg)
except Exception as e:
return await ctx.send(f"give the bot manage webhook permissions for this to work and give the error to {e} if an issue.")
embed = discord.Embed(title=f"{ctx.author}'s message:",color=random.randint(0, 16777215),timestamp=(ctx.message.created_at))
embed.add_field(name="Content:",value="Test")
if args:
try:
webhook = await ctx.channel.create_webhook(name=arg,reason=args)
except Exception as e:
return await ctx.send(f"give the bot manage webhook permissions for this to work and give the error to {e} if an issue.")
embed = discord.Embed(title=f"{ctx.author}'s message:",color=random.randint(0, 16777215),timestamp=(ctx.message.created_at))
embed.add_field(name="Content:",value=args)
if ctx.message.attachments:
image=await ctx.message.attachments[0].read()
pass_test = True
try:
discord.utils._get_mime_type_for_image(image)
except discord.errors.InvalidArgument:
pass_test = False
if pass_test:
await webhook.edit(avatar=image)
if pass_test is False:
await ctx.send("not a valid image")
await webhook.send(embed=embed)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
try:
await ctx.author.send("Webhook url coming up")
await ctx.author.send(webhook.url)
except discord.Forbidden:
await ctx.send(f"We couldn't DM you {ctx.author.mention}")
if arg is None:
await ctx.send("You need to use values for it to work")
if ctx.author.guild_permissions.manage_webhooks is False:
await ctx.send("you can't use that.")
if isinstance(ctx.channel, discord.DMChannel):
await ctx.send("You can't use that silly")
@commands.command(brief = "tells you a webhook's avatar.")
async def webhook_avatar(self, ctx, *, args = None):
if not args:
return await ctx.send("You didn't give me an arguments to go over.")
check = re.match(r"https://discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})",args)
if check:
args = args.replace(f"{check.group()} ","")
if args == check.group():
session = self.bot.session
response=await session.get(check.group())
if not response.status != 200:
webhook=discord.Webhook.from_url(check.group(), session=session)
embed = discord.Embed(title = f"{webhook.name}"'s avatar:', color = random.randint(0, 16777215), timestamp = ctx.message.created_at)
embed.set_image(url = webhook.avatar.url)
await ctx.send(content = "Got the Webhook's avatar url",embed = embed)
if response.status != 200:
await ctx.send("Not a valid link or an error occured")
if isinstance(ctx.channel, discord.TextChannel):
try:
await ctx.message.delete()
except:
await ctx.send("deleting the webhook failed, delete asap")
@commands.command(brief = "deletes a webhook by url")
async def webhook_delete(self, ctx, *, args = None):
if not args:
return await ctx.send("You didn't give me an arguments to go over.")
check = re.match(r"https://discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,21})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})",args)
if check:
args = args.replace(f"{check.group()} ","")
if args == check.group():
session = self.bot.session
response=await session.get(check.group())
if not response.status != 200:
webhook = discord.Webhook.from_url(check.group(), session = session)
info = await response.json()
if not info.get("guild_id") or not info.get("channel_id"):
return await ctx.send(f"can't grab permissions from a {None} Guild or {None} Channel \nGuild ID: {webhook.guild_id}\nChannel ID: {webhook.channel_id}")
channel = self.bot.get_channel(int(info.get("channel_id")))
guild = self.bot.get_guild(int(info.get("guild_id")))
if not guild or not channel:
return await ctx.send("I can't check permissions of a guild that is none.")
member = await guild.try_member(ctx.author.id)
if member is None:
return await ctx.send("You don't exist in the guild that you used the webhook of.")
if channel.permissions_for(member).manage_webhooks:
try:
await webhook.delete()
await ctx.send(f"succeeded in deleting webhook in {guild} in {channel.mention}!")
except Exception as e:
await ctx.send(f"An error occured with reason:\n{e}")
if response.status != 200:
await ctx.send("Not a valid link or an error occured")
if isinstance(ctx.channel, discord.TextChannel):
try:
await ctx.message.delete()
except:
await ctx.send("deleting the webhook failed, delete asap unless it told you the link was deleted")
def setup(bot):
bot.add_cog(Webhook(bot))
```
#### File: JDBot/utils/checks.py
```python
import discord
def check(ctx):
def inner(m):
return m.author == ctx.author
return inner
def Membercheck(ctx):
def inner(m):
return m.author == ctx.guild.me
return inner
def warn_permission(ctx, Member):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages and ctx.author.top_role > Member.top_role and ctx.author.guild_permissions >= Member.guild_permissions
#bug with user with same permissions maybe and other stuff(seems fixed for right now, leaving note just in case.)
if isinstance(ctx.channel, discord.DMChannel):
return True
def cleanup_permission(ctx):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages
if isinstance(ctx.channel, discord.DMChannel):
return True
def mutual_guild_check(ctx, user):
mutual_guilds = set(ctx.author.mutual_guilds)
mutual_guilds2 = set(user.mutual_guilds)
return bool(mutual_guilds.intersection(mutual_guilds2))
async def filter_commands(ctx, command_list):
async def check(cmd, ctx):
try:
return await cmd.can_run(ctx)
except:
return False
return [cmd for cmd in command_list if await check(cmd, ctx)]
```
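A check factory like `check(ctx)` above is typically handed to `bot.wait_for`, which calls the returned predicate on every incoming message. The sketch below shows that wiring under a few assumptions: the `check` factory from this file is in scope, and the command name and timeout are invented.
```python
import asyncio
from discord.ext import commands

@commands.command()
async def ask(ctx):
    await ctx.send("Say something:")
    try:
        # check(ctx) returns the inner(m) predicate defined above
        reply = await ctx.bot.wait_for("message", check=check(ctx), timeout=30.0)
    except asyncio.TimeoutError:
        return await ctx.send("Timed out.")
    await ctx.send(f"You said: {reply.content}")
```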
#### File: JDBot/utils/embeds.py
```python
import collections, random, discord, aioimgur, sr_api, asyncdagpi, jeyyapi
import os
async def guildinfo(ctx, guild):
base_user = collections.Counter([u.bot for u in guild.members])
bots = base_user[True]
users = base_user[False]
base_animated = collections.Counter([e.animated for e in guild.emojis])
static_emojis = base_animated[False]
animated_emojis = base_animated[True]
base_available = collections.Counter([e.available for e in guild.emojis])
usable_emojis = base_available[True]
base_status = collections.Counter([x.status for x in guild.members])
online_users = base_status[discord.Status.online]
dnd_users = base_status[discord.Status.dnd]
idle_users = base_status[discord.Status.idle]
offline_users = base_status[discord.Status.offline]
embed = discord.Embed(title="Guild Info:", color = random.randint(0, 16777215))
embed.add_field(name="Server Name:", value=guild.name)
embed.add_field(name="Server ID:", value=guild.id)
embed.add_field(name = "Server Creation:", value = f"{discord.utils.format_dt(guild.created_at, style = 'd')}\n{discord.utils.format_dt(guild.created_at, style = 'T')}")
embed.add_field(name="Server Owner Info:", value = f"Owner : {guild.owner} \nOwner ID : {guild.owner_id}")
embed.add_field(name = "Member info", value = f"Member Count : {guild.member_count}\nUsers : {users} \nBots : {bots} ")
embed.add_field(name="Channel Count:", value = len(guild.channels))
embed.add_field(name="Role Count:", value = len(guild.roles))
embed.set_thumbnail(url = guild.icon.url if guild.icon else "https://i.imgur.com/3ZUrjUP.png")
embed.add_field(name="Emojis Info:", value = f"Limit : {guild.emoji_limit}\nStatic : {static_emojis} \nAnimated : {animated_emojis} \nTotal : {len(guild.emojis)}/{guild.emoji_limit*2} \nUsable : {usable_emojis}")
animated_value = guild.icon.is_animated() if guild.icon else False
embed.add_field(name="Max File Size:",value=f"{guild.filesize_limit/1000000} MB")
embed.add_field(name="Shard ID:",value=guild.shard_id)
embed.add_field(name="Animated Icon", value = f"{animated_value}")
embed.add_field(name="User Presences Info:", value = f"Online Users: {online_users} \nDND Users: {dnd_users} \nIdle Users : {idle_users} \nOffline Users : {offline_users}")
await ctx.send(embed=embed)
async def roleinfo(ctx, role):
role_members = collections.Counter([u.bot for u in role.members])
role_bots = role_members[True]
role_users = role_members[False]
if role.tags:
role_bot_id = role.tags.bot_id
if not role.tags:
role_bot_id = None
role_time = f"{discord.utils.format_dt(role.created_at, style = 'd')}{discord.utils.format_dt(role.created_at, style = 'T')}"
embed = discord.Embed(title = f"{role} Info:" ,color = random.randint(0, 16777215) )
embed.add_field(name = "Mention:", value = f"{role.mention}")
embed.add_field(name = "ID:", value = f"{role.id}")
embed.add_field(name = "Created at:", value = f"{role_time}")
embed.add_field(name="Member Count:", value = f"Bot Count : {role_bots} \nUser Count : {role_users}" )
embed.add_field(name = "Position Info:", value = f"Position : {role.position} \nHoisted : {role.hoist}")
embed.add_field(name = "Managed Info:", value = f"Managed : {role.managed} \nBot : {role.is_bot_managed()} \nBot ID : {role_bot_id} \nDefault : {role.is_default()} \nBooster Role : {role.is_premium_subscriber()} \nIntegrated : {role.is_integration()} \nMentionable : {role.mentionable} ")
embed.add_field(name = "Permissions:", value = f"{role.permissions.value}")
embed.add_field(name = "Color:", value = f"{role.colour}")
embed.set_thumbnail(url = "https://i.imgur.com/liABFL4.png")
embed.set_footer(text = f"Guild: {role.guild}")
await ctx.send(embed = embed)
async def triggered_converter(url, ctx):
sr_client = sr_api.Client(session = ctx.bot.session)
source_image=sr_client.filter(option="triggered", url = str(url))
imgur_client= aioimgur.ImgurClient(os.environ["imgur_id"],os.environ["imgur_secret"])
imgur_url= await imgur_client.upload_from_url(source_image.url)
embed = discord.Embed(color=random.randint(0, 16777215))
embed.set_author(name=f"Triggered gif requested by {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed.set_image(url = imgur_url["link"])
embed.set_footer(text="powered by some random api")
return embed
async def headpat_converter(url, ctx):
try:
client = jeyyapi.JeyyAPIClient(session = ctx.bot.session)
image = await client.patpat(url)
except Exception as e:
print(e)
return await ctx.send("the api failed on us. Please contact the Bot owner if this is a perstient issue.")
imgur_client = aioimgur.ImgurClient(os.environ["imgur_id"],os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(image)
embed=discord.Embed(color=random.randint(0, 16777215))
embed.set_author(name=f"Headpat gif requested by {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed.set_image(url=imgur_url["link"])
embed.set_footer(text = "powered by some jeyyapi")
return embed
def create_channel_permission(ctx):
return ctx.author.guild_permissions.manage_channels
def clear_permission(ctx):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages
if isinstance(ctx.channel,discord.DMChannel):
return False
async def invert_converter(url, ctx):
try:
sr_client = sr_api.Client(session = ctx.bot.session)
source_image = sr_client.filter("invert", url = str(url))
image = await source_image.read()
except:
return await ctx.send("the api failed on us. Please contact the Bot owner if this is a perstient issue.")
imgur_client = aioimgur.ImgurClient(os.environ["imgur_id"],os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(image)
embed=discord.Embed(color = random.randint(0, 16777215))
embed.set_author(name=f"Inverted Image requested by {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed.set_image(url=imgur_url["link"])
embed.set_footer(text="powered by some random api")
return embed
async def headpat_converter2(url, ctx):
dagpi_client = asyncdagpi.Client(os.environ["dagpi_key"], session = ctx.bot.session)
image=await dagpi_client.image_process(asyncdagpi.ImageFeatures.petpet(),str(url))
imgur_client = aioimgur.ImgurClient(os.environ["imgur_id"],os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(image.image)
embed=discord.Embed(color = random.randint(0, 16777215))
embed.set_author(name=f"Headpat gif requested by {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed.set_image(url = imgur_url["link"])
embed.set_footer(text = "powered by dagpi")
return embed
async def jail_converter(url, ctx):
dagpi_client = asyncdagpi.Client(os.environ["dagpi_key"], session = ctx.bot.session)
image=await dagpi_client.image_process(asyncdagpi.ImageFeatures.jail(),str(url))
imgur_client = aioimgur.ImgurClient(os.environ["imgur_id"],os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(image.image)
embed=discord.Embed(color = random.randint(0, 16777215))
embed.set_author(name=f"Jail Image requested by {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed.set_image(url = imgur_url["link"])
embed.set_footer(text="powered by dagpi")
return embed
async def invert_converter2(url, ctx):
try:
client = jeyyapi.JeyyAPIClient(session = ctx.bot.session)
image = await client.half_invert(url)
except:
return await ctx.send("the api failed on us. Please contact the Bot owner if this is a perstient issue.")
imgur_client = aioimgur.ImgurClient(os.environ["imgur_id"],os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(image)
embed=discord.Embed(color = random.randint(0, 16777215))
embed.set_author(name=f"Inverted Image requested by {ctx.author}",icon_url = (ctx.author.display_avatar.url))
embed.set_image(url = imgur_url["link"])
embed.set_footer(text="powered by some jeyyapi")
return embed
```
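A hedged sketch of how one of these helpers could be wired into a command. The import path mirrors the file location in the header and the cog/command names are invented; it assumes the same discord.py version as the cogs above (synchronous `setup`).
```python
from discord.ext import commands
from utils import embeds  # assumed package layout (JDBot/utils/embeds.py)

class Info(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(brief="shows info about the current guild")
    async def serverinfo(self, ctx):
        # guildinfo builds the embed and sends it itself
        await embeds.guildinfo(ctx, ctx.guild)

def setup(bot):
    bot.add_cog(Info(bot))
```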
|
{
"source": "JDJGInc/JDJGBotSupreme",
"score": 3
}
|
#### File: JDJGInc/JDJGBotSupreme/DatabaseControl.py
```python
from bson import ObjectId
import DatabaseConfig
def AddChannelLink(Channel_Source, Channel_destination,databaseDest=DatabaseConfig.db.ChannelLink):
document = {"src":Channel_Source,"dest":Channel_destination}
document_id = databaseDest.insert_one(document).inserted_id
return document_id
def DeleteChannelLink_ID(ID,databaseDest=DatabaseConfig.db.ChannelLink):
DatabaseConfig.db.ChannelLink.delete_one({'_id': ObjectId(str(ID))})
return (str(ID)+" Deleted")
def DeleteChannelLink_ChanNum(Channel_Source,Channel_destination):
tmp_doc = {"src":Channel_Source,"dest":Channel_destination}
DatabaseConfig.db.ChannelLink.delete_one(tmp_doc)
return "Deleted Link"
def GetLinkedChannels(client,Channel_Source):
ret_str = "This Channel Is linked to "
for doc in DatabaseConfig.db.ChannelLink.find():
if(doc['src']==int(Channel_Source)):
ret_str = ret_str + str(client.get_channel(doc['dest']))+ ", "
return ret_str
def GetLinkedChannelsList(Channel_Source):
ret=[]
for doc in DatabaseConfig.db.ChannelLink.find():
if(doc['src']==int(Channel_Source)):
ret.append(doc['dest'])
return ret
def to_ChannelId(channelName):
channelName = channelName.replace("<#","")
channelName = channelName.replace(">","")
return int(channelName)
#DISCORD.PY
```
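These helpers assume `DatabaseConfig.db` is a configured pymongo-style database with a `ChannelLink` collection. A hedged usage sketch with made-up channel IDs (it will only run against a live database):
```python
import DatabaseControl

# Made-up channel IDs; in the bot these come from Discord channel mentions.
src = DatabaseControl.to_ChannelId("<#111111111111111111>")
dest = 222222222222222222

doc_id = DatabaseControl.AddChannelLink(src, dest)
print("linked:", doc_id)

print(DatabaseControl.GetLinkedChannelsList(src))  # [222222222222222222]
DatabaseControl.DeleteChannelLink_ChanNum(src, dest)
```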
#### File: JDJGInc/JDJGBotSupreme/swear_checker.py
```python
bad_words= [
"nigg",
"damn",
"hell",
"shit",
"shithead",
"ass hat",
'faggot',
'retard',
'bastard',
'nigga',
"hitler",
"RenDev"
]
sexual_words =[
"fuck",
"cunt",
"pussy",
"ass",
"penis",
"dick",
"vagina",
"sex",
"masturbate",
"cum",
"jack off",
"skin flute",
"coochie",
"condom",
"peen",
"ejaculate",
"semen",
"sperm",
"wet dream",
]
bad_word_list = sexual_words+bad_words
import DatabaseConfig
col = DatabaseConfig.db.server_settings
cmp_list = bad_words + sexual_words  # list.append mutates in place and returns None, so concatenate instead
def filter_words(message, guild):
doc = col.find_one({"ser_id":guild})
endings = ["ing","er","ist","ed","en","tion",""]
message = message.lower()
words_found =[]
for obj in cmp_list:
slur_pass = 0
if obj in doc["slur"]:
slur_pass = 1
if(slur_pass==0):
for ends in endings:
word = obj.lower() + ends.lower()
if word in message:
words_found.append(obj.lower())
return words_found
def censor_message(message, guild):
    # filter_words expects the full message string, so find the bad words first
    words_found = filter_words(message, guild)
    new_mess = ""
    begin_bool = 1
    for word in message.split(" "):
        # build a censor string the same length as the word (reset for every word)
        censor = "*" * len(word)
        if begin_bool:
            begin_bool = 0
            if not (word.lower() in words_found):
                new_mess = new_mess + word
            else:
                new_mess = new_mess + censor
        else:
            if not (word.lower() in words_found):
                new_mess = new_mess + " " + word
            else:
                new_mess = new_mess + " " + censor
    return new_mess
```
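`filter_words` expands each listed word with a handful of suffixes and substring-checks the lowered message. The standalone sketch below reproduces just that matching step with an invented two-word list, leaving out the MongoDB-backed per-guild `slur` whitelist:
```python
endings = ["ing", "er", "ist", "ed", "en", "tion", ""]
word_list = ["damn", "hell"]  # small stand-in for cmp_list

def find_bad_words(message):
    message = message.lower()
    found = []
    for base in word_list:
        for ends in endings:
            if base + ends in message:
                found.append(base)
                break
    return found

print(find_bad_words("what the HELL is that"))  # ['hell']
```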
#### File: JDJGInc/JDJGBotSupreme/UpdateNotify.py
```python
import discord
import DatabaseConfig
def defDoc(num, val,guildId):
doc = [guildId,"NULL","NULL","NULL"]
doc[num] = val
return {"ser_id":doc[0],"title":doc[1],"body_head":doc[2],"body":doc[3]}
def GetArgs(message):
args = ""
tmpmsg = message.content
tmpmsg = tmpmsg.replace("JDBot*update ","")
tmpmsg = tmpmsg.replace("[title] ","")
tmpmsg = tmpmsg.replace("[body_head] ","")
tmpmsg = tmpmsg.replace("[body] ","")
return tmpmsg
i = -1
for arg in message.content.split(" "):
i=i+1
if i > 0:
args = args + str(arg)
if i+1 != len(message.content.split(" ")):
args=args+" "
return args
async def UpdateNote(message,client):
mode = "NULL"
banana=0
args =[ "NULL","NULL"]
try:
mode = message.content.split(" ")[1]
except:
banana=0
try:
args[0] = GetArgs(message)
except:
banana=0
if mode == "[title]":
try:
DatabaseConfig.db.update_note.insert_one({"ser_id":message.guild.id,"title":args[0],"body_head":"NULL","body":"NULL"})
except:
doc = DatabaseConfig.db.update_note.find_one({'ser_id':message.guild.id})
DatabaseConfig.db.update_note.delete_one(doc)
DatabaseConfig.db.update_note.insert_one({'ser_id':doc['ser_id'],'title':args[0],'body_head':doc['body_head'],'body':doc['body']})
if mode == "[body_head]":
try:
DatabaseConfig.db.update_note.insert_one({"ser_id":message.guild.id,"title":"NULL","body_head":args[0],"body":"NULL"})
except:
doc = DatabaseConfig.db.update_note.find_one({'ser_id':message.guild.id})
DatabaseConfig.db.update_note.delete_one(doc)
DatabaseConfig.db.update_note.insert_one({'ser_id':doc['ser_id'],'title':doc['title'],'body_head':args[0],'body':doc['body']})
if mode == "[body]":
try:
DatabaseConfig.db.update_note.insert_one({"ser_id":message.guild.id,"title":"NULL","body_head":"NULL","body":args[0]})
except:
doc = DatabaseConfig.db.update_note.find_one({'ser_id':message.guild.id})
DatabaseConfig.db.update_note.delete_one(doc)
DatabaseConfig.db.update_note.insert_one({'ser_id':doc['ser_id'],'title':doc['title'],'body_head':doc['body_head'],'body':args[0]})
if mode == '[preview]':
try:
doc = DatabaseConfig.db.update_note.find_one({'ser_id':message.guild.id})
except:
banana=0
embedVar = discord.Embed(title=doc['title'])
embedVar.add_field(name=doc['body_head'],value=doc['body'],inline =True)
if (message.author.dm_channel is None):
await message.author.create_dm()
await message.author.dm_channel.send(embed=embedVar)
if mode=='[set]':
await message.channel.send(SetChannel(message))
if mode=='[send]':
doc = DatabaseConfig.db.server_settings.find_one({"ser_id":message.guild.id})
try:
doc1 = DatabaseConfig.db.update_note.find_one({'ser_id':message.guild.id})
embedVar = discord.Embed(title=doc1['title'])
embedVar.add_field(name=doc1['body_head'],value=doc1['body'],inline =True)
await client.get_channel(doc['up_chan']).send(embed=embedVar)
except:
if (message.author.dm_channel is None):
await message.author.create_dm()
await message.author.dm_channel.send("There is no channel specified to send this update message to! Please define what channel you would like to send updates to with 'JDBot*update set' in the channel that you would like to send messages to")
def SetChannel(message):
try:
DatabaseConfig.db.server_settings.insert_one({"ser_id":message.guild.id,"st":0,"up_chan":message.channel.id})
return "Update Channel Linked"
except:
doc = DatabaseConfig.db.server_settings.find_one({"ser_id":message.guild.id})
DatabaseConfig.db.server_settings.delete_one(doc)
DatabaseConfig.db.server_settings.insert_one({"ser_id":message.guild.id,"st":doc['st'],"up_chan":message.channel.id})
return "There was already a channel set to recive updates so it was deleted and this one was linked instead!"
```
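Every `[title]`/`[body_head]`/`[body]` mode relies on the same argument parsing: strip the command prefix and the mode tags from the raw message content. A tiny standalone sketch of that string handling (the message text is made up):
```python
def get_args(content):
    # Mirrors GetArgs above: drop the prefix and any mode tags.
    for token in ("JDBot*update ", "[title] ", "[body_head] ", "[body] "):
        content = content.replace(token, "")
    return content

print(get_args("JDBot*update [title] Release 1.2"))  # 'Release 1.2'
```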
|
{
"source": "JDJGInc/PythonOs",
"score": 3
}
|
#### File: JDJGInc/PythonOs/clear_code.py
```python
from os import system, name
# import only system from os
# define our clear function
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
#Thank you https://www.geeksforgeeks.org/clear-screen-python/
```
|
{
"source": "JDJGInc/SMG4_testing",
"score": 3
}
|
#### File: SMG4_testing/cogs/listeners.py
```python
from typing import TYPE_CHECKING
from discord.ext import commands
if TYPE_CHECKING:
from main import RTFMBot
else:
RTFMBot = commands.Bot
class Events(commands.Cog):
def __init__(self, bot: RTFMBot) -> None:
self.bot = bot
@commands.Cog.listener()
async def on_ready(self) -> None:
print(
"Bot is Ready",
f"Logged in as {self.bot.user} (ID: {self.bot.user.id})", # type: ignore # .user isn't None.
sep="\n",
)
@commands.Cog.listener()
async def on_guild_available(self, guild) -> None:
print(f"Guild {guild.name} (ID: {guild.id}) is available")
@commands.Cog.listener()
async def on_guild_unavailable(self, guild) -> None:
print(f"Guild {guild.name} (ID: {guild.id}) is unavailable")
async def setup(bot: RTFMBot) -> None:
await bot.add_cog(Events(bot))
```
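Because `setup` is a coroutine here, this cog targets discord.py 2.x extension loading. A hedged sketch of a launcher that loads it; the prefix, intents, and token are placeholders, and the `RTFMBot` subclass only mirrors the name referenced above.
```python
import asyncio
import discord
from discord.ext import commands

class RTFMBot(commands.Bot):
    async def setup_hook(self) -> None:
        # load_extension awaits the async setup() defined in cogs/listeners.py
        await self.load_extension("cogs.listeners")

async def main() -> None:
    bot = RTFMBot(command_prefix="!", intents=discord.Intents.default())
    await bot.start("TOKEN")  # placeholder token

if __name__ == "__main__":
    asyncio.run(main())
```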
#### File: SMG4_testing/cogs/rtfm_slash.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
from discord.ext import commands
import typing
from discord.app_commands import AppCommandError, command as app_command, Choice
if TYPE_CHECKING:
from main import RTFMBot
from discord import Interaction
else:
RTFMBot = commands.Bot
class RTFMSlash(commands.Cog):
def __init__(self, bot: RTFMBot) -> None:
self.bot = bot
@app_command(description="looks up docs")
async def rtfm(self, interaction: Interaction, library: str, query: typing.Optional[str] = None) -> None:
"""Looks up docs for a library with optionally a query."""
if query is None or query == "No Results Found":
return await interaction.response.send_message(f"Alright Let's see {library}")
await interaction.response.send_message(f"Alright Let's see {library}{query}")
@rtfm.autocomplete("library")
async def rtfm_library_autocomplete(self, interaction: Interaction, current: str) -> list[Choice]:
libraries = self.bot.rtfm_libraries
all_choices: list[Choice] = [Choice(name=name, value=link) for name, link in libraries.items()]
startswith: list[Choice] = [choices for choices in all_choices if choices.name.startswith(current)]
if not (current and startswith):
return all_choices[0:25]
return startswith
@rtfm.autocomplete("query")
async def rtfm_query_autocomplete(self, interaction: Interaction, current: str) -> list[Choice]:
url = interaction.namespace.library or list(self.bot.rtfm_libraries.values())[0]
assert self.bot.scraper is not None
results = await self.bot.scraper.search(current, page=url)
if not results:
return [Choice(name="No results found", value="No Results Found")]
to_slice_link = len(url)
all_choices: list[Choice] = [Choice(name=name, value=link[to_slice_link:]) for name, link in results]
startswith: list[Choice] = [choices for choices in all_choices if choices.name.startswith(current)]
if not current:
return all_choices[:25]
return startswith[:25]
@rtfm.error
async def rtfm_error(self, interaction: Interaction, error: AppCommandError) -> None:
await interaction.response.send_message(f"{error}! Please Send to this to my developer", ephemeral=True)
print(error)
print(interaction.command)
async def setup(bot: RTFMBot) -> None:
await bot.add_cog(RTFMSlash(bot))
```
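The autocomplete callbacks assume `bot.rtfm_libraries` is a name-to-docs-URL mapping and that `bot.scraper.search` yields `(name, link)` pairs. The plain-Python sketch below mimics the prefix filtering they perform; the library names and URLs are illustrative only.
```python
# Illustrative mapping; the real bot attaches something like this to the Bot.
rtfm_libraries = {
    "discord.py": "https://discordpy.readthedocs.io/en/stable/",
    "python": "https://docs.python.org/3/",
}

def library_choices(current):
    all_choices = [(name, link) for name, link in rtfm_libraries.items()]
    startswith = [c for c in all_choices if c[0].startswith(current)]
    # fall back to the (truncated) full list when nothing matches
    if not (current and startswith):
        return all_choices[:25]
    return startswith

print(library_choices("dis"))  # [('discord.py', 'https://discordpy.readthedocs.io/en/stable/')]
```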
#### File: SMG4_testing/utils/simple_paginator.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Union
from discord import ButtonStyle, Embed
from discord.ui import View, button as button_decorator
from discord.utils import maybe_coroutine
if TYPE_CHECKING:
from typing_extensions import Self
from discord import Interaction, InteractionMessage, Message, WebhookMessage
from discord.ui.button import Button
from discord.ui.item import Item
from discord.ext.commands.context import Context
ValidPage = Union[str, Embed]
PossibleMessage = Union[InteractionMessage, Message, WebhookMessage]
else:
Interaction = Any
Button = Any
Context = Any
__all__: tuple[str, ...] = ("SimplePaginator",)
class SimplePaginator(View):
def __init__(
self,
pages: list[ValidPage],
*,
delete_message_after: bool = False,
):
self.pages = pages
super().__init__()
self.delete_message_after = delete_message_after
self.message: Optional[PossibleMessage] = None
self.current_page: int = 0
def _init_children(self) -> list[Item[Self]]:
org_children = super()._init_children()
# only show stop button if there is only 1 page.
if len(self.pages) <= 1:
return [item for item in org_children if item.callback.callback.__name__ == "stop_button"]
return org_children
def format_page(self, page: ValidPage) -> ValidPage:
return page
async def get_page_kwargs(self, page_number: int) -> dict[str, Any]:
page = await maybe_coroutine(self.format_page, self.pages[page_number])
base_kwargs: dict[str, Any] = {"content": None, "embeds": [], "view": self}
if isinstance(page, Embed):
base_kwargs["embeds"].append(page)
elif isinstance(page, str):
base_kwargs["content"] = page
elif isinstance(page, dict):
return page
return base_kwargs
async def update(self, interaction: Interaction) -> None:
if hasattr(self, "right_button") and hasattr(self, "left_button"):
if self.current_page >= len(self.pages) - 1:
self.right_button.disabled = True
self.left_button.disabled = False
elif self.current_page == 0:
self.right_button.disabled = False
self.left_button.disabled = True
if self.current_page > len(self.pages):
self.current_page = 0
kwargs = await self.get_page_kwargs(self.current_page)
if not interaction.response.is_done():
await interaction.response.edit_message(**kwargs)
if not self.message:
self.message = await interaction.original_message()
else:
if self.message:
await self.message.edit(**kwargs)
else:
await interaction.message.edit(**kwargs) # type: ignore
self.message = interaction.message
async def start(
self, ctx: Optional[Context] = None, interaction: Optional[Interaction] = None, **kwargs
) -> Optional[PossibleMessage]:
kwargs = await self.get_page_kwargs(self.current_page)
if self.message:
await self.message.edit(**kwargs)
return self.message
if ctx:
self.message = await ctx.send(**kwargs)
elif interaction:
if not interaction.response.is_done():
await interaction.response.send_message(**kwargs)
self.message = await interaction.original_message()
else:
self.message = await interaction.followup.send(wait=True, **kwargs)
return self.message
@button_decorator(emoji="⬅️", style=ButtonStyle.secondary, custom_id="left")
async def left_button(self, interaction: Interaction, button: Button) -> None:
self.current_page -= 1
await self.update(interaction)
@button_decorator(label="Stop", style=ButtonStyle.red, custom_id="stop")
async def stop_button(self, interaction: Interaction, button: Button) -> None:
self.stop()
if self.delete_message_after:
await self.message.delete() # type: ignore
@button_decorator(emoji="➡️", style=ButtonStyle.secondary, custom_id="right")
async def right_button(self, interaction: Interaction, button: Button) -> None:
self.current_page += 1
await self.update(interaction)
```
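A hedged usage sketch for the paginator: it is built from a list of strings and/or embeds and started from a prefix-command context. The command name and page contents are invented, and it assumes `SimplePaginator` is imported from this module.
```python
import discord
from discord.ext import commands

@commands.command()
async def pages(ctx: commands.Context) -> None:
    items = [
        "Page one (plain text)",
        discord.Embed(title="Page two", description="An embed page"),
    ]
    paginator = SimplePaginator(items, delete_message_after=True)
    # start() sends the first page and the view then handles the button presses
    await paginator.start(ctx=ctx)
```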
|
{
"source": "jd-jones/kinemparse",
"score": 2
}
|
#### File: airplane/scripts/baseline.py
```python
import os
import logging
import csv
import warnings
import yaml
import joblib
from scipy import io
import numpy as np
from matplotlib import pyplot as plt
# Stop numba from throwing a bunch of warnings when it compiles LCTM
from numba import NumbaWarning; warnings.filterwarnings('ignore', category=NumbaWarning)
import LCTM.metrics
from mathtools import utils
from kinemparse import airplanecorpus
logger = logging.getLogger(__name__)
def writeLabels(fn, label_seq, header=None):
with open(fn, 'wt') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if header is not None:
writer.writerow(header)
for label in label_seq:
writer.writerow(label)
def toStringSeq(string_array):
return [arr[0] for arr in string_array[0]]
def plotDetections(fn, detection_seq, pred_seq, label_seq):
num_samples, num_detections = detection_seq.shape
f, axes = plt.subplots(num_detections + 1, sharex=True, sharey=True)
for i in range(num_detections):
detection_label = (label_seq == i).astype(int)
axes[i].set_ylabel(f'bin {i}')
axes[i].plot(detection_seq[:, i])
axes[i].twinx().plot(detection_label, color='tab:orange')
axes[-1].plot(pred_seq, label='pred')
axes[-1].plot(label_seq, label='true')
axes[-1].legend()
plt.tight_layout()
plt.savefig(fn)
plt.close()
def main(
out_dir=None, preds_dir=None, data_dir=None, metric_names=None,
detection_threshold=None,
plot_output=None, results_file=None, sweep_param_name=None):
if metric_names is None:
metric_names = ('accuracy', 'edit_score', 'overlap_score')
preds_dir = os.path.expanduser(preds_dir)
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
logger.info(f"Writing to: {out_dir}")
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
if os.path.exists(results_file):
os.remove(results_file)
else:
results_file = os.path.expanduser(results_file)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
def loadAll(seq_ids, var_name, data_dir, convert=None):
def loadOne(seq_id):
fn = os.path.join(data_dir, f'trial={seq_id}_{var_name}')
key = os.path.splitext(var_name)[0].replace('-', '_')
if var_name.endswith('.mat'):
data = io.loadmat(fn)[key]
elif var_name.endswith('.pkl'):
data = joblib.load(fn)
if convert is not None:
data = convert(data)
return data
return tuple(map(loadOne, seq_ids))
part_names, part_names_to_idxs, part_idxs_to_bins = airplanecorpus.loadParts()
transition_vocabulary = joblib.load(os.path.join(data_dir, 'transition-vocabulary.pkl'))
trial_ids = utils.getUniqueIds(preds_dir, prefix='trial=', suffix='.mat')
pred_seqs = loadAll(trial_ids, 'pred-state-seq.mat', preds_dir, convert=toStringSeq)
# true_seqs = loadAll(trial_ids, 'true-state-seq.mat', preds_dir, convert=toStringSeq)
true_seqs = loadAll(trial_ids, 'label-seq.pkl', data_dir)
detection_scores = loadAll(trial_ids, 'detection-scores.mat', preds_dir)
for i, trial_id in enumerate(trial_ids):
logger.info(f"VIDEO {trial_id}:")
pred_action_seq = pred_seqs[i]
true_seq = true_seqs[i]
detection_score_seq = detection_scores[i]
seq_len = min(len(pred_action_seq), true_seq.shape[0], detection_score_seq.shape[0])
pred_action_seq = pred_action_seq[:seq_len]
true_seq = true_seq[:seq_len]
detection_score_seq = detection_score_seq[:seq_len, :]
true_transition_seq = tuple(transition_vocabulary[i] for i in true_seq)
# true_assembly_seq = tuple(n for c, n in true_transition_seq)
true_action_seq = tuple(
airplanecorpus.actionFromTransition(c, n)
for c, n in true_transition_seq
)
true_action_index_seq = np.array([part_names_to_idxs[i] for i in true_action_seq])
true_bin_index_seq = np.array([part_idxs_to_bins[i] for i in true_action_index_seq])
pred_action_index_seq = np.array([part_names_to_idxs[i] for i in pred_action_seq])
pred_bin_index_seq = detection_score_seq.argmax(axis=1)
if detection_threshold is not None:
above_thresh = detection_score_seq.max(axis=1) > detection_threshold
true_bin_index_seq = true_bin_index_seq[above_thresh]
pred_bin_index_seq = pred_bin_index_seq[above_thresh]
detection_score_seq = detection_score_seq[above_thresh, :]
fn = os.path.join(fig_dir, f"trial={trial_id}_baseline-detections.png")
plotDetections(fn, detection_score_seq, pred_bin_index_seq, true_bin_index_seq)
writeLabels(
os.path.join(fig_dir, f"trial={trial_id}_action-seqs"),
zip(true_action_seq, pred_action_seq),
header=('true', 'pred')
)
writeLabels(
os.path.join(fig_dir, f"trial={trial_id}_bin-seqs"),
zip(true_bin_index_seq, pred_bin_index_seq),
header=('true', 'pred')
)
metric_dict = {}
for name in metric_names:
key = f"{name}_action"
value = getattr(LCTM.metrics, name)(pred_action_index_seq, true_action_index_seq) / 100
metric_dict[key] = value
logger.info(f" {key}: {value * 100:.1f}%")
key = f"{name}_bin"
value = getattr(LCTM.metrics, name)(pred_bin_index_seq, true_bin_index_seq) / 100
metric_dict[key] = value
logger.info(f" {key}: {value * 100:.1f}%")
utils.writeResults(results_file, metric_dict, sweep_param_name, {})
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
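Like the other scripts in this repository, this one is driven by a YAML config parsed by `utils.parse_config`, whose keys mirror `main`'s keyword arguments. A hedged sketch of an equivalent configuration expressed as a Python dict; every path and threshold below is a placeholder.
```python
# Placeholder configuration; in practice these values live in a YAML file that
# utils.parse_config merges with any command-line overrides.
config = {
    "out_dir": "~/experiments/airplane/baseline",
    "preds_dir": "~/experiments/airplane/preds/data",
    "data_dir": "~/experiments/airplane/dataset/data",
    "metric_names": ["accuracy", "edit_score", "overlap_score"],
    "detection_threshold": 0.5,
}

# main(**config)  # uncomment to run against the (placeholder) paths above
```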
#### File: airplane/scripts/eval_output.py
```python
import os
import logging
import csv
import warnings
import yaml
import joblib
import numpy as np
# Stop numba from throwing a bunch of warnings when it compiles LCTM
from numba import NumbaWarning; warnings.filterwarnings('ignore', category=NumbaWarning)
import LCTM.metrics
from mathtools import utils
from kinemparse import airplanecorpus
logger = logging.getLogger(__name__)
def equivalent(pred_assembly, true_assembly):
residual = pred_assembly ^ true_assembly
residual = residual - frozenset(['wheel1', 'wheel2', 'wheel3', 'wheel4'])
return len(residual) == 0
def writeLabels(fn, label_seq, header=None):
with open(fn, 'wt') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
if header is not None:
writer.writerow(header)
for label in label_seq:
writer.writerow(label)
def main(
out_dir=None, preds_dir=None, data_dir=None, metric_names=None,
plot_output=None, results_file=None, sweep_param_name=None):
if metric_names is None:
metric_names = ('accuracy', 'edit_score', 'overlap_score')
preds_dir = os.path.expanduser(preds_dir)
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
logger.info(f"Writing to: {out_dir}")
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
if os.path.exists(results_file):
os.remove(results_file)
else:
results_file = os.path.expanduser(results_file)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
def loadAll(seq_ids, var_name, data_dir):
def loadOne(seq_id):
fn = os.path.join(data_dir, f'trial={seq_id}_{var_name}')
return joblib.load(fn)
return tuple(map(loadOne, seq_ids))
part_names, part_names_to_idxs, part_idxs_to_bins = airplanecorpus.loadParts()
transition_vocabulary = joblib.load(os.path.join(data_dir, 'transition-vocabulary.pkl'))
trial_ids = utils.getUniqueIds(preds_dir, prefix='trial=')
pred_seqs = loadAll(trial_ids, 'pred-label-seq.pkl', preds_dir)
true_seqs = loadAll(trial_ids, 'true-label-seq.pkl', preds_dir)
for i, trial_id in enumerate(trial_ids):
logger.info(f"VIDEO {trial_id}:")
pred_transition_index_seq = pred_seqs[i]
pred_transition_seq = tuple(transition_vocabulary[i] for i in pred_transition_index_seq)
pred_action_seq = tuple(
airplanecorpus.actionFromTransition(c, n)
for c, n in pred_transition_seq
)
pred_action_index_seq = np.array([part_names_to_idxs[i] for i in pred_action_seq])
pred_bin_index_seq = np.array([part_idxs_to_bins[i] for i in pred_action_index_seq])
pred_assembly_seq = tuple(n for c, n in pred_transition_seq)
saveVariable(pred_assembly_seq, f'trial={trial_id}_pred-assembly-seq')
saveVariable(pred_action_seq, f'trial={trial_id}_pred-action-seq')
true_transition_index_seq = true_seqs[i]
true_transition_seq = tuple(transition_vocabulary[i] for i in true_transition_index_seq)
true_assembly_seq = tuple(n for c, n in true_transition_seq)
true_action_seq = tuple(
airplanecorpus.actionFromTransition(c, n)
for c, n in true_transition_seq
)
true_action_index_seq = np.array([part_names_to_idxs[i] for i in true_action_seq])
true_bin_index_seq = np.array([part_idxs_to_bins[i] for i in true_action_index_seq])
saveVariable(true_assembly_seq, f'trial={trial_id}_true-assembly-seq')
saveVariable(true_action_seq, f'trial={trial_id}_true-action-seq')
writeLabels(
os.path.join(fig_dir, f"trial={trial_id}_action-seqs"),
zip(true_action_seq, pred_action_seq),
header=('true', 'pred')
)
writeLabels(
os.path.join(fig_dir, f"trial={trial_id}_bin-seqs"),
zip(true_bin_index_seq, pred_bin_index_seq),
header=('true', 'pred')
)
metric_dict = {}
for name in metric_names:
key = f"{name}_action"
value = getattr(LCTM.metrics, name)(pred_action_index_seq, true_action_index_seq) / 100
metric_dict[key] = value
logger.info(f" {key}: {value * 100:.1f}%")
key = f"{name}_bin"
value = getattr(LCTM.metrics, name)(pred_bin_index_seq, true_bin_index_seq) / 100
metric_dict[key] = value
logger.info(f" {key}: {value * 100:.1f}%")
final_pred = pred_assembly_seq[-1]
final_true = true_assembly_seq[-1]
final_equivalent = equivalent(final_pred, final_true)
metric_dict['accuracy_model'] = float(final_equivalent)
logger.info(f" FINAL (PRED): {final_pred}")
logger.info(f" FINAL (TRUE): {final_true}")
logger.info(f" RESIDUAL: {final_pred ^ final_true}")
logger.info(f" EQUIVALENT: {final_equivalent}")
utils.writeResults(results_file, metric_dict, sweep_param_name, {})
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
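`equivalent` counts two assemblies as matching when their symmetric difference contains nothing but the four wheels. A standalone sketch with frozensets of made-up part names:
```python
def equivalent(pred_assembly, true_assembly):
    residual = pred_assembly ^ true_assembly
    residual = residual - frozenset(['wheel1', 'wheel2', 'wheel3', 'wheel4'])
    return len(residual) == 0

true = frozenset({'fuselage', 'wing_left', 'wing_right', 'wheel1', 'wheel2'})
pred = frozenset({'fuselage', 'wing_left', 'wing_right', 'wheel1'})
print(equivalent(pred, true))  # True: only a wheel differs

pred_missing_wing = frozenset({'fuselage', 'wing_left', 'wheel1', 'wheel2'})
print(equivalent(pred_missing_wing, true))  # False: a wing is missing
```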
#### File: blocks-actions/scripts/event_assembly_decode.py
```python
import os
import logging
import yaml
import numpy as np
from matplotlib import pyplot as plt
# import pandas as pd
# import scipy
import LCTM.metrics
from kinemparse import decode
from mathtools import utils # , metrics
# from blocks.core import blockassembly
logger = logging.getLogger(__name__)
def eval_metrics(pred_seq, true_seq, name_suffix='', append_to={}):
state_acc = (pred_seq == true_seq).astype(float).mean()
metric_dict = {
'State Accuracy' + name_suffix: state_acc,
'State Edit Score' + name_suffix: LCTM.metrics.edit_score(pred_seq, true_seq) / 100,
'State Overlap Score' + name_suffix: LCTM.metrics.overlap_score(pred_seq, true_seq) / 100
}
append_to.update(metric_dict)
return append_to
def suppress_nonmax(scores):
col_idxs = scores.argmax(axis=1)
new_scores = np.zeros_like(scores)
row_idxs = np.arange(scores.shape[0])
new_scores[row_idxs, col_idxs] = scores[row_idxs, col_idxs]
return new_scores
def make_event_assembly_transition_priors(event_vocab, assembly_vocab):
def isValid(event, cur_assembly, next_assembly):
is_valid = diff == event
return is_valid
num_events = len(event_vocab)
num_assemblies = len(assembly_vocab)
priors = np.zeros((num_events, num_assemblies, num_assemblies), dtype=bool)
for j, cur_assembly in enumerate(assembly_vocab):
for k, next_assembly in enumerate(assembly_vocab):
try:
diff = next_assembly - cur_assembly
except ValueError:
continue
for i, event in enumerate(event_vocab):
priors[i, j, k] = diff == event
return priors
def make_assembly_transition_priors(assembly_vocab):
def isValid(diff):
for i in range(diff.connections.shape[0]):
c = diff.connections.copy()
c[i, :] = 0
c[:, i] = 0
if not c.any():
return True
return False
num_assemblies = len(assembly_vocab)
priors = np.zeros((num_assemblies, num_assemblies), dtype=bool)
for j, cur_assembly in enumerate(assembly_vocab):
for k, next_assembly in enumerate(assembly_vocab):
if cur_assembly == next_assembly:
continue
try:
diff = next_assembly - cur_assembly
except ValueError:
continue
priors[j, k] = isValid(diff)
return priors
def count_transitions(label_seqs, num_classes, support_only=False):
start_counts = np.zeros(num_classes, dtype=float)
end_counts = np.zeros(num_classes, dtype=float)
for label_seq in label_seqs:
start_counts[label_seq[0]] += 1
end_counts[label_seq[-1]] += 1
start_probs = start_counts / start_counts.sum()
end_probs = end_counts / end_counts.sum()
if support_only:
start_probs = (start_probs > 0).astype(float)
end_probs = (end_probs > 0).astype(float)
return start_probs, end_probs
def count_priors(label_seqs, num_classes, stride=None, approx_upto=None, support_only=False):
dur_counts = {}
class_counts = {}
for label_seq in label_seqs:
for label, dur in zip(*utils.computeSegments(label_seq[::stride])):
class_counts[label] = class_counts.get(label, 0) + 1
dur_counts[label, dur] = dur_counts.get((label, dur), 0) + 1
class_priors = np.zeros((num_classes))
for label, count in class_counts.items():
class_priors[label] = count
class_priors /= class_priors.sum()
max_dur = max(dur for label, dur in dur_counts.keys())
dur_priors = np.zeros((num_classes, max_dur))
for (label, dur), count in dur_counts.items():
assert dur
dur_priors[label, dur - 1] = count
dur_priors /= dur_priors.sum(axis=1, keepdims=True)
if approx_upto is not None:
cdf = dur_priors.cumsum(axis=1)
approx_bounds = (cdf >= approx_upto).argmax(axis=1)
dur_priors = dur_priors[:, :approx_bounds.max()]
if support_only:
dur_priors = (dur_priors > 0).astype(float)
return class_priors, dur_priors
def viz_priors(fn, class_priors, dur_priors):
fig, axes = plt.subplots(3)
axes[0].matshow(dur_priors)
axes[1].stem(class_priors)
plt.tight_layout()
plt.savefig(fn)
plt.close()
def viz_transition_probs(fig_dir, transitions):
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
for i, transition_arr in enumerate(transitions):
plt.matshow(transition_arr)
plt.savefig(os.path.join(fig_dir, f"action={i:03d}"))
plt.close()
def pack_scores(transitions, start, end):
num_assemblies = transitions.shape[0]
packed = np.zeros((num_assemblies + 1, num_assemblies + 1), dtype=float)
packed[0, :-1] = start
packed[1:, -1] = end
packed[1:, :-1] = transitions
return packed
def computeMoments(feature_seqs):
features = np.concatenate(feature_seqs, axis=0)
mean = features.mean(axis=0)
std = features.std(axis=0)
return mean, std
def main(
out_dir=None, assembly_scores_dir=None, event_scores_dir=None,
labels_from='assemblies',
feature_fn_format='score-seq', label_fn_format='true-label-seq',
only_fold=None, plot_io=None, prefix='seq=', stop_after=None,
background_action='', stride=None, standardize_inputs=False,
model_params={}, cv_params={},
results_file=None, sweep_param_name=None):
event_scores_dir = os.path.expanduser(event_scores_dir)
assembly_scores_dir = os.path.expanduser(assembly_scores_dir)
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
misc_dir = os.path.join(out_dir, 'misc')
if not os.path.exists(misc_dir):
os.makedirs(misc_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
scores_dirs = {
'events': event_scores_dir,
'assemblies': assembly_scores_dir
}
data_dir = scores_dirs[labels_from]
seq_ids = utils.getUniqueIds(
data_dir, prefix=prefix, suffix=f'{label_fn_format}.*',
to_array=True
)
event_dataset = utils.FeaturelessCvDataset(
seq_ids, event_scores_dir,
prefix=prefix,
label_fn_format=label_fn_format
)
assembly_dataset = utils.FeaturelessCvDataset(
seq_ids, assembly_scores_dir,
prefix=prefix,
label_fn_format=label_fn_format
)
logger.info(f"Loaded scores for {len(seq_ids)} sequences from {data_dir}")
# Define cross-validation folds
cv_folds = utils.makeDataSplits(len(seq_ids), **cv_params)
utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)
# Load vocabs; create priors
event_vocab = utils.loadVariable('vocab', event_scores_dir)
assembly_vocab = utils.loadVariable('vocab', assembly_scores_dir)
vocabs = {
'event_vocab': tuple(range(len(event_vocab))),
'assembly_vocab': tuple(range(len(assembly_vocab)))
}
try:
event_priors = utils.loadVariable('event-priors', out_data_dir)
except AssertionError:
event_priors = make_event_assembly_transition_priors(event_vocab, assembly_vocab)
utils.saveVariable(event_priors, 'event-priors', out_data_dir)
viz_transition_probs(os.path.join(fig_dir, 'event-priors'), event_priors)
np.savetxt(
os.path.join(misc_dir, "event-transitions.csv"),
np.column_stack(event_priors.nonzero()),
delimiter=",", fmt='%d'
)
try:
assembly_priors = utils.loadVariable('assembly-priors', out_data_dir)
except AssertionError:
assembly_priors = make_assembly_transition_priors(assembly_vocab)
utils.saveVariable(assembly_priors, 'assembly-priors', out_data_dir)
viz_transition_probs(os.path.join(fig_dir, 'assembly-priors'), assembly_priors[None, ...])
np.savetxt(
os.path.join(misc_dir, "assembly-transitions.csv"),
np.column_stack(assembly_priors.nonzero()),
delimiter=",", fmt='%d'
)
event_assembly_scores = np.log(event_priors)
assembly_scores = np.log(assembly_priors)
assembly_scores = np.zeros_like(assembly_scores)
for cv_index, cv_fold in enumerate(cv_folds):
if only_fold is not None and cv_index != only_fold:
continue
train_indices, val_indices, test_indices = cv_fold
logger.info(
f"CV FOLD {cv_index + 1} / {len(cv_folds)}: "
f"{len(train_indices)} train, {len(val_indices)} val, {len(test_indices)} test"
)
cv_str = f'cvfold={cv_index}'
(train_event_labels, _), _, (_, test_seq_ids) = event_dataset.getFold(cv_fold)
(train_assembly_labels, _), _, _ = assembly_dataset.getFold(cv_fold)
assembly_start_probs, assembly_end_probs = count_transitions(
train_assembly_labels, len(assembly_vocab),
support_only=True
)
assembly_start_scores = np.log(assembly_start_probs)
assembly_end_scores = np.log(assembly_end_probs)
assembly_transition_scores = pack_scores(
assembly_scores, assembly_start_scores, assembly_end_scores
)
class_priors, event_dur_probs = count_priors(
train_event_labels, len(event_vocab),
approx_upto=0.95, support_only=True
)
event_dur_scores = np.log(event_dur_probs)
event_dur_scores = np.zeros_like(event_dur_scores)
scores = (event_dur_scores, event_assembly_scores, assembly_transition_scores)
model = decode.AssemblyActionRecognizer(scores, vocabs, model_params)
viz_priors(
os.path.join(fig_dir, f'{cv_str}_priors'),
class_priors, event_dur_probs
)
model.write_fsts(os.path.join(misc_dir, f'{cv_str}_fsts'))
model.save_vocabs(os.path.join(out_data_dir, f'{cv_str}_model-vocabs'))
for i, seq_id in enumerate(test_seq_ids):
if stop_after is not None and i >= stop_after:
break
trial_prefix = f"{prefix}{seq_id}"
logger.info(f" Processing sequence {seq_id}...")
true_label_seq = utils.loadVariable(
f"{trial_prefix}_true-label-seq",
data_dir
)
event_score_seq = utils.loadVariable(f"{trial_prefix}_score-seq", event_scores_dir)
score_seq = model.forward(event_score_seq)
pred_label_seq = model.predict(score_seq)
metric_dict = eval_metrics(pred_label_seq, true_label_seq)
for name, value in metric_dict.items():
logger.info(f" {name}: {value * 100:.2f}%")
utils.writeResults(results_file, metric_dict, sweep_param_name, model_params)
utils.saveVariable(score_seq, f'{trial_prefix}_score-seq', out_data_dir)
utils.saveVariable(pred_label_seq, f'{trial_prefix}_pred-label-seq', out_data_dir)
utils.saveVariable(true_label_seq, f'{trial_prefix}_true-label-seq', out_data_dir)
if plot_io:
utils.plot_array(
event_score_seq.T, (pred_label_seq.T, true_label_seq.T), ('pred', 'true'),
fn=os.path.join(fig_dir, f"seq={seq_id:03d}.png")
)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
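`suppress_nonmax` keeps only each row's argmax score and zeroes the rest. A quick standalone check with a toy score matrix:
```python
import numpy as np

def suppress_nonmax(scores):
    col_idxs = scores.argmax(axis=1)
    new_scores = np.zeros_like(scores)
    row_idxs = np.arange(scores.shape[0])
    new_scores[row_idxs, col_idxs] = scores[row_idxs, col_idxs]
    return new_scores

scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])
print(suppress_nonmax(scores))
# [[0.  0.7 0. ]
#  [0.5 0.  0. ]]
```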
#### File: blocks-actions/scripts/make_decode_dataset.py
```python
import os
import logging
import yaml
import numpy as np
import scipy
from mathtools import utils
logger = logging.getLogger(__name__)
def drawVocab(fig_dir, vocab):
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
for i, x in enumerate(vocab):
x.draw(fig_dir, i)
def main(
out_dir=None, data_dirs=None,
prefix='seq=', feature_fn_format='score-seq', label_fn_format='true-label-seq',
stride=None,
only_fold=None, stop_after=None, take_log=None,
modalities=('assembly', 'event'),
plot_io=None, draw_vocab=False,
results_file=None, sweep_param_name=None):
data_dirs = {
name: os.path.expanduser(dir_)
for name, dir_ in data_dirs.items()
}
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
else:
results_file = os.path.expanduser(results_file)
dir_seq_ids = {
name: utils.getUniqueIds(
data_dirs[name], prefix=prefix[name], suffix=f'{label_fn_format[name]}.*',
to_array=True
)
for name in modalities
}
ids_sets = [set(ids) for ids in dir_seq_ids.values()]
seq_ids = np.array(sorted(ids_sets[0].intersection(*ids_sets[1:])))
substr = '; '.join([f'{len(ids)} seqs in {name}' for name, ids in dir_seq_ids.items()])
logger.info(f"Found {substr}; {len(seq_ids)} shared")
for name in modalities:
data_dir = data_dirs[name]
out_data_dir = os.path.join(out_dir, f'{name}-data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
fig_dir = os.path.join(out_dir, 'figures', name)
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
misc_dir = os.path.join(out_dir, 'misc', name)
if not os.path.exists(misc_dir):
os.makedirs(misc_dir)
vocab = utils.loadVariable('vocab', data_dir)
metadata = utils.loadMetadata(data_dir, rows=seq_ids)
if name == 'event':
for i in range(len(vocab)):
if isinstance(vocab[i].sign, np.ndarray):
vocab[i].sign = np.sign(vocab[i].sign.sum())
all_labels = tuple(
utils.loadVariable(f"{prefix[name]}{seq_id}_{label_fn_format[name]}", data_dir)
for seq_id in seq_ids
)
# Remove labels that don't occur in the dataset
if name == 'assembly':
unique_labels = np.sort(np.unique(np.hstack(all_labels)))
OOV_INDEX = unique_labels.shape[0]
old_idxs_to_new = np.full(len(vocab), OOV_INDEX, dtype=int)
for new, old in enumerate(unique_labels):
old_idxs_to_new[old] = new
vocab = [vocab[i] for i in unique_labels]
num_removed = np.sum(old_idxs_to_new == OOV_INDEX)
logger.info(f'Removing {num_removed} labels that do not occur in dataset')
utils.saveVariable(vocab, 'vocab', out_data_dir)
utils.saveMetadata(metadata, out_data_dir)
if draw_vocab:
drawVocab(os.path.join(fig_dir, 'vocab'), vocab)
for i, seq_id in enumerate(seq_ids):
if stop_after is not None and i >= stop_after:
break
trial_prefix = f"{prefix[name]}{seq_id}"
logger.info(f"Processing sequence {seq_id}...")
true_label_seq = all_labels[i]
score_seq = utils.loadVariable(
f"{trial_prefix}_{feature_fn_format[name]}",
data_dir
)
true_label_seq = true_label_seq[::stride[name]]
if name == 'assembly':
true_label_seq = old_idxs_to_new[true_label_seq]
if take_log[name]:
score_seq = np.log(score_seq)
score_seq = score_seq[::stride[name]]
if name == 'assembly':
score_seq = score_seq[:, unique_labels]
score_seq = scipy.special.log_softmax(score_seq, axis=1)
pred_label_seq = score_seq.argmax(axis=1)
trial_prefix = f"seq={seq_id}"
utils.saveVariable(score_seq, f'{trial_prefix}_score-seq', out_data_dir)
utils.saveVariable(pred_label_seq, f'{trial_prefix}_pred-label-seq', out_data_dir)
utils.saveVariable(true_label_seq, f'{trial_prefix}_true-label-seq', out_data_dir)
if plot_io:
utils.plot_array(
score_seq.T, (true_label_seq.T,), ('gt',),
fn=os.path.join(fig_dir, f"seq={seq_id:03d}.png")
)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
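Note that `prefix`, `feature_fn_format`, `label_fn_format`, `stride`, and `take_log` are indexed by modality inside the loop, so the config has to supply per-modality dictionaries. A hedged sketch of that shape; the directory names, prefixes, strides, and flags are placeholders.
```python
# Placeholder per-modality configuration; real values come from the YAML
# config consumed by utils.parse_config.
config = {
    "out_dir": "~/experiments/blocks/decode-dataset",
    "data_dirs": {
        "assembly": "~/experiments/blocks/assembly-scores/data",
        "event": "~/experiments/blocks/event-scores/data",
    },
    "modalities": ["assembly", "event"],
    "prefix": {"assembly": "trial=", "event": "seq="},
    "feature_fn_format": {"assembly": "score-seq", "event": "score-seq"},
    "label_fn_format": {"assembly": "true-label-seq", "event": "true-label-seq"},
    "stride": {"assembly": 1, "event": 1},
    "take_log": {"assembly": True, "event": False},
}
```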
#### File: blocks-videos_child/scripts/make_fusion_dataset.py
```python
import os
import logging
import yaml
import numpy as np
import torch
from torch.nn import functional as F
from mathtools import utils, torchutils
logger = logging.getLogger(__name__)
def make_attribute_features(score_seq):
prob_seq = F.softmax(score_seq, dim=1)
feat_seq = torch.reshape(
prob_seq.transpose(-1, -2),
(prob_seq.shape[0], -1)
)
return 2 * feat_seq.float() - 1
def resample(rgb_attribute_seq, rgb_timestamp_seq, imu_attribute_seq, imu_timestamp_seq):
imu_attribute_seq = utils.resampleSeq(imu_attribute_seq, imu_timestamp_seq, rgb_timestamp_seq)
return rgb_attribute_seq, imu_attribute_seq
class FusionDataset(object):
def __init__(
self, trial_ids, rgb_attributes_dir, rgb_data_dir, imu_attributes_dir, imu_data_dir,
device=None, modalities=None):
self.trial_ids = trial_ids
self.metadata = utils.loadMetadata(rgb_data_dir, rows=trial_ids)
self.vocab = utils.loadVariable('vocab', rgb_attributes_dir)
self.rgb_attributes_dir = rgb_attributes_dir
self.rgb_data_dir = rgb_data_dir
self.imu_attributes_dir = imu_attributes_dir
self.imu_data_dir = imu_data_dir
self.device = device
self.modalities = modalities
def loadInputs(self, seq_id):
if self.modalities == ['rgb']:
return self.loadInputsRgb(seq_id)
trial_prefix = f"trial={seq_id}"
rgb_attribute_seq = torch.tensor(
utils.loadVariable(f"{trial_prefix}_score-seq", self.rgb_attributes_dir),
dtype=torch.float, device=self.device
)
rgb_timestamp_seq = utils.loadVariable(
f"{trial_prefix}_rgb-frame-timestamp-seq",
from_dir=self.rgb_data_dir
)
imu_attribute_seq = torch.tensor(
utils.loadVariable(f"{trial_prefix}_score-seq", self.imu_attributes_dir),
dtype=torch.float, device=self.device
)
imu_timestamp_seq = utils.loadVariable(f"{trial_prefix}_timestamp-seq", self.imu_data_dir)
rgb_attribute_seq, imu_attribute_seq = resample(
rgb_attribute_seq, rgb_timestamp_seq,
imu_attribute_seq, imu_timestamp_seq
)
attribute_feats = {
'rgb': make_attribute_features(rgb_attribute_seq),
'imu': make_attribute_features(imu_attribute_seq)
}
attribute_feats = torch.cat(
tuple(attribute_feats[name] for name in self.modalities),
dim=1
)
return attribute_feats
def loadInputsRgb(self, seq_id):
trial_prefix = f"trial={seq_id}"
rgb_attribute_seq = torch.tensor(
utils.loadVariable(f"{trial_prefix}_score-seq", self.rgb_attributes_dir),
dtype=torch.float, device=self.device
)
attribute_feats = {'rgb': make_attribute_features(rgb_attribute_seq)}
attribute_feats = torch.cat(
tuple(attribute_feats[name] for name in self.modalities),
dim=1
)
return attribute_feats
def loadTargets(self, seq_id):
trial_prefix = f"trial={seq_id}"
true_label_seq = torch.tensor(
utils.loadVariable(f'{trial_prefix}_true-label-seq', self.rgb_attributes_dir),
dtype=torch.long, device=self.device
)
return true_label_seq
def main(
out_dir=None, modalities=['rgb', 'imu'], gpu_dev_id=None, plot_io=None,
rgb_data_dir=None, rgb_attributes_dir=None, imu_data_dir=None, imu_attributes_dir=None):
out_dir = os.path.expanduser(out_dir)
rgb_data_dir = os.path.expanduser(rgb_data_dir)
rgb_attributes_dir = os.path.expanduser(rgb_attributes_dir)
imu_data_dir = os.path.expanduser(imu_data_dir)
imu_attributes_dir = os.path.expanduser(imu_attributes_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
# Load data
if modalities == ['rgb']:
trial_ids = utils.getUniqueIds(rgb_data_dir, prefix='trial=', to_array=True)
logger.info(f"Processing {len(trial_ids)} videos")
else:
rgb_trial_ids = utils.getUniqueIds(rgb_data_dir, prefix='trial=', to_array=True)
imu_trial_ids = utils.getUniqueIds(imu_data_dir, prefix='trial=', to_array=True)
trial_ids = np.array(sorted(set(rgb_trial_ids.tolist()) & set(imu_trial_ids.tolist())))
logger.info(
f"Processing {len(trial_ids)} videos common to "
f"RGB ({len(rgb_trial_ids)} total) and IMU ({len(imu_trial_ids)} total)"
)
device = torchutils.selectDevice(gpu_dev_id)
dataset = FusionDataset(
trial_ids, rgb_attributes_dir, rgb_data_dir, imu_attributes_dir, imu_data_dir,
device=device, modalities=modalities,
)
utils.saveMetadata(dataset.metadata, out_data_dir)
utils.saveVariable(dataset.vocab, 'vocab', out_data_dir)
for i, trial_id in enumerate(trial_ids):
logger.info(f"Processing sequence {trial_id}...")
true_label_seq = dataset.loadTargets(trial_id)
attribute_feats = dataset.loadInputs(trial_id)
# (Process the samples here if we need to)
attribute_feats = attribute_feats.cpu().numpy()
true_label_seq = true_label_seq.cpu().numpy()
trial_prefix = f"trial={trial_id}"
utils.saveVariable(attribute_feats, f'{trial_prefix}_feature-seq', out_data_dir)
utils.saveVariable(true_label_seq, f'{trial_prefix}_label-seq', out_data_dir)
if plot_io:
fn = os.path.join(fig_dir, f'{trial_prefix}.png')
utils.plot_array(
attribute_feats.T,
(true_label_seq,),
('gt',),
fn=fn
)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
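`make_attribute_features` softmaxes the class axis, flattens the remaining axes per frame, and rescales everything to [-1, 1]. A small shape check with a random tensor; the dimensions are arbitrary stand-ins.
```python
import torch
from torch.nn import functional as F

def make_attribute_features(score_seq):
    prob_seq = F.softmax(score_seq, dim=1)
    feat_seq = torch.reshape(prob_seq.transpose(-1, -2), (prob_seq.shape[0], -1))
    return 2 * feat_seq.float() - 1

score_seq = torch.randn(5, 3, 4)  # illustrative (frames, classes, attributes) shape
feats = make_attribute_features(score_seq)
print(feats.shape)                           # torch.Size([5, 12])
print(feats.min() >= -1, feats.max() <= 1)   # tensor(True) tensor(True)
```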
#### File: blocks-videos_child/scripts/make_seg_labels.py
```python
import os
import warnings
import joblib
import yaml
import numpy as np
from matplotlib import pyplot as plt
from skimage import morphology, segmentation, measure, color, img_as_float
from mathtools import utils
from visiontools import imageprocessing
def removeTargetModel(seg_image, num):
""" Find and remove the target model from a segment image. """
seg_centroids = np.row_stack(tuple(
np.column_stack(np.nonzero(seg_image == i)).mean(axis=0)
for i in range(1, num + 1)
))
direction = np.array([3, 4])
seg_scores = seg_centroids @ direction
# Segment labels are one-indexed
best_idx = np.array(seg_scores).argmax() + 1
seg_image[seg_image == best_idx] = 0
seg_image = segmentation.relabel_sequential(seg_image)[0]
return seg_image, num - 1
def makeCoarseSegmentLabels(mask, min_size=100):
mask = morphology.remove_small_objects(mask, min_size=min_size, connectivity=1)
labels, num = measure.label(mask.astype(int), return_num=True)
if num < 2:
return labels
labels, num = removeTargetModel(labels, num)
return labels
def makeFineSegmentLabels(coarse_seg_labels, bg_mask_sat, min_size=100):
labels, num = measure.label(coarse_seg_labels, return_num=True)
for i in range(1, num + 1):
in_seg = labels == i
bg_vals = bg_mask_sat[in_seg]
class_counts = np.hstack((np.sum(bg_vals == 0), np.sum(bg_vals == 1)))
is_bg = class_counts.argmax().astype(bool)
if is_bg:
labels[in_seg] = 0
fg_mask = morphology.remove_small_objects(labels != 0, min_size=min_size, connectivity=1)
labels, num = measure.label(fg_mask.astype(int), return_num=True)
for i in range(1, num + 1):
in_seg = labels == i
labels[in_seg] = coarse_seg_labels[in_seg]
labels = segmentation.relabel_sequential(labels)[0]
return labels
def makeHsvFrame(rgb_image):
rgb_image = img_as_float(rgb_image)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "divide by zero")
hsv_image = color.rgb2hsv(rgb_image)
return hsv_image
def plotHsvHist(hsv_frame_seq, seg_labels_seq, file_path=None):
fg = hsv_frame_seq[seg_labels_seq != 0]
names = ('hue', 'sat', 'val')
fig, axes = plt.subplots(3)
for i in range(3):
axes[i].hist(fg[:, i], bins=100)
axes[i].set_ylabel(names[i])
plt.tight_layout()
plt.savefig(file_path)
plt.close()
def main(
out_dir=None, data_dir=None, person_masks_dir=None, bg_masks_dir=None,
sat_thresh=1, start_from=None, stop_at=None, num_disp_imgs=None):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
person_masks_dir = os.path.expanduser(person_masks_dir)
bg_masks_dir = os.path.expanduser(bg_masks_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def loadFromDir(var_name, dir_name):
return joblib.load(os.path.join(dir_name, f"{var_name}.pkl"))
def saveToWorkingDir(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl"))
trial_ids = utils.getUniqueIds(data_dir, prefix='trial=', to_array=True)
for seq_idx, trial_id in enumerate(trial_ids):
if start_from is not None and seq_idx < start_from:
continue
if stop_at is not None and seq_idx > stop_at:
break
trial_str = f"trial={trial_id}"
logger.info(f"Processing video {seq_idx + 1} / {len(trial_ids)} (trial {trial_id})")
logger.info(" Loading data...")
rgb_frame_seq = loadFromDir(f'{trial_str}_rgb-frame-seq', data_dir)
person_mask_seq = loadFromDir(f'{trial_str}_person-mask-seq', person_masks_dir)
bg_mask_seq_depth = loadFromDir(f'{trial_str}_bg-mask-seq-depth', bg_masks_dir)
# bg_mask_seq_rgb = loadFromDir(f'{trial_str}_bg-mask-seq-rgb', bg_masks_dir)
logger.info(" Making segment labels...")
fg_mask_seq = ~bg_mask_seq_depth
seg_labels_seq = np.stack(tuple(map(makeCoarseSegmentLabels, fg_mask_seq)), axis=0)
hsv_frame_seq = np.stack(tuple(map(makeHsvFrame, rgb_frame_seq)), axis=0)
sat_frame_seq = hsv_frame_seq[..., 1]
bg_mask_seq_sat = sat_frame_seq < sat_thresh
seg_labels_seq[person_mask_seq] = 0
seg_labels_seq = np.stack(
tuple(
makeFineSegmentLabels(segs, sat)
for segs, sat in zip(seg_labels_seq, bg_mask_seq_sat)
),
axis=0
)
logger.info(" Saving output...")
saveToWorkingDir(seg_labels_seq.astype(np.uint8), f'{trial_str}_seg-labels-seq')
plotHsvHist(
hsv_frame_seq, seg_labels_seq,
file_path=os.path.join(fig_dir, f'{trial_str}_hsv-hists.png')
)
if num_disp_imgs is not None:
if rgb_frame_seq.shape[0] > num_disp_imgs:
idxs = np.arange(rgb_frame_seq.shape[0])
np.random.shuffle(idxs)
idxs = idxs[:num_disp_imgs]
else:
idxs = slice(None, None, None)
imageprocessing.displayImages(
*(rgb_frame_seq[idxs]),
*(bg_mask_seq_sat[idxs]),
*(bg_mask_seq_depth[idxs]),
*(person_mask_seq[idxs]),
*(seg_labels_seq[idxs]),
num_rows=5, file_path=os.path.join(fig_dir, f'{trial_str}_best-frames.png')
)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: blocks-videos_child/scripts/train_assembly_detector.py
```python
import os
import collections
import logging
import yaml
import torch
import joblib
import numpy as np
from mathtools import utils, metrics, torchutils
from kinemparse import sim2real
from blocks.core.blockassembly import BlockAssembly
from blocks.core import definitions as defn
from blocks.core import labels as labels_lib
logger = logging.getLogger(__name__)
def make_single_block_state(block_index):
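""" Construct a BlockAssembly containing a single block placed at the origin
with zero rotation. """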
state = BlockAssembly()
state.addBlock(block_index)
state.blocks[block_index].component_index = state._next_component_index
state.blocks[block_index].theta_global = 0
state.blocks[block_index].t_global = np.zeros(3)
state._addToConnectedComponent(block_index)
return state
def loadMasks(masks_dir=None, trial_ids=None, num_per_video=10):
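""" Sample up to `num_per_video` person-mask frames (those containing at least
one detection) from each video and stack them into a single array; these are
presumably used downstream as occlusion masks for rendering augmentation. """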
if masks_dir is None:
return None
def loadVideoMasks(video_id):
masks = joblib.load(os.path.join(masks_dir, f'trial={video_id}_person-mask-seq.pkl'))
any_detections = masks.any(axis=-1).any(axis=-1)
masks = masks[any_detections]
masks = utils.sampleWithoutReplacement(masks, num_samples=num_per_video)
return masks
masks_dir = os.path.expanduser(masks_dir)
if trial_ids is None:
trial_ids = utils.getUniqueIds(masks_dir, prefix='trial=', to_array=True)
masks = np.vstack(tuple(map(loadVideoMasks, trial_ids)))
return masks
def main(
out_dir=None, data_dir=None,
model_name=None, gpu_dev_id=None, batch_size=None, learning_rate=None,
model_params={}, cv_params={}, train_params={}, viz_params={}, load_masks_params={},
kornia_tfs={}, only_edge=None,
num_disp_imgs=None, results_file=None, sweep_param_name=None):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
io_dir = os.path.join(fig_dir, 'model-io')
if not os.path.exists(io_dir):
os.makedirs(io_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveVariable(var, var_name, to_dir=out_data_dir):
return utils.saveVariable(var, var_name, to_dir)
trial_ids = utils.getUniqueIds(data_dir, prefix='trial=', to_array=True)
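# Build the assembly vocabulary: the empty assembly, one single-block state per
# block type, and (below) every assembly observed in the training sequences.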
vocab = [BlockAssembly()] + [make_single_block_state(i) for i in range(len(defn.blocks))]
for seq_id in trial_ids:
assembly_seq = utils.loadVariable(f"trial={seq_id}_assembly-seq", data_dir)
for assembly in assembly_seq:
utils.getIndex(assembly, vocab)
parts_vocab, part_labels = labels_lib.make_parts_vocab(
vocab, lower_tri_only=True, append_to_vocab=True
)
if only_edge is not None:
part_labels = part_labels[:, only_edge:only_edge + 1]
logger.info(f"Loaded {len(trial_ids)} sequences; {len(vocab)} unique assemblies")
saveVariable(vocab, 'vocab')
saveVariable(parts_vocab, 'parts-vocab')
saveVariable(part_labels, 'part-labels')
device = torchutils.selectDevice(gpu_dev_id)
if model_name == 'AAE':
Dataset = sim2real.DenoisingDataset
elif model_name == 'Resnet':
Dataset = sim2real.RenderDataset
elif model_name == 'Connections':
Dataset = sim2real.ConnectionDataset
elif model_name == 'Labeled Connections':
Dataset = sim2real.LabeledConnectionDataset
else:
raise ValueError(f"Unrecognized model name: {model_name}")
occlusion_masks = loadMasks(**load_masks_params)
if occlusion_masks is not None:
logger.info(f"Loaded {occlusion_masks.shape[0]} occlusion masks")
def make_data(shuffle=True):
dataset = Dataset(
parts_vocab, part_labels,
vocab, device=device, occlusion_masks=occlusion_masks,
kornia_tfs=kornia_tfs,
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size,
shuffle=shuffle
)
return dataset, data_loader
for cv_index, cv_splits in enumerate(range(1)):
cv_str = f"cvfold={cv_index}"
train_set, train_loader = make_data(shuffle=True)
test_set, test_loader = make_data(shuffle=False)
val_set, val_loader = make_data(shuffle=True)
if model_name == 'AAE':
model = sim2real.AugmentedAutoEncoder(train_set.data_shape, train_set.num_classes)
criterion = torchutils.BootstrappedCriterion(
0.25, base_criterion=torch.nn.functional.mse_loss,
)
metric_names = ('Reciprocal Loss',)
elif model_name == 'Resnet':
model = sim2real.ImageClassifier(train_set.num_classes, **model_params)
criterion = torch.nn.CrossEntropyLoss()
metric_names = ('Loss', 'Accuracy')
elif model_name == 'Connections':
model = sim2real.ConnectionClassifier(train_set.label_shape[0], **model_params)
criterion = torch.nn.BCEWithLogitsLoss()
metric_names = ('Loss', 'Accuracy', 'Precision', 'Recall', 'F1')
elif model_name == 'Labeled Connections':
out_dim = int(part_labels.max()) + 1
num_vertices = len(defn.blocks)
edges = np.column_stack(np.tril_indices(num_vertices, k=-1))
if only_edge is not None:
edges = edges[only_edge:only_edge + 1]
model = sim2real.LabeledConnectionClassifier(
out_dim, num_vertices, edges, **model_params
)
if only_edge is not None:
logger.info(f"Class freqs: {train_set.class_freqs}")
# criterion = torch.nn.CrossEntropyLoss(weight=1 / train_set.class_freqs[:, 0])
criterion = torch.nn.CrossEntropyLoss()
else:
criterion = torch.nn.CrossEntropyLoss()
# criterion = torchutils.BootstrappedCriterion(
# 0.25, base_criterion=torch.nn.functional.cross_entropy,
# )
metric_names = ('Loss', 'Accuracy', 'Precision', 'Recall', 'F1')
model = model.to(device=device)
optimizer_ft = torch.optim.Adam(
model.parameters(), lr=learning_rate,
betas=(0.9, 0.999), eps=1e-08,
weight_decay=0, amsgrad=False
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=1, gamma=1.00)
train_epoch_log = collections.defaultdict(list)
val_epoch_log = collections.defaultdict(list)
metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
model, last_model_wts = torchutils.trainModel(
model, criterion, optimizer_ft, lr_scheduler,
train_loader, val_loader,
device=device,
metrics=metric_dict,
train_epoch_log=train_epoch_log,
val_epoch_log=val_epoch_log,
**train_params
)
# Test model
metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
test_io_batches = torchutils.predictSamples(
model.to(device=device), test_loader,
criterion=criterion, device=device,
metrics=metric_dict, data_labeled=True, update_model=False,
seq_as_batch=train_params['seq_as_batch'],
return_io_history=True
)
metric_str = ' '.join(str(m) for m in metric_dict.values())
logger.info('[TST] ' + metric_str)
utils.writeResults(results_file, metric_dict, sweep_param_name, model_params)
for pred_seq, score_seq, feat_seq, label_seq, trial_id in test_io_batches:
trial_str = f"trial={trial_id}"
saveVariable(pred_seq.cpu().numpy(), f'{trial_str}_pred-label-seq')
saveVariable(score_seq.cpu().numpy(), f'{trial_str}_score-seq')
saveVariable(label_seq.cpu().numpy(), f'{trial_str}_true-label-seq')
saveVariable(model, f'{cv_str}_model-best')
if train_epoch_log:
torchutils.plotEpochLog(
train_epoch_log,
subfig_size=(10, 2.5),
title='Training performance',
fn=os.path.join(fig_dir, f'{cv_str}_train-plot.png')
)
if val_epoch_log:
torchutils.plotEpochLog(
val_epoch_log,
subfig_size=(10, 2.5),
title='Heldout performance',
fn=os.path.join(fig_dir, f'{cv_str}_val-plot.png')
)
if num_disp_imgs is not None:
model.plotBatches(test_io_batches, io_dir, dataset=test_set)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: ikea_anu/scripts/make_assembly_data.py
```python
import os
import logging
import glob
import yaml
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from mathtools import utils
logger = logging.getLogger(__name__)
def plotLabels(fn, label_seq):
fig, axis = plt.subplots(1, figsize=(12, 9))
axis.plot(label_seq)
plt.tight_layout()
plt.savefig(fn)
plt.close()
def writeLabels(fn, label_seq, vocab):
seg_label_idxs, seg_durs = utils.computeSegments(label_seq)
seg_durs = np.array(seg_durs)
seg_ends = np.cumsum(seg_durs) - 1
seg_starts = np.array([0] + (seg_ends + 1)[:-1].tolist())
seg_labels = tuple(str(vocab[i]) for i in seg_label_idxs)
d = {
'start': seg_starts,
'end': seg_ends,
'label': seg_labels
}
pd.DataFrame(d).to_csv(fn, index=False)
def parseActions(assembly_actions, num_frames, vocab):
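""" Convert a table of frame-indexed 'connect' actions into a per-frame
sequence of assembly indices, adding each new assembly to `vocab` as it is
encountered. """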
def makeJoint(part1, part2):
return tuple(sorted([part1, part2]))
def updateAssembly(assembly, joint):
return tuple(sorted(assembly + (joint,)))
assembly_index_seq = np.zeros(num_frames, dtype=int)
cur_assembly = tuple()
prev_start = -1
prev_end = -1
for i, row in assembly_actions.iterrows():
if row.start != prev_start or row.end != prev_end:
cur_assembly_index = utils.getIndex(cur_assembly, vocab)
assembly_index_seq[prev_end:row.end + 1] = cur_assembly_index
prev_start = row.start
prev_end = row.end
if row.action == 'connect':
joint = makeJoint(row.part1, row.part2)
cur_assembly = updateAssembly(cur_assembly, joint)
elif row.action == 'pin':
continue
else:
raise ValueError(f"Unrecognized action: {row.action}")
cur_assembly_index = utils.getIndex(cur_assembly, vocab)
assembly_index_seq[prev_end:] = cur_assembly_index
return assembly_index_seq
def main(out_dir=None, data_dir=None, labels_dir=None):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
labels_dir = os.path.expanduser(labels_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
filenames = [
utils.stripExtension(fn)
for fn in glob.glob(os.path.join(labels_dir, '*.csv'))
]
metadata = utils.loadMetadata(data_dir)
metadata['seq_id'] = metadata.index
metadata = metadata.set_index('dir_name', drop=False).loc[filenames].set_index('seq_id')
seq_ids = np.sort(metadata.index.to_numpy())
logger.info(f"Loaded {len(seq_ids)} sequences from {labels_dir}")
vocab = []
for i, seq_id in enumerate(seq_ids):
seq_id_str = f"seq={seq_id}"
seq_dir_name = metadata['dir_name'].loc[seq_id]
labels_fn = os.path.join(labels_dir, f'{seq_dir_name}.csv')
event_labels = utils.loadVariable(f'{seq_id_str}_labels', data_dir)
assembly_actions = pd.read_csv(labels_fn)
label_seq = parseActions(assembly_actions, event_labels.shape[0], vocab)
utils.saveVariable(label_seq, f'{seq_id_str}_label-seq', out_data_dir)
plotLabels(os.path.join(fig_dir, f'{seq_id_str}_labels.png'), label_seq)
writeLabels(os.path.join(fig_dir, f'{seq_id_str}_labels.csv'), label_seq, vocab)
utils.saveMetadata(metadata, out_data_dir)
utils.saveVariable(vocab, 'vocab', out_data_dir)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: kinemparse/scripts/decode_airplanes.py
```python
import os
import functools
import time
import argparse
import pdb
import yaml
import joblib
import scipy
import torch
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
import torch_struct
import numpy as np
from mathtools import utils, metrics, torchutils
from kinemparse import models, airplanecorpus
def assign_hands(hand_detections):
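""" Re-order the two hand detections in each frame so that the first pair of
coordinates tracks the same physical hand over time, using nearest-neighbor
matching against that hand's previous position. """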
new_detections = np.zeros_like(hand_detections)
hand_a = hand_detections[0, :2]
# hand_b = hand_detections[0, 2:]
for t, detections in enumerate(hand_detections):
if np.isnan(detections).any():
new_detections[t, :] = np.nan
# Assumption: skip frames with missing detections so the tracked hand
# position is not overwritten with NaN.
continue
loc_a = detections[:2]
loc_b = detections[2:]
aa = np.linalg.norm(hand_a - loc_a)
ba = np.linalg.norm(hand_a - loc_b)
# ab = np.linalg.norm(hand_b - loc_a)
# bb = np.linalg.norm(hand_b - loc_b)
if aa < ba:
hand_a = loc_a
# hand_b = loc_b
new_detections[t, :2] = loc_a
new_detections[t, 2:] = loc_b
else:
# hand_b = loc_a
hand_a = loc_b
new_detections[t, :2] = loc_b
new_detections[t, 2:] = loc_a
return new_detections
def main(
config_path=None, out_dir=None, scores_dir=None, airplane_corpus_dir=None,
subsample_period=None, window_size=None, corpus_name=None, debug=None,
default_annotator=None, cv_scheme=None, model_config=None, overwrite=None,
ignore_objects_in_comparisons=None, gpu_dev_id=None,
start_from_fold=None, max_folds=None, presegment=False, min_dur=None):
out_dir = os.path.expanduser(out_dir)
airplane_corpus_dir = os.path.expanduser(airplane_corpus_dir)
if scores_dir is not None:
scores_dir = os.path.expanduser(scores_dir)
model_name = model_config.pop('model_name')
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveToWorkingDir(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl"))
logger.info(f"Starting run")
device = torchutils.selectDevice(gpu_dev_id)
hand_detection_seqs, action_seqs, trial_ids, part_info = airplanecorpus.loadCorpus(
airplane_corpus_dir, subsample_period=None, reaching_only=True, parse_actions=False,
# airplane_corpus_dir, subsample_period=None, reaching_only=False, parse_actions=False,
ignore_objects_in_comparisons=ignore_objects_in_comparisons,
)
part_idxs_to_models, model_names, model_names_to_idxs = airplanecorpus.loadModelAssignments()
part_idxs_to_models = torch.tensor(part_idxs_to_models).float()
# Split into train and test sets
if cv_scheme == 'leave one out':
num_seqs = len(trial_ids)
cv_folds = []
for i in range(num_seqs):
test_fold = (i,)
train_fold = tuple(range(0, i)) + tuple(range(i + 1, num_seqs))
cv_folds.append((train_fold, test_fold))
elif cv_scheme == 'test on train set':
if not debug:
err_str = "You can't test on the train set if you're not debugging!"
raise ValueError(err_str)
num_seqs = len(trial_ids)
cv_folds = []
for i in range(num_seqs):
test_fold = (i,)
train_fold = tuple(range(0, num_seqs))
cv_folds.append((train_fold, test_fold))
logger.warning(f"TESTING ON THE TRAINING SET---DON'T BELIEVE THESE NUMBERS")
saveToWorkingDir(cv_folds, f'cv-folds')
if start_from_fold is not None:
cv_folds = cv_folds[start_from_fold:]
if max_folds is not None:
cv_folds = cv_folds[:max_folds]
num_cv_folds = len(cv_folds)
num_equivalent = 0
for fold_index, (train_idxs, test_idxs) in enumerate(cv_folds):
logger.info(f"CV FOLD {fold_index + 1} / {num_cv_folds}")
if cv_scheme != 'test on train set':
utils.validateCvFold(train_idxs, test_idxs)
model = getattr(models, model_name)(
*part_info, device=device, **model_config['init_kwargs']
)
selectTrain = functools.partial(utils.select, train_idxs)
train_hand_detection_seqs = selectTrain(hand_detection_seqs)
train_action_seqs = selectTrain(action_seqs)
logger.info(f" Training model on {len(train_idxs)} sequences...")
model.fit(
train_action_seqs, train_hand_detection_seqs,
**model_config['fit_kwargs']
)
logger.info(f' Model trained on {model.num_states} unique assembly states')
logger.info(f" Testing model on {len(test_idxs)} sequences...")
for i, test_idx in enumerate(test_idxs):
trial_id = trial_ids[test_idx]
hand_detection_seq = hand_detection_seqs[test_idx]
true_action_seq = action_seqs[test_idx]
true_action_names = tuple(
model._obsv_model._part_names[i] for i in true_action_seq[:, 0]
)
"""
hand_detection_seq = assign_hands(hand_detection_seq)
f, axes = plt.subplots(4)
axes[0].plot(hand_detection_seq[:, 0])
axes[1].plot(hand_detection_seq[:, 1])
axes[2].plot(hand_detection_seq[:, 2])
axes[3].plot(hand_detection_seq[:, 3])
# axis.plot(hand_detection_seq[:, 2], hand_detection_seq[:, 3])
plt.savefig(os.path.join(fig_dir, f"hands-{trial_id}.png"))
plt.close()
continue
"""
fst = model.showFST(action_dict=model._obsv_model._part_names)
fst.render(os.path.join(fig_dir, f"fst-{trial_id}"), cleanup=True)
detection_scores = torch.tensor(
scipy.io.loadmat(
os.path.join(scores_dir, f'trial-{trial_id}-detection-scores.mat')
)['detection_scores']
).float()
num_samples, num_bins = detection_scores.shape
_, axis = plt.subplots(1, figsize=(12, 4))
for i in range(num_bins):
axis.plot(detection_scores[:, i].numpy(), label=f"bin {i}")
axis.set_title(f"Detections, {trial_id}")
axis.legend()
axis.grid()
plt.savefig(os.path.join(fig_dir, f"detections-{trial_id}.png"))
plt.close()
def makeSegmentScores(sample_scores, min_dur=1):
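# Collapse sample-level detection scores into segment-level scores by averaging
# over each contiguous run of frames with at least one positive score, dropping
# runs no longer than min_dur frames.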
x = torch.any(sample_scores > 0, dim=1).int()
changepoints = x[1:] - x[:-1]
start_idxs = (torch.nonzero(changepoints == 1)[:, 0] + 1).tolist()
end_idxs = (torch.nonzero(changepoints == -1)[:, 0] + 1).tolist()
if x[0]:
start_idxs = [0] + start_idxs
assert(len(start_idxs) == len(end_idxs))
segment_scores = tuple(
sample_scores[seg_start:seg_end].mean(dim=0)
for seg_start, seg_end in zip(start_idxs, end_idxs)
if seg_end - seg_start > min_dur
)
return torch.stack(segment_scores)
if presegment:
try:
detection_scores = makeSegmentScores(detection_scores, min_dur=min_dur)
except AssertionError as e:
logger.warning(e)
continue
row_is_inf = torch.isinf(detection_scores[:, :-1]).all(-1)
detection_scores = detection_scores[~row_is_inf, :]
# logger.info(f"{int(row_is_inf.sum())} inf-valued rows in detection array")
# duration_scores = None
duration_scores = torch.tensor(
scipy.io.loadmat(
os.path.join(scores_dir, f'trial-{trial_id}-duration-scores.mat')
)['duration_scores']
).float()
_, axes = plt.subplots(1, 2, figsize=(12, 12))
axes[0].matshow(detection_scores)
axes[0].set_ylabel('detection')
axes[1].matshow(duration_scores)
axes[1].set_ylabel('duration')
plt.tight_layout()
plt.savefig(os.path.join(fig_dir, f"scores-{trial_id}.png"))
plt.close()
logger.info(f' Decoding video {trial_id}...')
# start_time = time.process_time()
pred_action_idxs, pred_scores = model.predictSeq(
hand_detection_seq,
obsv_scores=detection_scores, dur_scores=duration_scores,
**model_config['decode_kwargs']
)
# end_time = time.process_time()
# logger.info(utils.makeProcessTimeStr(end_time - start_time))
semiring = torch_struct.LogSemiring()
best_score = semiring.sum(pred_scores.max(dim=-1).values, dim=0)
logger.info(f" Path score: {best_score}")
_, axes = plt.subplots(2, figsize=(12, 12))
axes[0].matshow(pred_scores.numpy().T)
axes[1].matshow(pred_scores.exp().numpy().T)
plt.tight_layout()
plt.savefig(os.path.join(fig_dir, f"marginals-{trial_id}.png"))
plt.close()
# pdb.set_trace()  # NOTE: breakpoint left from debugging; disabled so the decode loop can run unattended
pred_action_idx_segs, _ = utils.computeSegments(pred_action_idxs.tolist())
pred_action_names = tuple(
model._obsv_model._part_names[i]
for i in pred_action_idx_segs
)
pred_action_names = tuple(a for a in pred_action_names if a != 'null')
num_samples, num_bins = detection_scores.shape
_, axes = plt.subplots(2, sharex=True, figsize=(10, 10))
for i in range(num_bins):
axes[0].plot(detection_scores[:, i].numpy(), label=f"bin {i}")
axes[0].scatter(range(num_samples), detection_scores[:, i].numpy())
axes[0].set_title(f"System I/O, {trial_id}")
axes[0].legend()
axes[0].grid()
axes[-1].plot(pred_action_idxs.numpy())
axes[-1].scatter(range(num_samples), pred_action_idxs.numpy())
axes[-1].set_yticks(range(len(model._obsv_model._part_names)))
axes[-1].set_yticklabels(model._obsv_model._part_names)
axes[-1].grid()
plt.tight_layout()
plt.savefig(os.path.join(fig_dir, f"io-{trial_id}.png"))
plt.close()
raw_bin_idxs = detection_scores.argmax(-1).numpy()
pred_bin_idxs = model._obsv_model._part_to_bin[pred_action_idxs].numpy()
num_samples, num_bins = detection_scores.shape
_, axis = plt.subplots(1, figsize=(10, 5))
axis.plot(raw_bin_idxs, label='raw')
axis.scatter(range(num_samples), raw_bin_idxs)
axis.set_title(f"Bin predictions, {trial_id}")
axis.plot(pred_bin_idxs, label='decode')
axis.scatter(range(num_samples), pred_bin_idxs)
axis.yaxis.set_major_locator(MaxNLocator(integer=True))
axis.grid()
axis.legend()
plt.savefig(os.path.join(fig_dir, f"bins-{trial_id}.png"))
plt.close()
last_state_pred = frozenset(pred_action_names)
last_state_true = frozenset(true_action_names)
residual = last_state_pred ^ last_state_true
logger.info(f' Actions, pred: {models.stateToString(pred_action_names)}')
logger.info(f' Actions, true: {models.stateToString(true_action_names)}')
logger.info(f' Errors: {models.stateToString(sorted(tuple(residual)))}')
file_path = os.path.join(out_dir, f'{trial_id}-pred.txt')
with open(file_path, "w") as text_file:
for action in pred_action_names:
print(action, file=text_file)
file_path = os.path.join(out_dir, f'{trial_id}-true.txt')
with open(file_path, "w") as text_file:
for action in true_action_names:
print(action, file=text_file)
def getModel(action_names):
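# Infer which airplane model was assembled: count how many nose/wing/tail part
# names belong to each model variant and return the most frequent one.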
model_names = ('a', 'b', 'c')
model_names_to_idxs = {n: i for i, n in enumerate(model_names)}
model_names_to_idxs['ab'] = [0, 1]
model_counts = torch.zeros(3)
for n in action_names:
if n.startswith('nose') or n.startswith('wing') or n.startswith('tail'):
model_name = n[:-1].split('_')[-1]
model_idx = model_names_to_idxs[model_name]
model_counts[model_idx] += 1
best_model_idx = model_counts.argmax()
best_model_name = model_names[best_model_idx]
return best_model_name
def predictModel():
# model_scores = torch.einsum('tp,pm->tm', pred_scores, part_idxs_to_models)
model_scores = semiring.matmul(pred_scores, part_idxs_to_models.log())
model_scores = semiring.sum(model_scores, dim=0)
best_model_idx = model_scores.argmax(-1)
best_model_name = model_names[best_model_idx]
return best_model_name
true_model = getModel(true_action_names)
pred_model = predictModel()
models_match = true_model == pred_model
# equiv_upto_optional = residual <= frozenset(['wheel1', 'wheel2'])
logger.info(f" MODEL CORRECT: {models_match} (p {pred_model} | t {true_model})")
num_equivalent += int(models_match)
edit_dist = metrics.levenshtein(
true_action_names, pred_action_names, segment_level=False
)
logger.info(
f" EDIT DISTANCE: {edit_dist} "
f"({len(true_action_names)} true, {len(pred_action_names)} pred)"
)
# Save intermediate results
logger.info(f"Saving output...")
saveToWorkingDir(true_action_seq, f'true_action_seq-{trial_id}')
saveToWorkingDir(true_action_names, f'true_action_names-{trial_id}')
saveToWorkingDir(pred_action_names, f'pred_action_names-{trial_id}')
saveToWorkingDir(pred_action_idxs, f'pred_action_idxs-{trial_id}')
num_seqs = len(trial_ids)
final_score = num_equivalent / num_seqs
logger.info(
f"TOTAL: {final_score * 100:.2f}% ({num_equivalent} / {num_seqs})"
" of final states correct"
)
if __name__ == '__main__':
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
parser.add_argument('--out_dir')
args = vars(parser.parse_args())
args = {k: v for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.expanduser(
os.path.join('~', 'repo', 'kinemparse', 'scripts', 'config', config_fn)
)
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
config.update(args)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/decode_keyframes.py
```python
import os
import functools
import time
import glob
import argparse
import numpy as np
import joblib
import yaml
from blocks.core import utils, labels, duplocorpus
from blocks.estimation import imageprocessing, models, render, metrics
def removeBackground(image, foreground_mask, replace_with=None):
if replace_with is None:
replace_with = np.zeros_like(image)
new_image = image.copy().astype(float)
new_image[~foreground_mask] = replace_with[~foreground_mask]
return new_image
def getUniqueTrialIds(dir_path):
trial_ids = set(
int(os.path.basename(fn).split('-')[1].split('_')[0])
for fn in glob.glob(os.path.join(dir_path, f"trial-*.pkl"))
)
return sorted(tuple(trial_ids))
def main(
out_dir=None, data_dir=None, preprocess_dir=None, detections_dir=None,
data_scores_dir=None, keyframes_dir=None,
num_seqs=None, only_task_ids=None, resume=None, num_folds=None,
scores_run_name=None, keyframe_model_name=None, reselect_keyframes=None,
subsample_period=None, window_size=None, corpus_name=None, debug=None,
remove_skin=None, remove_background=None,
default_annotator=None, cv_scheme=None, model_config=None, overwrite=None,
legacy_mode=None):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
preprocess_dir = os.path.expanduser(preprocess_dir)
detections_dir = os.path.expanduser(detections_dir)
if data_scores_dir is not None:
data_scores_dir = os.path.expanduser(data_scores_dir)
if keyframes_dir is not None:
keyframes_dir = os.path.expanduser(keyframes_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
if overwrite is None:
overwrite = debug
if legacy_mode:
model_config['decode_kwargs']['legacy_mode'] = legacy_mode
def loadFromDataDir(var_name):
return joblib.load(os.path.join(data_dir, f"{var_name}.pkl"))
def loadFromPreprocessDir(var_name):
return joblib.load(os.path.join(preprocess_dir, f"{var_name}.pkl"))
def loadFromDetectionsDir(var_name):
return joblib.load(os.path.join(detections_dir, f"{var_name}.pkl"))
def loadFromKeyframesDir(var_name):
return joblib.load(os.path.join(keyframes_dir, f"{var_name}.pkl"))
def saveToWorkingDir(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl"))
trial_ids = getUniqueTrialIds(detections_dir)
corpus = duplocorpus.DuploCorpus(corpus_name)
if num_seqs is not None and num_seqs > 0:
logger.info(f"Ignoring all but the first {num_seqs} videos")
trial_ids = trial_ids[:num_seqs]
logger.info(f"Loading data...")
kept_trial_ids = []
rgb_keyframe_seqs = []
depth_keyframe_seqs = []
seg_keyframe_seqs = []
background_keyframe_seqs = []
assembly_keyframe_seqs = []
assembly_seqs = []
label_keyframe_seqs = []
foreground_mask_seqs = []
for seq_idx, trial_id in enumerate(trial_ids):
try:
trial_str = f"trial={trial_id}"
rgb_frame_seq = loadFromDataDir(f'{trial_str}_rgb-frame-seq')
depth_frame_seq = loadFromDataDir(f'{trial_str}_depth-frame-seq')
rgb_timestamp_seq = loadFromDataDir(f'{trial_str}_rgb-frame-timestamp-seq')
action_seq = loadFromDataDir(f'{trial_str}_action-seq')
trial_str = f"trial-{trial_id}"
foreground_mask_seq = loadFromPreprocessDir(
f'{trial_str}_foreground-mask-seq_no-ref-model'
)
background_plane_seq = loadFromPreprocessDir(f'{trial_str}_background-plane-seq')
# FIXME: I need a better way of handling this. For child data it's
# better to get segments from block detections, but for the easy
# dataset it's better to use the original foreground segments.
if legacy_mode:
segment_seq = loadFromDetectionsDir(f'{trial_str}_block-segment-frame-seq')
else:
segment_seq = loadFromPreprocessDir(f'{trial_str}_segment-frame-seq')
label_frame_seq = loadFromDetectionsDir(f'{trial_str}_class-label-frame-seq')
assembly_seq = labels.parseLabelSeq(action_seq, timestamps=rgb_timestamp_seq)
if len(assembly_seq) < 2:
logger.info(f"Skipping trial {trial_id} --- error parsing state seq")
continue
assembly_seq[-1].end_idx = len(rgb_frame_seq) - 1
except FileNotFoundError as e:
logger.warning(e)
logger.info(f"Skipping trial {trial_id} --- no data in {scores_run_name}")
continue
task_id = None # corpus.getTaskIndex(trial_id) (FIXME: getTaskIndex is broken)
assembly_seqs.append(assembly_seq)
if keyframes_dir is not None:
keyframe_idxs = loadFromKeyframesDir(f'{trial_str}_keyframe-idxs')
assembly_seq = labels.resampleStateSeq(keyframe_idxs, assembly_seq)
rgb_frame_seq = rgb_frame_seq[keyframe_idxs]
depth_frame_seq = depth_frame_seq[keyframe_idxs]
segment_seq = segment_seq[keyframe_idxs]
background_plane_seq = tuple(
background_plane_seq[i] for i in keyframe_idxs
)
label_frame_seq = label_frame_seq[keyframe_idxs]
foreground_mask_seq = foreground_mask_seq[keyframe_idxs]
if not only_task_ids or task_id in only_task_ids:
rgb_keyframe_seqs.append(rgb_frame_seq)
depth_keyframe_seqs.append(depth_frame_seq)
seg_keyframe_seqs.append(segment_seq)
background_keyframe_seqs.append(background_plane_seq)
assembly_keyframe_seqs.append(assembly_seq)
label_keyframe_seqs.append(label_frame_seq)
foreground_mask_seqs.append(foreground_mask_seq)
kept_trial_ids.append(trial_id)
trial_ids = kept_trial_ids
# Split into train and test sets
if cv_scheme == 'leave one out':
num_seqs = len(trial_ids)
cv_folds = []
for i in range(num_seqs):
test_fold = (i,)
train_fold = tuple(range(0, i)) + tuple(range(i + 1, num_seqs))
cv_folds.append((train_fold, test_fold))
elif cv_scheme == 'train on child':
child_corpus = duplocorpus.DuploCorpus('child')
child_trial_ids = utils.loadVariable('trial_ids', 'preprocess-all-data', 'child')
train_assembly_seqs = tuple(
labels.parseLabelSeq(child_corpus.readLabels(trial_id, 'Cathryn')[0])
for trial_id in child_trial_ids
)
hmm = models.EmpiricalImageHmm(**model_config['init_kwargs'])
logger.info(f" Training model on {len(train_assembly_seqs)} sequences...")
hmm.fit(train_assembly_seqs, **model_config['fit_kwargs'])
logger.info(f' Model trained on {hmm.num_states} unique assembly states')
saveToWorkingDir(hmm, f'hmm-fold0')
cv_folds = [(tuple(range(len(child_trial_ids))), tuple(range(len(trial_ids))))]
num_cv_folds = len(cv_folds)
saveToWorkingDir(cv_folds, f'cv-folds')
total_correct = 0
total_items = 0
for fold_index, (train_idxs, test_idxs) in enumerate(cv_folds):
if num_folds is not None and fold_index >= num_folds:
break
logger.info(f"CV FOLD {fold_index + 1} / {num_cv_folds}")
# Initialize and train model
if cv_scheme == 'train on child':
pass
else:
utils.validateCvFold(train_idxs, test_idxs)
selectTrain = functools.partial(utils.select, train_idxs)
# train_trial_ids = selectTrain(trial_ids)
train_assembly_seqs = selectTrain(assembly_keyframe_seqs)
hmm = models.EmpiricalImageHmm(**model_config['init_kwargs'])
logger.info(f" Training model on {len(train_idxs)} sequences...")
hmm.fit(train_assembly_seqs, **model_config['fit_kwargs'])
logger.info(f' Model trained on {hmm.num_states} unique assembly states')
saveToWorkingDir(hmm, f'hmm-fold{fold_index}')
# Decode on the test set
logger.info(f" Testing model on {len(test_idxs)} sequences...")
for i, test_index in enumerate(test_idxs):
trial_id = trial_ids[test_index]
rgb_frame_seq = rgb_keyframe_seqs[test_index]
depth_frame_seq = depth_keyframe_seqs[test_index]
seg_frame_seq = seg_keyframe_seqs[test_index]
background_plane_seq = background_keyframe_seqs[test_index]
true_assembly_seq = assembly_keyframe_seqs[test_index]
true_assembly_seq_orig = assembly_seqs[test_index]
label_frame_seq = label_keyframe_seqs[test_index]
foreground_mask_seq = foreground_mask_seqs[test_index]
if data_scores_dir is not None:
try:
data_scores = joblib.load(
os.path.join(data_scores_dir, f"trial={trial_id}_data-scores.pkl")
)
except FileNotFoundError:
logger.info(f" Skipping trial {trial_id} --- scores file not found")
continue
else:
data_scores = None
rgb_frame_seq = tuple(
imageprocessing.saturateImage(
rgb_image, background_mask=~foreground_mask,
remove_background=remove_background
)
for rgb_image, foreground_mask in zip(rgb_frame_seq, foreground_mask_seq)
)
depth_bkgrd_frame_seq = tuple(
render.renderPlane(
background_plane,
camera_params=render.intrinsic_matrix,
camera_pose=render.camera_pose,
plane_appearance=render.object_colors[0]
)[1]
for background_plane in background_plane_seq
)
depth_frame_seq = tuple(
removeBackground(depth_image, foreground_mask, replace_with=depth_bkgrd)
for depth_image, foreground_mask, depth_bkgrd
in zip(depth_frame_seq, foreground_mask_seq, depth_bkgrd_frame_seq)
)
# FIXME: This is a really hacky way of dealing with the fact that
# fitScene takes a background plane but stateLogLikelihood takes
# a background plane IMAGE
if legacy_mode:
background_seq = depth_bkgrd_frame_seq
else:
background_seq = background_plane_seq
logger.info(f' Decoding video {trial_id}...')
num_oov = sum(int(s not in hmm.states) for s in true_assembly_seq)
logger.info(f" {num_oov} out-of-vocabulary states in ground-truth")
start_time = time.process_time()
ret = hmm.predictSeq(
rgb_frame_seq, depth_frame_seq, seg_frame_seq, label_frame_seq,
background_seq, log_likelihoods=data_scores,
**model_config['decode_kwargs']
)
pred_assembly_seq, pred_idx_seq, max_log_probs, log_likelihoods, poses_seq = ret
end_time = time.process_time()
logger.info(utils.makeProcessTimeStr(end_time - start_time))
if data_scores_dir is not None:
# FIXME: I only save the pose of the best sequence, but I should
# save all of them
# poses_seq = joblib.load(
# os.path.join(data_scores_dir, f"trial-{trial_id}_poses-seq.pkl")
# )
if legacy_mode:
poses_seq = tuple(
((0, np.zeros(2)),) * len(s.connected_components)
for s in pred_assembly_seq
)
else:
poses_seq = tuple(
((np.eye(3), np.zeros(3)),) * len(s.connected_components)
for s in pred_assembly_seq
)
if len(pred_assembly_seq) == len(true_assembly_seq):
num_correct, num_total = metrics.numberCorrect(true_assembly_seq, pred_assembly_seq)
logger.info(f' ACCURACY: {num_correct} / {num_total}')
total_correct += num_correct
total_items += num_total
else:
logger.info(
f" Skipping accuracy computation: "
f"{len(pred_assembly_seq)} pred states != "
f"{len(true_assembly_seq)} gt states"
)
# Save intermediate results
logger.info(f"Saving output...")
trial_str = f"trial-{trial_id}"
saveToWorkingDir(true_assembly_seq_orig, f'{trial_str}_true-state-seq-orig')
saveToWorkingDir(true_assembly_seq, f'{trial_str}_true-state-seq')
saveToWorkingDir(pred_assembly_seq, f'{trial_str}_pred-state-seq')
saveToWorkingDir(poses_seq, f'{trial_str}_poses-seq')
saveToWorkingDir(max_log_probs, f'{trial_str}_viterbi-scores')
saveToWorkingDir(log_likelihoods, f'{trial_str}_data-scores')
# Save figures
if legacy_mode:
renders = tuple(
render.makeFinalRender(
p, assembly=a,
rgb_background=np.zeros_like(rgb),
depth_background=depth_bkgrd,
camera_pose=render.camera_pose,
camera_params=render.intrinsic_matrix,
block_colors=render.object_colors
)
for p, a, rgb, depth, depth_bkgrd in zip(
poses_seq, pred_assembly_seq, rgb_frame_seq, depth_frame_seq,
depth_bkgrd_frame_seq
)
)
rgb_rendered_seq, depth_rendered_seq, label_rendered_seq = tuple(
zip(*renders)
)
gt_poses_seq = tuple(
((0, np.zeros(2)),) * len(s.connected_components)
for s in true_assembly_seq
)
renders = tuple(
render.makeFinalRender(
p, assembly=a,
rgb_background=np.zeros_like(rgb),
depth_background=depth_bkgrd,
camera_pose=render.camera_pose,
camera_params=render.intrinsic_matrix,
block_colors=render.object_colors
)
for p, a, rgb, depth, depth_bkgrd in zip(
gt_poses_seq, true_assembly_seq, rgb_frame_seq, depth_frame_seq,
depth_bkgrd_frame_seq
)
)
rgb_rendered_seq_gt, depth_rendered_seq_gt, label_rendered_seq_gt = tuple(
zip(*renders)
)
else:
rgb_rendered_seq, depth_rendered_seq, label_rendered_seq = utils.batchProcess(
render.renderScene,
background_plane_seq, pred_assembly_seq, poses_seq,
static_kwargs={
'camera_pose': render.camera_pose,
'camera_params': render.intrinsic_matrix,
'object_appearances': render.object_colors
},
unzip=True
)
gt_poses_seq = tuple(
((np.eye(3), np.zeros(3)),) * len(s.connected_components)
for s in true_assembly_seq
)
renders = utils.batchProcess(
render.renderScene,
background_plane_seq, true_assembly_seq, gt_poses_seq,
static_kwargs={
'camera_pose': render.camera_pose,
'camera_params': render.intrinsic_matrix,
'object_appearances': render.object_colors
},
unzip=True
)
rgb_rendered_seq_gt, depth_rendered_seq_gt, label_rendered_seq_gt = renders
if utils.in_ipython_console():
file_path = None
else:
trial_str = f"trial-{trial_id}"
file_path = os.path.join(fig_dir, f'{trial_str}_best-frames.png')
diff_images = tuple(np.abs(f - r) for f, r in zip(rgb_frame_seq, rgb_rendered_seq))
imageprocessing.displayImages(
*rgb_frame_seq, *diff_images, *rgb_rendered_seq, *rgb_rendered_seq_gt,
*seg_frame_seq, *label_frame_seq,
num_rows=6, file_path=file_path
)
logger.info(f'AVG ACCURACY: {total_correct / total_items * 100: .1f}%')
if __name__ == '__main__':
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
parser.add_argument('--out_dir')
parser.add_argument('--data_dir')
parser.add_argument('--preprocess_dir')
parser.add_argument('--detections_dir')
parser.add_argument('--keyframes_dir')
parser.add_argument('--data_scores_dir')
args = vars(parser.parse_args())
args = {k: v for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.expanduser(
os.path.join(
'~', 'repo', 'blocks', 'blocks', 'estimation', 'scripts', 'config',
config_fn
)
)
else:
config_fn = os.path.basename(config_file_path)
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
config.update(args)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/detect_oov.py
```python
import argparse
import os
import inspect
import collections
import yaml
import numpy as np
import joblib
from matplotlib import pyplot as plt
from mathtools import utils, metrics
from blocks.core import labels
def OOVrate(assembly_seqs, eq_class=None):
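""" Estimate the out-of-vocabulary rate of assembly states under leave-one-out
cross-validation, both overall and conditioned on an equivalence class of the
preceding state ('state index' or 'is oov'). """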
all_assemblies = []
label_seqs = tuple(
np.array(list(labels.gen_eq_classes(seq, all_assemblies, equivalent=None)))
for seq in assembly_seqs
)
num_oov = collections.defaultdict(int)
num_counts = collections.defaultdict(int)
for i, heldout_seq in enumerate(label_seqs):
train_seqs = label_seqs[:i] + label_seqs[i + 1:]
train_seqs = np.hstack(train_seqs)
train_labels = np.unique(train_seqs)
for label, prev_label in zip(heldout_seq[1:], heldout_seq[:-1]):
if eq_class == 'state index':
prev_eq_class = prev_label
elif eq_class == 'is oov':
prev_eq_class = int(prev_label not in train_labels)
else:
raise NotImplementedError()
if label not in train_labels:
num_oov[prev_eq_class] += 1
num_counts[prev_eq_class] += 1
num_labels = max(num_counts.keys()) + 1
oov_counts = np.zeros(num_labels)
for label, count in num_oov.items():
oov_counts[label] = count
total_counts = np.zeros(num_labels)
for label, count in num_counts.items():
total_counts[label] = count
contextual_oov_rate = oov_counts / total_counts
oov_rate = oov_counts.sum() / total_counts.sum()
return oov_rate, contextual_oov_rate, total_counts
def plotOOV(oov_rates, state_counts, fn=None, eq_class=None, subplot_width=12, subplot_height=3):
num_subplots = 2
figsize = (subplot_width, num_subplots * subplot_height)
fig, axes = plt.subplots(num_subplots, figsize=figsize, sharex=True)
axes[0].stem(oov_rates, use_line_collection=True)
axes[0].set_ylabel("OOV rate")
axes[0].set_xlabel(eq_class)
axes[1].stem(state_counts, use_line_collection=True)
axes[1].set_ylabel("count")
axes[1].set_xlabel(eq_class)
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def scatterOOV(
oov_rates, state_counts, fn=None, eq_class=None, subplot_width=12, subplot_height=12):
num_subplots = 1
figsize = (subplot_width, num_subplots * subplot_height)
fig, axis = plt.subplots(num_subplots, figsize=figsize, sharex=True)
axes = [axis]
axes[0].plot(state_counts, oov_rates, 'o')
axes[0].set_ylabel("OOV rate")
axes[0].set_xlabel("state count")
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def main(
out_dir=None, data_dir=None, cv_data_dir=None, scores_dir=None, eq_class='state index',
plot_predictions=None, results_file=None, sweep_param_name=None,
model_params={}, cv_params={}, train_params={}, viz_params={}):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
scores_dir = os.path.expanduser(scores_dir)
if cv_data_dir is not None:
cv_data_dir = os.path.expanduser(cv_data_dir)
if results_file is None:
results_file = os.path.join(out_dir, f'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
def loadAll(seq_ids, var_name, data_dir):
def loadOne(seq_id):
fn = os.path.join(data_dir, f'trial={seq_id}_{var_name}')
return joblib.load(fn)
return tuple(map(loadOne, seq_ids))
# Load data
trial_ids = utils.getUniqueIds(scores_dir, prefix='trial=')
assembly_seqs = loadAll(trial_ids, 'assembly-seq.pkl', data_dir)
score_seqs = loadAll(trial_ids, 'data-scores.pkl', scores_dir)
oov_rate, contextual_oov_rate, state_counts = OOVrate(assembly_seqs, eq_class=eq_class)
logger.info(f"OOV RATE: {oov_rate * 100:.1f}%")
plotOOV(
contextual_oov_rate, state_counts,
fn=os.path.join(fig_dir, "oovs.png"), eq_class=eq_class
)
scatterOOV(
contextual_oov_rate, state_counts,
fn=os.path.join(fig_dir, "oovs_scatter.png"), eq_class=eq_class
)
import sys; sys.exit()
# Define cross-validation folds
if cv_data_dir is None:
dataset_size = len(trial_ids)
cv_folds = utils.makeDataSplits(dataset_size, **cv_params)
cv_fold_trial_ids = tuple(
tuple(map(lambda x: trial_ids[x], splits))
for splits in cv_folds
)
else:
fn = os.path.join(cv_data_dir, f'cv-fold-trial-ids.pkl')
cv_fold_trial_ids = joblib.load(fn)
def getSplit(split_idxs):
split_data = tuple(
tuple(s[i] for i in split_idxs)
for s in (score_seqs, assembly_seqs, trial_ids)
)
return split_data
for cv_index, (train_ids, test_ids) in enumerate(cv_fold_trial_ids):
logger.info(
f'CV fold {cv_index + 1}: {len(trial_ids)} total '
f'({len(train_ids)} train, {len(test_ids)} test)'
)
try:
test_idxs = np.array([trial_ids.tolist().index(i) for i in test_ids])
except ValueError:
logger.info(f" Skipping fold: missing test data")
continue
# TRAIN PHASE
if cv_data_dir is None:
train_idxs = np.array([trial_ids.index(i) for i in train_ids])
train_assembly_seqs = tuple(assembly_seqs[i] for i in train_idxs)
train_assemblies = []
for seq in train_assembly_seqs:
list(labels.gen_eq_classes(seq, train_assemblies, equivalent=None))
model = None
else:
fn = f'cvfold={cv_index}_train-assemblies.pkl'
train_assemblies = joblib.load(os.path.join(cv_data_dir, fn))
train_idxs = [i for i in range(len(trial_ids)) if i not in test_idxs]
fn = f'cvfold={cv_index}_model.pkl'
# model = joblib.load(os.path.join(cv_data_dir, fn))
model = None
train_features, train_assembly_seqs, train_ids = getSplit(train_idxs)
test_assemblies = train_assemblies.copy()
for score_seq, gt_assembly_seq, trial_id in zip(*getSplit(test_idxs)):
gt_seq = np.array(list(
labels.gen_eq_classes(gt_assembly_seq, test_assemblies, equivalent=None)
))
# oov_rate = OOVrate(train_assembly_seqs)
# logger.info(f" OOV RATE: {oov_rate * 100:.1f}%")
if plot_predictions:
assembly_fig_dir = os.path.join(fig_dir, 'assembly-imgs')
if not os.path.exists(assembly_fig_dir):
os.makedirs(assembly_fig_dir)
for i, assembly in enumerate(test_assemblies):
assembly.draw(assembly_fig_dir, i)
# TEST PHASE
accuracies = []
for score_seq, gt_assembly_seq, trial_id in zip(*getSplit(test_idxs)):
gt_seq = np.array(list(
labels.gen_eq_classes(gt_assembly_seq, test_assemblies, equivalent=None)
))
num_labels = gt_seq.shape[0]
num_scores = score_seq.shape[-1]
if num_labels != num_scores:
err_str = f"Skipping trial {trial_id}: {num_labels} labels != {num_scores} scores"
logger.info(err_str)
continue
if model is None:
pred_seq = score_seq.argmax(axis=0)
else:
raise AssertionError()
pred_assemblies = [train_assemblies[i] for i in pred_seq]
gt_assemblies = [test_assemblies[i] for i in gt_seq]
acc = metrics.accuracy_upto(pred_assemblies, gt_assemblies, equivalence=None)
accuracies.append(acc)
# num_states = len(gt_seq)
# logger.info(f" trial {trial_id}: {num_states} keyframes")
# logger.info(f" accuracy (fused): {acc * 100:.1f}%")
saveVariable(score_seq, f'trial={trial_id}_data-scores')
saveVariable(pred_assemblies, f'trial={trial_id}_pred-assembly-seq')
saveVariable(gt_assemblies, f'trial={trial_id}_gt-assembly-seq')
if accuracies:
fold_accuracy = float(np.array(accuracies).mean())
metric_dict = {'Accuracy': fold_accuracy}
utils.writeResults(results_file, metric_dict, sweep_param_name, model_params)
if __name__ == "__main__":
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
for arg_name in inspect.getfullargspec(main).args:
parser.add_argument(f'--{arg_name}')
args = vars(parser.parse_args())
args = {k: yaml.safe_load(v) for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.join(
os.path.expanduser('~'), 'repo', 'kinemparse', 'scripts', config_fn
)
else:
config_fn = os.path.basename(config_file_path)
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
for k, v in args.items():
if isinstance(v, dict) and k in config:
config[k].update(v)
else:
config[k] = v
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/eval_preds.py
```python
import argparse
import os
import inspect
import functools
import yaml
import numpy as np
import joblib
from matplotlib import pyplot as plt
import graphviz as gv
import pandas as pd
from mathtools import utils, metrics
from blocks.core import labels
def drawPaths(paths, fig_fn, base_path, state_img_dir, path_labels=None, img_ext='png'):
""" Draw a sequence of `BlockAssembly` states using graphviz.
Parameters
----------
paths : iterable( iterable(int) )
Each path is a list of state indices; one chain of states is drawn per path.
fig_fn : str
Filename of the figure
base_path : str
Path to the directory where figure will be saved
state_img_dir : str
Path to the directory containing source images of the states that make
up this path. State filenames are assumed to have the format
`state<state_index>.<img_ext>`
img_ext : str, optional
Extension specifying the image file type. Can be 'svg', 'png', etc.
"""
path_graph = gv.Digraph(name=fig_fn, format=img_ext, directory=base_path)
for j, path in enumerate(paths):
for i, state_index in enumerate(path):
image_fn = 'state{}.{}'.format(state_index, img_ext)
image_path = os.path.join(state_img_dir, image_fn)
if path_labels is not None:
label = f"{path_labels[j, i]}"
else:
label = None
path_graph.node(
f"{j}, {i}", image=image_path,
fixedsize='true', width='1', height='0.5', imagescale='true',
pad='1', fontsize='12', label=label
)
if i > 0:
path_graph.edge(f"{j}, {i - 1}", f"{j}, {i}", fontsize='12')
path_graph.render(filename=fig_fn, directory=base_path, cleanup=True)
def plot_scores(score_seq, k=None, fn=None):
subplot_width = 12
subplot_height = 3
num_axes = score_seq.shape[0] + 1
figsize = (subplot_width, num_axes * subplot_height)
fig, axes = plt.subplots(num_axes, figsize=figsize)
if num_axes == 1:
axes = [axes]
score_seq = score_seq.copy()
for i, scores in enumerate(score_seq):
if k is not None:
bottom_k = (-scores).argsort(axis=0)[k:, :]
for j in range(scores.shape[1]):
scores[bottom_k[:, j], j] = -np.inf
axes[i].imshow(scores, interpolation='none', aspect='auto')
axes[-1].imshow(score_seq.sum(axis=0), interpolation='none', aspect='auto')
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def plot_hists(scores, axis_labels=None, fn=None):
subplot_width = 12
subplot_height = 3
num_axes = scores.shape[0]
figsize = (subplot_width, num_axes * subplot_height)
fig, axes = plt.subplots(num_axes, figsize=figsize)
if num_axes == 1:
axes = [axes]
for i, s in enumerate(scores):
s[np.isinf(s)] = s[~np.isinf(s)].min() - 1
axes[i].hist(s, bins=50, density=True)
if axis_labels is not None:
axes[i].set_ylabel(axis_labels[i])
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def is_goal_error(goal, pred, true):
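# Equivalence used by accuracy_upto: a prediction counts as a match when it
# agrees with the ground truth about whether the state is contained in (<=)
# the goal assembly.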
return (pred <= goal) == (true <= goal)
def is_goal_layer(goal, assembly):
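""" Check whether `assembly` is a valid layer-by-layer prefix of `goal`: the
assembly is split into horizontal layers by block height, and every layer
below the topmost one must match the corresponding layer of the goal. """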
def makeLayers(assembly):
if not assembly.blocks:
return {}
def makeLayer(z, assembly):
layer = assembly.copy()
for b_id in tuple(layer.blocks.keys()):
if b_id not in layer.blocks:
continue
block = layer.getBlock(b_id)
block_height = block.metric_vertices[:, -1].max()
if block_height > z:
layer.connections[b_id, :] = False
layer.connections[:, b_id] = False
component = layer.connected_components[block.component_index]
component.remove(b_id)
if not component:
del layer.connected_components[block.component_index]
del layer.blocks[b_id]
return layer
def max_z(block_vertices):
return block_vertices[:, -1].max()
block_heights = np.array(list(map(max_z, assembly.vertices)))
layer_heights = np.unique(block_heights)
layers = {z: makeLayer(z, assembly) for z in layer_heights}
return layers
for comp_idx in assembly.connected_components.keys():
assembly.centerComponent(comp_idx, zero_at='smallest_z')
for comp_idx in goal.connected_components.keys():
goal.centerComponent(comp_idx, zero_at='smallest_z')
assembly_layers = makeLayers(assembly)
goal_layers = makeLayers(goal)
prev_layer_heights = sorted(assembly_layers.keys())[:-1]
prev_layers_complete = tuple(
assembly_layers[z] == goal_layers[z]
for z in prev_layer_heights
)
is_goal_layer = all(prev_layers_complete)
if len(goal.blocks) > 4:
# import pdb; pdb.set_trace()
pass
# logger.info(f" z_max prev: {zmax_prev:.1f}")
# logger.info(f" z_max cur: {zmax_cur:.1f}")
# logger.info(f"cur is new layer: {cur_is_new_layer}")
# logger.info(f" prev is correct: {prev_is_correct}")
# logger.info(f" is goal layer: {is_goal_layer}")
return is_goal_layer
def main(
out_dir=None, data_dir=None, metadata_file=None,
plot_predictions=None, results_file=None, sweep_param_name=None):
logger.info(f"Reading from: {data_dir}")
logger.info(f"Writing to: {out_dir}")
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
if metadata_file is not None:
metadata_file = os.path.expanduser(metadata_file)
metadata = pd.read_csv(metadata_file, index_col=0)
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def loadVariable(var_name):
return joblib.load(os.path.join(data_dir, f'{var_name}.pkl'))
def loadAll(seq_ids, var_name):
def loadOne(seq_id):
fn = os.path.join(data_dir, f'trial={seq_id}_{var_name}')
return joblib.load(fn)
return tuple(map(loadOne, seq_ids))
trial_ids = utils.getUniqueIds(data_dir, prefix='trial=', to_array=True)
pred_assembly_seqs = loadAll(trial_ids, "pred-assembly-seq.pkl")
gt_assembly_seqs = loadAll(trial_ids, "gt-assembly-seq.pkl")
all_assemblies = []
gt_label_seqs = tuple(
np.array(list(labels.gen_eq_classes(gt_assembly_seq, all_assemblies)))
for gt_assembly_seq in gt_assembly_seqs
)
pred_label_seqs = tuple(
np.array(list(labels.gen_eq_classes(pred_assembly_seq, all_assemblies)))
for pred_assembly_seq in pred_assembly_seqs
)
if plot_predictions:
assembly_fig_dir = os.path.join(fig_dir, 'assembly-imgs')
if not os.path.exists(assembly_fig_dir):
os.makedirs(assembly_fig_dir)
for i, assembly in enumerate(all_assemblies):
assembly.draw(assembly_fig_dir, i)
logger.info(f"Evaluating {len(trial_ids)} sequence predictions")
accuracies = {
'state': [],
'is_error': [],
'is_layer': []
}
data = zip(trial_ids, pred_assembly_seqs, gt_assembly_seqs)
for i, trial_id in enumerate(trial_ids):
pred_assembly_seq = pred_assembly_seqs[i]
gt_assembly_seq = gt_assembly_seqs[i]
task = int(metadata.iloc[trial_id]['task id'])
goal = labels.constructGoalState(task)
is_error = functools.partial(is_goal_error, goal)
is_layer = functools.partial(is_goal_layer, goal)
state = None
logger.info(f"SEQUENCE {trial_id}: {len(gt_assembly_seq)} items")
for name in accuracies.keys():
if name == 'is_layer':
pred_is_layer = np.array(list(map(is_layer, pred_assembly_seq)))
gt_is_layer = np.array(list(map(is_layer, gt_assembly_seq)))
matches = pred_is_layer == gt_is_layer
acc = matches.sum() / len(matches)
logger.info(f" {name}: {acc:.2}")
logger.info(f" {gt_is_layer.sum():2} gt layers")
logger.info(f" {pred_is_layer.sum():2} pred layers")
else:
acc = metrics.accuracy_upto(
# pred_assembly_seq[1:], gt_assembly_seq[1:],
pred_assembly_seq, gt_assembly_seq,
equivalence=locals()[name]
)
logger.info(f" {name}: {acc:.2}")
accuracies[name].append(acc)
if plot_predictions:
paths_dir = os.path.join(fig_dir, 'path-imgs')
if not os.path.exists(paths_dir):
os.makedirs(paths_dir)
fn = f"trial={trial_id}_paths"
paths = np.row_stack((gt_label_seqs[i], pred_label_seqs[i]))
path_labels = np.row_stack((gt_is_layer, pred_is_layer))
drawPaths(paths, fn, paths_dir, assembly_fig_dir, path_labels=path_labels)
logger.info("EVALUATION RESULTS:")
max_width = max(map(len, accuracies.keys()))
for name, vals in accuracies.items():
vals = np.array(vals) * 100
mean = vals.mean()
std = vals.std()
logger.info(f" {name:{max_width}}: {mean:4.1f} +/- {std:4.1f}%")
if __name__ == "__main__":
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
for arg_name in inspect.getfullargspec(main).args:
parser.add_argument(f'--{arg_name}')
args = vars(parser.parse_args())
args = {k: yaml.safe_load(v) for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.join(
os.path.expanduser('~'), 'repo', 'kinemparse', 'scripts', config_fn
)
else:
config_fn = os.path.basename(config_file_path)
if os.path.exists(config_file_path):
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
else:
config = {}
for k, v in args.items():
if isinstance(v, dict) and k in config:
config[k].update(v)
else:
config[k] = v
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/make_activity_dataset_video.py
```python
import os
import logging
import numpy as np
from matplotlib import pyplot as plt
import joblib
import yaml
from mathtools import utils
logger = logging.getLogger(__name__)
def makeActivityLabels(action_seq, seq_len=None, use_end_label=False):
"""
0: before main activity
1: main activity
2: after main activity
"""
if seq_len is None:
seq_len = action_seq['end'].max() + 1
is_tag = action_seq['action'] > 7
action_seq = action_seq[~is_tag]
# Say everything is the main activity at first
labels = np.ones(seq_len, dtype=int)
# Label everything before the start of the first (non-tag) action as "before"
first_start = action_seq[0]['start']
labels[:first_start] = 0
if use_end_label:
# Label everything after the end of the last (non-tag) action as "after"
last_end = action_seq[-1]['end']
labels[last_end + 1:] = 2
return labels
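# Illustrative example (hypothetical data): one tag action (id 8) and one
# real action spanning frames 3-6, with use_end_label=True:
# toy_actions = np.array(
#     [(8, 0, 1), (2, 3, 6)],
#     dtype=[('action', int), ('start', int), ('end', int)]
# )
# makeActivityLabels(toy_actions, seq_len=10, use_end_label=True)
# -> array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2])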
def plotScores(
timestamp_seq, score_seq,
imu_timestamp_seq=None, imu_score_seq=None,
keyframe_idxs=None, action_labels=None, raw_labels=None, fn=None):
score_seqs = [score_seq]
timestamp_seqs = [timestamp_seq]
if imu_score_seq is not None:
score_seqs.append(imu_score_seq)
timestamp_seqs.append(imu_timestamp_seq)
num_subplots = len(score_seqs)
_, axes = plt.subplots(num_subplots, figsize=(12, 5 * num_subplots), sharex=True)
if num_subplots == 1:
axes = [axes]
for i, axis in enumerate(axes):
score_seq = score_seqs[i]
timestamp_seq = timestamp_seqs[i]
axis.set_title('Video frame scores')
axis.set_xlabel('Frame index')
axis.set_ylabel('Frame score')
axis.axhline(np.nanmean(score_seq), color='k')
axis.plot(timestamp_seq, score_seq)
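# Note: the next line fills NaN scores in place (mutating the caller's array)
# so the scatter overlays below index finite values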
score_seq[np.isnan(score_seq)] = np.nanmean(score_seq)
if action_labels is not None:
axis = axis.twinx()
axis.plot(timestamp_seqs[0], action_labels, color='tab:green')
if raw_labels is not None:
is_first_touch_label = raw_labels['action'] == 7
first_touch_idxs = raw_labels['start'][is_first_touch_label]
first_touch_times = timestamp_seqs[0][first_touch_idxs]
first_touch_scores = score_seqs[0][first_touch_idxs]
axes[0].scatter(first_touch_times, first_touch_scores, color='tab:red')
if keyframe_idxs is not None:
keyframe_times = timestamp_seqs[0][keyframe_idxs]
keyframe_scores = score_seqs[0][keyframe_idxs]
axes[0].scatter(keyframe_times, keyframe_scores, color='tab:orange')
plt.tight_layout()
if fn is None:
pass
else:
plt.savefig(fn)
plt.close()
def plotScoreHists(scores, labels, fn=None):
unique_labels = np.unique(labels)
num_labels = len(unique_labels)
f, axes = plt.subplots(num_labels, figsize=(12, 3 * num_labels))
for i, label in enumerate(unique_labels):
matches_label = labels == label
matching_scores = scores[matches_label]
axes[i].hist(matching_scores[~np.isnan(matching_scores)], bins=50)
axes[i].set_xlabel(f'scores, label={label}')
axes[i].set_ylabel('counts')
plt.tight_layout()
if fn is None:
pass
else:
plt.savefig(fn)
plt.close()
def make_imu_feats(imu_score_seq):
return imu_score_seq[..., 2].swapaxes(0, 1).max(axis=1)
def main(
out_dir=None, video_data_dir=None, imu_data_dir=None,
video_seg_scores_dir=None, imu_seg_scores_dir=None, gt_keyframes_dir=None,
label_kwargs={}):
out_dir = os.path.expanduser(out_dir)
video_data_dir = os.path.expanduser(video_data_dir)
imu_data_dir = os.path.expanduser(imu_data_dir)
video_seg_scores_dir = os.path.expanduser(video_seg_scores_dir)
if imu_seg_scores_dir is not None:
imu_seg_scores_dir = os.path.expanduser(imu_seg_scores_dir)
if gt_keyframes_dir is not None:
gt_keyframes_dir = os.path.expanduser(gt_keyframes_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
def loadFromDir(var_name, dir_name):
return joblib.load(os.path.join(dir_name, f"{var_name}.pkl"))
def saveToWorkingDir(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl"))
trial_ids = utils.getUniqueIds(video_seg_scores_dir, prefix='trial-', suffix='.pkl')
all_score_seqs = []
all_action_labels = []
for seq_idx, trial_id in enumerate(trial_ids):
logger.info(f"Processing video {seq_idx + 1} / {len(trial_ids)} (trial {trial_id})")
logger.info(" Loading data...")
score_seq = loadFromDir(f"trial-{trial_id}_frame-scores", video_seg_scores_dir)
raw_labels = loadFromDir(f"trial-{trial_id}_action-seq", video_data_dir)
action_labels = makeActivityLabels(raw_labels, seq_len=score_seq.shape[0], **label_kwargs)
timestamp_seq = loadFromDir(f"trial-{trial_id}_rgb-frame-timestamp-seq", video_data_dir)
if timestamp_seq.shape != score_seq.shape:
logger.warning(
f"Video dimensions don't match: "
f"{score_seq.shape} scores, {timestamp_seq.shape} timestamps"
)
continue
if imu_seg_scores_dir is not None:
try:
imu_score_seq = loadFromDir(
f'trial={trial_id}_score-seq',
imu_seg_scores_dir
)
imu_score_seq = make_imu_feats(imu_score_seq)
except FileNotFoundError:
logger.info(f" IMU scores not found: trial {trial_id}")
continue
imu_timestamp_seq = loadFromDir(f"trial={trial_id}_timestamp-seq", imu_data_dir)
if imu_timestamp_seq.shape != imu_score_seq.shape:
logger.warning(
f"IMU dimensions don't match: "
f"{imu_score_seq.shape} scores, {imu_timestamp_seq.shape} timestamps"
)
continue
# Downsample imu scores to match rgb scores
imu_score_seq = utils.resampleSeq(imu_score_seq, imu_timestamp_seq, timestamp_seq)
imu_timestamp_seq = timestamp_seq
else:
imu_score_seq = None
imu_timestamp_seq = None
logger.info(" Saving output...")
gt_keyframe_fn = os.path.join(gt_keyframes_dir, f"trial-{trial_id}_gt-keyframe-seq.pkl")
if os.path.exists(gt_keyframe_fn):
gt_keyframes = joblib.load(gt_keyframe_fn)
else:
gt_keyframes = None
trial_str = f"trial={trial_id}"
fn = os.path.join(fig_dir, f'{trial_str}_scores-plot.png')
plotScores(
timestamp_seq, score_seq,
action_labels=action_labels, raw_labels=raw_labels,
imu_timestamp_seq=imu_timestamp_seq, imu_score_seq=imu_score_seq,
keyframe_idxs=gt_keyframes, fn=fn
)
all_score_seqs.append(score_seq)
all_action_labels.append(action_labels)
# Save intermediate results
score_seq -= np.nanmean(score_seq)
score_is_nan = np.isnan(score_seq)
score_seq[score_is_nan] = 0
features = (score_seq, score_is_nan.astype(float))
if imu_score_seq is not None:
features += (imu_score_seq,)
feature_seq = np.column_stack(features)
saveToWorkingDir(feature_seq, f'{trial_str}_feature-seq')
saveToWorkingDir(action_labels, f'{trial_str}_label-seq')
all_score_seqs = np.hstack(tuple(all_score_seqs))
all_action_labels = np.hstack(tuple(all_action_labels))
fn = os.path.join(fig_dir, 'score-hists.png')
plotScoreHists(all_score_seqs, all_action_labels, fn=fn)
if __name__ == '__main__':
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: kinemparse/scripts/make_attr_data_imu.py
```python
import os
import logging
import yaml
import numpy as np
from mathtools import utils
from blocks.core import labels
from kinemparse import imu
logger = logging.getLogger(__name__)
def parseActions(action_seq, rgb_timestamp_seq, imu_timestamp_seq):
assembly_seq, is_valid = labels.parseLabelSeq(
# None, timestamps=rgb_timestamp_seq, action_seq=action_seq,
action_seq, timestamps=rgb_timestamp_seq,
structure_change_only=True
)
assembly_seq[-1].end_idx = len(rgb_timestamp_seq) - 1
_ = imu.rgbIdxsToImuIdxs(
assembly_seq, rgb_timestamp_seq, imu_timestamp_seq,
action_idxs=False
)
_ = imu.rgbIdxsToImuIdxs(
assembly_seq, rgb_timestamp_seq, imu_timestamp_seq,
action_idxs=True
)
return assembly_seq
def dictToArray(imu_seqs, transform=None):
if transform is None:
return np.hstack(tuple(imu_seqs[i] for i in range(len(imu_seqs))))
return np.hstack(tuple(transform(imu_seqs[i]) for i in range(len(imu_seqs))))
def makeTimestamps(*imu_dicts):
def transform(x):
return imu.getImuGlobalTimestamps(x)[:, None]
imu_timestamps = tuple(
dictToArray(imu_dict, transform=transform)
for imu_dict in imu_dicts
)
imu_timestamps = np.column_stack(imu_timestamps)
return imu_timestamps.mean(axis=1)
def beforeFirstTouch(action_seq, rgb_timestamp_seq, imu_timestamp_seq):
first_touch_indices = np.nonzero(action_seq['action'] == 7)[0]
if first_touch_indices.size:
first_touch_idx = action_seq['start'][first_touch_indices[0]]
else:
logger.warning('No first touch annotation')
return None
before_first_touch = imu_timestamp_seq <= rgb_timestamp_seq[first_touch_idx]
return before_first_touch
def main(
out_dir=None, data_dir=None, use_vid_ids_from=None,
output_data=None, magnitude_centering=None, resting_from_gt=None,
remove_before_first_touch=None, include_signals=None, fig_type=None):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
logger.info(f"Reading from: {data_dir}")
logger.info(f"Writing to: {out_dir}")
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def loadAll(seq_ids, var_name, from_dir=data_dir, prefix='trial='):
all_data = tuple(
utils.loadVariable(f"{prefix}{seq_id}_{var_name}", from_dir)
for seq_id in seq_ids
)
return all_data
def saveVariable(var, var_name, to_dir=out_data_dir):
utils.saveVariable(var, var_name, to_dir)
if fig_type is None:
fig_type = 'multi'
# Load data
if use_vid_ids_from is None:
trial_ids = utils.getUniqueIds(data_dir, prefix='trial=', to_array=True)
else:
use_vid_ids_from = os.path.expanduser(use_vid_ids_from)
trial_ids = utils.getUniqueIds(use_vid_ids_from, prefix='trial-', to_array=True)
accel_seqs = loadAll(trial_ids, 'accel-samples.pkl')
gyro_seqs = loadAll(trial_ids, 'gyro-samples.pkl')
action_seqs = loadAll(trial_ids, 'action-seq.pkl')
rgb_timestamp_seqs = loadAll(trial_ids, 'rgb-frame-timestamp-seq.pkl')
def validate_imu(seqs):
def is_valid(d):
return not any(np.isnan(x).any() for x in d.values())
return np.array([is_valid(d) for d in seqs])
imu_is_valid = validate_imu(accel_seqs) & validate_imu(gyro_seqs)
logger.info(
f"Ignoring {(~imu_is_valid).sum()} IMU sequences with NaN-valued samples "
f"(of {len(imu_is_valid)} total)"
)
def chooseValid(seq):
return tuple(x for x, is_valid in zip(seq, imu_is_valid) if is_valid)
trial_ids = np.array(list(chooseValid(trial_ids)))
accel_seqs = chooseValid(accel_seqs)
gyro_seqs = chooseValid(gyro_seqs)
action_seqs = chooseValid(action_seqs)
rgb_timestamp_seqs = chooseValid(rgb_timestamp_seqs)
vocab = []
metadata = utils.loadMetadata(data_dir, rows=trial_ids)
utils.saveMetadata(metadata, out_data_dir)
utils.saveVariable(vocab, 'vocab', out_data_dir)
def norm(x):
norm = np.linalg.norm(imu.getImuSamples(x), axis=1)[:, None]
return norm
accel_mag_seqs = tuple(map(lambda x: dictToArray(x, transform=norm), accel_seqs))
gyro_mag_seqs = tuple(map(lambda x: dictToArray(x, transform=norm), gyro_seqs))
imu_timestamp_seqs = utils.batchProcess(makeTimestamps, accel_seqs, gyro_seqs)
if remove_before_first_touch:
before_first_touch_seqs = utils.batchProcess(
beforeFirstTouch, action_seqs, rgb_timestamp_seqs, imu_timestamp_seqs
)
num_ignored = sum(b is None for b in before_first_touch_seqs)
logger.info(
f"Ignoring {num_ignored} sequences without first-touch annotations "
f"(of {len(before_first_touch_seqs)} total)"
)
trials_missing_first_touch = [
i for b, i in zip(before_first_touch_seqs, trial_ids)
if b is None
]
logger.info(f"Trials without first touch: {trials_missing_first_touch}")
def clip(signal, bool_array):
return signal[~bool_array, ...]
accel_mag_seqs = tuple(
clip(signal, b) for signal, b in zip(accel_mag_seqs, before_first_touch_seqs)
if b is not None
)
gyro_mag_seqs = tuple(
clip(signal, b) for signal, b in zip(gyro_mag_seqs, before_first_touch_seqs)
if b is not None
)
imu_timestamp_seqs = tuple(
clip(signal, b) for signal, b in zip(imu_timestamp_seqs, before_first_touch_seqs)
if b is not None
)
trial_ids = tuple(
x for x, b in zip(trial_ids, before_first_touch_seqs)
if b is not None
)
action_seqs = tuple(
x for x, b in zip(action_seqs, before_first_touch_seqs)
if b is not None
)
rgb_timestamp_seqs = tuple(
x for x, b in zip(rgb_timestamp_seqs, before_first_touch_seqs)
if b is not None
)
assembly_seqs = utils.batchProcess(
parseActions,
action_seqs, rgb_timestamp_seqs, imu_timestamp_seqs
)
if output_data == 'components':
accel_feat_seqs = accel_mag_seqs
gyro_feat_seqs = gyro_mag_seqs
unique_components = {frozenset(): 0}
imu_label_seqs = zip(
*tuple(
labels.componentLabels(*args, unique_components)
for args in zip(action_seqs, rgb_timestamp_seqs, imu_timestamp_seqs)
)
)
saveVariable(unique_components, 'unique_components')
elif output_data == 'pairwise components':
imu_label_seqs = utils.batchProcess(
labels.pairwiseComponentLabels, assembly_seqs,
static_kwargs={'lower_tri_only': True, 'include_action_labels': False}
)
accel_feat_seqs = tuple(map(imu.pairwiseFeats, accel_mag_seqs))
gyro_feat_seqs = tuple(map(imu.pairwiseFeats, gyro_mag_seqs))
else:
raise AssertionError()
signals = {'accel': accel_feat_seqs, 'gyro': gyro_feat_seqs}
if include_signals is None:
include_signals = tuple(signals.keys())
signals = tuple(signals[key] for key in include_signals)
imu_feature_seqs = tuple(np.stack(x, axis=-1).squeeze(axis=-1) for x in zip(*signals))
video_seqs = tuple(zip(imu_feature_seqs, imu_label_seqs, trial_ids))
imu.plot_prediction_eg(video_seqs, fig_dir, fig_type=fig_type, output_data=output_data)
video_seqs = tuple(
zip(assembly_seqs, imu_feature_seqs, imu_timestamp_seqs, imu_label_seqs, trial_ids)
)
for assembly_seq, feature_seq, timestamp_seq, label_seq, trial_id in video_seqs:
id_string = f"trial={trial_id}"
saveVariable(assembly_seq, f'{id_string}_assembly-seq')
saveVariable(feature_seq, f'{id_string}_feature-seq')
saveVariable(timestamp_seq, f'{id_string}_timestamp-seq')
saveVariable(label_seq, f'{id_string}_label-seq')
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: kinemparse/scripts/parse_assemblies_imu.py
```python
import argparse
import os
import yaml
import joblib
import numpy as np
from matplotlib import pyplot as plt
from mathtools import utils
from kinemparse import imu
def makeErrorSignal(imu_sample_seq, assembly_seq):
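# Score each assembly's frame range with imu.error against its connected
# components; frames outside every assembly segment stay NaN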
error_signal = np.ones_like(imu_sample_seq) * np.nan
for assembly in assembly_seq:
segment_slice = slice(*assembly.getStartEndFrames())
components = assembly.getComponents(include_singleton_blocks=True)
error_signal[segment_slice] = imu.error(imu_sample_seq[segment_slice], components)
return error_signal
def makeLabelSignal(imu_sample_seq, assembly_seq, action=False):
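# Within each assembly's state (or action) segment, mark the signals whose
# block has at least one connection in that assembly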
label_seq = np.zeros_like(imu_sample_seq, dtype=bool)
for assembly in assembly_seq:
if action:
segment_slice = slice(*assembly.getActionStartEndFrames())
else:
segment_slice = slice(*assembly.getStartEndFrames())
connected_indices = assembly.symmetrized_connections.any(axis=0).nonzero()[0]
label_seq[segment_slice, connected_indices] = True
return label_seq
def plotError(
trial_id, signal, error_signal,
component_label_seq, block_label_seq, block_action_label_seq,
fn=None):
subplot_width = 12
subplot_height = 2
num_plots = error_signal.shape[1] + 1
figsize = (subplot_width, num_plots * subplot_height)
f, axes = plt.subplots(num_plots, figsize=figsize, sharex=True)
axes[0].set_title(f"Error signal, video {trial_id}")
axes[0].plot(component_label_seq)
for i, axis in enumerate(axes[1:]):
axis.set_ylabel(f"IMU {i}")
axis.plot(signal[:, i])
axis.plot(error_signal[:, i])
axis = axis.twinx()
axis.plot(block_action_label_seq[:, i], c='tab:red')
axis.plot(block_label_seq[:, i], c='tab:green')
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def main(
out_dir=None, data_dir=None, model_name=None,
gpu_dev_id=None, batch_size=None, learning_rate=None, independent_signals=None,
model_params={}, cv_params={}, train_params={}, viz_params={}):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def loadVariable(var_name):
return joblib.load(os.path.join(data_dir, f'{var_name}.pkl'))
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
# Load data
trial_ids = loadVariable('trial_ids')
imu_sample_seqs = loadVariable('imu_sample_seqs')
imu_label_seqs = loadVariable('imu_label_seqs')
assembly_seqs = loadVariable('assembly_seqs')
imu_sample_seqs = tuple(map(np.squeeze, imu_sample_seqs))
errors = utils.batchProcess(makeErrorSignal, imu_sample_seqs, assembly_seqs)
state_labels = utils.batchProcess(
makeLabelSignal, imu_sample_seqs, assembly_seqs,
static_kwargs={'action': False}
)
action_labels = utils.batchProcess(
makeLabelSignal, imu_sample_seqs, assembly_seqs,
static_kwargs={'action': True}
)
plot_args = zip(
trial_ids, imu_sample_seqs, errors,
imu_label_seqs, state_labels, action_labels
)
for args in plot_args:
plotError(*args, fn=os.path.join(fig_dir, f"{args[0]}.png"))
if __name__ == "__main__":
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
parser.add_argument('--out_dir')
args = vars(parser.parse_args())
args = {k: v for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.join(
os.path.expanduser('~'), 'repo', 'kinemparse', 'scripts', config_fn
)
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
config.update(args)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/precompute_signatures.py
```python
import inspect
import argparse
import os
import yaml
import joblib
import numpy as np
# from matplotlib import pyplot as plt
from mathtools import utils
from blocks.core import labels
def make_signatures(unique_assemblies):
signatures = np.stack([
labels.inSameComponent(a, lower_tri_only=True)
for a in unique_assemblies
])
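# Map the boolean co-membership indicators from {0, 1} to {-1, +1}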
signatures = 2 * signatures.astype(float) - 1
return signatures
def main(out_dir=None, data_dir=None):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
def loadVariable(var_name):
fn = os.path.join(data_dir, f'{var_name}.pkl')
return joblib.load(fn)
# Load data
trial_ids = utils.getUniqueIds(data_dir, prefix='trial-')
cv_folds = loadVariable('cv-folds')
assembly_seqs = tuple(
loadVariable(f"trial-{seq_id}_true-state-seq-orig")
for seq_id in trial_ids
)
unique_assemblies = []
label_seqs = tuple(
np.array(list(labels.gen_eq_classes(seq, unique_assemblies, equivalent=None)))
for seq in assembly_seqs
)
saveVariable(unique_assemblies, 'unique-assemblies')
for cv_index, (train_idxs, test_idxs) in enumerate(cv_folds):
logger.info(
f'CV fold {cv_index + 1}: {len(trial_ids)} total '
f'({len(train_idxs)} train, {len(test_idxs)} test)'
)
# train_ids = trial_ids[np.array(train_idxs)]
train_label_seqs = tuple(label_seqs[i] for i in train_idxs)
unique_train_labels = np.unique(np.hstack(train_label_seqs))
unique_train_assemblies = tuple(unique_assemblies[i] for i in unique_train_labels)
signatures = make_signatures(unique_train_assemblies)
test_ids = trial_ids[np.array(test_idxs)]
for trial_id in test_ids:
saveVariable(signatures, f'trial={trial_id}_signatures')
saveVariable(unique_train_labels, f'trial={trial_id}_unique-train-labels')
if __name__ == "__main__":
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
for arg_name in inspect.getfullargspec(main).args:
parser.add_argument(f'--{arg_name}')
args = vars(parser.parse_args())
args = {k: yaml.safe_load(v) for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.join(
os.path.expanduser('~'), 'repo', 'kinemparse', 'scripts', config_fn
)
else:
config_fn = os.path.basename(config_file_path)
if not os.path.exists(config_file_path):
config = {}
else:
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
for k, v in args.items():
if isinstance(v, dict) and k in config:
config[k].update(v)
else:
config[k] = v
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/predict_seq_pytorch.py
```python
import os
import collections
import logging
import itertools
import math
import yaml
import torch
from mathtools import utils, torchutils, metrics
logger = logging.getLogger(__name__)
class DummyClassifier(torch.nn.Module):
def __init__(self, input_dim, out_set_size, binary_multiclass=False):
super().__init__()
self.input_dim = input_dim
self.out_set_size = out_set_size
logger.info(
f'Initialized dummy classifier. '
f'Input dim: {self.input_dim}, Output dim: {self.out_set_size}'
)
def forward(self, input_seq):
return input_seq.transpose(1, 2)
def predict(self, outputs):
__, preds = torch.max(outputs, -1)
return preds
class ConvClassifier(torch.nn.Module):
def __init__(self, input_dim, out_set_size, kernel_size=3, binary_multiclass=False):
super().__init__()
if not (kernel_size % 2):
raise NotImplementedError("Kernel size must be an odd number!")
self.input_dim = input_dim
self.out_set_size = out_set_size
self.binary_labels = binary_multiclass
self.conv1d = torch.nn.Conv1d(
self.input_dim, self.out_set_size, kernel_size,
padding=(kernel_size // 2)
)
logger.info(
f'Initialized 1D convolutional classifier. '
f'Input dim: {self.input_dim}, Output dim: {self.out_set_size}'
)
def forward(self, input_seq):
output_seq = self.conv1d(input_seq).transpose(1, 2)
return output_seq
def predict(self, outputs):
if self.binary_labels:
return (outputs > 0.5).float()
__, preds = torch.max(outputs, -1)
return preds
class TcnClassifier(torch.nn.Module):
def __init__(
self, input_dim, out_set_size, num_multiclass=None,
binary_multiclass=False, tcn_channels=None, **tcn_kwargs):
super().__init__()
self.input_dim = input_dim
self.out_set_size = out_set_size
self.binary_multiclass = binary_multiclass
self.num_multiclass = num_multiclass
self.TCN = torchutils.TemporalConvNet(input_dim, tcn_channels, **tcn_kwargs)
if self.num_multiclass is None:
self.linear = torch.nn.Linear(tcn_channels[-1], self.out_set_size)
else:
self.linear = torch.nn.Linear(
tcn_channels[-1],
self.out_set_size * self.num_multiclass
)
logger.info(
f'Initialized TCN classifier. '
f'Input dim: {self.input_dim}, Output dim: {self.out_set_size}'
)
def forward(self, input_seq, return_feats=False):
tcn_out = self.TCN(input_seq).transpose(1, 2)
linear_out = self.linear(tcn_out)
if self.num_multiclass is not None:
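# BATCH x TIME x VOCAB * EDGE -> BATCH x TIME x VOCAB x EDGE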
linear_out = linear_out.view(
*linear_out.shape[0:2], self.out_set_size, self.num_multiclass
)
if return_feats:
return linear_out, tcn_out
return linear_out
def predict(self, outputs, return_scores=False):
""" outputs has shape (num_batch, num_samples, num_classes, ...) """
if self.binary_multiclass:
return (outputs > 0.5).float()
__, preds = torch.max(outputs, dim=2)
if return_scores:
scores = torch.nn.functional.softmax(outputs, dim=2)
return preds, scores
return preds
class LstmClassifier(torch.nn.Module):
def __init__(
self, input_dim, out_set_size, num_multiclass=None,
binary_multiclass=False, hidden_dim=512, **lstm_kwargs):
super().__init__()
self.input_dim = input_dim
self.out_set_size = out_set_size
self.binary_multiclass = binary_multiclass
self.num_multiclass = num_multiclass
self.hidden_size = hidden_dim
self.num_layers = lstm_kwargs.get('num_layers', 1)
if lstm_kwargs.get('bidirectional', False):
self.num_directions = 2
else:
self.num_directions = 1
self.LSTM = torch.nn.LSTM(input_dim, self.hidden_size, **lstm_kwargs)
if self.num_multiclass is None:
self.linear = torch.nn.Linear(
self.num_directions * self.hidden_size,
self.out_set_size
)
else:
self.linear = torch.nn.Linear(
self.num_directions * self.hidden_size,
self.out_set_size * self.num_multiclass
)
logger.info(
f'Initialized LSTM classifier. '
f'Input dim: {self.input_dim}, Output dim: {self.out_set_size}'
)
def forward(self, input_seq, return_feats=False):
batch_size = input_seq.shape[0]
h0 = torch.randn(
self.num_layers * self.num_directions, batch_size, self.hidden_size,
device=input_seq.device
)
c0 = torch.randn(
self.num_layers * self.num_directions, batch_size, self.hidden_size,
device=input_seq.device
)
lstm_out, (hn, cn) = self.LSTM(input_seq, (h0, c0))
linear_out = self.linear(lstm_out)
if self.num_multiclass is not None:
# BATCH x TIME x VOCAB * EDGE -> BATCH x TIME x VOCAB x EDGE
linear_out = linear_out.view(
*linear_out.shape[0:2], self.out_set_size, self.num_multiclass
)
if return_feats:
return linear_out, lstm_out
return linear_out
def predict(self, outputs, return_scores=False):
""" outputs has shape (num_batch, num_samples, num_classes, ...) """
if self.binary_multiclass:
return (outputs > 0.5).float()
__, preds = torch.max(outputs, dim=2)
if return_scores:
scores = torch.nn.functional.softmax(outputs, dim=2)
return preds, scores
return preds
def splitSeqs(feature_seqs, label_seqs, trial_ids, active_only=False):
num_signals = label_seqs[0].shape[1]
if num_signals >= 100:
raise ValueError("{num_signals} signals will cause overflow in sequence ID (max is 99)")
def validate(seqs):
return all(seq.shape[1] == num_signals for seq in seqs)
all_valid = all(validate(x) for x in (feature_seqs, label_seqs))
if not all_valid:
raise AssertionError("Features and labels don't all have the same number of sequences")
trial_ids = tuple(
itertools.chain(
*(
tuple(t_id + 0.01 * (i + 1) for i in range(num_signals))
for t_id in trial_ids
)
)
)
def splitSeq(arrays):
return tuple(row for array in arrays for row in array)
feature_seqs = splitSeq(map(lambda x: x.swapaxes(0, 1), feature_seqs))
label_seqs = splitSeq(map(lambda x: x.T, label_seqs))
if active_only:
is_active = tuple(map(lambda x: x.any(), label_seqs))
def filterInactive(arrays):
return tuple(arr for arr, act in zip(arrays, is_active) if act)
return tuple(map(filterInactive, (feature_seqs, label_seqs, trial_ids)))
return feature_seqs, label_seqs, trial_ids
def joinSeqs(batches):
def stack(seq):
stacked = torch.stack(seq, dim=-1)
return stacked[None, ...]
all_seqs = collections.defaultdict(dict)
for batch in batches:
for b in zip(*batch):
i = b[-1]
seqs = b[:-1]
seq_id, trial_id = math.modf(i)
seq_id = int(round(seq_id * 100))
trial_id = int(round(trial_id))
all_seqs[trial_id][seq_id] = seqs
for trial_id, seq_dict in all_seqs.items():
seqs = (seq_dict[k] for k in sorted(seq_dict.keys()))
seqs = map(stack, zip(*seqs))
yield tuple(seqs) + ((trial_id,),)
def main(
out_dir=None, data_dir=None, model_name=None, predict_mode='classify',
gpu_dev_id=None, batch_size=None, learning_rate=None,
independent_signals=None, active_only=None, output_dim_from_vocab=False,
prefix='trial=', feature_fn_format='feature-seq.pkl', label_fn_format='label_seq.pkl',
dataset_params={}, model_params={}, cv_params={}, train_params={}, viz_params={},
metric_names=['Loss', 'Accuracy', 'Precision', 'Recall', 'F1'],
plot_predictions=None, results_file=None, sweep_param_name=None):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveVariable(var, var_name, to_dir=out_data_dir):
return utils.saveVariable(var, var_name, to_dir)
# Load data
device = torchutils.selectDevice(gpu_dev_id)
trial_ids = utils.getUniqueIds(
data_dir, prefix=prefix, suffix=feature_fn_format,
to_array=True
)
dataset = utils.CvDataset(
trial_ids, data_dir, prefix=prefix,
feature_fn_format=feature_fn_format, label_fn_format=label_fn_format,
)
utils.saveMetadata(dataset.metadata, out_data_dir)
utils.saveVariable(dataset.vocab, 'vocab', out_data_dir)
# Define cross-validation folds
cv_folds = utils.makeDataSplits(len(trial_ids), **cv_params)
utils.saveVariable(cv_folds, 'cv-folds', out_data_dir)
if predict_mode == 'binary multiclass':
# criterion = torchutils.BootstrappedCriterion(
# 0.25, base_criterion=torch.nn.functional.binary_cross_entropy_with_logits,
# )
criterion = torch.nn.BCEWithLogitsLoss()
labels_dtype = torch.float
elif predict_mode == 'multiclass':
criterion = torch.nn.CrossEntropyLoss()
labels_dtype = torch.long
elif predict_mode == 'classify':
criterion = torch.nn.CrossEntropyLoss()
labels_dtype = torch.long
else:
raise AssertionError()
def make_dataset(feats, labels, ids, shuffle=True):
dataset = torchutils.SequenceDataset(
feats, labels, device=device, labels_dtype=labels_dtype, seq_ids=ids,
**dataset_params
)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
return dataset, loader
for cv_index, cv_fold in enumerate(cv_folds):
train_data, val_data, test_data = dataset.getFold(cv_fold)
if independent_signals:
train_data = splitSeqs(*train_data, active_only=active_only)
val_data = splitSeqs(*val_data, active_only=active_only)
test_data = splitSeqs(*test_data, active_only=False)
train_set, train_loader = make_dataset(*train_data, shuffle=True)
test_set, test_loader = make_dataset(*test_data, shuffle=False)
val_set, val_loader = make_dataset(*val_data, shuffle=True)
logger.info(
f'CV fold {cv_index + 1} / {len(cv_folds)}: {len(dataset.trial_ids)} total '
f'({len(train_set)} train, {len(val_set)} val, {len(test_set)} test)'
)
logger.info(
f'{train_set.num_label_types} unique labels in train set; '
f'vocab size is {len(dataset.vocab)}'
)
input_dim = train_set.num_obsv_dims
output_dim = train_set.num_label_types
if output_dim_from_vocab:
output_dim = len(dataset.vocab)
if model_name == 'linear':
model = torchutils.LinearClassifier(
input_dim, output_dim, **model_params
).to(device=device)
elif model_name == 'conv':
model = ConvClassifier(input_dim, output_dim, **model_params).to(device=device)
elif model_name == 'TCN':
if predict_mode == 'multiclass':
num_multiclass = train_set[0][1].shape[-1]
output_dim = max([
train_set.num_label_types,
test_set.num_label_types,
val_set.num_label_types
])
else:
num_multiclass = None
model = TcnClassifier(
input_dim, output_dim, num_multiclass=num_multiclass,
**model_params
).to(device=device)
elif model_name == 'LSTM':
if predict_mode == 'multiclass':
num_multiclass = train_set[0][1].shape[-1]
output_dim = max([
train_set.num_label_types,
test_set.num_label_types,
val_set.num_label_types
])
else:
num_multiclass = None
model = LstmClassifier(
input_dim, output_dim, num_multiclass=num_multiclass,
**model_params
).to(device=device)
else:
raise AssertionError()
optimizer_ft = torch.optim.Adam(
model.parameters(), lr=learning_rate,
betas=(0.9, 0.999), eps=1e-08,
weight_decay=0, amsgrad=False
)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=1, gamma=1.00)
train_epoch_log = collections.defaultdict(list)
val_epoch_log = collections.defaultdict(list)
metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
model, last_model_wts = torchutils.trainModel(
model, criterion, optimizer_ft, lr_scheduler,
train_loader, val_loader,
device=device,
metrics=metric_dict,
train_epoch_log=train_epoch_log,
val_epoch_log=val_epoch_log,
**train_params
)
# Test model
metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
test_io_history = torchutils.predictSamples(
model.to(device=device), test_loader,
criterion=criterion, device=device,
metrics=metric_dict, data_labeled=True, update_model=False,
seq_as_batch=train_params['seq_as_batch'],
return_io_history=True
)
if independent_signals:
test_io_history = tuple(joinSeqs(test_io_history))
logger.info('[TST] ' + ' '.join(str(m) for m in metric_dict.values()))
utils.writeResults(
results_file, {k: v.value for k, v in metric_dict.items()},
sweep_param_name, model_params
)
if plot_predictions:
io_fig_dir = os.path.join(fig_dir, 'model-io')
if not os.path.exists(io_fig_dir):
os.makedirs(io_fig_dir)
label_names = ('gt', 'pred')
preds, scores, inputs, gt_labels, ids = zip(*test_io_history)
for batch in test_io_history:
batch = tuple(
x.cpu().numpy() if isinstance(x, torch.Tensor) else x
for x in batch
)
for preds, _, inputs, gt_labels, seq_id in zip(*batch):
fn = os.path.join(io_fig_dir, f"{prefix}{seq_id}_model-io.png")
utils.plot_array(inputs, (gt_labels.T, preds.T), label_names, fn=fn)
for batch in test_io_history:
batch = tuple(
x.cpu().numpy() if isinstance(x, torch.Tensor) else x
for x in batch
)
for pred_seq, score_seq, feat_seq, label_seq, trial_id in zip(*batch):
saveVariable(pred_seq, f'{prefix}{trial_id}_pred-label-seq')
saveVariable(score_seq, f'{prefix}{trial_id}_score-seq')
saveVariable(label_seq, f'{prefix}{trial_id}_true-label-seq')
saveVariable(model, f'cvfold={cv_index}_{model_name}-best')
train_fig_dir = os.path.join(fig_dir, 'train-plots')
if not os.path.exists(train_fig_dir):
os.makedirs(train_fig_dir)
if train_epoch_log:
torchutils.plotEpochLog(
train_epoch_log,
subfig_size=(10, 2.5),
title='Training performance',
fn=os.path.join(train_fig_dir, f'cvfold={cv_index}_train-plot.png')
)
if val_epoch_log:
torchutils.plotEpochLog(
val_epoch_log,
subfig_size=(10, 2.5),
title='Heldout performance',
fn=os.path.join(train_fig_dir, f'cvfold={cv_index}_val-plot.png')
)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: kinemparse/scripts/restructure_output_decode_keyframes.py
```python
import argparse
import os
import inspect
import yaml
import joblib
import numpy as np
import scipy.special
from mathtools import utils
def main(out_dir=None, data_dir=None, detections_dir=None, modality=None, normalization=None):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
if detections_dir is not None:
detections_dir = os.path.expanduser(detections_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
def loadVariable(var_name):
return joblib.load(os.path.join(data_dir, f'{var_name}.pkl'))
def loadAll(var_name, trial_ids):
def loadOne(trial_id):
return loadVariable(f"trial-{trial_id}_{var_name}")
return tuple(map(loadOne, trial_ids))
# Load data
trial_ids = utils.getUniqueIds(data_dir, prefix='trial-', to_array=True)
# Define cross-validation folds
dataset_size = len(trial_ids)
cv_folds = utils.makeDataSplits(
dataset_size,
precomputed_fn=os.path.join(data_dir, 'cv-folds.pkl')
)
cv_folds = tuple(tuple(map(np.array, splits)) for splits in cv_folds)
# Validate CV folds by checking that each split covers all trial ids
for train_idxs, test_idxs in cv_folds:
num_train = len(train_idxs)
num_test = len(test_idxs)
num_total = len(trial_ids)
if num_train + num_test != num_total:
err_str = f"{num_train} train + {num_test} test != {num_total} total"
raise AssertionError(err_str)
cv_fold_trial_ids = tuple(
tuple(map(lambda x: trial_ids[x], splits))
for splits in cv_folds
)
saveVariable(cv_fold_trial_ids, "cv-fold-trial-ids")
for cv_index, (train_idxs, test_idxs) in enumerate(cv_folds):
logger.info(
f'CV fold {cv_index + 1}: {len(trial_ids)} total '
f'({len(train_idxs)} train, {len(test_idxs)} test)'
)
hmm = loadVariable(f"hmm-fold{cv_index}")
train_assemblies = hmm.states
saveVariable(train_assemblies, f"cvfold={cv_index}_train-assemblies")
saveVariable(hmm, f"cvfold={cv_index}_model")
for trial_id in trial_ids[test_idxs]:
# true_state_seqs = loadVariable('trial-{trial_id}_true-state-seq-orig', trial_ids)
saved_predictions = loadVariable(f'trial-{trial_id}_pred-state-seq')
# shape: (num_states, num_samples)
data_scores = loadVariable(f'trial-{trial_id}_data-scores')
if data_scores.shape[0] != len(train_assemblies):
warn_str = (
f"Trial {trial_id}: {data_scores.shape[0]} data scores "
f"!= {len(train_assemblies)} assemblies in vocab"
)
logger.warning(warn_str)
continue
if detections_dir is not None:
fn = f'trial-{trial_id}_class-label-frame-seq.pkl'
pixel_label_frame_seq = joblib.load(os.path.join(detections_dir, fn))
for i, label_frame in enumerate(pixel_label_frame_seq):
# px_is_unoccluded = (label_frame == 0) + (label_frame == 3)
if normalization == 'per-pixel':
px_is_blocks = label_frame == 3
denominator = px_is_blocks.sum()
if modality == 'RGB':
denominator *= 3
else:
raise NotImplementedError(f"Modality {modality} not recognized")
data_scores[:, i] /= denominator
elif normalization == 'marginal':
# This scalar value is broadcast to a uniform (log) prior
log_prior = -np.log(data_scores.shape[0])
joint_probs = data_scores + log_prior
data_marginals = scipy.special.logsumexp(joint_probs, axis=0)
data_scores = joint_probs - data_marginals
elif normalization is not None:
raise NotImplementedError()
# Validate the loaded data
pred_assembly_idxs = data_scores.argmax(axis=0)
pred_assembly_seq = tuple(train_assemblies[i] for i in pred_assembly_idxs)
preds_same = all(p1 == p2 for p1, p2 in zip(pred_assembly_seq, saved_predictions))
if not preds_same:
raise AssertionError('Computed predictions differ from saved predictions')
saveVariable(data_scores, f"trial={trial_id}_data-scores")
if __name__ == "__main__":
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
for arg_name in inspect.getfullargspec(main).args:
parser.add_argument(f'--{arg_name}')
args = vars(parser.parse_args())
args = {k: yaml.safe_load(v) for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.join(
os.path.expanduser('~'), 'repo', 'kinemparse', 'scripts', config_fn
)
else:
config_fn = os.path.basename(config_file_path)
if not os.path.exists(config_file_path):
config = {}
else:
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
for k, v in args.items():
if isinstance(v, dict) and k in config:
config[k].update(v)
else:
config[k] = v
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
#### File: kinemparse/scripts/score_keyframes.py
```python
import os
from matplotlib import pyplot as plt
import numpy as np
import joblib
import yaml
from mathtools import utils
# FIXME: remove dependency on blocks
from blocks.estimation import models
from kinemparse import videoprocessing
from visiontools import imageprocessing
def plotScores(frame_scores, keyframe_idxs, fn):
_, axis = plt.subplots(1, figsize=(12, 8))
axis.plot(frame_scores)
axis.set_xlabel('Frame index')
axis.set_ylabel('Frame score')
axis.scatter(keyframe_idxs, frame_scores[keyframe_idxs])
axis.set_title('Video frame scores')
plt.tight_layout()
plt.savefig(fn)
plt.close()
def main(
out_dir=None, data_dir=None, preprocess_dir=None, segments_dir=None,
keyframe_model_fn=None, max_seqs=None, subsample_period=None,
frame_scoring_options={}, frame_selection_options={}):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
preprocess_dir = os.path.expanduser(preprocess_dir)
keyframe_model_fn = os.path.expanduser(keyframe_model_fn)
if segments_dir is not None:
segments_dir = os.path.expanduser(segments_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
def loadFromDir(var_name, dir_name):
return joblib.load(os.path.join(dir_name, f"{var_name}.pkl"))
def loadFromPreprocessDir(var_name):
return joblib.load(os.path.join(preprocess_dir, f"{var_name}.pkl"))
def saveToWorkingDir(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl"))
trial_ids = utils.getUniqueIds(preprocess_dir, prefix='trial-', suffix='.pkl')
keyframe_model = joblib.load(keyframe_model_fn)
models.visualizeKeyframeModel(keyframe_model, fn=os.path.join(fig_dir, 'keyframe-model.png'))
if max_seqs is not None:
trial_ids = trial_ids[:max_seqs]
for seq_idx, trial_id in enumerate(trial_ids):
logger.info(f"Processing video {seq_idx + 1} / {len(trial_ids)} (trial {trial_id})")
logger.info(f" Loading data...")
trial_str = f"trial-{trial_id}"
rgb_frame_seq = loadFromDir(f'{trial_str}_rgb-frame-seq', data_dir)
segment_frame_seq = loadFromDir(f'{trial_str}_segment-frame-seq', preprocess_dir)
if segments_dir is not None:
fn = f'trial={trial_id}_pred-segment-seq-rgb'
try:
segments_seq = loadFromDir(fn, segments_dir)
except FileNotFoundError:
logger.info(f" File not found: {fn}")
continue
else:
segments_seq = None
logger.info(f" Scoring frames...")
frame_scores = videoprocessing.scoreFrames(
keyframe_model,
rgb_frame_seq, segment_frame_seq,
score_kwargs=frame_scoring_options
)
segment_keyframe_idxs = videoprocessing.selectSegmentKeyframes(
frame_scores, segment_labels=segments_seq, **frame_selection_options
)
logger.info(f" Saving output...")
fn = os.path.join(fig_dir, f'{trial_str}_scores-plot.png')
plotScores(frame_scores, segment_keyframe_idxs, fn)
def saveFrames(indices, label):
best_rgb = rgb_frame_seq[indices]
best_seg = segment_frame_seq[indices]
rgb_quantized = np.stack(
tuple(
videoprocessing.quantizeImage(keyframe_model, rgb_img, segment_img)
for rgb_img, segment_img in zip(best_rgb, best_seg)
)
)
imageprocessing.displayImages(
*best_rgb, *best_seg, *rgb_quantized, num_rows=3,
file_path=os.path.join(fig_dir, f'{trial_str}_best-frames-{label}.png')
)
saveFrames(segment_keyframe_idxs, 'segment')
# Save intermediate results
saveToWorkingDir(frame_scores, f'{trial_str}_frame-scores')
saveToWorkingDir(segment_keyframe_idxs, f'{trial_str}_keyframe-idxs')
if __name__ == '__main__':
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
#### File: kinemparse/scripts/segment_from_imu.py
```python
import os
import logging
import yaml
import joblib
import numpy as np
from sklearn import metrics
from matplotlib import pyplot as plt
from mathtools import utils
logger = logging.getLogger(__name__)
def removeSmallSegments(labels, min_seg_len=10):
seg_labels, seg_lens = utils.computeSegments(labels)
seg_label_seq = np.zeros_like(labels)
prev_start_index = 0
prev_seg_len = 0
prev_seg_label = -1
for seg_label, seg_len in zip(seg_labels, seg_lens):
if seg_len < min_seg_len:
prev_seg_len += seg_len
continue
if seg_label == prev_seg_label:
prev_seg_len += seg_len
continue
prev_end_index = prev_start_index + prev_seg_len
seg_label_seq[prev_start_index:prev_end_index] = prev_seg_label
prev_start_index = prev_end_index
prev_seg_len = seg_len
prev_seg_label = seg_label
prev_end_index = prev_start_index + prev_seg_len
seg_label_seq[prev_start_index:prev_end_index] = prev_seg_label
return seg_label_seq
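# Illustrative example (assuming utils.computeSegments returns run labels and run lengths):
# removeSmallSegments(np.array([0, 0, 0, 1, 0, 0, 0]), min_seg_len=2)
# -> array([0, 0, 0, 0, 0, 0, 0])  (the length-1 run of 1s is absorbed into the surrounding 0s)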
def filterSegments(label_seq, **filter_args):
label_seq = label_seq.copy()
for i in range(label_seq.shape[0]):
label_seq[i] = removeSmallSegments(label_seq[i], **filter_args)
return label_seq
def segmentFromLabels(label_seq, num_vals=2, min_seg_len=10):
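# Collapse the per-signal label matrix into one integer per frame by treating
# each column as a base-num_vals number, then segment that reduced sequence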
row_nums = num_vals ** np.arange(label_seq.shape[0])
reduced_label_seq = np.dot(row_nums, label_seq)
if min_seg_len > 0:
reduced_label_seq = removeSmallSegments(reduced_label_seq, min_seg_len=min_seg_len)
seg_label_seq = utils.makeSegmentLabels(reduced_label_seq)
return seg_label_seq
def plot_labels(
gt_seg_label_seq, pred_seg_label_seq, imu_timestamp_seq, keyframe_timestamp_seq,
fn=None):
subplot_width = 12
subplot_height = 3
num_axes = 1
figsize = (subplot_width, num_axes * subplot_height)
fig, axis = plt.subplots(num_axes, figsize=figsize, sharex=True)
axis.plot(imu_timestamp_seq, gt_seg_label_seq)
axis.plot(imu_timestamp_seq, pred_seg_label_seq)
axis.scatter(keyframe_timestamp_seq, np.zeros_like(keyframe_timestamp_seq))
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def plot_arrays_simple(*arrays, labels=None, title=None, fn=None):
subplot_width = 12
subplot_height = 3
num_axes = len(arrays)
figsize = (subplot_width, num_axes * subplot_height)
fig, axes = plt.subplots(num_axes, figsize=figsize, sharex=True)
if num_axes == 1:
axes = [axes]
for i, (axis, array) in enumerate(zip(axes, arrays)):
axis.imshow(array, interpolation='none', aspect='auto')
if labels is not None:
axis.set_ylabel(labels[i])
if title is not None:
axes[0].set_title(title)
plt.tight_layout()
if fn is None:
plt.show()
else:
plt.savefig(fn)
plt.close()
def retrievalMetrics(seg_keyframe_counts):
centered_counts = seg_keyframe_counts - 1
num_true_positives = np.sum(centered_counts == 0)
num_false_positives = np.sum(centered_counts < 0)
num_false_negatives = np.sum(centered_counts > 0)
precision = num_true_positives / (num_true_positives + num_false_positives)
recall = num_true_positives / (num_true_positives + num_false_negatives)
F1 = 2 * (precision * recall) / (precision + recall)
return precision, recall, F1
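# Illustrative example: keyframe counts per predicted segment [1, 1, 0, 2]
# -> 2 true positives, 1 false positive (empty segment), 1 false negative (over-full segment)
# -> precision = recall = F1 ~= 0.667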
def segments_from_gt(label_seq, seg_label_seq):
def reduction(segment):
reduced = utils.reduce_all_equal(segment)
return reduced
segment_gt, sample_gt = utils.reduce_over_segments(
label_seq, seg_label_seq,
reduction=reduction
)
return segment_gt.T, sample_gt.T
def segments_from_features(feature_seq, seg_label_seq):
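# Reduce each segment to a single prediction: average the scores over the
# segment, take the argmax class, and fold classes 2 and 3 into class 0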
def reduction(segment):
reduced = segment.mean(axis=0).argmax(axis=-1)
reduced[reduced == 2] = 0
reduced[reduced == 3] = 0
return reduced
segment_preds, sample_preds = utils.reduce_over_segments(
feature_seq, seg_label_seq,
reduction=reduction
)
return segment_preds.T, sample_preds.T
def remapLabels(labels, remap_dict):
remapped = labels.copy()
for k, v in remap_dict.items():
remapped[labels == k] = v
return remapped
def main(
out_dir=None, predictions_dir=None, imu_data_dir=None, video_data_dir=None,
use_gt_segments=None, model_name=None, model_params={},
results_file=None, sweep_param_name=None,
cv_params={}, viz_params={},
plot_predictions=None):
predictions_dir = os.path.expanduser(predictions_dir)
out_dir = os.path.expanduser(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
if results_file is None:
results_file = os.path.join(out_dir, f'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
def loadAll(seq_ids, var_name, data_dir):
def loadOne(seq_id):
fn = os.path.join(data_dir, f'trial={seq_id}_{var_name}')
return joblib.load(fn)
return tuple(map(loadOne, seq_ids))
# Load data
trial_ids = utils.getUniqueIds(predictions_dir, prefix='trial=')
feature_seqs = loadAll(trial_ids, 'score-seq.pkl', predictions_dir)
pred_label_seqs = loadAll(trial_ids, 'pred-label-seq.pkl', predictions_dir)
label_seqs = loadAll(trial_ids, 'true-label-seq.pkl', predictions_dir)
# Define cross-validation folds
dataset_size = len(trial_ids)
cv_folds = utils.makeDataSplits(dataset_size, **cv_params)
def getSplit(split_idxs):
split_data = tuple(
tuple(s[i] for i in split_idxs)
for s in (feature_seqs, pred_label_seqs, label_seqs, trial_ids)
)
return split_data
for cv_index, cv_splits in enumerate(cv_folds):
train_data, val_data, test_data = tuple(map(getSplit, cv_splits))
train_ids = train_data[-1]
test_ids = test_data[-1]
val_ids = val_data[-1]
logger.info(
f'CV fold {cv_index + 1}: {len(trial_ids)} total '
f'({len(train_ids)} train, {len(val_ids)} val, {len(test_ids)} test)'
)
metric_dict = {
'ARI': [],
# 'kf_prec': [],
# 'kf_rec': [],
# 'kf_f1': [],
'prec': [],
'rec': [],
'f1': []
}
for feature_seq, pred_label_seq, label_seq, trial_id in zip(*test_data):
label_seq = remapLabels(label_seq, {2: 0, 3: 0})
pred_label_seq = remapLabels(pred_label_seq, {2: 0, 3: 0})
gt_seg_label_seq = segmentFromLabels(label_seq, num_vals=2, min_seg_len=0)
segment_labels, sample_labels = segments_from_gt(label_seq.T, gt_seg_label_seq)
pred_seg_label_seq = segmentFromLabels(filterSegments(pred_label_seq), num_vals=2)
segment_preds, sample_preds = segments_from_features(
np.moveaxis(feature_seq, [0, 1, 2], [1, 2, 0]), pred_seg_label_seq
)
prec = metrics.precision_score(sample_labels.ravel(), sample_preds.ravel())
metric_dict['prec'].append(prec)
rec = metrics.recall_score(sample_labels.ravel(), sample_preds.ravel())
metric_dict['rec'].append(rec)
f1 = metrics.f1_score(sample_labels.ravel(), sample_preds.ravel())
metric_dict['f1'].append(f1)
metric_dict['ARI'].append(metrics.adjusted_rand_score(gt_seg_label_seq, pred_seg_label_seq))
if use_gt_segments:
saveVariable(gt_seg_label_seq, f'trial={trial_id}_segment-seq-imu')
else:
saveVariable(pred_seg_label_seq, f'trial={trial_id}_segment-seq-imu')
if plot_predictions:
fn = os.path.join(fig_dir, f'trial-{trial_id:03}_kf.png')
labels = ('ground truth', 'pred, segmented', 'pred, raw')
num_segs_pred = pred_seg_label_seq.max() + 1
num_segs_gt = gt_seg_label_seq.max() + 1
title = f"{num_segs_pred} pred segs, {num_segs_gt} gt segs"
plot_arrays_simple(
sample_labels, sample_preds, pred_label_seq,
labels=labels, title=title, fn=fn
)
if imu_data_dir is not None and video_data_dir is not None:
imu_data_dir = os.path.expanduser(imu_data_dir)
imu_timestamp_seq = joblib.load(
os.path.join(imu_data_dir, f'trial={trial_id}_timestamp-seq.pkl')
)
try:
video_data_dir = os.path.expanduser(video_data_dir)
rgb_timestamp_fn = f'trial-{trial_id}_rgb-frame-timestamp-seq.pkl'
rgb_frame_timestamp_seq = joblib.load(
os.path.join(video_data_dir, rgb_timestamp_fn)
)
# keyframe_fn = f'trial={trial_id}_rgb-frame-seq.pkl'
# keyframe_seq = joblib.load(os.path.join(video_data_dir, keyframe_fn))
# import pdb; pdb.set_trace()
except FileNotFoundError:
logger.info(f"File not found: {rgb_timestamp_fn}")
continue
# find imu indices closest to rgb frame timestamps
imu_frame_idxs = utils.nearestIndices(imu_timestamp_seq, rgb_frame_timestamp_seq)
if use_gt_segments:
gt_seg_label_seq_rgb = gt_seg_label_seq[imu_frame_idxs]
saveVariable(gt_seg_label_seq_rgb, f'trial={trial_id}_segment-seq-rgb')
else:
pred_seg_label_seq_rgb = pred_seg_label_seq[imu_frame_idxs]
saveVariable(pred_seg_label_seq_rgb, f'trial={trial_id}_segment-seq-rgb')
if False:
num_segments = np.unique(pred_seg_label_seq).max() + 1
seg_keyframe_counts = utils.makeHistogram(num_segments, pred_seg_label_seq_rgb)
prec, rec, f1 = retrievalMetrics(seg_keyframe_counts)
metric_dict['kf_prec'].append(prec)
metric_dict['kf_rec'].append(rec)
metric_dict['kf_f1'].append(f1)
if plot_predictions:
fn = os.path.join(fig_dir, f'trial-{trial_id:03}_segs.png')
plot_labels(
gt_seg_label_seq, pred_seg_label_seq,
imu_timestamp_seq, rgb_frame_timestamp_seq, fn=fn
)
for name, value in metric_dict.items():
metric_dict[name] = np.array(value).mean()
metric_str = ' '.join(f"{k}: {v * 100:.1f}%" for k, v in metric_dict.items())
logger.info('[TST] ' + metric_str)
utils.writeResults(results_file, metric_dict, sweep_param_name, model_params)
saveVariable(train_ids, f'cvfold={cv_index}_train-ids')
saveVariable(test_ids, f'cvfold={cv_index}_test-ids')
saveVariable(val_ids, f'cvfold={cv_index}_val-ids')
# saveVariable(model, f'cvfold={cv_index}_{model_name}-best')
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
```
|
{
"source": "jd-jones/mathtools",
"score": 2
}
|
#### File: mathtools/mathtools/metrics.py
```python
import logging
try:
import torch
except ImportError:
pass
import numpy as np
from matplotlib import pyplot as plt
from sklearn import metrics
from . import utils
logger = logging.getLogger(__name__)
def oov_rate(state_seq, state_vocab):
state_is_oov = ~np.array([s in state_vocab for s in state_seq], dtype=bool)
prop_state_oov = state_is_oov.sum() / state_is_oov.size
return prop_state_oov
def confusionMatrix(all_pred_seqs, all_true_seqs, vocab_size):
"""
Returns
-------
confusions: np.ndarray of int, shape (vocab_size, vocab_size)
Rows represent predicted labels; columns represent true labels.
"""
confusions = np.zeros((vocab_size, vocab_size), dtype=int)
for pred_seq, true_seq in zip(all_pred_seqs, all_true_seqs):
for i_pred, i_true in zip(pred_seq, true_seq):
confusions[i_pred, i_true] += 1
return confusions
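# Hedged usage sketch (added for illustration; not part of the original module):
# build a confusion matrix from two toy label sequences. As documented above,
# rows index predictions and columns index ground-truth labels.
def _example_confusion_matrix():
    all_pred_seqs = [np.array([0, 1, 1])]
    all_true_seqs = [np.array([0, 1, 2])]
    confusions = confusionMatrix(all_pred_seqs, all_true_seqs, vocab_size=3)
    # confusions[1, 2] == 1 because true label 2 was predicted as label 1 once.
    return confusions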
def scoreConfusionMatrix(all_score_seqs, all_true_seqs, vocab_size):
"""
Returns
-------
    confusions: np.ndarray of float, shape (vocab_size, vocab_size)
Rows represent predicted labels; columns represent true labels.
"""
confusions = np.full((vocab_size, vocab_size), -np.inf, dtype=float)
for score_seq, true_seq in zip(all_score_seqs, all_true_seqs):
for score_row, i_true in zip(score_seq, true_seq):
for i_pred, score in enumerate(score_row):
confusions[i_pred, i_true] = np.logaddexp(confusions[i_pred, i_true], score)
return confusions
def perClassAcc(confusions, return_counts=False):
class_counts = confusions.sum(axis=0)
per_class_acc = np.diag(confusions) / class_counts
if return_counts:
return per_class_acc, class_counts
return per_class_acc
def plotConfusions(fn, confusions, vocab, size=24, disp_counts=False):
plt.figure(figsize=(size, size))
# vmax = np.abs(confusions).max()
# plt.matshow(confusions, cmap='coolwarm', vmin=-vmax, vmax=vmax)
plt.matshow(confusions)
if disp_counts:
for i_row, row in enumerate(confusions):
for i_col, val in enumerate(row):
if not val:
continue
plt.text(
i_col, i_row, val,
fontsize=8, color='black', ha='center', va='center'
)
plt.xticks(ticks=range(len(vocab)), labels=vocab, rotation='vertical')
plt.yticks(ticks=range(len(vocab)), labels=vocab)
plt.colorbar()
plt.savefig(fn, bbox_inches='tight')
plt.close()
def plotPerClassAcc(fn, vocab, per_class_acc, class_preds, class_counts):
macro_acc = per_class_acc.mean()
f, axes = plt.subplots(3, figsize=(12, 6), sharex=True)
axes[0].set_title(f"Macro Accuracy: {macro_acc * 100:.2f}%")
axes[0].bar(range(len(vocab)), per_class_acc)
for index, val in enumerate(per_class_acc):
eps = np.sign(val) * 0.01
axes[0].text(
x=index, y=val + eps, s=f"{val * 100:.0f}",
fontdict=dict(fontsize=8), va='center'
)
axes[0].set_ylabel("Accuracy")
axes[1].bar(range(len(vocab)), class_preds / class_counts.sum())
for index, val in enumerate(class_preds / class_counts.sum()):
eps = np.sign(val) * 0.01
axes[1].text(
x=index, y=val + eps, s=f"{val * 100:.0f}",
fontdict=dict(fontsize=8), va='center'
)
axes[1].set_ylabel("Pred Frequency")
axes[2].bar(range(len(vocab)), class_counts / class_counts.sum())
for index, val in enumerate(class_counts / class_counts.sum()):
eps = np.sign(val) * 0.01
axes[2].text(
x=index, y=val + eps, s=f"{val * 100:.0f}",
fontdict=dict(fontsize=8), va='center'
)
axes[2].set_xticks(range(len(vocab)))
axes[2].set_xticklabels(vocab, rotation='vertical')
axes[2].set_ylabel("True Frequency")
plt.savefig(fn, bbox_inches='tight')
def makeMetric(name):
if name == 'Reciprocal Loss':
return ReciprocalAverageLoss()
elif name == 'Loss':
return AverageLoss()
elif name == 'Accuracy':
return Accuracy()
elif name == 'Precision':
return Precision()
elif name == 'Recall':
return Recall()
elif name == 'F1':
return Fmeasure(beta=1)
    else:
        raise AssertionError(f"Unrecognized metric name: {name}")
def accuracy_upto(pred_seq, gt_seq, equivalence=None):
if equivalence is None:
def equivalence(x, y):
return x == y
is_eq = np.array(
[equivalence(p, gt) for p, gt in zip(pred_seq, gt_seq)],
dtype=bool
)
accuracy = is_eq.sum() / len(is_eq)
return accuracy
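# Hedged illustration (added; the labels below are made up): accuracy_upto with a
# custom equivalence relation, here one that ignores capitalization.
def _example_accuracy_upto():
    pred_seq = ['A', 'b', 'c']
    gt_seq = ['a', 'B', 'd']
    # Two of three labels match up to case, so this returns 2 / 3.
    return accuracy_upto(pred_seq, gt_seq, equivalence=lambda x, y: x.lower() == y.lower())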
class RationalPerformanceMetric():
"""
Performance metric with a numerator and a denominator.
"""
def __init__(self):
self.initializeCounts()
def initializeCounts(self):
self._numerator = 0
self._denominator = 0
def evaluate(self):
return utils.safeDivide(self._numerator, self._denominator)
@property
def value(self):
return float(self.evaluate())
def __str__(self):
return f'{self.evaluate():.5f}'
def accumulate(self, outputs=None, labels=None, loss=None):
self._numerator += self._count_numerator(outputs, labels, loss)
self._denominator += self._count_denominator(outputs, labels, loss)
def _count_numerator(self, outputs=None, labels=None, loss=None):
raise NotImplementedError()
def _count_denominator(self, outputs=None, labels=None, loss=None):
raise NotImplementedError()
class AverageLoss(RationalPerformanceMetric):
def _count_numerator(self, outputs=None, labels=None, loss=None):
return loss
def _count_denominator(self, outputs=None, labels=None, loss=None):
return 1
@property
def name(self):
return 'loss'
def __str__(self):
return self.name + ': ' + super().__str__()
class ReciprocalAverageLoss(AverageLoss):
def evaluate(self):
return 1 / super().evaluate()
@property
def name(self):
return 'reciprocal loss'
class ConfusionPerformanceMetric(RationalPerformanceMetric):
def __init__(self):
self.initializeCounts()
def initializeCounts(self):
self._true_positives = 0
self._true_negatives = 0
self._false_positives = 0
self._false_negatives = 0
def __str__(self):
return f'{self.evaluate() * 100:5.2f}%'
def accumulate(self, predicted=None, true=None, loss=None):
self._accumulate_confusions(predicted, true)
def _accumulate_confusions(self, predicted, true):
self._true_positives += truePositives(predicted, true)
self._true_negatives += trueNegatives(predicted, true)
self._false_positives += falsePositives(predicted, true)
self._false_negatives += falseNegatives(predicted, true)
@property
def _numerator(self):
raise NotImplementedError()
@property
def _denominator(self):
raise NotImplementedError()
class Fmeasure(ConfusionPerformanceMetric):
def __init__(self, beta=1):
super().__init__()
self._beta = beta
@property
def _numerator(self):
return (1 + self._beta ** 2) * self._true_positives
@property
def _denominator(self):
denom = (
self._false_positives
+ (self._beta ** 2) * self._false_negatives
+ (1 + self._beta ** 2) * self._true_positives
)
return denom
@property
def name(self):
return f'F_{self._beta}'
def __str__(self):
return self.name + ': ' + super().__str__()
class Recall(ConfusionPerformanceMetric):
@property
def _numerator(self):
return self._true_positives
@property
def _denominator(self):
return self._true_positives + self._false_negatives
@property
def name(self):
return 'rec'
def __str__(self):
return self.name + ': ' + super().__str__()
class Precision(ConfusionPerformanceMetric):
@property
def _numerator(self):
return self._true_positives
@property
def _denominator(self):
return self._true_positives + self._false_positives
@property
def name(self):
return 'prc'
def __str__(self):
return self.name + ': ' + super().__str__()
class Accuracy(ConfusionPerformanceMetric):
@property
def _numerator(self):
return self._true_positives + self._true_negatives
@property
def _denominator(self):
data_positives = self._true_positives + self._false_negatives
data_negatives = self._false_positives + self._true_negatives
return data_positives + data_negatives
@property
def name(self):
return 'acc'
def __str__(self):
return self.name + ': ' + super().__str__()
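# Hedged usage sketch (added; the data below is made up): accumulating the
# confusion-based metrics over a batch of numpy predictions, with label 0
# treated as background.
def _example_confusion_metrics():
    predicted = np.array([0, 1, 1, 2, 0])
    true = np.array([0, 1, 2, 2, 1])
    metric_values = []
    for metric in (Accuracy(), Precision(), Recall(), Fmeasure(beta=1)):
        metric.accumulate(predicted=predicted, true=true)
        metric_values.append(str(metric))  # e.g. 'acc: 60.00%'
    return metric_values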
def truePositives(predicted, true, background_index=0):
if isinstance(predicted, torch.Tensor) and isinstance(true, torch.Tensor):
pred_is_positive = predicted != background_index
true_positives = predicted[pred_is_positive] == true[pred_is_positive]
return true_positives.sum().float()
if isinstance(predicted, np.ndarray) and isinstance(true, np.ndarray):
pred_is_positive = predicted != background_index
true_positives = predicted[pred_is_positive] == true[pred_is_positive]
return true_positives.sum().astype(float)
try:
return sum(p == t for p, t in zip(predicted, true) if t != background_index)
except TypeError:
return int(predicted == true) if true != background_index else 0
def trueNegatives(predicted, true, background_index=0):
if isinstance(predicted, torch.Tensor) and isinstance(true, torch.Tensor):
pred_is_negative = predicted == background_index
true_negatives = predicted[pred_is_negative] == true[pred_is_negative]
return true_negatives.sum().float()
if isinstance(predicted, np.ndarray) and isinstance(true, np.ndarray):
pred_is_negative = predicted == background_index
true_negatives = predicted[pred_is_negative] == true[pred_is_negative]
return true_negatives.sum().astype(float)
try:
return sum(p == t for p, t in zip(predicted, true) if t == background_index)
except TypeError:
return int(predicted == true) if true == background_index else 0
def falsePositives(predicted, true, background_index=0):
if isinstance(predicted, torch.Tensor) and isinstance(true, torch.Tensor):
pred_is_positive = predicted != background_index
false_positives = predicted[pred_is_positive] != true[pred_is_positive]
return false_positives.sum().float()
if isinstance(predicted, np.ndarray) and isinstance(true, np.ndarray):
pred_is_positive = predicted != background_index
false_positives = predicted[pred_is_positive] != true[pred_is_positive]
return false_positives.sum().astype(float)
    # Match the tensor/array branches above: a false positive is a non-background
    # prediction that disagrees with the true label.
    try:
        return sum(p != t for p, t in zip(predicted, true) if p != background_index)
    except TypeError:
        return int(predicted != true) if predicted != background_index else 0
def falseNegatives(predicted, true, background_index=0):
if isinstance(predicted, torch.Tensor) and isinstance(true, torch.Tensor):
pred_is_negative = predicted == background_index
false_negatives = predicted[pred_is_negative] != true[pred_is_negative]
return false_negatives.sum().float()
if isinstance(predicted, np.ndarray) and isinstance(true, np.ndarray):
pred_is_negative = predicted == background_index
false_negatives = predicted[pred_is_negative] != true[pred_is_negative]
return false_negatives.sum().astype(float)
    # Match the tensor/array branches above: a false negative is a background
    # prediction that disagrees with the true label.
    try:
        return sum(p != t for p, t in zip(predicted, true) if p == background_index)
    except TypeError:
        return int(predicted != true) if predicted == background_index else 0
def classAccuracy(true_label_seqs, predicted_label_seqs):
avg_accuracies = utils.iterate(
seqClassAccuracy,
true_label_seqs,
predicted_label_seqs
)
true_label_seq_lens = [len(s) for s in true_label_seqs]
avg_accuracies = np.array(avg_accuracies)
true_label_seq_lens = np.array(true_label_seq_lens)
num_classes = utils.numClasses([label for ls in true_label_seqs for label in ls])
avg_accy = np.average(avg_accuracies, weights=true_label_seq_lens)
chance = 1 / num_classes
return avg_accy, chance
def seqClassAccuracy(true_label_seq, predicted_label_seq):
num_correct = 0
for true, predicted in zip(true_label_seq, predicted_label_seq):
num_correct += int((true == predicted).all())
total = len(true_label_seq)
return num_correct / total
def avgAccuracy(true_label_seqs, predicted_label_seqs):
true_labels = utils.toFlattenedArray(true_label_seqs)
predicted_labels = utils.toFlattenedArray(predicted_label_seqs)
metric = metrics.accuracy_score(true_labels.ravel(), predicted_labels.ravel())
chance = 0.5
return metric, chance
def avgPrecision(true_label_seqs, predicted_label_seqs):
true_labels = utils.toFlattenedArray(true_label_seqs)
predicted_labels = utils.toFlattenedArray(predicted_label_seqs)
metric = metrics.precision_score(true_labels.ravel(), predicted_labels.ravel())
chance = true_labels.ravel().sum() / true_labels.size
return metric, chance
def avgRecall(true_label_seqs, predicted_label_seqs):
true_labels = utils.toFlattenedArray(true_label_seqs)
predicted_labels = utils.toFlattenedArray(predicted_label_seqs)
metric = metrics.recall_score(true_labels.ravel(), predicted_labels.ravel())
chance = 0.5
return metric, chance
def edgeDistance(true_label_seqs, predicted_label_seqs):
avg_distances = utils.iterate(seqEdgeDistance, true_label_seqs, predicted_label_seqs)
true_label_seq_lens = [len(s) for s in true_label_seqs]
avg_distances = np.array(avg_distances)
true_label_seq_lens = np.array(true_label_seq_lens)
metric = np.average(avg_distances, weights=true_label_seq_lens)
chance = -1
return metric, chance
def seqEdgeDistance(true_label_seq, predicted_label_seq):
sum_dist = 0
for true, predicted in zip(true_label_seq, predicted_label_seq):
sum_dist += (true != predicted).sum()
total = len(true_label_seq)
return sum_dist / total
def nonempty(assembly_state):
return assembly_state.any()
def countTrue(true, pred, precision='state', denom_mode='accuracy'):
if precision == 'block':
p_blocks = set(pred.blocks.keys())
t_blocks = set(true.blocks.keys())
num_true = len(p_blocks & t_blocks)
if denom_mode == 'accuracy':
num_total = len(p_blocks | t_blocks)
elif denom_mode == 'precision':
num_total = len(p_blocks)
elif denom_mode == 'recall':
num_total = len(t_blocks)
elif precision == 'edge':
p_edges = pred.connections
t_edges = true.connections
num_true = np.sum(p_edges & t_edges)
if denom_mode == 'accuracy':
num_total = np.sum(p_edges | t_edges)
elif denom_mode == 'precision':
num_total = np.sum(p_edges)
elif denom_mode == 'recall':
num_total = np.sum(t_edges)
elif precision == 'state':
num_true = int(pred == true)
if denom_mode == 'accuracy':
num_total = 1
elif denom_mode == 'precision':
num_total = int(nonempty(pred))
elif denom_mode == 'recall':
num_total = int(nonempty(true))
# Don't count empty states in precision/recall mode
num_true *= num_total
elif precision == 'topology':
num_true = int((pred.connections == true.connections).all())
if denom_mode == 'accuracy':
num_total = 1
elif denom_mode == 'precision':
num_total = int(nonempty(pred))
elif denom_mode == 'recall':
num_total = int(nonempty(true))
# Don't count empty states in precision/recall mode
num_true *= num_total
elif precision == 'subset_topo':
pred_minus_true = pred.connections * ~true.connections
true_minus_pred = true.connections * ~pred.connections
pred_subset_true = true_minus_pred.any() and not pred_minus_true.any()
num_true = int(pred_subset_true)
if not pred.any() and true.any():
num_true = 0
if (pred.connections == true.connections).all():
num_true = 1
if denom_mode == 'accuracy':
num_total = 1
elif denom_mode == 'precision':
num_total = int(nonempty(pred))
elif denom_mode == 'recall':
num_total = int(nonempty(true))
# don't count empty states in precision/recall mode
num_true *= num_total
elif precision == 'subset_geom':
num_true = int(pred <= true)
if not pred.any() and true.any():
num_true = 0
if denom_mode == 'accuracy':
num_total = 1
elif denom_mode == 'precision':
num_total = int(nonempty(pred))
elif denom_mode == 'recall':
num_total = int(nonempty(true))
# don't count empty states in precision/recall mode
num_true *= num_total
elif precision == 'off by one':
differences = (pred.symmetrized_connections ^ true.symmetrized_connections).astype(int)
# since arrays are symmetric, any difference results in at least two changed edges,
# so divide by two. Comparison is <= 2 because we're allowing one EDIT
# (i.e. change one edge into another)
num_true = int(differences.sum() / 2 <= 2)
if denom_mode == 'accuracy':
num_total = 1
elif denom_mode == 'precision':
num_total = int(nonempty(pred))
elif denom_mode == 'recall':
num_total = int(nonempty(true))
# don't count empty states in precision/recall mode
num_true *= num_total
    else:
        raise AssertionError(f"Unrecognized precision: {precision}")
    return num_true, num_total
def countSeq(true_seq, pred_seq, precision='state', denom_mode='accuracy'):
len_true = len(true_seq)
len_pred = len(pred_seq)
if len_true != len_pred:
err_str = f'{len_true} samples in true_seq'
err_str += f' != {len_pred} samples in pred_seq'
raise ValueError(err_str)
num_correct = 0
num_total = 0
for true, pred in zip(true_seq, pred_seq):
cur_correct, cur_total = countTrue(true, pred, precision=precision, denom_mode=denom_mode)
num_correct += cur_correct
num_total += cur_total
return num_correct, num_total
def numberCorrect(
true_seq, predicted_seq,
ignore_empty_true=False, ignore_empty_pred=False, precision='states'):
len_true = len(true_seq)
len_pred = len(predicted_seq)
if len_true != len_pred:
err_str = f'{len_true} samples in true_seq != {len_pred} samples in predicted_seq'
raise ValueError(err_str)
if ignore_empty_true:
indices = tuple(i for i, s in enumerate(true_seq) if nonempty(s))
predicted_seq = tuple(predicted_seq[i] for i in indices)
true_seq = tuple(true_seq[i] for i in indices)
if not len(true_seq):
warn_str = 'All ground-truth sequences were empty!'
logger.warning(warn_str)
if ignore_empty_pred:
indices = tuple(i for i, s in enumerate(predicted_seq) if nonempty(s))
predicted_seq = tuple(predicted_seq[i] for i in indices)
true_seq = tuple(true_seq[i] for i in indices)
num_correct = 0
num_total = 0
for p, t in zip(predicted_seq, true_seq):
if precision == 'states':
num_correct += int(p == t)
num_total += 1
elif precision == 'edges':
num_correct += int(np.all(p.connections == t.connections))
num_total += 1
elif precision == 'vertices':
num_correct += int(p.blocks == t.blocks)
num_total += 1
elif precision == 'structure':
if not (nonempty(p) and nonempty(t)):
num_correct += int(p == t)
else:
num_correct += int(p < t or p > t or p == t)
num_total += 1
elif precision == 'blocks':
p_blocks = set(p.blocks.keys())
t_blocks = set(t.blocks.keys())
num_correct += len(p_blocks & t_blocks)
num_total += len(p_blocks | t_blocks)
elif precision == 'blocks_recall':
p_blocks = set(p.blocks.keys())
t_blocks = set(t.blocks.keys())
num_correct += len(p_blocks & t_blocks)
num_total += len(t_blocks)
elif precision == 'blocks_precision':
p_blocks = set(p.blocks.keys())
t_blocks = set(t.blocks.keys())
num_correct += len(p_blocks & t_blocks)
num_total += len(p_blocks)
elif precision == 'avg_edge':
p_edges = p.connections
t_edges = t.connections
num_correct += np.sum(p_edges & t_edges)
num_total += np.sum(p_edges | t_edges)
elif precision == 'avg_edge_precision':
p_edges = p.connections
t_edges = t.connections
num_correct += np.sum(p_edges & t_edges)
num_total += np.sum(p_edges)
elif precision == 'avg_edge_recall':
p_edges = p.connections
t_edges = t.connections
num_correct += np.sum(p_edges & t_edges)
num_total += np.sum(t_edges)
return num_correct, num_total
def stateOverlap(true_seq, predicted_seq, ignore_empty=False):
len_true = len(true_seq)
len_pred = len(predicted_seq)
if len_true != len_pred:
err_str = f'{len_true} samples in true_seq != {len_pred} samples in predicted_seq'
# raise ValueError(err_str)
        logger.warning(err_str)
if ignore_empty:
predicted_seq = tuple(filter(nonempty, predicted_seq))
true_seq = tuple(filter(nonempty, true_seq))
size_intersect = 0
size_union = 0
for p, t in zip(predicted_seq, true_seq):
p_blocks = set(p.blocks.keys())
t_blocks = set(t.blocks.keys())
size_intersect += len(p_blocks & t_blocks)
size_union += len(p_blocks | t_blocks)
return size_intersect, size_union
def vertexOverlap(state1, state2):
pass
def edgeOverlap(state1, state2):
pass
def levenshtein(
reference, candidate, normalized=False, segment_level=False,
# reduce_reference=True, reduce_candidate=False,
deletion_cost=1, insertion_cost=1, substitution_cost=1,
return_num_elems=False, corpus=None, resolution=None):
""" Compute the Levenshtein (edit) distance between two sequences.
Parameters
----------
reference : iterable(object)
candidate : iterable(object)
    normalized : bool, optional
        If True, the distance is divided by the size of the longer sequence.
    segment_level : bool, optional
        If True, both sequences are collapsed to segment-level label sequences
        before the distance is computed.
deletion_cost : int, optional
Cost of deleting an element from the `candidate`.
insertion_cost : int, optional
Cost of inserting an element from the `reference`.
substitution_cost : int, optional
Cost of substituting an element in the `reference` for an element in
the `candidate`.
Returns
-------
dist : int
NOTE: `dist` has type `float` if `normalized == True`
"""
if corpus is None:
def size(state, resolution):
return 1
def difference(state, other, resolution):
return int(state != other)
elif corpus == 'airplane':
def size(state, resolution):
if resolution == 'state':
return 1
elif resolution == 'block':
return len(state.assembly_state)
raise NotImplementedError
def difference(state, other, resolution):
if resolution == 'state':
return int(state != other)
elif resolution == 'block':
return len(state.assembly_state ^ other.assembly_state)
raise NotImplementedError
elif corpus in ('easy', 'child'):
def size(state, resolution):
if resolution == 'state':
return 1
elif resolution == 'edge':
return state.connections.sum()
elif resolution == 'block':
state_blocks = set(state.blocks.keys())
return len(state_blocks)
raise NotImplementedError
def difference(state, other, resolution):
if resolution == 'state':
return int(state != other)
elif resolution == 'edge':
edge_diff = state.connections ^ other.connections
return edge_diff.sum()
elif resolution == 'block':
state_blocks = set(state.blocks.keys())
other_blocks = set(other.blocks.keys())
return len(state_blocks ^ other_blocks)
raise NotImplementedError
if segment_level:
reference, _ = utils.computeSegments(reference)
candidate, _ = utils.computeSegments(candidate)
num_true = 1 if not reference else len(reference)
num_pred = 1 if not candidate else len(candidate)
prefix_dists = np.zeros((num_pred, num_true), dtype=int)
# Cost for deleting all elements of candidate
for i in range(1, num_pred):
candidate_size = size(candidate[i], resolution=resolution)
prefix_dists[i, 0] = prefix_dists[i - 1, 0] + deletion_cost * candidate_size
# Cost for inserting all elements of reference
for j in range(1, num_true):
reference_size = size(reference[j], resolution=resolution)
prefix_dists[0, j] = prefix_dists[0, j - 1] + insertion_cost * reference_size
for i in range(1, num_pred):
for j in range(1, num_true):
# needs_sub = int(reference[i] != candidate[j])
candidate_size = size(candidate[i], resolution=resolution)
reference_size = size(reference[j], resolution=resolution)
sub_size = difference(candidate[i], reference[j], resolution=resolution)
prefix_dists[i, j] = min(
prefix_dists[i - 1, j] + deletion_cost * candidate_size,
prefix_dists[i, j - 1] + insertion_cost * reference_size,
prefix_dists[i - 1, j - 1] + substitution_cost * sub_size,
)
dist = prefix_dists[num_pred - 1, num_true - 1]
if normalized:
size_pred = sum(size(state, resolution) for state in candidate)
size_true = sum(size(state, resolution) for state in reference)
dist /= max(size_pred, size_true)
if return_num_elems:
return dist, (num_true, num_pred)
return dist
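# Hedged usage sketch (added; the sequences below are made up): edit distance between
# two short label sequences with the default unit costs.
def _example_levenshtein():
    reference = [1, 2, 3]
    candidate = [1, 3, 3]
    # One substitution (2 -> 3), so this should give 1 with unit costs.
    dist = levenshtein(reference, candidate)
    # Normalizing divides by the size of the longer sequence, here 3.
    norm_dist = levenshtein(reference, candidate, normalized=True)
    return dist, norm_dist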
def avgLevenshtein(true_seqs, predicted_seqs, normalized=False):
num_true_seqs = len(true_seqs)
num_pred_seqs = len(predicted_seqs)
if num_true_seqs != num_pred_seqs:
err_str = f'{num_true_seqs} ground-truth sequences but {num_pred_seqs} prediction sequences'
raise ValueError(err_str)
dist = 0
for t, p in zip(true_seqs, predicted_seqs):
dist += levenshtein(t, p, normalized=normalized)
return dist / num_true_seqs
def blockAccuracy(true_seqs, predicted_seqs, ignore_empty=False):
total_intersect = 0
total_union = 0
for p_seq, t_seq in zip(predicted_seqs, true_seqs):
size_intersect, size_union = stateOverlap(p_seq, t_seq, ignore_empty=ignore_empty)
total_intersect += size_intersect
total_union += size_union
return total_intersect / total_union, -1
def stateAccuracy(true_seqs, predicted_seqs, precision='states'):
total_states = 0
total_correct = 0
num_true_seqs = len(true_seqs)
num_pred_seqs = len(predicted_seqs)
if num_true_seqs != num_pred_seqs:
err_str = f'{num_true_seqs} ground-truth sequences but {num_pred_seqs} prediction sequences'
raise ValueError(err_str)
for p_seq, t_seq in zip(predicted_seqs, true_seqs):
        num_correct, num_states = numberCorrect(t_seq, p_seq, precision=precision)
total_correct += num_correct
total_states += num_states
return total_correct / total_states, -1
def statePrecision(true_seqs, predicted_seqs, precision='states'):
total_states = 0
total_correct = 0
for p_seq, t_seq in zip(predicted_seqs, true_seqs):
num_correct, num_states = numberCorrect(
t_seq, p_seq, ignore_empty_pred=True, precision=precision
)
total_correct += num_correct
total_states += num_states
if total_states:
return total_correct / total_states, -1
if total_correct:
return np.inf, -1
return np.nan, -1
def stateRecall(true_seqs, predicted_seqs, precision='states'):
total_states = 0
total_correct = 0
for p_seq, t_seq in zip(predicted_seqs, true_seqs):
num_correct, num_states = numberCorrect(
t_seq, p_seq, ignore_empty_true=True, precision=precision
)
total_correct += num_correct
total_states += num_states
if total_states:
return total_correct / total_states, -1
if total_correct:
return np.inf, -1
return np.nan, -1
```
|
{
"source": "jd-jones/seqtools",
"score": 2
}
|
#### File: seqtools/seqtools/fstutils_openfst.py
```python
import logging
import numpy as np
import pywrapfst as openfst
logger = logging.getLogger(__name__)
EPSILON = 0
EPSILON_STRING = 'ε'
def __test(num_samples=5):
def sampleGT(transition_probs, initial_probs):
cur_state = np.random.choice(initial_probs.shape[0], p=initial_probs)
gt_seq = [cur_state]
while True:
transitions = transition_probs[cur_state, :]
cur_state = np.random.choice(transitions.shape[0], p=transitions)
if cur_state == transitions.shape[0] - 1:
return gt_seq
gt_seq.append(cur_state)
def sampleScores(gt_seq, num_states):
""" score[i, j, k] := weight(sample i | state j -> state k) """
num_samples = len(gt_seq) - 1
scores = np.random.random_sample(size=(num_samples, num_states, num_states))
return scores
def samplePair(transition_probs, initial_probs):
gt_seq = sampleGT(transition_probs, initial_probs)
score_seq = sampleScores(gt_seq, initial_probs.shape[0])
return gt_seq, score_seq
def simulate(num_samples, transition, initial, final):
transition_probs = np.hstack((transition, final[:, None]))
transition_probs /= transition_probs.sum(axis=1)[:, None]
initial_probs = initial.copy()
initial_probs /= initial_probs.sum()
simulated_dataset = tuple(
samplePair(transition_probs, initial_probs)
for __ in range(num_samples)
)
return simulated_dataset
transition = np.array(
[[0, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 1],
[0, 0, 0, 0, 0]], dtype=float
)
initial = np.array([1, 0, 1, 0, 0], dtype=float)
final = np.array([0, 1, 0, 0, 1], dtype=float) / 10
num_states = len(initial)
transition_to_arc = {}
for s_cur in range(num_states):
for s_next in range(num_states):
transition_to_arc[(s_cur, s_next)] = len(transition_to_arc)
for s in range(num_states):
transition_to_arc[(-1, s)] = len(transition_to_arc)
arc_to_transition = {v: k for k, v in transition_to_arc.items()}
seq_params = (transition, initial, final)
simulated_dataset = simulate(num_samples, *seq_params)
seq_params = tuple(map(lambda x: -np.log(x), seq_params))
return seq_params, simulated_dataset, arc_to_transition, transition_to_arc
class LatticeCrf(object):
def __init__(
self, transition_weights=None, initial_weights=None, final_weights=None,
update_method='simple'):
self._params = {
'transition': transition_weights,
'initial': initial_weights,
'final': final_weights
}
        self._transition_to_arc = {}
        # Guard against subclasses (e.g. DurationCrf) that call this constructor
        # before any transition parameters exist.
        if self._params['initial'] is not None:
            num_states = self._params['initial'].shape[0]
            for s_cur in range(num_states):
                for s_next in range(num_states):
                    self._transition_to_arc[(s_cur, s_next)] = len(self._transition_to_arc)
            for s in range(num_states):
                self._transition_to_arc[(-1, s)] = len(self._transition_to_arc)
self._arc_to_transition = {v: k for k, v in self._transition_to_arc.items()}
if update_method == 'simple':
self._updateWeights = gradientStep
else:
raise NotImplementedError()
def fit(self, train_samples, train_labels, observation_scores=None, num_epochs=1):
if observation_scores is None:
observation_scores = self.score(train_samples)
obs_fsts = tuple(
fromArray(
scores.reshape(scores.shape[0], -1),
output_labels=self._transition_to_arc
)
for scores in observation_scores
)
obs_batch_fst = easyUnion(*obs_fsts, disambiguate=True)
train_labels = tuple(
[self._transition_to_arc[t] for t in toTransitionSeq(label)]
for label in train_labels
)
gt_fsts = tuple(
fromSequence(labels, symbol_table=obs_batch_fst.output_symbols())
for labels in train_labels
)
gt_batch_fst = easyUnion(*gt_fsts, disambiguate=True)
losses = []
params = []
for i in range(num_epochs):
seq_fst = seqFstToBatch(self._makeSeqFst(), gt_batch_fst)
denom_fst = openfst.compose(obs_batch_fst, seq_fst)
num_fst = openfst.compose(denom_fst, gt_batch_fst)
batch_loss, batch_arcgrad = fstProb(num_fst, denom_fst)
param_gradient = self._backward(batch_arcgrad)
self._params = self._update_params(self._params, param_gradient)
params.append(self._params.copy())
losses.append(float(batch_loss))
return np.array(losses), params
def _makeSeqFst(self):
transition_fst = fromTransitions(
self._params['transition'], self._params['initial'], self._params['final'],
transition_ids=self._transition_to_arc
)
return transition_fst
def _backward(self, batch_arcgrad):
param_gradient = seqGradient(batch_arcgrad, self._arc_to_transition)
return param_gradient
def _update_params(self, params, param_gradient, **update_kwargs):
updated = params.copy()
for name, gradient in param_gradient.items():
updated[name] = self._updateWeights(params[name], gradient, **update_kwargs)
return updated
def predict(self, test_samples, observation_scores=None):
if observation_scores is None:
observation_scores = self.score(test_samples)
        # Build the sequence model FST from the current parameters
        transition_fst = self._makeSeqFst()
observation_fsts = tuple(
fromArray(scores.reshape(scores.shape[0], -1))
for scores in observation_scores
)
all_preds = []
for observation_fst in observation_fsts:
decode_graph = openfst.compose(observation_fst, transition_fst)
pred_labels = viterbi(decode_graph)
all_preds.append(pred_labels)
return all_preds
def score(self, train_samples):
raise NotImplementedError()
class DurationCrf(LatticeCrf):
def __init__(self, labels, num_states=2, transition_weights=None, self_weights=None):
super().__init__()
if transition_weights is None:
transition_weights = [0.6] * num_states
if self_weights is None:
self_weights = [0.4] * num_states
self._transition_weights = transition_weights
self._self_weights = self_weights
self._num_states = num_states
self._labels = labels
def _makeSeqFst(self):
dur_fsts = [
durationFst(
label, self._num_states,
transition_weights=self._transition_weights, self_weights=self._self_weights
)
for label in self._labels
]
dur_fst = dur_fsts[0].union(*dur_fsts[1:]).closure(closure_plus=True)
return dur_fst
def _backward(self, batch_arcgrad):
raise NotImplementedError()
# -=( MISC UTILS )==-----------------------------------------------------------
def toTransitionSeq(state_seq):
transition_seq = ((-1, state_seq[0]),) + tuple(zip(state_seq[:-1], state_seq[1:]))
return transition_seq
def toStateSeq(transition_seq):
state_seq = tuple(transition[1] for transition in transition_seq)
return state_seq
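# Hedged round-trip example (added for illustration): a state sequence maps to
# (previous, next) transition pairs, with -1 marking the initial transition.
def _example_transition_seq():
    state_seq = (0, 1, 1, 2)
    transition_seq = toTransitionSeq(state_seq)
    # ((-1, 0), (0, 1), (1, 1), (1, 2))
    assert toStateSeq(transition_seq) == state_seq
    return transition_seq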
def isLattice(fst):
for state in fst.states():
input_labels = tuple(arc.ilabel for arc in fst.arcs(state))
input_label = input_labels[0]
if not all(i == input_label for i in input_labels):
return False
# TODO
return True
def iteratePaths(fst):
    """ Depth-first enumeration of accepting paths (as arc tuples) in an acyclic FST. """
    zero = openfst.Weight.zero(fst.weight_type())
    paths = [(arc,) for arc in fst.arcs(fst.start())]
    while paths:
        path = paths.pop()
        state = path[-1].nextstate
        if fst.final(state) != zero:
            yield path
        for arc in fst.arcs(state):
            new_path = path + (arc,)
            paths.append(new_path)
def outputLabels(fst):
for path in iteratePaths(fst):
yield tuple(arc.olabel for arc in path)
def getLabels(fst, label_type='output'):
for path in iteratePaths(fst):
if label_type == 'output':
yield tuple(arc.olabel for arc in path)
elif label_type == 'input':
yield tuple(arc.ilabel for arc in path)
else:
raise AssertionError()
def easyCompose(*fsts, determinize=True, minimize=True):
composed = fsts[0]
for fst in fsts[1:]:
composed = openfst.compose(composed, fst)
if determinize:
composed = openfst.determinize(composed)
if minimize:
composed.minimize()
return composed
def easyUnion(*fsts, disambiguate=False):
union_fst = openfst.VectorFst(arc_type=fsts[0].arc_type())
union_fst.set_start(union_fst.add_state())
merged_input_symbols = fsts[0].input_symbols()
merged_output_symbols = fsts[0].output_symbols()
for fst in fsts:
merged_input_symbols = openfst.merge_symbol_table(
merged_input_symbols, fst.input_symbols()
)
merged_output_symbols = openfst.merge_symbol_table(
merged_output_symbols, fst.output_symbols()
)
union_fst.set_input_symbols(merged_input_symbols)
union_fst.set_output_symbols(merged_output_symbols)
for fst in fsts:
fst.set_input_symbols(merged_input_symbols)
fst.set_output_symbols(merged_output_symbols)
union_fst.union(*fsts)
if disambiguate:
for seq_index, __ in enumerate(fsts):
union_fst.mutable_input_symbols().add_symbol(f"seq{seq_index}")
for seq_index, __ in enumerate(fsts):
union_fst.mutable_output_symbols().add_symbol(f"seq{seq_index}")
seq_index = 0
arc_iterator = union_fst.mutable_arcs(union_fst.start())
while not arc_iterator.done():
arc = arc_iterator.value()
arc.ilabel = union_fst.input_symbols().find(f"seq{seq_index}")
arc.olabel = union_fst.output_symbols().find(f"seq{seq_index}")
arc_iterator.set_value(arc)
arc_iterator.next()
seq_index += 1
return union_fst
# -=( CREATION AND CONVERSION )==----------------------------------------------
def makeSymbolTable(vocabulary, prepend_epsilon=True):
symbol_table = openfst.SymbolTable()
if prepend_epsilon:
symbol_table.add_symbol(EPSILON_STRING, key=EPSILON)
start = 1
else:
start = 0
for index, symbol in enumerate(vocabulary, start=start):
symbol_table.add_symbol(str(symbol), key=index)
return symbol_table
def fromArray(
weights, final_weight=None, arc_type=None,
# input_labels=None, output_labels=None
input_symbols=None, output_symbols=None):
""" Instantiate a state machine from an array of weights.
Parameters
----------
weights : array_like, shape (num_inputs, num_outputs)
Needs to implement `.shape`, so it should be a numpy array or a torch
tensor.
    final_weight : float or openfst.Weight, optional
        Should be compatible with `arc_type`. Default is the semiring's one.
    arc_type : {'standard', 'log'}, optional
        Default is 'standard' (i.e. the tropical semiring).
    input_symbols : openfst.SymbolTable, optional
    output_symbols : openfst.SymbolTable, optional
    Returns
    -------
    fst : openfst.VectorFst
        A linear-chain transducer with one state per row of `weights`. Arc
        labels are the corresponding row and column indices, offset by one so
        that label 0 stays reserved for epsilon.
    """
if weights.ndim == 3:
is_lattice = False
elif weights.ndim == 2:
is_lattice = True
else:
raise AssertionError(f"weights have unrecognized shape {weights.shape}")
if arc_type is None:
arc_type = 'standard'
"""
if output_labels is None:
output_labels = {str(i): i for i in range(weights.shape[1])}
if input_labels is None:
if is_lattice:
input_labels = {str(i): i for i in range(weights.shape[0])}
else:
input_labels = {str(i): i for i in range(weights.shape[2])}
input_table = openfst.SymbolTable()
input_table.add_symbol(EPSILON_STRING, key=EPSILON)
for in_symbol, index in input_labels.items():
input_table.add_symbol(str(in_symbol), key=index + 1)
output_table = openfst.SymbolTable()
output_table.add_symbol(EPSILON_STRING, key=EPSILON)
for out_symbol, index in output_labels.items():
output_table.add_symbol(str(out_symbol), key=index + 1)
"""
fst = openfst.VectorFst(arc_type=arc_type)
fst.set_input_symbols(input_symbols)
fst.set_output_symbols(output_symbols)
zero = openfst.Weight.zero(fst.weight_type())
one = openfst.Weight.one(fst.weight_type())
if final_weight is None:
final_weight = one
else:
final_weight = openfst.Weight(fst.weight_type(), final_weight)
init_state = fst.add_state()
fst.set_start(init_state)
if is_lattice:
prev_state = init_state
for sample_index, row in enumerate(weights):
cur_state = fst.add_state()
for i, weight in enumerate(row):
input_label_index = sample_index + 1
output_label_index = i + 1
weight = openfst.Weight(fst.weight_type(), weight)
if weight != zero:
arc = openfst.Arc(
input_label_index, output_label_index,
weight, cur_state
)
fst.add_arc(prev_state, arc)
prev_state = cur_state
fst.set_final(cur_state, final_weight)
else:
prev_state = init_state
for sample_index, input_output in enumerate(weights):
cur_state = fst.add_state()
for i, outputs in enumerate(input_output):
for j, weight in enumerate(outputs):
input_label_index = i + 1
output_label_index = j + 1
weight = openfst.Weight(fst.weight_type(), weight)
if weight != zero:
arc = openfst.Arc(
input_label_index, output_label_index,
weight, cur_state
)
fst.add_arc(prev_state, arc)
prev_state = cur_state
fst.set_final(cur_state, final_weight)
if not fst.verify():
raise openfst.FstError("fst.verify() returned False")
return fst
def toArray(lattice):
zero = openfst.Weight.zero(lattice.weight_type())
# one = openfst.Weight.one(lattice.weight_type())
num_states = lattice.num_states()
num_outputs = lattice.output_symbols().num_symbols()
weights = np.full((num_states, num_outputs), float(zero))
for state in lattice.states():
for arc in lattice.arcs(state):
weights[state, arc.olabel] = float(arc.weight)
return weights, lattice.weight_type()
def fromSequence(seq, arc_type='standard', symbol_table=None):
fst = openfst.VectorFst(arc_type=arc_type)
one = openfst.Weight.one(fst.weight_type())
if symbol_table is not None:
fst.set_input_symbols(symbol_table)
fst.set_output_symbols(symbol_table)
init_state = fst.add_state()
fst.set_start(init_state)
cur_state = init_state
for i, label in enumerate(seq):
next_state = fst.add_state()
arc = openfst.Arc(label + 1, label + 1, one, next_state)
fst.add_arc(cur_state, arc)
cur_state = next_state
fst.set_final(cur_state, one)
if not fst.verify():
raise openfst.FstError("fst.verify() returned False")
return fst
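# Hedged usage sketch (added; the vocabulary and labels are made up): a linear-chain
# acceptor for a short label sequence. Labels are shifted by one internally so that
# index 0 stays reserved for epsilon.
def _example_fromSequence():
    symbols = makeSymbolTable(['a', 'b', 'c'])
    fst = fromSequence([0, 2, 1], symbol_table=symbols)
    return fst.num_states()  # one state per label plus the start state: 4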
def fromTransitions(
transition_weights, init_weights=None, final_weights=None,
arc_type='standard', transition_ids=None):
""" Instantiate a state machine from state transitions.
Parameters
----------
Returns
-------
"""
num_states = transition_weights.shape[0]
if transition_ids is None:
transition_ids = {}
for s_cur in range(num_states):
for s_next in range(num_states):
transition_ids[(s_cur, s_next)] = len(transition_ids)
for s in range(num_states):
transition_ids[(-1, s)] = len(transition_ids)
output_table = openfst.SymbolTable()
output_table.add_symbol(EPSILON_STRING, key=EPSILON)
for transition, index in transition_ids.items():
output_table.add_symbol(str(transition), key=index + 1)
input_table = openfst.SymbolTable()
input_table.add_symbol(EPSILON_STRING, key=EPSILON)
for transition, index in transition_ids.items():
input_table.add_symbol(str(transition), key=index + 1)
fst = openfst.VectorFst(arc_type=arc_type)
fst.set_input_symbols(input_table)
fst.set_output_symbols(output_table)
zero = openfst.Weight.zero(fst.weight_type())
one = openfst.Weight.one(fst.weight_type())
if init_weights is None:
init_weights = tuple(float(one) for __ in range(num_states))
if final_weights is None:
final_weights = tuple(float(one) for __ in range(num_states))
fst.set_start(fst.add_state())
def makeState(i):
state = fst.add_state()
initial_weight = openfst.Weight(fst.weight_type(), init_weights[i])
if initial_weight != zero:
transition = transition_ids[-1, i] + 1
arc = openfst.Arc(EPSILON, transition, initial_weight, state)
fst.add_arc(fst.start(), arc)
final_weight = openfst.Weight(fst.weight_type(), final_weights[i])
if final_weight != zero:
fst.set_final(state, final_weight)
return state
states = tuple(makeState(i) for i in range(num_states))
for i_cur, row in enumerate(transition_weights):
for i_next, tx_weight in enumerate(row):
cur_state = states[i_cur]
next_state = states[i_next]
weight = openfst.Weight(fst.weight_type(), tx_weight)
transition = transition_ids[i_cur, i_next] + 1
if weight != zero:
arc = openfst.Arc(transition, transition, weight, next_state)
fst.add_arc(cur_state, arc)
if not fst.verify():
raise openfst.FstError("fst.verify() returned False")
# print("fst.verify() returned False")
return fst
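# Hedged usage sketch (added; the weights are arbitrary): a two-state transition model.
# Weights live in the chosen semiring (tropical by default), so smaller is better here.
def _example_fromTransitions():
    transition_weights = np.array([[1.0, 2.0], [2.0, 1.0]])
    init_weights = (0.0, 5.0)
    final_weights = (5.0, 0.0)
    fst = fromTransitions(transition_weights, init_weights, final_weights)
    return fst.num_states()  # the start state plus one state per model state: 3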
def durationFst(
label, num_states, transition_weights=None, self_weights=None,
arc_type='standard', symbol_table=None):
""" Construct a left-to-right WFST from an input sequence.
The input is usually a sequence of segment-level labels, and this machine
is used to align labels with sample-level scores.
Parameters
----------
input_seq : iterable(int or string)
Returns
-------
fst : openfst.Fst
A linear-chain weighted finite-state transducer. Each state
has one self-transition and one transition to its right neighbor. i.e.
the topology looks like this:
__ __ __
\/ \/ \/
[START] --> s1 --> s2 --> s3 --> [END]
"""
if num_states < 1:
raise AssertionError(f"num_states = {num_states}, but should be >= 1)")
fst = openfst.VectorFst(arc_type=arc_type)
one = openfst.Weight.one(fst.weight_type())
zero = openfst.Weight.zero(fst.weight_type())
if transition_weights is None:
transition_weights = [one for __ in range(num_states)]
if self_weights is None:
self_weights = [one for __ in range(num_states)]
if symbol_table is not None:
fst.set_input_symbols(symbol_table)
fst.set_output_symbols(symbol_table)
init_state = fst.add_state()
fst.set_start(init_state)
cur_state = fst.add_state()
arc = openfst.Arc(EPSILON, label + 1, one, cur_state)
fst.add_arc(init_state, arc)
for i in range(num_states):
next_state = fst.add_state()
transition_weight = openfst.Weight(fst.weight_type(), transition_weights[i])
if transition_weight != zero:
arc = openfst.Arc(label + 1, EPSILON, transition_weight, next_state)
fst.add_arc(cur_state, arc)
self_weight = openfst.Weight(fst.weight_type(), self_weights[i])
if self_weight != zero:
arc = openfst.Arc(label + 1, EPSILON, self_weight, cur_state)
fst.add_arc(cur_state, arc)
cur_state = next_state
fst.set_final(cur_state, one)
if not fst.verify():
raise openfst.FstError("fst.verify() returned False")
return fst
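# Hedged usage sketch (added; the weights and vocabulary are made up): a three-state
# duration model for label 0, matching the topology drawn in the docstring above.
def _example_durationFst():
    symbols = makeSymbolTable(['step'])
    fst = durationFst(
        0, 3,
        transition_weights=[0.6, 0.6, 0.6], self_weights=[0.4, 0.4, 0.4],
        symbol_table=symbols
    )
    return fst.num_states()  # init state + 3 internal states + final state = 5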
def seqFstToBatch(seq_fst, gt_batch_fst):
seq_prime = seq_fst.copy()
one = openfst.Weight.one(seq_prime.weight_type())
seq_prime.set_input_symbols(gt_batch_fst.input_symbols())
seq_prime.set_output_symbols(gt_batch_fst.output_symbols())
old_start = seq_prime.start()
new_start = seq_prime.add_state()
for seq_index in range(gt_batch_fst.num_arcs(gt_batch_fst.start())):
input_label = seq_prime.input_symbols().find(f"seq{seq_index}")
output_label = seq_prime.output_symbols().find(f"seq{seq_index}")
arc = openfst.Arc(input_label, output_label, one, old_start)
seq_prime.add_arc(new_start, arc)
seq_prime.set_start(new_start)
return seq_prime
# -=( TRAINING )==-------------------------------------------------------------
def gradientStep(weights, gradient, step_size=1e-3):
new_weights = weights - step_size * gradient
return new_weights
# -=( CRF/HMM ALGORITHMS )==---------------------------------------------------
def fstProb(numerator_graph, denominator_graph):
denom_betas = backward(denominator_graph, neglog_to_log=True)
num_betas = backward(numerator_graph, neglog_to_log=True)
# LOSS FUNCTION
denom_weight = denom_betas[denominator_graph.start()]
num_weight = num_betas[numerator_graph.start()]
# num_weight - denom_weight is the log-prob, so invert to get the prob in
# openfst's log semiring
neg_log_prob = openfst.divide(denom_weight, num_weight)
# LOSS GRADIENT
num_arcgrad = fstArcGradient(numerator_graph, betas=num_betas)
denom_arcgrad = fstArcGradient(denominator_graph, betas=denom_betas)
arc_gradient = subtractArcs(num_arcgrad, denom_arcgrad)
return neg_log_prob, arc_gradient
def subtractArcs(numerator, denominator):
"""
FIXME: This function should return an FST in the real semiring. But since
openfst's python wrapper only exposes the log and tropical semirings,
it returns a log-semiring FST whose weights are actually real-valued.
"""
def findArc(target, candidates):
for candidate in candidates:
if candidate.ilabel == target.ilabel and candidate.olabel == target.olabel:
return candidate
raise ValueError("No candidate matches target!")
def subtractLogWeights(lhs, rhs):
difference = np.exp(-float(lhs)) - np.exp(-float(rhs))
return openfst.Weight(lhs.type(), difference)
difference = denominator.copy()
zero = openfst.Weight.zero(numerator.weight_type())
states = [(numerator.start(), difference.start())]
while states:
num_state, difference_state = states.pop()
num_weight = numerator.final(num_state)
denom_weight = difference.final(difference_state)
if denom_weight != zero or num_weight != zero:
difference_weight = subtractLogWeights(num_weight, denom_weight)
difference.set_final(difference_state, difference_weight)
for num_arc in numerator.arcs(num_state):
difference_arc_iterator = difference.mutable_arcs(difference_state)
difference_arc = findArc(num_arc, difference_arc_iterator)
difference_arc.weight = subtractLogWeights(num_arc.weight, difference_arc.weight)
difference_arc_iterator.set_value(difference_arc)
states.append((num_arc.nextstate, difference_arc.nextstate))
return difference
def seqGradient(arc_gradient, arc_to_transition):
"""
TODO: Generalize this so it maps arcs to their partial gradients.
FIXME: arc_gradient is a log-semiring WFST whose weights are actually real-valued.
"""
# NOTE: We assert that arc_gradient should be in the log semiring as a hack.
# Actually it should be in the real semiring, but openfst's python wrapper
# doesn't expose that one. arc_gradient's weights actually represent real
# values.
if arc_gradient.arc_type() != 'log':
raise AssertionError()
zero = openfst.Weight.zero(arc_gradient.weight_type())
num_states = max(v[1] for v in arc_to_transition.values()) + 1
transition_grad = np.zeros((num_states, num_states))
initial_grad = np.zeros(num_states)
final_grad = np.zeros(num_states)
if arc_gradient.final(arc_gradient.start()) != zero:
raise AssertionError("Lattice start states should never be final states!")
for state in arc_gradient.states():
for arc in arc_gradient.arcs(state):
try:
prev_out, cur_out = arc_to_transition[arc.olabel - 1]
except KeyError:
continue
arc_prob = float(arc.weight)
if state == arc_gradient.start():
initial_grad[cur_out] += arc_prob
else:
transition_grad[prev_out, cur_out] += arc_prob
next_final_weight = arc_gradient.final(arc.nextstate)
if next_final_weight != zero:
final_prob = float(arc.weight)
final_grad[cur_out] += final_prob
param_gradient = {
'transition': transition_grad,
'initial': initial_grad,
'final': final_grad
}
return param_gradient
def fstArcGradient(lattice, alphas=None, betas=None):
if lattice.arc_type() != 'log':
lattice = openfst.arcmap(lattice, map_type='to_log')
if alphas is None:
alphas = forward(lattice, neglog_to_log=True)
if betas is None:
betas = backward(lattice, neglog_to_log=True)
total_weight = betas[lattice.start()]
zero = openfst.Weight.zero(lattice.weight_type())
arc_gradient = lattice.copy()
for state in arc_gradient.states():
w_incoming = alphas[state]
arc_iterator = arc_gradient.mutable_arcs(state)
while not arc_iterator.done():
arc = arc_iterator.value()
w_outgoing = betas[arc.nextstate]
weight_thru_arc = openfst.times(w_incoming, w_outgoing)
arc_neglogprob = openfst.divide(total_weight, weight_thru_arc)
arc.weight = arc_neglogprob
arc_iterator.set_value(arc)
arc_iterator.next()
if lattice.final(state) != zero:
# w_outgoing = one --> final weight = w_in \otimes one = w_in
weight_thru_arc = alphas[state]
arc_neglogprob = openfst.divide(total_weight, weight_thru_arc)
arc_gradient.set_final(state, arc_neglogprob)
return arc_gradient
def forward(lattice, neglog_to_log=False):
if lattice.arc_type() != 'log':
lattice = openfst.arcmap(lattice, map_type='to_log')
if neglog_to_log:
inverted = openfst.arcmap(lattice, map_type='invert')
one = openfst.Weight.one(lattice.weight_type())
alphas = [openfst.divide(one, a) for a in forward(inverted)]
return alphas
alphas = openfst.shortestdistance(lattice)
return alphas
def backward(lattice, neglog_to_log=False):
if lattice.arc_type() != 'log':
lattice = openfst.arcmap(lattice, map_type='to_log')
if neglog_to_log:
inverted = openfst.arcmap(lattice, map_type='invert')
one = openfst.Weight.one(lattice.weight_type())
betas = [openfst.divide(one, a) for a in backward(inverted)]
return betas
betas = openfst.shortestdistance(lattice, reverse=True)
return betas
def viterbi(lattice):
if lattice.arc_type() != 'standard':
lattice = openfst.arcmap(lattice, map_type='to_std')
shortest_paths = openfst.shortestpath(lattice).topsort().rmepsilon()
return shortest_paths
def align(scores, label_seq):
""" Align (ie segment) a sequence of scores, given a known label sequence.
NOTE: I don't know if this works with score tables that have more than 9
columns.
Parameters
----------
scores : array_like of float, shape (num_samples, num_labels)
Log probabilities (possibly un-normalized).
label_seq : iterable(string or int)
The segment-level label sequence.
Returns
-------
aligned_labels : tuple(int)
alignment_score : semirings.TropicalSemiringWeight
        Score of the best path through the alignment graph (possibly un-normalized)
"""
raise NotImplementedError()
```
#### File: seqtools/semirings/paramdict.py
```python
import math
class ParamDict(dict): # inherits from built-in dict
"""
A dictionary used to map parameter names to their numeric values.
The names are arbitrary objects. The default value for a parameter
that has never been updated is normally 0 and is given by the `None` key.
These dictionaries may be treated like sparse numeric vectors --
they may be added, subtracted, multiplied by real scalars, etc.
Also, adding a real number to a dictionary adds it to all values
(including the default), by analogy with numpy vectors.
"""
def __init__(self, contents=(), default=None, **kwargs):
super().__init__(contents, **kwargs)
        if default is not None: # explicitly specified default: override anything copied from contents
self[None] = default
elif None not in self: # no explicitly specified default
self[None] = 0 # set to 0 since contents didn't specify one either
def __getitem__(self, key): # supports self[key]
try:
return super().__getitem__(key)
except KeyError:
return super().__getitem__(None) # default
def copy(self):
return self.__class__(self)
def _clean(self):
"""
Put this ParamDict into canonical form by (destructively) removing any
redundant entries that match the default.
"""
default = self[None]
kk = [k for k,v in self.items() if v==default and k is not None] # keys of redundant entries
for k in kk:
del self[k]
def __eq__(self, other): # supports self==other
self._clean()
if isinstance(other,ParamDict):
other._clean()
return super().__eq__(other)
else: # for tests like self==0
return self[None]==other and len(self)==1 # only contains the default
# See https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
def __neg__(self): # supports -self
result = self.__class__()
for k,v in self.items():
result[k] = -v
return result
def __iadd__(self, other): # supports self += other (destructive)
if isinstance(other,dict):
for k,v in other.items():
if k not in self: self[k] = self[None]
self[k] += v
else:
for k in self:
self[k] += other
return self
def __isub__(self, other): # supports self -= other (destructive)
if isinstance(other,dict):
for k,v in other.items():
if k not in self: self[k] = self[None]
self[k] -= v
else:
for k in self:
self[k] -= other
return self
def __imul__(self, other): # supports self *= other (destructive)
if isinstance(other,dict):
for k,v in other.items():
if k not in self: self[k] = self[None]
self[k] *= v
else:
for k in self:
self[k] *= other
return self
def __itruediv__(self, other): # supports self /= other (destructive)
if isinstance(other,dict):
for k,v in other.items():
if k not in self: self[k] = self[None]
self[k] /= v
else:
for k in self:
self[k] /= other
return self
def __add__(self, other): # supports self+other
result = self.copy()
result += other
return result
def __sub__(self, other): # supports self-other
result = self.copy()
result -= other
return result
def __mul__(self, other): # supports self*other
result = self.copy()
result *= other
return result
def __truediv__(self, other): # supports self/other
result = self.copy()
result /= other
return result
def __radd__(self, other): # supports other+self (when other is a scalar)
return self+other # warning: assumes that + is commutative
def __rsub__(self, other): # supports other-self (when other is a scalar)
return -self+other # warning: assumes that + is commutative
def __rmul__(self, scalar): # supports scalar*self. Does not assume that * is commutative.
result = self.__class__()
for k,v in self.items():
result[k] = scalar*v
return result
def __rtruediv__(self, scalar):
result = self.__class__()
for k,v in self.items():
result[k] = scalar/v
return result
def __str__(self):
self._clean()
return super().__str__()
    def quantize(self, quantum): # round values to multiples of quantum > 0
result = self.__class__()
for k,v in self.items():
if math.isfinite(v):
result[k] = (v // quantum) * quantum
else:
result[k] = v
return result
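# Hedged usage sketch (added; the parameter names and values are made up): ParamDict
# behaves like a sparse vector whose default value lives under the None key.
def _example_paramdict():
    weights = ParamDict({'w1': 1.0, 'w2': -2.0})
    gradient = ParamDict({'w1': 0.5, 'w3': 4.0})
    updated = weights - 0.1 * gradient
    # updated['w1'] == 0.95, updated['w3'] == -0.4, and keys that were never
    # updated fall back to the default: updated['anything else'] == 0.0
    return updated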
```
#### File: seqtools/seqtools/torch_lctm.py
```python
import torch
from mathtools import utils
def log_prob(data_scores, y, max_segs, pw=None):
    # `scores` has shape (num_samples, num_classes), so marginalizing over classes
    # at the last frame should give us the data likelihood... I think :/
scores = segmental_forward(data_scores, max_segs, pw=pw, semiring='log')
log_Z = torch.logsumexp(scores[-1, :], -1)
if torch.isnan(log_Z).any():
raise ValueError("log_Z contains NaN values")
y_score = segmental_score(data_scores, y, pw=pw)
if torch.isnan(y_score).any():
raise ValueError("y_score contains NaN values")
return -y_score - log_Z
def segmental_score(data_scores, y, pw=None):
score = 0
start_index = 0
prev_label = None
for seg_label, seg_len in utils.genSegments(y):
next_start_index = start_index + seg_len + 1
score += data_scores[start_index:next_start_index, seg_label].sum()
if start_index > 0:
score += pw[prev_label, seg_label]
start_index = next_start_index
prev_label = seg_label
return score
def segmental_forward(x, max_dur, pw=None, semiring='tropical'):
    # From S&C NIPS 2004
    T, n_classes = x.shape
    scores = torch.full([T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
    # classes_prev = torch.ones([T, n_classes], np.int)
    if pw is None:
        pw = torch.zeros([n_classes, n_classes], dtype=x.dtype)
    # Reduction: semiring plus (max in the tropical semiring, logsumexp in the log semiring)
    if semiring == 'tropical':
        def sr_sum(values):
            return values.max()
    elif semiring == 'log':
        def sr_sum(values):
            return torch.logsumexp(values, 0)
    else:
        raise AssertionError(f"Unrecognized semiring: {semiring}")
    # initialize first segment scores
    integral_scores = torch.cumsum(x, 0)
    scores[0] = integral_scores[0]

    def dur_score(t_end, duration, c):
        t_start = t_end - duration
        current_segment = integral_scores[t_end, c] - integral_scores[t_start, c]
        # Elementwise semiring times
        dur_scores = scores[t_start, :] + current_segment + pw[:, c]
        # Reduction: semiring plus
        return sr_sum(dur_scores)

    # Compute scores per timestep
    for t_end in range(1, T):
        # Compute scores per class
        for c in range(n_classes):
            # Reduce over all durations (semiring plus)
            all_dur_scores = torch.stack(
                [
                    dur_score(t_end, duration, c)
                    for duration in range(1, min(t_end, max_dur) + 1)
                ]
            )
            # Add cost of current frame to best previous cost
            scores[t_end, c] = sr_sum(all_dur_scores)
    return scores
def segmental_viterbi(x, max_dur, pw=None):
# From S&C NIPS 2004
T, n_classes = x.shape
scores = torch.full([T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
lengths = torch.ones([T, n_classes], dtype=torch.long)
# classes_prev = torch.ones([T, n_classes], np.int)
if pw is None:
pw = torch.zeros([n_classes, n_classes], dtype=x.dtype)
# initialize first segment scores
integral_scores = torch.cumsum(x, 0)
scores[0] = integral_scores[0]
# -------- Forward -----------
# Compute scores per timestep
for t_end in range(1, T):
# Compute scores per class
for c in range(n_classes):
# Compute over all durations
best_dur = 0
best_score = -float("Inf")
# best_class = -1
for duration in range(1, min(t_end, max_dur) + 1):
t_start = t_end - duration
current_segment = integral_scores[t_end, c] - integral_scores[t_start, c]
if t_start == 0 and current_segment > best_score:
best_dur = duration
best_score = current_segment
# best_class = -1
continue
# Check if it is cheaper to create a new segment or stay in same class
for c_prev in range(n_classes):
if c_prev == c:
continue
# Previous segment, other class
tmp = scores[t_start, c_prev] + current_segment + pw[c_prev, c]
if tmp > best_score:
best_dur = duration
best_score = tmp
# best_class = c_prev
            # Add cost of current frame to best previous cost
scores[t_end, c] = best_score
lengths[t_end, c] = best_dur
# classes_prev[t_end, c] = best_class
    # Set -inf entries to 0 for visualization
# scores[scores<0] = 0
scores[torch.isinf(scores)] = 0
# -------- Backward -----------
classes = [scores[-1].argmax()]
times = [T]
t = T - lengths[-1, classes[-1]]
while t > 0:
class_prev = scores[t].argmax()
length = lengths[t, class_prev]
classes.insert(0, class_prev)
times.insert(0, t)
t -= length
    y_out = torch.zeros(T, dtype=torch.long)
t = 0
for c, l in zip(classes, times):
y_out[t:t + l] = c
t += l
return scores
def segmental_forward_normalized(x, max_segs, pw=None):
""" This version maximizes!!! """
# Assumes segment function is normalized by duration: f(x)= 1/d sum_t'=t^t+d x_t'
T, n_classes = x.shape
scores = torch.full([max_segs, T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
if pw is None:
pw = torch.zeros([n_classes, n_classes], dtype=x.dtype)
integral_scores = torch.cumsum(x, 0)
    # Initial scores
    scores[0] = integral_scores.clone()
    starts = torch.zeros([max_segs, n_classes], dtype=torch.long) + 1
# Compute scores for each segment in sequence
for m in range(1, max_segs):
# Compute score for each class
for c in range(n_classes):
best_score = -float("Inf")
for c_prev in range(n_classes):
if c_prev == c:
continue
# Compute scores for each timestep
for t in range(1, T):
new_segment = integral_scores[t, c] - integral_scores[starts[m, c], c]
# Previous segment, other class
score_change = scores[m - 1, t, c_prev] + pw[c_prev, c]
if score_change > best_score:
best_score = score_change
starts[m, c] = t
                    # Add cost of current frame to best previous cost
scores[m, t, c] = best_score + new_segment
    # Set -inf entries to 0 for visualization
scores[torch.isinf(scores)] = 0
return scores
def sparsify_incoming_pw(pw):
# Output is INCOMING transitions
n_classes = pw.shape[0]
valid = torch.nonzero(~torch.isinf(pw.T), as_tuple=True) # requires pytorch 1.3
sparse_idx = [[] for i in range(n_classes)]
for i, j in zip(valid[0], valid[1]):
sparse_idx[i] += [j]
return sparse_idx
def log_prob_eccv(data_scores, y, max_segs, pw=None):
# Scores has shape (num_segs, num_samples, num_classes),
# so marginalizing over number of segments and number of classes at the last
# frame should give us the data likelihood... I think :/
scores = segmental_forward_eccv(data_scores, max_segs, pw=pw, semiring='log')
log_Z = torch.logsumexp(torch.logsumexp(scores[:, -1, :], 0), -1)
if torch.isnan(log_Z).any():
raise ValueError("log_Z contains NaN values")
y_score = segmental_score(data_scores, y, pw=pw)
if torch.isnan(y_score).any():
raise ValueError("y_score contains NaN values")
    return y_score - log_Z
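# --- Illustrative sketch (editor's addition, not part of the original code) ---
# The comment above says that marginalizing the forward scores over the number
# of segments and over classes at the last frame yields the log-partition log_Z.
# This hypothetical helper shows that computation on random inputs; the names
# below are examples only.
def _example_log_partition(T=6, n_classes=3, max_segs=4):
    x = torch.randn(T, n_classes)
    scores = segmental_forward_eccv(x, max_segs, pw=None, semiring='log')
    # scores has shape (max_segs, T, n_classes); reduce over segments and
    # classes at the final frame with a single logsumexp.
    log_Z = torch.logsumexp(scores[:, -1, :].reshape(-1), 0)
    return log_Z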
def segmental_forward_eccv(x, max_segs, pw=None, semiring='tropical'):
if torch.isnan(x).any():
raise ValueError("x contains NaN values")
if semiring == 'tropical':
def sr_prod(x, y):
return x + y
def sr_sum(x):
return x.max()
elif semiring == 'log':
def sr_prod(x, y):
return x + y
def sr_sum(x):
return torch.logsumexp(x, 0)
else:
        raise AssertionError(f"Unrecognized semiring: {semiring}")
# Assumes segment function is additive: f(x)=sum_t'=t^t+d x_t'
T, n_classes = x.shape
scores = torch.full([max_segs, T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
if pw is None:
pw = torch.log(1 - torch.eye(n_classes))
# print("pw is None: Using uniform transition weights (no self-loops)")
# initialize first segment scores
scores[0] = torch.cumsum(x, 0)
# Compute scores per segment
for m in range(1, max_segs):
# Compute scores per timestep
for t in range(1, T):
# Compute scores per class
for c in range(n_classes):
# Elementwise semiring times
new_scores = torch.cat(
(scores[m, t - 1, c:c + 1], sr_prod(scores[m - 1, t - 1, :], pw[:, c]))
)
# Reduction: semiring plus
best_prev = sr_sum(new_scores)
                # Add cost of current frame to best previous cost
scores[m, t, c] = sr_prod(best_prev, x[t, c])
if torch.isnan(scores).any():
raise ValueError("scores contains NaN values")
return scores
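# --- Illustrative note (editor's addition): semiring reductions ---
# In the tropical semiring the reduction is max, which gives Viterbi-style best
# segmentation scores; in the log semiring it is logsumexp, which accumulates
# all segmentations in log space. A hypothetical toy comparison:
def _example_semiring_reductions():
    v = torch.tensor([0.0, -1.0, -2.0])
    viterbi_style = v.max()                 # tropical-semiring reduction
    marginal_style = torch.logsumexp(v, 0)  # log-semiring reduction (>= max)
    return viterbi_style, marginal_style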
def segmental_backward_eccv(scores, pw=None):
n_segs, T, n_classes = scores.shape
if pw is None:
pw = torch.log(1 - torch.eye(n_classes))
# print("pw is None: Using uniform transition weights (no self-loops)")
best_scores = scores[:, -1].max(1).values
n_segs = torch.argmax(best_scores)
# Start at end
seq_c = [scores[n_segs, -1].argmax()] # Class
seq_t = [T] # Time
m = n_segs
for t in range(T, -1, -1):
if m == 0:
break
# Scores of previous timestep in current segment
score_same = scores[m, t - 1, seq_c[0]]
score_diff = scores[m - 1, t - 1] + pw[:, seq_c[0]]
# Check if it's better to stay or switch segments
if any(score_diff > score_same):
next_class = score_diff.argmax()
score_diff = score_diff[next_class]
seq_c.insert(0, next_class)
seq_t.insert(0, t)
m -= 1
elif all(score_diff == score_same):
m -= 1
seq_t.insert(0, 0)
if m != 0:
raise AssertionError("Found " + str(m) + " segments, but expected zero!")
y_out = torch.full((T,), -1, dtype=torch.long, device=scores.device)
for i in range(len(seq_c)):
y_out[seq_t[i]:seq_t[i + 1]] = seq_c[i]
return y_out
def segmental_inference(
x, max_segs=None, pw=None, normalized=False, verbose=False, return_scores=False):
# Scores has shape (num_segs, num_samples, num_classes)
scores = segmental_forward_eccv(x, max_segs, pw)
y_out = segmental_backward_eccv(scores, pw)
if return_scores:
num_segs = len(utils.segment_labels(y_out))
        y_idxs = torch.arange(y_out.numel())
y_scores = scores[num_segs, y_idxs, y_out]
return y_out, y_scores
return y_out
def segmental_forward_oracle(x, max_segs, pw, y_oracle, oracle_valid):
# Assumes segment function is additive: f(x)=sum_t'=t^t+d x_t'
T, n_classes = x.shape
scores = torch.full([max_segs, T, n_classes], -float("Inf"), dtype=x.dtype, device=x.device)
    lengths = torch.zeros([max_segs, T, n_classes], dtype=torch.long)
if pw is None:
pw = torch.log(1 - torch.eye(n_classes))
# initialize first segment scores
scores[0] = torch.cumsum(x, 0)
# Compute scores per segment
for m in range(1, max_segs):
# scores[m, 0, c] = scores[m-1, 0, c]
# Compute scores per timestep
for t in range(1, T):
# Compute scores per class
for c in range(n_classes):
# Score for staying in same segment
best_prev = scores[m, t - 1, c]
length = lengths[m, t - 1, c] + 1
# Check if it is cheaper to create a new segment or stay in same class
for c_prev in range(n_classes):
# Previous segment, other class
tmp = scores[m - 1, t - 1, c_prev] + pw[c_prev, c]
if tmp > best_prev:
best_prev = tmp
length = 1
if oracle_valid[y_oracle[t], c] == 0:
best_prev = -float("Inf")
                # Add cost of current frame to best previous cost
scores[m, t, c] = best_prev + x[t, c]
lengths[m, t, c] = length
    # Set -inf entries to 0 for visualization
scores[torch.isinf(scores)] = 0
return scores
def segmental_inference_oracle(x, max_segs, pw, y_oracle, oracle_valid):
scores = segmental_forward_oracle(x, max_segs, pw, y_oracle, oracle_valid)
return segmental_backward_eccv(scores, pw)
```
#### File: seqtools/seqtools/utils.py
```python
import warnings
import collections
import logging
import numpy as np
from matplotlib import pyplot as plt
logger = logging.getLogger(__name__)
def smoothCounts(
edge_counts, state_counts, init_states, final_states, num_states=None,
init_regularizer=0, final_regularizer=0,
uniform_regularizer=0, diag_regularizer=0,
override_transitions=False, structure_only=False, as_numpy=False, as_scores=False):
if num_states is None:
num_states = max(state_counts.keys()) + 1
bigram_counts = np.zeros((num_states, num_states))
for (i, j), count in edge_counts.items():
bigram_counts[i, j] = count
unigram_counts = np.zeros(num_states)
for i, count in state_counts.items():
unigram_counts[i] = count
initial_counts = np.zeros(num_states)
for i, count in init_states.items():
initial_counts[i] = count
final_counts = np.zeros(num_states)
for i, count in final_states.items():
final_counts[i] = count
# Regularize the heck out of these counts
initial_states = initial_counts.nonzero()[0]
for i in initial_states:
bigram_counts[i, i] += init_regularizer
final_states = final_counts.nonzero()[0]
for i in final_states:
bigram_counts[i, i] += final_regularizer
bigram_counts += uniform_regularizer
diag_indices = np.diag_indices(bigram_counts.shape[0])
bigram_counts[diag_indices] += diag_regularizer
if override_transitions:
logger.info('Overriding bigram_counts with an array of all ones')
bigram_counts = np.ones_like(bigram_counts)
if structure_only:
bigram_counts = (bigram_counts > 0).astype(float)
initial_counts = (initial_counts > 0).astype(float)
final_counts = (final_counts > 0).astype(float)
denominator = bigram_counts.sum(1)
transition_probs = np.divide(
bigram_counts, denominator[:, None],
out=np.zeros_like(bigram_counts),
where=denominator[:, None] != 0
)
final_probs = np.divide(
final_counts, denominator,
out=np.zeros_like(final_counts),
where=denominator != 0
)
initial_probs = initial_counts / initial_counts.sum()
if as_numpy:
        def to_numpy(x):
            return np.asarray(x, dtype=float)
return tuple(map(to_numpy, (transition_probs, initial_probs, final_probs)))
if as_scores:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='divide by zero')
transition_probs = np.log(transition_probs)
initial_probs = np.log(initial_probs)
final_probs = np.log(final_probs)
return transition_probs, initial_probs, final_probs
def countSeqs(seqs):
""" Count n-gram statistics on a collection of sequences.
Parameters
----------
seqs : iterable( iterable(Hashable) )
Returns
-------
bigram_counts : collections.defaultdict((Hashable, Hashable) -> int)
unigram_counts : collections.defaultdict(Hashable -> int)
initial_counts : collections.defaultdict(Hashable -> int)
final_counts : collections.defaultdict(Hashable -> int)
"""
bigram_counts = collections.defaultdict(int)
unigram_counts = collections.defaultdict(int)
initial_counts = collections.defaultdict(int)
final_counts = collections.defaultdict(int)
for seq in seqs:
initial_counts[seq[0]] += 1
final_counts[seq[-1]] += 1
for state in seq:
unigram_counts[state] += 1
for prev, cur in zip(seq[:-1], seq[1:]):
bigram_counts[prev, cur] += 1
return bigram_counts, unigram_counts, initial_counts, final_counts
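# --- Illustrative usage sketch (editor's addition, not part of the original API) ---
# countSeqs accumulates bigram/unigram/initial/final counts, and smoothCounts
# regularizes and normalizes them into transition, initial, and final
# probabilities. A hypothetical toy run over two short label sequences:
def _example_count_and_smooth():
    seqs = [[0, 0, 1, 2], [0, 1, 1, 2]]
    bigrams, unigrams, initials, finals = countSeqs(seqs)
    transition_probs, initial_probs, final_probs = smoothCounts(
        bigrams, unigrams, initials, finals, num_states=3
    )
    return transition_probs, initial_probs, final_probs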
def plot_transitions(fn, transition_probs, initial_probs, final_probs):
plt.figure()
plt.matshow(transition_probs)
plt.title('Transitions')
plt.savefig(fn)
plt.close()
```
#### File: seqtools/tests/fsm.py
```python
import os
import collections
import argparse
import yaml
import numpy as np
import torch
from seqtools import fsm, torchutils, metrics, utils
def sampleMarkov(start_probs, end_probs, transition_probs):
samples = []
transition_dist = start_probs
while True:
new_sample = np.random.choice(len(transition_dist), p=transition_dist)
samples.append(new_sample)
transition_dist = transition_probs[new_sample]
end_dist = [1 - end_probs[new_sample], end_probs[new_sample]]
if np.random.choice(2, p=end_dist):
return np.array(samples, dtype=int)
def sampleGHMM(start_probs, end_probs, transition_probs, means, covs):
label_seq = sampleMarkov(start_probs, end_probs, transition_probs)
    obsv_seq = np.vstack(
        tuple(np.random.multivariate_normal(means[l], covs[l]) for l in label_seq)
    )
return obsv_seq, label_seq
def main(
out_dir=None, gpu_dev_id=None,
sample_size=10, random_seed=None,
learning_rate=1e-3, num_epochs=500,
dataset_kwargs={}, dataloader_kwargs={}, model_kwargs={}):
out_dir = os.path.expanduser(out_dir)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
device = torchutils.selectDevice(gpu_dev_id)
start_probs = np.array([0, 0.25, 0.75])
end_probs = np.array([0.05, 0, 0])
transition_probs = np.array([
[0.8, 0.2, 0.0],
[0.1, 0.8, 0.1],
[0.1, 0.1, 0.8],
])
means = np.array([
[0],
[1],
[2]
])
covs = np.array([
[[0.01]],
[[0.01]],
[[0.01]]
])
obsv_seqs, label_seqs = zip(
*tuple(
sampleGHMM(start_probs, end_probs, transition_probs, means, covs)
for _ in range(sample_size)
)
)
dataset = torchutils.SequenceDataset(obsv_seqs, label_seqs, **dataset_kwargs)
data_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs)
# class CRF(fsm.FstScorer, torchutils.LinearClassifier):
# pass
class CRF(torchutils.LinearChainScorer, torchutils.LinearClassifier):
pass
train_loader = data_loader
val_loader = data_loader
input_dim = dataset.num_obsv_dims
output_dim = transition_probs.shape[0]
transition_weights = torch.randn(transition_probs.shape).to(device)
# transition_weights = torch.tensor(transition_probs, device=device).float().log()
model = CRF(
transition_weights, input_dim, output_dim,
initial_weights=None, final_weights=None,
**model_kwargs
)
# Train the model
train_epoch_log = collections.defaultdict(list)
val_epoch_log = collections.defaultdict(list)
metric_dict = {
'Avg Loss': metrics.AverageLoss(),
'Accuracy': metrics.Accuracy()
}
# criterion = fsm.fstNLLLoss
criterion = torchutils.StructuredNLLLoss
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1.00)
model, last_model_wts = torchutils.trainModel(
model, criterion, optimizer, scheduler, train_loader,
val_loader,
metrics=metric_dict,
test_metric='Avg Loss',
train_epoch_log=train_epoch_log,
val_epoch_log=val_epoch_log,
num_epochs=num_epochs,
device=device,
)
torchutils.plotEpochLog(
train_epoch_log, title="Train Epoch Log",
fn=os.path.join(out_dir, "train-log.png")
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
parser.add_argument('--out_dir')
args = vars(parser.parse_args())
args = {k: v for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
config_file_path = args.pop('config_file', None)
if config_file_path is None:
file_basename = utils.stripExtension(__file__)
config_fn = f"{file_basename}.yaml"
config_file_path = os.path.expanduser(
os.path.join('~', 'repo', 'seqtools', 'tests', config_fn)
)
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
config.update(args)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
    with open(os.path.join(out_dir, os.path.basename(config_file_path)), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
```
|
{
"source": "jd-jones/visiontools",
"score": 2
}
|
#### File: visiontools/visiontools/render.py
```python
import logging
import json
import os
import numpy as np
from scipy.spatial.qhull import QhullError
import torch
try:
import neural_renderer as nr
except ImportError:
class DummyModule(object):
Renderer = object
nr = DummyModule()
import mathtools as m
from mathtools import utils
from . import geometry
logger = logging.getLogger(__name__)
""" Functions and attributes for rendering images.
Attributes
----------
intrinsic_matrix : numpy array of float, shape (3, 3)
The intrinsic matrix estimated during camera calibration. This array is
loaded from ``~/repo/blocks/blocks/assets/camera_params.json``. For more
information about how these parameters were estimated, see
``README_camera_params.md`` in the same directory. Layout of the intrinsic
matrix is as follows:
..math:
K = \left[ \begin{matrix}
\alpha_x & 0 & o_x \\
0 & \alpha_y & o_y \\
0 & 0 & 1 \\
\end{matrix} \right]
where :math:`\alpha_x = f s_x` is the size of unit length in horizontal
pixels and :math:`o_x` is the horizontal coordinate of the principal point,
in pixels. :math:`\alpha_y` and :math:`o_y` are defined the same way, but
are vertical measurements.
camera_pose : numpy array of float, shape (4, 4)
object_colors : numpy array of float, shape (num_blocks + 1, 3)
"""
IMAGE_HEIGHT = 240
IMAGE_WIDTH = 320
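# --- Illustrative sketch (editor's addition): pinhole projection with K ---
# The intrinsic matrix documented above maps a camera-frame point (X, Y, Z) to
# pixel coordinates via x = alpha_x * X / Z + o_x and y = alpha_y * Y / Z + o_y.
# The helper name below is an example only.
def _example_project_point(intrinsic_matrix, point_camera):
    homogeneous = intrinsic_matrix @ point_camera  # (3,) vector: K @ (X, Y, Z)
    return homogeneous[:2] / homogeneous[2]        # divide by depth -> pixel (x, y)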
def loadCameraParams(
assets_dir=None, camera_params_fn=None, camera_pose_fn=None,
object_colors_fn=None, as_dict=False):
""" Load camera parameters from external files.
Parameters
----------
assets_dir : str, optional
camera_params_fn : str, optional
camera_pose_fn : str, optional
object_colors_fn : str, optional
as_dict : bool, optional
If True, the parameters are returned as a dictionary instead of a tuple,
with format
{
'intrinsic_matrix': intrinsic_matrix,
'camera_pose': camera_pose,
'object_colors': object_colors
}
Returns
-------
intrinsic_matrix : numpy array of float, shape (3, 3)
The intrinsic matrix estimated during camera calibration. Layout is as
follows:
..math:
K = \left[ \begin{matrix}
\alpha_x & 0 & o_x \\
0 & \alpha_y & o_y \\
0 & 0 & 1 \\
\end{matrix} \right]
where :math:`\alpha_x = f s_x` is the size of unit length in horizontal
pixels and :math:`o_x` is the horizontal coordinate of the principal point,
in pixels. :math:`\alpha_y` and :math:`o_y` are defined the same way, but
are vertical measurements.
camera_pose : numpy array of float, shape (4, 4)
object_colors : numpy array of float, shape (num_blocks + 1, 3)
"""
if assets_dir is None:
assets_dir = os.path.expanduser(os.path.join('~', 'repo', 'blocks', 'blocks', 'assets'))
if camera_params_fn is None:
camera_params_fn = 'camera_params.json'
if camera_pose_fn is None:
camera_pose_fn = 'camera_pose.json'
if object_colors_fn is None:
object_colors_fn = 'object_colors.csv'
# Load camera params
with open(os.path.join(assets_dir, camera_params_fn), 'rt') as f:
json_obj = json.load(f)['camera_intrinsics']['intrinsic_matrix']
intrinsic_matrix = m.np.transpose(m.np.array(json_obj))
# Load camera pose
with open(os.path.join(assets_dir, camera_pose_fn), 'rt') as f:
camera_pose_dict = json.load(f)['camera_pose']
R_camera = geometry.rotationMatrix(**camera_pose_dict['orientation'])
t_camera = m.np.array(camera_pose_dict['position'])
camera_pose = geometry.homogeneousMatrix(R_camera, t_camera, range_space_homogeneous=True)
# Load object colors (ie rudimentary appearance model)
object_colors = m.np.loadtxt(
os.path.join(assets_dir, object_colors_fn),
delimiter=',', skiprows=1
)
if as_dict:
return {
'intrinsic_matrix': intrinsic_matrix,
'camera_pose': camera_pose,
'colors': object_colors
}
return intrinsic_matrix, camera_pose, object_colors
intrinsic_matrix, camera_pose, object_colors = loadCameraParams()
class TorchSceneRenderer(nr.Renderer):
def __init__(self, intrinsic_matrix=None, camera_pose=None, colors=None, **super_kwargs):
K = intrinsic_matrix
K = K[None, :, :].cuda()
R, t = geometry.fromHomogeneous(camera_pose)
R = R[None, :, :].float().cuda()
t = t[None, None, :].float().cuda()
self.colors = colors
super().__init__(
camera_mode='projection', K=K, R=R, t=t,
near=0, far=1000, **super_kwargs
)
def render(self, vertices, faces, textures, intrinsic_matrix=None, camera_pose=None):
""" Wrapper around a differentiable renderer implemented in pytorch.
Parameters
----------
Returns
-------
image
"""
if intrinsic_matrix is None:
K = None
else:
K = intrinsic_matrix
K = K[None, :, :].cuda()
if camera_pose is None:
R = None
t = None
else:
R, t = geometry.fromHomogeneous(camera_pose)
R = R[None, :, :].float().cuda()
t = t[None, None, :].float().cuda()
if len(vertices.shape) == 2:
# [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]
vertices = vertices[None, ...]
if len(faces.shape) == 2:
# [num_faces, 3] -> [batch_size=1, num_faces, 3]
faces = faces[None, ...]
if len(textures.shape) == 5:
textures = textures[None, ...]
images_rgb, images_depth, images_alpha = super().render(vertices, faces, textures)
# [batch_size, RGB, image_size, image_size] -> [batch_size, image_size, image_size, RGB]
images_rgb = images_rgb.permute(0, 2, 3, 1)
return images_rgb, images_depth
def renderScene(
self, background_plane, assembly, component_poses,
camera_pose=None, camera_params=None, render_background=True,
as_numpy=False):
""" Render a scene consisting of a spatial assembly and a background plane.
Parameters
----------
Returns
-------
"""
if camera_pose is None:
# FIXME
R = self.R[0].cpu().numpy()
t = self.t[0][0].cpu().numpy()
camera_pose = torch.tensor(geometry.homogeneousMatrix(R, t)).float().cuda()
if camera_params is None:
camera_params = self.K[0]
if render_background:
rgb_bkgrnd, depth_bkgrnd = self.renderPlane(
background_plane, camera_pose=camera_pose, camera_params=camera_params
)
if not assembly.blocks:
return rgb_bkgrnd, depth_bkgrnd
assembly = assembly.setPose(component_poses, in_place=False)
vertices = makeBatch(assembly.vertices, dtype=torch.float).cuda()
faces = makeBatch(assembly.faces, dtype=torch.int).cuda()
textures = makeBatch(assembly.textures, dtype=torch.float).cuda()
rgb_images, depth_images = self.render(vertices, faces, textures)
if render_background:
rgb_images = torch.cat((rgb_bkgrnd, rgb_images), 0)
depth_images = torch.cat((depth_bkgrnd, depth_images), 0)
rgb_image, depth_image, label_image = reduceByDepth(rgb_images, depth_images)
if as_numpy:
rgb_image = rgb_image.detach().cpu().numpy()
depth_image = depth_image.detach().cpu().numpy()
return rgb_image, depth_image
def renderPlane(self, plane, camera_pose=None, camera_params=None):
if camera_pose is None:
camera_pose = geometry.homogeneousMatrix(self.R[0], self.t[0][0])
if camera_params is None:
camera_params = self.K[0]
        vertices, faces = planeVertices(plane, camera_params, camera_pose)
textures = makeTextures(faces, uniform_color=self.colors['black'])
rgb_image, depth_image = self.render(vertices, faces, textures)
return rgb_image, depth_image
def renderComponent(self, assembly, component_index, component_pose, background_images=None):
"""
Parameters
----------
background_images : tuple(
array of float, shape (img_height, img_width, 3),
array of shape (img_height, img_width)
)
Elements should be as follows:
0 --- RGB image
1 --- Depth image
Returns
-------
"""
assembly = assembly.recenter(component_index, in_place=False)
vertices = makeBatch(assembly.componentVertices(component_index), dtype=torch.float).cuda()
faces = makeBatch(assembly.componentFaces(component_index), dtype=torch.int).cuda()
textures = makeBatch(assembly.componentTextures(component_index), dtype=torch.float).cuda()
R, t = component_pose
vertices = vertices @ R.T + t
rgb_images, depth_images = self.render(vertices, faces, textures)
if background_images is not None:
rgb_images = torch.cat((background_images[0], rgb_images), 0)
depth_images = torch.cat((background_images[1], depth_images), 0)
rgb_image, depth_image, label_image = reduceByDepth(rgb_images, depth_images)
return rgb_image, depth_image
class LegacySceneRenderer(object):
def __init__(self, intrinsic_matrix=None, camera_pose=None, colors=None, **super_kwargs):
self.intrinsic_matrix = intrinsic_matrix
self.camera_pose = camera_pose
self.colors = colors
def renderScene(self, background_plane, assembly, component_poses):
out = renderScene(
background_plane, assembly, component_poses,
camera_pose=self.camera_pose, camera_params=self.intrinsic_matrix,
object_appearances=self.colors
)
return out
def renderPlane(self, plane):
out = renderPlane(
plane, camera_pose=None, camera_params=None, plane_appearance=None,
range_image=None, label_image=None, rgb_image=None
)
return out
def renderComponent(self, assembly, component_idx):
out = renderComponent(
assembly, component_idx, component_pose=None, img_type=None,
camera_pose=None, camera_params=None, block_colors=None,
range_image=None, label_image=None, rgb_image=None,
crop_rendered=False, in_place=True
)
return out
# -=( HELPER FUNCTIONS FOR PYTORCH RENDERER )==--------------------------------
def makeBatch(arrays, **tensor_kwargs):
batch = torch.stack(tuple(torch.tensor(a, **tensor_kwargs) for a in arrays))
return batch
def reduceByDepth(rgb_images, depth_images, max_depth=None):
""" For each pixel in a scene, select the object closest to the camera.
Parameters
----------
rgb_images : torch.tensor of float, shape (batch_size, img_height, img_width)
depth_images : torch.tensor of float, shape (batch_size, img_height, img_width)
Returns
-------
rgb_image : torch.tensor of float, shape (img_height, img_width)
depth_image : torch.tensor of float, shape (img_height, img_width)
label_image : torch.tensor of int, shape (img_height, img_width)
"""
label_image = depth_images.argmin(-3)
new_shape = label_image.shape
num_batch = new_shape[0]
num_rows, num_cols = new_shape[-2:]
b, r, c = torch.meshgrid(
torch.arange(num_batch),
torch.arange(num_rows),
torch.arange(num_cols)
)
i_min = label_image.contiguous().view(-1)
b = b.contiguous().view(-1)
r = r.contiguous().view(-1)
c = c.contiguous().view(-1)
depth_image = depth_images[b, i_min, r, c].view(*new_shape)
rgb_image = rgb_images[b, i_min, r, c, :].view(*new_shape, 3)
if max_depth is not None:
label_image += 1
is_background = depth_image == max_depth
label_image[is_background] = 0
return rgb_image, depth_image, label_image
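# --- Illustrative note (editor's addition): per-pixel depth selection ---
# The core operation above is an argmin over the object/batch dimension of the
# depth images, followed by gathering the corresponding depth (and RGB) values.
# A hypothetical minimal version for depth images of shape (num_objects, H, W):
def _example_depth_select(depth_images):
    nearest = depth_images.argmin(0)  # (H, W) index of the closest object per pixel
    depth = depth_images.gather(0, nearest.unsqueeze(0)).squeeze(0)
    return nearest, depth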
def planeVertices(plane, intrinsic_matrix, camera_pose, image_shape=None):
if image_shape is None:
image_shape = (IMAGE_HEIGHT, IMAGE_WIDTH)
image_shape = tuple(float(x) for x in image_shape)
face_coords = m.np.array([
[0, 0],
[0, image_shape[0]],
[image_shape[1], 0],
[image_shape[1], image_shape[0]]
])
plane_faces = m.np.array([
[0, 1, 2],
[3, 2, 1]
])
    # Construct face_coords_camera in a way that allows
# geometry.slopeIntercept to compute the plane parameters it needs.
face_coords_camera = m.np.zeros((3, 3))
face_coords_camera[0, :] = plane._t
face_coords_camera[1, :] = plane._t + plane._U[:, 0]
face_coords_camera[2, :] = plane._t + plane._U[:, 0] + plane._U[:, 1]
# Backproject each pixel in the face to its location in camera coordinates
n, b = geometry.slopeIntercept(face_coords_camera)
metric_coords_camera = geometry.backprojectIntoPlane(face_coords, n, b, intrinsic_matrix)
vertices = geometry.homogeneousVector(metric_coords_camera) @ m.np.transpose(camera_pose)
return vertices, plane_faces
def makeTextures(faces, texture_size=2, uniform_color=None):
"""
Parameters
----------
faces : array of int, shape (num_faces, 3)
texture_size : int, optional
uniform_color : [int, int, int], optional
Returns
-------
textures : array of float, shape (num_faces, texture_size, texture_size, texture_size, 3)
"""
# create texture [num_faces, texture_size, texture_size, texture_size, RGB]
textures = m.np.zeros(
faces.shape[0], texture_size, texture_size, texture_size, 3,
dtype=torch.float32
)
if uniform_color is not None:
textures[..., :] = torch.tensor(uniform_color)
return textures
# -=( HELPER FUNCTIONS FOR LEGACY RENDERER )==---------------------------------
def findCentroid(img):
if not img.any():
        raise ValueError("Image has no nonzero pixels")
if len(img.shape) > 2:
img = img.sum(axis=2)
rows, cols = np.nonzero(img)
cent_r = np.rint(rows.mean()).astype(int)
cent_c = np.rint(cols.mean()).astype(int)
centroid = (cent_r, cent_c)
len_r = rows.max() - rows.min()
len_c = cols.max() - cols.min()
nonzero_shape = (len_r, len_c)
return centroid, nonzero_shape
def centerBoundingBox(img_centroid, img_shape):
r_centroid, c_centroid = img_centroid
r_len, c_len = img_shape[0:2]
r_max_centered = r_centroid + r_len // 2
if r_len % 2:
r_max_centered += 1
r_min_centered = r_centroid - r_len // 2
r_extent = (r_min_centered, r_max_centered)
c_max_centered = c_centroid + c_len // 2
if c_len % 2:
c_max_centered += 1
c_min_centered = c_centroid - c_len // 2
c_extent = (c_min_centered, c_max_centered)
return r_extent, c_extent
def cropImage(img, shape=None):
img_centroid, nonzero_shape = findCentroid(img)
# By default, shape is a bounding square for the nonzero image elements
if shape is None:
r_len, c_len = nonzero_shape
max_len = (r_len ** 2 + c_len ** 2) ** 0.5
max_len = np.ceil(max_len).astype(int)
shape = (max_len, max_len)
(r_min, r_max), (c_min, c_max) = centerBoundingBox(img_centroid, shape)
cropped = img[r_min:r_max, c_min:c_max].copy()
return cropped
def renderScene(
background_plane, assembly, component_poses,
camera_pose=None, camera_params=None, object_appearances=None):
""" Render a scene consisting of a spatial assembly and a background plane.
Parameters
----------
Returns
-------
"""
# Start by rendering the background
rgb_image, range_image, label_image = renderPlane(
background_plane, camera_pose, camera_params,
plane_appearance=object_appearances[0, :],
)
# Then render each foreground object one-by-one
for comp_idx, comp_key in enumerate(assembly.connected_components.keys()):
comp_pose = component_poses[comp_idx]
_ = renderComponent(
assembly, comp_key, component_pose=comp_pose,
camera_pose=camera_pose, camera_params=camera_params,
block_colors=object_appearances,
range_image=range_image, label_image=label_image, rgb_image=rgb_image,
crop_rendered=False
)
return rgb_image, range_image, label_image
def renderPlane(
plane, camera_pose=None, camera_params=None, plane_appearance=None,
range_image=None, label_image=None, rgb_image=None):
""" Render a component of the state.
Parameters
----------
plane : geometry.Plane
Plane that should be rendered.
camera_pose : numpy array of float, shape (4, 4)
The camera's pose with respect to the world coordinate frame. This is
a rigid motion (R, t), represented as a homogeneous matrix.
camera_params : numpy array of float, shape (3, 4)
The camera's intrinsic parameters.
plane_appearance : numpy array, shape (3,)
The color of the plane.
range_image : numpy array of float, shape (img_height, img_width), optional
Pre-existing Z_buffer. Each pixel value is the distance from the camera
in mm.
label_image : numpy array of int, shape (img_height, img_width), optional
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
rgb_image : numpy array of float, shape (img_height, img_width, 3), optional
Pre-existing RGB image.
Returns
-------
rgb_image : numpy array of float, shape (img_height, img_width, 3)
Color image in RGB format.
range_image : numpy array of float, shape (img_height, img_width)
The Z-buffer. Each pixel value is the distance from the camera in mm.
label_image : numpy array of int, shape (img_height, img_width)
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
"""
if range_image is None:
img_size = (IMAGE_HEIGHT, IMAGE_WIDTH)
range_image = np.full(img_size, np.inf)
if label_image is None:
img_size = (IMAGE_HEIGHT, IMAGE_WIDTH)
label_image = np.zeros(img_size, dtype=int)
if plane_appearance is None:
plane_appearance = np.zeros(3)
face_coords = np.array([
[0, 0],
[0, range_image.shape[0]],
[range_image.shape[1], 0],
[range_image.shape[1], range_image.shape[0]]
])
zBufferConvexPolygon(
range_image, label_image, camera_pose, camera_params,
face_coords_image=face_coords, plane=plane,
face_label=0
)
# Render plane appearance
if rgb_image is None:
rgb_shape = label_image.shape + plane_appearance.shape
rgb_image = np.zeros(rgb_shape)
rgb_image[:,:] = plane_appearance
return rgb_image, range_image, label_image
def renderComponent(
state, component_idx, component_pose=None, img_type=None,
camera_pose=None, camera_params=None, block_colors=None,
range_image=None, label_image=None, rgb_image=None,
crop_rendered=False, in_place=True):
""" Render a component of the state.
Parameters
----------
state : blockassembly.BlockAssembly
Spatial assembly that should be rendered.
component_index : int
Index of the sub-component of the spatial assembly that should be
rendered.
component_pose : tuple(numpy array of shape (3,3), numpy array of shape (3,))
This component's pose with respect to the canonical retinal coordinate
frame, represented as a rotation matrix and translation vector :math:`(R, t)`.
Units are expressed in millimeters.
img_type : {'rgb', 'depth', 'label', None}
If None, this function returns all three images. Otherwise it returns
the specified image only.
camera_pose : numpy array of float, shape (4, 4)
The camera's pose with respect to the world coordinate frame. This is
a rigid motion (R, t), represented as a homogeneous matrix.
camera_params : numpy array of float, shape (3, 4)
The camera's intrinsic parameters.
block_colors : numpy array, shape (num_blocks + 1, 3)
Each row is the color of a block. Note that the first row corresponds
to the background.
crop_rendered : bool, optional
If True, the rendered image is cropped to a bounding box around the
nonzero portion. Default is True.
range_image : numpy array of float, shape (img_height, img_width), optional
Pre-existing Z_buffer. Each pixel value is the distance from the camera
in mm.
label_image : numpy array of int, shape (img_height, img_width), optional
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
rgb_image : numpy array of float, shape (img_height, img_width, 3), optional
Pre-existing RGB image.
in_place : bool, optional
If True, this function modifies the pre-existing images when rendering.
Otherwise it makes a local copy.
Returns
-------
rgb_image : numpy array of float, shape (img_height, img_width, 3)
Color image in RGB format.
range_image : numpy array of float, shape (img_height, img_width)
The Z_buffer. Each pixel value is the distance from the camera in mm.
label_image : numpy array of int, shape (img_height, img_width)
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
"""
if not in_place:
if rgb_image is not None:
rgb_image = rgb_image.copy()
if range_image is not None:
range_image = range_image.copy()
if label_image is not None:
label_image = label_image.copy()
if component_pose is None:
R = np.eye(3)
t = np.zeros(3)
else:
R, t = component_pose
if state.blocks:
s = state.copy()
s.centerComponent(component_idx, zero_at='centroid')
s.centerComponent(component_idx, zero_at='smallest_z')
s.setComponentPose(component_idx, R, t)
range_image, label_image = zBufferComponent(
s, component_idx, camera_pose, camera_params,
range_image=range_image, label_image=label_image
)
if crop_rendered:
range_image = cropImage(range_image)
label_image = cropImage(label_image)
else:
range_image = np.zeros((1, 1), dtype=float)
label_image = np.zeros((1, 1), dtype=int)
# Render block appearances using the label image and block colors
if rgb_image is None:
rgb_shape = label_image.shape + block_colors.shape[1:2]
rgb_image = np.zeros(rgb_shape, dtype=block_colors.dtype)
if label_image.any():
for i in range(1, label_image.max() + 1):
obj_patch = label_image == i
rgb_image[obj_patch, :] = block_colors[i, :]
if img_type == 'rgb':
return rgb_image
elif img_type == 'depth':
return range_image
elif img_type == 'label':
return label_image
return rgb_image, range_image, label_image
def zBufferComponent(
state, component_index, camera_pose, camera_params,
range_image=None, label_image=None):
""" Render depth and label images of a component of a spatial assembly.
Parameters
----------
state : blockassembly.BlockAssembly
Spatial assembly that should be rendered.
component_index : int
Index of the sub-component of the spatial assembly that should be
rendered.
camera_pose : numpy array of float, shape (4, 4)
The camera's pose with respect to the world coordinate frame. This is
a rigid motion (R, t), represented as a homogeneous matrix.
camera_params : numpy array of float, shape (3, 4)
The camera's intrinsic parameters.
range_image : numpy array of float, shape (img_height, img_width), optional
Pre-existing Z_buffer. Each pixel value is the distance from the camera
in mm.
label_image : numpy array of int, shape (img_height, img_width), optional
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
Returns
-------
range_image : numpy array of float, shape (img_height, img_width)
The Z-buffer. Each pixel value is the distance from the camera in mm.
label_image : numpy array of int, shape (img_height, img_width)
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
"""
if range_image is None:
img_size = (IMAGE_HEIGHT, IMAGE_WIDTH)
range_image = np.full(img_size, np.inf)
if label_image is None:
img_size = (IMAGE_HEIGHT, IMAGE_WIDTH)
label_image = np.zeros(img_size, dtype=int)
component = state.connected_components[component_index]
for index in component:
block = state.getBlock(index)
zBufferBlock(block, range_image, label_image, camera_pose, camera_params)
range_image[np.isinf(range_image)] = 0 # np.nan
return range_image, label_image
def zBufferBlock(block, range_image, label_image, camera_pose, camera_params):
""" Draw a block to the Z_buffer.
Parameters
----------
block : blockassembly.Block
The block to render.
range_image : numpy array of float, shape (img_height, img_width)
The Z_buffer. Each pixel value is the distance from the camera in mm.
label_image : numpy array of int, shape (img_height, img_width)
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
camera_pose : numpy array of float, shape (4, 4)
The camera's pose with respect to the world coordinate frame. This is
a rigid motion (R, t), represented as a homogeneous matrix.
camera_params : numpy array of float, shape (3, 4)
The camera's intrinsic parameters.
"""
img_h, img_w = range_image.shape
block_label = block.index + 1
vertex_coords = block.metric_vertices
for i, face_coords in enumerate(makeFaces(vertex_coords)):
zBufferConvexPolygon(
range_image, label_image, camera_pose, camera_params,
face_coords_world=face_coords, face_label=block_label
)
def makeFaces(vertex_coords):
""" Construct a cube's faces from its vertices.
Parameters
----------
vertex_coords : numpy array of float, shape (num_vertices, 3)
Vertex coordinates in the world frame.
Returns
-------
    faces : generator(numpy array of float, shape (4, 3))
        Coordinates of each face. For each face, coordinates are arranged in
        counter-clockwise order.
"""
vertex_indices = (
[0, 1, 2, 3],
[0, 1, 5, 4],
[1, 2, 6, 5],
[3, 2, 6, 7],
[4, 5, 6, 7],
[0, 3, 7, 4]
)
faces = (vertex_coords[idxs, :] for idxs in vertex_indices)
return faces
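# --- Illustrative usage sketch (editor's addition) ---
# A hypothetical unit cube whose vertex ordering is consistent with the index
# lists above (vertices 0-3 form one face, 4-7 the opposite face, with vertex
# i + 4 adjacent to vertex i); makeFaces then yields six (4, 3) corner arrays.
def _example_unit_cube_faces():
    corners = np.array(
        [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
         [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]],
        dtype=float,
    )
    return list(makeFaces(corners))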
def zBufferConvexPolygon(
range_image, label_image, camera_pose, camera_params,
face_coords_world=None, face_coords_image=None, plane=None,
face_label=0):
""" Draw a convex polygon to the Z-buffer.
Parameters
----------
range_image : numpy array of float, shape (img_height, img_width)
The Z_buffer. Each pixel value is the distance from the camera in mm.
label_image : numpy array of int, shape (img_height, img_width)
Label image corresponding to the Z-buffer. Each pixel value is the label
of the object that was projected onto the pixel.
camera_pose : numpy array of float, shape (4, 4)
The camera's pose with respect to the world coordinate frame. This is
a rigid motion (R, t), represented as a homogeneous matrix.
camera_params : numpy array of float, shape (3, 4)
The camera's intrinsic parameters.
face_coords_world : numpy array of float, shape (num_face_points, 3), optional
Coordinates of the vertices of this face, in the world reference frame.
face_coords_image : numpy array of float, shape (num_face_points, 2), optional
Coordinates of the vertices of this face, in the image reference frame.
plane : geometry.Plane, optional
A Plane object whose parameters are expressed in the camera reference
frame.
face_label : int, optional
The integer label associated with this face.
"""
if face_coords_image is None:
# Project face vertices from world coordinates to pixel coordinates
proj = geometry.homogeneousMatrix(np.eye(3), np.zeros(3))
face_coords_camera, _ = geometry.projectHomogeneous(
geometry.homogeneousVector(face_coords_world) @ m.np.transpose(camera_pose)
)
face_coords_image, _ = geometry.projectHomogeneous(
geometry.homogeneousVector(face_coords_camera) @ m.np.transpose(camera_params @ proj)
)
face_coords_image = utils.roundToInt(face_coords_image)
elif face_coords_world is None:
        # Construct face_coords_camera in a way that allows
# geometry.slopeIntercept to compute the plane parameters it needs.
face_coords_camera = np.zeros((3, 3))
face_coords_camera[0, :] = plane._t
face_coords_camera[1, :] = plane._t + plane._U[:, 0]
face_coords_camera[2, :] = plane._t + plane._U[:, 0] + plane._U[:, 1]
else:
err_str = "This function requires either face_coords_image or face_coords_world"
raise ValueError(err_str)
bounding_box = geometry.boundingBox(face_coords_image, range_image.shape)
try:
pixel_in_hull = geometry.in_hull(bounding_box, face_coords_image)
except QhullError:
return
image_pixels = bounding_box[pixel_in_hull,:]
if image_pixels.shape[0] == 1:
# msg_str = 'Only one pixel in object image: {}'.format(image_pixels)
# logger.warn(msg_str)
return
# Backproject each pixel in the face to its location in camera coordinates
n, b = geometry.slopeIntercept(face_coords_camera)
metric_coords_camera = geometry.backprojectIntoPlane(image_pixels, n, b, camera_params)
# Remove any points that are occluded by another face
z_computed = metric_coords_camera[:,2]
rows = image_pixels[:,1]
cols = image_pixels[:,0]
computed_is_nearer = z_computed < range_image[rows, cols]
rows = rows[computed_is_nearer]
cols = cols[computed_is_nearer]
z_computed = z_computed[computed_is_nearer]
# Write to the Z-buffer and label image
range_image[rows, cols] = z_computed
label_image[rows, cols] = face_label
```
|
{
"source": "jdjong/TicTacToePython",
"score": 3
}
|
#### File: TicTacToePython/tests/test_game.py
```python
import sys
sys.path.append('src/')
import pytest
import classes
@pytest.mark.parametrize(
'player1WinningInputSequence', [
["1", "2", "3", "4", "5", "6", "7"],
["1", "2", "4", "3", "7"],
["1", "2", "3", "5", "9", "7", "8", "4", "6"], # board is full
]
)
def test_player1_is_winner(player1WinningInputSequence):
player1 = classes.Player("Henk", "X")
player2 = classes.Player("Ali", "O")
player1.otherPlayer = player2
player2.otherPlayer = player1
game = classes.Game(player1, player2)
def mock_input(s):
return player1WinningInputSequence.pop(0)
classes.input = mock_input
game.play()
assert game.getWinner() == player1
@pytest.mark.parametrize(
'player2WinningInputSequence', [
["2", "1", "3", "4", "5", "8", "9", "7"],
["1", "2", "4", "5", "9", "8"],
]
)
def test_player2_is_winner(player2WinningInputSequence):
player1 = classes.Player("Henk", "X")
player2 = classes.Player("Ali", "O")
player1.otherPlayer = player2
player2.otherPlayer = player1
game = classes.Game(player1, player2)
def mock_input(s):
return player2WinningInputSequence.pop(0)
classes.input = mock_input
game.play()
assert game.getWinner() == player2
@pytest.mark.parametrize(
'noWinnerInputSequence', [
["5", "1", "9", "6", "8", "7", "4", "2", "3"],
]
)
def test_no_winner(noWinnerInputSequence):
player1 = classes.Player("Henk", "X")
player2 = classes.Player("Ali", "O")
player1.otherPlayer = player2
player2.otherPlayer = player1
game = classes.Game(player1, player2)
def mock_input(s):
return noWinnerInputSequence.pop(0)
classes.input = mock_input
game.play()
    assert game.getWinner() is None
def teardown_function(function):
    # Called by pytest after each test case in this module; it reverts
    # classes.input back to the builtin input that the tests replaced with a mock
    classes.input = input
```
|
{
"source": "jdjuli/jdjuli.github.io",
"score": 3
}
|
#### File: jdjuli/jdjuli.github.io/HTTPS_server.py
```python
from http.server import HTTPServer, SimpleHTTPRequestHandler
import ssl
import os
import re
os.chdir("./docs")
class MyRequestHandler(SimpleHTTPRequestHandler):
def translate_path(self, path):
regex = re.compile(r"\/vr-programming(\/?)$")
if self.path.startswith('/vr-programming'):
if regex.match(self.path):
return SimpleHTTPRequestHandler.translate_path(self, '/index.html')
else:
return SimpleHTTPRequestHandler.translate_path(self, path[len('/vr-programming'):])
else:
return SimpleHTTPRequestHandler.translate_path(self, path)
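# --- Illustrative note (editor's addition): example path rewrites ---
# '/vr-programming' or '/vr-programming/' -> served as '/index.html'
# '/vr-programming/app.js'                -> served as '/app.js'
# any other path                          -> served unchanged from ./docs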
httpd = HTTPServer(('0.0.0.0', 4443), MyRequestHandler)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile='../cert.pem', keyfile="../key.pem")
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()
```
|
{
"source": "jdjustdata/jobs-by-neighborhood",
"score": 2
}
|
#### File: apps/business/forms.py
```python
from django.forms import ModelForm, EmailInput, URLInput, TextInput
from django.utils.translation import ugettext_lazy as _
from datetime import date
from models import Business, Location
from ..app_form_templates import USPhoneNumberMultiWidget
def today_as_string():
today = date.today().isoformat()
return today
class NewBusiness(ModelForm):
def __init__(self, data=None, *args, **kwargs):
super(NewBusiness, self).__init__(data, *args, **kwargs)
class Meta:
model = Business
fields = [
'name',
'industry',
'subindustry',
'website',
'email',
'phone',
'primary_poc',
'poc_role'
]
widgets = {
'email': EmailInput(attrs={'placeholder': _('<EMAIL>')}),
'website': URLInput(attrs={'placeholder': _('http://www.webaddress.com')}),
'phone': USPhoneNumberMultiWidget(),
'poc_role': TextInput(attrs={'placeholder': _('E.g., Manager, Human Resources Manager')})
}
class NewLocation(ModelForm):
def __init__(self, data=None, *args, **kwargs):
super(NewLocation, self).__init__(data, *args, **kwargs)
class Meta:
model = Location
fields = [
'name',
'is_primary',
'street',
'city',
'state',
'zipcode',
'phone'
]
widgets = {
'phone': USPhoneNumberMultiWidget()
}
```
|
{
"source": "jdjxjnsn/besso",
"score": 2
}
|
#### File: besso/Db/welcome_sql.py
```python
from sqlalchemy import (
BigInteger,
Boolean,
Column,
LargeBinary,
Numeric,
String,
UnicodeText,
)
from Db import SESSION, Base
import os
class Welcome(Base):
__tablename__ = "welcome"
chat_id = Column(Numeric, primary_key=True)
file_id = Column(String)
msg_type = Column(String)
msg_content = Column(String)
def __init__(self, chat_id, msg_type, msg_content, file_id):
self.chat_id = chat_id
self.msg_type = msg_type
self.file_id = file_id
self.msg_content = msg_content
Welcome.__table__.create(checkfirst=True)
def getWelcomeSettings(chat_id):
try:
return SESSION.query(Welcome).filter(Welcome.chat_id == chat_id).one()
except:
return None
finally:
SESSION.close()
def addWelcomeSetting(chat_id, msg_type, msg_content="", file_id=""):
addwel = SESSION.query(Welcome).get(chat_id)
if addwel:
addwel.msg_type = msg_type
addwel.msg_content = msg_content
try:
os.remove(addwel.file_id)
except Exception as e:
print("addWelcomeSetting : %s" % (e))
addwel.file_id = file_id
else:
addwel = Welcome(chat_id, msg_type, msg_content, file_id)
SESSION.add(addwel)
SESSION.commit()
def remWelcomeSetting(chat_id):
remwel = SESSION.query(Welcome).get(chat_id)
if remwel:
SESSION.delete(remwel)
SESSION.commit()
```
#### File: jdjxjnsn/besso/main.py
```python
import os
os.system("sudo service redis-server start")
from telethon import TelegramClient, events, Button, extensions, functions, types
from os.path import dirname, realpath, join
import re
import asyncio
import datetime
from utilities import utilities
import json
loop = asyncio.get_event_loop()
utilities.client = None
def sort_key(p):
return p["name"]
def run_client():
utilities.config = utilities.get_config()
config = utilities.config
utilities.client = TelegramClient(
"sessions_bot", config["api_id"], config["api_hash"], loop=loop
)
utilities.client.start()
utilities.load_plugins()
utilities.plugins.sort(key=sort_key)
utilities.public_plugins.sort(key=sort_key)
run_client()
from Db.mute_sql import getMutedUser, remMuteUser
from Db.dev_sql import getDevsUsers
for dev in getDevsUsers():
utilities.devs.append(int("%.0f" % dev.user_id))
async def saveBotId():
me = await utilities.client.get_me()
utilities.prLightGray("name : " + me.first_name)
if me.username:
utilities.prYellow("username : https://t.me/" + me.username)
if me.bot:
utilities.prGreen("botType : API")
else:
utilities.prGreen("botType : CLI")
utilities.prBlack("---------------------------")
utilities.config["bot_id"] = (me).id
utilities.config["isbot"] = (me).bot
utilities.save_config()
@utilities.client.on(events.ChatAction)
async def my_event_handler(event):
try:
if event.user_joined or event.user_added:
from_user = event.added_by
target_user = event.user
plugins = utilities.plugins
for plugin in plugins:
if "added" not in plugin:
continue
if "bot" in plugin and utilities.config["isbot"] != plugin["bot"]:
if plugin["bot"]:
await event.reply("for bot-api only")
else:
await event.reply("for bot-cli only")
return
# if plugin["sudo"]:
# if check_sudo(event.sender_id):
# return_values = await plugin["added"](
# event,
# event.chat_id,
# 0
# if (target_user.id in utilities.user_steps)
# else utilities.user_steps[target_user.id]["step"],
# crons=utilities.crons,
# )
# for return_value in return_values:
# if return_value:
# await (return_value)
# else:
# await event.reply("for sudores")
# else:
return_values = await plugin["added"](
event,
event.chat_id,
0
if (target_user.id not in utilities.user_steps)
else utilities.user_steps[target_user.id]["step"],
)
if return_values:
for return_value in return_values:
await (return_value)
except Exception as e:
print("chat_handler : %s" % (e))
@utilities.client.on(events.NewMessage)
async def command_interface(event):
try:
message = event.message
prefix = "send"
if message.is_reply:
prefix = "reply"
if message.out:
return
from_id = message.from_id
to_id = message.chat_id
if event.is_private:
pr = utilities.prGreen
else:
pr = utilities.prPurple
if message.raw_text and not message.via_bot_id:
stri = (
str(from_id)
+ ": "
+ prefix
+ " text message : "
+ message.raw_text
+ " to "
+ str(to_id)
)
pr(stri)
elif message.media and not message.via_bot_id:
pr(str(from_id) + ": " + prefix + " media message to " + str(to_id))
elif message.via_bot_id:
pr(str(from_id) + ": " + prefix + " inline message to " + str(to_id))
else:
utilities.prRed(
str(from_id) + ": " + prefix + " unknown message to " + str(to_id)
)
except Exception as e:
print(str(e))
@utilities.client.on(events.MessageEdited)
@utilities.client.on(events.NewMessage)
async def my_event_handler(event):
try:
message = event.message
chat_id = event.chat_id
from_id = event.sender_id
plugins = utilities.plugins
mutedUsers = getMutedUser(chat_id, from_id)
if mutedUsers:
remMuteUser(chat_id, from_id)
if message.raw_text:
matches = re.findall("^[#/!](cancel)$", event.raw_text, re.IGNORECASE)
if len(matches) > 0 and matches[0] == "cancel":
if from_id in utilities.user_steps:
del utilities.user_steps[from_id]
return await message.reply("Canceling successfully !")
if from_id in utilities.user_steps:
for plugin in plugins:
if plugin["name"] == utilities.user_steps[from_id]["name"]:
for pattern in plugin["patterns"]:
if re.search(
pattern, event.raw_text, re.IGNORECASE | re.MULTILINE
):
matches = re.findall(
pattern, event.raw_text, re.IGNORECASE | re.DOTALL
)
break
else:
matches = ["xxxxxxxxxx"]
if plugin["sudo"]:
if utilities.check_sudo(from_id):
return_values = await plugin["run"](
message,
matches[0],
chat_id,
utilities.user_steps[from_id]["step"],
)
for return_value in return_values:
if return_value:
try:
await (return_value)
except Exception as e:
print("step :" + str(e))
else:
return
else:
return_values = await plugin["run"](
message,
matches[0],
chat_id,
utilities.user_steps[from_id]["step"],
)
if return_values:
for return_value in return_values:
await (return_value)
break
return
elif message.raw_text is not None and message.raw_text != "":
if "flood" not in utilities.config:
utilities.config["flood"] = True
utilities.save_config()
if utilities.config["flood"]:
pv = utilities.red.get("flood-" + str(message.sender_id)) or 0
# print("flood-" + str(message.sender_id), pv)
if pv == 0:
utilities.flood[message.sender_id] = True
utilities.red.set(
"flood-" + str(message.sender_id), (int(pv) + 1), ex=1
)
if (int(pv) + 1) == 5 and utilities.flood[message.sender_id]:
await message.reply("please do not flood...")
utilities.prRed(
str(message.sender_id) + " : is causing flood please stop..."
)
utilities.flood[message.sender_id] = False
return
elif (int(pv) + 1) >= 5:
return
for plugin in plugins:
for pattern in plugin["patterns"]:
if re.search(pattern, event.raw_text, re.IGNORECASE | re.MULTILINE):
if (
"bot" in plugin
and utilities.config["isbot"] != plugin["bot"]
):
if plugin["bot"]:
await event.reply("for bot-api only")
else:
await event.reply("for bot-cli only")
return
matches = re.findall(
pattern,
event.raw_text,
re.IGNORECASE | re.MULTILINE | re.DOTALL,
)
if plugin["sudo"]:
if utilities.check_sudo(event.sender_id):
return_values = await plugin["run"](
event, matches[0], chat_id, 0, crons=utilities.crons
)
for return_value in return_values:
if return_value:
try:
await (return_value)
except Exception as e:
print("text main :" + str(e))
else:
continue
else:
return_values = await plugin["run"](
event, matches[0], chat_id, 0, crons=utilities.crons
)
if return_values:
for return_value in return_values:
await (return_value)
elif message.media is not None or message.file is not None:
match = ""
if message.photo:
match = "__photo__"
if message.gif:
match = "__gif__"
for plugin in plugins:
for pattern in plugin["patterns"]:
if re.search(pattern, match, re.IGNORECASE | re.MULTILINE):
matches = re.findall(pattern, match, re.IGNORECASE)
if plugin["sudo"]:
if utilities.check_sudo(event.sender_id):
return_values = await plugin["run"](
event, matches[0], chat_id, 0
)
for return_value in return_values:
try:
await (return_value)
except Exception as e:
print("media :" + str(e))
else:
return
else:
return_values = await plugin["run"](
event, matches[0], chat_id, 0
)
if return_values:
for return_value in return_values:
await (return_value)
except Exception as e:
print(str(e))
await event.reply("Error : " + str(e))
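# --- Illustrative note (editor's addition): expected plugin structure ---
# The dispatcher above looks up "patterns", "run", and "sudo" on every loaded
# plugin (plus optional keys such as "bot", "added", "inlineQuery"/"inlineData",
# "callbackQuery"/"callbackData", and "cron" used by the other handlers).
# A hypothetical minimal plugin module would end with a dict shaped like:
#
#   plugin = {
#       "name": "ping",
#       "desc": "Replies with pong.",
#       "usage": ["/ping"],
#       "run": run,  # async def run(message, matches, chat_id, step, crons=None)
#       "sudo": False,
#       "patterns": ["^[!/#]ping$"],
#   }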
@utilities.client.on(events.InlineQuery)
async def my_event_handler(event):
builder = event.builder
try:
plugins = utilities.plugins
for plugin in plugins:
if "inlineQuery" not in plugin:
continue
for pattern in plugin["inlineData"]:
if re.search(pattern, str(event.text), re.IGNORECASE | re.MULTILINE):
matches = re.findall(
pattern,
str(event.text),
re.IGNORECASE | re.MULTILINE | re.DOTALL,
)
if plugin["sudo"]:
if utilities.check_sudo(event.sender_id):
return_values = await plugin["inlineQuery"](
event,
matches[0],
event.chat_id,
0
if (event.sender_id not in utilities.user_steps)
else utilities.user_steps[event.sender_id]["step"],
crons=utilities.crons,
)
for return_value in return_values:
if return_value:
try:
await (return_value)
except Exception as e:
print("inline :" + str(e))
else:
await event.answer(
[
builder.article(
"for sudors only", text="for sudors only"
)
]
)
else:
return_values = await plugin["inlineQuery"](
event,
matches[0],
event.chat_id,
0
if (event.sender_id not in utilities.user_steps)
else utilities.user_steps[event.sender_id]["step"],
)
if return_values:
for return_value in return_values:
await (return_value)
except Exception as e:
print(str(e))
@utilities.client.on(events.CallbackQuery)
async def handler(event):
try:
plugins = utilities.plugins
for plugin in plugins:
if "callbackQuery" not in plugin:
continue
for pattern in plugin["callbackData"]:
if re.search(
pattern, str(event.data.decode()), re.IGNORECASE | re.MULTILINE
):
matches = re.findall(
pattern,
str(event.data.decode()),
re.IGNORECASE | re.MULTILINE | re.DOTALL,
)
if plugin["sudo"]:
if utilities.check_sudo(event.sender_id):
return_values = await plugin["callbackQuery"](
event,
matches[0],
event.chat_id,
0
if (event.sender_id not in utilities.user_steps)
else utilities.user_steps[event.sender_id]["step"],
crons=utilities.crons,
)
for return_value in return_values:
if return_value:
try:
await (return_value)
except Exception as e:
print("callback :" + str(e))
else:
await event.answer("Sudors only!")
else:
return_values = await plugin["callbackQuery"](
event,
matches[0],
event.chat_id,
0
if (event.sender_id not in utilities.user_steps)
else utilities.user_steps[event.sender_id]["step"],
)
if return_values:
for return_value in return_values:
await (return_value)
except Exception as e:
print(str(e))
async def clock():
while True:
for _data in utilities.red.lrange("crons", 0, -1):
data = json.loads(_data)
if datetime.datetime.fromisoformat(data["time"]) < datetime.datetime.now():
for plugin in utilities.plugins:
if "cron" in plugin and plugin["name"] == data["name"]:
return_values = await plugin["cron"](data)
for return_value in return_values:
if return_value:
try:
await (return_value)
except Exception as e:
print("clock :" + str(e))
utilities.red.lrem("crons", 0, _data)
if len(utilities.crons) != 0:
for data in utilities.crons:
if data["time"] < datetime.datetime.now():
for plugin in utilities.plugins:
if "cron" in plugin and plugin["name"] == data["name"]:
return_values = await plugin["cron"](data)
for return_value in return_values:
if return_value:
try:
await (return_value)
except Exception as e:
print("clock local :" + str(e))
utilities.crons.remove(data)
await asyncio.sleep(1)
if "updateChat" in utilities.config:
loop.create_task(
utilities.client.send_message(
            utilities.config["updateChat"], "The bot restarted successfully."
)
)
del utilities.config["updateChat"]
utilities.save_config()
loop.create_task(clock())
loop.create_task(saveBotId())
utilities.prCyan("Started Receiving Messages ...")
utilities.client.run_until_disconnected()
```
#### File: besso/nudity/__init__.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
class Nudity:
def __init__(self):
base_path = os.path.dirname(os.path.abspath(__file__))
model_file = os.path.abspath(base_path + "/files/retrained_graph.pb")
input_name = "import/input"
output_name = "import/final_result"
self.input_height = 224
self.input_width = 224
self.input_mean = 128
self.input_std = 128
self.graph = self.load_graph(model_file)
self.input_operation = self.graph.get_operation_by_name(input_name)
self.output_operation = self.graph.get_operation_by_name(output_name)
def read_tensor_from_image_file(
self, file_name, input_height=299, input_width=299, input_mean=0, input_std=255
):
with tf.compat.v1.Session() as sess:
tf.compat.v1.disable_eager_execution()
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.io.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(
file_reader, channels=3, name="png_reader"
)
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(
tf.image.decode_gif(file_reader, name="gif_reader")
)
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
else:
image_reader = tf.image.decode_jpeg(
file_reader, channels=3, name="jpeg_reader"
)
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.compat.v1.image.resize_bilinear(
dims_expander, [input_height, input_width]
)
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
result = sess.run(normalized)
return result
# def load_graph(self, model_file):
# graph = tf.Graph()
# graph_def = tf.GraphDef()
# with open(model_file, "rb") as f:
# graph_def.ParseFromString(f.read())
# with graph.as_default():
# tf.import_graph_def(graph_def)
# return graph
def load_graph(self, model_file):
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def score(self, file_name):
t = self.read_tensor_from_image_file(
file_name,
input_height=self.input_height,
input_width=self.input_width,
input_mean=self.input_mean,
input_std=self.input_std,
)
with tf.compat.v1.Session(graph=self.graph) as sess:
results = sess.run(
self.output_operation.outputs[0], {self.input_operation.outputs[0]: t}
)
results = np.squeeze(results)
return results[1].item()
def has(self, file_name):
return self.score(file_name) >= 0.8
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
args = parser.parse_args()
if not args.image:
print(
"--image is missing. please set image to be processed with --image='path'"
)
return
nudity = Nudity()
print(nudity.has(args.image))
if __name__ == "__main__":
main()
```
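A minimal usage sketch for the Nudity scorer above, assuming the package is importable as `nudity` and that the bundled files/retrained_graph.pb model is present; the image path is a placeholder.
```python
# Hypothetical usage; "photo.jpg" is a placeholder path.
from nudity import Nudity

detector = Nudity()
print(detector.score("photo.jpg"))  # probability-like value in [0, 1]
print(detector.has("photo.jpg"))    # True when the score is >= 0.8
```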
#### File: besso/plugins/execTest.py
```python
import asyncio
import sys
import io
import traceback
async def aexec(code, message, replied=None):
exec(
f"from utilities import utilities as u\nasync def __ex(message,replied):\n global u"
+ "".join(f"\n {l}" for l in code.split("\n"))
)
return await locals()["__ex"](message, replied)
async def run(msg, matches, chat_id, step, crons=None):
if not (msg.out):
message = await msg.reply("Please, wait...")
else:
message = msg
old_stderr = sys.stderr
old_stdout = sys.stdout
redirected_output = sys.stdout = io.StringIO()
redirected_error = sys.stderr = io.StringIO()
stdout, stderr, exc = None, None, None
if msg.is_reply and matches == "exec":
msg = await msg.get_reply_message()
if msg.raw_text:
cmd = msg.raw_text
else:
cmd = "print('Please, reply to text message.')"
elif matches[0] == "exec":
cmd = matches[1]
else:
return [message.delete()]
try:
await aexec(cmd, message, msg)
except Exception:
exc = traceback.format_exc()
stdout = redirected_output.getvalue()
stderr = redirected_error.getvalue()
sys.stdout = old_stdout
sys.stderr = old_stderr
evaluation = ""
if exc:
evaluation = exc
elif stderr:
evaluation = stderr
elif stdout:
evaluation = stdout
else:
evaluation = "Success"
final_output = "**EVAL**: `{}` \n\n **OUTPUT**: \n`{}` \n".format(cmd, evaluation)
if len(final_output) > 4000:
with io.BytesIO(str.encode(final_output)) as out_file:
out_file.name = "exec.text"
await message.reply(file=out_file)
await message.delete()
else:
await message.edit(final_output)
return []
plugin = {
"name": "",
"desc": " — — — — — — — — —.",
"usage": ["❏︙/exec <امر معرفه خطاء الملف + اسم الملف>"],
"run": run,
"sudo": True,
"patterns": ["^[!/#](exec) (.+)$", "^[!/#](exec)$"],
}
```
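A stand-alone sketch of the aexec pattern used above: the user-supplied code is indented into the body of a dynamically compiled coroutine and awaited. This simplification uses an explicit namespace dict instead of locals() and drops the bot-specific arguments.
```python
import asyncio

async def aexec_demo(code):
    """Compile `code` into the body of a coroutine and run it."""
    env = {}
    exec(
        "async def __ex():\n"
        + "".join(f"    {line}\n" for line in code.split("\n")),
        env,
    )
    return await env["__ex"]()

asyncio.run(aexec_demo("x = 2 + 2\nprint('result:', x)"))
```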
#### File: besso/plugins/externalYoutube.py
```python
import asyncio
from utilities import utilities
from requests_futures.sessions import FuturesSession
from telethon.tl.types import DocumentAttributeAudio
import requests
import urllib.parse
import re
import json
import time
import youtube_dl
import io
loop = asyncio.get_event_loop()
session = FuturesSession()
def youtube_url_validation(url):
youtube_regex = (
r"(https?://)?(www\.)?"
"(youtube|youtu|youtube-nocookie)\.(com|be)/"
"(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})"
)
youtube_regex_match = re.match(youtube_regex, url)
if youtube_regex_match:
return youtube_regex_match
return None
def downloaded_file(*factory_args, **factory_kwargs):
def last_response(resp, *args, **kwargs):
try:
async def sendMessage(msg, file):
await msg.reply(file=file)
await msg.delete()
return
msg = factory_kwargs["tp"]
loop.create_task(sendMessage(msg, resp.content))
except Exception as e:
print(str(e))
pass
return last_response
def download_big_data(*factory_args, **factory_kwargs):
def download_fetch(resp, *args, **kwargs):
try:
msg = factory_kwargs["msg"]
loop.create_task(
msg.reply("Please wait, we are uploading music to our server.")
)
f = io.BytesIO(resp.content)
f.name = "music.mp3"
loop.create_task(
utilities.client.send_file(
msg.chat_id,
f,
attributes=[DocumentAttributeAudio(120, performer="bot")],
)
)
return None
except Exception as e:
print(str(e))
return None
return download_fetch
async def sendFileComplete(msg, m):
try:
await utilities.client.send_file(msg.chat_id, m[0])
except Exception as e:
print(str(e))
if len(m) > 0:
# await msg.reply("Please, download file from [Download](" + m[0] + ")")
session.get(
m[0], hooks={"response": download_big_data(msg=msg)},
)
else:
await msg.reply("error please try again later !.")
def hook_factory(*factory_args, **factory_kwargs):
tmp_msg = []
async def callback(msg):
tmp_msg.append(await msg.reply("take a while."))
def first_response(resp, *args, **kwargs):
try:
msg = factory_kwargs["msg"]
message = factory_kwargs["message"]
v_id = factory_kwargs["id"]
url = factory_kwargs["url"]
result = json.loads(resp.content)
res = result["result"].replace("\r\n", "").replace("\\", " ")
m = re.findall(
'var k__id = "(.+)"; var video_service = "youtube";', res, re.IGNORECASE
)
if len(m) > 0:
file_url = True
cookies_temp = None
while file_url:
req = requests.post(
"https://mate03.y2mate.com/mp3Convert",
data={
"type": " youtube",
"_id": m[0],
"v_id": v_id,
"mp3_type": " 128",
},
cookies=cookies_temp,
)
cookies_temp = req.cookies
if req.text != None and req.text != "":
req_res = json.loads(req.content)
if "result" in req_res:
if (
"running"
not in req_res["result"]
.replace("\r\n", "")
.replace("\\", "")
.lower()
):
file_url = False
m = re.findall(
'<a href="(.*)" rel=',
req_res["result"],
re.IGNORECASE,
)
loop.create_task(sendFileComplete(msg, m))
if len(tmp_msg) > 0:
loop.create_task(tmp_msg[0].delete())
return
if len(tmp_msg) == 0:
loop.create_task(callback(msg))
time.sleep(3)
elif len(tmp_msg) > 1:
for i in range(0, len(tmp_msg) - 2):
print(i)
loop.create_task(tmp_msg[i].delete())
else:
loop.create_task(message.edit("error while fetching id..."))
return
pass
except Exception as e:
print(str(e))
return None
return first_response
async def extract_info(url, msg):
info = ""
try:
with youtube_dl.YoutubeDL() as ydl:
info_dict = ydl.extract_info(url, download=False)
info += "Title : " + info_dict["title"] + "\n"
info += "Uploader : " + info_dict["uploader"] + "\n"
except Exception as e:
info = "error while fetching."
await msg.edit(info)
def downloadProcess(id, message, msg):
loop.create_task(extract_info("https://www.youtube.com/watch?v=" + id, message))
url = "https://mate03.y2mate.com/mp3/ajax"
data = {
"url": "https://www.youtube.com/watch?v=" + id,
"q_auto": 0,
"ajax": 1,
}
session.post(
url,
data=data,
hooks={"response": hook_factory(message=message, msg=msg, url=url, id=id)},
)
def get_id(*factory_args, **factory_kwargs):
def first_response(html_content, *args, **kwargs):
try:
msg = factory_kwargs["msg"]
message = factory_kwargs["message"]
if html_content.text != "":
search_results = re.findall(
r"/watch\?v=(.{11})", html_content.content.decode()
)
if len(search_results) > 0:
downloadProcess(search_results[0], message, msg)
else:
loop.create_task(message.edit("not found."))
except Exception as e:
print(str(e))
return None
return first_response
async def run(msg, matches, chat_id, step, crons=None):
response = []
if matches[0] == "sfyt":
if not (msg.out):
message = await msg.reply("please wait..")
else:
message = msg
query_string = urllib.parse.urlencode({"search_query": str(matches[1])})
session.get(
"http://www.youtube.com/results?" + query_string,
hooks={"response": get_id(message=message, msg=msg)},
)
elif msg.is_reply:
if not (msg.out):
message = await msg.reply("please wait..")
else:
message = msg
msg = await msg.get_reply_message()
if msg.text:
valid = youtube_url_validation(msg.text)
if valid is not None:
id = valid.groups()[-1]
downloadProcess(id, message, msg)
return response
plugin = {
"name": "",
"desc": " — — — — — — — — —",
"usage": [
],
"run": run,
"sudo": True,
"patterns": ["^[!/#]fyt$", "^[!/#](sfyt) (.+)$"],
}
```
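A quick stand-alone check of the youtube_url_validation regex above; the last capture group carries the 11-character video id that downloadProcess consumes.
```python
import re

pattern = (
    r"(https?://)?(www\.)?"
    r"(youtube|youtu|youtube-nocookie)\.(com|be)/"
    r"(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})"
)
match = re.match(pattern, "https://www.youtube.com/watch?v=dQw4w9WgXcQ")
if match:
    print(match.groups()[-1])  # -> dQw4w9WgXcQ
```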
#### File: besso/plugins/heartAnimation.py
```python
import asyncio
from utilities import utilities
import random
async def run(message, matches, chat_id, step, crons=None):
key = ["❤️", "🧡", "💛", "💚", "💙", "💜", "🖤", "🤍", "🤎"]
if not (message.out):
message = await message.reply(str("please wait."))
if matches == "h":
random.shuffle(key)
for j in range(0, 10):
for i in key:
await message.edit(i)
await asyncio.sleep(0.3)
await message.edit("❤️")
else:
for j in range(0, 20):
text = matches + "🧡."
for i in key:
text = text.replace(i, "%s")
textsplit = text.split("%s")
random.shuffle(key)
await message.edit(text % tuple(key[: len(textsplit) - 1]))
await asyncio.sleep(0.3)
return []
plugin = {
"name": "",
"desc": " — — — — — — — — —",
"usage": [
"❏︙/h <امر قلبي> ",
],
"run": run,
"sudo": True,
"patterns": ["^[!/#]h (.*)$", "^[!/#](h)$"],
}
```
#### File: besso/plugins/lyrics.py
```python
import asyncio
import json
import _thread
from utilities import utilities
import requests
import io
import urllib.parse
from bs4 import BeautifulSoup
loop = asyncio.get_event_loop()
def get_lyrics_result(query, msg, message):
try:
from_id = msg.sender_id
headers = {
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Accept": "application/json, text/javascript, */*; q=0.01",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
"Origin": "https://www.azlyrics.com",
"Sec-Fetch-Site": "same-site",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "https://www.azlyrics.com/",
"Accept-Language": "en,ar;q=0.9,en-GB;q=0.8",
}
params = {"q": query}
response = requests.get(
"https://search.azlyrics.com/suggest.php", headers=headers, params=params
)
js = json.loads(response.content)
if len(js["songs"]) == 0:
loop.create_task(message.edit("there is no soung with this name."))
return None
utilities.user_steps[from_id] = {
"name": "lyrics",
"step": 1,
"data": js["songs"],
}
i = 1
res = "send a number of the below songs:\n"
for song in js["songs"]:
res += f"`{i}` - " + song["autocomplete"] + "\n"
i = i + 1
loop.create_task(message.edit(res))
except Exception as e:
print(str(e))
loop.create_task(message.edit("There was an error please use different name."))
return None
def chunkstring(string, length):
return (string[0 + i : length + i] for i in range(0, len(string), length))
# async def sendmsg(ch, msg):
# for c in ch:
# await msg.reply(c)
def get_lyrics(index, msg):
from_id = msg.sender_id
try:
if int(index) <= 0:
raise Exception("list index out of range")
url = utilities.user_steps[from_id]["data"][int(index) - 1]["url"]
rie = requests.get("https:" + url)
soup = BeautifulSoup(rie.content.decode(), "html.parser")
f = soup.find(
lambda tag: tag.name == "div"
and "class" in tag.attrs
and "col-xs-12 col-lg-8 text-center".split() == tag["class"]
)
fc = f.find(
lambda tag: tag.name == "div"
and " Usage of azlyrics.com content by any third-party lyrics provider is prohibited by our licensing agreement. Sorry about that. "
in tag.contents
)
if len(fc.text) >= 4096:
# ch = chunkstring(fc.text, 4096)
f = io.BytesIO(fc.text.encode())
f.name = "lyrics.txt"
loop.create_task(
utilities.client.send_file(msg.chat_id, f, force_document=True)
)
else:
loop.create_task(msg.reply(fc.text))
del utilities.user_steps[from_id]
except Exception as e:
if str(e) == "list index out of range":
loop.create_task(
msg.reply(
"please, stick with 1-"
+ str(len(utilities.user_steps[from_id]["data"]))
)
)
else:
loop.create_task(
msg.reply(
"please, send `/cancel` to cancel conversation of `lyrics plugin`"
)
)
return None
async def run(msg, matches, chat_id, step, crons=None):
if matches[0] == "lyrics":
if not (msg.out):
message = await msg.reply("please wait..")
else:
message = msg
_thread.start_new_thread(get_lyrics_result, (matches[1], msg, message))
elif step == 1:
_thread.start_new_thread(get_lyrics, (msg.text, msg))
return []
plugin = {
"name": "",
"desc": " — — — — — — — — —.",
"usage": ["❏︙/lyrics <امر عرض كلمات الغنيه >"],
"run": run,
"sudo": True,
"patterns": ["^[!/#](lyrics) (.+)$"],
}
```
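The chunkstring helper above returns a lazy generator of fixed-size slices; a small stand-alone check of its behaviour:
```python
def chunkstring(string, length):
    return (string[0 + i: length + i] for i in range(0, len(string), length))

print(list(chunkstring("abcdefghij", 4)))  # ['abcd', 'efgh', 'ij']
```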
|
{
"source": "jdk2588/microservices-in-action",
"score": 2
}
|
#### File: chapter-7/chassis/http_demo.py
```python
import json
from nameko.web.handlers import http
from werkzeug.wrappers import Response
from nameko_sentry import SentryReporter
class HttpDemoService:
name = "http_demo_service"
sentry = SentryReporter()
@http("GET", "/broken")
def broken(self, request):
raise ConnectionRefusedError()
@http('GET', '/books/<string:uuid>')
def demo_get(self, request, uuid):
data = {'id': uuid, 'title': 'The unbearable lightness of being',
'author': '<NAME>'}
return Response(json.dumps({'book': data}),
mimetype='application/json')
@http('POST', '/books')
def demo_post(self, request):
return Response(json.dumps({'book': request.data.decode()}),
mimetype='application/json')
```
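A hypothetical smoke test for the two endpoints above, assuming the service was started with `nameko run http_demo` and is listening on nameko's default 0.0.0.0:8000 web server address.
```python
import requests

# GET echoes a hard-coded book payload for the given uuid.
resp = requests.get("http://localhost:8000/books/0123456789")
print(resp.status_code, resp.json())

# POST echoes the raw request body back inside a "book" field.
resp = requests.post("http://localhost:8000/books", json={"title": "New book"})
print(resp.status_code, resp.json())
```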
#### File: feature/market/app.py
```python
import json
import datetime
import requests
from nameko.rpc import rpc
from nameko.events import EventDispatcher, event_handler
from statsd import StatsClient
from circuitbreaker import circuit
class MarketService:
name = "market_service"
statsd = StatsClient('statsd-agent', 8125,
prefix='simplebank-demo.market')
dispatch = EventDispatcher()
@event_handler("orders_service", "order_created")
@statsd.timer('request_reservation')
def place_order(self, payload):
print("service {} received: {} ... placing order to exchange".format(
self.name, payload))
# place order in stock exchange
exchange_resp = self.__place_order_exchange(payload)
# event: emit order placed event
self.__create_event("order_placed", payload)
return json.dumps({'exchange_response': exchange_resp})
@rpc
@statsd.timer('create_event')
def __create_event(self, event, payload):
print("[{}] {} emiting {} event".format(
payload, self.name, event))
return self.dispatch(event, payload)
@statsd.timer('place_order_stock_exchange')
@circuit(failure_threshold=5, expected_exception=ConnectionError)
def __place_order_exchange(self, request):
print("[{}] {} placing order to stock exchange".format(
request, self.name))
response = requests.get('https://jsonplaceholder.typicode.com/posts/1')
return json.dumps({'code': response.status_code, 'body': response.text})
```
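A minimal sketch of the circuitbreaker decorator used by __place_order_exchange, assuming the third-party circuitbreaker package: after failure_threshold consecutive ConnectionError failures the circuit opens and further calls fail fast with CircuitBreakerError instead of hitting the remote dependency.
```python
from circuitbreaker import circuit

@circuit(failure_threshold=5, expected_exception=ConnectionError)
def fetch_quote():
    # Placeholder call that always fails, to exercise the breaker.
    raise ConnectionError("exchange unreachable")

for _ in range(7):
    try:
        fetch_quote()
    except Exception as exc:  # ConnectionError first, then CircuitBreakerError once open
        print(type(exc).__name__)
```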
|
{
"source": "jdk5115/Flask",
"score": 3
}
|
#### File: Flask/hw2/model.py
```python
import sqlite3
def show_color(username):
connection = sqlite3.connect('flask_tut.db', check_same_thread=False)
cursor = connection.cursor()
cursor.execute(""" SELECT favorite_color FROM users WHERE Username='{username}' ORDER BY pk DESC;""".format(username = username))
color = cursor.fetchone()[0]
connection.commit()
cursor.close()
connection.close()
message = '{username}\'s favorite color is {color}.'.format(username=username, color=color)
return color, message
```
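The query above interpolates the username directly into the SQL string, which leaves it open to injection; a safer variant of show_color using a parameterized query might look like this (same schema assumed):
```python
import sqlite3

def show_color_safe(username):
    connection = sqlite3.connect('flask_tut.db', check_same_thread=False)
    cursor = connection.cursor()
    # The placeholder lets sqlite3 escape the value instead of string formatting.
    cursor.execute(
        "SELECT favorite_color FROM users WHERE Username=? ORDER BY pk DESC;",
        (username,),
    )
    color = cursor.fetchone()[0]
    cursor.close()
    connection.close()
    message = "{username}'s favorite color is {color}.".format(username=username, color=color)
    return color, message
```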
#### File: jdk5115/Flask/model.py
```python
import sqlite3
def my_name(username):
connection = sqlite3.connect('todo.db', check_same_thread=False)
cursor = connection.cursor()
cursor.execute(""" SELECT password FROM users WHERE username='{username}' ORDER BY pk DESC;""".format(username = username))
password = cursor.fetchone()[0]
connection.commit()
cursor.close()
connection.close()
message = '{username}\'s password is {password}.'.format(username=username, password=password)
return password, message
def check_users():
connection = sqlite3.connect('todo.db', check_same_thread=False)
cursor = connection.cursor()
cursor.execute(""" SELECT username FROM users ORDER BY pk DESC;""")
db_users = cursor.fetchall()
users = []
for i in range(len(db_users)):
person = db_users[i][0]
users.append(person)
connection.commit()
cursor.close()
connection.close()
return users
def check_pw(username):
connection = sqlite3.connect('todo.db', check_same_thread=False)
cursor = connection.cursor()
cursor.execute(""" SELECT password FROM users WHERE username='{username}' ORDER BY pk DESC;""".format(username=username))
password = cursor.fetchone()[0]
connection.commit()
cursor.close()
connection.close()
return password
def signup(username, password, favorite_color):
connection = sqlite3.connect('todo.db', check_same_thread=False)
cursor = connection.cursor()
cursor.execute(""" SELECT password FROM users WHERE username='{username}';""".format(username=username))
exist = cursor.fetchone()
    if exist is None:
        cursor.execute(""" INSERT INTO users(username, password, favorite_color) VALUES('{username}','{password}','{favorite_color}');""".format(username=username, password=password, favorite_color=favorite_color))
        # Commit and release the connection before returning, otherwise the
        # INSERT above is never persisted (the original returned before commit).
        connection.commit()
        cursor.close()
        connection.close()
        return ('You have successfully signed up!')
    cursor.close()
    connection.close()
    return ('User already exists.')
```
|
{
"source": "jdk5115/pygubu",
"score": 3
}
|
#### File: pygubu/widgets/pathchooserinput.py
```python
from __future__ import unicode_literals
try:
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog
except:
import Tkinter as tk
import ttk
import tkFileDialog as filedialog
class PathChooserInput(ttk.Frame):
""" Allows to choose a file or directory.
Generates <<PathChooserPathChanged>> event when the path is changed.
Dialog options:
initialdir: str
filetypes: iterable
title: str
mustexist: str
Usage Example:
# Choose File:
pcifile = PathChooserInput(framex)
pcifile.config(initialdir='/home', title='Choose a file:', type='file')
pcifile.config(filetypes=[('text files-', '.txt'), ('uifiles', '.ui')])
pcifile.pack(fill='x', side='top')
# Choose directory:
pcidir = PathChooserInput(framex)
pcidir.config(initialdir='/usr/local', mustexist='true',
title='Choose a directory:', type='directory')
pcidir.pack(fill='x', side='top')
"""
FILE = 'file'
DIR = 'directory'
def __init__(self, master=None, **kw):
ttk.Frame.__init__(self, master, **kw)
self._choose = self.FILE
self._oldvalue = ''
self._state = 'normal'
self._fdoptions = {
'filetypes': tuple(),
'initialdir': None,
'mustexist': False,
'title': None,
}
# subwidgets
self.entry = o = ttk.Entry(self, state=self._state)
o.grid(row=0, column=0, sticky='ew')
o.bind('<KeyPress>', self.__on_enter_key_pressed)
o.bind('<FocusOut>', self.__on_focus_out)
self.folder_button = o = ttk.Button(
self,
text='▶',
command=self.__on_folder_btn_pressed,
width=4,
state=self._state)
o.grid(row=0, column=1, padx=2)
#self.rowconfigure(0, weight = 0)
self.columnconfigure(0, weight = 1)
def configure(self, cnf=None, **kw):
args = tk._cnfmerge((cnf, kw))
key = 'type'
if key in args:
self._choose = args[key]
del args[key]
key = 'image'
if key in args:
self.folder_button.configure(image=args[key])
del args[key]
key = 'path'
if key in args:
self.entry.delete(0, 'end')
self.entry.insert(0, args[key])
self._generate_changed_event()
del args[key]
key = 'textvariable'
if key in args:
self.entry.configure(textvariable=args[key])
self._generate_changed_event()
del args[key]
key = 'state'
if key in args:
value = args[key]
self.entry.config(state=value)
if value in ('disabled', 'readonly'):
self.folder_button.config(state='disabled')
else:
self.folder_button.config(state=value)
del args[key]
# dialog options
for key in tuple(args.keys()):
if key in self._fdoptions:
self._fdoptions[key] = args[key]
args.pop(key)
ttk.Frame.configure(self, args)
config = configure
def cget(self, key):
option = 'type'
if key == option:
return self._choose
option = 'image'
if key == option:
return self.folder_button.cget(key)
option = 'path'
if key == option:
return self.entry.get()
option = 'textvariable'
if key == option:
return self.entry.cget(option)
option = 'state'
if key == option:
return self.entry.cget(option)
# dialog options
if key in self._fdoptions:
return self._fdoptions[key]
return ttk.Frame.cget(self, key)
__getitem__ = cget
def _is_changed(self):
# print(repr(self._oldvalue), ':', repr(self.entry.get()))
if self._oldvalue != self.entry.get():
return True
return False
def _generate_changed_event(self):
if self._is_changed():
self._oldvalue = self.entry.get()
self.event_generate('<<PathChooserPathChanged>>')
def __on_enter_key_pressed(self, event):
key = event.keysym
if key in ('Return','KP_Enter'):
self._generate_changed_event()
def __on_focus_out(self, event):
self._generate_changed_event()
def __on_folder_btn_pressed(self):
fname = None
fdoptions = self._fdoptions.copy()
if fdoptions['initialdir'] is None:
fdoptions['initialdir'] = self.cget('path')
if self._choose == self.FILE:
fdoptions.pop('mustexist')
fname = filedialog.askopenfilename(**fdoptions)
else:
fdoptions.pop('filetypes')
fname = filedialog.askdirectory(**fdoptions)
if fname:
self.configure(path=fname)
```
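A hypothetical consumer of the widget above, reacting to the <<PathChooserPathChanged>> virtual event it generates; this assumes pygubu is installed so the module is importable at the path shown in the file header.
```python
import tkinter as tk
from pygubu.widgets.pathchooserinput import PathChooserInput

root = tk.Tk()
chooser = PathChooserInput(root)
chooser.config(type='file', title='Choose a file:')
chooser.pack(fill='x')

def on_path_changed(event):
    # Fired when the entry loses focus, Enter is pressed, or a file is chosen.
    print('new path:', chooser.cget('path'))

chooser.bind('<<PathChooserPathChanged>>', on_path_changed)
root.mainloop()
```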
#### File: pygubu/widgets/simpletooltip.py
```python
__all__ = ['ToolTip']
try:
import tkinter as tk
from tkinter import ttk
except:
import Tkinter as tk
import ttk
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def inside_wbbox(self, rx , ry):
bbox = self._calc_bbox(self.widget, True)
inside = False
if (bbox[0] <= rx <= bbox[2]) and (bbox[1] <= ry <= bbox[3]):
inside = True
return inside
def _calc_bbox(self, widget, screen=False):
rx = widget.winfo_x()
ry = widget.winfo_y()
if screen:
rx = widget.winfo_rootx()
ry = widget.winfo_rooty()
x2 = rx + widget.winfo_width()
y2 = ry + widget.winfo_height()
return (rx, ry, x2, y2)
def _calc_final_pos(self):
rx, ry, rcx, rcy = self._calc_bbox(self.widget, True)
w = rcx-rx
h = rcy-ry
sh = self.widget.winfo_screenheight() - 10
sw = self.widget.winfo_screenwidth() - 10
x = y = 0
for region in ('bottom', 'right', 'top', 'left'):
if region == 'bottom':
x = rx + int(w//2 * 0.2)
y = rcy + int(h//2 * 0.1)
elif region == 'right':
x = rcx + int(w//2 * 0.2)
y = ry + int(h//2 * 0.1)
elif region == 'top':
x = rx - int(w//2 * 0.4)
y = ry - int(h//2 * 0.4)
elif region == 'left':
x = rx
y = ry - 20
if x < sw and y < sh:
break
return (x, y)
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y = self._calc_final_pos()
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+{0}+{1}".format(x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", foreground="black",
relief=tk.SOLID, borderwidth=1,
font=("tahoma", "9", "normal"))
label.pack(ipadx=2)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def create(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
if __name__ == '__main__':
root = tk.Tk()
for idx in range(0, 2):
b = tk.Button(root, text='A button')
b.grid()
create(b, 'A tooltip !!')
root.mainloop()
```
#### File: pygubu/tests/test_frame.py
```python
from __future__ import print_function
import os
import sys
import unittest
try:
import tkinter as tk
import tkinter.ttk as ttk
except:
import Tkinter as tk
import ttk
pygubu_basedir = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(sys.argv[0]))))
if pygubu_basedir not in sys.path:
sys.path.insert(0, pygubu_basedir)
import pygubu
import support
class TestFrame(unittest.TestCase):
def setUp(self):
support.root_deiconify()
xmldata = """<?xml version="1.0" ?>
<interface>
<object class="ttk.Frame" id="mainwindow">
<property name="height">250</property>
<property name="padding">10</property>
<property name="width">250</property>
<property name="class_">MyCustomFrame</property>
<property name="relief">sunken</property>
<property name="style">MyFrameStyle.TFrame</property>
<property name="takefocus">1</property>
<property name="cursor">cross</property>
<bind add="" handler="on_button_click" sequence="<Button-1>"/>
<bind add="True" handler="on_button_click2" sequence="<Button-1>"/>
<layout>
<property name="row">0</property>
<property name="column">0</property>
<property name="sticky">nesw</property>
<property name="pady">10</property>
<property name="padx">5</property>
<property name="propagate">False</property>
<property name="ipady">4</property>
<property name="ipadx">2</property>
<property name="rowspan">1</property>
<property name="columnspan">2</property>
</layout>
<child>
<object class="ttk.Label" id="label">
<property name="text">label</property>
<layout>
<property name="column">0</property>
<property name="propagate">True</property>
<property name="row">1</property>
</layout>
</object>
</child>
</object>
</interface>
"""
self.builder = builder = pygubu.Builder()
builder.add_from_string(xmldata)
self.widget = builder.get_object('mainwindow')
def tearDown(self):
support.root_withdraw()
def test_class(self):
self.assertIsInstance(self.widget, ttk.Frame)
self.widget.destroy()
def test_padding(self):
tclobj = self.widget.cget('padding')[0]
padding = str(tclobj)
self.assertEqual('10', padding)
self.widget.destroy()
def test_width(self):
tclobj = self.widget.cget('width')
width = str(tclobj)
self.assertEqual('250', width)
self.widget.destroy()
def test_class_(self):
tclobj = self.widget.cget('class')
class_ = str(tclobj)
self.assertEqual('MyCustomFrame', class_)
self.widget.destroy()
def test_relief(self):
tclobj = self.widget.cget('relief')
relief = str(tclobj)
self.assertEqual(tk.SUNKEN, relief)
self.widget.destroy()
def test_style(self):
tclobj = self.widget.cget('style')
style = str(tclobj)
self.assertEqual('MyFrameStyle.TFrame', style)
self.widget.destroy()
def test_takefocus(self):
tclobj = self.widget.cget('takefocus')
takefocus = str(tclobj)
self.assertEqual('1', takefocus)
self.widget.destroy()
def test_cursor(self):
tclobj = self.widget.cget('cursor')
cursor = str(tclobj)
self.assertEqual('cross', cursor)
self.widget.destroy()
def test_layout(self):
ginfo = self.widget.grid_info()
expected = [
('row', '0'),
('column', '0'),
('sticky', 'nesw'),
('pady', '10'),
('padx', '5'),
('ipadx', '2'),
('ipady', '4'),
('rowspan', '1'),
('columnspan', '2'),
]
for k, ev in expected:
value = str(ginfo[k])
self.assertEqual(value, ev)
propagate = self.widget.grid_propagate()
self.assertEqual(None, propagate)
self.widget.destroy()
def test_child_count(self):
count = len(self.widget.children)
self.assertEqual(1, count)
self.widget.destroy()
def test_binding_dict(self):
success = []
def on_button_click(event):
success.append(1)
def on_button_click2(event):
success.append(1)
cbdic = {
'on_button_click': on_button_click,
'on_button_click2': on_button_click2
}
self.builder.connect_callbacks(cbdic)
support.simulate_mouse_click(self.widget, 5, 5)
self.widget.update_idletasks()
self.assertTrue(success)
self.widget.destroy()
def test_binding_object(self):
success = []
class AnObject:
def on_button_click(self, event):
success.append(1)
def on_button_click2(self, event):
success.append(1)
cbobj = AnObject()
self.builder.connect_callbacks(cbobj)
support.simulate_mouse_click(self.widget, 5, 5)
self.widget.update_idletasks()
self.assertTrue(success)
self.widget.destroy()
def test_binding_add(self):
success = []
def on_button_click(event):
success.append(1)
def on_button_click2(event):
success.append(1)
cbdic = {
'on_button_click': on_button_click,
'on_button_click2': on_button_click2
}
self.builder.connect_callbacks(cbdic)
support.simulate_mouse_click(self.widget, 5, 5)
self.widget.update_idletasks()
self.assertTrue(len(success) == 2)
self.widget.destroy()
if __name__ == '__main__':
unittest.main()
```
#### File: pygubu/tests/test_toplevelmenuhelper.py
```python
import os
import sys
import unittest
try:
import tkinter as tk
import tkinter.ttk as ttk
except:
import Tkinter as tk
import ttk
pygubu_basedir = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(sys.argv[0]))))
if pygubu_basedir not in sys.path:
sys.path.insert(0, pygubu_basedir)
import pygubu
import support
class TestToplevelMenuHelper(unittest.TestCase):
def setUp(self):
support.root_deiconify()
xmldata = 'test_toplevelmenuhelper.ui'
self.builder = builder = pygubu.Builder()
builder.add_from_file(xmldata)
self.widget = builder.get_object('toplevel')
self.menuhelper = builder.get_object('topmenuhelper')
self.menu = builder.get_object('topmenu')
def tearDown(self):
support.root_withdraw()
def test_class(self):
self.assertIsInstance(self.menu, tk.Menu)
self.widget.destroy()
def test_class_topmenu(self):
menu1 = self.widget.nametowidget(self.widget.cget('menu'))
self.assertEqual(menu1, self.menu)
self.widget.destroy()
```
|
{
"source": "jdk514/keras2vec",
"score": 3
}
|
#### File: jdk514/keras2vec/demo.py
```python
import numpy as np
from keras2vec.keras2vec import Keras2Vec
from keras2vec.document import Document
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
def doc_similarity(embeddings, id_1, id_2):
doc1 = embeddings[id_1].reshape(1, -1)
doc2 = embeddings[id_2].reshape(1, -1)
return cosine_similarity(doc1, doc2)[0][0] # , euclidean_distances(doc1, doc2)
if __name__ == "__main__":
color_docs = ["red yellow green blue orange violet green blue orange violet",
"blue orange green gray black teal tan blue violet gray black teal",
"blue violet gray black teal yellow orange tan white brown",
"black blue yellow orange tan white brown white green teal pink blue",
"orange pink blue white yellow black black teal tan",
"white green teal gray black pink blue blue violet gray black teal yellow",
]
animal_docs = ["cat dog rat gerbil hamster goat lamb goat cow rat dog pig",
"lamb goat cow rat dog pig dog chicken goat cat cow pig",
"pig lamb goat rat gerbil dog cat dog rat gerbil hamster goat",
"dog chicken goat cat cow pig gerbil goat cow pig gerbil lamb",
"rat hamster pig dog chicken cat lamb goat cow rat dog pig dog",
"gerbil goat cow pig gerbil lamb rat hamster pig dog chicken cat"
]
shape_docs = ["square triangle hexagon circle octagon cube",
"pyramid circle cube pentagon cylinder trapezoid",
"diamond octagon quadrilateral cylinder rectangle square",
"trapezoid cube hexagon diamond triangle circle cylinder",
"square rectangle quadrilateral octagon pentagon square"]
animal_color_docs = ['goat green rat gerbil yellow dog cat blue white',
'gerbil black pink blue lamb rat hamster gray pig dog',
'orange pink cat cow pig black teal gerbil tan',
'hamster pig orange violet dog chicken orange tan']
inference_doc = "red yellow green blue orange violet green blue orange violet"
doc_count = 0
keras_docs = []
keras_docs.extend([Document(doc_count+ix, text, ['color']) for ix, text in enumerate(color_docs)])
doc_count = len(keras_docs)
keras_docs.extend([Document(doc_count+ix, text, ['animal']) for ix, text in enumerate(animal_docs)])
doc_count = len(keras_docs)
keras_docs.extend([Document(doc_count + ix, text, ['shape']) for ix, text in enumerate(shape_docs)])
doc_count = len(keras_docs)
keras_docs.extend([Document(doc_count + ix, text, ['animal', 'color']) for ix, text in enumerate(animal_color_docs)])
# TODO: Add ability to auto-select embedding and seq_size based on data
doc2vec = Keras2Vec(keras_docs, embedding_size=24, seq_size=1)
doc2vec.build_model()
    # If the number of epochs is too low, the check at the bottom may fail!
print("Training Model:")
history = doc2vec.fit(250, verbose=1)
print("\ttraining complete!")
embeddings = doc2vec.get_doc_embeddings()
print("Beginning Evaluation:")
"""Docs 0-5 are colors while 6-11 are animals. The cosine distances for
docs from the same topic (colors/animals) should approach 1, while
disimilar docs, coming from different topics, should approach -1"""
if doc_similarity(embeddings, 2, 4) > doc_similarity(embeddings, 1, 10):
print("\t- Like topics are more similar!")
else:
print("\t- Something went wrong during training.")
"""Using the trained model we can now infer document vectors by training
against a model where only the inference layer is trainable"""
doc2vec.infer_vector(Document(0, inference_doc, ['color']), lr=.1, epochs=50)
infer_vec = doc2vec.get_infer_embedding()
infer_dist = cosine_similarity(infer_vec.reshape(1, -1), embeddings[0].reshape(1, -1))[0][0]
infer_dist = "{0:0.2f}".format(infer_dist)
print(f'\t- Document 0 has a cosine similarity of {infer_dist} between train and inferred vectors')
"""Label analysis: shape should be farther away than animal and color"""
label_embeddings = doc2vec.get_label_embeddings()
shape_vector = doc2vec.get_label_embedding('shape').reshape(1, -1)
animal_vector = doc2vec.get_label_embedding('animal').reshape(1, -1)
color_vector = doc2vec.get_label_embedding('color').reshape(1, -1)
animal_color_dist = cosine_similarity(animal_vector, color_vector)[0][0]
shape_color_dist = cosine_similarity(shape_vector, color_vector)[0][0]
if animal_color_dist > shape_color_dist:
print("\t- Label distances look good!")
else:
print("\t- Something went wrong with the labels.")
```
#### File: keras2vec/keras2vec/data_generator.py
```python
import copy
import random
import keras
import numpy as np
from keras2vec.encoder import Encoder
# TODO: Implement as a keras.utils.Sequence class
class DataGenerator(keras.utils.Sequence):
"""The DataGenerator class is used to encode documents and generate training/testing
data for a Keras2Vec instance. Currently this object is only used internally within the
Keras2Vec class and not intended for direct use.
Args:
documents (:obj:`list` of :obj:`Document`): List of documents to vectorize
"""
def __init__(self, documents, seq_size, neg_samples, batch_size=100, shuffle=True, val_gen=False):
self.doc_vocab = self.label_vocab = self.text_vocab = None
self.doc_enc = self.label_enc = self.text_enc = None
self.neg_samples = neg_samples
self.seq_size = seq_size
self.batch_size = batch_size
self.shuffle = shuffle
self.val_gen = val_gen
# TODO: Change the documents attribute to encoded documents
[doc.gen_windows(seq_size) for doc in documents]
self.documents = documents
self.build_vocabs()
self.create_encodings()
if val_gen:
tmp_indexes = list(range(len(self.documents)))
np.random.shuffle(tmp_indexes)
self.indexes = tmp_indexes[:self.batch_size]
else:
self.indexes = list(range(len(self.documents)))
def build_vocabs(self):
"""Build the vocabularies for the document ids, labels, and text of
the provided documents"""
doc_vocab = set()
label_vocab = set()
text_vocab = set([''])
for doc in self.documents:
doc_vocab.add(doc.doc_id)
label_vocab.update(doc.labels)
text_vocab.update(doc.text)
self.doc_vocab = doc_vocab
self.label_vocab = label_vocab
self.text_vocab = text_vocab
def create_encodings(self):
"""Build the encodings for each of the provided data types"""
self.doc_enc = Encoder(self.doc_vocab)
self.label_enc = Encoder(self.label_vocab)
self.text_enc = Encoder(self.text_vocab)
def get_infer_generator(self, infer_doc):
infer_gen = copy.deepcopy(self)
infer_doc.gen_windows(self.seq_size)
infer_gen.doc_vocab = set([0])
infer_gen.documents = [infer_doc]
infer_gen.batch_size = 1
infer_gen.indexes = list(range(len(infer_gen.documents)))
return infer_gen
# TODO: Replace with generator
def neg_sampling(self, window):
neg_samples = []
win_ix = int((self.seq_size - 1) / 2)
center_word = window[win_ix]
word_dict = self.text_vocab.copy()
word_dict.remove(center_word)
dict_len = len(word_dict)
for ix in range(self.neg_samples):
if len(word_dict) < 1:
break
rep_word = random.sample(word_dict, 1)[0]
word_dict.remove(rep_word)
new_win = window.copy()
new_win[win_ix] = rep_word
neg_samples.append(new_win)
return neg_samples
def encode_doc(self, doc, neg_sampling=False, num_neg_samps=3):
"""Encodes a document for the keras model
Args:
doc(Document): The document to encode
neg_sampling(Boolean): Whether or not to generate negative samples for the document
**NOTE**: Currently not implemented"""
docs = []
labels = []
words = []
outputs = []
enc_doc = self.doc_enc.transform(doc.doc_id)
enc_labels = [self.label_enc.transform(lbl) for lbl in doc.labels]
for window in doc.windows:
for label in enc_labels:
enc_words = [self.text_enc.transform(word) for word in window]
docs.append(enc_doc)
labels.append([label])
words.append(enc_words)
outputs.append(1)
if self.neg_samples > 0:
for neg_samp in self.neg_sampling(window):
for label in enc_labels:
enc_words = [self.text_enc.transform(word) for word in neg_samp]
docs.append(enc_doc)
labels.append([label])
words.append(enc_words)
outputs.append(0)
ret = (np.vstack(docs),
labels,
words,
np.vstack(outputs))
return ret
def __len__(self):
"""Denotes the number of batches per epoch"""
if self.val_gen:
return 1
return int(len(self.documents)/self.batch_size)
def __getitem__(self, index):
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
docs = [self.documents[ix] for ix in indexes]
inputs, outputs = self.__data_generation(docs)
return inputs, outputs
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.val_gen:
tmp_indexes = list(range(len(self.documents)))
np.random.shuffle(tmp_indexes)
self.indexes = tmp_indexes[:self.batch_size]
elif self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, docs):
"""Generates a single epoch of encoded data for the keras model"""
batch_docs = []
batch_labels = []
batch_words = []
batch_outputs = []
for doc in docs:
enc_doc, enc_labels, enc_words, outputs = self.encode_doc(doc)
batch_docs.append(enc_doc)
batch_labels.append(np.array(enc_labels))
batch_words.extend(enc_words)
batch_outputs.append(outputs)
if len(self.label_vocab) > 0:
inputs = [np.vstack(batch_docs),
np.vstack(batch_labels),
np.vstack(batch_words)]
else:
inputs = [np.vstack(batch_docs), np.vstack(batch_words)]
outputs = np.vstack(batch_outputs)
return inputs, outputs
```
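A stand-alone illustration of the negative-sampling idea in DataGenerator.neg_sampling above: the centre word of a context window is swapped for randomly drawn vocabulary words to produce windows labelled 0. The vocabulary and window here are toy placeholders.
```python
import random

vocab = {"red", "green", "blue", "orange", "violet"}
window = ["red", "green", "blue"]      # centre word is "green"
centre_ix = len(window) // 2

# Candidate replacements exclude the true centre word.
candidates = sorted(vocab - {window[centre_ix]})
negatives = []
for replacement in random.sample(candidates, 2):
    fake = window.copy()
    fake[centre_ix] = replacement
    negatives.append(fake)

print(negatives)  # e.g. [['red', 'violet', 'blue'], ['red', 'orange', 'blue']]
```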
|
{
"source": "JDK626/test",
"score": 2
}
|
#### File: JDK626/test/example2.py
```python
from PyQt5 import QtCore, QtWidgets
from DBUtils.PooledDB import PooledDB
from PyQt5.QtGui import QIcon
import pymysql
import sys
import yaml
class Ui_MainWindow(object):
def getDbData(self):
with open('./config/db.yaml', encoding='utf-8') as f:
result = yaml.safe_load(f)
return result
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(900, 900)
MainWindow.setWindowIcon(QIcon("./cartoon1.ico"))
MainWindow.setStyleSheet("#MainWindow{border-image:url(./3.png);}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setGeometry(QtCore.QRect(200, 30, 500, 500))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(10)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(160, 830, 100, 40))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(660, 830, 100, 40))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(320, 830, 100, 40))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(500, 830, 100, 40))
self.pushButton_4.setObjectName("pushButton_4")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(160, 560, 440, 40))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(160, 620, 190, 40))
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(360, 620, 190, 40))
self.lineEdit_3.setObjectName("lineEdit_3")
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setGeometry(QtCore.QRect(560, 620, 190, 40))
self.lineEdit_4.setObjectName("lineEdit_4")
self.lineEdit_5 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_5.setGeometry(QtCore.QRect(160, 700, 240, 40))
self.lineEdit_5.setObjectName("lineEdit_5")
self.lineEdit_6 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_6.setGeometry(QtCore.QRect(400, 700, 240, 40))
self.lineEdit_6.setObjectName("lineEdit_6")
self.lineEdit_7 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_7.setGeometry(QtCore.QRect(160, 770, 220, 40))
self.lineEdit_7.setObjectName("lineEdit_7")
self.lineEdit_8 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_8.setGeometry(QtCore.QRect(390, 770, 220, 40))
self.lineEdit_8.setObjectName("lineEdit_7")
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(650, 560, 110, 40))
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox_2 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_2.setGeometry(QtCore.QRect(650, 700, 110, 40))
self.comboBox_2.setObjectName("comboBox_2")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_3 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_3.setGeometry(QtCore.QRect(650, 770, 110, 40))
self.comboBox_3.setObjectName("comboBox_3")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.comboBox.activated[str].connect(self.comb_one)
self.comboBox_2.activated[str].connect(self.comb_two)
self.comboBox_3.activated[str].connect(self.comb_three)
self.pushButton.clicked.connect(self.calculated_field)
self.pushButton_2.clicked.connect(MainWindow.close)
self.pushButton_3.clicked.connect(self.search)
self.pushButton_4.clicked.connect(self.t_clear)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.tableWidget.setStyleSheet("background:rgb(0,0,0,0);border-style:outset")
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.horizontalHeader().setVisible(False)
self.tableWidget.setShowGrid(False) # Grid lines are not visible
self.pushButton.setStyleSheet("QPushButton{background:transparent}QPushButton:hover{background:#6DDF6D}\
QPushButton{color:rgb(30,144,150)}")
self.pushButton_2.setStyleSheet("QPushButton{background:transparent}QPushButton:hover{background:#6DDF6D}\
QPushButton{color:rgb(30,144,150)}")
self.pushButton_3.setStyleSheet("QPushButton{background:transparent}QPushButton:hover{background:#6DDF6D}\
QPushButton{color:rgb(30,144,150)}")
self.pushButton_4.setStyleSheet("QPushButton{background:transparent}QPushButton:hover{background:#6DDF6D}\
QPushButton{color:rgb(30,144,150)}")
self.comboBox.setStyleSheet("QComboBox{background:transparent}QComboBox:hover{background:transparent}"
"QComboBox{color:rgb(30,144,150)}")
self.comboBox_2.setStyleSheet("QComboBox{background:transparent}QComboBox:hover{background:transparent}"
"QComboBox{color:rgb(30,144,150)}")
self.comboBox_3.setStyleSheet("QComboBox{background:transparent}QComboBox:hover{background:transparent}"
"QComboBox{color:rgb(30,144,150)}")
self.lineEdit.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_2.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_3.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_4.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_5.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_6.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_7.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
self.lineEdit_8.setStyleSheet("background:rgb(0,0,0,15);border-width:0;border-style:outset")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Fuzihao-test2,源码已上传Github及个人网站"))
item = self.tableWidget.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "1"))
item = self.tableWidget.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "2"))
item = self.tableWidget.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "3"))
item = self.tableWidget.verticalHeaderItem(3)
item.setText(_translate("MainWindow", "4"))
item = self.tableWidget.verticalHeaderItem(4)
item.setText(_translate("MainWindow", "5"))
item = self.tableWidget.verticalHeaderItem(5)
item.setText(_translate("MainWindow", "6"))
item = self.tableWidget.verticalHeaderItem(6)
item.setText(_translate("MainWindow", "7"))
item = self.tableWidget.verticalHeaderItem(7)
item.setText(_translate("MainWindow", "8"))
item = self.tableWidget.verticalHeaderItem(8)
item.setText(_translate("MainWindow", "9"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", ""))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", ""))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", ""))
self.comboBox.setItemText(0, _translate("MainWindow", "平均"))
self.comboBox.setItemText(1, _translate("MainWindow", "总数"))
self.comboBox.setItemText(2, _translate("MainWindow", "最大"))
self.comboBox.setItemText(3, _translate("MainWindow", "最小"))
self.comboBox.setItemText(4, _translate("MainWindow", "求和"))
self.comboBox_2.setItemText(0, _translate("MainWindow", "升序"))
self.comboBox_2.setItemText(1, _translate("MainWindow", "降序"))
self.comboBox_3.setItemText(0, _translate("MainWindow", "全局"))
self.comboBox_3.setItemText(1, _translate("MainWindow", "开头"))
self.comboBox_3.setItemText(2, _translate("MainWindow", "结尾"))
self.comboBox_3.setItemText(3, _translate("MainWindow", "特殊字符"))
self.pushButton.setText(_translate("MainWindow", "确认"))
self.pushButton_2.setText(_translate("MainWindow", "关闭"))
self.pushButton_3.setText(_translate("MainWindow", "搜索"))
self.pushButton_4.setText(_translate("MainWindow", "清屏"))
def comb_one(self, o):
text = self.lineEdit.text()
if o == '平均':
sql = 'SELECT AVG({text}) FROM t1;'.format(text=text)
elif o == '总数':
sql = 'SELECT COUNT({text}) FROM t1;'.format(text=text)
elif o == '最大':
sql = 'SELECT MAX({text}) FROM t1;'.format(text=text)
elif o == '最小':
sql = 'SELECT MIN({text}) FROM t1;'.format(text=text)
elif o == '求和':
sql = 'SELECT SUM({text}) FROM t1;'.format(text=text)
self.tableWidget.clear()
print(sql)
self.show(sql)
def comb_two(self, t):
text_input = self.lineEdit_5.text()
text_input2 = self.lineEdit_6.text()
if t == '升序':
sql = "SELECT id, species,price FROM t1 GROUP BY {name1} ORDER BY {name2} ASC".format(name1=text_input,
name2=text_input2)
elif t == '降序':
sql = "SELECT id, species,price FROM t1 GROUP BY {name1} ORDER BY {name2} DESC".format(name1=text_input,
name2=text_input2)
print(sql)
self.tableWidget.clear()
self.show(sql)
def comb_three(self, s):
text1 = self.lineEdit_7.text()
text2 = self.lineEdit_8.text()
if s == '全局':
sql = "SELECT id, species FROM t1 WHERE {text1} REGEXP '{text2}';".format(text1=text1, text2=text2)
elif s == '开头':
sql = "SELECT id, species FROM t1 WHERE {text1} REGEXP '^{text2}';".format(text1=text1, text2=text2)
elif s == '结尾':
sql = "SELECT id, species FROM t1 WHERE {text1} REGEXP '{text2}$';".format(text1=text1, text2=text2)
elif s == '特殊字符':
sql = r"SELECT id, species FROM t1 WHERE {text1} REGEXP '{text2}';".format(text1=text1, text2=r'\\' +
text2)
print(sql)
self.tableWidget.clear()
self.show(sql)
def calculated_field(self):
t1 = self.lineEdit_2.text()
t2 = self.lineEdit_3.text()
t3 = self.lineEdit_4.text()
sql = 'SELECT {text1},{text3}, {text1}{text2}' \
'{text3} FROM t1'.format(text1=t1, text2=t2, text3=t3)
print(sql)
self.tableWidget.clear()
self.show(sql)
def search(self):
t1 = self.lineEdit_2.text()
t2 = self.lineEdit_3.text()
t3 = self.lineEdit_4.text()
sql = "SELECT id,species FROM t1 WHERE {text1} IN (SELECT {text1} FROM t2 WHERE {text2} = '{text3}')".format \
(text1=t1, text2=t2, text3=t3)
print(sql)
self.tableWidget.clear()
self.show(sql)
def show(self, sql):
I = self.getDbData()
pool = PooledDB(pymysql, 5, host=str(I['db1']['host']),
port=int(I['db1']['port']),
user=str(I['db1']['user']),
password=str(I['db1']['password']),
db=str(I['db1']['db'])
, setsession=['SET AUTOCOMMIT = 1'])
connection = pool.connection()
cursor = connection.cursor()
cursor.execute(sql)
result = cursor.fetchall()
print("result:", result)
for i in range(len(result)):
for j in range(len(result[i])):
print(result[i][j])
self.tableWidget.setItem(i, j, QtWidgets.QTableWidgetItem(str(result[i][j])))
cursor.close()
connection.close()
def t_clear(self):
self.tableWidget.clear()
self.lineEdit.clear()
self.lineEdit_2.clear()
self.lineEdit_3.clear()
self.lineEdit_4.clear()
self.lineEdit_5.clear()
self.lineEdit_6.clear()
self.lineEdit_7.clear()
self.lineEdit_8.clear()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
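A hedged sketch of the PooledDB pattern used by show() above, with a parameterized query instead of string-built SQL; the host, credentials, and table are placeholders.
```python
import pymysql
from DBUtils.PooledDB import PooledDB

# Pool of up to 5 cached connections, mirroring the arguments used in show().
pool = PooledDB(pymysql, 5, host='localhost', port=3306,
                user='user', password='password', db='test',
                setsession=['SET AUTOCOMMIT = 1'])
connection = pool.connection()
cursor = connection.cursor()
cursor.execute("SELECT id, species, price FROM t1 WHERE species = %s", ("cat",))
print(cursor.fetchall())
cursor.close()
connection.close()
```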
|
{
"source": "jdkandersson/algorithms-data-structures",
"score": 4
}
|
#### File: library/stack/__init__.py
```python
class _Node:
"""Node for the linked list."""
def __init__(self, value, next_=None):
"""
Construct.
Args:
value: The value for the node.
next_: The next node in the linked list.
"""
self.value = value
self.next_ = next_
class StackEmptyError(Exception):
"""Raised when pop is called on an empty stack."""
class _LinkedList:
"""Linked list for the stack."""
def __init__(self):
"""Construct."""
self.head = None
def add_first(self, value):
"""
Add new value to front of the linked list.
Args:
value: The value to add to the linked list.
"""
self.head = _Node(value, self.head)
def remove_first(self):
"""
Remove the first node from the list and return the value.
Raises StackEmptyError if the list is empty.
Returns:
The value of the first node.
"""
self.raise_empty()
node = self.head
self.head = self.head.next_
return node.value
def raise_empty(self):
"""Raise StackEmptyError if the stack is empty."""
if self.head is None:
raise StackEmptyError
class Stack:
"""Implementation of a stack."""
def __init__(self):
"""Construct."""
self._list = _LinkedList()
def push(self, value):
"""
Add a new value to the stack.
Args:
value: The value to add to the stack.
"""
self._list.add_first(value)
def pop(self):
"""
Remove the most recently added value from the list and return it.
Raises StackEmptyError if the stack is empty.
Returns:
The most recently added value.
"""
return self._list.remove_first()
def peek(self):
"""
Return the value at the top of the stack without popping it.
Raises StackEmptyError if the stack is empty.
Returns:
The top value.
"""
self._list.raise_empty()
return self._list.head.value
def is_empty(self):
"""
Check whether the stack is empty.
Returns:
Whether the stack is empty.
"""
return self._list.head is None
```
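A quick usage sketch of the Stack above, assuming the package layout implied by the file path (library/stack/__init__.py):
```python
from library.stack import Stack, StackEmptyError

stack = Stack()
stack.push(1)
stack.push(2)
print(stack.peek())      # 2
print(stack.pop())       # 2
print(stack.is_empty())  # False
print(stack.pop())       # 1
try:
    stack.pop()
except StackEmptyError:
    print("stack exhausted")
```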
#### File: tests/hash_map/test_.py
```python
from unittest import mock
import pytest
from library import hash_map
@pytest.mark.parametrize(
"capacity, raises",
[(0, True), (15, True), (16, False), (17, True), (32, False)],
ids=["too small", "too small", "minimum size", "not multiple of 2", "larger size"],
)
def test_construct_capacity(capacity, raises):
"""
GIVEN initial capacity and whether ValueError should be raised
WHEN a HashMap is constructed with the capacity
THEN ValueError is raised when expected raises is True.
"""
if raises:
with pytest.raises(ValueError):
hash_map.HashMap(capacity)
else:
hash_map.HashMap(capacity)
@pytest.mark.parametrize(
"key, capacity, expected_index",
[
(1, 16, 11),
(2, 16, 5),
(4, 16, 10),
(8, 16, 3),
(16, 16, 9),
(32, 16, 11),
(64, 16, 11),
(1, 32, 11),
(0.1, 16, 0),
(0.2, 16, 13),
(0.4, 16, 1),
(0.8, 16, 4),
(1.6, 16, 13),
(3.2, 16, 1),
(6.4, 16, 6),
(0.1, 32, 16),
("key 1", 16, 9),
("key 2", 16, 3),
("key 4", 16, 9),
("key 8", 16, 7),
("key 16", 16, 0),
("key 32", 16, 14),
("key 64", 16, 6),
("key 1", 32, 9),
],
ids=[
"int-1-16",
"int-2-16",
"int-4-16",
"int-8-16",
"int-16-16",
"int-32-16",
"int-64-16",
"int-1-32",
"float-0.1-16",
"float-0.2-16",
"float-0.4-16",
"float-0.8-16",
"float-1.6-16",
"float-3.2-16",
"float-6.4-16",
"float-0.1-32",
"str-key 1-16",
"str-key 2-16",
"str-key 4-16",
"str-key 8-16",
"str-key 16-16",
"str-key 32-16",
"str-key 64-16",
"str-key 1-32",
],
)
def test_calculate_index(key, capacity, expected_index):
"""
GIVEN key, capacity of HashMap and expected index
WHEN HashMap is constructed with the capacity and _calculate_index is called with
the key
THEN the expected index is returned.
"""
test_hash_map = hash_map.HashMap(capacity)
index = test_hash_map._calculate_index(key)
assert index == expected_index
@pytest.fixture
def mocked_buckets_hash_map():
"""HashMap with mocked buckets and _calculate_index."""
capacity = 16
test_hash_map = hash_map.HashMap(capacity)
for idx in range(capacity):
test_hash_map._buckets[idx] = mock.MagicMock()
test_hash_map._calculate_index = mock.MagicMock()
return test_hash_map
def test_set_calculate_index(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index and key
WHEN set_ is called with the key
THEN _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.set_(key, "value 1")
mocked_buckets_hash_map._calculate_index.assert_called_with(key)
def test_set_insert(mocked_buckets_hash_map): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index, mocked buckets and key and value
WHEN set_ is called with the key and value
THEN the bucket with the index returned by _calculate_index is called with the key
and value.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
value = "value 1"
mocked_buckets_hash_map.set_(key, value)
mocked_buckets_hash_map._buckets[0].insert.assert_called_once_with(key, value)
def test_get_calculate_index(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index and key
WHEN get is called with the key
THEN _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.get(key)
mocked_buckets_hash_map._calculate_index.assert_called_once_with(key)
def test_get_get_call(mocked_buckets_hash_map): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index, mocked buckets and key
WHEN get is called with the key
THEN the bucket with the index returned by _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.get(key)
mocked_buckets_hash_map._buckets[0].get.assert_called_once_with(key)
def test_get_get_return(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index and mocked buckets
WHEN get is called with the key
THEN the get return value of the bucket with the index returned by
_calculate_index is returned.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
return_value = mocked_buckets_hash_map.get(key)
assert return_value == mocked_buckets_hash_map._buckets[0].get.return_value
def test_set_get_missing():
"""
GIVEN empty hash map
WHEN get is called with a key
THEN KeyError is raised.
"""
test_hash_map = hash_map.HashMap()
with pytest.raises(KeyError):
test_hash_map.get("key 1")
def test_set_get():
"""
GIVEN empty hash map and key and value
WHEN set is called with the key and value and get is called with the key
THEN the value is returned.
"""
test_hash_map = hash_map.HashMap()
key = "key 1"
value = "value 1"
test_hash_map.set_(key, value)
return_value = test_hash_map.get(key)
assert return_value == value
def test_exists_calculate_index(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index and key
WHEN exists is called with the key
THEN _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.exists(key)
mocked_buckets_hash_map._calculate_index.assert_called_once_with(key)
def test_exists_exists_call(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index, mocked buckets and key
WHEN exists is called with the key
THEN the bucket with the index returned by _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.exists(key)
mocked_buckets_hash_map._buckets[0].exists.assert_called_once_with(key)
def test_exists_exists_return(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index and mocked buckets
WHEN exists is called with the key
THEN the exists return value of the bucket with the index returned by
_calculate_index is returned.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
return_value = mocked_buckets_hash_map.exists(key)
assert return_value == mocked_buckets_hash_map._buckets[0].exists.return_value
def test_exists_missing():
"""
GIVEN empty hash map
WHEN exists is called with a key
THEN False is returned.
"""
test_hash_map = hash_map.HashMap()
exists = test_hash_map.exists("key 1")
assert exists is False
def test_exists_present():
"""
GIVEN empty hash map and key and value
WHEN set is called with the key and value and exists is called with the key
THEN True is returned.
"""
test_hash_map = hash_map.HashMap()
key = "key 1"
test_hash_map.set_(key, "value 1")
exists = test_hash_map.exists(key)
assert exists is True
def test_delete_calculate_index(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index and key
WHEN delete is called with the key
THEN _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.delete(key)
mocked_buckets_hash_map._calculate_index.assert_called_once_with(key)
def test_delete_delete_call(
mocked_buckets_hash_map,
): # pylint: disable=redefined-outer-name
"""
GIVEN hash map with mocked _calculate_index, mocked buckets and key
WHEN delete is called with the key
THEN the bucket with the index returned by _calculate_index is called with the key.
"""
mocked_buckets_hash_map._calculate_index.return_value = 0
key = "key 1"
mocked_buckets_hash_map.delete(key)
mocked_buckets_hash_map._buckets[0].delete.assert_called_once_with(key)
def test_delete_missing():
"""
GIVEN empty hash map
WHEN delete is called with a key
THEN KeyError is raised.
"""
test_hash_map = hash_map.HashMap()
with pytest.raises(KeyError):
test_hash_map.delete("key 1")
def test_delete_present():
"""
GIVEN empty hash map and key and value
WHEN set is called with the key and value and delete is called with the key
THEN the key no longer exists in the hash map.
"""
test_hash_map = hash_map.HashMap()
key = "key 1"
test_hash_map.set_(key, "value 1")
test_hash_map.delete(key)
assert test_hash_map.exists(key) is False
@pytest.mark.parametrize(
"actions, expected_size",
[
([], 0),
([("set_", ("key 1", "value 1"))], 1),
([("set_", ("key 1", "value 1")), ("set_", ("key 1", "value 2"))], 1),
([("set_", ("key 1", "value 1")), ("delete", ("key 1",))], 0),
([("set_", ("key 1", "value 1")), ("set_", ("key 2", "value 2"))], 2),
(
[
("set_", ("key 1", "value 1")),
("set_", ("key 2", "value 2")),
("delete", ("key 2",)),
],
1,
),
],
ids=[
"empty,0",
"set,1",
"set-update,1",
"set-delete,0",
"set-set,2",
"set-set-delete,1",
],
)
def test_size(actions, expected_size):
"""
GIVEN empty hash map, actions to modify the hash map and expected size
WHEN the actions are performed on the map
THEN the map has the expected size.
"""
test_hash_map = hash_map.HashMap()
for operation, args in actions:
getattr(test_hash_map, operation)(*args)
assert test_hash_map.size == expected_size
def test_size_delete_raises():
"""
GIVEN hash map with a single element
WHEN delete is called with a different key than the element
THEN size is not decremented.
"""
test_hash_map = hash_map.HashMap()
test_hash_map.set_("key 1", "value 1")
with pytest.raises(KeyError):
test_hash_map.delete("key 2")
assert test_hash_map.size == 1
@pytest.mark.parametrize(
"elements",
[
[],
[("key 1", "value 1")],
[("key 1", "value 1"), ("key 2", "value 2")],
[
("key 1", "value 1"),
("key 2", "value 2"),
("key 4", "value 4"),
("key 8", "value 8"),
("key 16", "value 16"),
("key 32", "value 32"),
("key 64", "value 64"),
],
],
ids=["empty", "single", "multiple", "many"],
)
def test_iterate(elements):
"""
GIVEN empty hash map and elements to set
WHEN elements are set and the map is iterated over
THEN all elements are returned.
"""
test_hash_map = hash_map.HashMap()
for element in elements:
test_hash_map.set_(*element)
element_set = set(iter(test_hash_map))
assert len(element_set) == len(elements)
for element in elements:
assert element in element_set
@pytest.mark.parametrize(
"elements",
[
[],
[("key 1", "value 1")],
[("key 1", "value 1"), ("key 2", "value 2")],
[
("key 1", "value 1"),
("key 2", "value 2"),
("key 4", "value 4"),
("key 8", "value 8"),
("key 16", "value 16"),
("key 32", "value 32"),
("key 64", "value 64"),
],
],
ids=["empty", "single", "multiple", "many"],
)
def test_clear(elements):
"""
GIVEN empty hash map and elements to set
WHEN elements are set and clear is called
THEN the map has zero size and iterating over it returns an empty list.
"""
test_hash_map = hash_map.HashMap()
for element in elements:
test_hash_map.set_(*element)
test_hash_map.clear()
assert test_hash_map.size == 0
assert list(iter(test_hash_map)) == []
@pytest.mark.parametrize(
"elements",
[
[],
[("key 1", "value 1")],
[("key 1", "value 1"), ("key 2", "value 2")],
[
("key 1", "value 1"),
("key 2", "value 2"),
("key 4", "value 4"),
("key 8", "value 8"),
("key 16", "value 16"),
("key 32", "value 32"),
("key 64", "value 64"),
],
],
ids=["empty", "single", "multiple", "many"],
)
def test_construct_source(elements):
"""
GIVEN list of key value tuples
WHEN hash map is constructed with list as source
THEN map contains each element in the list.
"""
test_hash_map = hash_map.HashMap(source=elements)
element_set = set(iter(test_hash_map))
assert len(element_set) == len(elements)
for element in elements:
assert element in element_set
@pytest.mark.parametrize(
"elements",
[
[],
[("key 1", "value 1")],
[("key 1", "value 1"), ("key 2", "value 2")],
[
("key 1", "value 1"),
("key 2", "value 2"),
("key 4", "value 4"),
("key 8", "value 8"),
("key 16", "value 16"),
("key 32", "value 32"),
("key 64", "value 64"),
],
],
ids=["empty", "single", "multiple", "many"],
)
def test_clone_elements(elements):
"""
GIVEN empty hash map and elements to add to it
WHEN hash map is constructed with list as source and cloned
THEN cloned map contains each element in the list.
"""
test_hash_map = hash_map.HashMap(source=elements)
cloned_hash_map = test_hash_map.clone()
element_set = set(iter(cloned_hash_map))
assert len(element_set) == len(elements)
for element in elements:
assert element in element_set
def test_clone_not_same():
"""
GIVEN empty hash map
WHEN hash map is cloned
THEN the cloned map is not the same object and contains different buckets.
"""
test_hash_map = hash_map.HashMap()
cloned_hash_map = test_hash_map.clone()
assert id(cloned_hash_map) != id(test_hash_map)
for original_bucket, cloned_bucket in zip(
test_hash_map._buckets, cloned_hash_map._buckets
):
assert id(original_bucket) != id(cloned_bucket)
def test_clone_capacity_copied():
"""
GIVEN empty hash map and capacity of the hash map
WHEN hash map is cloned
THEN the cloned map has the same capacity.
"""
capacity = 16
test_hash_map = hash_map.HashMap(capacity)
cloned_hash_map = test_hash_map.clone()
assert cloned_hash_map.capacity == capacity
@pytest.mark.parametrize(
"initial_capacity, initial_size, expected_capacity",
[(16, 11, 16), (16, 12, 32), (32, 13, 32), (32, 24, 64)],
ids=[
"16,11,no increase",
"16,12,buckets double",
"32,13,no increase",
"32,24,buckets double",
],
)
def test_resize_up(initial_capacity, initial_size, expected_capacity):
"""
GIVEN empty hash map, initial capacity, initial number of elements and
expected capacity
WHEN the initial size plus one key and value is added to the map
THEN the capacity is the expected final capacity and all added key value pairs are
still in the map.
"""
elements = [
(f"key {idx + 1}", f"value {idx + 1}") for idx in range(initial_size + 1)
]
test_hash_map = hash_map.HashMap(initial_capacity, elements)
assert test_hash_map.capacity == expected_capacity
assert len(test_hash_map._buckets) == expected_capacity
assert test_hash_map.size == initial_size + 1
element_set = set(iter(test_hash_map))
assert len(element_set) == len(elements)
for element in elements:
assert element in element_set
@pytest.mark.parametrize(
"initial_capacity, initial_size, expected_capacity",
[(32, 13, 32), (32, 12, 16), (16, 11, 16), (16, 6, 16), (64, 24, 32)],
ids=[
"32,13,no decrease",
"32,12,buckets halve",
"16,11,no decrease",
"16,6,no decrease",
"64,24,buckets halve",
],
)
def test_resize_down(initial_capacity, initial_size, expected_capacity):
"""
GIVEN empty hash map, initial capacity, initial number of elements and
expected capacity
WHEN the initial size is added to the map and then one is removed
THEN the capacity is the expected final capacity and all key value pairs except the
removed pair are still in the map.
"""
elements = [(f"key {idx + 1}", f"value {idx + 1}") for idx in range(initial_size)]
test_hash_map = hash_map.HashMap(initial_capacity, elements)
test_hash_map.delete(elements[-1][0])
assert test_hash_map.capacity == expected_capacity
assert len(test_hash_map._buckets) == expected_capacity
assert test_hash_map.size == initial_size - 1
element_set = set(iter(test_hash_map))
assert len(element_set) == len(elements[:-1])
for element in elements[:-1]:
assert element in element_set
```
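For orientation, a small usage sketch of the `HashMap` interface exercised above follows; the `library.hash_map` import path is assumed by analogy with the queue tests below and is not confirmed by this file.
```python
# Usage sketch inferred from the tests above (import path assumed, not confirmed).
from library import hash_map

m = hash_map.HashMap(16)            # an invalid capacity raises ValueError
m.set_("key 1", "value 1")          # insert or update a key
assert m.get("key 1") == "value 1"  # get raises KeyError for missing keys
assert m.exists("key 1") is True
assert m.size == 1
assert ("key 1", "value 1") in set(iter(m))  # iteration yields (key, value) tuples
m.delete("key 1")                   # delete raises KeyError for missing keys
assert m.exists("key 1") is False
```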
#### File: algorithms-data-structures/tests/test_queue.py
```python
import pytest
from library import queue
@pytest.mark.parametrize(
"length, operations, exception",
[
(0, ["deq"], queue.QueueEmptyError),
(0, ["enq"], queue.QueueFullError),
(1, ["enq", "enq"], queue.QueueFullError),
(1, ["enq", "deq", "deq"], queue.QueueEmptyError),
],
ids=["0,deq", "0,enq", "1,enq-enq", "1,enq-deq-deq"],
)
def test_end_deq_raise(length, operations, exception):
"""
GIVEN length of queue, operations to perform and exception that should be raised
WHEN queue with the length is constructed and the operations are performed
THEN the exception is raised.
"""
test_queue = queue.Queue(length)
def perform_operation(operation):
if operation == "enq":
test_queue.enqueue("value")
elif operation == "deq":
test_queue.dequeue()
else:
raise Exception
for idx, operation in enumerate(operations):
if idx == len(operations) - 1:
with pytest.raises(exception):
perform_operation(operation)
continue
perform_operation(operation)
@pytest.mark.parametrize(
"length, actions",
[
(1, [("enq", "value 1"), ("deq", "value 1")]),
(
1,
[
("enq", "value 1"),
("deq", "value 1"),
("enq", "value 1"),
("deq", "value 1"),
],
),
(
2,
[
("enq", "value 1"),
("enq", "value 2"),
("deq", "value 1"),
("enq", "value 3"),
("deq", "value 2"),
("deq", "value 3"),
],
),
],
ids=["1,enq-deq", "1,enq-deq-enq-deq", "2,enq-enq-deq-enq-deq-deq"],
)
def test_enqueue_dequeue(length, actions):
"""
GIVEN list of enqueue (with value to enqueue) and dequeue (with value expected to
be dequeued) actions and the length of a queue
WHEN actions are performed
THEN the expected values are dequeued.
"""
test_queue = queue.Queue(length)
for action in actions:
operation, value = action
if operation == "enq":
test_queue.enqueue(value)
elif operation == "deq":
assert test_queue.dequeue() == value
def test_get_front_empty():
"""
GIVEN empty queue
WHEN get_front is called
THEN QueueEmptyError is raised.
"""
test_queue = queue.Queue()
with pytest.raises(queue.QueueEmptyError):
test_queue.get_front()
def test_get_front_single():
"""
GIVEN queue with a single value
WHEN get_front is called
THEN the value is returned and can still be dequeued.
"""
test_queue = queue.Queue()
value = "value 1"
test_queue.enqueue(value)
returned_value = test_queue.get_front()
assert returned_value == value
assert test_queue.dequeue() == value
def test_get_front_multiple():
"""
GIVEN queue with multiple values
WHEN get_front is called
THEN the front value is returned.
"""
test_queue = queue.Queue()
test_queue.enqueue("value 1")
test_queue.enqueue("value 2")
returned_value = test_queue.get_front()
assert returned_value == "value 1"
def test_is_empty_empty():
"""
GIVEN empty queue
WHEN is_empty is called
THEN True is returned.
"""
test_queue = queue.Queue()
result = test_queue.is_empty()
assert result is True
def test_is_empty_not_empty():
"""
GIVEN queue that is not empty
WHEN is_empty is called
THEN False is returned.
"""
test_queue = queue.Queue()
test_queue.enqueue("value 1")
result = test_queue.is_empty()
assert result is False
def test_clear():
"""
GIVEN queue with length 1 with an item
WHEN clear is called
THEN the queue is empty and can be filled up again.
"""
test_queue = queue.Queue(1)
test_queue.enqueue("value 1")
test_queue.clear()
assert test_queue.is_empty() is True
test_queue.enqueue("value 2")
```
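As a quick illustration of the bounded-queue behaviour these tests pin down, the hedged sketch below mirrors the assertions above rather than the library's actual source.
```python
# Usage sketch based only on the assertions above; anything beyond them is assumed.
from library import queue

q = queue.Queue(2)                  # fixed capacity of 2
q.enqueue("value 1")
q.enqueue("value 2")                # a third enqueue would raise queue.QueueFullError
assert q.get_front() == "value 1"   # peek does not remove the front value
assert q.dequeue() == "value 1"     # FIFO order
assert q.is_empty() is False
q.clear()                           # empties the queue; it can be refilled afterwards
try:
    q.dequeue()
except queue.QueueEmptyError:
    pass                            # dequeue on an empty queue raises QueueEmptyError
```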
|
{
"source": "jdkandersson/cloudformation-kubernetes",
"score": 3
}
|
#### File: old_lambda/lambda_function/operations.py
```python
import typing
import kubernetes
from . import exceptions
from . import helpers
class CreateReturn(typing.NamedTuple):
"""
Structure of the create return value.
Attrs:
status: The status of the operation. Is SUCCESS or FAILURE.
reason: If the status is FAILURE, the reason for the failure.
physical_name: If the status is SUCCESS, the physical name of the created
resource in the form [<namespace>/]<name> where the namespace is included
if the operation is namespaced.
"""
status: str
reason: typing.Optional[str]
physical_name: typing.Optional[str]
def create(*, body: typing.Dict[str, typing.Any]) -> CreateReturn:
"""
Execute create command.
Assume body has at least metadata with a name.
Args:
body: The body to create.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return CreateReturn("FAILURE", str(exc), None)
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="create"
)
# Handling non-namespaced cases
if not namespaced:
try:
response = client_function(body=body)
return CreateReturn("SUCCESS", None, response.metadata.name)
except kubernetes.client.rest.ApiException as exc:
return CreateReturn("FAILURE", str(exc), None)
# Handling namespaced
namespace = helpers.calculate_namespace(body=body)
try:
response = client_function(body=body, namespace=namespace)
return CreateReturn(
"SUCCESS", None, f"{response.metadata.namespace}/{response.metadata.name}"
)
except kubernetes.client.rest.ApiException as exc:
return CreateReturn("FAILURE", str(exc), None)
class ExistsReturn(typing.NamedTuple):
"""
Structure of the update and delete return value.
Attrs:
status: The status of the operation. Is SUCCESS or FAILURE.
reason: If the status is FAILURE, the reason for the failure.
"""
status: str
reason: typing.Optional[str]
def update(*, body: typing.Dict[str, typing.Any], physical_name: str) -> ExistsReturn:
"""
Execute update command.
Assume body has at least metadata with a name.
Args:
body: The body to update.
physical_name: The namespace (if namespaced) and name of the resource.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return ExistsReturn("FAILURE", str(exc))
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="update"
)
# Handling non-namespaced cases
if not namespaced:
try:
client_function(body=body, name=physical_name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
# Handling namespaced
namespace, name = physical_name.split("/")
try:
client_function(body=body, namespace=namespace, name=name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
def delete(*, body: typing.Dict[str, typing.Any], physical_name: str) -> ExistsReturn:
"""
Execute delete command.
Assume body has at least metadata with a name.
Args:
body: The body to delete.
physical_name: The namespace (if namespaced) and name of the resource.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return ExistsReturn("FAILURE", str(exc))
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="delete"
)
# Handling non-namespaced cases
if not namespaced:
try:
client_function(name=physical_name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
# Handling namespaced
namespace, name = physical_name.split("/")
try:
client_function(namespace=namespace, name=name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
```
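A hedged usage sketch of `operations.create` follows; the import path and the Deployment body are illustrative assumptions, while the `CreateReturn` fields match the definitions above.
```python
# Illustrative call; the import path and resource body are assumptions, not repository code.
from lambda_function import operations

body = {
    "apiVersion": "apps/v1",
    "kind": "Deployment",
    "metadata": {"name": "web", "namespace": "default"},
    "spec": {},  # trimmed; a real Deployment spec would go here
}

result = operations.create(body=body)
if result.status == "SUCCESS":
    # For namespaced kinds the physical name is "<namespace>/<name>".
    print("created", result.physical_name)
else:
    print("create failed:", result.reason)
```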
#### File: tests/lambda_function/test_helpers.py
```python
import pytest
from kubernetes import client
from lambda_function import exceptions
from lambda_function import helpers
from . import fixtures
@pytest.mark.parametrize("group_version", fixtures.GROUP_VERSIONS)
@pytest.mark.helper
def test_calculate_client(group_version):
"""
GIVEN combination of group and version
WHEN calculate_client is called with the group and version separated by /
THEN the returned module is available as a client.
"""
if group_version["group"]:
api_version = f"{group_version['group']}/{group_version['version']}"
else:
api_version = group_version["version"]
module = helpers.calculate_client(api_version=api_version)
assert hasattr(client, module)
@pytest.mark.parametrize("group_version_kind", fixtures.GROUP_VERSION_KINDS)
@pytest.mark.helper
def test_calculate_client_kind(group_version_kind):
"""
GIVEN combination of group, version and kind
WHEN calculate_client is called with the kind and module name
THEN the returned function name is available on the module.
"""
for operation in ["create", "update", "delete"]:
# Getting the expected client module name
if group_version_kind["group"]:
api_version = (
f"{group_version_kind['group']}/{group_version_kind['version']}"
)
else:
api_version = group_version_kind["version"]
module_name = helpers.calculate_client(api_version=api_version)
client_function = helpers.calculate_function_name(
kind=group_version_kind["kind"],
operation=operation,
module_name=module_name,
)
module = getattr(client, module_name)
assert hasattr(module, client_function)
@pytest.mark.parametrize(
"body, expected_namespace",
[
({}, "default"),
({"metadata": {}}, "default"),
({"metadata": {"namespace": "namespace 1"}}, "namespace 1"),
],
ids=["empty", "metadata empty", "namespace in metadata"],
)
@pytest.mark.helper
def test_calculate_namespace_empty(body, expected_namespace):
"""
GIVEN body and expected namespace
WHEN calculate_namespace is called with the body
THEN the expected namespace is returned.
"""
namespace = helpers.calculate_namespace(body=body)
assert namespace == expected_namespace
@pytest.mark.parametrize(
"api_version, kind, expected_function, expected_namespaced",
[
(
"apps/v1",
"deployment",
client.AppsV1Api().create_namespaced_deployment,
True,
),
("v1", "namespace", client.CoreV1Api().create_namespace, False),
],
)
@pytest.mark.helper
def test_get_function(api_version, kind, expected_function, expected_namespaced):
"""
GIVEN api version, kind, expected function and expected namespaced
WHEN get_function is called with the api version, kind and create operation
THEN the expected function is returned with the expected namespaced value.
"""
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="create"
)
assert str(client_function)[:-11] == str(expected_function)[:-11]
assert namespaced == expected_namespaced
@pytest.mark.helper
def test_get_api_version_missing():
"""
GIVEN empty dictionary
WHEN get_api_version is called with the dictionary
THEN ApiVersionMissingError is raised.
"""
with pytest.raises(exceptions.ApiVersionMissingError):
helpers.get_api_version(body={})
@pytest.mark.helper
def test_get_api_version():
"""
GIVEN dictionary with apiVersion
WHEN get_api_version is called with the dictionary
THEN the value of apiVersion is returned.
"""
body = {"apiVersion": "version 1"}
api_version = helpers.get_api_version(body=body)
assert api_version == "version 1"
@pytest.mark.helper
def test_get_kind_missing():
"""
GIVEN empty dictionary
WHEN get_kind is called with the dictionary
THEN KindMissingError is raised.
"""
with pytest.raises(exceptions.KindMissingError):
helpers.get_kind(body={})
@pytest.mark.helper
def test_get_kind():
"""
GIVEN dictionary with kind
WHEN get_kind is called with the dictionary
THEN the value of kind is returned.
"""
body = {"kind": "kind 1"}
kind = helpers.get_kind(body=body)
assert kind == "kind 1"
```
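The helper behaviour these tests encode can be summarised with a short, hedged sketch; the concrete expectations in the comments come from the parametrised cases above, and everything else is an assumption.
```python
# Sketch of how the helpers fit together, based only on the tests above.
from kubernetes import client
from lambda_function import helpers

module_name = helpers.calculate_client(api_version="apps/v1")
assert hasattr(client, module_name)     # resolves to an API class name on kubernetes.client

create_fn, namespaced = helpers.get_function(
    api_version="apps/v1", kind="deployment", operation="create"
)
assert namespaced is True               # per the parametrised case, this resolves to
                                        # client.AppsV1Api().create_namespaced_deployment

assert helpers.calculate_namespace(body={}) == "default"
assert helpers.get_api_version(body={"apiVersion": "apps/v1"}) == "apps/v1"
assert helpers.get_kind(body={"kind": "Deployment"}) == "Deployment"
```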
|
{
"source": "jdkaplan/QQ",
"score": 3
}
|
#### File: jdkaplan/QQ/command.py
```python
class Command:
def __eq__(self, other):
return type(self) == type(other)
def copy(self):
return self
def __repr__(self):
return f"Command(name={repr(type(self).__name__)})"
NO_TERMINATE=None
FUNC_TERMINATE=0
LOOP_TERMINATE=1
```
#### File: jdkaplan/QQ/evaluator.py
```python
import sys
from environment import Env
import command
import exceptions
import parser
def repl():
env = Env()
while (inp := input("> ")) != "QUIT!": # or whatever
inst = parser.parse(inp) # TODO
inst.execute(env)
def evaluate_file(fname):
with open(fname) as f:
env = Env()
inst_q = parser.parse(f.read()) # TODO
term = inst_q.execute(env) # assuming this will be a Queue for now
# break on the top level will cause execution to stop, but should really be an error. ret
# can be used to return early.
if term == command.LOOP_TERMINATE:
raise exceptions.QQError("Can't break out of the main program body.")
if __name__ == '__main__':
if len(sys.argv) > 1:
evaluate_file(sys.argv[1])
else:
repl()
```
#### File: jdkaplan/QQ/evaluator_test.py
```python
from dataclasses import dataclass
import os
import subprocess
import tempfile
import unittest
write_golden_files = False
@dataclass
class SampleFile:
base: str
input_path: str
output_path: str
class TestEvaluator(unittest.TestCase):
excluded_programs = [
# This program takes a very long time to terminate.
'rule',
]
def sample_programs(self):
input_dir = os.path.join(os.path.dirname(__file__), 'test_programs')
output_dir = os.path.join(os.path.dirname(__file__), 'test_evaluations')
samples = []
for filename in os.listdir(input_dir):
base, _ = os.path.splitext(filename)
if base in self.excluded_programs:
continue
input_path = os.path.join(input_dir, filename)
stdout_path = os.path.join(output_dir, base + '.output')
samples.append(SampleFile(base, input_path, stdout_path))
return samples
def evaluate(self, filename):
eval_path = os.path.join(os.path.dirname(__file__), 'evaluator.py')
with open(filename) as inp:
text = inp.read()
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as src:
src.write(text)
# Crash the program to get queue state.
src.write("\nQQ\n")
# Since we're not closing the file, we need to seek back to the
# start before we try reading it again.
src.seek(0)
capture = subprocess.run(
['python', eval_path, src.name],
capture_output=True,
text=True,
)
return capture.stdout
@unittest.skipIf(write_golden_files, 'writing golden files')
def test_sample_programs(self):
for program in self.sample_programs():
with self.subTest(program.base):
actual = self.evaluate(program.input_path)
with open(program.output_path) as out:
expected = out.read()
self.assertEqual(actual, expected)
@unittest.skipIf(not write_golden_files, 'not writing golden files')
def test_write_golden_files(self):
for program in self.sample_programs():
with self.subTest(program.base):
stdout = self.evaluate(program.input_path)
with open(program.output_path, 'w') as out:
out.write(stdout)
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jdkato/blocktest",
"score": 3
}
|
#### File: examples/simple/my_component.py
```python
import spacy
def my_component(doc): # my_component begin
print("After tokenization, this doc has %s tokens." % len(doc))
if len(doc) < 10:
print("This is a pretty short document.")
return doc
nlp = spacy.load('en')
nlp.add_pipe(my_component, name='print_info', first=True)
print(nlp.pipe_names) # ['print_info', 'tagger', 'parser', 'ner']
doc = nlp(u"This is a sentence.") # my_component end
assert nlp.pipe_names == ['print_info', 'tagger', 'parser', 'ner']
```
|
{
"source": "jdkato/codetype",
"score": 3
}
|
#### File: codetype/test/test_first_lines.py
```python
import os
import sys
import re
import json
import unittest
from codecs import getdecoder
sys.path.insert(0, os.path.abspath("."))
REGEXP_DIR = os.path.join("test", "lang", "regexp")
class FirstLinesTestCase(unittest.TestCase):
"""Tests for codetype's utility functions.
"""
def test_identify(self):
unicode_escape = getdecoder("unicode_escape")
for test in os.listdir(REGEXP_DIR):
if test.endswith(".json"):
with open(os.path.join(REGEXP_DIR, test)) as data_file:
data = json.load(data_file)
for key in data:
regexp = unicode_escape(key)[0]
accepted = data[key]["accepted"]
rejected = data[key]["rejected"]
for s in accepted:
self.assertTrue(
re.search(regexp, s),
msg="'{0}' does not match '{1}'".format(regexp, s)
)
for s in rejected:
self.assertFalse(
re.search(regexp, s),
msg="'{0}' matches '{1}'".format(regexp, s)
)
if __name__ == "__main__":
unittest.main()
```
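For readability, here is a hedged illustration of the fixture shape the loop above expects (regexp keys mapping to lists of accepted and rejected strings); the concrete pattern and strings are invented for the example.
```python
# Illustrative fixture shape only; the real JSON files live under test/lang/regexp.
example_fixture = {
    r"^#!.*\bpython(3(\.\d+)?)?$": {
        "accepted": ["#!/usr/bin/env python", "#!/usr/bin/python3"],
        "rejected": ["#!/bin/bash", "echo python"],
    }
}
```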
|
{
"source": "jdkato/SubVale",
"score": 3
}
|
#### File: jdkato/SubVale/Vale.py
```python
import binascii
import cgi
import json
import os
import subprocess
import urllib.parse
import webbrowser
import requests
import sublime
import sublime_plugin
from Default.paragraph import expand_to_paragraph
Settings = None
class ValeFixCommand(sublime_plugin.TextCommand):
"""Applies a fix for an alert.
"""
def run(self, edit, **args):
alert, suggestion = args["alert"], args["suggestion"]
offset = self.view.text_point(alert["Line"] - 1, 0)
coords = sublime.Region(
offset + alert["Span"][0] - 1, offset + alert["Span"][1]
)
if alert["Action"]["Name"] != "remove":
self.view.replace(edit, coords, suggestion)
else:
coords.b = coords.b + 1
self.view.erase(edit, coords)
self.view.window().status_message(
"[Vale Server] Successfully applied fix!")
def debug(message, prefix="Vale", level="debug"):
"""Print a formatted console entry to the Sublime Text console.
Args:
message (str): A message to print to the console
prefix (str): An optional prefix
level (str): One of debug, info, warning, error [Default: debug]
Returns:
str: Issue a standard console print command.
"""
if Settings.get("vale_debug"):
print(
"{prefix}: [{level}] {message}".format(
message=message, prefix=prefix, level=level
)
)
def show_suggestions(suggestions, payload):
"""Show a Quick Panel of possible solutions for the given alert.
"""
alert = json.loads(payload)
options = []
for suggestion in suggestions:
if alert["Action"]["Name"] == "remove":
options.append("Remove '" + alert["Match"] + "'")
else:
options.append("Replace with '" + suggestion + "'")
sublime.active_window().show_quick_panel(
options,
lambda idx: apply_suggestion(alert, suggestions, idx),
sublime.MONOSPACE_FONT
)
def apply_suggestion(alert, suggestions, idx):
"""Apply the given suggestion to the active buffer.
"""
if idx >= 0 and idx < len(suggestions):
suggestion = suggestions[idx]
view = sublime.active_window().active_view()
view.run_command("vale_fix", {
"alert": alert, "suggestion": suggestion
})
def handle_navigation(path):
"""Handle navigation after a user clicks one of our links.
"""
if os.path.exists(path):
# The path exists, open it in a new tab.
sublime.active_window().open_file(path)
elif path.startswith("http"):
# The path doesn't exist, assume it's an URL.
webbrowser.open(path)
else:
# It's an alert to process.
server = urllib.parse.urljoin(Settings.get("vale_server"), "suggest")
alert = binascii.unhexlify(path.encode()).decode()
r = requests.post(server, data={
"alert": alert
})
show_suggestions(r.json().get("suggestions", []), alert)
def query(endpoint, payload={}):
"""Query the Vale Server API with the given `endpoint` and `payload`.
"""
try:
server = urllib.parse.urljoin(Settings.get("vale_server"), endpoint)
r = requests.get(server, params=payload)
return r.json() if r.status_code == 200 else {}
except requests.exceptions.RequestException as e:
debug(str(e), level="error")
return {}
def make_link(url, linkText="{url}"):
"""Return a link HTML string.
"""
template = "<a href=\"{url}\">" + linkText + "</a>"
return template.format(url=url)
def post_file(path):
"""
"""
try:
server = urllib.parse.urljoin(Settings.get("vale_server"), "file")
debug("running vale ({0}) on {1}".format(server, path))
r = requests.post(server, data={
"file": path,
"path": os.path.dirname(path)
})
if r.status_code != 200:
return {}
body = r.json()["path"]
with open(body, "r+", encoding="utf-8") as f:
return json.load(f)
except requests.exceptions.RequestException as e:
debug(e)
return {}
def post_str(buf, ext):
"""
"""
try:
server = urllib.parse.urljoin(Settings.get("vale_server"), "vale")
debug("running vale ({0}) on {1}".format(server, buf))
r = requests.post(server, data={
"text": buf,
"format": ext
})
if r.status_code != 200:
return {}
return r.json()
except requests.exceptions.RequestException as e:
debug(e)
return {}
class ValeSettings(object):
"""Provide global access to and management of Vale's settings.
"""
settings_file = "Vale.sublime-settings"
settings = sublime.load_settings(settings_file)
def __init__(self):
self.on_hover = []
self.error_template = None
self.warning_template = None
self.info_template = None
self.css = None
self.settings.add_on_change("reload", lambda: self.load())
self.load()
def load(self):
"""Load Vale's settings.
"""
self.settings = sublime.load_settings(self.settings_file)
self.__load_resources()
def is_supported(self, syntax):
"""Determine if `syntax` has been specified in the settings.
"""
return True
def get_styles(self):
"""Get Vale's base styles.
"""
config = self.get_config()
return config["GBaseStyles"]
def get_draw_style(self):
"""Get the region styling.
"""
underlined = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
style = self.get("vale_alert_style")
if style == "solid_underline":
return sublime.DRAW_SOLID_UNDERLINE | underlined
elif style == "stippled_underline":
return sublime.DRAW_STIPPLED_UNDERLINE | underlined
elif style == "squiggly_underline":
return sublime.DRAW_SQUIGGLY_UNDERLINE | underlined
return sublime.DRAW_OUTLINED
def get_config(self):
"""Create a list of settings from the vale binary.
"""
return query("config")
def put(self, setting, value):
"""Store and save `setting` as `value`.
Args:
setting (str): The name of the setting to be accessed.
value (str, int, bool): The value to be stored.
"""
self.settings.set(setting, value)
sublime.save_settings(self.settings_file)
def get(self, setting):
"""Return the value associated with `setting`.
Args:
setting (str): The name of the setting to be accessed.
Returns:
(str, int, bool): The value associated with `setting`. The default
value is ''.
"""
return self.settings.get(setting, "")
def clear_on_hover(self):
"""Clear Vale's regions and hover data.
"""
for alert in self.on_hover:
for level in ["error", "warning", "suggestion"]:
sublime.View(alert["view_id"]).erase_regions(
"vale-server-" + level
)
del self.on_hover[:]
def __load_resources(self):
"""Load Vale's static resources.
"""
self.error_template = sublime.load_resource(
self.settings.get("vale_error_template")
)
self.warning_template = sublime.load_resource(
self.settings.get("vale_warning_template")
)
self.info_template = sublime.load_resource(
self.settings.get("vale_info_template")
)
self.css = sublime.load_resource(self.settings.get("vale_css"))
class ValeDashboardCommand(sublime_plugin.WindowCommand):
"""Opens the Vale Server dashboard.
"""
def run(self):
instance = Settings.get("vale_server")
webbrowser.open(instance)
class ValeReportCommand(sublime_plugin.WindowCommand):
"""Generates a report for the active folder.
"""
def run(self):
instance = Settings.get("vale_server")
wind = sublime.active_window()
name = os.path.dirname(wind.active_view().file_name())
server = urllib.parse.urljoin(
instance,
"/summary.html?path={0}".format(name)
)
webbrowser.open(server)
class ValeVocabCommand(sublime_plugin.WindowCommand):
"""Opens the user-specified vocab file.
"""
def run(self, name):
config = Settings.get_config()
src = os.path.join(
config["StylesPath"],
"Vocab",
config["Project"],
name + ".txt")
sublime.active_window().open_file(src)
class ValeVocabEditCommand(sublime_plugin.WindowCommand):
"""Adds the user-selected term to the given file.
"""
def run(self, name):
sel = self.window.active_view().sel()
reg = sublime.Region(sel[0].a, sel[0].b)
if reg.size() == 0:
reg = self.window.active_view().word(reg)
term = self.window.active_view().substr(reg)
config = Settings.get_config()
project = config["Project"]
words = query("vocab", {
"name": project, "file": name
})
words.append(term)
sorted_list = sorted(set(words), key=str.casefold)
server = urllib.parse.urljoin(Settings.get("vale_server"), "update")
r = requests.post(server, data={
"path": project + "." + name,
"text": "\n".join(sorted_list)
})
if r.status_code == 200:
self.window.status_message(
"Successfully added '{0}' to '{1}' vocab.".format(term, project)
)
class ValeEditStylesCommand(sublime_plugin.WindowCommand):
"""Provides quick access to styles on a view-specific basis.
"""
styles = []
def run(self):
"""Show a list of all styles applied to the active view.
"""
styles_dir = os.path.dirname(self.window.active_view().file_name())
config = Settings.get_config()
path = config["StylesPath"]
if not path or not os.path.exists(path):
debug("invalid path!")
return
styles = []
for s in os.listdir(path):
style = os.path.join(path, s)
if s == "Vocab" or not os.path.isdir(style):
continue
self.styles.append(style)
styles.append(s)
self.window.show_quick_panel(styles, self.choose_rule)
def choose_rule(self, idx):
"""Show a list of all rules in the user-selected style.
"""
if idx == -1:
return # The panel was cancelled.
d = self.styles[idx]
rules = [x for x in os.listdir(d) if x.endswith(".yml")]
open_rule = (
lambda i: None
if i == -1
else self.window.open_file(os.path.join(d, rules[i]))
)
self.window.show_quick_panel(rules, open_rule)
class ValeCommand(sublime_plugin.TextCommand):
"""Manages Vale's linting functionality.
"""
def is_enabled(self):
syntax = self.view.settings().get("syntax")
return Settings.is_supported(syntax)
def run(self, edit, from_load):
"""Run vale on the user-indicated buffer.
"""
path = self.view.file_name()
if not path or self.view.is_scratch():
debug("invalid path: {0}!".format(path))
return
limit = Settings.get("vale_threshold")
count = self.view.rowcol(self.view.size())[0] + 1
if limit < 0 or (limit > 0 and count >= limit):
if from_load:
return
_, ext = os.path.splitext(path)
reg = expand_to_paragraph(self.view, self.view.sel()[0].b)
buf = self.view.substr(reg)
row, _ = self.view.rowcol(reg.a)
response = post_str(buf, ext)
self.show_alerts(response, row)
else:
response = post_file(path)
self.show_alerts(response, 0)
def show_alerts(self, data, offset):
"""Add alert regions to the view.
"""
Settings.clear_on_hover()
regions = {"suggestion": [], "warning": [], "error": []}
level_to_scope = {
"error": "region.redish",
"warning": "region.orangish",
"suggestion": "region.bluish"
}
if "Code" in data and "Text" in data:
sublime.status_message(
"Vale: runtime error (skipping lint)")
debug(data["Text"])
debug(data.get("Path", ""))
return
for f, alerts in data.items():
for a in alerts:
start = self.view.text_point((a["Line"] - 1) + offset, 0)
loc = (start + a["Span"][0] - 1, start + a["Span"][1])
region = sublime.Region(*loc)
regions[a["Severity"]].append(region)
Settings.on_hover.append(
{
"region": region,
"HTML": self._make_content(a),
"view_id": self.view.id(),
"level": a["Severity"],
"msg": a["Message"],
}
)
for level in ["error", "warning", "suggestion"]:
self.view.add_regions(
"vale-server-" + level,
regions[level],
level_to_scope[level],
"circle",
Settings.get_draw_style(),
)
def _make_content(self, alert):
"""Convert an alert into HTML suitable for a popup.
"""
actions = []
style, rule = alert["Check"].split(".")
path = query("path")["path"]
loc = os.path.join(path, style, rule) + ".yml"
if os.path.exists(loc):
actions.append(make_link(loc, "Edit rule"))
if "Action" in alert and alert["Action"]["Name"] != "":
stringify = json.dumps(alert, separators=(",", ":")).strip()
stringify = binascii.hexlify(stringify.encode()).decode()
actions.append(make_link(stringify, "Fix Alert"))
level = alert["Severity"].capitalize()
if level == "Error":
template = Settings.error_template
elif level == "Warning":
template = Settings.warning_template
else:
template = Settings.info_template
source = alert["Link"]
if source != "":
actions.append(make_link(source, "Read more"))
message = cgi.escape(alert["Message"])
if alert["Description"] == "":
title = "{} - {}".format(level, alert["Check"])
body = message
else:
title = "{}: {}".format(level, message)
body = alert["Description"]
return template.format(
CSS=Settings.css,
header=title,
body=body,
actions=" | ".join(actions))
class ValeEventListener(sublime_plugin.EventListener):
"""Monitors events related to Vale.
"""
def is_enabled(self):
syntax = self.view.settings().get("syntax")
return Settings.is_supported(syntax)
def on_modified_async(self, view):
Settings.clear_on_hover()
if Settings.get("vale_mode") == "background":
debug("running vale on modified")
view.run_command("vale", {"from_load": False})
def on_load_async(self, view):
if Settings.get("vale_mode") == "load_and_save":
debug("running vale on activated")
view.run_command("vale", {"from_load": True})
def on_pre_save_async(self, view):
if Settings.get("vale_mode") in ("load_and_save", "save"):
debug("running vale on pre save")
view.run_command("vale", {"from_load": False})
def on_hover(self, view, point, hover_zone):
loc = Settings.get("vale_alert_location")
for alert in Settings.on_hover:
region = alert["region"]
if alert["view_id"] == view.id() and region.contains(point):
if loc == "hover_popup":
view.show_popup(
alert["HTML"],
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point,
on_navigate=handle_navigation,
max_width=Settings.get("vale_popup_width"),
)
elif loc == "hover_status_bar":
sublime.status_message(
"vale:{0}:{1}".format(alert["level"], alert["msg"])
)
def plugin_loaded():
"""Load plugin settings and resources.
"""
global Settings
Settings = ValeSettings()
```
|
{
"source": "jdkato/TODOView",
"score": 3
}
|
#### File: jdkato/TODOView/TODOView.py
```python
import os
import re
import sublime
import sublime_plugin
TEMPLATE = r'\b({0})(?:\((.+)\))?: (.+)$'
EXTRACT_PAT = None
EXTRACT_RE = None
PREFS = None
def plugin_loaded():
"""Initialize the extraction regexp.
"""
global EXTRACT_RE, EXTRACT_PAT, PREFS
# TODO: respect updates to settings
settings = sublime.load_settings('TODOView.sublime-settings')
PREFS = sublime.load_settings('Preferences.sublime-settings')
EXTRACT_PAT = TEMPLATE.format('|'.join(settings.get('targets', [])))
EXTRACT_RE = re.compile(EXTRACT_PAT)
def parse_query(query):
"""Parse a search query and return its individual parts.
Args:
query (str): A query in the form 'scope:TODO,...:assignee,...'.
Returns:
[str]: The parsed sections of the query.
Examples:
>>> parse_query('file:TODO:*')
['file', ['TODO'], ['*']]
"""
if query == '':
return ['*', ['*'], ['*']]
elif not query.count(':') == 2:
return []
parts = query.split(':')
categories = parts[1].split(',')
assignees = parts[2].split(',')
return [parts[0], categories, assignees]
def format_message(msg):
"""Format the given message.
"""
if not any(msg.endswith(c) for c in ('.', '?', '!')) and len(msg) > 30:
msg = msg + ' ...'
return msg
def ignore_path(path):
"""Determine if we should ignore the given path.
"""
binary = PREFS.get('binary_file_patterns', [])
files = PREFS.get('file_exclude_patterns', [])
folders = PREFS.get('folder_exclude_patterns', [])
for pat in folders + binary + files:
if '*' in path and re.search(pat, path):
return True
elif pat in path:
return True
return False
def aggregate_views(scope):
"""Find all of the views we should search.
Args:
scope (str): The scope of our search. Accepted values are '(f)ile' (the
view being edited), '(o)pen' (all open views), or '(a)ll' (all files
in the current window).
Returns:
[sublime.View|str]: A list of View objects (when available) and file
paths.
"""
views = []
if scope in ('file', 'f'):
v = sublime.active_window().active_view().file_name()
if not ignore_path(v):
views.append(v)
elif scope in ('open', 'o'):
for v in sublime.active_window().views():
name = v.file_name()
if not ignore_path(name):
views.append(name)
else:
for f in sublime.active_window().folders():
for path, subdirs, files in os.walk(f):
for name in files:
p = os.path.join(path, name)
if not ignore_path(p):
views.append(p)
return views
def extract_comments_from_buffer(path, categories, assignees):
"""Extract all TODO-like comments from the given file.
"""
matches = []
try:
with open(path) as buf:
for i, line in enumerate(buf.readlines()):
m = EXTRACT_RE.search(line)
if m:
c = '*' in categories or m.group(1) in categories
a = '*' in assignees or m.group(2) in assignees
if c and a:
matches.append({
'position': (i, m.start(0)),
'category': m.group(1),
'assignee': m.group(2),
'message': format_message(m.group(3))
})
except UnicodeDecodeError:
pass
return matches
def extract_comments(query):
"""Extract all TODO-like comments from the given sources.
"""
comments = {}
parsed = parse_query(query)
if parsed == []:
return comments
scope, categories, assignees = parsed
for v in aggregate_views(scope):
found = extract_comments_from_buffer(v, categories, assignees)
if found:
comments[v] = found
return comments
class TodoSearchCommand(sublime_plugin.WindowCommand):
"""Search for TODOs using the user-provided query string.
"""
def run(self):
"""Prompt the user for a query.
"""
self.window.show_input_panel(
'Enter a query string: ', '', self.show_results, None, None)
def show_results(self, query):
"""Show the results in either a Quick Panel.
"""
sublime.active_window().run_command(
'todo_quick_panel', {'found': extract_comments(query)})
class TodoQuickPanelCommand(sublime_plugin.WindowCommand):
"""Show relevant TODOs in a Quick Panel.
"""
positions = []
def run(self, found):
"""Extract the comments and populate and Quick Panel with the results.
"""
items = []
for path, comments in found.items():
for c in comments:
self.positions.append((path, c['position']))
if c['assignee']:
heading = '{0}({1})'.format(c['category'], c['assignee'])
else:
heading = c['category']
items.append([heading, c['message'], os.path.basename(path)])
if items:
self.window.show_quick_panel(items, self.navigate)
else:
sublime.active_window().active_view().set_status(
'TODOView', 'TODOView: no matches found')
def navigate(self, idx):
"""Navigate the TODO comment.
"""
if idx < 0:
return
f, p = self.positions[idx]
self.window.open_file(
'{0}:{1}:{2}'.format(f, p[0] + 1, p[1]), sublime.ENCODED_POSITION)
```
|
{
"source": "jdkato/txtlint",
"score": 3
}
|
#### File: comments/in/1.py
```python
def FIXME():
"""
FIXME:
"""
print("""
FIXME: This should *not* be linted.
""")
# XXX: This should be flagged!
NOTE = False # XXX:
XXX = True # NOTE:
r"""
NOTE:
XXX:
"""
def NOTE():
'''
NOTE:
'''
XXX = '''
XXX: This should *not* be linted.
'''
def foo(self):
"""NOTE This is the start of a block.
TODO: Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25. GMT
"""
invalid_date = 'FIXME: Mon, 28 May 999999999999 28:25:26 GMT'
```
|
{
"source": "JDKdevStudio/Taller30Abril",
"score": 4
}
|
#### File: JDKdevStudio/Taller30Abril/12.CaidaLibre.py
```python
from math import sqrt
def get_Numero(prompt):
while True:
try:
return float(input(prompt))
except ValueError:
print("Debes escribir un número")
Altura = get_Numero("Escríba la altura del objeto en metros")
print("El tiempo de caída en segundos es de:", sqrt(2 * Altura / 9.81))
```
#### File: JDKdevStudio/Taller30Abril/29.Numero2Decimal.py
```python
def insert_dot(string, index):
return string[:index] + '.' + string[index:]
print(insert_dot(input("Ingrese un numero para convertirlo en decimal= "), int(input("Escriba en que posicion (izquierda a derecha) desa introducir la notación"))))
```
#### File: JDKdevStudio/Taller30Abril/2.NumeroalCuadrado.py
```python
def get_Numero(prompt):
while True:
try:
return float(input(prompt))
except ValueError:
print("Debes escribir un número")
Numero = get_Numero("Escriba un número para elevar al cuadrado")
print("el número", Numero,"Elevado al cuadrado es", Numero**2)
```
#### File: JDKdevStudio/Taller30Abril/3.Suma2numeros.py
```python
def get_Numero(prompt):
while True:
try:
return int(input(prompt))
except ValueError:
print("Debes escribir un número")
n1, n2 = get_Numero("Escriba el primer número"), get_Numero("Ahora escriba el segundo número")
print("La suma de", n1, "y", n2, "es", n1+n2)
```
#### File: JDKdevStudio/Taller30Abril/43.Print10NumberImpar.py
```python
count = 0
def to_infinity():
index = 0
while True:
yield index
index += 1
for x in to_infinity():
if x % 2 != 0:
print(x)
count += 1
if count == 10:
break
```
#### File: JDKdevStudio/Taller30Abril/50.NnumerosSuma&Promedio.py
```python
def get_Numero(prompt):
while True:
try:
return int(input(prompt))
except ValueError:
print("Debes escribir un número")
cantidad, sumatoria, conteo = 0, 0, get_Numero("Escriba la cantidad de veces a contar")
print("Ingrese los valores numericos que desee:")
while True:
conteo -= 1
x = get_Numero(f"Escriba el número #:{conteo}")
sumatoria += x
cantidad += 1
if conteo == 0:
promedio = sumatoria / cantidad
print("la cantidad de valores que digito fue", cantidad)
print("la sumatoria entre estos valores fue de", sumatoria, "y su promedio fue de", round(promedio))
break
```
#### File: JDKdevStudio/Taller30Abril/56.CustonFactorNum.py
```python
def factor(n):
cant = 0
for x in range(1, n + 1):
if n % x == 0:
cant += 1
return cant
y = int(input("Ingrese un numero: "))
print(factor(y))
```
#### File: JDKdevStudio/Taller30Abril/7.VentaConIVA.py
```python
def get_Numero(prompt):
while True:
try:
return int(input(prompt))
except ValueError:
print("Debes escribir un el precio como número entero")
PrecioItem = get_Numero("Escriba el valor del producto")
PrecioIva = PrecioItem * 0.19
PrecioTotal = PrecioItem + PrecioIva
print(f"El item a comprar vale {PrecioItem}, el valor del iva es de {PrecioIva}, por lo tanto el precio total es de: {PrecioTotal}")
```
|
{
"source": "jdkelly199/ros-sharp",
"score": 2
}
|
#### File: obs_test/scripts/inputObsLocation.py
```python
import rospy
import numpy
from geometry_msgs.msg import Point, Pose, Quaternion
from std_msgs.msg import String
from Xlib import display
from Xlib.ext import randr
def getViconData():
return([0])
def calculateTransform(ref, vic):
print(ref)
def inputObsLocation():
# initialize node
rospy.init_node('calibrate', anonymous = True)
#### Setup obsLocation Publisher
rate = rospy.Rate(10) # 10hz
#Transform Poses
refTransforms = []
viconTransforms = []
nextRefPublisher = rospy.Publisher("/sendRef", String, queue_size = 5)
endRefPublisher = rospy.Publisher("/endRef", String, queue_size = 5)
while not rospy.is_shutdown():
print("waiting")
rospy.wait_for_message("/startCal", String)
print("started calibration")
msg = String("next")
print("on to next")
nextRefPublisher.publish(msg)
while(len(refTransforms) < 10):
print("waiting for next")
refTransforms.append(rospy.wait_for_message("nextTransform", Pose))
viconTransforms.append(getViconData())
print("recieved")
endRefPublisher.publish(msg)
calculateTransform(refTransforms, viconTransforms)
#### Initialize point msg every loop
#msg.x = 0.0
#msg.y = 0.0
#msg.z = 0.0
#### Prompt User for Obs Location
#msg.x = float(input("Please Enter The X Position of the Obstacle: "))
#msg.y = float(input("Please Enter The Y Position of the Obstacle: "))
#msg.z = float(input("Please Enter The Z Position of the Obstacle: "))
#### Publish msg
#rospy.loginfo([pos_x, pos_y])
#obsLocationPublisher.publish(msg)
#rate.sleep()
if __name__ == '__main__':
inputObsLocation()
```
|
{
"source": "jdkennedy45/Discord-Bot",
"score": 3
}
|
#### File: jdkennedy45/Discord-Bot/Games.py
```python
import random
import asyncio
import re
import discord
from discord.ext import commands
from Users import Users
from random import choices
def get_hangman_art():
# prepare array of hangman art
art_array = []
with open("db_and_words\hangmen.txt") as my_file:
for line in my_file:
art_array.append(line)
# convert respective list index-ranges to string with ''.join
# the resulting art_array[0-6] will represent each stage of hangman
art_array[0] = "".join(art_array[0:6])
art_array[1] = "".join(art_array[7:13])
art_array[2] = "".join(art_array[14:20])
art_array[3] = "".join(art_array[21:27])
art_array[4] = "".join(art_array[28:34])
art_array[5] = "".join(art_array[35:41])
art_array[6] = "".join(art_array[42:49])
return art_array
def get_hangman_words():
# only read words file once so we won't have to re-open the file every game call
words_file = open("db_and_words\words.txt", "r")
words = words_file.readlines()
words_file.close()
return words
def battle_decider(fighter1, fighter2, fighter1_weight, fighter2_weight):
# choices function maps a selection to a probability, and selects one choice based off probability
winner = choices([fighter1, fighter2], [fighter1_weight, fighter2_weight])
print(winner)
# choices function returning [1] or [2] so use regex to pull the integers out
return int(re.findall("\d+", str(winner))[0])
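# Illustrative note (added, not in the original source): with these weights,
# battle_decider(1, 2, 70, 30) returns 1 roughly 70% of the time and 2 roughly
# 30% of the time, because choices() weights the two outcomes accordingly.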
def pick_word(cat):
if cat == 1:
random_word = random.choice(all_words[0:180])
category = "Country name"
elif cat == 2:
random_word = random.choice(all_words[181:319])
category = "Farm"
elif cat == 3:
random_word = random.choice(all_words[320:389])
category = "Camping"
elif cat == 4:
random_word = random.choice(all_words[390:490])
category = "Household items/devices"
elif cat == 5:
random_word = random.choice(all_words[491:603])
category = "Beach"
elif cat == 6:
random_word = random.choice(all_words[604:648])
category = "Holidays"
elif cat == 7:
random_word = random.choice(all_words[649:699])
category = "US States"
elif cat == 8:
random_word = random.choice(all_words[700:998])
category = "Sports & Hobbies"
else:
random_word = random.choice(all_words[649:699])
category = "US States"
# quick band-aid fix to truncate CR in text file, COMING BACK LATER TO FIX
length = len(random_word) - 1 # to remove carriage return, I'm not using unix format to make the list
random_word = random_word[:length] # truncate word with [:length] cause of carriage return in text file...
underscore_sequence = list("") # this will be our list of underscores
# it will be consistently replaced by guesses
# fill the underscore_sequence list with underscore underscore_sequencelate of the correct word
for x in random_word:
if x == " ":
underscore_sequence += " " # in the case of 2-word phrases, need to move everything over
elif x == "'":
underscore_sequence += " '"
else:
underscore_sequence += " \u2581" # if not a space, add: \u2581, a special underscore character.
# using to replace by correctly guessed letters
return random_word.upper(), category, underscore_sequence
def add_guess_to_list(guess, guessed): # accepts guess and list of all guesses
if len(guess.clean_content) > 1: # don't want to add whole word to guess list
all_guessed = "".join(map(str, guessed))
return guessed, all_guessed
guessed.extend(guess.clean_content.upper()) # add last guess to the list of guessed words
guessed.extend(" ") # add space to guessed list
all_guessed = "".join(map(str, guessed)) # messy syntax, convert the list into a string so bot can print it
return guessed, all_guessed
def find_matches(guess, correct_word, underscore_sequence):
index = 0
num_matches = 0
for x in correct_word:
index += 1
if x == " ":
index += 2
# if any matches, we need to replace underscore(s) in the sequence
# and increase the number of matches for the loop
if guess.clean_content.upper() == x:
# convulted index scheme due to underscore_sequence format
underscore_sequence[index * 2 - 1] = guess.clean_content.upper()
num_matches += 1
return num_matches, underscore_sequence
def get_slots_emoji_list():
with open("db_and_words\\emoji_names.txt", "r") as lines:
high_tier = []
mid_tier = []
low_tier = []
current_tier = ""
for line in lines:
line = line.rstrip("\n")
if line == "HIGH-TIER-LIST":
current_tier = "high"
continue
if line == "MEDIUM-TIER-LIST":
current_tier = "med"
continue
if line == "LOW-TIER-LIST":
current_tier = "low"
continue
if current_tier == "high":
high_tier.append(line)
elif current_tier == "med":
mid_tier.append(line)
elif current_tier == "low":
low_tier.append(line)
return high_tier, mid_tier, low_tier
# short decorator function declaration, confirm that command user has an account in database
def has_account():
def predicate(ctx):
user = Users(ctx.message.author.id)
if user.find_user() == 0:
return False
else:
return True
return commands.check(predicate)
# store data from text files into memory (emoji lists, hangman words, hangman art)
high_tier_emotes, mid_tier_emotes, low_tier_emotes = get_slots_emoji_list()
all_words = get_hangman_words()
hangmen = get_hangman_art()
class Games:
def __init__(self, client):
self.client = client
"""ROB FUNCTION"""
@has_account()
@commands.cooldown(1, 3600, commands.BucketType.user)
@commands.command(
name="rob",
description="Steal money from others",
brief="can use =steal",
aliases=["thief", "thieve", "ROB", "steal", "mug"],
pass_context=True,
)
async def rob(self, context, *args):
# create instance of the user starting the robbery
robber = Users(context.message.author.id)
# declare 30% fail chance, used to calculate chance of failing rob
fail_chance = 30
# pick a random user in the server to rob
# target variable will function as the victim user's "english" name
target = random.choice(list(context.message.server.members))
# make an instance of the target
victim = Users(target.id)
victim_id = target.id
counter = 1
# if they specified a rob target, change the random target to their specified target
if args:
try:
# use regex to extract only the user-id from the user targeted
victim_id = re.findall("\d+", args[0])[0]
victim = Users(victim_id)
# get_member() returns the "member" object that matches an id provided
target = context.message.server.get_member(victim_id)
# higher fail chance, 35%, if they want to specify a rob target
fail_chance = 35
# if the target doesn't have an account, change fail chance back to 30% and the target will reroll next loop
if victim.find_user() == 0:
fail_chance = 30
await self.client.say(
context.message.author.mention + " Your rob target doesn't have an account."
"\n**Rerolling** rob target now!"
)
if robber.get_user_peace_status() == 1:
fail_chance = 30
await self.client.say(
context.message.author.mention
+ " You are in :dove: **peace mode** :dove: and cannot use =rob @user."
"\n**Rerolling** rob target now!"
)
# pick a random user in the server to rob
# target variable will function as the victim user's "english" name
target = random.choice(list(context.message.server.members))
# make an instance of the target
victim = Users(target.id)
victim_id = target.id
elif victim.get_user_peace_status() == 1:
fail_chance = 30
await self.client.say(
context.message.author.mention
+ " That target is in :dove: **peace mode** :dove: and exempt to =rob @user."
"\n**Rerolling** rob target now!"
)
# pick a random user in the server to rob
# target variable will function as the victim user's "english" name
target = random.choice(list(context.message.server.members))
# make an instance of the target
victim = Users(target.id)
victim_id = target.id
except:
pass
# while the user to rob is the robber, re-roll the target
# while the user to rob does not have an account in the database, re-roll the target
while victim_id == context.message.author.id or victim.find_user() == 0:
# only try 120 members in the user's server
# otherwise if the user was the sole player with an account in the discord server, infinite while loop
# this part is inefficient, but it's the only way I can think of right now with discord's functionality
if counter == 120:
# no users were found to rob if we hit 120 in the counter
# calculate random integer 1-100
# if the result is within 1 through fail chance, they failed the rob, so take bail money and return
if fail_chance >= random.randint(1, 100) >= 1:
robber_level = robber.get_user_level(0)
bail = int(robber_level * 8.4)
robber.update_user_money(bail * -1)
msg = (
"<a:policesiren2:490326123549556746> :oncoming_police_car: "
"<a:policesiren2:490326123549556746>\n<a:monkacop:490323719063863306>"
"\u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B"
"<a:monkacop:490323719063863306>\n" + "Police shot you in the process.\n"
"You spent **$" + str(bail) + "** to bail out of jail."
)
# embed the rob failure message, set thumbnail to 80x80 of a "police siren" gif
em = discord.Embed(description=msg, colour=0x607D4A)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/490326123549556746.gif?size=80")
await self.client.say(embed=em)
return
else:
# if they passed the fail test, give the user a small prize and return
bonus_prize = int(robber.get_user_level(0) * 29.3)
robber.update_user_money(bonus_prize)
msg = (
"**No users found to rob...** "
"\nOn the way back to your basement, you found **$"
+ str(bonus_prize)
+ "** "
+ "<:poggers:490322361891946496>"
)
# embed the rob confirmation message, set thumbnail to 40x40 of a "ninja" gif
em = discord.Embed(description=msg, colour=0x607D4A)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/419506568728543263.gif?size=40")
await self.client.say(embed=em)
return
target = random.choice(list(context.message.server.members))
# create a new instance of victim each loop
# in order to check if the reroll has an account in database
victim = Users(target.id)
victim_id = target.id
counter += 1
# calculate random integer 1-100
# if the result is within 1 through fail chance, they failed the rob
if fail_chance >= random.randint(1, 100) >= 1:
robber_level = robber.get_user_level(0)
bail = int(robber_level * 10.4)
robber.update_user_money(bail * -1)
msg = (
"<a:policesiren2:490326123549556746> :oncoming_police_car: "
"<a:policesiren2:490326123549556746>\n<a:monkacop:490323719063863306>"
"\u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B \u200B"
"<a:monkacop:490323719063863306>\n**" + str(target.display_name) + "**"
" dodged"
" and the police shot you"
" in the process.\nYou spent **$" + str(bail) + "** to bail out of jail."
)
# embed the rob failure message, set thumbnail to 80x80 of a "police siren" gif
em = discord.Embed(description=msg, colour=0x607D4A)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/490326123549556746.gif?size=80")
await self.client.say(embed=em)
return
# we passed the dodge check, so reward thief with prize and bonus prize
victim_money = victim.get_user_money(0)
victim_level = victim.get_user_level(0)
robber_level = robber.get_user_level(0)
# the victim will only lose the prize, not the bonus prize
bonus_prize = int(robber_level * 29.3)
# the prize will begin by scaling by victim's level
prize = int(victim_level * 9.4)
# if prize greater than the robber's maximum prize amount, decrease the standard prize to compensate
if prize > int(robber_level * 9.4):
prize = int(robber_level * 9.4)
# if prize less than the robber's maximum prize amount, increase the bonus prize to compensate
if prize < int(robber_level * 9.4):
bonus_prize += int(robber_level * 9.4 - prize)
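# Worked example of the scaling above: with victim_level=3 and robber_level=10,
# prize starts at int(3 * 9.4) = 28; the robber's cap is int(10 * 9.4) = 94, so the
# shortfall of 66 is shifted into bonus_prize (293 + 66 = 359). The victim only ever
# loses the capped prize, never the bonus.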
# balancing mechanic, don't let victims lose any more money when they have less money than -50x their level
if not victim_money < (victim_level * -50):
# subtract prize from victim
victim.update_user_money(prize * -1)
# reward robber with prize and bonus prize
robber.update_user_money(prize + bonus_prize)
msg = (
"**Success!** <:poggers:490322361891946496> "
"\nRobbed **$"
+ str(prize)
+ "** (+**$"
+ str(bonus_prize)
+ "**) from **"
+ str(target.display_name)
+ "**"
)
# embed the rob confirmation message, set thumbnail to 40x40 of a "ninja" gif
em = discord.Embed(description=msg, colour=0x607D4A)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/419506568728543263.gif?size=40")
await self.client.say(embed=em)
"""TOURNAMENT BATTLE FUNCTION"""
@has_account()
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.command(
name="tournament", aliases=["TOURNAMENT", "tourney", "TOURNEY"], pass_context=True,
)
async def enter_daily_tournament(self, context):
# the bulk work of this feature is when the results are calculated from daily_maintenance.py
# create instance of user who wants to enter the daily, server-specific colosseum tournament
fighter = Users(context.message.author.id)
# update their tourney_server_id entry to be the server they executed the command on
msg = fighter.update_user_tourney_server_id(context.message.server.name, context.message.server.id)
# embed the tourney registration confirmation message, set thumbnail to 40x40 of the respective server's icon
em = discord.Embed(description=msg, colour=0x607D4A)
thumb_url = "https://cdn.discordapp.com/icons/{0.id}/{0.icon}.webp?size=40".format(context.message.server)
em.set_thumbnail(url=thumb_url)
await self.client.say(embed=em)
"""1v1 BATTLE FUNCTION"""
@has_account()
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.command(
name="fight",
description="Battle another user in your server",
brief='can use "fight @user X --X being amount to bet"',
aliases=["battle", "BATTLE", "FIGHT", "duel", "DUEL"],
pass_context=True,
)
async def battle_user(self, context, *args):
# try/except block to check argument syntax
try:
if not args:
msg = await self.client.say(
context.message.author.mention
+ '```ml\nuse =fight like so: "=fight @user X" -- X being integer amount to bet```'
)
await asyncio.sleep(5)
await self.client.delete_message(msg)
return
# retrieve how much the fighter is betting on the battle
if len(args) == 2:
# bet will always be second argument
bet = int(args[1])
if bet < 1:
await self.client.say("Bet can't be negative...")
return
else:
await self.client.say("No bet specified, defaulting to **$10**\n ** **")
bet = 10
# if the user still used syntax incorrectly
except:
await self.client.say(
context.message.author.mention
+ '```ml\nuse =fight like so: "=fight @user X" -- X being integer amount to bet```'
)
return  # bail out, since a valid bet could not be parsed
# make instance of user for user initiating fight
fighter1 = Users(context.message.author.id)
# retrieve battle target
target = args[0]
# use regex to extract only the user-id from the user targeted
fighter2_id = int(re.findall(r"\d+", target)[0])
fighter2 = Users(fighter2_id)
# check if targeted user has account
if fighter2.find_user() == 0:
await self.client.say(
context.message.author.mention + " Your fighting target doesn't have an account."
"\nTell them to use **=create** to make one."
)
return
# check if both users have enough money
if fighter1.get_user_money(0) < bet or fighter2.get_user_money(0) < bet:
await self.client.say(
context.message.author.mention + " Either you or the target doesn't have enough money..."
)
return
# give target the prompt to ask if they will accept the challenge
alert_msg = await self.client.say(
target
+ ", you were challenged for **$"
+ str(bet)
+ "**\n:crossed_swords: Type **yes** to accept this battle. :crossed_swords: "
)
# made this check function with the help of discord API documentation
# it will be called below to check if the confirmation response to fight is from fighter2
def fighter2check(msg):
return int(msg.author.id) == fighter2_id
# (try to) wait for a battle acceptance from other user
try:
confirm = await self.client.wait_for_message(timeout=60, check=fighter2check)
await self.client.delete_message(alert_msg)
if confirm.clean_content.upper() == "YES":
await self.client.delete_message(confirm)
# have to use 2 messages to enlarge the emojis
msg = (
context.message.author.mention
+ " vs "
+ args[0]
+ " for **$"
+ str(bet)
+ "**\nFight will conclude in 10 seconds..."
)
# embed the duel alert message, set thumbnail to a "nunchuck frog" gif of size 64x64
em = discord.Embed(title="", colour=0x607D4A)
em.add_field(name="DUEL ALERT", value=msg, inline=True)
em.set_thumbnail(url="https://cdn.discordapp.com/emojis/493220414206509056.gif?size=64")
await self.client.say(embed=em)
await asyncio.sleep(10)
# get the stats of each fighter
# algorithm for calculating a fighter's stats in duels: (item score + user level*2 + 20)
f1_stats = fighter1.get_user_item_score() + (fighter1.get_user_level(0) * 2) + 20
f2_stats = fighter2.get_user_item_score() + (fighter2.get_user_level(0) * 2) + 20
total = f1_stats + f2_stats
f1_weight = f1_stats / total
f2_weight = f2_stats / total
# decide winner with custom function
# if it returns 1, fighter 1 won
# if it returns 2, fighter 2 won
winner = battle_decider(1, 2, f1_weight, f2_weight)
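# battle_decider is defined elsewhere in this bot; a minimal, hypothetical version
# consistent with how it is called here would be a weighted random draw:
#
#   def battle_decider(f1, f2, f1_weight, f2_weight):
#       return f1 if random.random() < f1_weight else f2
#
# e.g. f1_stats=60 vs f2_stats=40 gives fighter 1 a roughly 60% chance of winning.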
# check if they tried to exploit the code by spending all their money during the battle
if fighter1.get_user_money(0) < bet or fighter2.get_user_money(0) < bet:
await self.client.say(
context.message.author.mention + " One of you spent money while battling..."
)
return
# check who the winner was returned as
# update account balances respectively
if winner == 1:
msg = context.message.author.mention + " won **$" + str(bet) + "** by defeating " + target
# embed the duel results message
em = discord.Embed(description=msg, colour=0x607D4A)
await self.client.say(embed=em)
# distribute money properly
fighter1.update_user_money(bet)
fighter2.update_user_money(bet * -1)
elif winner == 2:
msg = target + " won **$" + str(bet) + "** by defeating " + context.message.author.mention
# embed the duel results message
em = discord.Embed(description=msg, colour=0x607D4A)
await self.client.say(embed=em)
# distribute money properly
fighter1.update_user_money(bet * -1)
fighter2.update_user_money(bet)
else:
await self.client.say("You rejected the battle! " + target)
# if the target never responded
except:
await self.client.say("**Battle request ignored...** <a:pepehands:485869482602922021>")
"""FLIP COIN FUNCTION"""
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.command(
name="flip",
description="Flip a coin to earn social status.",
brief='can use "=flip" or "=flip X", with X being heads or tails',
aliases=["f", "flpi", "FLIP", "F"],
pass_context=True,
)
async def flip_coin(self, context, *args):
result = random.randint(0, 1) # flipping in "binary"
win = 0
# first, check if they specified a bet and they have enough money for it
# this try/catch block will simply pass if they did not specify a bet
try:
user = Users(context.message.author.id)
# Convenient way to flip all
if args[1] == "all":
bet = user.get_user_money(0)
else:
bet = int(args[1])
# pass 0 to return integer version of money, see USERS.PY function
if bet > user.get_user_money(0) or bet < 1:
error_msg = await self.client.say(
"You don't have enough money for that bet..."
" <a:pepehands:485869482602922021> " + context.message.author.mention
)
await asyncio.sleep(6)
await self.client.delete_message(error_msg)
return
except:
pass
gif = await self.client.say(
"https://media1.tenor.com/images/938e1fc4fcf2e136855fd0e83b1e8a5f/tenor.gif?itemid=5017733"
)
await asyncio.sleep(3)
await self.client.delete_message(gif)
# check if they specified a guess of heads or tails
# process if they won or not
try:
if args[0] in ["heads", "HEADS"]:
if result == 1:
msg = "<:heads:486705167643967508> Result is **Heads**! You win! <a:worryHype:487059927731273739>"
win = 1
else:
msg = "<:heads:486705184370589718> Result is **Tails**! You lost. <a:pepehands:485869482602922021>"
elif args[0] in ["tails", "TAILS"]:
if result == 1:
msg = "<:heads:486705167643967508> Result is **Heads**! You lost. <a:pepehands:485869482602922021>"
else:
msg = "<:heads:486705184370589718> Result is **Tails**! You win! <a:worryHype:487059927731273739>"
win = 1
else:
error_msg = await self.client.say(
"Did you mean heads or tails? Try **=flip heads** or **=flip tails**."
)
await asyncio.sleep(6)
await self.client.delete_message(error_msg)
return
except:
# no arguments provided at all. so just give a result
if result == 1:
msg = "<:heads:486705167643967508> Result is **Heads**!"
else:
msg = "<:heads:486705184370589718> Result is **Tails**!"
# if they specified a "guess" and "bet" that was valid, check if they won
# note this will only pass through if "bet" was assigned through the earlier try/catch
try:
if win == 1:
# add the bet amount to the user's account if they win
msg2 = "\n" + user.update_user_money(bet)
else:
# remove user's bet from their account if they lose
msg2 = "\n" + user.update_user_money(bet * -1)
# if they have $0 after that flip, give a donation dollar to discourage account re-creation
# pass in 0 for get_user_money to return the money as integer, SEE USERS.PY
if user.get_user_money(0) == 0:
msg2 += "\n** **\n_Mission failed. We'll get 'em next time. Take this **$1**._"
msg2 += "\n" + user.update_user_money(1)
except:
pass
try:
# embed the flip results message with money won and send
em = discord.Embed(description=msg + msg2, colour=0x607D4A)
await self.client.say(context.message.author.mention, embed=em)
except:
# embed the flip results message and send
em = discord.Embed(description=msg, colour=0x607D4A)
await self.client.say(context.message.author.mention, embed=em)
"""HANGMAN main function"""
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(
name="hangman",
description="Guess the word in order to survive.",
brief='can use "=hangman", type "stop" or "cancel" to end game',
aliases=["hm", "hang", "HM", "HANGMAN"],
pass_context=True,
)
async def hangman(self, context, *args):
# initialize message to be printed if user wants category list
hm_help = (
"```fix\n1. Country name\n2. Farm\n3. Camping\n4. Household items/devices\n"
"5. Beach\n6. Holidays\n7. US States\n8. Sports & Hobbies```"
)
wrong_guesses = 0 # global running count of incorrect guesses
guessed_letters = [""] # list of letters guessed so far
# pick starting word with a category, also make the string of underscores to replace later
# check if they want to list the categories
if args:
if args[0] in ("help", "HELP", "categories", "cats", "h"):
await self.client.say(
context.message.author.mention + " Categories:\n" + "```fix\n1. Country name\n"
"2. Farm\n3. Camping\n"
"4. Household items/devices\n"
"5. Beach\n6. Holidays\n"
"7. US States\n"
"8. Sports & Hobbies```"
)
return
try:
correct_word, category, underscore_sequence = pick_word(int(args[0]))
except:
await self.client.say("Use a category number! (**Ex for Beach**: =hm 5)")
return
# if no category was specified in argument by user...
else:
# pick random category 1-8
rand_category = random.randint(1, 8)
correct_word, category, underscore_sequence = pick_word(rand_category)
# print the hangman starting interface and ascii setup
# use ** ** for empty line, discord doesn't allow empty messages.
# also, using "".join because discord api can't print lists.
# we could cast, but the format would be unfriendly for the game.
cat_msg = await self.client.say(
context.message.author.mention + " Word category is: **```fix\n" + category + "\n```**"
)
art_msg = await self.client.say("\n** **\n" + hangmen[0] + "\n** **\n" + "".join(underscore_sequence))
counter = 0
while True: # main game loop
guess_prompt_msg = await self.client.say("*Guess a letter or the entire word now...*")
guess_msg = await self.client.wait_for_message(
author=context.message.author, timeout=60
) # wait for user's guess_msg
# the already_guessed flag drives a loop that re-prompts whenever the user repeats a previous guess
already_guessed = 1
while already_guessed == 1: # loop that will exit immediately if letter guess_msg isn't a repeat
if guess_msg.clean_content.upper() in str("".join(guessed_letters)):
await self.client.delete_message(guess_msg)
already_guessed_msg = await self.client.say(
"\n*You already tried that." " Guess a different letter now...*"
)
# wait for user's guess_msg now
guess_msg = await self.client.wait_for_message(author=context.message.author, timeout=30)
await self.client.delete_message(already_guessed_msg)
else:
already_guessed = 0
"""RUN WIN CHECKS AND CANCEL CHECKS NOW"""
# run conditionals to check if they guessed entire word or they used a cancel keyword
print(guess_msg.clean_content.upper() + " and correct word: " + correct_word) # console print
if guess_msg.clean_content.upper() == correct_word:
await self.client.delete_message(cat_msg)
await self.client.delete_message(art_msg)
await self.client.delete_message(guess_prompt_msg)
await self.client.delete_message(guess_msg)
# pick_result_msg, underscore_seq_msg, guessed_list_msg will only exist if the game has gone at least 1 loop
if counter > 0:
await self.client.delete_message(pick_result_msg)
await self.client.delete_message(underscore_seq_msg)
await self.client.delete_message(guessed_list_msg)
await self.client.say(hangmen[wrong_guesses])
# prepare win message string & embed it
win_msg = (
"**Correct word pick** <a:worryHype:487059927731273739> "
+ " Correct word was: "
+ "**"
+ correct_word.upper()
+ "**\n"
)
# add WINNINGS to user's bank account now
user = Users(context.message.author.id)
prize = user.get_user_level(0) * 8
win_msg += "Won **$" + str(prize) + "**... " + user.update_user_money(prize)
em = discord.Embed(description=win_msg, colour=0x607D4A)
await self.client.say(context.message.author.mention, embed=em)
return
if guess_msg.clean_content.upper() in ["STOP", "CANCEL"]:
await self.client.delete_message(cat_msg)
await self.client.delete_message(art_msg)
await self.client.delete_message(guess_prompt_msg)
await self.client.delete_message(guess_msg)
# pick_result_msg, underscore_seq_msg, guessed_list_msg will only exist if the game has gone at least 1 loop
if counter > 0:
await self.client.delete_message(pick_result_msg)
await self.client.delete_message(underscore_seq_msg)
await self.client.delete_message(guessed_list_msg)
await self.client.say(
"**Cancelled** the game!! <a:pepehands:485869482602922021> Correct word was: "
"**" + correct_word.upper() + "** " + context.message.author.mention
)
return
# quick win check, check for any underscores left to fill.
# if unknown_letters ends up as 0 for this iteration, then there are no letters left to guess.
num_matches, underscore_sequence = find_matches(guess_msg, correct_word, underscore_sequence)
unknown_letters = 0
for x in underscore_sequence:
if x == "\u2581": # if there is a blank underscore , the letter is still unknown to the user
unknown_letters += 1
if unknown_letters == 0:
await self.client.delete_message(cat_msg)
await self.client.delete_message(art_msg)
await self.client.delete_message(guess_prompt_msg)
await self.client.delete_message(guess_msg)
# pick_result_msg, underscore_seq_msg, guessed_list_msg will only exist if the game has gone at least 1 loop
if counter > 0:
await self.client.delete_message(pick_result_msg)
await self.client.delete_message(underscore_seq_msg)
await self.client.delete_message(guessed_list_msg)
await self.client.say(hangmen[wrong_guesses])
# prepare win message string & embed it
win_msg = (
"You **won** the game!!"
+ " <a:worryHype:487059927731273739> Correct word was: "
+ "**"
+ correct_word.upper()
+ "**\n"
)
# add WINNINGS to user's bank account now
user = Users(context.message.author.id)
prize = user.get_user_level(0) * 8
win_msg += "Won **$" + str(prize) + "**... " + user.update_user_money(prize)
em = discord.Embed(description=win_msg, colour=0x607D4A)
await self.client.say(context.message.author.mention, embed=em)
return
# now clear all messages besides category message (cat_msg variable)
await self.client.delete_message(art_msg)
await self.client.delete_message(guess_prompt_msg)
await self.client.delete_message(guess_msg)
# pick_result_msg, underscore_seq_msg, guessed_list_msg will only exist if the game has gone at least 1 loop
if counter > 0:
await self.client.delete_message(pick_result_msg)
await self.client.delete_message(underscore_seq_msg)
await self.client.delete_message(guessed_list_msg)
# if user's guess has zero matches in the correct word
if num_matches == 0:
wrong_guesses += 1 # no letters matched, so they guessed a wrong letter
if len(guess_msg.clean_content) == 1:
pick_result_msg = await self.client.say("**Wrong letter pick** <a:pepehands:485869482602922021>")
else:
pick_result_msg = await self.client.say("**Wrong word pick** <a:pepehands:485869482602922021>")
# if user's guess has any matches found in the correct word
else:
pick_result_msg = await self.client.say("**Correct letter pick** <a:worryHype:487059927731273739>")
# don't need "correct word pick" next because that would trigger
# in the conditional right after the guess is taken
# print the ascii art corresponding to wrong guesses
if wrong_guesses < 6:
art_msg = await self.client.say(hangmen[wrong_guesses])
elif wrong_guesses == 6:
await self.client.delete_message(cat_msg)
await self.client.delete_message(pick_result_msg)
await self.client.say(hangmen[6])
losing_msg = (
"\nYou were **hanged**! <a:pepehands:485869482602922021> "
+ "The word was: "
+ "**"
+ correct_word
+ "**\n"
)
em = discord.Embed(description=losing_msg, colour=0x607D4A)
await self.client.say(context.message.author.mention, embed=em)
return
# print underscores/letters, our main interface
underscore_seq_msg = await self.client.say("** **\n**" + "".join(underscore_sequence) + "**")
# add last guessed letter to our guessed-so-far list
guessed_letters, all_guessed = add_guess_to_list(guess_msg, guessed_letters)
# print all letters guessed so far
guessed_list_msg = await self.client.say("** ```fix\nGuessed so far: " + all_guessed + "``` **")
# add 1 to the main game loop's counter
counter += 1
""" Slot Machine """
@has_account()
@commands.cooldown(15, 86400, commands.BucketType.user)
@commands.command(
name="slot",
description="Slot Machine game",
aliases=["machine", "pachinko", "slots", "spin", "reel"],
pass_context=True,
)
async def slot_machine(self, context):
# Create a user instance
user = Users(context.message.author.id)
# Check if user has enough money. Ticket costs $10
ticket_cost = 10
if user.get_user_money(0) < ticket_cost:
msg = await self.client.say(
context.message.author.mention + " You don't have enough money...\n"
" ticket_cost costs ${}!".format(ticket_cost)
)
await asyncio.sleep(5)
await self.client.delete_message(msg)
return
# Deduct ticket cost from user
user.update_user_money(ticket_cost * -1)
# High tier should have the lowest chance possible
def get_tier():
""" High tier => 7%
Mid Tier => 28%
Low Tier => 65%
"""
tier = ""
# Get a random real value in the range 0.01 to 100.00
result = (random.randrange(1, 10001)) / 100
if result <= 7.0:
# High Tier
tier = "high"
elif result > 7.0 and result <= 35.0:
# Mid Tier
tier = "mid"
elif result > 35.0 and result <= 100.0:
# Low Tier
tier = "low"
return tier
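# Side note: an equivalent way to draw a tier with the same 7/28/65 split would be
# random.choices, e.g.
#   tier = random.choices(["high", "mid", "low"], weights=[7, 28, 65])[0]
# The integer-threshold version above behaves the same; this is only an alternative sketch.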
# This function and get_tier() can probably be merged
def get_emoji(result):
"""Pick a emote from emote tier lists determined by the result
Return a random emote
"""
emote = ""
if result == "high":
emote = random.choice(high_tier_emotes)
elif result == "mid":
emote = random.choice(mid_tier_emotes)
elif result == "low":
emote = random.choice(low_tier_emotes)
return emote
def get_bonus(slot_machine):
"""Getting a jackpot gives user a reward = 500 + bonus
Bonus is determined by the emote tier
High tier = 2000.0
Mid tier = 1000.0
Low tier = 250.0
Getting 2 same emotes also gives user a reward = 120 + bonus
High tier = 230.0
Mid tier = 130.0
Low tier = 0.0
If one emoji is high tier, user is given $50.0
return a list with msg type, reward, and tier
result[0] -> 1 if jackpot, 2 if two equal elements, 0 otherwise
result[1] -> reward
result[2] -> tier
"""
# result list
# default values in case of no bonus
result = [0, 0, ""]
# If all emojis are equal
# Jackpot
if len(set(slot_machine)) == 1:
# Print Jackpot
result[0] = 1
if slot_machine[0] in high_tier_emotes:
result[1] = 500.0 + 2000.0
result[2] = "High"
return result
elif slot_machine[0] in mid_tier_emotes:
result[1] = 500.0 + 1000.0
result[2] = "Mid"
return result
elif slot_machine[0] in low_tier_emotes:
result[1] = 500.0 + 250.0
result[2] = "Low"
return result
# If two emojis inside slot_machine are equal
if len(set(slot_machine)) == 2:
result[0] = 2
temp = [i for i in slot_machine if slot_machine.count(i) > 1]
if temp[0] in high_tier_emotes:
result[1] = 120.0 + 230.0
result[2] = "High"
return result
elif temp[0] in mid_tier_emotes:
result[1] = 120.0 + 130.0
result[2] = "Mid"
return result
elif temp[0] in low_tier_emotes:
result[1] = 120.0
result[2] = "Low"
return result
# If one element is a High Tier emoji
for i in slot_machine:
if i in high_tier_emotes:
result[1] = 50.0
result[2] = "High"
return result
return result
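# Worked example for get_bonus: a spin like [A, A, B] where A is a mid-tier emote has
# two equal elements, so it returns [2, 120.0 + 130.0, "Mid"], i.e. a $250 payout;
# [A, A, A] with A high-tier would return [1, 2500.0, "High"].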
# assign results to 3 different slots
result_1 = get_tier()
result_2 = get_tier()
result_3 = get_tier()
# Get emotes from the tier lists.
slot_1 = get_emoji(result_1)
slot_2 = get_emoji(result_2)
slot_3 = get_emoji(result_3)
# Check for bonus
slot_machine = [slot_1, slot_2, slot_3]
bonus = get_bonus(slot_machine)
# Update users balance
user.update_user_money(bonus[1])
result_msg = f"「 {slot_1} {slot_2} {slot_3} 」"
# Jackpot worry image
em1 = discord.Embed(title="", colour=0x801A06)
em1.set_image(url="https://i.imgur.com/a9pARrC.gif")
await self.client.say(embed=em1)
await asyncio.sleep(1)
# print result
em2 = discord.Embed(title="", description=result_msg, colour=0x801A06)
await self.client.say(embed=em2)
# If bonus
if bonus[1] != 0:
# this assert only surfaces when debugging, since the bot's error handling swallows exceptions at runtime
assert bonus[2] != "" # Make sure there is an actual tier
if bonus[0] == 1:
msg = f"**Jackpot**! <a:worrycash:525200274340577290>\n {bonus[2]} Tier! You won **${bonus[1]}**!"
elif bonus[0] == 2:
msg = f"You got **two** {bonus[2]} Tier! <a:worryHype:487059927731273739>\n You won **${bonus[1]}**!"
elif bonus[1] == 50.0:
msg = f"You got **one** {bonus[2]} Tier! <a:worryHype:487059927731273739>\n You won **${bonus[1]}**!"
em3 = discord.Embed(title="", description=msg, colour=0xFFD700)
await self.client.say(embed=em3)
""" Slots tier list information """
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(
name="tiers",
description="Slot Machine help page",
aliases=["slothelp", "slotshelp", "slottiers", "slotstiers"],
pass_context=True,
)
async def slot_tiers_help(self, context):
msg = " ".join(high_tier_emotes)
msg2 = " ".join(mid_tier_emotes)
msg3 = " ".join(low_tier_emotes)
msg4 = "\
**3** Identical High tier = **$2,500**\n\
**3** Identical Mid tier = **$1,500**\n\
**3** Identical Low tier = **$750**\n\n\
**2** Identical High tier = **$350**\n\
**2** Identical Mid tier = **$250**\n\
**2** Identical Low tier = **$120**\n\n\
**1** of __any__ High tier = **$50**\
"
em = discord.Embed(title="**High-tier emotes**", description=msg, colour=0xFFD700)
await self.client.send_message(context.message.author, embed=em)
em = discord.Embed(title="**Mid-tier emotes**", description=msg2, colour=0xFFD700)
await self.client.send_message(context.message.author, embed=em)
em = discord.Embed(title="**Low-tier emotes**", description=msg3, colour=0xFFD700)
await self.client.send_message(context.message.author, embed=em)
em = discord.Embed(title="**Rewards Information**", description=msg4, colour=0xFFD700)
em.set_thumbnail(url="https://i.imgur.com/a9pARrC.gif")
await self.client.send_message(context.message.author, embed=em)
""" High and Low game """
@has_account()
@commands.cooldown(15, 86400, commands.BucketType.user)
@commands.command(
name="high_low",
description="High and low game. Guess the sum of cards.",
aliases=["hl", "guess", "cards", "card", "CARDS"],
pass_context=True,
)
async def high_and_low(self, context, *args):
# try/except block to check argument syntax
try:
# there should be an argument
if args:
# retrieve bet as first argument
bet = int(args[0])
# if bet is negative, return
if bet < 1:
await self.client.say("Bet can't be negative...")
return
# if no argument provided
else:
await self.client.say(
context.message.author.mention + ", no bet specified, defaulting to **$10** ** **")
bet = 10
except:
await self.client.say(
context.message.author.mention
+ '```ml\nuse =cards like so: "=cards X" -- X being integer amount to bet```'
)
return
# Create a user instance
user = Users(context.message.author.id)
# confirm the user has enough money for the bet
if user.get_user_money(0) < bet:
msg = f", you don't have enough money for that bet...\n"
msg = await self.client.say(context.message.author.mention + msg)
await asyncio.sleep(7)
await self.client.delete_message(msg)
return
# take bet money away
user.update_user_money(bet * -1)
CARDS = {
0: "<:card_none:662372124748546058>",
1: "<:card_one:662081420474449930>",
2: "<:card_two:662373668214669313>",
3: "<:card_three:662084754086166528>",
4: "<:card_four:662085726493605918>",
5: "<:card_five:662086717750247444>",
6: "<:card_six:662088270993162253>",
7: "<:card_seven:662091815087898649>",
8: "<:card_eight:662455814543507456>",
9: "<:card_nine:662092003676389380>"
}
instruction = (
"Three cards for you, three cards for me.\nYou flip one of yours over, and I flip two of mine."
)
initial_hand = f"\n{CARDS[0]} {CARDS[0]} {CARDS[0]}\n{CARDS[0]} {CARDS[0]} {CARDS[0]}"
em1 = discord.Embed(description=instruction + initial_hand, colour=0x607D4A)
em1.set_thumbnail(url="https://cdn.discordapp.com/emojis/618921143163682816.png?v=1")
msg1 = await self.client.say(embed=em1)
cpu_cards, user_cards = get_cards()
await asyncio.sleep(3)
assert len(cpu_cards) == 3
assert len(user_cards) == 3
cpu_hand = f"{CARDS[cpu_cards[0]]} {CARDS[cpu_cards[1]]} {CARDS[0]}"
user_hand = f"{CARDS[user_cards[0]]} {CARDS[0]} {CARDS[0]}"
hand1 = (
f"Dealer's hand is: \u200B \u200B {cpu_hand}\nAnd your hand is: {user_hand}"
)
instruction = f"So, **{context.message.author.display_name}**, will your total be higher or lower than mine?" \
f"\n(*60 seconds to answer, else your money's gone*)\n\n{hand1}\n\nEnter **low** or **high**..."
em2 = discord.Embed(description=instruction, colour=0x607D4A)
em2.set_thumbnail(url="https://cdn.discordapp.com/emojis/618921143163682816.png?v=1")
msg2 = await self.client.say(embed=em2)
# confirm the user's guess
confirm = await self.client.wait_for_message(author=context.message.author, timeout=60)
counter = 3
if confirm:
# while not a valid answer, keep prompting up to 3 times
while confirm.clean_content.upper() != "HIGH" and confirm.clean_content.upper() != "LOW":
if counter == 0:
await self.client.say("Sorry, you've reached your attempt limit. Exiting game.")
return
if counter < 3:
await self.client.delete_message(msg3)
await self.client.delete_message(msg4)
msg3 = await self.client.say("Wrong answer!")
msg4 = await self.client.say(
f"\nEnter **low** or **high**..."
f" You have **{counter}** more attempts before your bet money is lost forever.")
confirm = await self.client.wait_for_message(author=context.message.author, timeout=60)
counter -= 1
cpu_hand = f"{CARDS[cpu_cards[0]]} {CARDS[cpu_cards[1]]} {CARDS[cpu_cards[2]]}"
user_hand = f"{CARDS[user_cards[0]]} {CARDS[user_cards[1]]} {CARDS[user_cards[2]]}"
hand2 = (
f"Dealer's hand is: \u200B \u200B {cpu_hand}\nAnd your hand is: {user_hand}"
)
instruction2 = (
f"You're going with **'{confirm.clean_content}'**, then, {confirm.author.display_name}.\n"
f" Right, let's see what we've got...\n\n{hand2}"
)
# build embed of the hand results and send it
em3 = discord.Embed(description=instruction2, colour=0x607D4A)
em3.set_thumbnail(url="https://cdn.discordapp.com/emojis/618921143163682816.png?v=1")
await self.client.say(embed=em3)
# wait 2 seconds to build suspense
await asyncio.sleep(2)
won, sum_cpu, sum_user = win(cpu_cards, user_cards, confirm.clean_content.upper())
results1 = f"My cards add up to **{sum_cpu}**.\nAnd you have a total of **{sum_user}**.\n\n"
if won:
winnings = get_reward(sum_cpu, sum_user, bet)
results2 = (
f"Congratulations, your guess was right!\nYou won **${winnings - bet}**. "
f"{user.update_user_money(winnings)}."
)
em4 = discord.Embed(description=results1 + results2, colour=0x607D4A)
em4.set_thumbnail(url="https://cdn.discordapp.com/emojis/525200274340577290.gif?size=64")
else:
results2 = (
f"Aw... Sorry, but this match goes to me.\nYou lost **${bet}**. "
f"{user.update_user_money(0)}" # bet was already taken at beginning of function
)
em4 = discord.Embed(description=results1 + results2, colour=0x607D4A)
em4.set_thumbnail(url="https://cdn.discordapp.com/emojis/525209793405648896.gif?size=64")
await self.client.say(embed=em4)
await self.client.delete_message(msg1)
await self.client.delete_message(msg2)
# if we timed out waiting for user to answer
else:
await self.client.delete_message(msg1)
await self.client.delete_message(msg2)
await self.client.say("You didn't answer...")
return
def handle_args(args):
if args:
# One arg: bet amount
if len(args) == 1:
return True, args[0]
# We don't expect more than 2 args
elif len(args) > 1:
return False, None
else:
return False, None
def get_cards():
cards = [1, 2, 3, 4, 5, 6, 7, 8, 9]
random.shuffle(cards)
cpu_cards = cards[0:3]
user_cards = cards[6:]
return cpu_cards, user_cards
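# Because the 9-card deck is shuffled and split as cards[0:3] for the dealer and
# cards[6:] for the player, the middle three cards are discarded and the two hands
# can never share a card; e.g. a shuffle of [4, 9, 2, 7, 1, 5, 3, 8, 6] deals
# [4, 9, 2] to the dealer and [3, 8, 6] to the player.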
def win(cpu_hand, user_hand, user_guess):
sum_cpu_hand = sum(cpu_hand)
sum_user_hand = sum(user_hand)
win = False
if sum_user_hand > sum_cpu_hand and user_guess == "HIGH":
win = True
elif sum_user_hand < sum_cpu_hand and user_guess == "LOW":
win = True
return win, sum_cpu_hand, sum_user_hand
def get_reward(sum_cpu, sum_user, bet):
diff = abs(sum_cpu - sum_user)
return int(bet * 1.5) + diff
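# Payout example: with bet=10, a dealer total of 17 and a player total of 23,
# get_reward returns int(10 * 1.5) + 6 = 21; since the bet was already deducted when
# the game started, the player's net gain is 21 - 10 = 11.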
def setup(client):
client.add_cog(Games(client))
```
|
{
"source": "jdkent/brainiak",
"score": 3
}
|
#### File: tests/eventseg/test_event.py
```python
from brainiak.eventseg.event import EventSegment
from scipy.special import comb
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
def test_create_event_segmentation():
es = EventSegment(5)
assert es, "Invalid EventSegment instance"
def test_fit_shapes():
K = 5
V = 3
T = 10
es = EventSegment(K, n_iter=2)
sample_data = np.random.rand(V, T)
es.fit(sample_data.T)
assert es.segments_[0].shape == (T, K), "Segmentation from fit " \
"has incorrect shape"
assert np.isclose(np.sum(es.segments_[0], axis=1), np.ones(T)).all(), \
"Segmentation from learn_events not correctly normalized"
T2 = 15
sample_data2 = np.random.rand(V, T2)
test_segments, test_ll = es.find_events(sample_data2.T)
assert test_segments.shape == (T2, K), "Segmentation from find_events " \
"has incorrect shape"
assert np.isclose(np.sum(test_segments, axis=1), np.ones(T2)).all(), \
"Segmentation from find_events not correctly normalized"
es_invalid = EventSegment(K)
with pytest.raises(ValueError):
es_invalid.model_prior(K-1)
# ``with`` block is about to end with no error.
pytest.fail("T < K should cause error")
with pytest.raises(ValueError):
es_invalid.set_event_patterns(np.zeros((V, K-1)))
pytest.fail("#Events < K should cause error")
def test_simple_boundary():
es = EventSegment(2)
random_state = np.random.RandomState(0)
sample_data = np.array([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]]) + \
random_state.rand(2, 7) * 10
es.fit(sample_data.T)
events = np.argmax(es.segments_[0], axis=1)
assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
"Failed to correctly segment two events"
events_predict = es.predict(sample_data.T)
assert np.array_equal(events_predict, [0, 0, 0, 1, 1, 1, 1]), \
"Error in predict interface"
def test_event_transfer():
es = EventSegment(2)
sample_data = np.asarray([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])
with pytest.raises(NotFittedError):
seg = es.find_events(sample_data.T)[0]
pytest.fail("Should need to set variance")
with pytest.raises(NotFittedError):
seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
pytest.fail("Should need to set patterns")
es.set_event_patterns(np.asarray([[1, 0], [0, 1]]))
seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
events = np.argmax(seg, axis=1)
assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
"Failed to correctly transfer two events to new data"
def test_weighted_var():
es = EventSegment(2)
D = np.zeros((8, 4))
for t in range(4):
D[t, :] = (1/np.sqrt(4/3)) * np.array([-1, -1, 1, 1])
for t in range(4, 8):
D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
mean_pat = D[[0, 4], :].T
weights = np.zeros((8, 2))
weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
assert np.array_equal(
es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]),\
"Failed to compute variance with 0/1 weights"
weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
true_var = (4 * 0.5 * 12)/(6 - 5/6) * np.ones(2) / 4
assert np.allclose(
es.calc_weighted_event_var(D, weights, mean_pat), true_var),\
"Failed to compute variance with fractional weights"
def test_sym():
es = EventSegment(4)
evpat = np.repeat(np.arange(10).reshape(-1, 1), 4, axis=1)
es.set_event_patterns(evpat)
D = np.repeat(np.arange(10).reshape(1, -1), 20, axis=0)
ev = es.find_events(D, var=1)[0]
# Check that events 1-4 and 2-3 are symmetric
assert np.all(np.isclose(ev[:, :2], np.fliplr(np.flipud(ev[:, 2:])))),\
"Fit with constant data is not symmetric"
def test_chains():
es = EventSegment(5, event_chains=np.array(['A', 'A', 'B', 'B', 'B']))
es.set_event_patterns(np.array([[1, 1, 0, 0, 0],
[0, 0, 1, 1, 1]]))
sample_data = np.array([[0, 0, 0], [1, 1, 1]])
seg = es.find_events(sample_data.T, 0.1)[0]
ev = np.nonzero(seg > 0.99)[1]
assert np.array_equal(ev, [2, 3, 4]),\
"Failed to fit with multiple chains"
def test_prior():
K = 10
T = 100
es = EventSegment(K)
mp = es.model_prior(T)[0]
p_bound = np.zeros((T, K-1))
norm = comb(T-1, K-1)
for t in range(T-1):
for k in range(K-1):
# See supplementary material of Neuron paper
# https://doi.org/10.1016/j.neuron.2017.06.041
p_bound[t+1, k] = comb(t, k) * comb(T-t-2, K-k-2) / norm
p_bound = np.cumsum(p_bound, axis=0)
mp_gt = np.zeros((T, K))
for k in range(K):
if k == 0:
mp_gt[:, k] = 1 - p_bound[:, 0]
elif k == K - 1:
mp_gt[:, k] = p_bound[:, k-1]
else:
mp_gt[:, k] = p_bound[:, k-1] - p_bound[:, k]
assert np.all(np.isclose(mp, mp_gt)),\
"Prior does not match analytic solution"
```
#### File: tests/utils/test_fmrisim.py
```python
import numpy as np
import math
from brainiak.utils import fmrisim as sim
import pytest
from itertools import product
def test_generate_signal():
# Inputs for generate_signal
dimensions = np.array([10, 10, 10]) # What is the size of the brain
feature_size = [3]
feature_type = ['cube']
feature_coordinates = np.array([[5, 5, 5]])
signal_magnitude = [30]
# Generate a volume representing the location and quality of the signal
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
assert np.all(volume.shape == dimensions), "Check signal shape"
assert np.max(volume) == signal_magnitude, "Check signal magnitude"
assert np.sum(volume > 0) == math.pow(feature_size[0], 3), (
"Check feature size")
assert volume[5, 5, 5] == signal_magnitude, "Check signal location"
assert volume[5, 5, 1] == 0, "Check noise location"
feature_coordinates = np.array(
[[5, 5, 5], [3, 3, 3], [7, 7, 7]])
# Check feature size is correct
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=['loop', 'cavity', 'sphere'],
feature_size=[3],
signal_magnitude=signal_magnitude)
assert volume[5, 5, 5] == 0, "Loop is empty"
assert volume[3, 3, 3] == 0, "Cavity is empty"
assert volume[7, 7, 7] != 0, "Sphere is not empty"
# Check feature size manipulation
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=['loop', 'cavity', 'sphere'],
feature_size=[1],
signal_magnitude=signal_magnitude)
assert volume[5, 6, 6] == 0, "Loop is too big"
assert volume[3, 5, 5] == 0, "Cavity is too big"
assert volume[7, 9, 9] == 0, "Sphere is too big"
# Check that out of bounds feature coordinates are corrected
feature_coordinates = np.array([0, 2, dimensions[2]])
x, y, z = sim._insert_idxs(feature_coordinates, feature_size[0],
dimensions)
assert x[1] - x[0] == 2, "x min not corrected"
assert y[1] - y[0] == 3, "y was corrected when it shouldn't be"
assert z[1] - z[0] == 1, "z max not corrected"
# Check that signal patterns are created
feature_coordinates = np.array([[5, 5, 5]])
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
signal_constant=0,
)
assert volume[4:7, 4:7, 4:7].std() > 0, "Signal is constant"
def test_generate_stimfunction():
# Inputs for generate_stimfunction
onsets = [10, 30, 50, 70, 90]
event_durations = [6]
tr_duration = 2
duration = 100
# Create the time course for the signal to be generated
stimfunction = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=duration,
)
assert stimfunction.shape[0] == duration * 100, "stimfunc incorrect length"
eventNumber = np.sum(event_durations * len(onsets)) * 100
assert np.sum(stimfunction) == eventNumber, "Event number"
# Create the signal function
signal_function = sim.convolve_hrf(stimfunction=stimfunction,
tr_duration=tr_duration,
)
stim_dur = stimfunction.shape[0] / (tr_duration * 100)
assert signal_function.shape[0] == stim_dur, "The length did not change"
# Test
onsets = [0]
tr_duration = 1
event_durations = [1]
stimfunction = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=duration,
)
signal_function = sim.convolve_hrf(stimfunction=stimfunction,
tr_duration=tr_duration,
)
max_response = np.where(signal_function != 0)[0].max()
assert 25 < max_response <= 30, "HRF has the incorrect length"
assert np.sum(signal_function < 0) > 0, "No values below zero"
# Export a stimfunction
sim.export_3_column(stimfunction,
'temp.txt',
)
# Load in the stimfunction
stimfunc_new = sim.generate_stimfunction(onsets=None,
event_durations=None,
total_time=duration,
timing_file='temp.txt',
)
assert np.all(stimfunc_new == stimfunction), "Export/import failed"
# Break the timing precision of the generation
stimfunc_new = sim.generate_stimfunction(onsets=None,
event_durations=None,
total_time=duration,
timing_file='temp.txt',
temporal_resolution=0.5,
)
assert stimfunc_new.sum() == 0, "Temporal resolution not working right"
# Set the duration to be too short so you should get an error
onsets = [10, 30, 50, 70, 90]
event_durations = [5]
with pytest.raises(ValueError):
sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=89,
)
# Clip the event offset
stimfunc_new = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=95,
)
assert stimfunc_new[-1] == 1, 'Event offset was not clipped'
# Test exporting a group of participants to an epoch file
cond_a = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=110,
)
cond_b = sim.generate_stimfunction(onsets=[x + 5 for x in onsets],
event_durations=event_durations,
total_time=110,
)
stimfunction_group = [np.hstack((cond_a, cond_b))] * 2
sim.export_epoch_file(stimfunction_group,
'temp.txt',
tr_duration,
)
# Check that convolve throws a warning when the shape is wrong
sim.convolve_hrf(stimfunction=np.hstack((cond_a, cond_b)).T,
tr_duration=tr_duration,
temporal_resolution=1,
)
def test_apply_signal():
dimensions = np.array([10, 10, 10]) # What is the size of the brain
feature_size = [2]
feature_type = ['cube']
feature_coordinates = np.array(
[[5, 5, 5]])
signal_magnitude = [30]
# Generate a volume representing the location and quality of the signal
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
# Inputs for generate_stimfunction
onsets = [10, 30, 50, 70, 90]
event_durations = [6]
tr_duration = 2
duration = 100
# Create the time course for the signal to be generated
stimfunction = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=duration,
)
signal_function = sim.convolve_hrf(stimfunction=stimfunction,
tr_duration=tr_duration,
)
# Check that you can compute signal change appropriately
# Preset a bunch of things
stimfunction_tr = stimfunction[::int(tr_duration * 100)]
mask, template = sim.mask_brain(dimensions, mask_self=False)
noise_dict = sim._noise_dict_update({})
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=noise_dict,
iterations=[0, 0]
)
coords = feature_coordinates[0]
noise_function_a = noise[coords[0], coords[1], coords[2], :]
noise_function_a = noise_function_a.reshape(duration // tr_duration, 1)
noise_function_b = noise[coords[0] + 1, coords[1], coords[2], :]
noise_function_b = noise_function_b.reshape(duration // tr_duration, 1)
# Create the calibrated signal with PSC
method = 'PSC'
sig_a = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[0.5],
method,
)
sig_b = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[1.0],
method,
)
assert sig_b.max() / sig_a.max() == 2, 'PSC modulation failed'
# Create the calibrated signal with SFNR
method = 'SFNR'
sig_a = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[0.5],
method,
)
scaled_a = sig_a / (noise_function_a.mean() / noise_dict['sfnr'])
sig_b = sim.compute_signal_change(signal_function,
noise_function_b,
noise_dict,
[1.0],
method,
)
scaled_b = sig_b / (noise_function_b.mean() / noise_dict['sfnr'])
assert scaled_b.max() / scaled_a.max() == 2, 'SFNR modulation failed'
# Create the calibrated signal with CNR_Amp/Noise-SD
method = 'CNR_Amp/Noise-SD'
sig_a = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[0.5],
method,
)
scaled_a = sig_a / noise_function_a.std()
sig_b = sim.compute_signal_change(signal_function,
noise_function_b,
noise_dict,
[1.0],
method,
)
scaled_b = sig_b / noise_function_b.std()
assert scaled_b.max() / scaled_a.max() == 2, 'CNR_Amp modulation failed'
# Create the calibrated signal with CNR_Amp/Noise-Var_dB
method = 'CNR_Amp2/Noise-Var_dB'
sig_a = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[0.5],
method,
)
scaled_a = np.log(sig_a.max() / noise_function_a.std())
sig_b = sim.compute_signal_change(signal_function,
noise_function_b,
noise_dict,
[1.0],
method,
)
scaled_b = np.log(sig_b.max() / noise_function_b.std())
assert np.round(scaled_b / scaled_a) == 2, 'CNR_Amp dB modulation failed'
# Create the calibrated signal with CNR_Signal-SD/Noise-SD
method = 'CNR_Signal-SD/Noise-SD'
sig_a = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[0.5],
method,
)
scaled_a = sig_a.std() / noise_function_a.std()
sig_b = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[1.0],
method,
)
scaled_b = sig_b.std() / noise_function_a.std()
assert (scaled_b / scaled_a) == 2, 'CNR signal modulation failed'
# Create the calibrated signal with CNR_Amp/Noise-Var_dB
method = 'CNR_Signal-Var/Noise-Var_dB'
sig_a = sim.compute_signal_change(signal_function,
noise_function_a,
noise_dict,
[0.5],
method,
)
scaled_a = np.log(sig_a.std() / noise_function_a.std())
sig_b = sim.compute_signal_change(signal_function,
noise_function_b,
noise_dict,
[1.0],
method,
)
scaled_b = np.log(sig_b.std() / noise_function_b.std())
assert np.round(scaled_b / scaled_a) == 2, 'CNR signal dB modulation ' \
'failed'
# Convolve the HRF with the stimulus sequence
signal = sim.apply_signal(signal_function=signal_function,
volume_signal=volume,
)
assert signal.shape == (dimensions[0], dimensions[1], dimensions[2],
duration / tr_duration), "The output is the " \
"wrong size"
signal = sim.apply_signal(signal_function=stimfunction,
volume_signal=volume,
)
assert np.any(signal == signal_magnitude), "The stimfunction is not binary"
# Check that there is an error if the number of signal voxels doesn't
# match the number of non zero brain voxels
with pytest.raises(IndexError):
sig_vox = (volume > 0).sum()
vox_pattern = np.tile(stimfunction, (1, sig_vox - 1))
sim.apply_signal(signal_function=vox_pattern,
volume_signal=volume,
)
def test_generate_noise():
dimensions = np.array([10, 10, 10]) # What is the size of the brain
feature_size = [2]
feature_type = ['cube']
feature_coordinates = np.array(
[[5, 5, 5]])
signal_magnitude = [1]
# Generate a volume representing the location and quality of the signal
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
# Inputs for generate_stimfunction
onsets = [10, 30, 50, 70, 90]
event_durations = [6]
tr_duration = 2
duration = 200
# Create the time course for the signal to be generated
stimfunction = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=duration,
)
signal_function = sim.convolve_hrf(stimfunction=stimfunction,
tr_duration=tr_duration,
)
# Convolve the HRF with the stimulus sequence
signal = sim.apply_signal(signal_function=signal_function,
volume_signal=volume,
)
# Generate the mask of the signal
mask, template = sim.mask_brain(signal,
mask_self=None)
assert min(mask[mask > 0]) > 0.1, "Mask thresholding did not work"
assert len(np.unique(template) > 2), "Template creation did not work"
stimfunction_tr = stimfunction[::int(tr_duration * 100)]
# Create the noise volumes (using the default parameters)
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
iterations=[1, 0],
)
assert signal.shape == noise.shape, "The dimensions of signal and noise " \
"the same"
noise_high = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={'sfnr': 50, 'snr': 25},
iterations=[1, 0],
)
noise_low = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={'sfnr': 100, 'snr': 25},
iterations=[1, 0],
)
system_high = np.std(noise_high[mask > 0], 1).mean()
system_low = np.std(noise_low[mask > 0], 1).mean()
assert system_low < system_high, "SFNR noise could not be manipulated"
# Check that you check for the appropriate template values
with pytest.raises(ValueError):
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template * 2,
mask=mask,
noise_dict={},
)
# Check that iterations does what it should
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={},
iterations=[0, 0],
)
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict={},
iterations=None,
)
# Test drift noise
trs = 1000
period = 100
drift = sim._generate_noise_temporal_drift(trs,
tr_duration,
'sine',
period,
)
# Check that the max frequency is the appropriate frequency
power = abs(np.fft.fft(drift))[1:trs // 2]
freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs
period_freq = np.where(freq == 1 / (period // tr_duration))
max_freq = np.argmax(power)
assert period_freq == max_freq, 'Max frequency is not where it should be'
# Do the same but now with cosine basis functions, answer should be close
drift = sim._generate_noise_temporal_drift(trs,
tr_duration,
'discrete_cos',
period,
)
# Check that the appropriate frequency is peaky (may not be the max)
power = abs(np.fft.fft(drift))[1:trs // 2]
freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs
period_freq = np.where(freq == 1 / (period // tr_duration))[0][0]
assert power[period_freq] > power[period_freq + 1], 'Power is low'
assert power[period_freq] > power[period_freq - 1], 'Power is low'
# Check it gives a warning if the duration is too short
drift = sim._generate_noise_temporal_drift(50,
tr_duration,
'discrete_cos',
period,
)
# Test physiological noise (using unrealistic parameters so that it's easy)
timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs))
resp_freq = 0.2
heart_freq = 1.17
phys = sim._generate_noise_temporal_phys(timepoints,
resp_freq,
heart_freq,
)
# Check that the max frequency is the appropriate frequency
power = abs(np.fft.fft(phys))[1:trs // 2]
freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / (trs * tr_duration)
peaks = (power > (power.mean() + power.std())) # Where are the peaks
peak_freqs = freq[peaks]
assert np.any(resp_freq == peak_freqs), 'Resp frequency not found'
assert len(peak_freqs) == 2, 'Two peaks not found'
# Test task noise
sim._generate_noise_temporal_task(stimfunction_tr,
motion_noise='gaussian',
)
sim._generate_noise_temporal_task(stimfunction_tr,
motion_noise='rician',
)
# Test ARMA noise
with pytest.raises(ValueError):
noise_dict = {'fwhm': 4, 'auto_reg_rho': [1], 'ma_rho': [1, 1]}
sim._generate_noise_temporal_autoregression(stimfunction_tr,
noise_dict,
dimensions,
mask,
)
# Generate spatial noise
vol = sim._generate_noise_spatial(np.array([10, 10, 10, trs]))
assert len(vol.shape) == 3, 'Volume was not reshaped to ignore TRs'
# Switch some of the noise types on
noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1,
auto_reg_sigma=0)
sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=noise_dict,
iterations=[0, 0],
)
def test_generate_noise_spatial():
# Set up the inputs
dimensions = np.array([10, 5, 10])
mask = np.ones(dimensions)
vol = sim._generate_noise_spatial(dimensions, mask)
# Run the analysis from _calc_FWHM but for the last step of aggregating
# across dimensions
v_count = 0
v_sum = 0
v_sq = 0
d_sum = [0.0, 0.0, 0.0]
d_sq = [0.0, 0.0, 0.0]
d_count = [0, 0, 0]
# Pull out all the voxel coordinates
coordinates = list(product(range(dimensions[0]),
range(dimensions[1]),
range(dimensions[2])))
# Find the sum of squared error for the non-masked voxels in the brain
for i in list(range(len(coordinates))):
# Pull out this coordinate
x, y, z = coordinates[i]
# Is this within the mask?
if mask[x, y, z] > 0:
# Find the volume sum and squared values
v_count += 1
v_sum += vol[x, y, z]
v_sq += vol[x, y, z] ** 2
# Get the volume variance
v_var = (v_sq - ((v_sum ** 2) / v_count)) / (v_count - 1)
for i in list(range(len(coordinates))):
# Pull out this coordinate
x, y, z = coordinates[i]
# Is this within the mask?
if mask[x, y, z] > 0:
# For each xyz dimension calculate the squared
# difference of this voxel and the next
in_range = (x < dimensions[0] - 1)
in_mask = in_range and (mask[x + 1, y, z] > 0)
included = in_mask and (~np.isnan(vol[x + 1, y, z]))
if included:
d_sum[0] += vol[x, y, z] - vol[x + 1, y, z]
d_sq[0] += (vol[x, y, z] - vol[x + 1, y, z]) ** 2
d_count[0] += 1
in_range = (y < dimensions[1] - 1)
in_mask = in_range and (mask[x, y + 1, z] > 0)
included = in_mask and (~np.isnan(vol[x, y + 1, z]))
if included:
d_sum[1] += vol[x, y, z] - vol[x, y + 1, z]
d_sq[1] += (vol[x, y, z] - vol[x, y + 1, z]) ** 2
d_count[1] += 1
in_range = (z < dimensions[2] - 1)
in_mask = in_range and (mask[x, y, z + 1] > 0)
included = in_mask and (~np.isnan(vol[x, y, z + 1]))
if included:
d_sum[2] += vol[x, y, z] - vol[x, y, z + 1]
d_sq[2] += (vol[x, y, z] - vol[x, y, z + 1]) ** 2
d_count[2] += 1
# Find the variance
d_var = np.divide((d_sq - np.divide(np.power(d_sum, 2),
d_count)), (np.add(d_count, -1)))
o_var = np.divide(-1, (4 * np.log(1 - (0.5 * d_var / v_var))))
fwhm3 = np.sqrt(o_var) * 2 * np.sqrt(2 * np.log(2))
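# Combining the two lines above algebraically gives the familiar Gaussian smoothness
# estimate FWHM = sqrt(-2 * ln(2) / ln(1 - d_var / (2 * v_var))), evaluated
# independently along each of the three spatial dimensions.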
# Calculate the proportion of std relative to the mean
std_proportion = np.nanstd(fwhm3) / np.nanmean(fwhm3)
print(fwhm3)
assert std_proportion < 0.25, 'Variance is inconsistent across dim'
def test_mask_brain():
# Inputs for generate_signal
dimensions = np.array([10, 10, 10]) # What is the size of the brain
feature_size = [2]
feature_type = ['cube']
feature_coordinates = np.array(
[[4, 4, 4]])
signal_magnitude = [30]
# Generate a volume representing the location and quality of the signal
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
# Mask the volume to be the same shape as a brain
mask, _ = sim.mask_brain(dimensions, mask_self=None,)
brain = volume * mask
assert np.sum(brain != 0) == np.sum(volume != 0), "Masking did not work"
assert brain[0, 0, 0] == 0, "Masking did not work"
assert brain[4, 4, 4] != 0, "Masking did not work"
feature_coordinates = np.array(
[[1, 1, 1]])
volume = sim.generate_signal(dimensions=dimensions,
feature_coordinates=feature_coordinates,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
# Mask the volume to be the same shape as a brain
mask, _ = sim.mask_brain(dimensions, mask_self=None, )
brain = volume * mask
assert np.sum(brain != 0) < np.sum(volume != 0), "Masking did not work"
# Test that you can load the default
dimensions = np.array([100, 100, 100])
mask, template = sim.mask_brain(dimensions, mask_self=False)
    assert mask[20, 80, 50] == 0, "Masking didn't work"
    assert mask[25, 80, 50] == 1, "Masking didn't work"
assert int(template[25, 80, 50] * 100) == 57, 'Template not correct'
# Check that you can mask self
mask_self, template_self = sim.mask_brain(template, mask_self=True)
assert (template_self - template).sum() < 1e2, 'Mask self error'
assert (mask_self - mask).sum() == 0, 'Mask self error'
def test_calc_noise():
# Inputs for functions
onsets = [10, 30, 50, 70, 90]
event_durations = [6]
tr_duration = 2
duration = 200
temporal_res = 100
tr_number = int(np.floor(duration / tr_duration))
dimensions_tr = np.array([10, 10, 10, tr_number])
# Preset the noise dict
nd_orig = sim._noise_dict_update({})
# Create the time course for the signal to be generated
stimfunction = sim.generate_stimfunction(onsets=onsets,
event_durations=event_durations,
total_time=duration,
temporal_resolution=temporal_res,
)
# Mask the volume to be the same shape as a brain
mask, template = sim.mask_brain(dimensions_tr, mask_self=None)
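    # Downsample the high-resolution stimulus function to one value per TR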
stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)]
nd_orig['matched'] = 0
noise = sim.generate_noise(dimensions=dimensions_tr[0:3],
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=nd_orig,
)
# Check the spatial noise match
nd_orig['matched'] = 1
noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3],
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=nd_orig,
iterations=[50, 0]
)
# Calculate the noise parameters from this newly generated volume
nd_new = sim.calc_noise(noise, mask, template)
nd_matched = sim.calc_noise(noise_matched, mask, template)
    # Check that the values are reasonable
assert nd_new['snr'] > 0, 'snr out of range'
assert nd_new['sfnr'] > 0, 'sfnr out of range'
assert nd_new['auto_reg_rho'][0] > 0, 'ar out of range'
# Check that the dilation increases SNR
no_dilation_snr = sim._calc_snr(noise_matched,
mask,
dilation=0,
reference_tr=tr_duration,
)
assert nd_new['snr'] > no_dilation_snr, "Dilation did not increase SNR"
# Check that template size is in bounds
with pytest.raises(ValueError):
sim.calc_noise(noise, mask, template * 2)
    # Check that a missing mask raises an error
with pytest.raises(ValueError):
sim.calc_noise(noise, None, template)
# Check that it can deal with missing noise parameters
temp_nd = sim.calc_noise(noise, mask, template, noise_dict={})
assert temp_nd['voxel_size'][0] == 1, 'Default voxel size not set'
temp_nd = sim.calc_noise(noise, mask, template, noise_dict=None)
assert temp_nd['voxel_size'][0] == 1, 'Default voxel size not set'
# Check that the fitting worked
snr_diff = abs(nd_orig['snr'] - nd_new['snr'])
snr_diff_match = abs(nd_orig['snr'] - nd_matched['snr'])
assert snr_diff > snr_diff_match, 'snr fit incorrectly'
# Test that you can generate rician and exponential noise
sim._generate_noise_system(dimensions_tr,
1,
1,
spatial_noise_type='exponential',
temporal_noise_type='rician',
)
# Check the temporal noise match
nd_orig['matched'] = 1
noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3],
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
noise_dict=nd_orig,
iterations=[0, 50]
)
nd_matched = sim.calc_noise(noise_matched, mask, template)
sfnr_diff = abs(nd_orig['sfnr'] - nd_new['sfnr'])
sfnr_diff_match = abs(nd_orig['sfnr'] - nd_matched['sfnr'])
assert sfnr_diff > sfnr_diff_match, 'sfnr fit incorrectly'
ar1_diff = abs(nd_orig['auto_reg_rho'][0] - nd_new['auto_reg_rho'][0])
ar1_diff_match = abs(nd_orig['auto_reg_rho'][0] - nd_matched[
'auto_reg_rho'][0])
assert ar1_diff > ar1_diff_match, 'AR1 fit incorrectly'
# Check that you can calculate ARMA for a single voxel
vox = noise[5, 5, 5, :]
arma = sim._calc_ARMA_noise(vox,
None,
sample_num=2,
)
assert len(arma) == 2, "Two outputs not given by ARMA"
```
|
{
"source": "jdkent/fitlins",
"score": 3
}
|
#### File: fitlins/utils/strings.py
```python
def snake_to_camel(string):
words = string.split('_')
return words[0] + ''.join(word.title() for word in words[1:])
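# Example: snake_to_camel('plot_and_save') -> 'plotAndSave'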
```
#### File: fitlins/viz/__init__.py
```python
from matplotlib import pyplot as plt
from .corr import plot_corr_matrix
from .contrasts import plot_contrast_matrix
def plot_and_save(fname, plotter, *args, **kwargs):
    if (kwargs.get('axes'), kwargs.get('ax')) == (None, None):
        fig = plt.figure()
        axes = plt.gca()
        if 'axes' in kwargs:
            kwargs['axes'] = axes
        else:
            kwargs['ax'] = axes
    else:
        # Reuse the figure that owns the axes supplied by the caller
        fig = (kwargs.get('axes') or kwargs.get('ax')).figure
    plotter(*args, **kwargs)
    fig.savefig(fname, bbox_inches='tight')
    plt.close(fig)
```
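A minimal usage sketch of `plot_and_save` with a stand-in plotter; `demo_plotter` and its `ax` keyword are illustrative assumptions, not part of fitlins:
```python
import numpy as np

def demo_plotter(data, ax=None):
    # Stand-in for plot_corr_matrix / plot_contrast_matrix
    ax.imshow(data, cmap='viridis')

# plot_and_save creates the figure and axes, injects them through the 'ax'
# keyword, saves the rendered figure, and closes it.
plot_and_save('demo.png', demo_plotter, np.random.rand(4, 4))
```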
|
{
"source": "jdkent/hrfViz",
"score": 3
}
|
#### File: hrfViz/bokeh-app/main.py
```python
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure
from nistats import hemodynamic_models
# Set up data
model = hemodynamic_models._gamma_difference_hrf(tr=2)
x = np.arange(0, len(model))
source = ColumnDataSource(data=dict(x=x, y=model))
# Set up plot
thr = 0.01
plot = figure(plot_height=400, plot_width=400, title="my hrf wave",
tools="crosshair,pan,reset,save,wheel_zoom",
x_range=[0, np.max(x)], y_range=[np.min(model)-thr, np.max(model)+thr])
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
# Set up widgets
text = TextInput(title="title", value='my hrf')
delay = Slider(title="delay", value=6.0, start=0, end=10, step=0.1)
time_length = Slider(title="time_length", value=32.0, start=16, end=48, step=0.1)
onset = Slider(title="onset", value=0.0, start=0.0, end=10, step=0.1)
undershoot = Slider(title="undershoot", value=16.0, start=4, end=32, step=0.1)
dispersion = Slider(title="dispersion", value=1.0, start=0.1, end=5.0, step=0.1)
u_dispersion = Slider(title="u_dispersion", value=1.0, start=0.1, end=5.0, step=0.1)
ratio = Slider(title="ratio", value=0.167, start=0.01, end=2.0, step=0.1)
scale = Slider(title="amplitude", value=1, start=0, end=5, step=0.1)
# Set up callbacks
def update_title(attrname, old, new):
plot.title.text = text.value
text.on_change('value', update_title)
def update_data(attrname, old, new):
# Get the current slider values
dy = delay.value
tl = time_length.value
on = onset.value
un = undershoot.value
di = dispersion.value
ud = u_dispersion.value
ra = ratio.value
# Generate the new curve
model = hemodynamic_models._gamma_difference_hrf(
tr=2, time_length=tl, onset=on, delay=dy, undershoot=un,
dispersion=di, u_dispersion=ud, ratio=ra
) * scale.value
x = np.arange(0, len(model))
source.data = dict(x=x, y=model)
for w in [delay, time_length, onset, undershoot, dispersion, u_dispersion, ratio, scale]:
w.on_change('value', update_data)
# Set up layouts and add to document
inputs = column(text, delay, time_length, onset,
                undershoot, dispersion, u_dispersion, ratio,
                scale)
curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "My HRF"
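# To serve this directory-style Bokeh app (from the repo root): bokeh serve --show bokeh-app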
```
|
{
"source": "jdkent/neurodocker",
"score": 2
}
|
#### File: neurodocker/neurodocker/neurodocker.py
```python
from argparse import Action
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
import json
import logging
import sys
import etelemetry
from neurodocker import __version__
from neurodocker import utils
from neurodocker.generators import Dockerfile
from neurodocker.generators import SingularityRecipe
from neurodocker.generators.common import _installation_implementations
logger = logging.getLogger(__name__)
# https://stackoverflow.com/a/9028031/5666087
class OrderedArgs(Action):
"""Object to preserve order in which command-line arguments are given."""
def __call__(self, parser, namespace, values, option_string=None):
if 'ordered_args' not in namespace:
setattr(namespace, 'ordered_args', [])
previous = namespace.ordered_args
previous.append((self.dest, values))
setattr(namespace, 'ordered_args', previous)
def _list_of_kv(kv):
"""Split string `kv` at first equals sign."""
ll = kv.split("=")
ll[1:] = ["=".join(ll[1:])]
return ll
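# Example: _list_of_kv("VERSION=1.0.0=beta") -> ["VERSION", "1.0.0=beta"]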
def _add_generate_common_arguments(parser):
p = parser
p.add_argument(
"-b", "--base", help="Base Docker image. E.g., debian:stretch")
p.add_argument(
"-p",
"--pkg-manager",
choices={'apt', 'yum'},
help="Linux package manager.")
p.add_argument(
'--add-to-entrypoint',
action=OrderedArgs,
help=("Add a command to the file /neurodocker/startup.sh, which is the"
" container's default entrypoint."))
p.add_argument(
'--copy',
action=OrderedArgs,
nargs="+",
help="Copy files into container. Use format <src>... <dest>")
p.add_argument(
'--install',
action=OrderedArgs,
nargs="+",
help=("Install system packages with apt-get or yum, depending on the"
" package manager specified."))
p.add_argument(
'--entrypoint',
action=OrderedArgs,
help="Set the container's entrypoint (Docker) / append to runscript"
" (Singularity)")
p.add_argument(
'-e',
'--env',
action=OrderedArgs,
nargs="+",
type=_list_of_kv,
help="Set environment variable(s). Use the format KEY=VALUE")
p.add_argument(
'-r',
'--run',
action=OrderedArgs,
help="Run a command when building container")
p.add_argument(
'--run-bash', action=OrderedArgs, help="Run a command in bash")
p.add_argument(
'-u',
'--user',
action=OrderedArgs,
help="Switch current user (creates user if necessary)")
p.add_argument(
'-w', '--workdir', action=OrderedArgs, help="Set working directory")
# To generate from file.
p.add_argument(
'file',
nargs='?',
help="Generate file from JSON. Overrides other `generate` arguments")
p.add_argument(
'--json', action="store_true", help="Print Neurodocker JSON spec")
# Other arguments (no order).
p.add_argument(
'-o',
'--output',
dest="output",
help="If specified, save Dockerfile to file with this name.")
p.add_argument(
'--no-print',
dest='no_print',
action="store_true",
help="Do not print the generated file")
_ndeb_servers = ", ".join(
_installation_implementations['neurodebian']._servers.keys())
# Software package options.
pkgs_help = {
"all":
"Install software packages. Each argument takes a list of"
" key=value pairs. Where applicable, the default installation"
" behavior is to install by downloading and uncompressing"
" binaries. Some programs can be built from source.",
"afni":
"Install AFNI. Valid keys are version (required), method,"
" install_path, install_r, install_r_pkgs, install_python2,"
" and install_python3. Only the latest version and version"
" 17.2.02 are supported at this time.",
"ants":
"Install ANTs. Valid keys are version (required), method"
" install_path, cmake_opts, and make_opts. Version can be a "
" git commit hash if building from source.",
"convert3d":
"Install Convert3D. Valid keys are version (required),"
" method, and install_path.",
"dcm2niix":
"Install dcm2niix. Valid keys are version, method,"
" install_path, cmake_opts, and make_opts",
"freesurfer":
"Install FreeSurfer. Valid keys are version (required),"
" method, install_path, and exclude_paths. A FreeSurfer"
" license is required to run the software and is not"
" provided by Neurodocker.",
"fsl":
"Install FSL. Valid keys are version (required), method,"
" install_path, and exclude_paths.",
"matlabmcr":
"Install Matlab Compiler Runtime. Valid keys are version,"
" method, and install_path",
"miniconda":
"Install Miniconda. Valid keys are install_path,"
" env_name, conda_install, pip_install, conda_opts,"
" pip_opts, activate (default false), and version"
" (defaults to latest). The options conda_install and"
" pip_install accept strings of packages: conda_install="
'"python=3.6 numpy traits".',
"mricron":
"Install MRIcron. valid keys are version (required), method, and"
" install_path.",
"mrtrix3":
"Install MRtrix3. Valid keys are version (required),"
" method, and install_path",
"ndfreeze":
"Use the NeuroDebian command `nd_freeze` to freeze the apt"
" sources to a certain date. This will only have an effect"
" on Debian and NeuroDebian APT sources.",
"neurodebian":
"Add NeuroDebian repository. Valid keys are "
"os_codename (e.g., zesty), server (e.g., usa-nh), and"
" full (if true, use non-free packages). Valid download"
" servers are {}.".format(_ndeb_servers),
"spm12":
"Install SPM12 and its dependency, Matlab Compiler Runtime."
" Valid keys are version and install_path.",
"minc":
"Install MINC. Valid keys is version (required), method, and"
" install_path. Only version 1.9.15 is supported at this"
" time.",
"petpvc":
"Install PETPVC. Valid keys are version (required), method,"
" and install_path.",
"vnc":
"Install a VNC server. Valid keys are passwd (required),"
" start_at_runtime, and geometry."
}
pkgs = p.add_argument_group(
title="software package arguments", description=pkgs_help['all'])
for pkg in _installation_implementations.keys():
if pkg == '_header':
continue
flag = "--{}".format(pkg)
# MRtrix3 does not need any arguments by default.
nargs = "*" if pkg == "mrtrix3" else "+"
pkgs.add_argument(
flag,
dest=pkg,
nargs=nargs,
action=OrderedArgs,
metavar="",
type=_list_of_kv,
help=pkgs_help[pkg])
def _add_generate_docker_arguments(parser):
"""Add arguments to `parser` for sub-command `generate docker`."""
p = parser
# Arguments that should be ordered.
p.add_argument(
'--add',
action=OrderedArgs,
nargs="+",
help="Dockerfile ADD instruction. Use format <src>... <dest>")
p.add_argument(
'--arg',
action=OrderedArgs,
nargs="+",
type=_list_of_kv,
help="Dockerfile ARG instruction. Use format KEY[=DEFAULT_VALUE] ...")
p.add_argument(
'--cmd',
action=OrderedArgs,
nargs="+",
help="Dockerfile CMD instruction.")
p.add_argument(
'--expose',
nargs="+",
action=OrderedArgs,
help="Dockerfile EXPOSE instruction.")
p.add_argument(
'--label',
action=OrderedArgs,
nargs="+",
type=_list_of_kv,
help="Dockerfile LABEL instruction.")
p.add_argument(
'--volume',
action=OrderedArgs,
nargs="+",
help="Dockerfile VOLUME instruction.")
def _add_generate_singularity_arguments(parser):
"""Add arguments to `parser` for sub-command `generate singularity`."""
pass
def _add_reprozip_trace_arguments(parser):
"""Add arguments to `parser` for sub-command `reprozip-trace`."""
p = parser
p.add_argument(
'container', help="Running container in which to trace commands.")
p.add_argument('commands', nargs='+', help="Command(s) to trace.")
p.add_argument(
'--dir',
'-d',
dest="packfile_save_dir",
default=".",
help=("Directory in which to save pack file. Default "
"is current directory."))
def _add_reprozip_merge_arguments(parser):
"""Add arguments to `parser` for sub-command `reprozip-merge`."""
p = parser
p.add_argument('outfile', help="Filepath to merged pack file.")
p.add_argument('pack_files', nargs='+', help="Pack files to merge.")
def create_parser():
"""Return command-line argument parser."""
parser = ArgumentParser(
description=__doc__, formatter_class=RawDescriptionHelpFormatter)
verbosity_choices = ('debug', 'info', 'warning', 'error', 'critical')
parser.add_argument("-v", "--verbosity", choices=verbosity_choices)
parser.add_argument(
"-V",
"--version",
action="version",
version=('neurodocker version {}'.format(__version__)))
subparsers = parser.add_subparsers(
dest="subparser_name",
title="subcommands",
description="valid subcommands")
# `neurodocker generate` parsers.
generate_parser = subparsers.add_parser(
'generate', help="generate recipes")
generate_subparsers = generate_parser.add_subparsers(
dest="subsubparser_name",
title="subcommands",
description="valid subcommands")
generate_docker_parser = generate_subparsers.add_parser(
'docker', help="generate Dockerfile")
generate_singularity_parser = generate_subparsers.add_parser(
'singularity', help="generate Singularity recipe")
_add_generate_common_arguments(generate_docker_parser)
_add_generate_docker_arguments(generate_docker_parser)
_add_generate_common_arguments(generate_singularity_parser)
_add_generate_singularity_arguments(generate_singularity_parser)
# `neurodocker reprozip` parsers.
reprozip_parser = subparsers.add_parser('reprozip', help="")
reprozip_subparsers = reprozip_parser.add_subparsers(
dest="subsubparser_name",
title="subcommands",
description="valid subcommands")
reprozip_trace_parser = reprozip_subparsers.add_parser(
'trace', help="minify container for traced command(s)")
reprozip_merge_parser = reprozip_subparsers.add_parser(
'merge', help="merge reprozip pack files")
_add_reprozip_trace_arguments(reprozip_trace_parser)
_add_reprozip_merge_arguments(reprozip_merge_parser)
# Add verbosity option to both parsers. How can this be done with parents?
for p in (generate_parser, reprozip_trace_parser, reprozip_merge_parser):
p.add_argument("-v", "--verbosity", choices=verbosity_choices)
return parser
def parse_args(args):
"""Return namespace of command-line arguments."""
parser = create_parser()
namespace = parser.parse_args(args)
if namespace.subparser_name is None:
parser.print_help()
parser.exit(1)
elif (namespace.subparser_name == 'generate'
and namespace.subsubparser_name is None):
parser.print_help()
parser.exit(1)
elif (namespace.subparser_name == 'reprozip'
and namespace.subsubparser_name is None):
parser.print_help()
parser.exit(1)
elif (namespace.subparser_name == 'generate'
and namespace.subsubparser_name in {'docker', 'singularity'}):
_validate_generate_args(namespace)
return namespace
def generate(namespace):
"""Run `neurodocker generate`."""
if namespace.file is None:
specs = utils._namespace_to_specs(namespace)
else:
specs = utils.load_json(namespace.file)
recipe_objs = {
'docker': Dockerfile,
'singularity': SingularityRecipe,
}
recipe_obj = recipe_objs[namespace.subsubparser_name](specs)
if namespace.json:
print(json.dumps(specs))
elif not namespace.no_print:
print(recipe_obj.render())
if namespace.output:
recipe_obj.save(filepath=namespace.output)
def reprozip_trace(namespace):
"""Run `neurodocker reprozip`."""
from neurodocker.reprozip import ReproZipMinimizer
local_packfile_path = ReproZipMinimizer(**vars(namespace)).run()
logger.info("Saved pack file on the local host:\n{}"
"".format(local_packfile_path))
def reprozip_merge(namespace):
"""Run `neurodocker reprozip merge`."""
from neurodocker.reprozip import merge_pack_files
merge_pack_files(namespace.outfile, namespace.pack_files)
def _validate_generate_args(namespace):
if (namespace.file is None
and (namespace.base is None or namespace.pkg_manager is None)):
raise ValueError("-b/--base and -p/--pkg-manager are required if not"
" generating from JSON file.")
def main(args=None):
"""Main program function."""
if args is None:
namespace = parse_args(sys.argv[1:])
else:
namespace = parse_args(args)
if namespace.verbosity is not None:
utils.set_log_level(namespace.verbosity)
logger.debug(vars(namespace))
try:
latest = etelemetry.get_project("kaczmarj/neurodocker")
except RuntimeError as e:
print("# Could not check for version updates: ", e)
else:
if latest and 'version' in latest:
print("# Your version: {0} Latest version: {1}".format(__version__,
latest["version"]))
subparser_functions = {
'docker': generate,
'singularity': generate,
'trace': reprozip_trace,
'merge': reprozip_merge,
}
subparser_functions[namespace.subsubparser_name](namespace)
if __name__ == "__main__": # pragma: no cover
main()
```
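A rough sketch of driving the CLI defined above programmatically; the base image, package manager, and package list are arbitrary illustrative choices:
```python
from neurodocker.neurodocker import main

# Equivalent to the shell invocation:
#   neurodocker generate docker -b debian:stretch -p apt --install git
# Prints the rendered Dockerfile to stdout unless --no-print is given.
main(['generate', 'docker',
      '-b', 'debian:stretch',
      '-p', 'apt',
      '--install', 'git'])
```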
|
{
"source": "jdkent/neuroscout",
"score": 2
}
|
#### File: neuroscout/models/analysis.py
```python
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.hybrid import hybrid_property
from ..database import db
from .utils import copy_row
import datetime
# Association table between analysis and predictor.
analysis_predictor = db.Table(
'analysis_predictor',
db.Column('analysis_id', db.Integer(), db.ForeignKey('analysis.id')),
db.Column('predictor_id', db.Integer(), db.ForeignKey('predictor.id')))
class Analysis(db.Model):
"""" A single fMRI analysis. """
id = db.Column(db.Integer, primary_key=True)
hash_id = db.Column(db.Text, unique=True)
name = db.Column(db.Text)
description = db.Column(db.Text)
predictions = db.Column(db.Text, default='')
private = db.Column(db.Boolean, default=True)
model = db.Column(JSONB, default={}) # BIDS Model
data = db.Column(JSONB, default={}) # Additional data (e.g. )
filters = db.Column(JSONB) # List of filters used to select runs
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
modified_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
submitted_at = db.Column(db.DateTime)
saved_count = db.Column(db.Integer, default=0)
status = db.Column(db.Text, default='DRAFT')
__table_args__ = (
db.CheckConstraint(
status.in_(
['PASSED', 'FAILED', 'SUBMITTING', 'PENDING', 'DRAFT'])), )
locked = db.Column(db.Boolean, default=False)
traceback = db.Column(db.Text, default='')
compile_task_id = db.Column(db.Text) # Celery task id
bundle_path = db.Column(db.Text)
dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'),
nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
# If cloned, this is the parent analysis:
parent_id = db.Column(db.Text, db.ForeignKey('analysis.hash_id'))
predictors = db.relationship('Predictor', secondary=analysis_predictor,
backref='analysis')
runs = db.relationship('Run', secondary='analysis_run')
neurovault_collections = db.relationship(
'NeurovaultCollection')
def clone(self, user):
""" Make copy of analysis, with new id, and linking to parent """
clone_row = copy_row(Analysis, self,
ignored_columns=['id', 'hash_id'])
clone_row.parent_id = self.hash_id
clone_row.user_id = user.id
clone_row.status = "DRAFT"
# Copy relationships
return clone_row
def __repr__(self):
return '<models.Analysis[hash_id =%s]>' % self.hash_id
class Report(db.Model):
"""" Report generation table"""
id = db.Column(db.Integer, primary_key=True)
analysis_id = db.Column(db.Text, db.ForeignKey('analysis.hash_id'))
runs = db.Column(JSONB, default=None)
scale = db.Column(db.Boolean)
sampling_rate = db.Column(db.Float)
generated_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
task_id = db.Column(db.Text) # Celery task id
result = db.Column(JSONB) # JSON result from Celery (once finished)
warnings = db.Column(JSONB, default=[])
traceback = db.Column(db.Text)
status = db.Column(db.Text, default='PENDING')
__table_args__ = (
db.CheckConstraint(status.in_(['OK', 'FAILED', 'PENDING'])), )
class NeurovaultCollection(db.Model):
""" Neurovault collection and upload status """
id = db.Column(db.Integer, primary_key=True)
analysis_id = db.Column(db.Text, db.ForeignKey('analysis.hash_id'))
uploaded_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
collection_id = db.Column(db.Integer, unique=True)
files = db.relationship('NeurovaultFileUpload', backref='collection')
cli_version = db.Column(db.Text) # neuroscout-cli version
fmriprep_version = db.Column(db.Text)
class NeurovaultFileUpload(db.Model):
""" NV file upload """
id = db.Column(db.Integer, primary_key=True)
nv_collection_id = db.Column(
db.Integer, db.ForeignKey('neurovault_collection.id'),
nullable=False)
path = db.Column(db.Text, nullable=False)
task_id = db.Column(db.Text)
level = db.Column(db.Text, nullable=False)
exception = db.Column(db.Text)
traceback = db.Column(db.Text)
status = db.Column(db.Text, default='PENDING')
__table_args__ = (
db.CheckConstraint(status.in_(['OK', 'FAILED', 'PENDING'])),
db.CheckConstraint(level.in_(['GROUP', 'SUBJECT'])),
)
@hybrid_property
def basename(self):
return self.path.split('/')[-1]
```
#### File: neuroscout/models/task.py
```python
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.hybrid import hybrid_property
import statistics
from sqlalchemy import func
from .run import Run
from ..database import db
class Task(db.Model):
""" A task in a dataset. Usually associated with various runs. """
__table_args__ = (
db.UniqueConstraint('dataset_id', 'name'),
)
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False) # Default: base path
description = db.Column(JSONB) # BIDS task description
dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'),
nullable=False)
runs = db.relationship('Run', backref='task', cascade="delete")
TR = db.Column(db.Float)
summary = db.Column(db.Text) # Summary annotation
@hybrid_property
def n_subjects(self):
""" Number of subjects in task """
return Run.query.filter_by(
task_id=self.id).distinct('subject').count()
@hybrid_property
def n_runs_subject(self):
""" Number of runs per subject """
return statistics.mean(
[r[1] for r in db.session.query(
Run.subject, func.count(Run.id)).filter_by(
task_id=self.id).group_by(Run.subject)])
@hybrid_property
def avg_run_duration(self):
""" Average run duration (seconds) """
avg_run = db.session.query(func.avg(Run.duration)).filter_by(
task_id=self.id).all()[0][0]
if avg_run:
avg_run = round(avg_run, 2)
return avg_run
def __repr__(self):
return '<models.Task[name={}]>'.format(self.name)
```
#### File: neuroscout/resources/dataset.py
```python
from flask_apispec import MethodResource, marshal_with, doc, use_kwargs
from webargs import fields
from ..models import Dataset
from ..core import cache
from .utils import first_or_404
from ..schemas.dataset import DatasetSchema
class DatasetResource(MethodResource):
@doc(tags=['dataset'], summary='Get dataset by id.')
@cache.cached(60 * 60 * 24 * 300, query_string=True)
@marshal_with(DatasetSchema)
def get(self, dataset_id):
return first_or_404(Dataset.query.filter_by(id=dataset_id))
class DatasetListResource(MethodResource):
@doc(tags=['dataset'], summary='Returns list of datasets.')
@use_kwargs({
'active_only': fields.Boolean(
missing=True, description="Return only active Datasets")
},
location='query')
@cache.cached(60 * 60 * 24 * 300, query_string=True)
@marshal_with(DatasetSchema(
many=True, exclude=['dataset_address', 'preproc_address', 'runs']))
def get(self, **kwargs):
query = {}
if kwargs.pop('active_only'):
query['active'] = True
return Dataset.query.filter_by(**query).all()
```
#### File: tasks/utils/warnings.py
```python
from .io import update_record
import pandas as pd
def _flatten(li):
return [item for sublist in li for item in sublist]
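# Example: _flatten([['a'], ['b', 'c']]) -> ['a', 'b', 'c']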
def _check_scale_variance(pes_df, transformations):
""" Warns if Scale transformation is applied to any variables with
no variance in a given run. """
# To numeric and coerce
pes_df['value'] = pd.to_numeric(pes_df.value, errors='coerce')
# Check for inputs to Scale with no variance
in_scale = [t['Input'] for t in transformations if t['Name'] == 'Scale']
in_scale = _flatten(in_scale)
no_var = []
if in_scale:
in_scale_df = pes_df[pes_df.predictor_name.isin(in_scale)]
for (n, id), grp in in_scale_df.groupby(['predictor_name', 'run_id']):
if grp.value.astype('float').var() == 0:
no_var.append(n)
messages = []
if no_var:
messages.append([
f"The following variables have no variance in at least one run: "
f"{', '.join(set(no_var))}. "
"Scale transformation cannot be applied."])
return messages
def _check_na(pes_df):
messages = []
nas = pes_df[pes_df.value == 'n/a']
if not nas.empty:
na_vars = nas.predictor_name.unique().tolist()
messages.append([
f"The following variables at least one n/a value: "
f"{', '.join(set(na_vars))}. "
"You must remove n/as using the Scale transformation"
"to apply the HRF convolution."]
)
return messages
def pre_warnings(analysis, pes, report_object):
""" Generate warnings that are relevant prior to running pyBIDS.
This includes warnings that might result in pyBIDS crashes, and make sense
to catch early """
warnings = []
pes_df = pd.DataFrame(pes)
transformations = analysis['model']['Steps'][0]['Transformations']
pred_map = {di['id']: di['name'] for di in analysis['predictors']}
pes_df['predictor_name'] = pes_df.predictor_id.map(pred_map)
warnings += _check_scale_variance(pes_df, transformations)
if warnings:
update_record(
report_object,
warnings=warnings,
)
```
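A small sketch of what `_check_scale_variance` flags; the column names mirror the DataFrame assembled in `pre_warnings`, and the values are made up:
```python
import pandas as pd

pes_df = pd.DataFrame([
    {'predictor_name': 'brightness', 'run_id': 10, 'value': 0.5},
    {'predictor_name': 'brightness', 'run_id': 10, 'value': 0.5},
])
transformations = [{'Name': 'Scale', 'Input': ['brightness']}]
# Both values in run 10 are identical, so the returned messages warn that
# 'brightness' has no variance and cannot be scaled.
print(_check_scale_variance(pes_df, transformations))
```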
#### File: tests/api/test_analyses.py
```python
from ..request_utils import decode_json
from ...models import Analysis, Run, Report
from ...tasks import report
from ...core import app
import pytest
def test_get(session, auth_client, add_analysis):
# List of analyses
resp = auth_client.get('/api/analyses')
assert resp.status_code == 200
analysis_list = decode_json(resp)
assert type(analysis_list) == list
assert len(analysis_list) == 0 # Analysis is private by default
# Make analysis public
analysis = Analysis.query.filter_by(id=add_analysis).first()
dataset_id = analysis.dataset_id
name = analysis.name
analysis.private = False
analysis.status = "PASSED"
session.commit()
# List of analyses
resp = auth_client.get('/api/analyses')
assert resp.status_code == 200
analysis_list = decode_json(resp)
assert type(analysis_list) == list
assert len(analysis_list) == 1
# Filter by dataset_id and name
resp = auth_client.get('/api/analyses',
params=dict(dataset_id=dataset_id, name=name))
assert resp.status_code == 200
analysis_list = decode_json(resp)
assert len(analysis_list) == 1
# Filter with wrong name
resp = auth_client.get('/api/analyses',
params=dict(dataset_id=100, name='sds'))
assert resp.status_code == 200
analysis_list = decode_json(resp)
assert len(analysis_list) == 0
# Get first analysis
first_analysis_id = analysis.hash_id
# Get first analysis by id
resp = auth_client.get('/api/analyses/{}'.format(first_analysis_id))
assert resp.status_code == 200
analysis = decode_json(resp)
for required_fields in ['name', 'description']:
assert analysis[required_fields] != ''
# Try getting nonexistent analysis
resp = auth_client.get('/api/analyses/{}'.format(987654))
assert resp.status_code == 404
# assert 'requested URL was not found' in decode_json(resp)['message']
# Test getting resources
resp = auth_client.get('/api/analyses/{}/resources'.format(
first_analysis_id))
assert resp.status_code == 200
assert 'dataset_address' in decode_json(resp)
assert 'preproc_address' in decode_json(resp)
def test_post(auth_client, add_task, add_predictor):
# Add analysis
test_analysis = {
"dataset_id": add_task,
"name": "some analysis",
"description": "pretty damn innovative",
"model": {
"Name": "test_model1",
"Description": "this is a sample",
"Input": {
"task": "bidstest"
},
"Steps": [
{
"Level": "Run",
"Transformations": [
{
"Name": "Scale",
"Input": [
"BrightnessExtractor.brightness"
]
}
],
"Model": {
"X": [
"BrightnessExtractor.brightness",
"VibranceExtractor.vibrance"
]
},
"Contrasts": [
{
"Mame": "BvsV",
"ConditionList": [
"BrightnessExtractor.brightness",
"VibranceExtractor.vibrance"
],
"Weights": [
1,
-1
],
"Type": "T"
}
]
},
{
"Level": "Session",
},
{
"Level": "Subject",
"Model": {
"X": [
"BvsV"
]
},
},
{
"Level": "Dataset",
"Model": {
"X": [
"session_diff"
]
},
}
]
}
}
resp = auth_client.post('/api/analyses', data=test_analysis)
assert resp.status_code == 200
rv_json = decode_json(resp)
assert type(rv_json) == dict
for field in ['dataset_id', 'name', 'description', 'hash_id']:
assert field in rv_json
# Check db directly
assert Analysis.query.filter_by(hash_id=rv_json['hash_id']).count() == 1
assert Analysis.query.filter_by(
hash_id=rv_json['hash_id']).one().name == 'some analysis'
    # Re-post the analysis and check that the id is different
rv_2 = auth_client.post('/api/analyses', data=test_analysis)
assert rv_2.status_code == 200
assert decode_json(rv_2)['hash_id'] != decode_json(resp)['hash_id']
# Test incorrect post
dataset_id = decode_json(auth_client.get('/api/datasets'))[0]['id']
bad_post = {
"dataset_id": "234234",
"name": "some analysis",
"description": "pretty damn innovative"
}
resp = auth_client.post('/api/analyses', data=bad_post)
assert resp.status_code == 422
assert decode_json(resp)['message']['json']['dataset_id'][0] == \
'Invalid dataset id.'
no_name_OK = {
"dataset_id": dataset_id,
"description": "pretty damn innovative"
}
resp = auth_client.post('/api/analyses', data=no_name_OK)
assert resp.status_code == 200
def test_clone(session, auth_client, add_task, add_analysis, add_users):
(id1, id2), _ = add_users
analysis = Analysis.query.filter_by(id=add_analysis).first()
# Try cloning DRAFT
resp = auth_client.post('/api/analyses/{}/clone'.format(analysis.hash_id))
assert resp.status_code == 422
# Change status try again
analysis.status = 'PASSED'
session.commit()
resp = auth_client.post('/api/analyses/{}/clone'.format(analysis.hash_id))
clone_json = decode_json(resp)
assert clone_json['hash_id'] != analysis.hash_id
# Check that runs and predictors have been copied
assert len(clone_json['runs']) == len(analysis.runs)
assert len(clone_json['predictors']) == len(analysis.predictors)
def test_put(auth_client, add_analysis, add_task, session):
# Get analysis to edit
analysis = Analysis.query.filter_by(id=add_analysis).first()
analysis_json = decode_json(
auth_client.get('/api/analyses/{}'.format(analysis.hash_id)))
analysis_json['name'] = 'NEW NAME'
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 200
new_analysis = decode_json(resp)
assert new_analysis['name'] == "NEW NAME"
# Test adding a run_id
analysis_json['runs'] = [Run.query.first().id]
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 200
new_analysis = decode_json(resp)
assert new_analysis['runs'] == [Run.query.first().id]
# Test adding an invalid run id
analysis_json['runs'] = [9999]
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 422
assert 'runs' in decode_json(resp)['message']['json']
# Add and delete analysis
# Add analysis
test_analysis = {
"dataset_id": add_task,
"name": "some analysis",
"description": "pretty damn innovative"
}
resp = auth_client.post('/api/analyses', data=test_analysis)
analysis_json = decode_json(resp)
# Test adding a run_id
analysis_json['runs'] = [Run.query.first().id]
resp = auth_client.put('/api/analyses/{}'.format(analysis_json['hash_id']),
data=analysis_json)
assert resp.status_code == 200
# Lock analysis
analysis = Analysis.query.filter_by(hash_id=analysis_json['hash_id']).one()
analysis.locked = True
session.commit()
# Try editing locked analysis
analysis_json['description'] = "new!"
resp = auth_client.put('/api/analyses/{}'.format(
analysis_json['hash_id']),
data=analysis_json)
assert resp.status_code == 422
# Try deleting locked analysis
delresp = auth_client.delete('/api/analyses/{}'.format(
analysis_json['hash_id']))
assert delresp.status_code == 422
# Unlock and delete
analysis.locked = False
session.commit()
    # Try deleting the analysis
delresp = auth_client.delete('/api/analyses/{}'.format(
analysis_json['hash_id']))
assert delresp.status_code == 200
assert Analysis.query.filter_by(
hash_id=analysis_json['hash_id']).count() == 0
def test_autofill(auth_client, add_analysis, add_task, session):
# Get analysis to edit
analysis = Analysis.query.filter_by(id=add_analysis).first()
analysis_json = decode_json(
auth_client.get('/api/analyses/{}'.format(analysis.hash_id)))
analysis_json['predictors'] = []
analysis_json['runs'] = []
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 200
assert len(decode_json(resp)['predictors']) == 0
assert len(decode_json(resp)['runs']) == 0
resp = auth_client.post('/api/analyses/{}/fill'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 200
analysis_json = decode_json(resp)
assert len(analysis_json['predictors']) == 2
assert len(analysis_json['runs']) == 4
assert analysis_json['model']['Input'] == {
'Run': [1, 2], 'Subject': ['02', '01'], 'Task': ['bidstest']}
# Try with names that don't exist
analysis_json['model']['Steps'][0]["Model"]["X"] = \
["Brightness", "nonexistent"]
analysis_json['predictors'] = []
analysis_json['runs'] = []
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 200
# Partial fill should remove a predictor
resp = auth_client.post('/api/analyses/{}/fill'.format(analysis.hash_id),
data=analysis_json,
params=dict(partial=True, dryrun=True))
assert resp.status_code == 200
analysis_json = decode_json(resp)
assert len(analysis_json['predictors']) == 1
assert len(analysis_json['model']['Steps'][0]["Model"]["X"]) == 1
assert len(analysis_json['runs']) == 4
# No partial fill - extra predictor remains in model
resp = auth_client.post('/api/analyses/{}/fill'.format(analysis.hash_id),
data=analysis_json,
params=dict(partial=False, dryrun=True))
assert resp.status_code == 200
analysis_json = decode_json(resp)
assert len(analysis_json['predictors']) == 1
assert len(analysis_json['model']['Steps'][0]["Model"]["X"]) == 2
assert len(analysis_json['runs']) == 4
#
# Try with names that don't exist
analysis_json['model']['Steps'][0]["Model"]["X"] = \
["Brightness", "nonexistent"]
analysis_json['model']['Steps'][0]["Transformations"].append(
{
"Name": "Scale",
"Input": [
"nonexistent"
]
}
)
analysis_json['predictors'] = []
analysis_json['runs'] = []
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=analysis_json)
assert resp.status_code == 200
assert len(decode_json(resp)['model']['Steps'][0]["Transformations"]) == 2
# Partial fill should remove all transformations
resp = auth_client.post('/api/analyses/{}/fill'.format(analysis.hash_id),
data=analysis_json,
params=dict(partial=True, dryrun=True))
assert len(decode_json(resp)['model']['Steps'][0]["Transformations"]) == 0
assert len(decode_json(resp)['model']['Steps'][0]["Contrasts"]) == 0
def test_reports(session, auth_client, add_analysis):
analysis = Analysis.query.filter_by(id=add_analysis).first()
# Create new Report
r = Report(
analysis_id=analysis.hash_id,
runs=[analysis.runs[0].id]
)
session.add(r)
session.commit()
# Trigger report
_ = report.generate_report(app, analysis.hash_id, r.id)
# Get report
resp = auth_client.get('/api/analyses/{}/report'.format(analysis.hash_id))
assert resp.status_code == 200
if decode_json(resp)['status'] != 'OK':
print(decode_json(resp)['status'])
print(decode_json(resp)['traceback'])
assert 0
result = decode_json(resp)['result']
for f in ['design_matrix', 'design_matrix_corrplot', 'design_matrix_plot']:
assert f in result
assert len(result['design_matrix']) == 1
def test_compile(auth_client, add_analysis, add_analysis_fail):
analysis = Analysis.query.filter_by(id=add_analysis).first()
analysis_bad = Analysis.query.filter_by(id=add_analysis_fail).first()
with pytest.raises(Exception):
_ = report.compile(app, analysis_bad.hash_id)
# Test status
resp = auth_client.get('/api/analyses/{}/compile'.format(
analysis_bad.hash_id))
new_analysis = decode_json(resp)
if new_analysis['status'] == 'PASSED':
assert 0
# Test getting bundle prior to compiling
resp = auth_client.get('/api/analyses/{}/bundle'.format(analysis.hash_id))
assert resp.status_code == 404
# Test compiling
_ = report.compile(app, analysis.hash_id)
# Get full
resp = auth_client.get('/api/analyses/{}'.format(analysis.hash_id))
assert resp.status_code == 200
locked_analysis = decode_json(resp)
# Test editing status
locked_analysis['private'] = False
locked_analysis['new_name'] = "Should not change to this"
resp = auth_client.put('/api/analyses/{}'.format(analysis.hash_id),
data=locked_analysis)
assert resp.status_code == 200
assert decode_json(resp)['private'] is False
assert decode_json(resp)['name'] != "Should not change to this"
# Test status after some time
resp = auth_client.get('/api/analyses/{}/compile'.format(analysis.hash_id))
if decode_json(resp)['status'] != 'PASSED':
assert 0
# Try deleting locked analysis
resp = auth_client.delete('/api/analyses/{}'.format(analysis.hash_id))
assert resp.status_code == 422
# Test bundle is tarfile
resp = auth_client.get('/api/analyses/{}/bundle'.format(analysis.hash_id))
assert resp.status_code == 200
assert resp.mimetype == 'application/x-tar'
def test_auth_id(auth_client, add_analysis_user2):
# Try deleting analysis you are not owner of
analysis = Analysis.query.filter_by(id=add_analysis_user2).first()
resp = auth_client.delete('/api/analyses/{}'.format(analysis.hash_id))
assert resp.status_code == 404
def test_bibliography(auth_client, add_analysis, add_task, session):
# Get analysis to edit
analysis = Analysis.query.filter_by(id=add_analysis).first()
bib_json = decode_json(
auth_client.get('/api/analyses/{}/bibliography'.format(
analysis.hash_id)))
assert 'supporting' in bib_json
assert "https://test.test.com/" in bib_json['data'][0]
assert "Google Cloud Computing Services" in bib_json['extraction'][1]
assert len([j['id'] for j in bib_json['csl_json']]) == 4
```
|
{
"source": "jdkent/NiBetaSeries",
"score": 2
}
|
#### File: interfaces/tests/test_nistats.py
```python
import os
import json
from ..nistats import LSSBetaSeries, LSABetaSeries
from ..nistats import _lss_events_iterator, _lsa_events_converter
def test_lss_beta_series(sub_metadata, preproc_file, sub_events,
confounds_file, brainmask_file):
selected_confounds = ['WhiteMatter', 'CSF']
hrf_model = 'spm'
with open(str(sub_metadata), 'r') as md:
bold_metadata = json.load(md)
beta_series = LSSBetaSeries(bold_file=str(preproc_file),
bold_metadata=bold_metadata,
mask_file=str(brainmask_file),
events_file=str(sub_events),
confounds_file=str(confounds_file),
selected_confounds=selected_confounds,
hrf_model=hrf_model,
smoothing_kernel=None,
high_pass=0.008)
res = beta_series.run()
for beta_map in res.outputs.beta_maps:
assert os.path.isfile(beta_map)
os.remove(beta_map)
def test_lsa_beta_series(sub_metadata, preproc_file, sub_events,
confounds_file, brainmask_file):
selected_confounds = ['WhiteMatter', 'CSF']
hrf_model = 'spm'
with open(str(sub_metadata), 'r') as md:
bold_metadata = json.load(md)
beta_series = LSABetaSeries(bold_file=str(preproc_file),
bold_metadata=bold_metadata,
mask_file=str(brainmask_file),
events_file=str(sub_events),
confounds_file=str(confounds_file),
selected_confounds=selected_confounds,
hrf_model=hrf_model,
smoothing_kernel=None,
high_pass=0.008)
res = beta_series.run()
for beta_map in res.outputs.beta_maps:
assert os.path.isfile(beta_map)
os.remove(beta_map)
def test_lss_events_iterator(sub_events):
# all but the first instance of waffle
# should be changed to "other"
t_lst = ['other', 'fry', 'milkshake'] * 5
t_lst[0] = 'waffle'
res = _lss_events_iterator(sub_events)
out_df = list(res)[0][0]
out_lst = list(out_df['trial_type'])
assert t_lst == out_lst
def test_lsa_events_converter(sub_events):
# each instance of waffle, fry, and milkshake
# should have a different number
trial_type_lst = ['waffle', 'fry', 'milkshake'] * 5
number_lst = ['000{}'.format(x) for x in range(1, 6) for y in range(0, 3)]
t_lst = ['_'.join([trial, num]) for trial, num in zip(trial_type_lst, number_lst)]
res = _lsa_events_converter(sub_events)
out_lst = list(res['trial_type'])
assert t_lst == out_lst
```
#### File: workflows/tests/test_base.py
```python
import os.path as op
from nipype import config as ncfg
from ..base import init_nibetaseries_participant_wf
def test_init_nibetaseries_participant_wf(
bids_dir, deriv_dir, sub_fmriprep, sub_metadata, bold_file, preproc_file,
sub_events, confounds_file, brainmask_file, atlas_file, atlas_lut,
):
output_dir = op.join(str(bids_dir), 'derivatives', 'atlasCorr')
work_dir = op.join(str(bids_dir), 'derivatives', 'work')
deriv_dir = op.join(str(bids_dir), 'derivatives', 'fmriprep')
ncfg.update_config({
'logging': {'log_directory': work_dir,
'log_to_file': True},
'execution': {'crashdump_dir': work_dir,
'crashfile_format': 'txt',
'parameterize_dirs': False},
})
test_np_wf = init_nibetaseries_participant_wf(
estimator='lss',
atlas_img=str(atlas_file),
atlas_lut=str(atlas_lut),
bids_dir=str(bids_dir),
derivatives_pipeline_dir=deriv_dir,
exclude_description_label=None,
hrf_model='spm',
high_pass=0.008,
output_dir=output_dir,
run_label=None,
selected_confounds=['WhiteMatter', 'CSF'],
session_label=None,
smoothing_kernel=None,
space_label=None,
subject_list=["01"],
task_label=None,
description_label=None,
work_dir=work_dir)
assert test_np_wf.run()
test_np_wf = init_nibetaseries_participant_wf(
estimator='lsa',
atlas_img=str(atlas_file),
atlas_lut=str(atlas_lut),
bids_dir=str(bids_dir),
derivatives_pipeline_dir=deriv_dir,
exclude_description_label=None,
hrf_model='spm',
high_pass=0.008,
output_dir=output_dir,
run_label=None,
selected_confounds=['WhiteMatter', 'CSF'],
session_label=None,
smoothing_kernel=None,
space_label=None,
subject_list=["01"],
task_label=None,
description_label=None,
work_dir=work_dir)
assert test_np_wf.run()
```
|
{
"source": "jdkent/sdcflows",
"score": 2
}
|
#### File: sdcflows/workflows/syn.py
```python
from pkg_resources import resource_filename
from nipype import logging
from nipype.pipeline import engine as pe
from nipype.interfaces import fsl, utility as niu
from nipype.interfaces.image import Rescale
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as ApplyTransforms,
FixHeaderRegistration as Registration)
from niworkflows.func.util import init_skullstrip_bold_wf
DEFAULT_MEMORY_MIN_GB = 0.01
LOGGER = logging.getLogger('nipype.workflow')
def init_syn_sdc_wf(omp_nthreads, epi_pe=None,
atlas_threshold=3, name='syn_sdc_wf'):
"""
Build the *fieldmap-less* susceptibility-distortion estimation workflow.
This workflow takes a skull-stripped T1w image and reference BOLD image and
estimates a susceptibility distortion correction warp, using ANTs symmetric
normalization (SyN) and the average fieldmap atlas described in
[Treiber2016]_.
SyN deformation is restricted to the phase-encoding (PE) direction.
If no PE direction is specified, anterior-posterior PE is assumed.
SyN deformation is also restricted to regions that are expected to have a
>3mm (approximately 1 voxel) warp, based on the fieldmap atlas.
This technique is a variation on those developed in [Huntenburg2014]_ and
[Wang2017]_.
Workflow Graph
.. workflow ::
:graph2use: orig
:simple_form: yes
from sdcflows.workflows.syn import init_syn_sdc_wf
wf = init_syn_sdc_wf(
epi_pe='j',
omp_nthreads=8)
Inputs
------
in_reference
reference image
in_reference_brain
skull-stripped reference image
t1w_brain
skull-stripped, bias-corrected structural image
std2anat_xfm
inverse registration transform of T1w image to MNI template
Outputs
-------
out_reference
the ``in_reference`` image after unwarping
out_reference_brain
the ``in_reference_brain`` image after unwarping
out_warp
the corresponding :abbr:`DFM (displacements field map)` compatible with
ANTs
out_mask
mask of the unwarped input file
References
----------
.. [Treiber2016] <NAME>. et al. (2016) Characterization and Correction
of Geometric Distortions in 814 Diffusion Weighted Images,
PLoS ONE 11(3): e0152472. doi:`10.1371/journal.pone.0152472
<https://doi.org/10.1371/journal.pone.0152472>`_.
.. [Wang2017] <NAME>, et al. (2017) Evaluation of Field Map and Nonlinear
Registration Methods for Correction of Susceptibility Artifacts
in Diffusion MRI. Front. Neuroinform. 11:17.
doi:`10.3389/fninf.2017.00017
<https://doi.org/10.3389/fninf.2017.00017>`_.
.. [Huntenburg2014] <NAME>. (2014) Evaluating Nonlinear
Coregistration of BOLD EPI and T1w Images. Berlin: Master
Thesis, Freie Universität. `PDF
<http://pubman.mpdl.mpg.de/pubman/item/escidoc:2327525:5/component/escidoc:2327523/master_thesis_huntenburg_4686947.pdf>`_.
"""
if epi_pe is None or epi_pe[0] not in ['i', 'j']:
LOGGER.warning('Incorrect phase-encoding direction, assuming PA (posterior-to-anterior).')
epi_pe = 'j'
workflow = Workflow(name=name)
workflow.__desc__ = """\
A deformation field to correct for susceptibility distortions was estimated
based on *fMRIPrep*'s *fieldmap-less* approach.
The deformation field is that resulting from co-registering the BOLD reference
to the same-subject T1w-reference with its intensity inverted [@fieldmapless1;
@fieldmapless2].
Registration is performed with `antsRegistration` (ANTs {ants_ver}), and
the process regularized by constraining deformation to be nonzero only
along the phase-encoding direction, and modulated with an average fieldmap
template [@fieldmapless3].
""".format(ants_ver=Registration().version or '<ver>')
inputnode = pe.Node(
niu.IdentityInterface(['in_reference', 'in_reference_brain',
't1w_brain', 'std2anat_xfm']),
name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(['out_reference', 'out_reference_brain',
'out_mask', 'out_warp']),
name='outputnode')
# Collect predefined data
# Atlas image and registration affine
atlas_img = resource_filename('sdcflows', 'data/fmap_atlas.nii.gz')
# Registration specifications
affine_transform = resource_filename('sdcflows', 'data/affine.json')
syn_transform = resource_filename('sdcflows', 'data/susceptibility_syn.json')
invert_t1w = pe.Node(Rescale(invert=True), name='invert_t1w',
mem_gb=0.3)
ref_2_t1 = pe.Node(Registration(from_file=affine_transform),
name='ref_2_t1', n_procs=omp_nthreads)
t1_2_ref = pe.Node(ApplyTransforms(invert_transform_flags=[True]),
name='t1_2_ref', n_procs=omp_nthreads)
# 1) BOLD -> T1; 2) MNI -> T1; 3) ATLAS -> MNI
transform_list = pe.Node(niu.Merge(3), name='transform_list',
mem_gb=DEFAULT_MEMORY_MIN_GB)
transform_list.inputs.in3 = resource_filename(
'sdcflows', 'data/fmap_atlas_2_MNI152NLin2009cAsym_affine.mat')
# Inverting (1), then applying in reverse order:
#
# ATLAS -> MNI -> T1 -> BOLD
atlas_2_ref = pe.Node(
ApplyTransforms(invert_transform_flags=[True, False, False]),
name='atlas_2_ref', n_procs=omp_nthreads,
mem_gb=0.3)
atlas_2_ref.inputs.input_image = atlas_img
threshold_atlas = pe.Node(
fsl.maths.MathsCommand(args='-thr {:.8g} -bin'.format(atlas_threshold),
output_datatype='char'),
name='threshold_atlas', mem_gb=0.3)
fixed_image_masks = pe.Node(niu.Merge(2), name='fixed_image_masks',
mem_gb=DEFAULT_MEMORY_MIN_GB)
fixed_image_masks.inputs.in1 = 'NULL'
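    # Restrict the SyN deformation to the phase-encoding axis only
    # (x when the PE direction is 'i', y when it is 'j'); one entry per
    # registration stage.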
restrict = [[int(epi_pe[0] == 'i'), int(epi_pe[0] == 'j'), 0]] * 2
syn = pe.Node(
Registration(from_file=syn_transform, restrict_deformation=restrict),
name='syn', n_procs=omp_nthreads)
unwarp_ref = pe.Node(ApplyTransforms(
dimension=3, float=True, interpolation='LanczosWindowedSinc'),
name='unwarp_ref')
skullstrip_bold_wf = init_skullstrip_bold_wf()
workflow.connect([
(inputnode, invert_t1w, [('t1w_brain', 'in_file'),
('in_reference', 'ref_file')]),
(inputnode, ref_2_t1, [('in_reference_brain', 'moving_image')]),
(invert_t1w, ref_2_t1, [('out_file', 'fixed_image')]),
(inputnode, t1_2_ref, [('in_reference', 'reference_image')]),
(invert_t1w, t1_2_ref, [('out_file', 'input_image')]),
(ref_2_t1, t1_2_ref, [('forward_transforms', 'transforms')]),
(ref_2_t1, transform_list, [('forward_transforms', 'in1')]),
(inputnode, transform_list, [
('std2anat_xfm', 'in2')]),
(inputnode, atlas_2_ref, [('in_reference', 'reference_image')]),
(transform_list, atlas_2_ref, [('out', 'transforms')]),
(atlas_2_ref, threshold_atlas, [('output_image', 'in_file')]),
(threshold_atlas, fixed_image_masks, [('out_file', 'in2')]),
(inputnode, syn, [('in_reference_brain', 'moving_image')]),
(t1_2_ref, syn, [('output_image', 'fixed_image')]),
(fixed_image_masks, syn, [('out', 'fixed_image_masks')]),
(syn, outputnode, [('forward_transforms', 'out_warp')]),
(syn, unwarp_ref, [('forward_transforms', 'transforms')]),
(inputnode, unwarp_ref, [('in_reference', 'reference_image'),
('in_reference', 'input_image')]),
(unwarp_ref, skullstrip_bold_wf, [
('output_image', 'inputnode.in_file')]),
(unwarp_ref, outputnode, [('output_image', 'out_reference')]),
(skullstrip_bold_wf, outputnode, [
('outputnode.skull_stripped_file', 'out_reference_brain'),
('outputnode.mask_file', 'out_mask')]),
])
return workflow
```
#### File: workflows/tests/test_pepolar.py
```python
from os import cpu_count
import pytest
from niworkflows.interfaces.bids import DerivativesDataSink
from nipype.pipeline import engine as pe
from ..pepolar import (
check_pes, _split_epi_lists, init_prepare_epi_wf, init_pepolar_unwarp_wf
)
def test_split_epi_lists(bids_layouts, tmpdir):
"""Test preparation workflow."""
tmpdir.chdir()
layout = bids_layouts['testdata']
bold = layout.get(suffix='bold', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
epidata = layout.get(suffix='epi', desc=None, extension=['.nii.gz', '.nii'])
# EPI fmaps in HCP101006 have 3 volumes each
a, b = _split_epi_lists(
in_files=[(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata],
pe_dir=bold.get_metadata()['PhaseEncodingDirection']
)
assert len(a) == 3
assert len(b) == 3
# If we append the BOLD aligned (not that you would do this), then the
# second array should have 53 volumes (50 from _bold, 3 from _epi)
a, b = _split_epi_lists(
in_files=[(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata + [bold]],
pe_dir=bold.get_metadata()['PhaseEncodingDirection']
)
assert len(a) == 3
assert len(b) == 53
def test_prepare_epi_wf0(bids_layouts, tmpdir):
"""Test preparation workflow."""
tmpdir.chdir()
layout = bids_layouts['testdata']
bold = layout.get(suffix='bold', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
epidata = layout.get(suffix='epi', dir='LR', direction='LR',
desc=None, extension=['.nii.gz', '.nii'])
with pytest.raises(ValueError):
check_pes([
(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata], bold.get_metadata()['PhaseEncodingDirection'])
def test_prepare_epi_wf1(bids_layouts, tmpdir):
"""Test preparation workflow."""
tmpdir.chdir()
layout = bids_layouts['testdata']
bold = layout.get(suffix='bold', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
boldref = layout.get(suffix='boldref', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
epidata = layout.get(suffix='epi', desc=None, extension=['.nii.gz', '.nii'])
matched_pe = check_pes([(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata], bold.get_metadata()['PhaseEncodingDirection'])
assert matched_pe is True
wf = init_prepare_epi_wf(omp_nthreads=1, matched_pe=matched_pe)
wf.inputs.inputnode.maps_pe = [(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata]
wf.inputs.inputnode.ref_brain = boldref.path
wf.inputs.inputnode.epi_pe = bold.get_metadata()['PhaseEncodingDirection']
def test_prepare_epi_wf2(bids_layouts, tmpdir):
"""Test preparation workflow."""
tmpdir.chdir()
layout = bids_layouts['testdata']
bold = layout.get(suffix='bold', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
boldref = layout.get(suffix='boldref', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
epidata = layout.get(suffix='epi', dir='RL', direction='RL',
desc=None, extension=['.nii.gz', '.nii'])
matched_pe = check_pes([(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata], bold.get_metadata()['PhaseEncodingDirection'])
assert matched_pe is False
wf = init_prepare_epi_wf(omp_nthreads=1, matched_pe=matched_pe)
wf.inputs.inputnode.maps_pe = [(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata]
wf.inputs.inputnode.ref_brain = boldref.path
wf.inputs.inputnode.epi_pe = bold.get_metadata()['PhaseEncodingDirection']
@pytest.mark.parametrize('dataset', [
'ds001600',
'testdata',
])
def test_pepolar_wf1(bids_layouts, output_path, dataset, workdir):
"""Test preparation workflow."""
layout = bids_layouts[dataset]
if dataset == 'testdata':
bold = layout.get(suffix='bold', dir='LR', direction='LR',
extension=['.nii.gz', '.nii'])[0]
boldref = layout.get(suffix='boldref', dir='LR', direction='LR', desc='brain',
extension=['.nii.gz', '.nii'])[0]
elif dataset == 'ds001600':
bold = layout.get(suffix='bold', acquisition='AP',
extension=['.nii.gz', '.nii'])[0]
epidata = layout.get(suffix='epi', desc=None, extension=['.nii.gz', '.nii'])
matched_pe = check_pes([(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata], bold.get_metadata()['PhaseEncodingDirection'])
wf = init_pepolar_unwarp_wf(omp_nthreads=cpu_count(), matched_pe=matched_pe)
wf.inputs.inputnode.fmaps_epi = [(im.path, im.get_metadata()['PhaseEncodingDirection'])
for im in epidata]
wf.inputs.inputnode.epi_pe_dir = bold.get_metadata()['PhaseEncodingDirection']
if output_path:
from nipype.interfaces import utility as niu
from ..pepolar import Workflow
from ...interfaces.reportlets import FieldmapReportlet
boiler = Workflow(name='pepolar_%s' % dataset)
split_field = pe.Node(niu.Function(function=_split_field), name='split_field')
if dataset == 'ds001600':
from niworkflows.func.util import init_bold_reference_wf
gen_ref = init_bold_reference_wf(
omp_nthreads=cpu_count(),
bold_file=bold.path)
boiler.connect([
(gen_ref, wf, [
('outputnode.ref_image', 'inputnode.in_reference'),
('outputnode.ref_image_brain', 'inputnode.in_reference_brain')])
])
else:
wf.inputs.inputnode.in_reference_brain = boldref.path
wf.inputs.inputnode.in_reference = boldref.path
rep = pe.Node(FieldmapReportlet(reference_label='EPI Reference'), 'simple_report')
rep.interface._always_run = True
dsink = pe.Node(DerivativesDataSink(
base_directory=str(output_path), keep_dtype=True,
desc='pepolar'), name='dsink')
dsink.interface.out_path_base = 'sdcflows'
dsink.inputs.source_file = epidata[0].path
boiler.connect([
(wf, split_field, [
('inputnode.epi_pe_dir', 'pe_dir'),
('outputnode.out_warp', 'in_field')]),
(split_field, rep, [
('out', 'fieldmap')]),
(wf, rep, [
# ('outputnode.out_warp', 'fieldmap'),
('outputnode.out_reference_brain', 'reference'),
('outputnode.out_mask', 'mask')]),
(rep, dsink, [('out_report', 'in_file')]),
])
if workdir:
boiler.base_dir = str(workdir)
boiler.run(plugin='MultiProc', plugin_args={'n_proc': cpu_count()})
def _split_field(in_field, pe_dir):
from os.path import abspath
import numpy as np
import nibabel as nb
axis = 'ijk'.index(pe_dir[0])
im = nb.load(in_field)
data = np.squeeze(im.get_fdata())[..., axis]
dirnii = nb.Nifti1Image(data, im.affine, im.header)
dirnii.to_filename('fieldmap.nii.gz')
return abspath('fieldmap.nii.gz')
```
|
{
"source": "jdkizer9/cs5555Lab2",
"score": 2
}
|
#### File: mydata/controllers/my_data_network_controller.py
```python
from rauth import OAuth2Service
import base64
from django.http import HttpResponseRedirect
import json
class DPUNetworkController(object):
__instance_map = {}
@classmethod
def RegisterClient(cls, name, instance):
cls.__instance_map[name] = instance
print cls.__instance_map
@classmethod
def CheckForClient(cls, name):
return name in cls.__instance_map
@classmethod
def GetClient(cls, name):
print cls.__instance_map
return cls.__instance_map[name]
def __init__(self, name, client_id, client_secret):
self.name = name
self.service_name = 'omh-client-' + name
self.client_id = client_id
self.client_secret = client_secret
self.service = OAuth2Service(
name=self.service_name,
client_id=self.client_id,
            client_secret=self.client_secret,
access_token_url='https://ohmage-omh.smalldata.io/dsu/oauth/token',
authorize_url='https://ohmage-omh.smalldata.io/dsu/oauth/authorize',
base_url='https://ohmage-omh.smalldata.io/dsu/')
self.session = None
self.access_token = None
def is_client_authenticated(self):
return self.access_token != None
def get_authorize_url(self):
params = {'redirect_uri': "http://localhost:8000/callback",
'response_type': 'code'}
url = self.service.get_authorize_url(**params)
return url
def configure_access_token(self, code):
data = {'code': code,
'redirect_uri': "http://localhost:8000/callback",
'grant_type': 'authorization_code'}
pt_auth_header = self.client_id + ':' + self.client_secret
en_auth_header = base64.b64encode(pt_auth_header)
headers = {'Authorization': "Basic " + en_auth_header}
session = self.service.get_auth_session(data=data, headers=headers, decoder=json.loads)
self.session = session
access_token_response_body = self.service.access_token_response.json()
self.access_token = access_token_response_body['access_token']
def getData(self, params):
headers = {'Authorization': "Bearer " + self.access_token}
r = self.session.get('https://ohmage-omh.smalldata.io/dsu/dataPoints', params=params, headers=headers)
return r
def getPAMData(self, ios=True, created_on_or_after=None, created_before=None, skip=None, limit=100):
if(ios):
params = {'schema_namespace' : 'cornell',
'schema_name' : 'photographic-affect-meter-scores',
'schema_version' : '1.0',
'limit' : str(limit) }
else:
params = {'schema_namespace' : 'omh',
'schema_name' : 'pam',
'schema_version' : '1.0',
'limit' : str(limit) }
if(created_on_or_after):
params['created_on_or_after'] = created_on_or_after
if(created_before):
params['created_before'] = created_before
if(skip):
params['skip'] = str(skip)
return self.getData(params)
def getMobilityDailySummaryData(self, created_on_or_after=None, created_before=None, skip=None, limit=1000):
params = {'schema_namespace' : 'cornell',
'schema_name' : 'mobility-daily-summary',
'schema_version' : '1.0',
'limit' : str(limit) }
if(created_on_or_after):
params['created_on_or_after'] = created_on_or_after
if(created_before):
params['created_before'] = created_before
if(skip):
params['skip'] = str(skip)
return self.getData(params)
def getMobilityDailySegmentsData(self, created_on_or_after=None, created_before=None, skip=None, limit=1000):
params = {'schema_namespace' : 'cornell',
'schema_name' : 'mobility-daily-segments',
'schema_version' : '1.0',
'limit' : str(limit) }
if(created_on_or_after):
params['created_on_or_after'] = created_on_or_after
if(created_before):
params['created_before'] = created_before
if(skip):
params['skip'] = str(skip)
return self.getData(params)
def getAndroidMobilitySensorData(self, created_on_or_after=None, created_before=None, skip=None, limit=100):
params = {'schema_namespace' : 'omh',
'schema_name' : 'mobility',
'schema_version' : '1.0',
'limit' : str(limit) }
if(created_on_or_after):
params['created_on_or_after'] = created_on_or_after
if(created_before):
params['created_before'] = created_before
if(skip):
params['skip'] = str(skip)
return self.getData(params)
def getAndroidLocationSensorData(self, created_on_or_after=None, created_before=None, skip=None, limit=100):
params = {'schema_namespace' : 'omh',
'schema_name' : 'location',
'schema_version' : '1.0',
'limit' : str(limit) }
if(created_on_or_after):
params['created_on_or_after'] = created_on_or_after
if(created_before):
params['created_before'] = created_before
if(skip):
params['skip'] = str(skip)
return self.getData(params)
def getIOSSensorData(self, created_on_or_after=None, created_before=None, skip=None, limit=100):
params = {'schema_namespace' : 'cornell',
'schema_name' : 'mobility-stream-iOS',
'schema_version' : '1.0',
'limit' : str(limit) }
if(created_on_or_after):
params['created_on_or_after'] = created_on_or_after
if(created_before):
params['created_before'] = created_before
if(skip):
params['skip'] = str(skip)
return self.getData(params)
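# A minimal usage sketch for DPUNetworkController (illustrative only: the client id,
# secret, and query below are placeholders, not real credentials):
#   controller = DPUNetworkController('mydata', 'example-client-id', 'example-client-secret')
#   DPUNetworkController.RegisterClient('mydata', controller)
#   redirect_url = controller.get_authorize_url()   # send the user here to authorize
#   controller.configure_access_token(code)         # 'code' comes back on the OAuth callback
#   response = controller.getPAMData(limit=50)      # then query DSU data points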
class GithubNetworkController(object):
__instance_map = {}
@classmethod
def RegisterClient(cls, name, instance):
cls.__instance_map[name] = instance
print cls.__instance_map
@classmethod
def CheckForClient(cls, name):
return name in cls.__instance_map
@classmethod
def GetClient(cls, name):
print cls.__instance_map
return cls.__instance_map[name]
def __init__(self, name, client_id, client_secret):
self.name = name
self.service_name = 'github-' + name
self.client_id = client_id
self.client_secret = client_secret
self.service = OAuth2Service(
name=self.service_name,
client_id=self.client_id,
            client_secret=self.client_secret,
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize',
base_url='https://github.com/')
self.session = None
self.access_token = None
def is_client_authenticated(self):
return self.access_token != None
def get_authorize_url(self):
params = {'redirect_uri': "http://localhost:8000/callback/github_callback",
'state': 'A random string',
'scope' : 'user,repo'}
url = self.service.get_authorize_url(**params)
return url
def configure_access_token(self, code):
data = {'code': code,
'redirect_uri': "http://localhost:8000/callback/github_callback"}
        # GitHub's token endpoint returns JSON only when asked via the Accept header
        # (assumed here, since decoder=json.loads expects a JSON response)
        headers = {'Accept': 'application/json'}
        session = self.service.get_auth_session(data=data, headers=headers, decoder=json.loads)
self.session = session
access_token_response_body = self.service.access_token_response.json()
self.access_token = access_token_response_body['access_token']
```
|
{
"source": "jdkizer9/ls2_app",
"score": 2
}
|
#### File: jdkizer9/ls2_app/datapoint_migration.py
```python
from study_management.models import Datapoint
import json
def migrate_datapoints():
for datapoint in Datapoint.objects.all():
datapoint.encrypted_body = json.dumps(datapoint.body)
datapoint.save()
```
#### File: LS2/settings/database_settings.py
```python
import urllib.request
import json
from django.core.exceptions import ImproperlyConfigured
def get_database_settings(environ):
database_settings = {}
database_settings["DATABASES"] = environ.get("databases")
database_settings["DATABASE_ROUTERS"] = environ.get('database_routers', [])
    fernet_keys = environ.get('fernet_keys')
    if not fernet_keys or len(fernet_keys[0]) <= 1:
        raise ImproperlyConfigured("Fernet keys improperly configured")
database_settings["FERNET_KEYS"] = environ.get('fernet_keys')
return database_settings
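# Illustrative shape of the 'environ' mapping consumed above (values are placeholders
# supplied by the real settings backend):
#   environ = {
#       'databases': {'default': {...}, 'datapoints': {...}},
#       'database_routers': ['study_management.database_routers.DatapointRouter'],
#       'fernet_keys': ['<44-character base64 Fernet key>'],
#   }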
# def get_databases(environ):
# # try:
# # databases_json_string = environ.get('DATABASE_JSON_STRING', '')
# # databases = json.loads(databases_json_string)
# # return environ.get('DATABASE_DICT')
# # except ValueError:
# # return None
# return environ.get('DATABASE_DICT')
# def get_database_routers(environ):
# database_routers_string = environ.get('DATABASE_ROUTERS')
# if database_routers_string == None:
# return []
# else:
# return database_routers_string.split(',')
# def get_fernet_keys(environ):
# fernet_keys_string = environ.get('FERNET_KEYS')
# fernet_keys = fernet_keys_string.split('\n')
# fernet_keys.reverse()
# if len(fernet_keys) == 0 or len(fernet_keys[0]) == 0:
# raise ImproperlyConfigured("No FERNET_KEYS specified")
# return fernet_keys
```
#### File: ls2_app/study_management/database_routers.py
```python
import logging
logger = logging.getLogger(__name__)
class DatapointRouter:
def db_for_read(self, model, **hints):
# logger.debug('db_for_read')
# logger.debug(model)
# logger.debug(model._meta.label)
if model._meta.label == "study_management.Datapoint":
logger.debug("returning datapoints db")
return "datapoints"
return None
def db_for_write(self, model, **hints):
if model._meta.label == "study_management.Datapoint":
logger.debug("returning datapoints db")
return "datapoints"
return None
def allow_relation(self, obj1, obj2, **hints):
if obj1._meta.label == "study_management.Datapoint" or \
obj2._meta.label == "study_management.Datapoint":
return False
else:
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
if db == "datapoints":
return app_label == "study_management" and model_name == "datapoint"
else:
return None
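# Sketch of the settings wiring this router assumes (alias names other than 'datapoints'
# are illustrative):
#   DATABASE_ROUTERS = ['study_management.database_routers.DatapointRouter']
#   DATABASES = {'default': {...}, 'datapoints': {...}}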
```
#### File: management/commands/generatefernetkey.py
```python
from django.core.management.base import BaseCommand, CommandError
from study_management.models import Datapoint
from cryptography.fernet import Fernet
import base64
class Command(BaseCommand):
help = 'Generates a new Fernet key'
def handle(self, *args, **options):
key = Fernet.generate_key()
        self.stdout.write(self.style.SUCCESS(key.decode()))
```
#### File: management/commands/rotatefernetkey.py
```python
from django.core.management.base import BaseCommand, CommandError
from study_management.models import Datapoint
class Command(BaseCommand):
help = 'Rotates the Fernet key to the latest for each Datapoint'
def handle(self, *args, **options):
datapoint_encrypted_fields = ['body', 'metadata']
        self.stdout.write(self.style.SUCCESS('Rotating %s datapoints' % Datapoint.objects.all().count()))
i = 0
for datapoint in Datapoint.objects.all():
datapoint.save(update_fields=datapoint_encrypted_fields, force_update=True)
i = i + 1
self.stdout.write(self.style.SUCCESS('Successfully rotated %s datapoints' % i))
```
#### File: management/commands/testldap.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import os
from LS2.settings import settings_backend
import ldap
import getpass
from django_auth_ldap.config import LDAPSearch
class Command(BaseCommand):
help = 'Tests the LDAP connection'
def add_arguments(self, parser):
parser.add_argument('username')
def handle(self, *args, **options):
os_environ_dict = dict(os.environ)
environ = settings_backend.get_settings_environ(os_environ_dict)
ldap_enabled = environ.get('LS2_LDAP_ENABLED', False)
if ldap_enabled == False:
raise CommandError("LDAP is not enabled")
server_uri = environ.get('LS2_LDAP_SERVER_URI')
if server_uri == None:
raise CommandError("LDAP server URI info missing")
self.stdout.write(self.style.SUCCESS(f'Attempting to connect to {server_uri}'))
##ignore cert errors for now
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
ldap.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)
l = ldap.initialize(server_uri, trace_level=0)
self.stdout.write(self.style.SUCCESS(f'Server initialization success'))
bind_dn = environ.get('LS2_LDAP_BIND_DN')
bind_password = environ.get('LS2_LDAP_BIND_PASSWORD')
if bind_dn == None or bind_password == None:
raise CommandError("LDAP bind user info missing")
self.stdout.write(self.style.SUCCESS(f'Attempting to bind with DN {bind_dn}'))
r = l.simple_bind_s(bind_dn, bind_password)
self.stdout.write(self.style.SUCCESS(f'Bind Successful'))
ldap_search_base_dn = environ.get('LS2_LDAP_SEARCH_BASE_DN')
ldap_search_scope = ldap.SCOPE_SUBTREE
ldap_search_filter_template = environ.get('LS2_LDAP_SEARCH_FILTER')
user = options['username']
        password = getpass.getpass()
self.stdout.write(f'Attempting to search for {user} in \"{ldap_search_base_dn}\"')
        self.stdout.write(f'Filter {ldap_search_filter_template}')
        ldap_search = LDAPSearch(ldap_search_base_dn, ldap_search_scope, ldap_search_filter_template)
results = ldap_search.execute(l, {'user': user})
if results is not None and len(results) == 1:
pass
else:
raise CommandError("NOT FOUND")
user_dn = results[0][0]
l.simple_bind_s(user_dn, password)
self.stdout.write('Test Successful')
```
#### File: ls2_app/study_management/serializers.py
```python
from rest_framework import serializers
from .models import Datapoint, Participant
import json
class DatapointSerializer(serializers.ModelSerializer):
class Meta:
model = Datapoint
# fields = ('id', 'participant', 'study', 'uuid')
fields = '__all__'
def to_internal_value(self, data):
request = self.context.get('request')
if not request:
raise serializers.ValidationError({
'request': 'The request is required as part of the context.'
})
try:
participant = request.user.participant
participant_uuid = participant.uuid
study_uuid = participant.study.uuid
except Participant.DoesNotExist:
raise serializers.ValidationError({
'participant': 'The request must include a valid participant.'
})
header = data.get('header')
# Perform the data validation.
if not header:
raise serializers.ValidationError({
'header': 'This field is required.'
})
uuid = header.get('id')
if not uuid:
raise serializers.ValidationError({
'header.id': 'This field is required.'
})
schema = header.get('schema_id')
if not schema:
raise serializers.ValidationError({
'header.schema': 'This field is required.'
})
schema_namespace = schema.get('namespace')
if not schema_namespace:
raise serializers.ValidationError({
'header.schema.namespace': 'This field is required.'
})
schema_name = schema.get('name')
if not schema_name:
raise serializers.ValidationError({
'header.schema.name': 'This field is required.'
})
schema_version = schema.get('version')
if not schema_version:
raise serializers.ValidationError({
'header.schema.version': 'This field is required.'
})
split_version = schema_version.split('.')
if len(split_version) != 3:
raise serializers.ValidationError({
'header.schema.version': 'This field must support semantic versioning (i.e., major.minor.patch).'
})
try:
schema_version_major = int(split_version[0])
schema_version_minor = int(split_version[1])
schema_version_patch = int(split_version[2])
except ValueError:
raise serializers.ValidationError({
'header.schema.version': 'This field must support semantic versioning (i.e., major.minor.patch).'
})
except Exception:
raise serializers.ValidationError({
'header.schema.version': 'This field must support semantic versioning (i.e., major.minor.patch).'
})
acquisition_provenance = header.get('acquisition_provenance')
if not acquisition_provenance:
raise serializers.ValidationError({
'header.acquisition_provenance': 'This field is required.'
})
ap_source_name = acquisition_provenance.get('source_name')
if not ap_source_name:
raise serializers.ValidationError({
'header.acquisition_provenance.source_name': 'This field is required.'
})
ap_source_creation_date_time = acquisition_provenance.get('source_creation_date_time')
if not ap_source_creation_date_time:
raise serializers.ValidationError({
'header.acquisition_provenance.source_creation_date_time': 'This field is required.'
})
ap_source_modality = acquisition_provenance.get('modality')
if not ap_source_modality:
raise serializers.ValidationError({
'header.acquisition_provenance.modality': 'This field is required.'
})
body = json.dumps(data.get('body'))
if not body:
raise serializers.ValidationError({
'body': 'This field is required.'
})
internal_representation = {
'participant_uuid': participant_uuid,
'study_uuid': study_uuid,
'uuid': uuid,
'schema_namespace': schema_namespace,
'schema_name': schema_name,
'schema_version_major': schema_version_major,
'schema_version_minor': schema_version_minor,
'schema_version_patch': schema_version_patch,
'ap_source_name': ap_source_name,
'ap_source_creation_date_time': ap_source_creation_date_time,
'ap_source_modality': ap_source_modality,
'body': body
}
headerJSON = header.get('metadata')
if headerJSON != None:
internal_representation['metadata'] = json.dumps(headerJSON)
return internal_representation
def to_representation(self, obj):
schema_version = '.'.join([
str(obj.schema_version_major),
str(obj.schema_version_minor),
str(obj.schema_version_patch),
])
header = {
'participant_id': obj.participant_uuid,
'id': obj.uuid,
'creation_date_time': obj.created_date_time,
'schema_id': {
'namespace': obj.schema_namespace,
'name': obj.schema_name,
'version': schema_version,
},
'acquisition_provenance': {
'source_name': obj.ap_source_name,
'modality': obj.ap_source_modality,
'source_creation_date_time': obj.ap_source_creation_date_time,
}
}
if obj.metadata != None:
header['metadata'] = json.loads(obj.metadata)
return {
'header': header,
'body': json.loads(obj.body)
}
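# Illustrative payload accepted by to_internal_value above (field values are made up,
# but the structure matches the validation performed):
#   {
#     "header": {
#       "id": "c7c2f3a0-0000-0000-0000-000000000000",
#       "schema_id": {"namespace": "cornell", "name": "photographic-affect-meter-scores",
#                     "version": "1.0.0"},
#       "acquisition_provenance": {"source_name": "example-app",
#                                  "source_creation_date_time": "2020-01-01T00:00:00Z",
#                                  "modality": "self-reported"},
#       "metadata": {"optional": true}
#     },
#     "body": {"affect_arousal": 3}
#   }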
class ParticipantMappingSerializer(serializers.Serializer):
participant_id = serializers.UUIDField()
username = serializers.CharField()
def to_representation(self, obj):
return {
'participant_id': obj.uuid,
'username': obj.user.username
}
class ParticipantAccountGeneratorAuthenticationSerializer(serializers.Serializer):
generator_id = serializers.UUIDField()
generator_password = serializers.CharField()
class TokenBasedParticipantAccountGeneratorAuthenticationSerializer(serializers.Serializer):
generator_id = serializers.UUIDField()
token = serializers.CharField()
```
|
{
"source": "jdkleuver/PcodeSym",
"score": 3
}
|
#### File: PcodeSym/ghidra_scripts/RunSolve.py
```python
import argparse
def run_script(server_host, server_port):
import ghidra_bridge
# load something ghidra doesn't have
import angr
from angr.engines.pcode.lifter import IRSB, PcodeBasicBlockLifter, ExitStatement, IRSB_MAX_SIZE, IRSB_MAX_INST, MAX_INSTRUCTIONS, MAX_BYTES
import claripy
import sys
import pypcode
import archinfo
import time
print("Running inside the bridge!")
# create the bridge and load the flat API/ghidra modules into the namespace
with ghidra_bridge.GhidraBridge(connect_to_host=server_host, connect_to_port=server_port, namespace=globals()) as bridge:
class MemoryMapping():
def __init__(self, program, startAddress):
self.program = program
self.startAddress = startAddress
# when calling an external function, we need to remember which function and library it is that we call
next_function = ""
next_library = ""
class MySpace():
def __init__(self, name):
self.name = name
class MyAddress(pypcode.Address):
def __init__(self, ctx, space, offset, ghidra_address):
super().__init__(ctx, space, offset)
self.ghidra_address = ghidra_address
@property
def is_constant(self):
return self.ghidra_address.isConstantAddress()
class MyVarnode(pypcode.Varnode):
def __init__(self, ctx, space, offset, size, ghidra_varnode):
super().__init__(ctx, space, offset, size)
program = getCurrentProgram()
language = program.getLanguage()
programContext = bridge.get_ghidra_api().program.util.ProgramContextImpl(language)
spaceContext = bridge.get_ghidra_api().program.util.ProgramContextImpl(language)
self.vcontext = bridge.get_ghidra_api().program.util.VarnodeContext(program, programContext, spaceContext)
self.ghidra_varnode = ghidra_varnode
def get_register_name(self):
return self.vcontext.getRegister(self.ghidra_varnode).getName()
def get_space_from_const(self):
# self.ghidra_varnode.getAddress().getAddressSpace().getName() returns const, but for some reason that won't work
return MySpace("mem") # if the name of the address space is "const" then it expects this to return an addres space with a name of either "ram" or "mem", not sure exactly the consequences of faking this out are
def get_addr(self):
return MyAddress(self.ctx, self.space, self.offset, self.ghidra_varnode.getAddress())
class GhidraPcodeBlockLifter(PcodeBasicBlockLifter):
def __init__(self, arch):
super().__init__(arch)
'''
Mostly copied this whole function from PcodeBasicBlockLifter
just changed the line that calls out to pypcode translate to
do a direct mapping from pcode to TranslationResult instead
'''
def lift(self,
irsb,
program,
baseaddr,
adjusted_address,
pcodes,
bytes_offset = 0,
max_bytes = None,
max_inst = None):
if max_bytes is None or max_bytes > MAX_BYTES:
max_bytes = min(len(pcodes), MAX_BYTES)
if max_inst is None or max_inst > MAX_INSTRUCTIONS:
max_inst = MAX_INSTRUCTIONS
irsb.behaviors = self.behaviors # FIXME
# Translate
addr = baseaddr + bytes_offset
##### Start of modified block ######
pcode_array = []
for pcode in pcodes:
inputs_varnodes = []
# convert pcode input Varnodes to pypcode Varnodes
for inp in pcode.inputs:
inputs_varnodes.append(MyVarnode(self.context, inp.getAddress().getAddressSpace(), inp.offset, inp.size, inp))
# convert pcode output Varnode to pypcode Varnode
if pcode.output is not None:
output_varnode = MyVarnode(self.context, pcode.output.getAddress().getAddressSpace(), pcode.output.offset, pcode.output.size, pcode.output)
else:
output_varnode = None
# Convert Ghidra raw Pcode to pypcode PcodeOp
pcode_array.append(pypcode.PcodeOp(self.context, pcode.seqnum, pypcode.OpCode(pcode.opcode), inputs_varnodes, output_varnode))
translations = []
addrspace = getAddressFactory().getAddress(hex(baseaddr)).getAddressSpace()
address = pypcode.Address(self.context, addrspace, baseaddr)
instruction = program.getListing().getInstructionAt(getAddressFactory().getAddress(adjusted_address))
# Convert PcodeOps to Translations
translation = pypcode.Translation(
ctx = self.context,
address = address,
length = instruction.getLength(),
asm_mnem = instruction.getMnemonicString(),
asm_body = instruction.toString().split(instruction.getMnemonicString())[1],
ops = pcode_array
)
translations.append(translation)
##### End modified block #####
irsb._instructions = translations
# Post-process block to mark exits and next block
next_block = None
for insn in irsb._instructions:
for op in insn.ops:
if (op.opcode in [pypcode.OpCode.BRANCH, pypcode.OpCode.CBRANCH]
and op.inputs[0].get_addr().is_constant):
print('Block contains relative p-code jump at '
'instruction {}:{}, which is not emulated '
'yet.'.format(op.seq.getTarget().getOffset(), op.seq.getTime()))
if op.opcode == pypcode.OpCode.CBRANCH:
irsb._exit_statements.append((
op.seq.getTarget().getOffset(), op.seq.getTime(),
ExitStatement(op.inputs[0].offset, 'Ijk_Boring')))
elif op.opcode == pypcode.OpCode.BRANCH:
next_block = (op.inputs[0].offset, 'Ijk_Boring')
elif op.opcode == pypcode.OpCode.BRANCHIND:
next_block = (None, 'Ijk_Boring')
elif op.opcode == pypcode.OpCode.CALL:
next_block = (op.inputs[0].offset, 'Ijk_Call')
elif op.opcode == pypcode.OpCode.CALLIND:
next_block = (None, 'Ijk_Call')
elif op.opcode == pypcode.OpCode.RETURN:
next_block = (None, 'Ijk_Ret')
if len(irsb._instructions) > 0:
last_insn = irsb._instructions[-1]
fallthru_addr = last_insn.address.offset + last_insn.length
else:
fallthru_addr = addr
if next_block is None:
next_block = (fallthru_addr, 'Ijk_Boring')
irsb.next, irsb.jumpkind = next_block
def is_successful(state):
if(state.ip.args[0] == sink):
return True
return False
def get_func_address(funcName):
return int(getFunction(funcName).getBody().getMinAddress().toString(), 16)
def get_pcode_at_address(address):
# Fails when trying to get pcode of an external thunk-ed function
try:
return getCurrentProgram().getListing().getInstructionAt(getAddressFactory().getAddress(address)).getPcode(), getCurrentProgram(), address
except AttributeError:
# The address doesn't exist in the main program, check if globals are set
global next_library
global next_function
if next_library != "" and next_function != "":
external_program = get_external_program(next_library)
                    functionManager = external_program.getFunctionManager()
                    function = None
for fn in functionManager.getFunctions(True):
if fn.getName() == next_function:
function = fn
break
if function is None:
# couldn't find the function in external program, propagate exception
print("Couldn't find function {} in {}".format(next_function, next_library))
raise
functionAddress = function.getBody().getMinAddress().getOffset()
memory_start = int(address, 16) - (functionAddress - external_program.getImageBase().getOffset()) # find the address where this library is mapped in memory
address_in_program = hex(int(address, 16) - memory_start + external_program.getImageBase().getOffset())
print("Address {} is at {} in program {}".format(address, address_in_program, next_library))
next_library = ""
next_function = ""
return external_program.getListing().getInstructionAt(getAddressFactory().getAddress(address_in_program)).getPcode(), external_program, address_in_program
else:
raise
def successor_func(state, **run_args):
currentAddress = state.ip.args[0]
containingFunction = get_function_containing_address(hex(currentAddress))
print("current address in state:", hex(currentAddress))
# figure out if we are about to make a call to an external program
if containingFunction is not None and containingFunction.isThunk():
externalLibraryName = get_library_name(containingFunction)
print("Preparing for external function call to {} in {}".format(get_function_name(containingFunction), externalLibraryName))
# prepare to get the function in the external program
global next_library
global next_function
next_library = externalLibraryName
next_function = get_function_name(containingFunction)
try:
current_pcode, program, adjusted_address = get_pcode_at_address(hex(currentAddress))
except AttributeError:
print("Couldn't get pcode at address:", hex(currentAddress), "falling back to pypcode lifter")
# fallback to original lifter for external function
return state.project.factory.successors(state, **run_args)
irsb = IRSB.empty_block(archinfo.ArchAMD64, currentAddress, None, None, None, None, None, None)
block_lifter.lift(irsb, program, currentAddress, adjusted_address, current_pcode, 0, None, None)
return state.project.factory.successors(state, irsb=irsb, **run_args)
def get_function_containing_address(address):
return currentProgram.getFunctionManager().getFunctionContaining(getAddressFactory().getAddress(address))
def get_library_name(function):
if not function.isThunk():
print("Can't find library name for a non-Thunk function")
return None
thunked_function = function.getThunkedFunction(True)
if not thunked_function.isExternal():
print("Can't get library name for function that is not external")
return None
return thunked_function.getExternalLocation().getLibraryName()
def get_function_name(function):
return function.getName()
def get_external_program(library_name):
libraryPath = currentProgram.getExternalManager().getExternalLibrary(library_name).getAssociatedProgramPath()
libraryFile = state.getProject().getProjectData().getFile(libraryPath)
libraryProgram = libraryFile.getImmutableDomainObject(java.lang.Object(), ghidra.framework.model.DomainFile.DEFAULT_VERSION, None)
return libraryProgram
def get_pcode_of_external_function(program, function_name):
            functionManager = program.getFunctionManager()
            function = None
for fn in functionManager.getFunctions(True):
if fn.getName() == function_name:
function = fn
break
if function is None:
return None
firstInstruction = program.getListing().getInstructionAt(function.getBody().getMinAddress())
lastInstruction = program.getListing().getInstructionAt(function.getBody().getMaxAddress())
currentInstruction = firstInstruction
pcode = []
pcode += currentInstruction.getPcode()
while True:
currentInstruction = currentInstruction.getNext()
pcode += currentInstruction.getPcode()
if currentInstruction == lastInstruction.getNext():
# Reached the end of the function
break
print("Min address:", function.getBody().getMinAddress())
print("Max address:", function.getBody().getMaxAddress())
print("Pcodes:", pcode)
return pcode
def get_sink_address():
sink_addr = ghidra.concolic.ConcolicAnalyzer.getSink()
if sink_addr is None:
print('Please set the Sink address before running the script!')
sys.exit(1)
return int(sink_addr.toString(), 16)
def get_avoid_addresses():
avoid_addrs = [int(address.toString(), 16) for address in ghidra.concolic.ConcolicAnalyzer.getAvoidAddresses()]
if len(avoid_addrs) == 0:
print('WARN: list of avoid addresses is empty')
return avoid_addrs
def get_source_address():
source_addr = ghidra.concolic.ConcolicAnalyzer.getSource()
if source_addr is None:
print('Please set the Source address before running the script!')
sys.exit(1)
return int(source_addr.toString(), 16)
############ Setup state ##########
start_time = time.time()
# Get program name from ghidra
filename = getCurrentProgram().getExecutablePath()
base_address = getCurrentProgram().getImageBase().getOffset()
engine = ghidra.concolic.ConcolicAnalyzer.getEngine()
if engine.name() == "PYPCODE" or engine.name() == "PCODESYM":
project = angr.Project(filename, load_options={'main_opts':{'base_addr': base_address},'auto_load_libs':False}, engine=angr.engines.UberEnginePcode)
else:
project = angr.Project(filename, load_options={'main_opts':{'base_addr': base_address},'auto_load_libs':False})
sink = get_sink_address()
avoids = get_avoid_addresses()
start = get_source_address()
stdin_args = []
for buff in ghidra.concolic.ConcolicAnalyzer.getStdin():
if buff.getSymbolic():
stdin_args.append(claripy.BVS('arg' + str(len(stdin_args)), len(buff.getValue())*8))
else:
# process string with escape characters into a bytestring
value = buff.getValue().encode('utf-8').decode('unicode-escape').encode('utf-8')
stdin_args.append(claripy.BVV(value))
stdin_arg = angr.SimFileStream(name='stdin', content=claripy.Concat(*stdin_args), has_end=False)
func_args = []
for arg in ghidra.concolic.ConcolicAnalyzer.getArgs():
array_elems = []
for elem in arg.getValues():
if arg.getSymbolic():
array_elems.append(claripy.BVS('arg'+str(len(func_args)), len(elem)*8))
else:
# process string with escape characters into a bytestring
value = elem.encode('utf-8').decode('unicode-escape').encode('utf-8')
array_elems.append(claripy.BVV(value))
if arg.getArray():
func_args.append([angr.PointerWrapper(e) for e in array_elems])
else:
func_args.append(array_elems[0])
call_state = project.factory.call_state(start, *func_args, stdin=stdin_arg, add_options={angr.options.LAZY_SOLVES,
angr.options.ZERO_FILL_UNCONSTRAINED_MEMORY, angr.options.ZERO_FILL_UNCONSTRAINED_REGISTERS})
simulation = project.factory.simgr(call_state)
block_lifter = GhidraPcodeBlockLifter(archinfo.ArchAMD64)
######### Do symbolic execution ########
if engine.name() == "PCODESYM":
simulation.explore(find=is_successful, avoid=avoids, successor_func=successor_func)
else:
simulation.explore(find=is_successful, avoid=avoids)
######## Post run analysis #########
if len(simulation.found) > 0:
for solution_state in simulation.found:
for i, arg in enumerate(func_args):
if isinstance(arg, list):
print("[>>] arg {}:".format(i+1))
for k, elem in enumerate(arg):
print("\t{}: {!r}".format(k+1, solution_state.solver.eval(elem.value, cast_to=bytes).split(b"\0")[0]))
else:
print("[>>] arg {}: {!r}".format(i+1, solution_state.solver.eval(arg, cast_to=bytes).split(b"\0")[0]))
print("stdin: {}".format(solution_state.posix.dumps(0)))
else:
print("[>>>] no solution found :(")
print("Script ran in {} seconds".format(time.time() - start_time))
if __name__ == "__main__":
in_ghidra = False
try:
import ghidra
# we're in ghidra!
in_ghidra = True
except ModuleNotFoundError:
# not ghidra
pass
if in_ghidra:
import ghidra_bridge_server
script_file = getSourceFile().getAbsolutePath()
# spin up a ghidra_bridge_server and spawn the script in external python to connect back to it
python_path = ghidra.concolic.ConcolicAnalyzer.getPython()
ghidra_bridge_server.GhidraBridgeServer.run_script_across_ghidra_bridge(script_file, python=python_path)
else:
# we're being run outside ghidra! (almost certainly from spawned by run_script_across_ghidra_bridge())
parser = argparse.ArgumentParser(
description="Example py3 script that's expected to be called from ghidra with a bridge")
# the script needs to handle these command-line arguments and use them to connect back to the ghidra server that spawned it
parser.add_argument("--connect_to_host", type=str, required=False,
default="127.0.0.1", help="IP to connect to the ghidra_bridge server")
parser.add_argument("--connect_to_port", type=int, required=True,
help="Port to connect to the ghidra_bridge server")
args = parser.parse_args()
run_script(server_host=args.connect_to_host,
server_port=args.connect_to_port)
```
|
{
"source": "jdkloe/pygrib",
"score": 2
}
|
#### File: jdkloe/pygrib/setup.py
```python
import os
import glob
import setuptools
from Cython.Distutils import build_ext
class NumpyBuildExtCommand(build_ext):
"""
build_ext command for use when numpy headers are needed.
from https://stackoverflow.com/questions/2379898/
and https://stackoverflow.com/questions/48283503/
"""
def run(self):
        # fetch numpy first so the import below succeeds even if it was not preinstalled
        self.distribution.fetch_build_eggs(["numpy"])
        import numpy
self.include_dirs.append(numpy.get_include())
build_ext.run(self)
def extract_version(CYTHON_FNAME):
version = None
with open(CYTHON_FNAME) as fi:
for line in fi:
if line.startswith("__version__"):
_, version = line.split("=")
version = version.strip()[1:-1] # Remove quotation characters.
break
return version
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join("..", path, filename))
return paths
package_data = {}
if os.environ.get("PYGRIB_WHEEL") is not None:
package_data[""] = package_files("eccodes")
cmdclass = {"build_ext": NumpyBuildExtCommand}
searchdirs = []
if os.environ.get("GRIBAPI_DIR"):
searchdirs.append(os.environ["GRIBAPI_DIR"])
if os.environ.get("ECCODES_DIR"):
searchdirs.append(os.environ["ECCODES_DIR"])
if os.environ.get("CONDA_PREFIX"):
searchdirs.append(os.environ["CONDA_PREFIX"])
searchdirs += [
os.path.expanduser("~"),
"/usr",
"/usr/local",
"/opt/local",
"/opt",
"/sw",
]
# look for grib_api.h in searchdirs
eccdir = None
for d in searchdirs:
try:
incpath = os.path.join(os.path.join(d, "include"), "grib_api.h")
f = open(incpath)
eccdir = d
print("eccodes found in %s" % eccdir)
break
except IOError:
continue
if eccdir is not None:
incdirs = [os.path.join(eccdir, "include")]
libdirs = [os.path.join(eccdir, "lib"), os.path.join(eccdir, "lib64")]
else:
print("eccodes not found, build may fail...")
incdirs = []
libdirs = []
ext_modules = [
setuptools.Extension(
"pygrib._pygrib",
["pygrib/_pygrib.pyx"],
include_dirs=incdirs,
library_dirs=libdirs,
runtime_library_dirs=libdirs,
libraries=["eccodes"],
)
]
# Import README.md as PyPi long_description
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
long_description = f.read()
# man pages installed in MAN_DIR/man1
if os.environ.get("MAN_DIR"):
man_dir = os.environ.get("MAN_DIR")
manpages = glob.glob(os.path.join("man", "*.1"))
data_files = [(os.path.join(man_dir, "man1"), manpages)]
# if MAN_DIR not set, man pages not installed
else:
data_files = None
setuptools.setup(
name="pygrib",
version=extract_version("pygrib/_pygrib.pyx"),
description="Python module for reading/writing GRIB files",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/jswhit/pygrib",
download_url="http://python.org/pypi/pygrib",
license="License :: OSI Approved :: MIT License",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries :: Python Modules",
],
cmdclass=cmdclass,
long_description=long_description,
long_description_content_type="text/markdown",
scripts=[
"utils/grib_list",
"utils/grib_repack",
"utils/cnvgrib1to2",
"utils/cnvgrib2to1",
],
ext_modules=ext_modules,
data_files=data_files,
packages=["pygrib"],
package_data=package_data,
setup_requires=["setuptools", "cython"],
install_requires=[
"pyproj",
"numpy",
],
)
```
|
{
"source": "jdkloe/python-metar",
"score": 3
}
|
#### File: jdkloe/python-metar/get_report.py
```python
from __future__ import print_function
import os
import sys
import getopt
import string
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from metar import Metar
BASE_URL = "http://tgftp.nws.noaa.gov/data/observations/metar/stations"
def usage():
program = os.path.basename(sys.argv[0])
print("Usage: ", program, "<station> [ <station> ... ]")
print(
"""Options:
<station> . a four-letter ICAO station code (e.g., "KEWR")
"""
)
sys.exit(1)
stations = []
debug = False
try:
opts, stations = getopt.getopt(sys.argv[1:], "d")
for opt in opts:
if opt[0] == "-d":
debug = True
except getopt.GetoptError:
usage()
if not stations:
usage()
for name in stations:
url = "%s/%s.TXT" % (BASE_URL, name)
if debug:
sys.stderr.write("[ " + url + " ]")
try:
urlh = urlopen(url)
report = ""
for line in urlh:
if not isinstance(line, str):
line = line.decode() # convert Python3 bytes buffer to string
if line.startswith(name):
report = line.strip()
obs = Metar.Metar(line)
print(obs.string())
break
if not report:
print("No data for ", name, "\n\n")
except Metar.ParserError as exc:
print("METAR code: ", line)
        print(", ".join(exc.args), "\n")
except:
import traceback
print(traceback.format_exc())
print("Error retrieving", name, "data", "\n")
```
#### File: python-metar/test/test_direction.py
```python
import pytest
from metar.Datatypes import direction
def test_usage():
"""Test basic usage."""
assert direction("90").value() == 90.0
assert direction(90).value() == 90.0
assert direction(90.0).value() == 90.0
assert direction("90").string() == "90 degrees"
assert direction("E").compass() == "E"
def test_error_checking():
"""Test that exceptions are raised."""
with pytest.raises(ValueError):
direction("North")
with pytest.raises(ValueError):
direction(-10)
with pytest.raises(ValueError):
direction("361")
def test_conversion():
"""Test conversion of str direction to numeric and back."""
assert direction("N").value() == 0.0
assert direction("NNE").value() == 22.5
assert direction("NE").value() == 45.0
assert direction("ENE").value() == 67.5
assert direction("E").value() == 90.0
assert direction("ESE").value() == 112.5
assert direction("SE").value() == 135.0
assert direction("SSE").value() == 157.5
assert direction("S").value() == 180.0
assert direction("SSW").value() == 202.5
assert direction("SW").value() == 225.0
assert direction("WSW").value() == 247.5
assert direction("W").value() == 270.0
assert direction("WNW").value() == 292.5
assert direction("NW").value() == 315.0
assert direction("NNW").value() == 337.5
assert direction("0").compass() == "N"
assert direction("5").compass() == "N"
assert direction("355").compass() == "N"
assert direction("20").compass() == "NNE"
assert direction("60").compass() == "ENE"
assert direction("247.5").compass() == "WSW"
```
#### File: python-metar/test/test_temperature.py
```python
import pytest
from metar.Datatypes import temperature, UnitsError
def test_defaults():
"""Test basic usage."""
assert temperature("32").value() == 32.0
assert temperature("32").value("C") == 32.0
assert temperature("32").string() == "32.0 C"
assert temperature("32", "F").string() == "32.0 F"
def test_inputs():
"""Test various inputs."""
assert temperature("32").value() == 32.0
assert temperature(32).value() == 32.0
assert temperature(32.0).value() == 32.0
assert temperature("32", "c").value() == 32.0
assert temperature("32", "f").value() == 32.0
assert temperature("32", "k").value() == 32.0
assert temperature("50", "F").value("c") == 10.0
assert temperature("50", "f").value("C") == 10.0
def test_error_checking():
"""Test exception raising."""
with pytest.raises(ValueError):
temperature("32C")
with pytest.raises(ValueError):
temperature("M10F")
with pytest.raises(UnitsError):
temperature("32", "J")
with pytest.raises(UnitsError):
temperature(temperature("32").value, "J")
with pytest.raises(UnitsError):
temperature(temperature("32").string, "J")
def test_conversions():
"""Test unit conversions."""
assert temperature("32", "F").value("F") == 32.0
assert temperature("32", "F").value("C") == 0.0
assert temperature("50", "F").value("C") == 10.0
assert temperature("32", "F").value("K") == 273.15
assert temperature("20", "C").value("C") == 20.0
assert temperature("M10", "C").value("F") == 14.0
assert temperature("M0", "C").value("F") == 32.0
assert temperature("20", "C").value("K") == 293.15
assert temperature("20", "C").value("F") == 68.0
assert temperature("30", "C").value("F") == 86.0
assert temperature("263.15", "K").value("K") == 263.15
assert temperature("263.15", "K").value("C") == -10.0
assert temperature("263.15", "K").value("F") == 14.0
assert temperature("10", "C").string("C") == "10.0 C"
assert temperature("10", "C").string("F") == "50.0 F"
assert temperature("10", "C").string("K") == "283.1 K"
```
|
{
"source": "jdknight/sphinxcontrib-blockdiag",
"score": 2
}
|
#### File: sphinxcontrib-blockdiag/tests/test_errors.py
```python
from mock import patch
from sphinx_testing import with_app
import sys
import unittest
class TestSphinxcontribBlockdiagErrors(unittest.TestCase):
@with_app(srcdir='tests/docs/basic', write_docstring=True)
def test_parse_error(self, app, status, warning):
"""
.. blockdiag::
{ A -> B;
"""
app.builder.build_all()
self.assertIn('got unexpected token:', warning.getvalue())
@with_app(srcdir='tests/docs/basic', confoverrides=dict(blockdiag_html_image_format='JPG'))
def test_unknown_format_error(self, app, status, warning):
app.builder.build_all()
self.assertIn('unknown format: JPG', warning.getvalue())
@with_app(srcdir='tests/docs/basic', confoverrides=dict(blockdiag_html_image_format='PDF'))
def test_reportlab_not_found_error(self, app, status, warning):
try:
# unload reportlab and make loading it impossible
sys.modules.pop('reportlab', None)
path = sys.path
sys.path = []
app.builder.build_all()
self.assertIn('Could not output PDF format. Install reportlab.',
warning.getvalue())
finally:
sys.path = path
@with_app(srcdir='tests/docs/basic')
@patch("blockdiag.utils.rst.nodes.blockdiag.processor.drawer.DiagramDraw")
def test_rendering_error(self, app, status, warning, DiagramDraw):
DiagramDraw.side_effect = RuntimeError("UNKNOWN ERROR!")
app.builder.build_all()
self.assertIn('UNKNOWN ERROR!', warning.getvalue())
@with_app(srcdir='tests/docs/basic')
@patch("sphinxcontrib.blockdiag.blockdiag.drawer.DiagramDraw.draw")
def test_font_settings_error(self, app, status, warning, draw):
draw.side_effect = UnicodeEncodeError("", "", 0, 0, "")
app.builder.build_all()
self.assertIn('UnicodeEncodeError caught (check your font settings)',
warning.getvalue())
```
|
{
"source": "jdkr/ImageFeature",
"score": 3
}
|
#### File: jdkr/ImageFeature/Test.py
```python
import os
from imageio import imread
import matplotlib.pyplot as plt
from Feature import detectFeaturepoints
def testDetectFeaturepoints():
imageArray = imread(os.path.dirname(__file__)+'/img/ruin.JPG', as_gray=True)
resolutionMax = 1
scalerangeFactor = 3
scalesCount = 8
hessianThreshold = 0.03
minFeatureScales = 3
featurepoints = detectFeaturepoints(
imageArray,
resolutionMax,
scalerangeFactor,
scalesCount,
hessianThreshold,
minFeatureScales)
featurepointsX = [f.x for f in featurepoints]
featurepointsY = [f.y for f in featurepoints]
plt.imshow(imageArray, cmap = plt.get_cmap('gray'))
plt.scatter(featurepointsX,featurepointsY,s=1,c="red")
plt.savefig("ruinFeatures.jpg")
if __name__ == "__main__":
testDetectFeaturepoints()
```
|
{
"source": "jdks/envmgr-cli",
"score": 2
}
|
#### File: emcli/commands/cycle.py
```python
import semver
import time
import sys
import os
from envmgr import EmClient, ASG
from emcli.commands.base import BaseCommand
from emcli.commands.patching.patch_operation import PatchOperation
from emcli.commands.patching.patch_table import patch_table
from emcli.commands.patching.validate import server_has_valid_ami
from emcli.commands.user_confirmation import confirm
from math import ceil
from repoze.lru import lru_cache
class CycleCommand(BaseCommand):
amis = []
servers = []
all_servers = []
def __init__(self, options, *args, **kwargs):
super(CycleCommand, self).__init__(options, *args, **kwargs)
self.api = EmClient()
self._register(('get', 'status'), self.describe_status)
self._register(('cycle', '!status'), self.run_patch_update)
def describe_status(self, cluster, env):
if PatchOperation.is_in_progress(cluster, env, True):
self.show_current_status(cluster, env)
else:
self.get_patch_status(cluster, env)
def show_current_status(self, cluster, env):
patch_operation = PatchOperation.get_current(cluster, env, True)
patch_status = PatchOperation.get_current_status(cluster, env, True)
self.stop_spinner()
self.show_result(patch_operation, patch_status)
def get_patch_status(self, cluster, env):
from_ami = self.opts.get('from-ami')
to_ami = self.opts.get('to-ami')
whitelist = self.get_user_filter('whitelist', 'match')
blacklist = self.get_user_filter('blacklist', 'ignore')
result = self.get_patch_requirements(cluster, env, from_ami, to_ami, whitelist, blacklist)
if not result:
self.patch_not_required(cluster, env)
else:
message = PatchOperation.describe_patches(result)
self.show_result(result, message)
def get_user_filter(self, filename, argname):
argvalue = self.opts.get(argname)
filter_file = self.opts.get(filename)
if len(argvalue):
return argvalue
elif filter_file is not None:
filepath = os.path.abspath(filter_file)
with open(filepath) as f:
filter_list = f.readlines()
return [x.strip() for x in filter_list]
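        # A whitelist/blacklist file is read as one ASG name per line, e.g. (names are
        # illustrative):
        #   my-asg-api
        #   my-asg-worker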
def patch_not_required(self, cluster, env):
        self.show_result({}, '{0} does not need to cycle any Windows servers in {1}'.format(cluster, env))
def run_patch_update(self, cluster, env):
env = env.lower()
if self.environment_is_protected(env):
self.stop_spinner()
print('Bulk cycle is temporarily disabled in {0}'.format(env))
return
if self.opts.get('kill', False):
self.stop_spinner()
PatchOperation.kill(cluster, env, True)
else:
patch_operation = PatchOperation(self.api)
current_operation = PatchOperation.get_current(cluster, env, True)
if current_operation is None:
from_ami = self.opts.get('from-ami')
to_ami = self.opts.get('to-ami')
whitelist = self.get_user_filter('whitelist', 'match')
blacklist = self.get_user_filter('blacklist', 'ignore')
current_operation = self.get_patch_requirements(cluster, env, from_ami, to_ami, whitelist, blacklist)
self.stop_spinner()
if not current_operation:
return self.patch_not_required(cluster, env)
if not self.confirm_patch(current_operation):
return
else:
print('')
patch_operation.run(current_operation, cluster, env, True)
def confirm_patch(self, patches):
to_patch = PatchOperation.get_patches_by_availability(patches, True)
to_ignore = PatchOperation.get_patches_by_availability(patches, False)
to_ignore += [ {'server_name':server, 'invalid_ami':True} for server in self.ignored_servers ]
message = PatchOperation.describe_patches(to_patch, to_ignore)
if not to_patch:
self.show_result({}, message)
return False
else:
message.append('Do you want to continue? (y/n) ')
return confirm(message)
def get_patch_requirements(self, cluster, env, from_ami=None, to_ami=None, whitelist=None, blacklist=None):
# We're only interested in Windows as Linux instances auto-update
self.amis = self.api.get_images()
self.validate_ami_compatibility(from_ami, to_ami)
# List of clusters' servers with AMI info
self.servers = self.api.get_environment_servers(env).get('Value')
self.servers = [ server for server in self.servers if
'Ami' in server and server.get('Cluster').lower() == cluster.lower()
and server.get('IsBeingDeleted') != True
]
# Filter out odd servers with no valid AMI info
self.ignored_servers = [ server.get('Name') for server in self.servers if not server_has_valid_ami(server) ]
self.servers = [ server for server in self.servers if server_has_valid_ami(server) ]
# List of requirements to be considered for updates
# Prefer 'latest stable' info from image, not server
update_requirements = [ lambda ami,server: ami.get('Name') == server.get('Ami').get('Name') ]
# Update any non-latest-stable if no "from ami" given
if from_ami is not None:
update_requirements.append( lambda ami,server: ami.get('Name') == from_ami )
if whitelist:
update_requirements.append( lambda ami,server: server.get('Name') in whitelist )
elif blacklist:
update_requirements.append( lambda ami,server: server.get('Name') not in blacklist )
# List of servers with Windows AMI that matches update requirement
servers_to_update = [ server for server in self.servers if
any(ami for ami in self.amis if
all([ requirement(ami,server) for requirement in update_requirements ])
)
]
# List of patches to apply
patches = list(map(self.create_patch_item, servers_to_update))
self.get_asg_details(patches, env)
return patches
def environment_is_protected(self, env):
result = self.api.get_environment_protected(env, 'BULK_PATCH_AMI')
return result.get('isProtected', False)
def get_asg_details(self, patches, env):
for p in patches:
asg_name = p.get('server_name')
asg = self.api.get_asg(env, asg_name)
# Calculate required scale out size
n_azs = len(list(asg.get('AvailabilityZones')))
n_instances = p.get('instances_count')
scale_up_count = n_instances * 2
if scale_up_count >= n_azs and scale_up_count % n_azs != 0:
scale_up_count += 1
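            # Worked example (hypothetical ASG): with n_azs = 3 and n_instances = 2,
            # scale_up_count starts at 4; 4 is not a multiple of 3, so it is bumped to 5.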
p['az_count'] = n_azs
p['scale_up_count'] = scale_up_count
p['max_count'] = asg.get('MaxSize', n_instances)
# Check for any instances in standby
if any([ instance for instance in asg.get('Instances', []) if instance.get('LifecycleState') == 'Standby' ]):
p['has_standby_instances'] = True
# Check for overall health
asg_status = ASG(asg_name, env).get_health()
if not asg_status.get('is_healthy'):
p['unhealthy'] = asg_status
def create_patch_item(self, server):
from_name = server.get('Ami').get('Name')
from_ami = self.get_ami_by_key('Name', from_name)
target_name = self.opts.get('to-ami')
if target_name is not None:
target = self.get_ami_by_key('Name', target_name)
else:
target = self.get_target_ami(from_name)
target_name = target.get('Name')
from_version = from_ami.get('AmiVersion')
target_version = target.get('AmiVersion')
patch = {
'server_name': server.get('Name'),
'current_version': from_version,
'target_version': target_version,
'ami_type': target.get('AmiType'),
'new_ami_id': target.get('ImageId'),
'server_role': server.get('Role'),
'services_count': len(list(server.get('Services'))),
'instances_count': server.get('Size').get('Current')
}
        # Warn if the target version is not newer than the current version
if semver.compare(target_version, from_version) != 1:
patch['Warning'] = True
return patch
@lru_cache(128)
def get_target_ami(self, from_name):
from_ami = self.get_ami_by_key('Name', from_name)
ami_type = from_ami['AmiType']
ami = [ ami for ami in self.amis if ami['AmiType'] == ami_type and ami['IsLatestStable'] == True]
return ami[0]
@lru_cache(128)
def get_ami_by_key(self, key, value, unique=True):
ami = [ ami for ami in self.amis if ami[key] == value ]
if not ami:
raise ValueError('Could not find AMI with {0}={1}'.format(key, value))
elif unique and len(list(ami)) != 1:
raise ValueError('Multiple AMI definitions found with {0}={1}'.format(key, value))
return ami[0]
def validate_ami_compatibility(self, from_name=None, to_name=None):
from_ami = None
to_ami = None
if from_name is not None:
from_ami = self.get_ami_by_key('Name', from_name)
if to_name is not None:
to_ami = self.get_ami_by_key('Name', to_name)
if from_name is None:
raise ValueError('You must specify --from-ami if --to-ami is given')
if from_ami is not None and to_ami is not None:
if from_ami['AmiType'] != to_ami['AmiType']:
raise ValueError('AMI types for from_ami and to_ami must match')
```
#### File: commands/patching/patch_table.py
```python
import os
from emcli.commands.utils.asg_health import describe_asg_health
from tabulate import tabulate
def get_default_status(p):
if p.get('has_standby_instances'):
return 'Instances in standby'
elif p.get('unhealthy') is not None:
return describe_asg_health(p.get('unhealthy'))
elif p.get('invalid_ami'):
return 'Invalid AMI data'
elif p.get('warning'):
return 'Warning'
else:
return ''
def patch_table(patches, get_status=get_default_status):
table_data = map(lambda p: {
0: p.get('server_name'),
1: p.get('ami_type'),
2: p.get('current_version'),
3: p.get('target_version'),
4: p.get('instances_count'),
5: p.get('scale_up_count'),
6: get_status(p)
}, patches)
headers = {0:'ASG', 1:'AMI Type', 2:'Current', 3:'Target', 4:'Instances', 5:'Scale out', 6:'Status'}
return os.linesep + tabulate(table_data, headers, tablefmt="simple") + os.linesep
```
#### File: commands/patching/validate.py
```python
def server_has_valid_ami(server):
return server and server.get('Ami') and server.get('Ami').get('Name')
```
#### File: emcli/commands/toggle.py
```python
import time
from envmgr import Upstream, Service
from emcli.commands.base import BaseCommand
class ToggleCommand(BaseCommand):
def __init__(self, options, *args, **kwargs):
super(ToggleCommand, self).__init__(options, *args, **kwargs)
self._register('wait-for', self.wait_for_toggle, False)
self._register('status', self.get_upstream_status)
self._register(('!get', '!to'), self.toggle_service_slices)
def get_upstream_status(self, slice, service, env):
name = self.opts.get('upstream')
upstream = Upstream(service, slice, env, name)
status = upstream.get_status()
desc = "{0} is configured {1}, {2} of {3} upstreams are active across {4} load balancers".format(
slice,
status.slice_config,
status.active_upstreams,
status.total_upstreams,
status.total_load_balancers
)
self.show_result(status.__dict__, desc)
return status
def toggle_service_slices(self, service, env):
svc = Service(service, env)
upstream = svc.toggle()
self.show_result(upstream.__dict__, "{0} is now configured active for {1} in {2}".format(upstream.slice, service, env))
def wait_for_toggle(self, slice, service, env):
start = time.time()
timeout = int(self.opts.get('timeout', 0))
while True:
elapsed = int(time.time() - start)
            if timeout != 0 and elapsed > timeout:
self.show_result({}, "Timeout exceeded")
return 1
else:
status = self.get_upstream_status(slice, service, env)
if status.is_active:
return 0
else:
time.sleep(5)
```
#### File: commands/utils/file_utils.py
```python
import os
import errno
def safe_create_dir_path(filepath):
path = os.path.dirname(filepath)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path): pass
else: raise
```
#### File: emcli/commands/verify.py
```python
from envmgr import EmClient
from emcli.commands.base import BaseCommand
class VerifyCommand(BaseCommand):
def __init__(self, options, *args, **kwargs):
super(VerifyCommand, self).__init__(options, *args, **kwargs)
self._register('verify', self.verify_setup)
def verify_setup(self):
client = EmClient()
result = client.get_accounts_config()
if result is not None:
msg = 'Hooray, envmgr cli is correctly configured.'
else:
msg = 'Verify failed, see error for log for details'
self.show_result({}, msg)
```
#### File: commands/helpers/utils.py
```python
import random
import string
import json
from codecs import open
from os.path import abspath, dirname, join
MOCK_ENV_VARS = {
'ENVMGR_HOST':'envmgr.acme.com',
'ENVMGR_USER':'roadrunner',
'ENVMGR_PASS':'<PASSWORD>'
}
def load_json_data(file_name):
this_dir = abspath(dirname(__file__))
with open(join(this_dir, '../../data/', file_name), encoding='utf-8') as file_data:
json_data = json.load(file_data)
return json_data
def mock_asg():
name = 'asg-{0}'.format(rand_str())
arn = 'arn:aws:autoscaling:eu-west-1:123456789012:asg:{0}/mock'.format(rand_str())
return {
'AutoScalingGroupName': name,
'AutoScalingGroupARN': arn,
'Tags': [ mock_asg_tag('SecurityZone', 'Other') ]
}
def mock_asg_tag(key, value):
return {
'ResourceId': 'envmgr-cli-mock',
'ResourceType': 'auto-scaling-group',
'Key': key,
'Value': value,
'PropagateAtLaunch': True
}
def mock_server(cluster, ami_name, is_latest_stable, current=1, desired=1):
rnd = rand_str()
name = 'mock-server-{0}'.format(rnd)
role = 'mock-role-{0}'.format(rnd)
service = 'mock-service-{0}'.format(rnd)
server = {
'Name': name,
'Role': role,
'Cluster': cluster,
'Schedule': 'ON',
'IsBeingDeleted': False,
'Services': [ service ],
'Ami': {
'Name': ami_name,
'IsLatestStable': is_latest_stable
},
'Size': {
'Current': current,
'Desired': desired
}
}
return server
def rand_str(length=8):
return ''.join(random.choice(string.ascii_letters) for x in range(length))
```
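A quick sketch of how these helpers compose, assuming the test package layout makes them importable as `tests.commands.helpers.utils` (the exact import path is an assumption):
```python
# Illustrative only: two servers on an old AMI plus one on the latest stable AMI.
from tests.commands.helpers.utils import mock_server

servers = [mock_server('web-cluster', 'mock-ami-1.0.0', is_latest_stable=False) for _ in range(2)]
servers.append(mock_server('web-cluster', 'mock-ami-2.0.0', is_latest_stable=True))
print(servers[0]['Ami'], servers[-1]['Ami']['IsLatestStable'])
```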
#### File: tests/commands/test_patch.py
```python
import responses
import random
import string
import os
from unittest import TestCase
from emcli.commands import PatchCommand
from parameterized import parameterized, param
from mock import patch
from .helpers.api_test_case import APITestCase
from .helpers.utils import mock_server, MOCK_ENV_VARS
from .helpers.patch_scenarios import TEST_SCENARIOS
class PatchTest(APITestCase):
@parameterized.expand( TEST_SCENARIOS )
@responses.activate
@patch.dict(os.environ, MOCK_ENV_VARS)
def test_get_patch_requirements(self, *args, **kwargs):
patch_cluster = kwargs.get('patch_cluster')
expected_result = kwargs.get('expected')
from_ami = kwargs.get('from_ami')
servers_in_env = []
self.mock_response(r'/asgs/[\w\.\-]+', {'AvailabilityZones':[1]})
self.mock_response(r'/environments/[\w\.\-]+/servers/[\w\.\-]+',
{'ServicesCount':{'Expected':2}, 'Instances':[{'RunningServicesCount':2}]}
)
# Create a list of servers in env, based on test scenario
for server_desc in args:
servers_in_env += self.create_servers(**server_desc)
self.setup_responses()
self.respond_with_servers(servers_in_env)
sut = PatchCommand({})
result = sut.get_patch_requirements( **{
'cluster':patch_cluster,
'env':'staging',
'from_ami':from_ami
})
self.assertEqual(len(list(result)), expected_result)
def respond_with_servers(self, servers):
server_response = {
'EnvironmentName': 'staging',
'Value': servers
}
self.mock_response(r'/environments/[\w\.\-]+/servers', server_response)
def setup_responses(self):
self.mock_authentication()
self.mock_response_with_file(r'/images', 'ami_response.json')
def create_servers(self, cluster, n=1, ami='mock-ami-1.0.0', latest=False):
servers = [ mock_server(cluster, ami, latest) for x in range(n) ]
return servers
```
|
{
"source": "jdlabsco/wagtail-themes",
"score": 2
}
|
#### File: tests/unit/test_thread.py
```python
from wagtailthemes.thread import get_theme, set_theme
def test_get_theme():
# Check when no theme is set
set_theme(None)
assert not get_theme()
# Check when theme is set
set_theme('brand')
assert get_theme() == 'brand'
# Check when theme is overridden
set_theme('personal')
assert get_theme() == 'personal'
```
#### File: src/wagtailthemes/thread.py
```python
from threading import local
_thread_locals = local()
def set_theme(theme):
setattr(_thread_locals, 'wagtail_theme', theme)
def get_theme():
theme = getattr(_thread_locals, 'wagtail_theme', None)
return theme
```
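Because `_thread_locals` is a `threading.local()`, each thread sees only the theme it set itself. A small sketch of that isolation, assuming `wagtailthemes` is installed:
```python
import threading
from wagtailthemes.thread import get_theme, set_theme

def worker(theme, results):
    set_theme(theme)                 # only affects the current thread
    results[theme] = get_theme()     # reads back this thread's own value

results = {}
threads = [threading.Thread(target=worker, args=(name, results)) for name in ('brand', 'personal')]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)  # {'brand': 'brand', 'personal': 'personal'}
```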
|
{
"source": "jdlambright/Guess_who-1",
"score": 3
}
|
#### File: Guess_who-1/src/helper.py
```python
import sys
from os import name as os_name, system
from time import sleep
from .display import (MENU,
ABOUT,
CREDITS)
# Global variables
LINE = "=" * 66
def show_menu(first_option) -> None:
"""
Display the home page
"""
print(MENU.format(first_option))
def show_about_page() -> None:
"""
Display the about page
"""
print(ABOUT)
input()
def show_credits_page() -> None:
"""
Display the credits page
"""
print(CREDITS)
input()
def exit_game() -> None:
"""
Exit the game!
"""
print(f"{LINE}\n\t\t\t|Thanks for playing!|\n{LINE}")
sleep(0.4)
sys.exit()
def clear_screen() -> int:
"""
This function uses the system function from os to
clear the console screen. It passes 'clear' or 'cls'
as the arguments depending on the OS.
"""
    return system("cls") if os_name == "nt" else system("clear")
```
|
{
"source": "jdlangs/tesseract_ros2",
"score": 2
}
|
#### File: tesseract_monitoring/launch/environment_monitor.launch.py
```python
import os
from ament_index_python.packages import get_package_share_directory
from ament_index_python.packages import get_package_prefix
import launch
import launch_ros.actions
def generate_launch_description():
    if "tesseract_collision" not in os.environ["AMENT_PREFIX_PATH"]:
head, tail = os.path.split(get_package_prefix('tesseract_monitoring'))
path = os.path.join(head, 'tesseract_collision')
os.environ["AMENT_PREFIX_PATH"] += os.pathsep + path
print(os.environ["AMENT_PREFIX_PATH"])
urdf = os.path.join(get_package_share_directory('ur_description'), 'urdf', 'ur10_robot.urdf')
srdf = os.path.join(get_package_share_directory('ur_description'), 'urdf', 'ur10_robot.srdf')
return launch.LaunchDescription([
launch_ros.actions.Node(
node_name='environment_monitor',
package='tesseract_monitoring',
node_executable='tesseract_monitoring_environment_node',
output='screen',
arguments=[],
parameters=[{'desc_param': 'robot_description',
'robot_description': urdf,
'robot_description_semantic': srdf}]),
launch_ros.actions.Node(
node_name='joint_state_pub',
package='joint_state_publisher',
node_executable='joint_state_publisher',
output='screen',
arguments=[urdf],
parameters=[{'use_gui': 'false'}]),
])
```
|