{
"source": "joaolovatti/bbb_22_bot",
"score": 3
} |
#### File: bbb_22_bot/logic/bot_manager.py
```python
from logic.click_point import ClickPoint
import pyautogui as pyauto
import time
from logic.exit_handler import ExitHandler
# Public
class BotManager:
'''
    Controls the bot's interactions.
'''
def __init__(self):
self._computed_votes = 0
# Interface
def executar_voto(self):
'''
        Runs the following sequence of steps:
        1. Finds the location of the participant button and clicks it.
        2. Finds the location of the captcha button and clicks it.
        3. Finds the location of the "Votar novamente" (vote again) button and clicks it.
        On error, the page is refreshed.
'''
try:
self._click_on_participant()
time.sleep(1)
self._click_on_catcha()
time.sleep(1)
self._click_on_votar_novamente()
time.sleep(1)
self._increment_computed_votes()
print(f'Votos computados = {self._computed_votes}.')
except Exception as e:
self.refresh_page()
time.sleep(4)
def refresh_page(self):
'''
        Refreshes the browser page.
'''
pyauto.press('f5')
def start_cycle(self, quit_keyword):
'''
        Starts the bot's operating loop.
        It stops when the "quit_keyword" key is pressed.
'''
_exit_handler = ExitHandler(exit_keyword = quit_keyword)
while True:
self.executar_voto()
if _exit_handler.check():
break
# Implementation
def _increment_computed_votes(self):
self._computed_votes += 1
def _click_on_participant(self):
_participante = ClickPoint(path='assets/participante.png', h_offset=100)
_participante.instant_click(1, duration=3,
noise_amplitude=30, is_random=True, duration_offset=0.5)
def _click_on_catcha(self):
_catpha = ClickPoint(path='assets/catcha.png', h_offset=50)
_catpha.instant_click(1, duration=3,
noise_amplitude=10, is_random=True, duration_offset=0.5)
def _click_on_votar_novamente(self):
_votar_novamente_button = ClickPoint(
path='assets/votar_novamete.png')
_votar_novamente_button.instant_click(
1, duration=3, is_random=True, noise_amplitude=20, duration_offset=0.5)
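
# --- Illustrative usage (not from the original repo): a minimal entry point, assuming the
# --- asset images referenced above exist and 'q' is the desired quit key.
if __name__ == '__main__':
    bot = BotManager()
    bot.start_cycle('q')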
``` |
{
"source": "joaolovatti/cookie_clicker_bot",
"score": 3
} |
#### File: cookie_clicker_bot/logic/argparse_handler.py
```python
import argparse
#Public
class ArgparseHandler:
'''
    Class responsible for managing the CLI arguments.
'''
#Constructor
def __init__(self):
self._parser = argparse.ArgumentParser()
self._define_all_arguments()
#Implementation
def _define_all_arguments(self):
self._parser.add_argument(
'-i',
'--Initial',
            help = 'Define the initial iteration cycle value',
required = False
)
def get_arguments_dict(self):
return self._parser.parse_args().__dict__
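
# --- Illustrative usage (not from the original repo): reads the optional -i/--Initial flag
# --- into a plain dict; 'Initial' is the dest argparse derives from the long option name.
if __name__ == '__main__':
    args = ArgparseHandler().get_arguments_dict()
    initial_cycle = int(args['Initial']) if args['Initial'] else 0
    print(initial_cycle)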
```
#### File: cookie_clicker_bot/logic/click_box.py
```python
import pyautogui as pyauto
#Public
class ClickBox:
'''
    Class representing a box that will be clicked by the BOT.
'''
#Constructor
def __init__(self, path = '', coordenate = (0, 0)):
'''
        → path: Path to the image of the element to locate on screen.
          This value takes precedence when constructing the ClickBox.
        → coordenate: Coordinates on the screen.
'''
if (path != ''):
self.coordenate = pyauto.locateCenterOnScreen(
path,
confidence = 0.8
)
elif (coordenate != (0, 0)):
self.coordenate = coordenate
#Interface
def click(self, n):
'''
        Clicks the element a given number of times.
        Arguments:
        → n: Number of clicks.
'''
pyauto.click(clicks = n,
interval = 0.005,
x = self.coordenate[0],
y = self.coordenate[1]
)
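
# --- Illustrative usage (not from the original repo): 'assets/cookie.png' is a hypothetical
# --- template image; the confidence= argument of locateCenterOnScreen requires opencv-python.
if __name__ == '__main__':
    box = ClickBox(path='assets/cookie.png')
    box.click(10)  # click the located element 10 times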
```
#### File: cookie_clicker_bot/logic/exit_handler.py
```python
import keyboard
#Public
class ExitHandler:
'''
    Manages stopping the BOT.
'''
#Implementation
def check(self):
'''
        Evaluates whether the BOT should be stopped.
'''
return keyboard.is_pressed('q')
``` |
{
"source": "joaolovatti/intervalo_confianca_enquetes_bbb",
"score": 3
} |
#### File: intervalo_confianca_enquetes_bbb/logic/data_loader.py
```python
import numpy as np
#Public:
class DataLoader:
'''
    Class responsible for standardizing the entries
    of the partial polls.
'''
#Constructor
def __init__(
self,
fonte,
paredao
):
self._fonte = fonte
self._paredao_names = paredao.get_participantes()
self._content = []
#Interface
def set_manual_entries(
self,
num_participante_1,
num_participante_2,
num_participante_3
):
'''
        Sets the entries manually.
'''
for _ in range(0, num_participante_1):
self._content.append(
self._new_data_entry(
is_participante_1 = True
)
)
for _ in range(0, num_participante_2):
self._content.append(
self._new_data_entry(
is_participante_2 = True
)
)
for _ in range(0, num_participante_3):
self._content.append(
self._new_data_entry(
is_participante_3 = True
)
)
def get(self, max_samples = 0):
'''
        Returns the data from a source. A maximum number of
        samples can be specified.
'''
if max_samples:
return list(np.random.choice(self._content, max_samples))
else:
return self._content
#Implementation
def _new_data_entry(
self,
is_participante_1 = False,
is_participante_2 = False,
is_participante_3 = False
):
return {
self._paredao_names[0] : is_participante_1,
self._paredao_names[1] : is_participante_2,
self._paredao_names[2] : is_participante_3,
'Fonte' : self._fonte
}
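
# --- Illustrative usage (not from the original repo). FakeParedao is a hypothetical stand-in
# --- for the real paredao object; DataLoader only calls its get_participantes() method.
if __name__ == '__main__':
    class FakeParedao:
        def get_participantes(self):
            return ['A', 'B', 'C']

    loader = DataLoader(fonte='Twitter', paredao=FakeParedao())
    loader.set_manual_entries(5, 3, 2)
    print(len(loader.get()), len(loader.get(max_samples=4)))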
``` |
{
"source": "Joao-L-S-Almeida/MLExp",
"score": 3
} |
#### File: MLExp/numerics/timederivation.py
```python
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as ius
class CollocationDerivative:
def __init__(self, timestep=None):
self.dt = timestep
def solve(self, data=None, x=None):
        # data must be a matrix with shape (n_timesteps, n_variables)
n_variables = data.shape[1]
n_timesteps = data.shape[0]
if self.dt:
times = np.arange(0, n_timesteps, 1)*self.dt
elif (not self.dt) and isinstance(x, np.ndarray):
times = x
else:
raise Exception('There is no axis to perform differentiation.')
data_derivatives_list = list()
for var in range(n_variables):
var_array = data[:, var]
interpolator = ius(times, var_array)
derivative_intp = interpolator.derivative()
derivatives = derivative_intp(times)
data_derivatives_list.append(derivatives[:, None])
data_derivatives = np.hstack(data_derivatives_list)
return data_derivatives
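
# --- Illustrative usage (not from the original repo): differentiates sin(2*pi*t) on a uniform
# --- grid and reports the maximum error against the analytic derivative.
if __name__ == '__main__':
    t = np.linspace(0.0, 1.0, 200)
    data = np.sin(2 * np.pi * t)[:, None]
    deriv = CollocationDerivative(timestep=t[1] - t[0]).solve(data=data)
    print(np.max(np.abs(deriv[:, 0] - 2 * np.pi * np.cos(2 * np.pi * t))))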
``` |
{
"source": "joaoluga/one-pass-vault",
"score": 2
} |
#### File: one_pass_vault/one_pass_parser/one_pass_parser_factory.py
```python
from one_pass_vault.one_pass_parser.parsers.one_pass_item_parser import OnePassItemParser
class OnePassParserFactory:
__PARSERS = {
'item': OnePassItemParser()
}
def __init__(self, parser_type):
self.__parser_type = parser_type
def build_parser(self):
return self.__PARSERS[self.__parser_type]
```
#### File: parsers/items/database_item.py
```python
from .item import Item
class DatabaseItem(Item):
item_type = 'database'
item_fields = ['database_type',
'hostname',
'port',
'database',
'username',
'password']
def parse_response(self, response):
parsed_response = {}
dict_fields = response['details']['sections'][0]['fields']
for field in dict_fields:
if field['n'] in self.item_fields:
parsed_response[field['n']] = field['v']
return parsed_response
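
# --- Illustrative example (not from the original repo): parse_response expects a 1Password-style
# --- nested payload like the one sketched below (all values are made up) and keeps only the
# --- whitelisted fields:
#
#   response = {'details': {'sections': [{'fields': [
#       {'n': 'hostname', 'v': 'db.example.com'},
#       {'n': 'port', 'v': '5432'},
#       {'n': 'notes', 'v': 'ignored'}]}]}}
#   DatabaseItem().parse_response(response)
#   # -> {'hostname': 'db.example.com', 'port': '5432'}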
```
#### File: parsers/items/item.py
```python
from abc import ABC, abstractmethod
class Item(ABC):
    # Concrete items override these as plain class attributes (see DatabaseItem above).
    item_type = None
    item_fields = []
@abstractmethod
def parse_response(self, response):
pass
``` |
{
"source": "JoaoLuis45/AppLanchonetePython",
"score": 3
} |
#### File: JoaoLuis45/AppLanchonetePython/lanchonete.py
```python
import PySimpleGUI as sg
from miau import *
def TelaLogin():
sg.theme('LightBrown')
layout = [
[sg.Text('Nome')],
[sg.Input('',key='nome')],
[sg.Text('Senha')],
[sg.Input('',password_char='*',key='senha')],
[sg.Button('Entrar',key='entrar'),sg.Button('Sair',key='Sair')],
[sg.Text('Não tem cadastro? '),sg.Button('Cadastro',key='cadastro')]
]
return sg.Window('Lanchonete SIGMA',layout=layout,finalize=True)
def TelaCadastro():
sg.theme('DarkBrown')
layout = [
[sg.Text('Nome'),sg.Input('',key='nomec')],
[sg.Text('Senha'),sg.Input('',password_char='*',key='senhac')],
[sg.Button('Cadastrar',key='cadastrar'),sg.Button('Voltar',key='voltar')]
]
return sg.Window('Tela de Cadastro',layout=layout,finalize=True)
def TelaLanchonete():
sg.theme('LightBrown')
pedidolanche = [
[sg.Text('Escolha o lanche:')],
[sg.Checkbox('Hamburguer',key='hamburguer'),sg.Checkbox('Pizza',key='pizza'),sg.Checkbox('Biscoito',key='biscoito')]
]
pedidobebida = [
[sg.Text('Escolha a bebida:')],
[sg.Checkbox('Refrigerante',key='refri'),sg.Checkbox('Suco',key='suco'),sg.Checkbox('MilkShake',key='milkshake')]
]
pagamento = [
[sg.Text('Escolha a forma de pagamento:')],
[sg.Radio('Dinheiro','pagamento'),sg.Radio('Cartão','pagamento')],
[sg.Button('Enviar',key='enviar')]
]
pag = [
[]
]
layout = [
[sg.Frame('Lanche',layout=pedidolanche)],
[sg.Frame('Bebida',layout=pedidobebida)],
[sg.Button('Pronto',key='pronto')],
[sg.Frame('',layout=pag,key='pag')]
]
return sg.Window('Tela lanchonete',layout=layout,finalize=True)
def ChamarApp():
janela,janelacadastro,janelalanchonete = TelaLogin(), None, None
if not ExisteArquivo():
CriarArquivo()
while True:
lista = LerArquivo()
window, event, values = sg.read_all_windows()
        #LOGIN SCREEN
if window == janela and event == sg.WINDOW_CLOSED:
break
elif window == janela and event == 'Sair':
exit()
elif window == janela and event == 'entrar':
login = values['nome'] + ';' + values['senha'] + '\n'
if login in lista:
sg.popup(f'Bem vindo {values["nome"]}')
nome = values['nome']
janela.hide()
janelalanchonete = TelaLanchonete()
else:
sg.popup('Usuário ou senha incorretos!')
elif window == janela and event == 'cadastro':
janela.hide()
janelacadastro = TelaCadastro()
        #REGISTRATION SCREEN
if window == janelacadastro and event == sg.WINDOW_CLOSED:
break
elif window == janelacadastro and event == 'cadastrar':
EscreverArquivo(values['nomec'],values['senhac'])
elif window == janelacadastro and event == 'voltar':
janelacadastro.hide()
janela.un_hide()
        #SNACK BAR SCREEN
if window == janelalanchonete and event == sg.WINDOW_CLOSED:
break
elif window == janelalanchonete and event == 'pronto':
janelalanchonete.extend_layout(janelalanchonete['pag'],[
[sg.Text('Escolha a forma de pagamento:')],
[sg.Radio('Dinheiro','pagamento',key='dinheiro'),sg.Radio('Cartão','pagamento',key='cartao')],
[sg.Button('Enviar',key='enviar')]
])
elif window == janelalanchonete and event == 'enviar':
pediu = []
if values['hamburguer'] == True:
hamburguer = 'hamburguer'
pediu.append(hamburguer)
if values['pizza'] == True:
pizza = 'pizza'
pediu.append(pizza)
if values['biscoito'] == True:
biscoito = 'biscoito'
pediu.append(biscoito)
if values['refri'] == True:
refri = 'refrigerante'
pediu.append(refri)
if values['suco'] == True:
suco = 'suco'
pediu.append(suco)
if values['milkshake'] == True:
milkshake = 'milkshake'
pediu.append(milkshake)
if values['dinheiro'] == True:
dinheiro = 'dinheiro'
pediu.append(dinheiro)
if values['cartao'] == True:
cartao = 'cartao'
pediu.append(cartao)
sg.popup(f'Nota de {nome}: {pediu}')
ChamarApp()
``` |
{
"source": "Joaoluislins/algotrader",
"score": 3
} |
#### File: dags/twitter/data_writer.py
```python
import os
import json
class DataWriter:
def __init__(self, api_type: str, user_id: str) -> None:
self.api_type = api_type
self.user_id = user_id
self.filename = f"{self.api_type}-{self.user_id}.json"
def _write_row(self, data: dict) -> None:
#os.makedirs(os.path.dirname(self.filename), exist_ok = True)
#print(os.getcwd())
#os.chdir('/opt/***')
print(os.getcwd())
#os.chdir('/home/ec2-user/outputs/')
#print(os.getcwd())
if not os.path.isdir(f"/opt/airflow/outputs/timelines/{self.user_id}/"):
os.makedirs(os.path.dirname(f"/opt/airflow/outputs/timelines/{self.user_id}/"), exist_ok = True)
#path = f"timelines/{self.user_id}/"
#os.chdir(path)
#print(os.getcwd())
with open(f"/opt/airflow/outputs/timelines/{self.user_id}/{self.filename}", 'a') as f:
for item in data['data']:
#print(type(item))
#print(item)
#f.write(json.dumps(item, indent=4, sort_keys=True))
json.dump(item, f)
f.write('\n')
#files.download(f)
def write(self, data: str) -> None:
self._write_row(data)
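
# --- Illustrative usage (not from the original repo): the payload shape mirrors a Twitter API
# --- v2 timeline response ({'data': [...]}); note the output path above is hard-coded to
# --- /opt/airflow, so this only works where that directory is writable.
if __name__ == '__main__':
    writer = DataWriter(api_type='timeline', user_id='12345')
    writer.write({'data': [{'id': '1', 'text': 'hello'}]})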
``` |
{
"source": "joaoluisro/gbn",
"score": 2
} |
#### File: gbn/gbn/cli.py
```python
import click
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
from gbn.sbb.predict import OcrdGbnSbbPredict
from gbn.sbb.binarize import OcrdGbnSbbBinarize
from gbn.sbb.crop import OcrdGbnSbbCrop
from gbn.sbb.segment import OcrdGbnSbbSegment
@click.command()
@ocrd_cli_options
def ocrd_gbn_sbb_predict(*args, **kwargs):
return ocrd_cli_wrap_processor(OcrdGbnSbbPredict, *args, **kwargs)
@click.command()
@ocrd_cli_options
def ocrd_gbn_sbb_binarize(*args, **kwargs):
return ocrd_cli_wrap_processor(OcrdGbnSbbBinarize, *args, **kwargs)
@click.command()
@ocrd_cli_options
def ocrd_gbn_sbb_crop(*args, **kwargs):
return ocrd_cli_wrap_processor(OcrdGbnSbbCrop, *args, **kwargs)
@click.command()
@ocrd_cli_options
def ocrd_gbn_sbb_segment(*args, **kwargs):
return ocrd_cli_wrap_processor(OcrdGbnSbbSegment, *args, **kwargs)
```
#### File: gbn/lib/util.py
```python
import numpy as np
import cv2
import PIL.Image
def pil_to_cv2_rgb(image, bg_color=255):
'''
Converts PIL RGB image to cv2 (OpenCV) BGR image (Numpy array)
'''
# Remove alpha channel from image, if there is one:
if image.mode == 'LA' or image.mode == 'RGBA':
# Ensure RGBA:
image = image.convert('RGBA')
alpha = image.getchannel('A')
# Paste image on a canvas:
canvas = PIL.Image.new('RGBA', image.size, bg_color)
canvas.paste(image, mask=alpha)
image = canvas
else:
alpha = None
# Convert PIL image array to RGB then to Numpy array then to BGR (for OpenCV):
image = cv2.cvtColor(np.array(image.convert('RGB'), dtype=np.uint8), cv2.COLOR_RGB2BGR)
return image, alpha
def cv2_to_pil_rgb(image, alpha=None):
'''
Converts cv2 (OpenCV) BGR image to PIL RGB image
'''
# Convert OpenCV BGR image array (Numpy) to PIL RGB image with alpha channel:
image = PIL.Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Restore alpha channel, if there is one:
if alpha:
image.putalpha(alpha)
return image
def pil_to_cv2_gray(image, bg_color=255):
'''
Converts PIL grayscale image to cv2 (OpenCV) grayscale image (Numpy array)
'''
# Remove alpha channel from image, if there is one:
if image.mode == 'LA' or image.mode == 'RGBA':
# Ensure LA:
image = image.convert('LA')
alpha = image.getchannel('A')
# Paste image on a canvas:
canvas = PIL.Image.new('LA', image.size, bg_color)
canvas.paste(image, mask=alpha)
image = canvas
else:
alpha = None
# Convert PIL image array to Numpy array (for OpenCV):
image = np.array(image.convert('L'), dtype=np.uint8)
return image, alpha
def cv2_to_pil_gray(image, alpha=None):
'''
Converts cv2 (OpenCV) grayscale image to PIL grayscale image
'''
# Convert OpenCV grayscale image array (Numpy) to PIL grayscale image with alpha channel:
image = PIL.Image.fromarray(image)
# Restore alpha channel, if there is one:
if alpha:
image.putalpha(alpha)
return image
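
# --- Illustrative usage (not from the original repo): round-trips an image through the OpenCV
# --- representation; 'page.png' is a hypothetical input file.
if __name__ == '__main__':
    pil_img = PIL.Image.open('page.png')
    bgr, alpha = pil_to_cv2_rgb(pil_img)
    restored = cv2_to_pil_rgb(bgr, alpha)
    restored.save('page_roundtrip.png')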
``` |
{
"source": "joaomacalos/imfx-python",
"score": 3
} |
#### File: joaomacalos/imfx-python/utils_statsmodels.py
```python
from statsmodels.tsa.stattools import adfuller
from statsmodels.stats.diagnostic import acorr_breusch_godfrey
from statsmodels.regression.linear_model import RegressionResultsWrapper
from pandas import Series, DataFrame
def adf_test(series, terms='c', title=''):
"""
Pass in a time series and an optional title, returns an ADF report
"""
print(f'Augmented Dickey-Fuller Test: {title}')
result = adfuller(series.dropna(),autolag='AIC', regression=terms) # .dropna() handles differenced data
labels = ['ADF test statistic','p-value','# lags used','# observations']
out = Series(result[0:4],index=labels)
for key,val in result[4].items():
out[f'critical value ({key})']=val
out['Terms'] = terms
print(out.to_string()) # .to_string() removes the line "dtype: float64"
if result[1] <= 0.05:
print("Strong evidence against the null hypothesis")
print("Data has no unit root and is stationary")
else:
print("Weak evidence against the null hypothesis")
print("Data has a unit root and is non-stationary")
def bglm_test(model, lags, title='') -> Series:
"""
Pass in a fitted model and return the Breusch-Godfrey LM Test
"""
print(f'Breusch-Godfrey LM Test: {title}')
result = acorr_breusch_godfrey(model, lags)
labels = ['LM test statistic','LM (Chi-sq) p-value', 'F test statistic','F p-value']
out = Series(result,index=labels)
print(out.to_string()) # .to_string() removes the line "dtype: float64"
print(f'Null hypothesis: No Autocorrelation of any order up to {lags} lags.')
if result[1] <= 0.05:
print("Reject the null hypothesis at the .05 significance level")
print("Evidence of Autocorrelation in the residuals.")
else:
print("Fail to reject the null hypothesis at the .05 significance level")
print("Data has no evidence of autocorrelation.")
def dynamic_pred(
model: RegressionResultsWrapper,
exog: DataFrame,
lag_endog: str) -> DataFrame:
"""
Takes a fitted model, a set of exogenous variables, and the name of the lagged
endogenous variable and returns a DataFrame with the summary of the dynamic prediction.
"""
exog = exog.copy()
steps = exog.shape[0]
ids = exog.index
fcast = None
ftbl = {'mean': [], 'mean_se': [],
'mean_ci_lower': [], 'mean_ci_upper': [],
'obs_ci_lower': [], 'obs_ci_upper': []}
#predictions = model.get_prediction(exog.iloc[[0]]).summary_frame()
#return predictions
for i, step in enumerate(range(steps)):
predictions = model.get_prediction(exog.iloc[[i]])
tbl = predictions.summary_frame()
ftbl['mean'].append(tbl['mean'].values[0])
ftbl['mean_se'].append(tbl['mean_se'].values[0])
ftbl['mean_ci_lower'].append(tbl['mean_ci_lower'].values[0])
ftbl['mean_ci_upper'].append(tbl['mean_ci_upper'].values[0])
ftbl['obs_ci_lower'].append(tbl['obs_ci_lower'].values[0])
ftbl['obs_ci_upper'].append(tbl['obs_ci_upper'].values[0])
if (i+1) > steps-1:
break
exog.loc[ids[i+1], lag_endog] = tbl['mean'].values[0]
df_ftbl = DataFrame(ftbl).set_index(exog.index)
return df_ftbl
from numpy import zeros, dot, quantile, mean
from numpy.random import choice
from numpy.linalg import lstsq
from statsmodels.api import OLS
def stochastic_forecast(
model: RegressionResultsWrapper,
X_train: DataFrame,
X_test: DataFrame,
ci: float = .95,
simulations: int = 1000) -> DataFrame:
"""
Use bootstrapped residuals to perform stochastic forecasting.
params:
model: a fitted OLS object from `statsmodels`
X_train: the exogenous variables for the training set.
X_test: the exogenous variables for the forecasting period.
        ci: the confidence interval.
simulations: the number of bootstrapped repetitions.
"""
X_train = X_train.copy()
X_test = X_test.copy()
test_size = X_test.shape[0]
baseline = model.predict()
len_baseline = len(baseline)
residuals = model.resid
X_train = X_train.rename(columns={"const": "Intercept"})
X_test = X_test.rename(columns={"const": "Intercept"})
# Boot
bootstraps = zeros((simulations, test_size))
for i in range(simulations):
# boot_params = [np.random.normal(mean, sd) for mean, sd in zip(params, params_se)]
boot_residuals = choice(residuals, len_baseline, replace=True)
boot_y = baseline + boot_residuals
params, *_ = lstsq(X_train, boot_y, rcond=None)
pred = dot(X_test, params)
bootstraps[i, :] = pred
final_frame = zeros((test_size, 3))
quantile_intervals = [.5 - (ci/2), .5, .5 + (ci/2)]
for i in range(test_size):
final_frame[i, :] = quantile(bootstraps[:, i], quantile_intervals)
final_frame = DataFrame(final_frame).set_index(X_test.index)
final_frame.columns = ['lower_q', 'median', 'upper_q']
return final_frame
def stochastic_forecast2(
model,
X_train,
X_test,
ci=0.95,
simulations = 1000):
X_train = X_train.copy()
X_test = X_test.copy()
test_size = X_test.shape[0]
baseline = model.predict()
len_baseline = len(baseline)
residuals = model.resid
X_train = X_train.rename(columns={"const": "Intercept"})
X_test = X_test.rename(columns={"const": "Intercept"})
X_train = X_train.assign(Intercept=1.)
X_test = X_test.assign(Intercept=1.)
# Boot
boot_mean = zeros((simulations, test_size))
boot_upper = zeros((simulations, test_size))
boot_lower = zeros((simulations, test_size))
for i in range(simulations):
# boot_params = [np.random.normal(mean, sd) for mean, sd in zip(params, params_se)]
boot_residuals = choice(residuals, len_baseline, replace=True)
boot_y = baseline + boot_residuals
pred = OLS(boot_y, X_train).fit().get_prediction(X_test).summary_frame(alpha=ci)
boot_mean[i, :] = pred['mean']
boot_upper[i, :] = pred['obs_ci_upper']
boot_lower[i, :] = pred['obs_ci_lower']
final_frame = zeros((test_size, 3))
for i in range(test_size):
final_frame[i, :] = [mean(boot_mean[:, i]), mean(boot_lower[:, i]), mean(boot_upper[:, i])]
final_frame = DataFrame(final_frame).set_index(X_test.index)
final_frame.columns = ['mean', 'lower', 'upper']
return final_frame
from numpy import log
def aic_eviews(model):
df = model.df_model
llf = model.llf
n = model.nobs
return -2 * (llf/n) + 2 * (df/n)
def bic_eviews(model):
df = model.df_model
llf = model.llf
n = model.nobs
return -2 * (llf/n) + df * log(n) / (n)
def aic_lectures(model):
    df = model.df_model
    sse = model.ssr  # statsmodels exposes the sum of squared residuals as `ssr`
    n = model.nobs
    return n * log(sse) + 2 * df
def bic_lectures(model):
    df = model.df_model
    sse = model.ssr  # statsmodels exposes the sum of squared residuals as `ssr`
    n = model.nobs
    return n * log(sse) + df * log(n)
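
# --- Illustrative usage (not from the original repo): fits a small OLS model on simulated data,
# --- prints the EViews-style information criteria defined above and runs the ADF report.
if __name__ == '__main__':
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.default_rng(0)
    x = rng.normal(size=200)
    y = 1.0 + 2.0 * x + rng.normal(size=200)
    fit = sm.OLS(y, sm.add_constant(x)).fit()
    print(aic_eviews(fit), bic_eviews(fit))
    adf_test(Series(y), terms='c', title='Simulated series')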
``` |
{
"source": "joaomacedoDS/House-Pricing-Model",
"score": 2
} |
#### File: joaomacedoDS/House-Pricing-Model/data_pipeline.py
```python
import sqlite3
import pandas as pd
default_args = {'owner': 'airflow'}
path = "C:\\Users\\joaoa\\Documents\\bootcamp"
path_db_producao = path+"\\data\\imoveis_prod.db"
path_db_datawarehouse = path+"\\data\\imoveis_dw.db"
path_temp_csv = path+"\\data\\dataset.csv"
#dag = DAG(dag_id='data_pipeline', default_args=default_args, schedule_interval='@daily',start_date=days_ago(2))
def _extract():
    #connect to the production database.
    connect_db_imoveis = sqlite3.connect(path_db_producao)
    #select the data.
dataset_df = pd.read_sql_query(r"""
SELECT CIDADE.NOME as 'cidade'
,ESTADO.NOME as 'estado'
,IMOVEIS.AREA as 'area'
,IMOVEIS.NUM_QUARTOS
,IMOVEIS.NUM_BANHEIROS
,IMOVEIS.NUM_ANDARES
,IMOVEIS.ACEITA_ANIMAIS
,IMOVEIS.MOBILIA
,IMOVEIS.VALOR_ALUGUEL
,IMOVEIS.VALOR_CONDOMINIO
,IMOVEIS.VALOR_IPTU
,IMOVEIS.VALOR_SEGURO_INCENDIO
FROM IMOVEIS INNER JOIN CIDADE
ON IMOVEIS.CODIGO_CIDADE = CIDADE.CODIGO
INNER JOIN ESTADO
ON CIDADE.CODIGO_ESTADO = ESTADO.CODIGO;
""",
connect_db_imoveis
)
    #export the data to the staging area.
    dataset_df.to_csv(
        path_temp_csv,
        index=False
    )
    #close the database connection.
connect_db_imoveis.close()
return None
def _transform():
dataset_df = pd.read_csv(path_temp_csv)
    #transform the attribute values.
    dataset_df.aceita_animais.replace({'acept': 1, 'not acept':0}, inplace=True)
    dataset_df.mobilia.replace({'furnished': 1, 'not furnished':0}, inplace=True)
    #clean up the records.
    dataset_df.num_andares.replace({'-': 1}, inplace=True)
    dataset_df.cidade = dataset_df.cidade.str.title()
    dataset_df.cidade.replace({'Sao Paulo': 'São Paulo',
                               'Rio Janeiro': 'Rio de Janeiro'}, inplace=True)
    #replace the original data with the transformed data
dataset_df.to_csv(path_temp_csv, index=False)
return None
def _load():
    #connect to the data warehouse database.
    connect_db_imoveis_dw = sqlite3.connect(path_db_datawarehouse)
    #read the data from the csv file.
    dataset_df = pd.read_csv(path_temp_csv)
    #load the data into the database.
dataset_df.to_sql("imoveis", connect_db_imoveis_dw,
if_exists="replace", index=False)
return None
#---------------------------------------------
#ETL
#----------------------------------------------
#FUNCTIONS FOR THE AIRFLOW VERSION OF THE PIPELINE
#extract_task = PythonOperator(task_id="extract", python_callable=_extract, dag=dag)
#transform_task = PythonOperator(task_id="transform", python_callable=_transform, dag=dag)
#load_task = PythonOperator(task_id="load", python_callable=_load, dag=dag)
#extract_task >> transform_task >> load_task
#Extract
_extract()
#Transform
_transform()
#Load
_load()
``` |
{
"source": "JoaoManoel/final-paper",
"score": 3
} |
#### File: email_service/src/email_client.py
```python
import grpc
# import the generated classes
import email_pb2
import email_pb2_grpc
def run():
# open a gRPC channel
channel = grpc.insecure_channel('localhost:50051')
stub = email_pb2_grpc.EmailSenderStub(channel)
email = email_pb2.EmailRequest(
From='<EMAIL>',
to='<EMAIL>',
subject='Suaaaa senha',
body='<b>Oloco</b>')
response = stub.sendEmail(email)
    print(response)
if __name__ == '__main__':
run()
``` |
{
"source": "joaomanojr/udemy_deepQ",
"score": 3
} |
#### File: udemy_deepQ/lesson10/pytorch_example.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch as T
class LinearClassifier(nn.Module):
def __init__(self, lr, n_classes, input_dims):
super(LinearClassifier, self).__init__()
self.fc1 = nn.Linear(*input_dims, 128)
self.fc2 = nn.Linear(128, 256)
self.fc3 = nn.Linear(256, n_classes)
# Author: self.parameters() from inherited class Module
self.optimizer = optim.Adam(self.parameters(), lr=lr)
self.loss = nn.CrossEntropyLoss() #nn.MSELoss()
self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        # Author: PyTorch uses different tensor types for CUDA/CPU devices
self.to(self.device)
def forward(self, data):
layer1 = F.sigmoid(self.fc1(data))
layer2 = F.sigmoid(self.fc2(layer1))
# Author: CrossEntropy will take care of activation for us...
layer3 = self.fc3(layer2)
return layer3
# Author: DeepQ will get different parameters (state, action, ...)
def learn(self, data, labels):
        # Author: PyTorch accumulates gradients from the previous iteration; they are
        # not needed here, so we clear them.
self.optimizer.zero_grad()
        # Author: external data must be converted to tensors of the types the
        # network expects. PyTorch's Tensor class defaults to float/64-bit
        # internal types; we use torch.tensor here and keep regular 32-bit
        # values whenever possible to save memory.
data = T.tensor(data).to(self.device)
labels = T.tensor(labels).to(self.device)
        # Author: get predictions and evaluate the cost (how far predictions are
        # from the actual labels).
predictions = self.forward(data)
cost = self.loss(predictions, labels)
# Author: backpropagate cost and add a step on our optimizer.
# These two calls are critical for learn loop.
cost.backward()
self.optimizer.step()
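
# --- Illustrative usage (not from the original repo): one learning step on random data; the
# --- shapes and class count are arbitrary.
if __name__ == '__main__':
    import numpy as np
    model = LinearClassifier(lr=1e-3, n_classes=3, input_dims=[8])
    data = np.random.randn(16, 8).astype(np.float32)
    labels = np.random.randint(0, 3, size=16)
    model.learn(data, labels)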
``` |
{
"source": "JoaoMarcosCSilva/Fonte-3-12V",
"score": 3
} |
#### File: JoaoMarcosCSilva/Fonte-3-12V/script.py
```python
from math import sqrt
# CONFIGURATION
# Maximum and minimum output voltage of the supply
V_max = 12
V_min = 3
# (Positive) voltage drop between the transistor's base and emitter
V_be = 1.292
# Resistance of the resistor after the potentiometer
R_after = 4300
# Current through the transistor's base (it should be exactly 1 mA, but Falstad gives this value)
iB = 2.475e-6
# Minimum voltage guaranteed by the capacitor
Vc = 19
# Breakdown voltage of the zener diode
Vz = 15
# Computes the current through a resistor
#=================================================
#
# +---------+
# V+-------+ R +------+0V
# +---------+
#
#=================================================
def current(V, R):
return V / R
# Computes R_before given Vz, V_medium, R_after and Ib
# In the final circuit, R_before corresponds to the sum of the resistances of the potentiometer and of the resistor before it
#=======================================================================
#
# Ib
# +-->
# +--------------
# |
# |
# +----------+ | +---------+
# Vz+-------+ R_before +--+ V_medium +--+--+ R_after +---------+0V
# +----------+ +---------+
#
#=======================================================================
def total_R_before(V_medium, R_after, iB, Vz):
total_current = current(V_medium, R_after) + iB
R_before = (Vz - V_medium)/total_current
return R_before
# Computes R_A given Vz, V_medium, R_after, Ib and R_before
# This step effectively splits the resistor R_before from the previous circuit into two resistors: the potentiometer (R_pot) and the resistor before it (Ra)
#=======================================================================
#
# +----------+
# Vz+-------+ R_before +--+
# +----------+
#
# === === === === === === === === === === ===
#
# +--+ +----+
# Vz+-------+Ra+--+Rpot+--+
# +--+ +----+
#
#=======================================================================
def get_resistances(V_min, V_max, R_after, iB, Vz):
R_before = total_R_before(V_min, R_after, iB, Vz)
a = iB
b = (-1)*(Vz + iB*(R_after + R_before))
c = (Vz - V_max)*(R_after + R_before)
delta = b*b - 4*a*c
resistencia = (-b - sqrt(delta))/(2*a)
return resistencia, R_before - resistencia
# Add the transistors' voltage drop to the desired voltages, to compensate for that drop
V_max += V_be
V_min += V_be
# Compute the final resistances
Ra, Rpot = get_resistances(V_min, V_max, R_after, iB, Vz)
print("Potenciômetro:", Rpot)
print("Resistor à esquerda:", Ra)
# Compute the maximum current that can flow through the resistor to the left of the potentiometer
max_current_Ra = current(V_max, Rpot + R_after) + iB
# With that value, compute the maximum resistance of the resistor above the zener diode
R_zener = (Vc - Vz)/max_current_Ra
print("Resistor acima do zener:", R_zener)
``` |
{
"source": "JoaoMarcosCSilva/pesquisa-eleitoral-interpretador",
"score": 3
} |
#### File: JoaoMarcosCSilva/pesquisa-eleitoral-interpretador/download.py
```python
import json

from kinto_http import Client
client = Client(server_url="https://kinto-pesquisa.herokuapp.com/v1",
auth=('Joao', 'Joao'))
collections = client.get_collections(bucket = 'pesquisa')
class Voto:
def __init__(this):
this.pesquisador = ''
this.genero = 0
this.classe = 0
this.candidato = 0
this.rejeita = []
this.hora = ''
def __tojson__(this):
r = '{'
r += '\n"pesquisador": "' + str(this.pesquisador)
r += '",\n"genero": "' + str(this.genero)
r += '",\n"classe": "' + str(this.classe)
r += '",\n"candidato": "' + str(this.candidato)
r += '",\n"rejeita": ' + json.dumps(this.rejeita)
r += ',\n"hora": "' + this.hora
r += '"\n}'
return r
votos = []
def getVotosString():
result = '[\n'
i = 0
for voto in votos:
result += voto.__tojson__()
if i < len(votos) - 1:
result += ','
result += '\n'
i += 1
result += ']'
return result
for collection in collections:
name = collection['id']
print(name + '...')
record_list = client.get_records(collection=name, bucket='pesquisa')
records = record_list[0]['info']
if len(record_list) > 1:
print ('found ' + str(len(record_list)) + ' entries.')
for record in records:
voto = Voto()
voto.pesquisador = name
voto.genero = record['genero']
voto.classe = record['classe_social']
voto.candidato = record['candidato']
voto.rejeita = record['rejeita']
voto.hora = record['criado_em']
votos.append(voto)
file = open('resultados.txt', 'w')
file.write(getVotosString())
file.close()
```
#### File: JoaoMarcosCSilva/pesquisa-eleitoral-interpretador/script.py
```python
import numpy as np
import matplotlib.pyplot as plt
import json
import operator
generos = {
'0': 'Masculino',
'1': 'Feminino'
}
candidatos = {
'0': '<NAME>',
'1': '<NAME>',
'2': '<NAME>',
'3': 'Eymael',
'4': '<NAME>',
'5': '<NAME>',
'6': '<NAME>',
'7': '<NAME>',
'8': '<NAME>',
'9': '<NAME>',
'10': '<NAME>',
'11': '<NAME>',
'12': 'Vera Lúcia',
'13': 'Branco/Nulo',
'14': 'Não sabe/Não quis responder',
'15': 'Votaria em Qualquer Candidato'
}
classes = {
'0': 'A',
'1': 'B',
'2': 'C',
'3': 'D',
'4': 'E',
'5': 'Não quis responder'
}
with open('resultados.txt') as file:
votos = json.load(file)
def increment(dict, key):
if key in dict:
dict[key] += 1
else:
dict[key] = 1
entrevistados = len(votos)
total = {}
total_generos = {}
total_classes = {}
rej_total = {}
rej_total_generos = {}
rej_total_classes = {}
porc_generos = {}
porc_classes = {}
for voto in votos:
if voto['hora'][1] == '7':
continue
votou = candidatos[voto['candidato']]
increment(total, votou)
for g in generos:
genero = generos[g]
if genero not in total_generos:
total_generos[genero] = {}
if voto['genero'] == g:
increment(total_generos[genero], votou)
increment(porc_generos, genero)
for c in classes:
classe = classes[c]
if classe not in total_classes:
total_classes[classe] = {}
if voto['classe'] == c:
increment(total_classes[classe], votou)
increment(porc_classes, classe)
if len(voto['rejeita']) == 0:
rejeitado = candidatos['15']
increment(rej_total, rejeitado)
for g in generos:
genero = generos[g]
if genero not in rej_total_generos:
rej_total_generos[genero] = {}
if voto['genero'] == g:
increment(rej_total_generos[genero], rejeitado)
for c in classes:
classe = classes[c]
if classe not in rej_total_classes:
rej_total_classes[classe] = {}
if voto['classe'] == c:
increment(rej_total_classes[classe], rejeitado)
for rejeicao in voto['rejeita']:
rejeitado = candidatos[str(rejeicao)]
increment(rej_total, rejeitado)
for g in generos:
genero = generos[g]
if genero not in rej_total_generos:
rej_total_generos[genero] = {}
if voto['genero'] == g:
increment(rej_total_generos[genero], rejeitado)
for c in classes:
classe = classes[c]
if classe not in rej_total_classes:
rej_total_classes[classe] = {}
if voto['classe'] == c:
increment(rej_total_classes[classe], rejeitado)
explode = 0.01
def make_autopct(total):
def my_autopct(pct):
val = int(round(pct*total/100.0))
return '{p:1.1f}% ({v:d})'.format(p=pct,v=val)
return my_autopct
def plot(title, l, size = 5, total = entrevistados, folder = ''):
fig1, ax1 = plt.subplots()
plt.title(title, fontsize = 20)
fig1.set_size_inches(size, size, forward = True)
data = np.transpose([list(elem) for elem in sorted(l.items(), key=operator.itemgetter(1), reverse=True)])
plt.pie(data[1], labels = data[0], explode = (explode,) * len(data[1]), autopct = make_autopct(total))
path = 'generated\\' + folder + '\\' + title
plt.savefig(path, bbox_inches='tight')
def plotBar(title, l, size = 5, total = entrevistados, folder = ''):
fig1, ax1 = plt.subplots()
plt.title(title, fontsize = 20)
fig1.set_size_inches(size, size, forward = True)
data = np.transpose([list(elem) for elem in sorted(l.items(), key=operator.itemgetter(1), reverse=True)])
plt.barh(range(0, len(data[1])), [(int(elem)/total)*100 for elem in data[1]])
ax1.set_yticks(range(0, len(data[0])))
ax1.set_yticklabels(data[0])
for i, v in enumerate([(int(elem)/total)*100 for elem in data[1]]):
ax1.text(v+1, i-0.2, str(int(v)) + '%')
path = 'generated\\' + folder + '\\' + title
plt.savefig(path, bbox_inches='tight')
plot('Proporção entre Homens e Mulheres', porc_generos)
plot('Proporção entre Classes Sociais', porc_classes)
plot('Resultados Gerais', total, 9)
plot('Resultados entre os Homens', total_generos['Masculino'], 9, porc_generos['Masculino'], 'genero')
plot('Resultados entre as Mulheres', total_generos['Feminino'], 9, porc_generos['Feminino'], 'genero')
for classe in total_classes:
if classe == 'Não quis responder':
continue
plot('Resultados entre membros da classe ' + classe, total_classes[classe], 9, porc_classes[classe], 'classe')
plotBar('Rejeição Geral', rej_total, folder = 'rejeicao')
plotBar('Rejeição Entre os Homens', rej_total_generos['Masculino'], total = porc_generos['Masculino'], folder = 'rejeicao\\genero')
plotBar('Rejeição Entre as Mulheres', rej_total_generos['Feminino'], total = porc_generos['Feminino'], folder = 'rejeicao\\genero')
print('Total de entrevistados: ' + str(entrevistados))
for classe in total_classes:
if classe == 'Não quis responder':
continue
plotBar('Rejeição entre membros da classe ' + classe, rej_total_classes[classe], total = porc_classes[classe], folder = 'rejeicao\\classe')
``` |
{
"source": "JoaoMarcosCSilva/Pokemon-Interpolator",
"score": 3
} |
#### File: JoaoMarcosCSilva/Pokemon-Interpolator/Pokemon_Model.py
```python
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, Input, BatchNormalization, Flatten, Dense, MaxPooling2D, UpSampling2D
def get_Encoder(Layers, Hidden_Channels, Starting_Channels):
inputs = Input(shape = (64,64,3))
x = inputs
channels = Starting_Channels
for l in range(Layers-1):
x = Conv2D(channels, 3, activation = 'relu', padding = 'same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D()(x)
channels = int(channels / 2)
x = Conv2D(Hidden_Channels*2, 3, activation = 'relu', padding = 'same')(x)
x = BatchNormalization()(x)
x = Conv2D(Hidden_Channels, 3, padding = 'same')(x)
Encoder = keras.Model(inputs, x)
return Encoder
def get_Decoder(Layers, Hidden_Shape, Encoder_Starting_Channels):
inputs = Input(shape = (Hidden_Shape))
x = inputs
x = Conv2D(Hidden_Shape[-1]*2, 3, activation = 'relu', padding = 'same')(x)
x = BatchNormalization()(x)
channels = int(Encoder_Starting_Channels / (2**(Layers-1)))
for l in range(Layers-1):
channels = channels * 2
x = UpSampling2D()(x)
x = Conv2D(channels, 3, activation = 'relu', padding = 'same')(x)
x = BatchNormalization()(x)
x = Conv2D(3, 3, activation = 'sigmoid', padding = 'same') (x)
Decoder = keras.Model(inputs, x)
return Decoder
def get_Model (Layers, Hidden_Channels, Starting_Channels):
inputs = Input(shape = (64,64,3))
Encoder = get_Encoder(Layers, Hidden_Channels, Starting_Channels)
Decoder = get_Decoder(Layers, Encoder.output_shape[1:], Starting_Channels)
x = inputs
x = Encoder(x)
x = Decoder(x)
Model = keras.Model(inputs, x)
return Encoder, Decoder, Model
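
# --- Illustrative usage (not from the original repo): builds the autoencoder and runs a forward
# --- pass on a random batch; the layer/channel counts are arbitrary.
if __name__ == '__main__':
    import numpy as np
    Encoder, Decoder, Model = get_Model(Layers=4, Hidden_Channels=8, Starting_Channels=64)
    batch = np.random.rand(2, 64, 64, 3).astype('float32')
    print(Encoder(batch).shape, Model(batch).shape)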
``` |
{
"source": "Joao-Maria-Janeiro/SaoPerolasDjango",
"score": 2
} |
#### File: SaoPerolasDjango/cart/models.py
```python
from django.db import models
from products.models import Product
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.conf import settings
User = get_user_model()
# Create your models here.
class CartProduct(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.IntegerField(default=1)
class Cart(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
products = models.ManyToManyField(CartProduct, blank=True)
total_price = models.IntegerField(default=0)
def post_save_cart_create(sender, instance, created, *args, **kwargs):
if created:
Cart.objects.get_or_create(user=instance)
post_save.connect(post_save_cart_create, sender=settings.AUTH_USER_MODEL)
class Cart_userless(models.Model):
products = models.ManyToManyField(CartProduct, blank=True)
total_price = models.IntegerField(default=0)
class ShippingDetails(models.Model):
full_name = models.CharField(max_length = 300)
adress = models.CharField(max_length = 300)
city = models.CharField(max_length = 300)
localidade = models.CharField(max_length = 300)
zip = models.CharField(max_length = 300)
country = models.CharField(max_length = 300)
phone_number = models.CharField(max_length = 300)
email = models.EmailField(null=True)
class OrderUserless(models.Model):
cart = models.ForeignKey(Cart_userless, on_delete=models.CASCADE, null=True)
total_price = models.IntegerField(default=0)
date_ordered = models.DateTimeField(auto_now=True)
shipping_details = models.ForeignKey(ShippingDetails, on_delete=models.CASCADE, null=True)
class Order(models.Model):
cart = models.ForeignKey(Cart, on_delete=models.CASCADE, null=True)
total_price = models.IntegerField(default=0)
date_ordered = models.DateTimeField(auto_now=True)
shipping_details = models.ForeignKey(ShippingDetails, on_delete=models.CASCADE, null=True)
``` |
{
"source": "joaomateusferr/teach_programming",
"score": 4
} |
#### File: Python/1 - if,else,switch case and math operations/6_check_divisible_by_2.py
```python
def main():
n = int(input('Insert a number: '))
    #the % operator returns the remainder of the division; here it is used with integer values
if n % 2 == 0:
print("Is divisible by 2")
else:
print("It is not divisible by 2")
#being divisible by 2 is the same as being even and not being divisible by 2 is the same as being odd
main()
``` |
{
"source": "Joaomc15/MDC-COVID-19-visuals",
"score": 3
} |
#### File: Joaomc15/MDC-COVID-19-visuals/FIPS.py
```python
from datetime import date, timedelta
import pandas as pd
from point import Point
import os
from urllib.error import HTTPError
import datetime
import numpy as np
class County:
def __init__(self, FIPS): #( county_list, data_list, label_list):
self.FIPS = FIPS
county_name = ""
def get_data(self, labels_list):
sdate = datetime.date(2020, 3, 22) # start date || this is the first day that JHU posted data for MDC
        edate = datetime.date.today()  # end date || days that JHU has not published yet are skipped below
delta = edate - sdate # as timedelta
county_FIPS = self.FIPS
big_list = []
values_list =[]
x = []
# labels_list = ['FIPS', 'Admin2', 'Province_State', 'Country_Region', 'Last_Update', 'Lat', 'Long_', 'Confirmed', 'Deaths', 'Recovered', 'Active', 'Combined_Key']
# labels_list = [ 'Last_Update', 'Confirmed', 'Deaths', 'Combined_Key']
# big_list += labels_list + ['|||']
for i in range(delta.days + 1):
date = sdate + timedelta(days=i)
printable_date = str(date)
month = date.month
if len(str(month))<2:
month = '0'+str(month)
day = date.day
if len(str(day))<2:
day = '0'+str(day)
url = f'''https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{month}-{day}-2020.csv'''
try:
db = pd.read_csv(url, error_bad_lines=False)
df = pd.DataFrame(db)
except HTTPError:
print(f'''There is no file for {month}/{day} yet, so it was skipped.''')
continue
values_list.clear()
x.clear()
for item in labels_list:
location = df.loc[df['FIPS']==county_FIPS].index[0]
cell_value = df[item][location]
if item =='Last_Update':
values_list.insert(0,cell_value)
continue
values_list.append(cell_value)
break_value ="|||"
x.append(values_list)
# print (values_list)
big_list += values_list
big_list.append(break_value)
# print(big_list)
# print(big_list)
# point1 = Point(printable_date, cell_value)
# new_list.append(point1.get_point())
return big_list
def get_name(self):
sdate = datetime.date(2020, 3, 22) # start date || this is the first day that JHU posted data for MDC
edate = datetime.date.today() # end date || currently set to yesterday's date because it turned to midnight and I was getting an error cause JHU did not publish it yet for 3/28
delta = edate - sdate # as timedelta
values_list =[]
url = f'''https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/03-27-2020.csv'''
try:
db = pd.read_csv(url, error_bad_lines=False)
df = pd.DataFrame(db)
except HTTPError:
print(f'''There is no file for that day yet, so it was skipped.''')
county_FIPS = self.FIPS
location = df.loc[df['FIPS']==county_FIPS].index[0]
county_name = df['Admin2'][location]
return str(county_name)
def get_date(self):
return str(datetime.date.today() )
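
# --- Illustrative usage (not from the original repo): 12086.0 is assumed to be the FIPS code
# --- for Miami-Dade County in the JHU daily reports; requires network access and the repo's
# --- point.py module on the path.
if __name__ == '__main__':
    county = County(12086.0)
    print(county.get_name(), county.get_date())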
``` |
{
"source": "joaomcm/Klampt",
"score": 3
} |
#### File: Python/control-examples/rosserialrelay.py
```python
from klampt.control.io import roscontroller
from klampt.control.io.serialcontroller import ControllerClient
from klampt import *
import asyncore
import rospy
def main():
#read klampt_robot_file and optionally klampt_serial_port from parameter server
rospy.init_node('klampt_sim')
    print(rospy.get_param('/klampt_robot_file'))
    try:
        klampt_robot_model_fn = rospy.get_param('/klampt_robot_file')
    except KeyError:
        print('Error, ROS parameter "/klampt_model_name" doesn\'t exist.')
        print('Set this using rosparam set klampt_model_name [KLAMPT .rob FILE]')
        exit(1)
    try:
        klampt_serial_port = rospy.get_param('/klampt_serial_port')
        print("Using serial port",klampt_serial_port,"from parameter /klampt_serial_port")
    except KeyError:
        klampt_serial_port = 3456
        print("Using serial port 3456 by default, use rosparam set")
        print("klampt_serial_port [PORT] if you want to change this.")
#load robot file
world = WorldModel()
world.enableGeometryLoading(False)
res = world.readFile(klampt_robot_model_fn)
if not res:
        print('Error, could not load klampt model from',klampt_robot_model_fn)
        exit(1)
    if world.numRobots()==0:
        print('Error, klampt model',klampt_robot_model_fn,'did not contain a robot')
        exit(1)
    klampt_robot_model = world.robot(0)
    print("Load successful")
    #print some info
    robotName = klampt_robot_model.getName()
    linkNames = [klampt_robot_model.link(i).getName() for i in range(klampt_robot_model.numLinks())]
    print("Running controller listening on topic /%s/joint_trajectory and"%(robotName,))
    print("publishing on topic /%s/joint_state"%(robotName,))
    print("Klamp't link names are:",linkNames)
#create the ROS controller
c = roscontroller.make(klampt_robot_model)
#launch the serial client to connect to a given host and relay messages from the socket to/from ROS
host = 'localhost'
port = klampt_serial_port
s = ControllerClient((host,port),c)
asyncore.loop()
if __name__ == '__main__':
main()
```
#### File: control/blocks/estimators.py
```python
from klampt.math import vectorops
from ..controller import ControllerBlock
from .utils import LambdaBlock
from collections import deque
class DerivativeEstimator(ControllerBlock):
"""An estimator computes the derivative of some input (typically 'q') using
finite differences. Outputs to 'dq' or ('d'+name in general).
"""
def __init__(self,name='q',robot=None):
self.name = name
self.robot = robot
self.qlast = None
def inputNames(self):
return ['dt',self.name]
def outputNames(self):
return ['d'+self.name]
def getState(self):
return {'last':self.qlast}
def setState(self,state):
self.qlast = state['last']
def advance(self,**inputs):
try:
dt = inputs["dt"]
q = inputs[self.name]
except KeyError:
raise ValueError("Input needs to have value '%s' and timestep 'dt'"%(self.name,))
if len(q)==0: return None
if self.qlast==None:
dq = [0]*len(q)
else:
if self.robot==None:
                dq = vectorops.div(vectorops.sub(q,self.qlast),dt)
else:
assert(len(self.qlast)==len(q))
dq = vectorops.div(self.robot.interpolate_deriv(self.qlast,q),dt)
self.qlast = q
return {'d'+self.name:dq}
def signal(self,type,**inputs):
if type=='reset':
self.qlast=None
class IntegralEstimator(ControllerBlock):
"""An estimator computes the integral of some input using the
trapezoidal rule.
"""
def __init__(self,name):
self.name = name
self.integral = None
def inputNames(self):
return ['dt',self.name]
def outputNames(self):
return ['I'+self.name]
def getState(self):
return self.integral
def setState(self,state):
self.integral = state
def advance(self,**inputs):
try:
dt = inputs["dt"]
v = inputs[self.name]
except KeyError:
raise ValueError("Input needs to have value %s and timestep 'dt'"%(self.name,))
if len(v)==0: return None
if self.integral is None:
self.integral = vectorops.mul(v,dt)
else:
self.integral = vectorops.madd(self.integral,v,dt)
result = vectorops.madd(self.integral,v,-0.5*dt)
return {'I'+self.name:result}
def signal(self,type,**inputs):
if type=='reset':
self.integral=None
class FIRFilter(ControllerBlock):
"""An estimator that filters some other signal using a Finite Impulse Response
filter. `b` is the vector of coefficients.
For example, a k-moving average filter would set the b vector to
[1/k,...,1/k]
"""
def __init__(self,argname,b,outname=None):
self.argname = argname
self.outname = argname + " filtered" if outname is None else outname
self.b = b
assert hasattr(b,'__iter__')
assert len(b) > 0
        self.history = deque()
def inputNames(self):
return [self.argname]
def outputNames(self):
return [self.outname]
def advance(self,**inputs):
val = inputs[self.argname]
if hasattr(val,'__iter__'):
res = vectorops.mul(val,self.b[0])
assert len(self.history)+1 <= len(self.b)
for i,v in enumerate(self.history):
res = vectorops.madd(res,v,self.b[i+1])
            # if the history is still shorter than the filter, weight the oldest known sample
            if self.history and len(self.history) + 1 < len(self.b):
                res = vectorops.madd(res,self.history[-1],sum(self.b[len(self.history)+1:]))
else:
res = val*self.b[0]
assert len(self.history)+1 <= len(self.b)
for i,v in enumerate(self.history):
res += v*self.b[i+1]
            # if the history is still shorter than the filter, weight the oldest known sample
            if self.history and len(self.history) + 1 < len(self.b):
                res += self.history[-1]*sum(self.b[len(self.history)+1:])
#advance history
self.history.appendleft(val)
while len(self.history) >= len(self.b):
self.history.pop()
return res
def getState(self):
        return {'history': self.history}
def setState(self,state):
self.history = state['history']
def signal(self,type,**inputs):
if type=='reset':
self.history = deque()
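
# --- Illustrative usage (not from the original repo): a 3-sample moving average over a scalar
# --- signal, fed one sample per call (run e.g. as python -m klampt.control.blocks.estimators).
if __name__ == '__main__':
    avg = FIRFilter('q', [1.0 / 3.0] * 3)
    for sample in [3.0, 6.0, 9.0, 12.0]:
        print(avg.advance(q=sample))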
```
#### File: control/blocks/utils.py
```python
from .state_machine import TransitionStateMachine
from ..controller import ControllerBlock
from klampt.math import vectorops
class RemappedBlock(ControllerBlock):
"""A remapping of a block's inputs and outputs"""
def __init__(self,block,inmap=None,outmap=None):
self.block = block
self.inmap = inmap
self.outmap = outmap
def __str__(self):
instr = str(self.inmap) if self.inmap is not None else ''
outstr = str(self.outmap) if self.outmap is not None else ''
return str(self.block)+'{'+instr+';'+outstr+'}'
def inputValid(self,**inputs):
if self.inmap is None:
return self.block.inputValid(**inputs)
else:
return ControllerBlock.inputValid(self,**inputs)
def inputNames(self):
if self.inmap is None:
return self.block.inputNames()
else:
return list(self.inmap.keys())
def outputNames(self):
if self.inmap is None:
return self.block.inputNames()
else:
return list(self.inmap.keys())
def advance(self,**inputs):
blockInputs = inputs if self.inmap is None else dict((v,inputs[k]) for v,k in self.inmap.items())
blockOutputs = self.block.advance(**blockInputs)
if self.outmap is None:
return blockOutputs
else:
return dict((v,blockOutputs[k]) for (v,k) in self.outmap.items())
def signal(self,type,**inputs):
blockInputs = inputs if self.inmap is None else dict((v,inputs[k]) for v,k in self.inmap.items())
self.block.signal(type,**blockInputs)
def getState(self):
return self.block.getState()
def setState(self,state):
return self.block.setState(state)
def drawGL(self):
return self.block.drawGL()
class MultiBlock(ControllerBlock):
"""A controller that runs several other subcontrollers each time step
and emulates a sort of blackboard architecture. Basically used to
emulate a multiprocessor on a robot.
For example, a state estimator can be run before a controller to process
the robot's state into more meaningful information.
The controller stores a register (a dict) that is sent to the
the input of each subcontroller, and the output of each subcontroller
is added to the register.
Selective input/output or renaming of registers can be accomplished via
register-to-input and register-to-output mappings. This makes it easier
to reuse individual subcontrollers when other subcontrollers change their
output.
"""
def __init__(self,controllers=[]):
"""Given a list of controllers, will execute all of them (in sequence)
on each time step.
"""
self.controllers = controllers[:]
self.register = {}
self.outregister = {}
self.inmap = [None for c in self.controllers]
self.outmap = [None for c in self.controllers]
self.myoutmap = None
def inputNames(self):
res = set()
for i in self.inmap:
if i is not None:
res |= set(i.keys())
return res
def outputNames(self):
return self.myoutmap.values()
def launch(self,c):
"""Start running a new controller and returns its id. By default,
all items in the registry are sent to the controllers. To remap,
call the `map_input` method."""
self.controllers.append(c)
self.inmap.append(None)
self.outmap.append(None)
return len(self.controllers)-1
def map_input(self,c,regitem,citem=None):
"""Sends register `regitem` to the input of controller `c`.
Args:
c (int): the index of a sub-controller
regitem (str): the name of an input arg.
citem (str, optional): if specified, the input arg name is
mapped to c's argument `citem`.
If this is not called for a given controller, then all items
in the register are automatically sent to the controller.
"""
if self.inmap[c]==None:
self.inmap[c] = {}
if citem == None:
self.inmap[c][regitem]=regitem
else:
self.inmap[c][citem]=regitem
return
def map_output(self,c,citem,regitem=None):
"""Sends output citem of controller c to the register.
Args:
c (int): the index of a sub-controller
citem (str): the name of an output item of c.
regitem (str, optional): if specified, c's output item cname is
mapped to name regitem.
If this is not called for a given controller, then all items in
the controller's output are automatically sent to the register
"""
if self.outmap[c]==None:
self.outmap[c] = {}
if regitem == None:
self.outmap[c][citem]=citem
else:
self.outmap[c][citem]=regitem
return
def map_my_output(self,regitem,outitem=None):
"""Sends register item regitem to the output of the controller.
Args:
regitem (str): the name of an item in the register
outitem (str, optional): if specified, maps the data to the output
name outitem.
If this is not called, then the entire register is sent to the
output.
"""
if self.myoutmap == None:
self.myoutmap = {}
if outitem == None:
self.myoutmap[regitem] = regitem
else:
self.myoutmap[regitem] = outitem
return
def signal(self,type,**inputs):
self.register.update(inputs)
for i,c in enumerate(self.controllers):
c.signal(type,**self.controller_inputs(i))
return
def controller_inputs(self,c):
if self.inmap[c] == None: return self.register
        return dict((k,self.register[v]) for k,v in self.inmap[c].items())
def advance(self,**inputs):
        assert len(self.inmap)==len(self.controllers),"%d inmaps != %d controllers"%(len(self.inmap),len(self.controllers))
        assert len(self.outmap)==len(self.controllers),"%d outmaps != %d controllers"%(len(self.outmap),len(self.controllers))
self.register.update(inputs)
self.outregister = {}
for i,c in enumerate(self.controllers):
cout = c.advance(**self.controller_inputs(i))
if not cout: continue
if self.outmap[i] == None:
self.register.update(cout)
self.outregister.update(cout)
else:
                for (k,v) in self.outmap[i].items():
self.register[v] = cout[k]
self.outregister[v] = cout[k]
if self.myoutmap == None:
return self.outregister
else:
res = {}
            for k,v in self.myoutmap.items():
try:
res[v] = self.outregister[k]
except KeyError:
print("Warning, output item",k,"not present in register")
return res
def getState(self):
res = []
for c in self.controllers:
try:
s = c.getState()
except NotImplementedError:
s = None
res.append(s)
return res
def setState(self,state):
assert len(state) == len(self.controllers)
for c,s in zip(self.controllers,state):
if s is not None:
c.setState(s)
def drawGL(self):
for c in self.controllers:
c.drawGL()
return
class LambdaBlock(ControllerBlock):
"""A fixed-function controller that simply evaluates a function. The
function arguments and return values are mapped from/to the input/output
dictionaries.
"""
def __init__(self,f,argnames,outnames=None):
self.f = f
self.argnames = argnames
self.outnames = outnames
def inputNames(self):
return self.argnames
def outputNames(self):
return self.outnames
def advance(self,**inputs):
try:
args = [inputs[a] for a in self.argnames]
except KeyError:
print("LambdaBlock: Warning, argument does not exist in inputs")
return None
res = self.f(*args)
if isinstance(self.outnames,(list,tuple)):
return dict(zip(self.outnames,res))
elif self.outnames != None:
return {self.outnames:res}
else:
if isinstance(res,dict):
return res
return None
class SumBlock(LambdaBlock):
"""An estimator that produces an output "A + B [ + ...]" for two or more arguments"""
def __init__(self,*args):
def add(*args):
if hasattr(args[0],'__iter__'):
return vectorops.add(*args)
else:
return sum(args)
LambdaBlock.__init__(self,add,args,' + '.join(args))
class DifferenceBlock(LambdaBlock):
"""An estimator that produces an output "A - B" for two arguments "A" and "B"."""
def __init__(self,arg1,arg2):
def diff(x,y):
if hasattr(x,'__iter__'):
return vectorops.sub(x,y)
else:
return x-y
LambdaBlock.__init__(self,diff,[arg1,arg2],arg1+" - "+arg2)
class ProductBlock(LambdaBlock):
"""An estimator that produces an output "A*B" for two arguments "A" and "B"."""
def __init__(self,arg1,arg2):
def prod(x,y):
if hasattr(x,'__iter__') or hasattr(y,'__iter__'):
return vectorops.mul(x,y)
else:
return x*y
LambdaBlock.__init__(self,prod,[arg1,arg2],arg1+" * "+arg2)
class LinearBlock(ControllerBlock):
"""Implements a linear function
u = K1*input[x1] + ... + Kn*input[xn] + offset
The user must fill out the self.gains member using the addGain()
method.
To use this, Numpy must be available on your system.
"""
def __init__(self,type='torquecmd'):
import numpy as np
self.outputType = type
self.gains = dict()
self.offset = None
def addGain(self,inputTerm,K):
self.gains[inputTerm] = K
def inputNames(self):
return self.gains.keys()
def outputNames(self):
return [self.outputType]
def setConstant(self,offset):
self.offset = offset
def advance(self,**inputs):
import numpy as np
res = self.offset
        for (x,K) in self.gains.items():
if x not in inputs:
print("LinearController: warning, input",x,"doesn't exist, ignoring")
continue
if res==None:
res = np.dot(K,inputs[x])
else:
res += np.dot(K,inputs[x])
return {self.outputType:res}
class CounterBlock(ControllerBlock):
"""A block that just counts the number of times that it's run"""
def __init__(self,initial=0):
self.counter = initial
def inputNames(self):
return []
def outputNames(self):
return ['counter']
def advance(self,**inputs):
res = {'counter':self.counter}
self.counter = self.counter+1
return res
def getState(self):
return self.counter
def setState(self,state):
self.counter = state
def signal(self,type,**inputs):
if type=='reset':
self.counter = 0
class ComposeController(ControllerBlock):
"""Concatenates vectors from multiple items into a single vector.
Useful for when you have one controller for each arm, one for a lower body,
etc.
Arguments:
itemindices (dict): a map from items to indices
outitem (str): the name of the output
flatten (bool, optional): true if you want a list rather than a dict,
in which case the indices are assumed to be all integers. Empty
indices are filled in with 'self.fill'. By default this produces
a vector.
"""
def __init__(self,itemindices,outitem,flatten=True):
self.itemindices = itemindices
self.outitem = outitem
self.flatten = flatten
self.fill = 0.0
def advance(self,**inputs):
res = {}
for (k,v) in self.itemindices.items():
try:
for index in v:
res[index] = inputs[k][index]
except KeyError:
print("ComposeController: Warning, item",k,"does not exist in index")
pass
if self.flatten:
inds = sorted(res.keys())
vres = [self.fill]*(max(inds)+1)
for (k,v) in res.items():
vres[k] = v
return {self.outitem:vres}
else:
return {self.outitem:res}
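# Usage sketch (hedged): merging per-limb command vectors into one flat vector
# with ComposeController. The item names and index assignments are illustrative
# assumptions.
#
#   c = ComposeController({'left':[0,1,2], 'right':[3,4,5]}, 'torquecmd')
#   c.advance(left=[1,1,1,0,0,0], right=[0,0,0,2,2,2])
#   # -> {'torquecmd': [1, 1, 1, 2, 2, 2]}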
class TimedSequenceBlock(TransitionStateMachine):
"""A state-machine controller that goes through each sub-controller
in sequence.
"""
def __init__(self,controllers,times):
assert len(times)==len(controllers)
trans = [{} for c in controllers]
for i in range(len(controllers)-1):
trans[i][i+1] = lambda input,i=i: input['t'] >= times[i]  # bind i now to avoid the late-binding closure bug
trans[-1][-1] = lambda input:input['t'] >= times[-1]
TransitionStateMachine.__init__(self,controllers,trans)
class SourceBlock(ControllerBlock):
"""Outputs the dictionary self.values, which may be constant or written to
by an external process (that is, code outside of the ControllerBlock
architecture).
"""
def __init__(self,values=None):
if values is None:
values = dict()
self.values = values
def outputNames(self):
return list(self.values.keys())
def inputNames(self):
return []
def advance(self,**inputs):
return self.values
def getState(self):
return self.values
def setState(self,state):
self.values = state
class SinkBlock(ControllerBlock):
"""Inputs to the dictionary self.values, which may be read by an external
process (that is, code outside of the ControllerBlock architecture).
"""
def __init__(self,values=None):
self.values = values
def outputNames(self):
return None
def advance(self,**inputs):
self.values = inputs
return
def getState(self):
return self.values
def setState(self,state):
self.values = state
def signal(self,type,**inputs):
if type=='reset':
self.values = None
```
#### File: math/autodiff/kinematics_ad.py
```python
import numpy as np
from .ad import ADFunctionInterface,ADFunctionCall,ADTerminal,sum_
from . import math_ad,so3_ad,se3_ad
from .. import vectorops,so3,se3
from ...robotsim import RobotModel,RobotModelLink
class WorldPosition(ADFunctionInterface):
"""Autodiff wrapper of the link.getWorldPosition() function as a function
of robot configuration q.
"""
def __init__(self,link,localPos):
self.robot = link.robot()
self.link = link
self.localPos = localPos
def __str__(self):
return "kinematics.WorldPosition[%s,%s]"%(self.link.getName(),str(self.localPos))
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return 3
def eval(self,q):
self.robot.setConfig(q.tolist())
return np.array(self.link.getWorldPosition(self.localPos))
def derivative(self,arg,q):
assert arg == 0
self.robot.setConfig(q.tolist())
return np.array(self.link.getPositionJacobian(self.localPos))
def gen_derivative(self,arg,q):
if len(arg) == 1:
return self.derivative(arg[0],q)
elif len(arg) == 2:
self.robot.setConfig(q.tolist())
Hx,Hy,Hz = self.link.getPositionHessian(self.localPos)
return np.array([Hx,Hy,Hz])
else:
raise NotImplementedError()
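# Usage sketch (hedged): evaluating a WorldPosition function and its Jacobian.
# Loading "myrobot.rob" into a WorldModel is an illustrative assumption; any
# RobotModel link works.
#
#   from klampt import WorldModel
#   world = WorldModel(); world.readFile("myrobot.rob")
#   robot = world.robot(0)
#   fk = WorldPosition(robot.link(robot.numLinks()-1), [0.0,0.0,0.0])
#   q = np.array(robot.getConfig())
#   fk.eval(q)            # 3-vector world position of the point
#   fk.derivative(0, q)   # 3 x numLinks position Jacobian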
class WorldDirection(ADFunctionInterface):
"""Autodiff wrapper of the link.getWorldDirection() function as a function
of robot configuration q.
"""
def __init__(self,link,localDir):
self.robot = link.robot()
self.link = link
self.localDir = localDir
def __str__(self):
return "kinematics.WorldDirection[%s,%s]"%(self.link.getName(),str(self.localDir))
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return 3
def eval(self,q):
self.robot.setConfig(q.tolist())
return np.array(self.link.getWorldDirection(self.localDir))
def derivative(self,arg,q):
assert arg == 0
self.robot.setConfig(q.tolist())
Jo = self.link.getOrientationJacobian()
for i in range(3):
Jo[i] = np.array(Jo[i])
return np.array(vectorops.cross(Jo,self.localDir))
def gen_derivative(self,arg,q):
if len(arg) == 1:
return self.derivative(arg[0],q)
elif len(arg) == 2:
self.robot.setConfig(q.tolist())
Hx,Hy,Hz = self.link.getOrientationHessian()
Hx = np.array(Hx)
Hy = np.array(Hy)
Hz = np.array(Hz)
return np.array(vectorops.cross([Hx,Hy,Hz],self.localDir))
else:
raise NotImplementedError()
class WorldOrientation(ADFunctionInterface):
"""Autodiff wrapper of the link.getTransform()[0] function as a function of
robot configuration q.
"""
def __init__(self,link):
self.robot = link.robot()
self.link = link
def __str__(self):
return "kinematics.WorldOrientation[%s]"%(self.link.getName(),)
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return 9
def eval(self,q):
self.robot.setConfig(q.tolist())
return np.array(self.link.getTransform()[0])
def derivative(self,arg,q):
assert arg == 0
self.robot.setConfig(q.tolist())
Jo = self.link.getOrientationJacobian()
return _cross_product_twiddle(Jo)
def gen_derivative(self,arg,q):
if len(arg) == 1:
return self.derivative(arg[0],q)
elif len(arg) == 2:
self.robot.setConfig(q.tolist())
Hx,Hy,Hz = self.link.getOrientationHessian()
return _cross_product_twiddle([Hx,Hy,Hz])
else:
raise NotImplementedError()
class WorldTransform(ADFunctionInterface):
"""Autodiff wrapper of the link.getTransform() function as a function of robot
configuration q.
"""
def __init__(self,link,localPos=None):
self.robot = link.robot()
self.link = link
self.localPos = localPos
def __str__(self):
if self.localPos is not None:
return "kinematics.WorldTransform[%s,%s]"%(self.link.getName(),self.localPos)
return "kinematics.WorldTransform[%s]"%(self.link.getName(),)
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return 12
def eval(self,q):
self.robot.setConfig(q.tolist())
T = self.link.getTransform()
if self.localPos is not None:
T = (T[0],vectorops.add(so3.apply(T[0],self.localPos),T[1]))
return np.array(T[0]+T[1])
def derivative(self,arg,q):
assert arg == 0
self.robot.setConfig(q.tolist())
J = self.link.getJacobian([0]*3 if self.localPos is None else self.localPos)
return np.vstack([_cross_product_twiddle(J[:3])]+[J[3:]])
def gen_derivative(self,arg,q):
if len(arg) == 1:
return self.derivative(arg[0],q)
elif len(arg) == 2:
self.robot.setConfig(q.tolist())
Hx,Hy,Hz = self.link.getPositionHessian([0]*3 if self.localPos is None else self.localPos)
Hox,Hoy,Hoz = self.link.getOrientationHessian()
Hox = np.array(Hox)
Hoy = np.array(Hoy)
Hoz = np.array(Hoz)
return np.vstack([_cross_product_twiddle([Hox,Hoy,Hoz])]+[[Hx,Hy,Hz]])
else:
raise NotImplementedError()
class WorldVelocity(ADFunctionInterface):
"""Autodiff wrapper of the link.getPointVelocity() function as a function
of robot configuration q and velocity dq.
"""
def __init__(self,link,localPos):
self.robot = link.robot()
self.link = link
self.localPos = localPos
def __str__(self):
return "kinematics.WorldVelocity[%s,%s]"%(self.link.getName(),str(self.localPos))
def n_args(self):
return 2
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return 3
def eval(self,q,dq):
self.robot.setConfig(q.tolist())
self.robot.setVelocity(dq.tolist())
return np.array(self.link.getPointVelocity(self.localPos))
def derivative(self,arg,q,dq):
if arg == 1:
self.robot.setConfig(q.tolist())
return np.array(self.link.getPositionJacobian(self.localPos))
else:
self.robot.setVelocity(dq.tolist())
Hx,Hy,Hz = self.link.getPositionHessian(self.localPos)
return np.row_stack([np.dot(Hx,dq),np.dot(Hy,dq),np.dot(Hz,dq)])
class WorldAngularVelocity(ADFunctionInterface):
"""Autodiff wrapper of the link.getAngularVelocity() function, as a
function of robot configuration q and velocity dq.
"""
def __init__(self,link):
self.robot = link.robot()
self.link = link
def __str__(self):
return "kinematics.WorldAngularVelocity[%s]"%(self.link.getName(),)
def n_args(self):
return 2
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return 3
def eval(self,q,dq):
self.robot.setConfig(q.tolist())
self.robot.setVelocity(dq.tolist())
return np.array(self.link.getAngularVelocity())
def derivative(self,arg,q,dq):
if arg == 1:
self.robot.setConfig(q.tolist())
return np.array(self.link.getOrientationJacobian())
else:
self.robot.setVelocity(dq.tolist())
Hx,Hy,Hz = self.link.getOrientationHessian()
return np.row_stack([np.dot(Hx,dq),np.dot(Hy,dq),np.dot(Hz,dq)])
class DriversToLinks(ADFunctionInterface):
"""Autodiff function to convert driver values to link values."""
def __init__(self,robot):
self.robot = robot
self.drivers = [robot.driver(i) for i in range(robot.numDrivers())]
def __str__(self):
return "kinematics.DriversToLinks[%s]"%(self.robot.getName(),)
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numDrivers()
def n_out(self):
return self.robot.numLinks()
def eval(self,qdriver):
for driver,q in zip(self.drivers,qdriver):
driver.setValue(q)
return np.array(self.robot.getConfig())
def jvp(self,arg,dqdriver,qdriver):
for driver,q,v in zip(self.drivers,qdriver,dqdriver):
driver.setValue(q)
driver.setVelocity(v)
return np.array(self.robot.getVelocity())
class DriverDerivsToLinks(ADFunctionInterface):
"""Autodiff function to convert driver velocities to link velocities."""
def __init__(self,robot):
self.robot = robot
self.drivers = [robot.driver(i) for i in range(robot.numDrivers())]
def __str__(self):
return "kinematics.DriverDerivsToLinks[%s]"%(self.robot.getName(),)
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numDrivers()
def n_out(self):
return self.robot.numLinks()
def eval(self,vdriver):
for driver,v in zip(self.drivers,vdriver):
driver.setVelocity(v)
return np.array(self.robot.getConfig())
def jvp(self,arg,dvdriver,vdriver):
for driver,q,v in zip(self.drivers,vdriver,dvdriver):
driver.setVelocity(v)
return np.array(self.robot.getVelocity())
class LinksToDrivers(ADFunctionInterface):
"""Autodiff function to convert link values to driver values."""
def __init__(self,robot):
self.robot = robot
self.drivers = [robot.driver(i) for i in range(robot.numDrivers())]
def __str__(self):
return "kinematics.LinksToDrivers[%s]"%(self.robot.getName(),)
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return self.robot.numDrivers()
def eval(self,q):
self.robot.setConfig(q)
return np.array([driver.getValue() for driver in self.drivers])
def jvp(self,arg,dq,q):
self.robot.setConfig(q)
self.robot.setVelocity(dq)
return np.array([driver.getVelocity() for driver in self.drivers])
class LinkDerivsToDrivers(ADFunctionInterface):
"""Autodiff function to convert link velocities to driver velocities."""
def __init__(self,robot):
self.robot = robot
self.drivers = [robot.driver(i) for i in range(robot.numDrivers())]
def __str__(self):
return "kinematics.LinkDerivsToDrivers[%s]"%(self.robot.getName(),)
def n_args(self):
return 1
def n_in(self,arg):
return self.robot.numLinks()
def n_out(self):
return self.robot.numDrivers()
def eval(self,v):
self.robot.setVelocity(v)
return np.array([driver.getVelocity() for driver in self.drivers])
def jvp(self,arg,dv,v):
self.robot.setVelocity(dv)
return np.array([driver.getVelocity() for driver in self.drivers])
class ConfigInterpolate(ADFunctionInterface):
"""Autodiff wrapper of the RobotModel.interpolate function"""
def __init__(self,robot):
self.robot = robot
def __str__(self):
return "kinematics.ConfigInterpolate[%s]"%(self.robot.getName(),)
def n_args(self):
return 3
def n_in(self,arg):
if arg <= 1:
return self.robot.numLinks()
return 1
def n_out(self):
return self.robot.numLinks()
def eval(self,a,b,u):
return np.array(self.robot.interpolate(a,b,u))
def _cross_product_twiddle(J):
"""Does the same thing as so3.cross_product, but with a matrix"""
assert len(J) == 3
n = len(J[0])
J = [np.asarray(row) for row in J]
res = np.empty((9,)+J[0].shape,dtype=float)
res[0,:] = 0
res[1,:] = J[2]
res[2,:] = -J[1]
res[3,:] = -J[2]
res[4,:] = 0
res[5,:] = J[0]
res[6,:] = J[1]
res[7,:] = -J[0]
res[8,:] = 0
return res
class KinematicsBuilder:
"""A class that computes the entire computation graph of forward kinematics
and caches it so that multiple queries are auto-diffable and share the same
intermediate computations.
Args:
robot (RobotModel): the robot
configuration (array, AD expression, or list of expressions, optional):
the robot's configuration, either as a fixed configuration or a
variable. By default, this is fixed at the robot's configuration.
velocity (array, AD expression, or list of expressions, optional): if
given, the X_velocity methods are available. This gives the
robot's velocity, either as a fixed vector or a variable. By
default, no velocity expression tree is created.
relative_transform (array or list of AD so3 expressions, optional): if
given, the relative transforms of the robot's links. By default
these are taken from the robot model.
axes (array or list of AD R^3 expressions, optional): if given, the
axes relative transforms of the robot's links. By default these are
taken from the robot model.
Example::
kb = KinematicsBuilder(robot,'q','dq')
print(kb.world_position(robot.numLinks()-1))
print(kb.world_velocity(robot.numLinks()-1))
"""
def __init__(self,robot,configuration='fixed',velocity=None,relative_transforms='fixed',axes='fixed'):
if configuration == 'fixed':
configuration = robot.getConfig()
else:
if isinstance(configuration,str):
configuration = ADTerminal(configuration)
if not isinstance(configuration,ADTerminal) and not isinstance(configuration,ADFunctionCall):
assert len(configuration) == robot.numLinks()
if relative_transforms == 'fixed':
relative_transforms = []
for i in range(robot.numLinks()):
T = robot.link(i).getParentTransform()
relative_transforms.append(np.array(T[0] + T[1]))
else:
assert len(relative_transforms) == robot.numLinks()
if axes == 'fixed':
axes = [np.array(robot.link(i).getAxis()) for i in range(robot.numLinks())]
else:
assert len(axes) == robot.numLinks()
self.robot = robot
self.axes = axes
self.link_transforms = []
self.link_rotations = []
self.link_positions = []
self.link_inv_transforms = []
self.link_inv_rotations = []
for i in range(robot.numLinks()):
link = robot.link(i)
p = link.getParent()
q = configuration[i]
axis = axes[i]
Trel = relative_transforms[i]
if link.isPrismatic():
link_t = Trel[9:] + q*axis
link_R = Trel[:9]
else:
link_t = Trel[9:]
Rq = so3_ad.from_axis_angle(axis,q)
link_R = so3_ad.mul(Trel[:9],Rq)
assert p < i
if p < 0:
self.link_positions.append(link_t)
self.link_rotations.append(link_R)
self.link_transforms.append(se3_ad.join(self.link_rotations[-1],self.link_positions[-1]))
else:
self.link_positions.append(se3_ad.apply(self.link_transforms[p],link_t))
self.link_rotations.append(so3_ad.mul(self.link_rotations[p],link_R))
self.link_transforms.append(se3_ad.mul(self.link_transforms[p],se3_ad.join(link_R,link_t)))
self.link_inv_transforms.append(se3_ad.inv(self.link_transforms[-1]))
self.link_inv_rotations.append(so3_ad.inv(self.link_rotations[-1]))
if velocity is not None:
if velocity == 'fixed':
velocity = robot.getVelocity()
if isinstance(velocity,str):
velocity = ADTerminal(velocity)
if not isinstance(velocity,ADTerminal) and not isinstance(velocity,ADFunctionCall):
assert len(velocity) == robot.numLinks()
self.link_world_axes = [so3_ad.apply(self.link_rotations[i],axes[i]) for i in range(robot.numLinks())]
self.link_velocities = []
self.link_angular_velocities = []
for i in range(robot.numLinks()):
link = robot.link(i)
p = link.getParent()
world_axis = self.link_world_axes[i]
angvel_terms = []
vel_terms = []
if link.isRevolute():
angvel_terms.append(world_axis*velocity[i])
else:
vel_terms.append(world_axis*velocity[i])
while p >= 0:
link = robot.link(p)
world_axis = self.link_world_axes[p]
if link.isRevolute():
angvel_terms.append(world_axis*velocity[p])
vel_terms.append(math_ad.cross(world_axis,self.link_positions[i]-self.link_positions[p])*velocity[p])
else:
vel_terms.append(world_axis*velocity[p])
p = link.getParent()
if len(vel_terms) == 0:
vel = np.zeros(3)
else:
vel = sum_(*vel_terms)
if len(angvel_terms) == 0:
angvel = np.zeros(3)
else:
angvel = sum_(*angvel_terms)
self.link_velocities.append(vel)
self.link_angular_velocities.append(angvel)
#done with velocities
def _link_index(self,link):
if isinstance(link,RobotModelLink):
return link.index
if isinstance(link,str):
return self.robot.link(link).index
return link
def world_transform(self,link):
"""Returns an autodiff expression for the transform of the given link.
Expression evaluates to a 12-D se3_ad array.
"""
link = self._link_index(link)
return self.link_transforms[link]
def world_position(self,link,localPos=None):
"""Returns an autodiff expression for the world position of the point
localPos on the given link. If localPos isn't given, the link origin's
position is returned.
"""
link = self._link_index(link)
if localPos is None:
return self.link_positions[link]
return se3_ad.apply(self.link_transforms[link],localPos)
def world_direction(self,link,localDir):
"""Returns an autodiff expression for the world direction of the
direction localDir on the given link.
"""
link = self._link_index(link)
return so3_ad.apply(self.link_rotations[link],localDir)
def world_orientation(self,link):
"""Returns an autodiff expression for the orientation of the given
link.
Expression evaluates to a 9-D so3_ad array.
"""
link = self._link_index(link)
return self.link_rotations[link]
def world_velocity(self,link,localPos=None):
"""Returns an autodiff expression for the world velocity of the point
localPos on the given link. If localPos isn't given, the link origin's
velocity is returned.
"""
link = self._link_index(link)
if localPos is None:
return self.link_velocities[link]
return self.link_velocities[link] + so3.apply(self.link_rotations[link],math_ad.cross(self.axes[link],localPos))
def world_angular_velocity(self,link):
"""Returns an autodiff expression for the world angular velocity of the
given link.
Expression evaluates to a 3-D array.
"""
link = self._link_index(link)
return self.link_angular_velocities[link]
def local_position(self,link,localPos):
link = self._link_index(link)
return se3_ad.apply(self.link_inv_transforms[link],localPos)
def local_direction(self,link,localDir):
link = self._link_index(link)
return so3_ad.apply(self.link_inv_rotations[link],localDir)
def inv_orientation(self,link):
link = self._link_index(link)
return self.link_inv_rotations[link]
```
#### File: klampt/math/symbolic_linalg.py
```python
from .symbolic import *
from . import vectorops
import numpy as np
import weakref
_x = Variable("x","V")
_y = Variable("y","V")
_A = Variable("A","M")
norm = Function('norm',np.linalg.norm,['x'],returnType='N')
norm.description = "Returns the L-2 norm of a vector"
norm2 = Function('norm2',dot(_x,_x),['x'],returnType='N')
norm_L1 = Function('norm_L1',lambda x:np.linalg.norm(x,ord=1),returnType='N')
_inf = float('inf')
norm_Linf = Function('norm_Linf',lambda x:np.linalg.norm(x,ord=_inf),returnType='N')
norm_fro = Function('norm_fro',np.linalg.norm,['A'],returnType='N')
norm2_fro = Function('norm2_fro',lambda x:np.linalg.norm(x)**2,['A'],returnType='N')
distance = Function('distance',norm(_x-_y),['x','y'],returnType='N')
distance2 = Function('distance2',norm2(_x-_y),['x','y'],returnType='N')
distance_L1 = Function('distance_L1',norm_L1(_x-_y),['x','y'],returnType='N')
distance_Linf = Function('distance_Linf',norm_Linf(_x-_y),['x','y'],returnType='N')
unit = Function('unit',vectorops.unit,['x'])
norm.argTypes = [Vector]
norm2.argTypes = [Vector]
norm_L1.argTypes = [Vector]
norm_Linf.argTypes = [Vector]
norm_fro.argTypes = [Matrix]
norm.properties['nonnegative'] = True
norm2.properties['nonnegative'] = True
norm_L1.properties['nonnegative'] = True
norm_Linf.properties['nonnegative'] = True
norm_fro.properties['nonnegative'] = True
norm2_fro.properties['nonnegative'] = True
norm.addSimplifier(['zero'],lambda x:0)
norm.addSimplifier(['unit'],lambda x:1)
norm2.addSimplifier(['zero'],lambda x:0)
norm2.addSimplifier(['unit'],lambda x:1)
norm_L1.addSimplifier(['zero'],lambda x:0)
norm_L1.addSimplifier(['basis'],lambda x:1)
norm_Linf.addSimplifier(['zero'],lambda x:0)
norm_Linf.addSimplifier(['basis'],lambda x:1)
norm.setDeriv(0,(lambda x,dx:dot(x,dx)/norm(x)),asExpr=True,stackable=True)
norm2.setDeriv(0,(lambda x,dx:dot(x,dx)*2),asExpr=True,stackable=True)
norm_L1.setDeriv(0,lambda x,dx:dot(sign(x),dx),asExpr=True,stackable=True)
def _norm_Linf_deriv(x,dx):
imax = argmax(abs_(x))
return sign(x[imax])*dx[imax]
norm_Linf.setDeriv(0,_norm_Linf_deriv,asExpr=True,stackable=True)
norm_fro.setDeriv(0,(lambda A,dA:mul(A,dA)/norm_fro(A)),asExpr=True,stackable=True)
norm2_fro.setDeriv(0,(lambda A,dA:mul(A,dA)*2),asExpr=True,stackable=True)
distance.argTypes = [Vector,Vector]
distance2.argTypes = [Vector,Vector]
distance_L1.argTypes = [Vector,Vector]
distance_Linf.argTypes = [Vector,Vector]
distance.properties['nonnegative'] = True
distance2.properties['nonnegative'] = True
distance_L1.properties['nonnegative'] = True
distance_Linf.properties['nonnegative'] = True
distance.addSimplifier(['zero',None],lambda x,y:norm(y))
distance.addSimplifier([None,'zero'],lambda x,y:norm(x))
distance2.addSimplifier(['zero',None],lambda x,y:norm2(y))
distance2.addSimplifier([None,'zero'],lambda x,y:norm2(x))
distance_L1.addSimplifier(['zero',None],lambda x,y:norm_L1(y))
distance_L1.addSimplifier([None,'zero'],lambda x,y:norm_L1(x))
distance_Linf.addSimplifier(['zero',None],lambda x,y:norm_Linf(y))
distance_Linf.addSimplifier([None,'zero'],lambda x,y:norm_Linf(x))
distance.setDeriv(0,(lambda x,y,dx:dot(x-y,dx)/distance(x,y)),asExpr=True,stackable=True)
distance.setDeriv(1,(lambda x,y,dy:dot(x-y,dy)/distance(x,y)),asExpr=True,stackable=True)
distance2.setDeriv(0,(lambda x,y,dx:dot(x-y,dx)*2),asExpr=True,stackable=True)
distance2.setDeriv(1,(lambda x,y,dy:dot(y-x,dy)*2),asExpr=True,stackable=True)
distance_L1.autoSetJacobians()
distance_Linf.autoSetJacobians()
linear = Function('linear',dot(_A,_x),["x","A"])
linear.argTypes = [Vector,Matrix]
linear.autoSetJacobians()
bilinear = Function('bilinear',dot(_x,dot(_A,_y)),["x","A","y"])
bilinear.argTypes = [Vector,Matrix,Vector]
bilinear.setJacobian(0,lambda x,A,y:dot(A,y),asExpr=True)
bilinear.setJacobian(1,lambda x,A,y:outer(x,y),asExpr=True)
bilinear.setJacobian(2,lambda x,A,y:dot(A,x),asExpr=True)
quadratic = Function('quadratic',dot(_x,dot(_A,_x)),["x","A"],returnType='N')
quadratic.argTypes = [Vector,Matrix]
quadratic.setJacobian('x',lambda x,A:2*dot(x,A),asExpr=True)
quadratic.setJacobian('A',lambda x,A:outer(x,x),asExpr=True)
mahalanobis_distance2 = Function('mahalanobis_distance2',quadratic(_x-_y,_A),['x','y','A'])
mahalanobis_distance2.autoSetJacobians()
mahalanobis_distance = Function('mahalanobis_distance',sqrt(mahalanobis_distance2(_x,_y,_A)),['x','y','A'])
mahalanobis_distance.autoSetJacobians()
unit.setDeriv(0,lambda x,dx:if_(x==0,zero(shape(x)),dx/norm(x)-x*dot(x,dx)/norm(x)**3),stackable=True)
inv = Function('inv',np.linalg.inv,['A'],returnType='M')
inv.argTypes = [Matrix]
def _inv_deriv(A,dA):
Ainv = inv(A)
return -dot(Ainv,dot(dA,Ainv))
inv.properties['inverse'] = weakref.proxy(inv)
#patch the dot function
dot.addSimplifier(['inv','inv'],lambda Ainv,Binv:inv(dot(Binv.args[0],Ainv.args[0])))
dot.addSimplifier(['linalg.inv','linalg.inv'],lambda Ainv,Binv:inv(dot(Binv.args[0],Ainv.args[0])))
inv.addSimplifier(['neg'],lambda x:-inv(x.args[0]))
inv.setDeriv(0,_inv_deriv,asExpr=True)
inv.printers['str'] = lambda expr,astr:astr[0]+'^-1'
pinv = Function('pinv',np.linalg.pinv,['A'],returnType='M')
pinv.argTypes = [Matrix]
pinv.addSimplifier(['neg'],lambda x:-pinv(x.args[0]))
pinv.printers['str'] = lambda expr,astr:astr[0]+'^+'
def _pinv_deriv(A,dA):
Ainv = pinv(A)
return -dot(Ainv,dot(dA,Ainv))
pinv.setDeriv(0,_pinv_deriv,asExpr=True)
def _bound_contains(xmin,xmax,x):
return all(a <= v and v <= b for v,a,b in zip(x,xmin,xmax))
def _bound_overlaps(xmin,xmax,ymin,ymax):
for a,b,c,d in zip(xmin,xmax,ymin,ymax):
if d < a or c > b: return False
if b < c or a > d: return False
return True
def _bound_margin(xmin,xmax,x):
return min(min(v-a,b-v) for v,a,b in zip(x,xmin,xmax))
bound_contains = Function('bound_contains',_bound_contains,returnType = 'B')
bound_overlaps = Function('bound_overlaps',_bound_overlaps,returnType = 'B')
bound_margin = Function('bound_margin',_bound_margin,returnType = 'N')
bound_contains.argTypes = [Vector]*3
bound_overlaps.argTypes = [Vector]*4
bound_margin.argTypes = [Vector]*3
#bound_margin = Function('bound_margin',min_(min_(Variable('x','V',None)-Variable('xmin','V',None)),min_(Variable('xmax','V',None)-Variable('x','V',None))),['xmin','xmax','x'],returnType = 'N')
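# Usage sketch (hedged): the plain helpers above can be called directly, and
# the Function wrappers expose the same operations to symbolic expressions.
# The numeric values are illustrative.
#
#   _bound_contains([0,0],[1,1],[0.5,0.25])   # -> True
#   _bound_margin([0,0],[1,1],[0.5,0.25])     # -> 0.25 (distance to nearest face)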
class LinAlgContext(Context):
def __init__(self):
Context.__init__(self)
self.norm = self.declare(norm)
self.norm2 = self.declare(norm2)
self.norm_L1 = self.declare(norm_L1)
self.norm_Linf = self.declare(norm_Linf)
self.distance = self.declare(distance)
self.distance2 = self.declare(distance2)
self.distance_L1 = self.declare(distance_L1)
self.distance_Linf = self.declare(distance_Linf)
self.unit = self.declare(unit)
self.linear = self.declare(linear)
self.bilinear = self.declare(bilinear)
self.quadratic = self.declare(quadratic)
self.mahalanobis_distance = self.declare(mahalanobis_distance)
self.mahalanobis_distance2 = self.declare(mahalanobis_distance2)
self.inv = self.declare(inv)
self.pinv = self.declare(pinv)
self.bound_contains = self.declare(bound_contains)
self.bound_overlaps = self.declare(bound_overlaps)
self.bound_margin = self.declare(bound_margin)
```
#### File: klampt/model/geometry.py
```python
from ..robotsim import Geometry3D,PointCloud
import math
from .create import primitives
from ..math import vectorops,so3,se3
_has_numpy = False
_tried_numpy_import = False
np = None
_has_scipy = False
_tried_scipy_import = False
sp = None
box = primitives.box
"""Alias for :func:`klampt.model.create.primitives.box`"""
sphere = primitives.sphere
"""Alias for :func:`klampt.model.create.primitives.sphere`"""
def _try_numpy_import():
global _has_numpy,_tried_numpy_import
global np
if _tried_numpy_import:
return _has_numpy
_tried_numpy_import = True
try:
import numpy as np
_has_numpy = True
#sys.modules['numpy'] = numpy
except ImportError:
import warnings
warnings.warn("klampt.model.geometry.py: numpy not available.",ImportWarning)
_has_numpy = False
return _has_numpy
def _try_scipy_import():
global _has_scipy,_tried_scipy_import
global sp
if _tried_scipy_import:
return _has_scipy
_tried_scipy_import = True
try:
import scipy as sp
_has_scipy = True
#sys.modules['scipy'] = scipy
except ImportError:
import warnings
warnings.warn("klampt.model.geometry.py: scipy not available.",ImportWarning)
_has_scipy = False
return _has_scipy
class PlaneFitter:
"""
Online fitting of planes through 3D point clouds
Attributes:
normal (3-vector): best-fit normal
centroid (3-vector): centroid of points
count (int): # of points
sse (float): fitting sum of squared errors
cov (3x3 array): covariance of points
"""
def __init__(self,points=None):
_try_numpy_import()
if points is None:
self.count = 0
self.centroid = np.zeros(3)
self.cov = np.zeros((3,3))
self.normal = np.array([0,0,1])
self.sse = 0
else:
self.count = len(points)
self.centroid = np.average(points,axis=0)
pprime = points - [self.centroid]*len(points)
self.cov = np.dot(pprime.T,pprime)/self.count
self._update_plane()
def plane_equation(self):
"""Returns (a,b,c,d) with ax+by+cz+d=0 the plane equation"""
offset = np.dot(self.centroid,self.normal)
return (self.normal[0],self.normal[1],self.normal[2],-offset)
def goodness_of_fit(self):
"""Returns corrected RMSE"""
if self.count <= 3:
return float('inf')
return math.sqrt(self.sse*self.count / (self.count-3))
def add_point(self,pt):
"""Online estimation of best fit plane"""
new_count = self.count + 1
new_centroid = self.centroid + (pt-self.centroid)/new_count
old_sse = (self.cov + np.outer(self.centroid,self.centroid))*self.count
new_sse = old_sse + np.outer(pt,pt)
new_cov = new_sse/new_count - np.outer(new_centroid,new_centroid)
self.count = new_count
self.centroid = new_centroid
self.cov = new_cov
self._update_plane()
def merge(self,fitter,inplace = False):
"""Online merging of two plane fitters.
If inplace = False, returns a new PlaneFitter.
If inplace = True, self is updated with the result.
"""
if not inplace:
res = PlaneFitter()
else:
res = self
new_count = self.count + fitter.count
old_sum = self.centroid*self.count
new_sum = old_sum + fitter.centroid*fitter.count
new_centroid = new_sum/new_count
old_sse = (self.cov + np.outer(self.centroid,self.centroid))*self.count
fitter_sse = (fitter.cov + np.outer(fitter.centroid,fitter.centroid))*fitter.count
new_sse = old_sse + fitter_sse
new_cov = new_sse/new_count - np.outer(new_centroid,new_centroid)
res.count = new_count
res.centroid = new_centroid
res.cov = new_cov
res._update_plane()
return res
def distance(self,pt):
"""Returns the signed distance to this plane"""
return np.dot(self.normal,pt)-np.dot(self.normal,self.centroid)
def _update_plane(self):
w,v = np.linalg.eig(self.cov)
index = np.argmin(w)
self.normal = v[:,index]
self.sse = self.count * np.dot(self.normal,np.dot(self.cov,self.normal))
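# Usage sketch (hedged): fitting a plane to a few coplanar points and updating
# it online. The points are illustrative; only numpy is required.
#
#   import numpy as np
#   pts = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,0]], dtype=float)
#   f = PlaneFitter(pts)
#   f.plane_equation()        # ~ (0, 0, +/-1, 0): the z = 0 plane
#   f.add_point(np.array([2.0, 2.0, 0.0]))
#   f.goodness_of_fit()       # ~ 0 for exactly coplanar points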
def point_cloud_simplify(pc,radius):
"""Simplifies a point cloud by averaging points within neighborhoods. Uses
a fast hash grid data structure.
Args:
pc (Geometry3D or PointCloud): the point cloud
radius (float): the neighborhood radius.
"""
if radius <= 0:
raise ValueError("radius must be > 0")
if isinstance(pc,Geometry3D):
assert pc.type() == 'PointCloud',"Must provide a point cloud to point_cloud_simplify"
return pc.convert('PointCloud',radius)
else:
return Geometry3D(pc).convert('PointCloud',radius).getPointCloud()
def point_cloud_normals(pc,estimation_radius=None,estimation_knn=None,estimation_viewpoint=None,add=True):
"""Returns the normals of the point cloud. If pc has the standard
``normal_x, normal_y, normal_z`` properties, these will be returned.
Otherwise, they will be estimated using plane fitting.
The plane fitting method uses scipy nearest neighbor detection if
scipy is available. Otherwise it uses a spatial grid. The process is as
follows:
- If ``estimation_radius`` is provided, then it will use neighbors within
this range. For a spatial grid, this is the grid size.
- If ``estimation_knn`` is provided, then planes will be fit to these
number of neighbors.
- If neither is provided, then estimation_radius is set to 3 * max
dimension of the point cloud / sqrt(N).
- If not enough points are within a neighborhood (either 4 or
``estimation_knn``, whichever is larger), then the normal is set to 0.
- If ``estimation_viewpoint`` is provided, this must be a 3-list. The
normals are oriented such that they point toward the viewpoint.
Returns:
A list of N 3-lists, or an N x 3 numpy array if numpy is available.
If ``add=True``, estimated normals will be added to the point cloud
under the ``normal_x, normal_y, normal_z`` properties.
"""
geom = None
if isinstance(pc,Geometry3D):
assert pc.type() == 'PointCloud',"Must provide a point cloud to point_cloud_normals"
geom = pc
pc = pc.getPointCloud()
assert isinstance(pc,PointCloud)
inds = [-1,-1,-1]
props = ['normal_x','normal_y','normal_z']
for i in range(pc.numProperties()):
try:
ind = props.index(pc.propertyNames[i])
inds[ind] = i
except ValueError:
pass
if all(i>=0 for i in inds):
#has the properties!
normal_x = pc.getProperties(inds[0])
normal_y = pc.getProperties(inds[1])
normal_z = pc.getProperties(inds[2])
if _has_numpy:
return np.array([normal_x,normal_y,normal_z]).T
else:
return list(zip(normal_x,normal_y,normal_z))
if not all(i < 0 for i in inds):
raise ValueError("Point cloud has some normal components but not all of them?")
#need to estimate normals
_try_numpy_import()
_try_scipy_import()
N = len(pc.vertices)//3
if not _has_numpy:
raise RuntimeError("Need numpy to perform plane fitting")
positions = np.array(pc.vertices)
positions = positions.reshape((N,3))
if estimation_radius is None and estimation_knn is None:
R = max(positions.max(axis=0)-positions.min(axis=0))
estimation_radius = 3*R/math.sqrt(N)
if estimation_knn is None or estimation_knn < 4:
estimation_knn = 4
normals = []
if _has_scipy:
import scipy.spatial
tree = scipy.spatial.cKDTree(positions)
if estimation_radius is not None:
neighbors = tree.query_ball_point(positions,estimation_radius)
for n in neighbors:
if len(n) < estimation_knn:
normals.append([0,0,0])
else:
#fit a plane to neighbors
normals.append(fit_plane([positions[i] for i in n])[:3])
else:
d,neighbors = tree.query(positions,estimation_knn)
for n in neighbors:
normals.append(fit_plane([positions[i] for i in n])[:3])
else:
if estimation_radius is None:
raise ValueError("Without scipy, can't do a k-NN plane estimation")
#do a spatial hash
normals = np.zeros((N,3))
indices = (positions * (1.0/estimation_radius)).astype(int)
from collections import defaultdict
pt_hash = defaultdict(list)
for i,(ind,p) in enumerate(zip(indices,positions)):
pt_hash[tuple(ind)].append((i,p))
successful = 0
for (ind,iplist) in pt_hash.items():
if len(iplist) < estimation_knn:
pass
else:
pindices = [ip[0] for ip in iplist]
pts = [ip[1] for ip in iplist]
n = fit_plane(pts)[:3]
normals[pindices,:] = n
successful += len(pindices)
normals = np.asarray(normals)
if estimation_viewpoint is not None:
#flip back-facing normals
disp = positions - estimation_viewpoint
for i,(n,d) in enumerate(zip(normals,disp)):
if np.dot(n,d) < 0:
normals[i,:] = -n
else:
#flip back-facing normals assuming centroid is interior
centroid = np.average(positions,axis=0)
for i,(n,p) in enumerate(zip(normals,positions)):
if np.dot(n,p-centroid) < 0:
normals[i,:] = -n
if add:
normal_x = normals[:,0].tolist()
normal_y = normals[:,1].tolist()
normal_z = normals[:,2].tolist()
pc.addProperty('normal_x',normal_x)
pc.addProperty('normal_y',normal_y)
pc.addProperty('normal_z',normal_z)
if geom is not None:
geom.setPointCloud(pc)
return normals
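# Usage sketch (hedged) for point_cloud_normals: `pc` is assumed to be a
# klampt PointCloud (or a Geometry3D wrapping one) obtained elsewhere, e.g.
# from a depth sensor; the radius and viewpoint are illustrative.
#
#   normals = point_cloud_normals(pc, estimation_radius=0.05,
#                                 estimation_viewpoint=[0,0,0], add=True)
#   # normals is N x 3 (numpy) or a list of 3-lists; with add=True the
#   # normal_x/normal_y/normal_z properties are also written back into pc.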
def fit_plane3(point1,point2,point3):
"""Returns a 3D plane equation fitting the 3 points.
The result is (a,b,c,d) with the plane equation ax+by+cz+d=0
"""
_try_numpy_import()
normal = np.cross(point2-point1,point3-point1)
nlen = np.linalg.norm(normal)
if nlen < 1e-4:
#degenerate
raise ValueError("Points are degenerate")
normal = normal / nlen
offset = -np.dot(normal,point1)
return (normal[0],normal[1],normal[2],offset)
def fit_plane(points):
"""Returns a 3D plane equation that is a least squares fit
through the points (len(points) >= 3)."""
centroid,normal = fit_plane_centroid(points)
return normal[0],normal[1],normal[2],-vectorops.dot(centroid,normal)
def fit_plane_centroid(points):
"""Similar to :func:`fit_plane`, but returns a (centroid,normal) pair."""
if len(points)<3:
raise ValueError("Need to have at least 3 points to fit a plane")
#if len(points)==3:
# return fit_plane3(points[0],points[1],points[2])
_try_numpy_import()
points = np.asarray(points)
centroid = np.average(points,axis=0)
U,W,Vt = np.linalg.svd(points-[centroid]*len(points),full_matrices=False)
if np.sum(W<1e-6) > 1:
raise ValueError("Point set is degenerate")
normal = Vt[2,:]
return centroid.tolist(),normal.tolist()
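# Usage sketch (hedged): least-squares plane through points lying on z = 1.
# The points are illustrative.
#
#   a,b,c,d = fit_plane([[0,0,1],[1,0,1],[0,1,1],[2,3,1]])
#   # normal ~ (0, 0, +/-1) and a*x + b*y + c*z + d = 0 recovers z = 1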
def _color_format_from_uint8_channels(format,r,g,b,a=None):
import numpy as np
if a is None:
a = 0xff
if format == 'rgb':
return np.bitwise_or.reduce((np.left_shift(r,16),np.left_shift(g,8),b)).tolist()
elif format == 'bgr':
return np.bitwise_or.reduce((np.left_shift(b,16),np.left_shift(g,8),r)).tolist()
elif format=='rgba':
return np.bitwise_or.reduce((np.left_shift(r,24),np.left_shift(g,16),np.left_shift(b,8),a)).tolist()
elif format=='bgra':
return np.bitwise_or.reduce((np.left_shift(b,24),np.left_shift(g,16),np.left_shift(r,8),a)).tolist()
elif format=='argb':
return np.bitwise_or.reduce((np.left_shift(a,24),np.left_shift(r,16),np.left_shift(g,8),b)).tolist()
elif format=='abgr':
return np.bitwise_or.reduce((np.left_shift(a,24),np.left_shift(b,16),np.left_shift(g,8),r)).tolist()
elif format=='channels':
one_255 = 1.0/255.0
if not hasattr(a,'__iter__'):
return (r*one_255).tolist(),(g*one_255).tolist(),(b*one_255).tolist()
else:
return (r*one_255).tolist(),(g*one_255).tolist(),(b*one_255).tolist(),(a*one_255).tolist()
elif format=='opacity':
one_255 = 1.0/255.0
if not hasattr(a,'__iter__'):
return np.ones(len(r))
return (a*one_255).tolist()
elif tuple(format)==('r','g','b'):
one_255 = 1.0/255.0
return np.column_stack((r*one_255,g*one_255,b*one_255)).tolist()
elif tuple(format)==('r','g','b','a'):
one_255 = 1.0/255.0
if not hasattr(a,'__iter__'):
a = np.full(len(r),a)
return np.column_stack((r*one_255,g*one_255,b*one_255,a*one_255)).tolist()
else:
raise ValueError("Invalid format specifier "+str(format))
def _color_format_to_uint8_channels(format,colors):
import numpy as np
if format=='channels':
return tuple((np.asarray(c)*255).astype(np.uint8).tolist() for c in colors)
colors = np.asarray(colors)
if format == 'rgb':
r,g,b = np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist()
elif format == 'bgr':
b,g,r = np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist()
elif format=='rgba':
r,g,b,a = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
elif format=='bgra':
b,g,r,a = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
elif format=='argb':
a,r,g,b = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
elif format=='abgr':
a,b,g,r = np.right_shift(np.bitwise_and(colors,0xff000000),24),np.right_shift(np.bitwise_and(colors,0xff0000),16),np.right_shift(np.bitwise_and(colors,0xff00),8),np.bitwise_and(colors,0xff)
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
elif format=='opacity':
r = [0xff]*len(colors)
return r,r,r,(colors*255).astype(np.uint8).tolist()
elif tuple(format)==('r','g','b'):
colors = (colors*255).astype(np.uint8)
r = colors[:,0]
g = colors[:,1]
b = colors[:,2]
return r.tolist(),g.tolist(),b.tolist()
elif tuple(format)==('r','g','b','a'):
colors = (colors*255).astype(np.uint8)
r = colors[:,0]
g = colors[:,1]
b = colors[:,2]
a = colors[:,3]
return r.tolist(),g.tolist(),b.tolist(),a.tolist()
else:
raise ValueError("Invalid format specifier "+str(format))
def point_cloud_colors(pc,format='rgb'):
"""Returns the colors of the point cloud in the given format. If the
point cloud has no colors, this returns None. If the point cloud has no
colors but has opacity, this returns white colors.
Args:
pc (PointCloud): the point cloud
format: describes the output color format, either:
- 'rgb': packed 24bit int, with the hex format 0xrrggbb,
- 'bgr': packed 24bit int, with the hex format 0xbbggrr,
- 'rgba': packed 32bit int, with the hex format 0xrrggbbaa,
- 'bgra': packed 32bit int, with the hex format 0xbbggrraa,
- 'argb': packed 32bit int, with the hex format 0xaarrggbb,
- 'abgr': packed 32bit int, with the hex format 0xaabbggrr,
- ('r','g','b'): triple with each channel in range [0,1]
- ('r','g','b','a'): tuple with each channel in range [0,1]
- 'channels': returns a list of channels, in the form (r,g,b) or
(r,g,b,a), where each value in the channel has range [0,1].
- 'opacity': returns opacity only, in the range [0,1].
Returns:
list: A list of pc.numPoints() colors corresponding to the points
in the point cloud. If format='channels', the return value is
a tuple (r,g,b) or (r,g,b,a).
"""
rgbchannels = []
alphachannel = None
for i,prop in enumerate(pc.propertyNames):
if prop in ['r','g','b','rgb']:
rgbchannels.append((prop,i))
elif prop == 'rgba':
rgbchannels.append((prop,i))
if alphachannel is None:
alphachannel = (prop,i)
elif prop in ['opacity','a','c']:
if alphachannel is None:
alphachannel = (prop,i)
if len(rgbchannels)==0 and alphachannel is None:
return
if len(rgbchannels)==1:
rgb = pc.getProperties(rgbchannels[0][1])
if format == rgbchannels[0][0]:
return rgb
import numpy as np
rgb = np.array(rgb,dtype=int)
r = np.right_shift(np.bitwise_and(rgb,0xff0000),16)
g = np.right_shift(np.bitwise_and(rgb,0xff00),8)
b = np.bitwise_and(rgb,0xff)
if alphachannel is not None: #rgba
if alphachannel[0] == 'rgba':
a = np.right_shift(np.bitwise_and(rgb,0xff000000),24)
elif alphachannel[0] == 'opacity':
a = pc.getProperties(alphachannel[1])
a = (np.array(a)*255).astype(np.uint32)
elif alphachannel[0] == 'c':
a = pc.getProperties(alphachannel[1])
else:
raise ValueError("Weird type of alpha channel? "+alphachannel[0])
return _color_format_from_uint8_channels(format,r,g,b,a)
else:
return _color_format_from_uint8_channels(format,r,g,b)
elif len(rgbchannels) == 3:
r=None
g=None
b=None
for (name,index) in rgbchannels:
if name=='r':
r = pc.getProperties(index)
elif name=='g':
g = pc.getProperties(index)
elif name=='b':
b = pc.getProperties(index)
else:
raise ValueError("Strange, have some subset of r,g,b and other channels in point cloud? "+name)
if r is None or g is None or b is None:
raise ValueError("Strange, point cloud has some weird subset of r,g,b channels? "+','.join(v[0] for v in rgbchannels))
if alphachannel is None:
a = 1.0
elif alphachannel[0] == 'opacity':
a = pc.getProperties(alphachannel[1])
elif alphachannel[0] == 'c':
import numpy as np
one_255 = 1.0/255.0
a = (np.array(pc.getProperties(alphachannel[1]))*one_255).tolist()
else:
raise ValueError("Weird type of alpha channel? "+alphachannel[0])
if format=='channels':
if alphachannel is None:
return r,g,b
else:
return r,g,b,a
elif isinstance(format,(list,tuple)) and tuple(format)==('r','g','b'):
return list(zip(r,g,b))
elif isinstance(format,(list,tuple)) and tuple(format)==('r','g','b','a'):
if alphachannel is None:
a = [1.0]*pc.numPoints()
return list(zip(r,g,b,a))
import numpy as np
r = (np.array(r)*255.0).astype(np.uint32)
g = (np.array(g)*255.0).astype(np.uint32)
b = (np.array(b)*255.0).astype(np.uint32)
if alphachannel is not None:
a = (np.array(a)*255.0).astype(np.uint32)
return _color_format_from_uint8_channels(format,r,g,b,a)
else:
return _color_format_from_uint8_channels(format,r,g,b)
elif len(rgbchannels)==0 and alphachannel is not None:
if alphachannel[0] == 'opacity':
import numpy as np
a = pc.getProperties(alphachannel[1])
a = (np.array(a)*255).astype(np.uint32)
elif alphachannel[0] == 'c':
import numpy as np
a = pc.getProperties(alphachannel[1])
else:
raise ValueError("Weird type of alpha channel? "+alphachannel[0])
r = [0xff]*pc.numPoints()
return _color_format_from_uint8_channels(format,r,r,r,a)
else:
raise ValueError("Invalid colors in point cloud? found "+str(len(rgbchannels))+" color channels")
def point_cloud_set_colors(pc,colors,color_format='rgb',pc_property='auto'):
"""Sets the colors of a point cloud.
Args:
pc (PointCloud): the point cloud
colors (list): the list of colors, which can be either ints, tuples, or
channels, depending on color_format.
color_format: describes the format of each element of ``colors``, and
can be:
- 'rgb': packed 24bit int, with the hex format 0xrrggbb,
- 'bgr': packed 24bit int, with the hex format 0xbbggrr,
- 'rgba': packed 32bit int, with the hex format 0xrrggbbaa,
- 'bgra': packed 32bit int, with the hex format 0xbbggrraa,
- 'argb': packed 32bit int, with the hex format 0xaarrggbb,
- 'abgr': packed 32bit int, with the hex format 0xaabbggrr,
- ('r','g','b'): triple with each channel in range [0,1]
- ('r','g','b','a'): tuple with each channel in range [0,1]
- 'channels': ``colors`` is a list of channels, in the form (r,g,b)
or (r,g,b,a), where each value in the channel has range [0,1].
- 'opacity': opacity only, in the range [0,1].
pc_property (str): describes to which property the colors should be
set. 'auto' determines chooses the property from the point cloud
if it's already colored, or color_format if not. 'channels' sets
the 'r', 'g', 'b', and optionally 'a' properties.
Returns:
None
"""
rgbchannels = []
alphachannel = None
for i,prop in enumerate(pc.propertyNames):
if prop in ['r','g','b','rgb']:
rgbchannels.append((prop,i))
elif prop == 'rgba':
rgbchannels.append((prop,i))
if alphachannel is None:
alphachannel = (prop,i)
elif prop in ['opacity','a','c']:
if alphachannel is None:
alphachannel = (prop,i)
rgbdict = dict(rgbchannels)
if pc_property == 'auto':
if len(rgbchannels) == 0 and alphachannel is None:
if color_format=='channels' or isinstance(color_format,(list,tuple)):
pc_property = 'channels'
else:
if 'a' in color_format:
pc_property = 'rgba'
else:
pc_property = 'rgb'
elif len(rgbchannels) == 3:
pc_property = 'channels'
elif len(rgbchannels) == 1:
if alphachannel is not None:
pc_property = 'rgba'
else:
pc_property = rgbchannels[0][0]
if color_format == pc_property:
if color_format == 'channels':
assert len(colors)==3 or len(colors)==4,'Channels must give a 3-tuple or 4-tuple'
for c,values in zip('rgb',colors):
if c in rgbdict:
pc.setProperties(rgbdict[c],values)
else:
pc.addProperty(c,values)
if len(colors)==4:
if alphachannel is not None and alphachannel[0] == 'a':
pc.setProperties(alphachannel[1],colors[3])
else:
pc.addProperty('a',colors[3])
else:
if color_format in rgbdict:
pc.setProperties(rgbdict[color_format],colors)
else:
pc.addProperty(color_format,colors)
else:
channels = _color_format_to_uint8_channels(color_format,colors)
packed = _color_format_from_uint8_channels(pc_property,*channels)
if pc_property in rgbdict:
pc.setProperties(rgbdict[pc_property],packed)
elif alphachannel is not None and pc_property == alphachannel[0]:
pc.setProperties(alphachannel[1],packed)
else:
pc.addProperty(pc_property,packed)
``` |
{
"source": "joaomcm/reem",
"score": 3
} |
#### File: reem/reem/datatypes.py
```python
from __future__ import print_function
from six import iterkeys
from builtins import str
from .utilities import *
from .supports import ReadablePathHandler, PathHandler, ChannelListener, ActiveSubscriberPathHandler
from rejson import Path
import logging
import queue
logger = logging.getLogger("reem")
_ROOT_VALUE_READ_NAME = "{}ROOT{}".format(ROOT_VALUE_SEQUENCE, ROOT_VALUE_SEQUENCE)
class Writer(object):
"""Responsible for setting data inside Redis
The Writer class is an internal class that is used for all data sent to Redis (not including pub/sub messages).
Each key that will have nested data below requires a new instantiation of Writer
Attributes:
top_key_name (str): The name of the Redis key under which JSON data will be stored. To Redis,
this will become a ReJSON key name. It is also used to generate the Redis key names that ships use to
store non-JSON data.
interface (RedisInterface): Defines the connection to Redis this writer will use
"""
def __init__(self, top_key_name, interface):
self.interface = interface
self.top_key_name = top_key_name
self.metadata_key_name = self.top_key_name + SEPARATOR_CHARACTER + "metadata"
self.metadata = None
self.__initialize_metadata()
self.sp_to_label = self.metadata["special_paths"]
self.pipeline = self.interface.client.pipeline()
self.do_metadata_update = True
def __initialize_metadata(self):
""" Pull metadata for this key from Redis or set a default
Returns:
None
"""
try:
pulled = self.interface.client.jsonget(self.metadata_key_name, ".")
if pulled is not None:
self.metadata = pulled
return
except TypeError:
pass
self.metadata = {"special_paths": {}, "required_labels": self.interface.shipper_labels}
def send_to_redis(self, set_path, set_value):
""" Execute equivalent of ``JSON.SET self.top_key_name <set_path> <set_value>``
From the user's perspective, it executes ``JSON.SET self.top_key_name <set_path> <set_value>``
except that ``set_value`` can be json-incompatible. This is the only public
method of this class. It determines what non-serializable types are inside set_value, stores the
serializable data as a JSON, and stores the non-serializable data using ``self.interface``'s ships
Args:
set_path (str): path underneath JSON key to set
set_value: value to set
Returns:
None
"""
#logger.info("SET {} {} = {}".format(self.top_key_name, set_path, type(set_value)))
self.__process_metadata(set_path, set_value)
#logger.debug("SET {} {} Metadata: {}".format(self.top_key_name, set_path, self.metadata))
self.__publish_non_serializables(set_path, set_value)
#logger.debug("SET {} {} Non-Serializables Pipelined".format(self.top_key_name, set_path))
self.__publish_serializables(set_path, set_value)
#logger.debug("SET {} {} Serializables Pipelined".format(self.top_key_name, set_path))
self.pipeline.execute()
#logger.debug("SET {} {} Pipeline Executed".format(self.top_key_name, set_path))
def __process_metadata(self, set_path, set_value):
""" Handle metadata updates
Given the path and value the user would like to set, check if there are non-serializable data types and update
metadata locally and in Redis. Happens without pipeline
Args:
set_path (str): path underneath JSON key to set
set_value: value to set
Returns:
None
"""
if not self.do_metadata_update:
return
overridden_paths = set()
for existing_path in iterkeys(self.sp_to_label):
if existing_path.startswith(set_path):
overridden_paths.add(existing_path)
[self.sp_to_label.pop(op) for op in overridden_paths]
special_paths = get_special_paths(set_path, set_value, self.sp_to_label, self.interface.label_to_shipper)
dels, adds = overridden_paths - special_paths, special_paths - overridden_paths
for set_path, label in special_paths:
self.sp_to_label[set_path] = label
if len(adds) > 0 or len(dels) > 0:
self.pipeline.jsonset(self.metadata_key_name, ".", self.metadata)
channel, message = "__keyspace@0__:"+ self.metadata_key_name, "set"
self.pipeline.publish(channel, message) # Homemade key-space notification for metadata updates
def __publish_non_serializables(self, set_path, set_value):
"""Publish JSON incompatible data to Redis
Given a set, publish the non-serializable components to redis, given that metadata has been updated already
Args:
set_path (str): path underneath JSON key to set
set_value: value to set
Returns:
None
"""
overridden_paths, suffixes = filter_paths_by_prefix(iterkeys(self.sp_to_label), set_path)
for full_path, suffix in zip(overridden_paths, suffixes):
#logger.debug("Suffix = {}".format(suffix))
update_value = extract_object(set_value, path_to_key_sequence(suffix))
special_path_redis_key_name = self.top_key_name+full_path
#logger.debug("SET {} {} Non-serializable update {} = {}".format(
# self.top_key_name, set_path, special_path_redis_key_name, type(update_value))
#)
self.interface.label_to_shipper[self.sp_to_label[full_path]].write(
key=special_path_redis_key_name,
value=update_value,
client=self.pipeline
)
def __publish_serializables(self, set_path, set_value):
""" Publish the serializable portion of ``set_value
Take out the non-serializable part of set_value and publish it at set_path
Args:
set_path (str): path underneath JSON key to set
set_value: value to set
Returns: None
"""
if type(set_value) is dict:
intrusive_paths, suffixes = filter_paths_by_prefix(iterkeys(self.sp_to_label), set_path)
excised_copy = copy_dictionary_without_paths(set_value, [path_to_key_sequence(s) for s in suffixes])
self.pipeline.jsonset(self.top_key_name, set_path, excised_copy)
#logger.debug("SET {} {} Serializable update {} = {}".format(self.top_key_name, set_path, set_path, excised_copy))
elif set_path not in self.sp_to_label:
self.pipeline.jsonset(self.top_key_name, set_path, set_value)
#logger.debug("SET {} {} Serializable update {} = {}".format(self.top_key_name, set_path, set_path, set_value))
class Reader(object):
"""Responsible for getting data from Redis
The Reader class is an internal class that is used for all reads from Redis (not including pub/sub messages).
Each key that will have nested data below requires a new instantiation of Reader
Attributes:
top_key_name (str): The name of the Redis key under which JSON data is stored
interface (RedisInterface): Defines the connection to Redis this reader will use
"""
def __init__(self, top_key_name, interface):
self.interface = interface
self.top_key_name = top_key_name
self.metadata = {"special_paths": {}, "required_labels": self.interface.shipper_labels}
self.sp_to_label = self.metadata["special_paths"]
self.pipeline = self.interface.client.pipeline()
self.pipeline_no_decode = self.interface.client_no_decode.pipeline()
self.metadata_key_name = "{}{}metadata".format(self.top_key_name, SEPARATOR_CHARACTER)
self.interface.metadata_listener.add_listener(self.metadata_key_name, self)
self.pull_metadata = True
# Will need to update metadata on first read regardless so the simple initialization we have here is sufficient
def read_from_redis(self, read_path):
""" Read specified path from Redis
This is the only public method of the Reader class. It will retrieve the data stored at a specified path
from Redis. At a high level, it reads data stored with ReJSON and inserts non-JSON compatible data
at appropriate paths using the metadata associated with this key.
Args:
read_path (str): path the user wants to read
Returns: data stored at value in Redis
"""
self.interface.INTERFACE_LOCK.acquire()
#logger.info("GET {} {} pull_metadata = {}".format(self.top_key_name, read_path, self.pull_metadata))
self.update_metadata()
#logger.debug("GET {} {} Using Metadata: {}".format(self.top_key_name, read_path, self.metadata))
if read_path in self.sp_to_label:
ret_val = self.pull_special_path(read_path)
else:
self.queue_reads(read_path)
#logger.debug("GET {} {} Reads Queued".format(self.top_key_name, read_path))
ret_val = self.build_dictionary(read_path)
#logger.debug("GET {} {} Dictionary Built".format(self.top_key_name, read_path))
self.interface.INTERFACE_LOCK.release()
return ret_val
def update_metadata(self):
""" Update the local copy of metadata if a relevant path has been updated.
The metadata listener is a redis client subscribed to key-space notifications. If a relevant path is updated,
this Reader's ``pull_metadata`` flag will be turned on. If ``pull_metadata`` is ``True``, then the reader
will fetch metadata from the Redis server.
Returns: None
"""
self.interface.metadata_listener.flush()
if self.pull_metadata:
try:
pulled = self.interface.client.jsonget(self.metadata_key_name, ".")
if pulled is not None:
self.metadata = pulled
except TypeError: # No Metadata online
return
self.sp_to_label = self.metadata["special_paths"]
self.pull_metadata = False
def queue_reads(self, read_path):
""" Queue reads in a pipeline
Queue all redis queries necessary to read data at path into the appropriate redis pipeline.
First, queue the decoded pipeline with the ReJSON query.
Next, queue all the special path reads with the non-decoded pipeline and ships.
Args:
read_path: path user wants to read
Returns: None
"""
self.pipeline.jsonget(self.top_key_name, read_path)
special_paths, suffixes = filter_paths_by_prefix(iterkeys(self.sp_to_label), read_path)
for p in special_paths:
special_path_redis_key_name = self.top_key_name + p
#logger.debug("type(sp to label) = {}, type(p) = {}".format(type(self.sp_to_label), type(p)))
ship = self.interface.label_to_shipper[self.sp_to_label[p]]
ship.read(special_path_redis_key_name, self.pipeline_no_decode)
def build_dictionary(self, read_path):
""" Execute pipelines and consolidate data into a dictionary
Args:
read_path: path user wants to read
Returns: The data stored at ``read_path`` in Redis
"""
return_val = self.pipeline.execute()[0]
#logger.debug("GET {} {} Serializable Pipeline Executed".format(self.top_key_name, read_path))
responses = self.pipeline_no_decode.execute()
special_paths, suffixes = filter_paths_by_prefix(iterkeys(self.sp_to_label), read_path)
#logger.debug("special_path = {}, suffixes = {}".format(special_paths, suffixes))
for i, (sp, suffix) in enumerate(zip(special_paths, suffixes)):
value = self.interface.label_to_shipper[self.sp_to_label[sp]].interpret_read(responses[i: i + 1])
insert_into_dictionary(return_val, path_to_key_sequence(suffix), value)
#logger.debug("GET {} {} Nonserializable Pipeline Inserted {} = {}"
# .format(self.top_key_name, read_path, sp, type(value))
# )
#logger.debug("GET {} {} Dictionary Built".format(self.top_key_name, read_path))
return return_val
def pull_special_path(self, read_path):
""" Directly pull a non-JSON path
If the user specified path is not in JSON, this will retrieve the data directly without going through ReJSON.
Args:
read_path: path user wants to read
Returns:
"""
shipper = self.interface.label_to_shipper[self.sp_to_label[read_path]]
special_name = str(self.top_key_name) + str(read_path)
shipper.read(special_name, self.pipeline_no_decode)
responses = self.pipeline_no_decode.execute()
return shipper.interpret_read(responses)
class KeyValueStore(object):
"""Dictionary Like object used for get/set paradigm
The ``KeyValueStore`` object is one that users will frequently use. It keeps ``Reader`` and ``Writer`` objects
for each key the user has read from or written to. It does not actually handle the getting and setting of data
but produces ``ReadablePathHandler`` objects that assist with path construction and call the reader and
writer's write and read methods.
Attributes:
interface (RedisInterface): Defines the connection to Redis this reader will use
"""
def __init__(self, interface):
self.interface = interface
self.entries = {}
self.track_schema = True
def __setitem__(self, key, value):
""" Only used for setting key on first level of KVS. i.e. KVS["top_key"] = value. Otherwise see __getitem__
Args:
key (str): "dictionary" key. It becomes a top-level Redis key
value: value to store
Returns: None
"""
assert check_valid_key_name(key), "Invalid Key: {}".format(key)
if type(value) != dict:
value = {_ROOT_VALUE_READ_NAME: value}
self.__ensure_key_existence(key)
writer, reader = self.entries[key]
writer.send_to_redis(Path.rootPath(), value)
def __getitem__(self, item):
"""Used to retrieve ReadablePathHandler object for handling path construction when setting/getting Redis
Args:
item (str): "dictionary" key. It must be a top-level Redis key
Returns:
"""
assert check_valid_key_name(item), "Invalid Key: {}".format(item)
#logger.debug("KVS GET {}".format(item))
self.__ensure_key_existence(item)
writer, reader = self.entries[item]
return ReadablePathHandler(writer=writer, reader=reader, initial_path=Path.rootPath())
def __ensure_key_existence(self, key):
""" Ensure that the requested key has a reader and writer associated with it.
Returns: None
"""
assert check_valid_key_name(key), "Invalid Key: {}".format(key)
if key not in self.entries:
self.entries[key] = (Writer(key, self.interface), Reader(key, self.interface))
self.entries[key][0].do_metadata_update = self.track_schema
def track_schema_changes(self, set_value, keys=None):
""" Performance optimization for skipping schema update checks
Stop checking for schema updates when setting data. Use ONLY if your data's schema is static
and you are really trying to eke out every bit of optimization.
Args:
set_value (bool): True/False indicating if the keys' schema should be tracked
keys (List[str]): List of keys to track. If None, all present and future keys are tracked
according to ``set_value``
Returns: None
"""
if keys is None:
keys = iterkeys(self.entries)
self.track_schema = set_value
for k in keys:
self.entries[k][0].do_metadata_update = set_value
class Publisher(Writer):
""" Defines Publisher behavior
The Publisher is identical to the writer but publishes a message when it writes a value.
"""
def __init__(self, top_key_name, interface):
super(Publisher,self).__init__(top_key_name, interface)
self.message = "Publish"
def send_to_redis(self, set_path, set_value):
""" Publisher equivalent of Writer ``send_to_redis``
This is an equivalent function to ``Writer``'s ``send_to_redis`` method but also publishes a message
indicating what channel has been updated.
Args:
set_path (str): path underneath JSON key to set
set_value: value to set
Returns: None
"""
#logger.info("PUBLISH {} {} = {}".format(self.top_key_name, set_path, type(set_value)))
#logger.debug("PUBLISH {} {} Metadata Update?: {}".format(self.top_key_name, set_path, self.do_metadata_update))
self.__process_metadata(set_path, set_value)
#logger.debug("PUBLISH {} {} Metadata: {}".format(self.top_key_name, set_path, self.metadata))
self.__publish_non_serializables(set_path, set_value)
self.__publish_serializables(set_path, set_value)
# Addition to Writer class
if set_path == Path.rootPath():
set_path = ""
channel_name = "__pubspace@0__:" + self.top_key_name + set_path
self.pipeline.publish(channel_name, self.message)
# Resume Writer Class
self.pipeline.execute()
#logger.debug("PUBLISH {} {} pipeline executed, published {} to {}".format(
# self.top_key_name, set_path, self.message, channel_name)
#)
class PublishSpace(KeyValueStore):
""" Convenience class for publishing
This class keeps track of ``Publisher`` objects for each key the user has published on.
"""
def __getitem__(self, item):
""" Identical to ``KeyValueStore`` method of same name but provides non-readable ``PathHandler`` objects
"""
assert type(item) == str
self.__ensure_key_existence(item)
publisher, _ = self.entries[item]
return PathHandler(writer=publisher, reader=_, initial_path=Path.rootPath())
def __ensure_key_existence(self, key):
""" Identical to ``KeyValueStore`` method of same name but does not instantiate a ``Reader`` object
"""
assert check_valid_key_name(key), "Invalid Key: {}".format(key)
if key not in self.entries:
self.entries[key] = (Publisher(key, self.interface), None)
self.entries[key][0].do_metadata_update = self.track_schema
class RawSubscriber:
def __init__(self, channel_name, interface, callback_function, kwargs):
self.listening_channel = '__pubspace@0__:{}'.format(channel_name)
self.listener = ChannelListener(interface, self.listening_channel, callback_function, kwargs)
self.listener.setDaemon(True)
def listen(self):
self.listener.start()
class SilentSubscriber(Reader):
""" Silent Subscriber Implementation"""
def __init__(self, channel, interface):
super(SilentSubscriber,self).__init__(channel, interface)
self.local_copy = {}
self.passive_subscriber = RawSubscriber(channel + "*", interface, self.update_local_copy, {})
self.prefix = "__pubspace@0__:" + self.top_key_name
def update_local_copy(self, channel, message):
""" Update the local copy of the data stored under this channel name in redis.
Args:
channel: the name of the channel that was published.
message: message published on that channel
Returns: None
"""
#logger.info("SILENT_SUBSCRIBER @{} : channel={} message={}".format(self.prefix, channel, message))
try:
message = message.decode("utf-8")
except Exception as e:
return
if message != "Publish":
return
if channel == self.prefix:
self.local_copy = self.read_from_redis(Path.rootPath())
return
path = channel[len(self.prefix):]
redis_value = self.read_from_redis(path)
#logger.debug("SILENT_SUBSCRIBER @{} : Read from Redis: {}".format(self.prefix, redis_value))
insert_into_dictionary(self.local_copy, path_to_key_sequence(path), redis_value)
#logger.debug("SILENT_SUBSCRIBER @{} : Local Copy: {}".format(self.prefix, self.local_copy))
def listen(self):
""" Makes this subscriber start listening
Returns: None
"""
self.passive_subscriber.listen()
def value(self):
""" Get data stored at root
Whereas the reader can do ``server["foo"].read()`` if ``server`` is a ``KeyValueStore``,
accessing the root value of a subscriber is not as easy. This method retrieves all data stored underneath
a top level key.
Returns: all data stored underneath a top level Redis key.
"""
if _ROOT_VALUE_READ_NAME in self.local_copy:
return self.local_copy[_ROOT_VALUE_READ_NAME]
# Copy dictionary - paths to omit is blank, so we copy everything
return copy_dictionary_without_paths(self.local_copy, [])
def __getitem__(self, item):
""" Implement dictionary API for local copy
We do not give the user direct access to the dictionary representing the local copy.
Args:
item:
Returns:
"""
assert type(item) == str, "Key name must be string"
return ActiveSubscriberPathHandler(None, self, Path.rootPath() + item)
class CallbackSubscriber(SilentSubscriber):
""" Callback Subscriber Implementation"""
def __init__(self, channel, interface, callback_function, kwargs):
super(CallbackSubscriber,self).__init__(channel, interface)
self.queue = queue.Queue()
self.passive_subscriber = RawSubscriber(channel + "*", interface, self.call_user_function, {})
self.callback_function = callback_function
self.kwargs = kwargs
def call_user_function(self, channel, message):
"""
Wrapper callback function (wrapping user function) for this class to work with a RawSubscriber object
Fits required interface for a ChannelSubscriber callback function
:param channel: channel published to
:param message: message that was published
:return: None
:rtype: None
"""
self.update_local_copy(channel, message)
channel_name = channel.split("__pubspace@0__:")[1]
self.callback_function(data=self.value(), updated_path=channel_name, **self.kwargs)
```
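A minimal usage sketch for the classes above. The import paths and the `RedisInterface` constructor are assumptions (that class is defined elsewhere in the package); the `KeyValueStore` and subscriber calls follow the docstrings in this file, and a running Redis server with the ReJSON module is required.
```python
from reem.connection import RedisInterface                   # assumed import path
from reem.datatypes import KeyValueStore, SilentSubscriber   # assumed import path

interface = RedisInterface(host="localhost")                 # assumed constructor

# Dictionary-style writes and reads backed by ReJSON
server = KeyValueStore(interface)
server["robot"] = {"pose": {"x": 0.0, "y": 1.5}, "status": "idle"}
print(server["robot"].read())     # .read() as described in the docstrings above

# Passive subscriber keeping a local copy of anything *published* under "robot"
sub = SilentSubscriber("robot", interface)
sub.listen()
# sub.value() returns the local copy once updates arrive on the "robot*" channels
```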
#### File: reem/reem/ships.py
```python
from __future__ import print_function,unicode_literals
from abc import ABCMeta, abstractmethod
import numpy as np
import io
class SpecialDatatypeShip(object):
__metaclass__ = ABCMeta
def __init__(self):
super(SpecialDatatypeShip, self).__init__()
@abstractmethod
def check_fit(self, value):
""" Determine if this ship will handle ``value``
This method returns true if ``value`` is data that this ship is supposed to handle. If this ship handled all
numpy arrays, it would check if ``value``'s type is a numpy array.
Args:
value: object to check
Returns: True if ship will handle ``value``
"""
pass
@abstractmethod
def write(self, key, value, client):
""" Write ``value`` to Redis at the specified ``key`` using ``client``
Given a Redis client, execute any number of needed commands to store the ``value`` in Redis. You
are required to use the key given for REEM to find it. If you must store multiple pieces of information,
use a `Redis Hash <https://redis.io/topics/data-types>`_ which acts like a one level dictionary.
Args:
key (str): The Redis key name this ship must store data under
value: The value to write into Redis
client: A `ReJSON Redis Client <https://github.com/RedisJSON/rejson-py>`_ pipeline
Returns: None
"""
pass
@abstractmethod
def read(self, key, client):
""" Retrieve necessary information from Redis
Given a Redis client, execute ONE command to retrieve all the information you need to rebuild the data
that was stored in ``write`` from Redis. This method should execute the command that allows you to retrieve
all data stored under key
Args:
key (str): a keyname that contains data stored by ``write``
client: A `ReJSON Redis Client <https://github.com/RedisJSON/rejson-py>`_ pipeline
Returns: None
"""
pass
@abstractmethod
def interpret_read(self, responses):
""" Translate Redis data into a local object
Redis will reply to you with something according to what read command you executed in ``read``. This method
takes whatever Redis replied with and turns it into an object identical to what was initially passed to
``write`` as value.
Args:
responses: Redis's reply data based on ``read`` method
Returns: An object identical to what was initially written to Redis.
"""
pass
@abstractmethod
def get_label(self):
""" Return a unique string identifier
This method should return a string that uniquely identifies this ship. REEM will use it to determine what ship
to use to decode data that is already stored in Redis.
Returns:
str: the string identifier
"""
pass
class NumpyShip(SpecialDatatypeShip):
def check_fit(self, value):
return type(value) in [np.array, np.ndarray]
def write(self, key, value, client):
client.hset(key, "arr", memoryview(value.data).tobytes())
client.hset(key, "dtype", str(value.dtype))
client.hset(key, "shape", str(value.shape))
client.hset(key, "strides", str(value.strides))
def get_label(self):
return "default_numpy_handler"
def read(self, key, client):
client.hgetall(key)
def interpret_read(self, responses):
hash = responses[0]
dtype = eval("np.{}".format(hash[b'dtype'].decode('utf-8')))
shape = hash[b'shape'].decode("utf-8")[1:-1]
shape = tuple([int(s) for s in shape.split(",") if len(s) > 0])
arr = np.frombuffer(hash[b'arr'], dtype)
arr = np.reshape(arr, shape)
return arr
``` |
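A sketch of a custom ship implementing the abstract interface above; `BytesShip` is a hypothetical handler (not part of the library) that stores raw `bytes` blobs under a Redis hash field, mirroring the pattern used by `NumpyShip`.
```python
from reem.ships import SpecialDatatypeShip  # module shown above

class BytesShip(SpecialDatatypeShip):
    """Hypothetical ship that stores raw bytes under a single hash field."""
    def check_fit(self, value):
        return isinstance(value, bytes)

    def write(self, key, value, client):
        client.hset(key, "blob", value)

    def read(self, key, client):
        client.hgetall(key)

    def interpret_read(self, responses):
        return responses[0][b"blob"]

    def get_label(self):
        return "example_bytes_handler"
```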
{
"source": "joaomcteixeira/haddock3",
"score": 2
} |
#### File: joaomcteixeira/haddock3/setup.py
```python
from glob import glob
from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
def read(*names, **kwargs):
"""Read description files."""
path = join(dirname(__file__), *names)
with open(path, encoding=kwargs.get('encoding', 'utf8')) as fh:
return fh.read()
# activate once added, do not remove
#long_description = '{}\n{}'.format(
# read('README.rst'),
# read(join('docs', 'CHANGELOG.rst')),
# )
setup(
name='haddock3',
version='0.0.3-alpha',
description='Haddock 3.',
long_description='',#long_description,
long_description_content_type='text/x-rst',
license='Apache License 2.0',
author='HADDOCK',
author_email='A.<EMAIL>',
url='https://github.com/haddocking/haddock3',
packages=find_packages('src'),
package_dir={'': 'src'},
#py_modules=[splitext(basename(i))[0] for i in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list:
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
#'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Microsoft',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
project_urls={
'webpage': 'https://github.com/haddocking/haddock3',
'Documentation': 'https://github.com/haddocking/haddock3#readme',
'Changelog': '',
'Issue Tracker': 'https://github.com/haddocking/haddock3/issues',
'Discussion Forum': 'https://github.com/haddocking/haddock3/issues',
},
keywords=[
'Structural Biology',
'Biochemistry',
'Docking',
'Protein docking',
'Proteins',
],
python_requires='>=3.8, <4',
install_requires=[
# not added on purpose
],
extras_require={
},
setup_requires=[
],
entry_points={
'console_scripts': [
'haddock3 = haddock.clis.cli:maincli',
]
},
# cmdclass={'build_ext': optional_build_ext},
# ext_modules=[
# Extension(
# splitext(relpath(path, 'src').replace(os.sep, '.'))[0],
# sources=[path],
# include_dirs=[dirname(path)]
# )
# for root, _, _ in os.walk('src')
# for path in glob(join(root, '*.c'))
# ],
)
```
#### File: haddock/clis/cli.py
```python
import argparse
import logging
import sys
from argparse import ArgumentTypeError
from functools import partial
from haddock import current_version
from haddock.libs.libutil import file_exists
from haddock.gear.restart_run import add_restart_arg
# Command line interface parser
ap = argparse.ArgumentParser()
_arg_file_exist = partial(
file_exists,
exception=ArgumentTypeError,
emsg="File {!r} does not exist or is not a file.")
ap.add_argument(
"recipe",
type=_arg_file_exist,
help="The input recipe file path",
)
add_restart_arg(ap)
ap.add_argument(
"--setup",
help="Only setup the run, do not execute",
action="store_true",
dest='setup_only',
)
_log_levels = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
ap.add_argument(
"--log-level",
default="INFO",
choices=_log_levels,
)
ap.add_argument(
"-v",
"--version",
help="show version",
action="version",
version=f'{ap.prog} - {current_version}',
)
def load_args(ap):
"""Load argument parser args."""
return ap.parse_args()
def cli(ap, main):
"""Command-line interface entry point."""
cmd = load_args(ap)
main(**vars(cmd))
def maincli():
"""Main client execution."""
cli(ap, main)
def main(
recipe,
restart=None,
setup_only=False,
log_level="INFO",
):
"""
Execute HADDOCK3 client logic.
Parameters
----------
recipe : str or pathlib.Path
The path to the recipe (config file).
restart : int
At which step to restart haddock3 run.
setup_only : bool
Whether to setup the run without running it.
log_level : str
The logging level: INFO, DEBUG, ERROR, WARNING, CRITICAL.
"""
# anti-pattern to speed up CLI initiation
from haddock.libs.libworkflow import WorkflowManager
from haddock.gear.greetings import get_adieu, get_initial_greeting
from haddock.gear.prepare_run import setup_run
from haddock.core.exceptions import HaddockError, ConfigurationError
# Configuring logging
logging.basicConfig(
level=log_level,
format="[%(asctime)s] %(name)s:L%(lineno)d %(levelname)s - %(message)s",
)
# Special case only using print instead of logging
logging.info(get_initial_greeting())
try:
params, other_params = setup_run(recipe, restart_from=restart)
except HaddockError as err:
logging.error(err)
raise err
if not setup_only:
try:
workflow = WorkflowManager(
workflow_params=params,
start=restart,
**other_params,
)
# Main loop of execution
workflow.run()
except HaddockError as err:
logging.error(err)
raise err
# Finish
logging.info(get_adieu())
if __name__ == "__main__":
sys.exit(maincli())
```
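A sketch of driving the CLI logic programmatically; `docking.cfg` is a hypothetical recipe file, and the call mirrors the `main` signature above.
```python
from haddock.clis.cli import main

# Prepare the run directory without executing the workflow
main(
    "docking.cfg",      # hypothetical HADDOCK3 configuration file
    setup_only=True,
    log_level="DEBUG",
    )
```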
#### File: haddock/libs/libparallel.py
```python
import logging
from multiprocessing import Process
from haddock.libs.libutil import parse_ncores
logger = logging.getLogger(__name__)
class Worker(Process):
def __init__(self, tasks):
super(Worker, self).__init__()
self.tasks = tasks
logger.info(f"Worker ready with {len(self.tasks)} tasks")
def run(self):
for task in self.tasks:
task.run()
logger.info(f"{self.name} executed")
class Scheduler:
def __init__(self, tasks, ncores=None):
"""
Schedule tasks to a defined number of processes.
Parameters
----------
tasks : list
The list of tasks to execute. Tasks must be subclass of
`multiprocessing.Process`.
ncores : None or int
The number of cores to use. If `None` is given uses the
maximum number of CPUs allowed by
`libs.libututil.parse_ncores` function.
"""
self.num_tasks = len(tasks)
self.num_processes = ncores # first parses num_cores
# Do not waste resources
self.num_processes = min(self.num_processes, self.num_tasks)
# step trick by @brianjimenez
_n = self.num_processes
job_list = [tasks[i::_n] for i in range(_n)]
self.task_list = [Worker(jobs) for jobs in job_list]
logger.info(f"{self.num_tasks} tasks ready.")
@property
def num_processes(self):
return self._ncores
@num_processes.setter
def num_processes(self, n):
self._ncores = parse_ncores(n)
logger.info(f"Scheduler configurated for {self._ncores} cpu cores.")
def run(self):
try:
for task in self.task_list:
task.start()
for task in self.task_list:
task.join()
logger.info(f"{self.num_tasks} tasks finished")
except KeyboardInterrupt:
# Q: why have a keyboard interrupt here?
self.terminate()
def terminate(self):
logger.warning("Something went wrong")
for task in self.task_list:
task.terminate()
logger.warning("The workers have stopped")
```
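A sketch of how the `Scheduler` above might be used; `SleepTask` is a toy task written for illustration (each `Worker` simply calls `run()` on the tasks it receives, here a `multiprocessing.Process` subclass as the docstring asks).
```python
import time
from multiprocessing import Process

from haddock.libs.libparallel import Scheduler

class SleepTask(Process):
    """Toy task: Worker calls .run() on each task it receives."""
    def __init__(self, seconds):
        super().__init__()
        self.seconds = seconds

    def run(self):
        time.sleep(self.seconds)

tasks = [SleepTask(i % 3) for i in range(10)]
engine = Scheduler(tasks, ncores=4)   # effective cores = min(4, 10, available)
engine.run()
```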
#### File: haddock/libs/libutil.py
```python
import logging
import shutil
import subprocess
from copy import deepcopy
from functools import partial
from operator import ge
from os import cpu_count
from pathlib import Path
from haddock.core.exceptions import SetupError
logger = logging.getLogger(__name__)
check_subprocess = partial(
subprocess.run,
shell=True,
check=True,
stdout=subprocess.DEVNULL,
)
def get_result_or_same_in_list(function, value):
"""
Return the result if True or the value within a list.
Applies `function` to `value` and returns its result if it evaluates
to True. Otherwise, return the value within a list.
`function` should receive a single argument, the `value`.
"""
result = function(value)
return result if result else [value]
def make_list_if_string(item):
if isinstance(item, str):
return [item]
return item
def copy_files_to_dir(paths, directory):
"""
Copy files to directory.
Parameters
----------
paths : iterable of paths
Source files.
directory : path
Where to copy files to.
"""
for path in paths:
shutil.copy(path, directory)
def zero_fill(number, digits=2):
"""Makes a number string zero filled to the left."""
return str(number).zfill(digits)
def remove_folder(folder):
"""Removes a folder if it exists."""
if folder.exists():
logger.warning(f'{folder} exists and it will be REMOVED!')
shutil.rmtree(folder)
def remove_dict_keys(d, keys):
"""
Remove `keys` from dictionary (`d`).
Return
------
dict
A copy of `d` dictionary without the `keys`.
"""
return {k: deepcopy(v) for k, v in d.items() if k not in keys}
def parse_ncores(n=None, njobs=None, max_cpus=None):
"""
Check the number of cores according to HADDOCK3 architecture.
Parameters
----------
n : int or str
The desired number of cores. If `None` is given, returns the
maximum number of cores allowed, see `max_cpus`.
njobs : int
The number of jobs to execute. Optional. The number of cores
will be compared to `njobs`.
max_cpus : int
The maximum number of CPUs allowed. If not specified, defaults
to the available CPUs minus one.
Raises
------
SetupError
If `n` is not positive or not convertable to `int`.
Returns
-------
int
A correct number of cores according to specifications.
"""
max_cpus = max_cpus or max(cpu_count() - 1, 1)
if n is None:
return max_cpus
try:
n = int(n)
except (TypeError, ValueError) as err:
_msg = f"`n` must be `int` or `int`-convertable `str`: {n!r} given."
raise SetupError(_msg) from err
if n < 1:
_msg = f"`n` is not positive, this is not possible: {n!r}"
raise SetupError(_msg)
if njobs:
ncores = min(n, njobs, max_cpus)
logger.info(
f"Selected {ncores} cores to process {njobs} jobs, with {max_cpus} "
"maximum available cores."
)
return ncores
logger.info(f"`njobs` not specified, evaluating initial value {n}...")
ncores = min(n, max_cpus)
logger.info(f"Selected {ncores} for a maximum of {max_cpus} CPUs")
return ncores
def non_negative_int(
n,
exception=ValueError,
emsg="`n` do not satisfies",
):
"""
Transform `n` into int and return it if it is non-negative.
Parameters
----------
n : int-convertible
Something that can be converted to int.
exception : Exception
The Exception to raise in case `n` is not a positive integer.
emsg : str
The error message to give to `exception`. May accept formatting
to pass `n`.
Raises
------
ValueError, TypeError
If `n` cannot be converted to `int`
"""
n1 = int(n)
if n1 >= 0:
return n1
# don't change to f-strings, .format has a purpose
raise exception(emsg.format(n))
def file_exists(
path,
exception=ValueError,
emsg="`path` is not a file or does not exist",
):
"""
Asserts file exist.
Parameters
----------
path : str or pathlib.Path
The file path.
exception : Exception
The Exception to raise in case `path` is not file or does not
exist.
emsg : str
The error message to give to `exception`. May accept formatting
to pass `path`.
Raises
------
Exception
Any exception that pathlib.Path can raise.
"""
p = Path(path)
valid = [p.exists, p.is_file]
if all(f() for f in valid):
return p
# don't change to f-strings, .format has a purpose
raise exception(emsg.format(str(path)))
```
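A short sketch of the helpers above; `run.cfg` is a hypothetical file path and the selected core count depends on the machine.
```python
from haddock.libs.libutil import file_exists, non_negative_int, parse_ncores

ncores = parse_ncores(n=8, njobs=3)   # -> 3, never more cores than jobs
nmodels = non_negative_int("20")      # -> 20; raises ValueError for "-1"
config = file_exists("run.cfg")       # hypothetical path; raises if missing
```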
#### File: haddock/modules/__init__.py
```python
import os
import logging
import contextlib
from abc import ABC, abstractmethod
from pathlib import Path
from haddock.core.defaults import MODULE_PATH_NAME, MODULE_IO_FILE
from haddock.core.exceptions import StepError
from haddock.gear.config_reader import read_config
from haddock.libs.libontology import ModuleIO
logger = logging.getLogger(__name__)
modules_folder = Path(__file__).resolve().parent
_folder_match_regex = '[a-zA-Z]*/'
modules_category = {
module.name: category.name
for category in modules_folder.glob(_folder_match_regex)
for module in category.glob(_folder_match_regex)
}
"""Indexes each module in its specific category. Keys are Paths to the module,
values are their categories. Categories are the modules parent folders."""
general_parameters_affecting_modules = {'ncores', 'cns_exec'}
"""These parameters are general parameters that may be applicable to modules
specifically. Therefore, they should be considered as part of the "default"
module's parameters. Usually, this set is used to filter parameters during
the run preparation phase. See `gear.prepare_run`."""
class BaseHaddockModule(ABC):
def __init__(self, order, path, params, cns_script=""):
"""
Base class for any HADDOCK module
Parameters
----------
params : dict or path to HADDOCK3 configuration file
A dictionary or a path to an HADDOCK3 configuration file
containing the initial module parameters. Usually this is
defined by the default params.
"""
self.order = order
self.path = path
self.previous_io = self._load_previous_io()
if cns_script:
self.cns_folder_path = cns_script.resolve().parent
self.cns_protocol_path = cns_script
self.params = params
try:
with open(self.cns_protocol_path) as input_handler:
self.recipe_str = input_handler.read()
except FileNotFoundError:
_msg = f"Error while opening workflow {self.cns_protocol_path}"
raise StepError(_msg)
except AttributeError:
# No CNS-like module
pass
@property
def params(self):
return self._params
@params.setter
def params(self, path_or_dict):
if isinstance(path_or_dict, dict):
self._params = path_or_dict
else:
try:
self._params = read_config(path_or_dict)
except FileNotFoundError as err:
_msg = f"Default configuration file not found: {str(path_or_dict)!r}"
raise FileNotFoundError(_msg) from err
except TypeError as err:
_msg = (
"Argument does not satisfy condition, must be path or "
f"dict. {type(path_or_dict)} given."
)
raise TypeError(_msg) from err
@abstractmethod
def run(self, params):
self.update_params(**params)
self.params.setdefault('ncores', None)
self.params.setdefault('cns_exec', None)
@classmethod
@abstractmethod
def confirm_installation(cls):
"""
Confirm the third-party software needed for the module is installed.
HADDOCK3's own modules should just return.
"""
return
def finish_with_error(self, message=""):
if not message:
message = "Module has failed"
logger.error(message)
raise SystemExit
def _load_previous_io(self):
if self.order == 0:
return ModuleIO()
io = ModuleIO()
previous_io = self.previous_path() / MODULE_IO_FILE
if previous_io.is_file():
io.load(previous_io)
return io
def previous_path(self):
previous = sorted(list(self.path.resolve().parent.glob('[0-9][0-9]*/')))
try:
return previous[self.order - 1]
except IndexError:
return self.path
def update_params(self, **parameters):
"""Update defaults parameters with run-specific parameters."""
self._params.update(parameters)
@contextlib.contextmanager
def working_directory(path):
"""Changes working directory and returns to previous on exit"""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
```
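A sketch of a minimal module built on `BaseHaddockModule`; the class below is illustrative only (real modules live in their own subpackage with a defaults file), and passing a plain dict as `initial_params` relies on the `params` setter shown above.
```python
import logging
from pathlib import Path

from haddock.modules import BaseHaddockModule

logger = logging.getLogger(__name__)

class HaddockModule(BaseHaddockModule):
    """Toy module that only logs its parameters."""

    def __init__(self, order, path, initial_params=None):
        super().__init__(order, path, initial_params or {"ncores": 1})

    @classmethod
    def confirm_installation(cls):
        return  # no third-party software needed

    def run(self, **params):
        super().run(params)
        logger.info("running toy module with params: %s", self.params)

# order=0 skips loading a previous step's ModuleIO file
module = HaddockModule(order=0, path=Path("run_dir/00_toy"))
module.run(ncores=2)
```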
#### File: sampling/rigidbody/__init__.py
```python
import logging
from pathlib import Path
from os import linesep
from haddock.gear.haddockmodel import HaddockModel
from haddock.libs.libcns import generate_default_header, load_ambig
from haddock.libs.libcns import load_workflow_params, prepare_multiple_input
from haddock.libs.libontology import Format, ModuleIO, PDBFile
from haddock.libs.libparallel import Scheduler
from haddock.libs.libsubprocess import CNSJob
from haddock.modules import BaseHaddockModule
logger = logging.getLogger(__name__)
RECIPE_PATH = Path(__file__).resolve().parent
DEFAULT_CONFIG = Path(RECIPE_PATH, "defaults.cfg")
def generate_docking(identifier, input_files, step_path, recipe_str, defaults, ambig=None):
"""Generate the .inp file that will run the docking."""
# prepare the CNS header that will read the input
# read the default parameters
default_params = load_workflow_params(defaults)
param, top, link, topology_protonation, \
trans_vec, tensor, scatter, \
axis, water_box = generate_default_header()
# input_files is the ontology, unwrap it
pdb_list = []
psf_list = []
for element in input_files:
pdb = Path(element.path, element.file_name)
psf = Path(element.path, element.topology.file_name)
pdb_list.append(str(pdb))
psf_list.append(str(psf))
input_str = prepare_multiple_input(pdb_list, psf_list)
if ambig:
ambig_str = load_ambig(ambig)
else:
ambig_str = ""
output_pdb_filename = step_path / f'rigidbody_{identifier}.pdb'
output = f"{linesep}! Output structure{linesep}"
output += (f"eval ($output_pdb_filename="
f" \"{output_pdb_filename}\"){linesep}")
inp = default_params + param + top + input_str + output \
+ topology_protonation + ambig_str + recipe_str
inp_file = step_path / f'rigidbody_{identifier}.inp'
with open(inp_file, 'w') as fh:
fh.write(inp)
return inp_file
class HaddockModule(BaseHaddockModule):
def __init__(self, order, path, initial_params=DEFAULT_CONFIG):
cns_script = RECIPE_PATH / "cns" / "rigidbody.cns"
super().__init__(order, path, initial_params, cns_script)
@classmethod
def confirm_installation(cls):
return
def run(self, **params):
logger.info("Running [rigidbody] module")
super().run(params)
# Pool of jobs to be executed by the CNS engine
jobs = []
# Get the models generated in previous step
models_to_dock = [p for p in self.previous_io.output if p.file_type == Format.PDB]
# TODO: Make the topology acquisition generic, here it's expecting this module
# to be preceded by topology
topologies = [p for p in self.previous_io.output if p.file_type == Format.TOPOLOGY]
# Sampling
structure_list = []
for idx in range(params['sampling']):
inp_file = generate_docking(
idx,
models_to_dock,
self.path,
self.recipe_str,
self.params,
ambig=self.params.get('ambig', None),
)
out_file = self.path / f"rigidbody_{idx}.out"
structure_file = self.path / f"rigidbody_{idx}.pdb"
structure_list.append(structure_file)
job = CNSJob(
inp_file,
out_file,
cns_folder=self.cns_folder_path,
cns_exec=self.params['cns_exec'],
)
jobs.append(job)
# Run CNS engine
logger.info(f"Running CNS engine with {len(jobs)} jobs")
engine = Scheduler(jobs, ncores=self.params['ncores'])
engine.run()
logger.info("CNS engine has finished")
# Get the weights according to CNS parameters
_weight_keys = \
('w_vdw_0', 'w_elec_0', 'w_desolv_0', 'w_air_0', 'w_bsa_0')
weights = {e: self.params[e] for e in _weight_keys}
expected = []
not_found = []
for model in structure_list:
if not model.exists():
not_found.append(model.name)
continue
haddock_score = HaddockModel(model).calc_haddock_score(**weights)
pdb = PDBFile(model, path=self.path)
pdb.score = haddock_score
pdb.topology = topologies
expected.append(pdb)
if not_found:
# Check for generated output,
# fail if not all expected files are found
self.finish_with_error("Several files were not generated:"
f" {not_found}")
# Save module information
io = ModuleIO()
io.add(structure_list)
io.add(expected, "o")
io.save(self.path)
```
#### File: haddock3/tests/test_examples_general.py
```python
import importlib
from pathlib import Path
import pytest
from haddock.gear.config_reader import read_config
examples_path = Path(
Path(__file__).resolve().parents[1],
'examples',
)
examples_cfg_files = list(examples_path.rglob('*.cfg'))
def test_there_are_config_examples():
"""Test there are configuration files for examples."""
assert examples_cfg_files
@pytest.fixture(params=examples_cfg_files)
def example_config(request):
return request.param
def test_config_reader_can_read_example_configs(example_config):
"""Test gear.config_reader can read examples' configuration file."""
read_config(example_config)
``` |
{
"source": "joaomcteixeira/HyperSimpleDocstring2Md",
"score": 3
} |
#### File: joaomcteixeira/HyperSimpleDocstring2Md/hypersimpledocstring2md.py
```python
import os
import sys
import argparse
import itertools as it
import importlib.machinery
import inspect
import pydoc
from pathlib import Path
_descriptors = [
"class",
"method",
"function",
]
def get_args():
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument(
"path",
metavar="path",
type=str,
help="PATH to Library.",
)
ap.add_argument(
"--baselink",
metavar="baselink",
type=str,
default="",
help=(
"The base Web URL where the .md will be hosted to"
"allow Index-Header linking. Defaults to no link."
),
)
ap.add_argument(
"--toplink",
metavar="toplink",
type=str,
default=True,
help=(
"Adds a quick link to the Index bellow each header."
" Defaults to True."
),
)
ap.add_argument(
"--output",
default="docs.md",
help="The OUTPUT Markdown file. Defaults to 'docs.md'.",
)
return ap.parse_args()
def valid_folder(path_):
"""
Returns True if path_.match any of the conditions. False otherwise.
conditions assigned:
- "_*"
"""
# ignores dunders, __pycache__
conditions = (
"_*",
)
p = Path(path_)
return not(any(p.match(condition) for condition in conditions))
def valid_file(file_name):
"""
Returns True if file_name matches the condition assigned.
False otherwise.
"""
condition = (
file_name.endswith(".py")
and not(file_name.startswith("_")) or file_name == "__init__.py"
)
return condition
def load_module(path_):
"""
Loads module from Path object.
Returns the module
"""
module = importlib.machinery.SourceFileLoader(
path_.stem,
os.fspath(path_.resolve())
)
return module.load_module()
def goto_link(base_link, where):
return f"[Go to {where}]({base_link.rstrip('#')}#{where})\n"
def add_index(title, descriptor="", base_link="", spacer=0):
to_strip = ('()')
to_translate = str.maketrans(dict.fromkeys("."))
index = (
f"{spacer * ' '}- "
f"[{descriptor}... {title}]"
f"({base_link + title.rstrip(to_strip).translate(to_translate)})\n"
)
return index
def add_header(title, spacer=1):
"""
Adds a Markdown header for `title`, with the level set by `spacer`.
Example: add_header("module.py", spacer=2) returns "## module.py".
"""
return f"{spacer * '#'} {title}\n"
def read_docstring(object_):
"""
Returns object docstring without the FILE information.
"""
fmt = "```\n{}\n```\n"
docs = pydoc.plain(pydoc.render_doc(object_)).split("FILE")[0].rstrip()
return fmt.format(docs)
def gen_index_doc(
title,
object_,
spacer=1,
base_link="",
descriptor="",
toplink=False,
):
index = add_index(
title,
spacer=spacer,
base_link=base_link,
descriptor=descriptor,
)
# 6 is the Markdown subheader limit
# 5 and not 6 because of spacer + 1 in gen_index_doc
if spacer >= 5:
spacer = 5
doc = add_header(
title,
spacer=spacer + 1,
)
doc += goto_link(base_link, "Index") if toplink else ""
doc += read_docstring(object_)
return index, doc
def gen(name_, object_, doc_list, spacer=1, **kwargs):
"""
Recursive function.
"""
spacer += 1
doc_list.append(gen_index_doc(name_, object_, spacer=spacer, **kwargs))
_ = list(
filter(
lambda x: not(x[0].startswith("_")),
inspect.getmembers(object_)
),
) or [("1", "2")]
for name_, obj_ in _:
whatis = [
inspect.isclass(obj_),
inspect.ismethod(obj_),
inspect.isfunction(obj_),
]
if any(whatis):
gen(
name_,
obj_,
doc_list,
spacer=spacer,
**{
**kwargs,
"descriptor": list(it.compress(_descriptors, whatis))[0],
},
)
else:
return doc_list
def get_documentation(
module_abs_path,
spacer=1,
base_link="",
toplink=True,
index="",
documentation="",
):
module = load_module(module_abs_path)
if module_abs_path.name == "__init__.py":
title = module_abs_path.parent.name
descriptor = "package"
spacer = spacer - 1
else:
title = module_abs_path.name
descriptor = "module"
spacer = spacer
index_, doc_ = list(
zip(
*gen(
title,
module,
[],
spacer=spacer,
base_link=base_link,
toplink=toplink,
descriptor=descriptor,
)
)
)
index += "".join(index_)
documentation += "".join(doc_)
return index, documentation
if __name__ == "__main__":
args = get_args()
base_link = args.baselink + "#"
toplink = args.toplink
rootdir = Path(args.path).resolve()
rootlib = rootdir.name
sys.path.append(os.fspath(rootdir.parent))
lib_docs = [
get_documentation(
Path(folder).joinpath(file_),
spacer=len(Path(folder[folder.find(rootlib):]).parents) - 1,
base_link=base_link,
toplink=toplink,
)
for folder, _, files in os.walk(rootdir) if valid_folder(folder)
for file_ in sorted(files) if valid_file(file_)
]
index, docs = list(zip(*lib_docs))
with open(args.output, 'w') as output:
output.write("# Index\n" + "".join(index))
output.write("".join(docs))
print(f"* Saved: {args.output}")
``` |
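A usage sketch for the script above; `mylib` is a hypothetical package directory, and the programmatic call mirrors the `get_documentation` signature.
```python
from pathlib import Path

from hypersimpledocstring2md import get_documentation

# Command-line equivalent (run from the folder containing the script):
#   python hypersimpledocstring2md.py ./mylib --baselink https://example.org/docs.md
index, docs = get_documentation(
    Path("mylib/__init__.py"),    # hypothetical module path
    base_link="#",
    toplink=True,
    )
print(index)
```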
{
"source": "joaomcteixeira/md_scripts",
"score": 2
} |
#### File: md_scripts/utils/dcd2pdb.py
```python
from __future__ import print_function, division
import argparse
import logging
import os
import sys
import numpy as np
import mdtraj as md
import simtk.openmm.app as app # necessary for topology reading from mmCIF
# Format logger
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
def check_file(fpath):
"""Returns absolute path of file if it exists and is readable,
raises IOError otherwise"""
if os.path.isfile(fpath):
return os.path.abspath(fpath)
else:
raise IOError('File not found/readable: {}'.format(fpath))
if __name__ == '__main__':
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('topology', help='Topology file corresponding to DCD')
ap.add_argument('trajectory', help='DCD trajectory file')
ap.add_argument('--output', default=None,
help='Root for naming PDB files: root + _ + frame + .pdb (e.g. trj_1.pdb)')
ap.add_argument('--stride', default=1, type=int,
help='Read only i-th frame. Default: reads all (i=1)')
cmd = ap.parse_args()
# Read/Parse Topology
topology_fpath = check_file(cmd.topology)
if topology_fpath.endswith('cif'):
structure = app.PDBxFile(topology_fpath)
topology = md.Topology.from_openmm(structure.topology)
else:
structure = md.load(cmd.topology)
topology = structure.topology
logging.info('Read topology from file: {}'.format(topology_fpath))
# Read trajectory
trajectory_fpath = check_file(cmd.trajectory)
logging.info('Reading trajectory from file: {}'.format(trajectory_fpath))
trj = md.load(trajectory_fpath, top=topology,
stride=cmd.stride)
logging.info('Removing PBCs and imaging molecules')
topology.create_standard_bonds()
anchors = topology.find_molecules()
sorted_bonds = sorted(topology.bonds, key=lambda x: x[0].index)
sorted_bonds = np.asarray([[b0.index, b1.index] for b0, b1 in sorted_bonds])
trj.image_molecules(inplace=True, anchor_molecules=anchors, sorted_bonds=sorted_bonds, make_whole=True)
# Write PDBs
logging.info('Writing {} PDB files of {} atoms'.format(trj.n_frames, trj.n_atoms))
froot = 'frame' if cmd.output is None else cmd.output
n_frames = len(str(len(trj))) # 1: 1, 10: 2, 100: 3, ...
for idx, frame in enumerate(trj, start=1):
frame_name = froot + '_' + str(idx).zfill(n_frames) + '.pdb'
frame.save(frame_name, force_overwrite=True)
logging.info('Wrote frame {}/{} to \'{}\''.format(idx, trj.n_frames, frame_name))
logging.info('Done')
``` |
{
"source": "joaomcteixeira/passgen",
"score": 4
} |
#### File: joaomcteixeira/passgen/passgen.py
```python
import argparse
import random
import secrets
import string
from contextlib import suppress
class _JoinDisable(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
disable = ''.join(chars for d in values for chars in d).replace(" ", "")
setattr(namespace, self.dest, disable)
class InputError(Exception):
pass
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('-l', help='Password length. Defaults to 16.', default=16, type=int)
ap.add_argument('-lo', help='Disable lower case chars.', action="store_false")
ap.add_argument('-up', help='Disable upper case chars.', action="store_false")
ap.add_argument('-di', help='Disable digits.', action="store_false")
ap.add_argument('-pu', help='Disable punctuation.', action="store_false")
ap.add_argument(
'-D',
'--disable',
help=(
'Disable characters. Give a list of characters to disregard. '
'For example: --disable "A B u ( )". '
'To disable complex punctuation you might need to use the backslash: '
'--disable "\`" for example.'
),
nargs="*",
action=_JoinDisable,
)
chars_possibilities = {
'lower': string.ascii_lowercase,
'upper': string.ascii_uppercase,
'digits': string.digits,
'punctuation': string.punctuation,
}
def main(l=16, lo=True, up=True, di=True, pu=True, disable=None):
"""Create a password."""
original_len = l
# maps CLI choices
choices = {
'lower': lo,
'upper': up,
'digits': di,
'punctuation': pu,
}
CA = chars_possibilities
# disables chars. Simplistic implementation
if disable:
for char_type, chars in CA.items():
char_list = list(chars)
for char in disable:
with suppress(ValueError):
char_list.remove(char)
CA[char_type] = ''.join(char_list)
# gets one char for each type the user selected
pass_chars = []
for char_type, user_choice in choices.items():
if user_choice:
pass_chars.append(secrets.choice(CA[char_type]))
l -= 1
# all possible chars according to the user choices
all_chars = ''.join(opt for typ, opt in CA.items() if choices[typ])
# chooses chars for the remaining pass length
try:
rest_chars = [secrets.choice(all_chars) for _ in range(l)]
except IndexError:
raise InputError(
'There are no characters left to create a password. '
'You have likely disabled all of them. '
'Please review your input. '
) from None
# joins the first unique-type chars with the random selection
password = pass_chars + rest_chars
# further shuffles the password
len_range = list(range(original_len))
final_pass = ''
while len_range:
idx = secrets.choice(len_range)
final_pass += password[idx]
len_range.remove(idx)
print(final_pass)
if __name__ == '__main__':
args = vars(ap.parse_args())
main(**args)
``` |
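A usage sketch for the generator above; the keyword arguments mirror the argparse options (`pu=False` switches punctuation off, `disable` removes individual characters).
```python
from passgen import main

# Equivalent to:  python passgen.py -l 20 -pu -D "O 0 l 1 I"
main(l=20, pu=False, disable="O0l1I")   # prints a 20-char password, no punctuation
```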
{
"source": "joaomcteixeira/pdb-tools",
"score": 2
} |
#### File: pdb-tools/pdbtools/pdb_fetch.py
```python
import gzip
import re
import sys
# Python 3 vs Python 2
if sys.version_info[0] < 3:
from cStringIO import StringIO as IO
from urllib2 import Request, build_opener
from urllib2 import HTTPError
else:
from io import BytesIO as IO
from urllib.request import Request, build_opener
from urllib.error import HTTPError
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
option = False
if len(args) == 1:
# pdb code only
if not re.match(r'[0-9a-zA-Z]{4}$', args[0]):
emsg = 'ERROR!! Invalid PDB code: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
pdb_code = args[0]
elif len(args) == 2:
# biounit & pdb code
if not re.match(r'\-biounit$', args[0]):
emsg = 'ERROR!! Invalid option: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if not re.match(r'[0-9a-zA-Z]{4}$', args[1]):
emsg = 'ERROR!! Invalid PDB code: \'{}\'\n'
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = True
pdb_code = args[1]
else:
sys.stderr.write(__doc__)
sys.exit(1)
return (pdb_code, option)
def fetch_structure(pdbid, biounit=False):
"""Downloads the structure in PDB format from the RCSB PDB website.
"""
base_url = 'https://files.rcsb.org/download/'
pdb_type = '.pdb1' if biounit else '.pdb'
pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'
try:
request = Request(pdb_url)
opener = build_opener()
url_data = opener.open(request).read()
except HTTPError as e:
emsg = '[!] Error fetching structure: ({0}) {1}\n'
sys.stderr.write(emsg.format(e.code, e.msg))
return
else:
try:
buf = IO(url_data)
gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
for line in gz_handle:
yield line.decode('utf-8')
except IOError as e:
emsg = '[!] Error fetching structure: ({0}) {1}\n'
sys.stderr.write(emsg.format(e.code, e.msg))
return
finally:
gz_handle.close()
def main():
# Check Input
pdb_code, biounit = check_input(sys.argv[1:])
# Do the job
new_pdb = fetch_structure(pdb_code, biounit)
try:
_buffer = []
_buffer_size = 5000 # write N lines at a time
for lineno, line in enumerate(new_pdb):
if not (lineno % _buffer_size):
sys.stdout.write(''.join(_buffer))
_buffer = []
_buffer.append(line)
sys.stdout.write(''.join(_buffer))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
sys.exit(0)
if __name__ == '__main__':
main()
```
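A sketch of using the fetch function programmatically; `1crn` is just an example PDB code and network access to the RCSB is required.
```python
from pdbtools.pdb_fetch import fetch_structure

# Stream the decompressed PDB entry and keep coordinate records only
atom_lines = [
    line for line in fetch_structure("1crn")
    if line.startswith(("ATOM", "HETATM"))
    ]
print(len(atom_lines))

# First biological assembly instead of the asymmetric unit
biounit = list(fetch_structure("1crn", biounit=True))
```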
#### File: pdb-tools/pdbtools/pdb_splitseg.py
```python
import os
import sys
__author__ = "<NAME>"
__email__ = "<EMAIL>"
USAGE = __doc__.format(__author__, __email__)
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
else: # Whatever ...
emsg = 'ERROR!! Script takes 1 argument, not \'{}\'\n'
sys.stderr.write(emsg.format(len(args)))
sys.stderr.write(__doc__)
sys.exit(1)
return fh
def split_segment(fhandle):
"""Splits the contents of the PDB file into new files, each containing a
segment of the original file.
"""
fname_root = fhandle.name[:-4] if fhandle.name != '<stdin>' else 'output'
basename = os.path.basename(fname_root)
segment_data = {} # {segment_id: lines}
prev_segment = None
records = ('ATOM', 'HETATM', 'ANISOU', 'TER')
for line in fhandle:
if line.startswith(records):
line_segment = line[72:76].strip()
if line_segment != prev_segment:
if line_segment not in segment_data:
segment_data[line_segment] = []
prev_segment = line_segment
segment_data[line_segment].append(line)
for segment_id in sorted(segment_data.keys()):
if not segment_id:
continue # skip empty segment
lines = segment_data[segment_id]
with open(basename + '_' + segment_id + '.pdb', 'w') as fh:
fh.write(''.join(lines))
def main():
# Check Input
pdbfh = check_input(sys.argv[1:])
# Do the job
split_segment(pdbfh)
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: pdb-tools/tests/test_pdb_selchain.py
```python
import os
import sys
import unittest
from config import data_dir
from utils import OutputCapture
class TestTool(unittest.TestCase):
"""
Generic class for testing tools.
"""
def setUp(self):
# Dynamically import the module
name = 'pdbtools.pdb_selchain'
self.module = __import__(name, fromlist=[''])
def exec_module(self):
"""
Execs module.
"""
with OutputCapture() as output:
try:
self.module.main()
except SystemExit as e:
self.retcode = e.code
self.stdout = output.stdout
self.stderr = output.stderr
return
def test_one_option(self):
"""$ pdb_selchain -A data/dummy.pdb"""
# Simulate input
# pdb_selchain dummy.pdb
sys.argv = ['', '-A', os.path.join(data_dir, 'dummy.pdb')]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 76) # selected c.A
self.assertEqual(len(self.stderr), 0) # no errors
def test_one_option_CAPS_lowercase(self):
"""$ pdb_selchain -A data/dummy_az09.pdb"""
# Simulate input
# pdb_selchain dummy_az09.pdb
sys.argv = ['', '-A', os.path.join(data_dir, 'dummy_az09.pdb')]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 76) # selected c.A
self.assertEqual(len(self.stderr), 0) # no errors
def test_one_option_lowercase(self):
"""$ pdb_selchain -b data/dummy_az09.pdb"""
# Simulate input
# pdb_selchain dummy.pdb
sys.argv = ['', '-b', os.path.join(data_dir, 'dummy_az09.pdb')]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 69) # selected c.b
self.assertEqual(len(self.stderr), 0) # no errors
def test_one_option_digit(self):
"""$ pdb_selchain -1 data/dummy_az09.pdb"""
# Simulate input
# pdb_selchain dummy.pdb
sys.argv = ['', '-1', os.path.join(data_dir, 'dummy_az09.pdb')]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 71) # selected c.1
self.assertEqual(len(self.stderr), 0) # no errors
def test_multiple(self):
"""
$ pdb_selchain -A,B data/dummy.pdb
"""
sys.argv = ['', '-A,B', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 0)
self.assertEqual(len(self.stdout), 129) # c.A + c.B
self.assertEqual(len(self.stderr), 0)
def test_file_not_found(self):
"""$ pdb_selchain not_existing.pdb"""
afile = os.path.join(data_dir, 'not_existing.pdb')
sys.argv = ['', afile]
self.exec_module()
self.assertEqual(self.retcode, 1) # exit code is 1 (error)
self.assertEqual(len(self.stdout), 0) # nothing written to stdout
self.assertEqual(self.stderr[0][:22],
"ERROR!! File not found") # proper error message
def test_file_missing(self):
"""$ pdb_selchain -A"""
sys.argv = ['', '-A']
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(self.stderr[0],
"ERROR!! No data to process!")
def test_helptext(self):
"""$ pdb_selchain"""
sys.argv = ['']
self.exec_module()
self.assertEqual(self.retcode, 1) # ensure the program exited gracefully.
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1])
def test_invalid_option(self):
"""$ pdb_selchain data/dummy.pdb"""
sys.argv = ['', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0)
self.assertEqual(self.stderr[0][:47],
"ERROR!! You must provide at least one chain ide")
def test_invalid_option_2(self):
"""$ pdb_selchain -AB data/dummy.pdb"""
sys.argv = ['', '-AB', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0)
self.assertEqual(self.stderr[0][:40],
"ERROR!! Chain identifier name is invalid")
def test_not_an_option(self):
"""$ pdb_selchain 20 data/dummy.pdb"""
sys.argv = ['', '20', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0)
self.assertEqual(self.stderr[0],
"ERROR! First argument is not an option: '20'")
if __name__ == '__main__':
from config import test_dir
mpath = os.path.abspath(os.path.join(test_dir, '..'))
sys.path.insert(0, mpath) # so we load dev files before any installation
unittest.main()
```
#### File: pdb-tools/tests/test_pdb_splitmodel.py
```python
import os
import shutil
import sys
import tempfile
import unittest
from config import data_dir
from utils import OutputCapture
class TestTool(unittest.TestCase):
"""
Generic class for testing tools.
"""
def setUp(self):
# Dynamically import the module
name = 'pdbtools.pdb_splitmodel'
self.module = __import__(name, fromlist=[''])
self.tempdir = tempfile.mkdtemp() # set temp dir
os.chdir(self.tempdir)
def tearDown(self):
os.chdir(os.path.dirname(os.path.abspath('.'))) # cd ../
shutil.rmtree(self.tempdir)
def exec_module(self):
"""
Execs module.
"""
with OutputCapture() as output:
try:
self.module.main()
except SystemExit as e:
self.retcode = e.code
self.stdout = output.stdout
self.stderr = output.stderr
return
def test_default(self):
"""$ pdb_splitmodel data/ensemble_OK.pdb"""
# Copy input file to tempdir
# Simulate input
src = os.path.join(data_dir, 'ensemble_OK.pdb')
dst = os.path.join(self.tempdir, 'ensemble_OK.pdb')
shutil.copy(src, dst)
sys.argv = ['', dst]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(len(self.stderr), 0) # no errors
# Read files created by script
ofiles = [f for f in os.listdir(self.tempdir)
if f.startswith('ensemble_OK')]
self.assertEqual(len(ofiles), 2 + 1) # ori + 2 models
for fpath in ofiles:
if fpath == 'ensemble_OK.pdb':
continue
with open(os.path.join(self.tempdir, fpath), 'r') as handle:
n_lines = len(handle.readlines())
self.assertEqual(n_lines, 2)
def test_file_not_found(self):
"""$ pdb_splitmodel not_existing.pdb"""
afile = os.path.join(data_dir, 'not_existing.pdb')
sys.argv = ['', afile]
self.exec_module()
self.assertEqual(self.retcode, 1) # exit code is 1 (error)
self.assertEqual(len(self.stdout), 0) # nothing written to stdout
self.assertEqual(self.stderr[0][:22],
"ERROR!! File not found") # proper error message
def test_file_missing(self):
"""$ pdb_splitmodel -10"""
sys.argv = ['', '-10']
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(self.stderr[0][:38],
"ERROR!! File not found or not readable")
def test_helptext(self):
"""$ pdb_splitmodel"""
sys.argv = ['']
self.exec_module()
self.assertEqual(self.retcode, 1) # ensure the program exited gracefully.
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1])
def test_invalid_option(self):
"""$ pdb_splitmodel -A data/ensemble_OK.pdb"""
sys.argv = ['', '-A', os.path.join(data_dir, 'ensemble_OK.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0)
self.assertEqual(self.stderr[0][:22],
"ERROR!! Script takes 1") # proper error message
if __name__ == '__main__':
from config import test_dir
mpath = os.path.abspath(os.path.join(test_dir, '..'))
sys.path.insert(0, mpath) # so we load dev files before any installation
unittest.main()
``` |
{
"source": "joaomcteixeira/python-bioplottemplates",
"score": 2
} |
#### File: src/bioplottemplates/cli_labeldots.py
```python
import argparse
from bioplottemplates.libs import libcli, libio
from bioplottemplates.plots import label_dots
ap = libcli.CustomParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
ap.add_argument(
'data_csv',
help='The CSVs files to plot',
nargs='+',
)
ap.add_argument(
'-v',
'--plotvars',
help=(
'Plot variables. '
'Example: -v xlabel=frames ylabel=RMSD color=red.'
),
nargs='*',
action=libcli.ParamsToDict,
)
def maincli():
cmd = ap.parse_args()
main(**vars(cmd))
def main(data_csv, plotvars, **kwargs):
data, labels = libio.extract_labels_data(*data_csv)
plotvars = plotvars or dict()
plotvars.setdefault('series_labels', data_csv)
print(plotvars['series_labels'])
label_dots.plot(
labels,
data,
**plotvars,
)
pass
if __name__ == '__main__':
maincli()
```
#### File: bioplottemplates/plots/label_dots.py
```python
import itertools
import numpy as np
from matplotlib import pyplot as plt
from bioplottemplates import log
from bioplottemplates.libs import libmsg, libutil
from bioplottemplates.logger import S, T
def plot(
x_labels,
y_data,
title=None,
xlabel=None,
ylabel=None,
series_labels=None,
legend=True,
legend_fs=6,
legend_loc=4,
numeric_x_labels=False,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k'),
alpha=0.7,
grid=True,
grid_color="lightgrey",
grid_ls="-",
grid_lw=1,
grid_alpha=0.5,
figsize=(10, 6),
filename='plot_param.pdf',
**kwargs,
):
"""
"""
log.info(T('Plotting Labeled Dots'))
# prepares data
if isinstance(y_data, (list, np.ndarray)):
if not isinstance(y_data[0], (list, np.ndarray)):
y_data = [y_data]
else:
raise ValueError('y_data must be list or np.ndarray')
if series_labels is None:
series_labels = [None] * len(y_data)
elif not isinstance(series_labels, list):
series_labels = [series_labels]
plot_colors = itertools.cycle(libutil.make_list(colors))
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
if xlabel:
plt.tight_layout(rect=[0.05, 0.15, 0.995, 0.985])
else:
plt.tight_layout(rect=[0.05, 0.10, 0.995, 0.985])
fig.suptitle(
title,
x=0.5,
y=0.990,
va="top",
ha="center",
)
#ax.margins(x=1)
for i, yy in enumerate(y_data):
ax.scatter(
range(int(numeric_x_labels), int(numeric_x_labels) + len(yy)),
yy,
label=series_labels[i],
color=next(plot_colors),
alpha=alpha,
zorder=10)
ax.set_xlabel(xlabel, weight='bold')
ax.set_ylabel(ylabel, weight='bold')
#ax.set_xlim(x_data[0], x_data[-1])
ax.set_ylim(0)
if numeric_x_labels:
ax.set_xlim(numeric_x_labels, len(x_labels))
else:
ax.set_xticks(range(len(x_labels)))
ax.set_xticklabels(x_labels, rotation=90)
if grid:
ax.grid(
which='major',
color=grid_color,
linestyle=grid_ls,
linewidth=grid_lw,
alpha=grid_alpha,
zorder=1,
)
if legend:
ax.legend(
fontsize=legend_fs,
loc=legend_loc,
)
fig.savefig(filename)
``` |
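A sketch of calling the plotting function above directly; the labels and values are illustrative only.
```python
from bioplottemplates.plots import label_dots

labels = ["helix_1", "helix_2", "loop_A", "loop_B"]
rmsd_run1 = [1.2, 0.8, 2.5, 3.1]
rmsd_run2 = [1.0, 0.9, 2.2, 3.4]

label_dots.plot(
    labels,
    [rmsd_run1, rmsd_run2],
    title="Per-region RMSD",
    ylabel="RMSD",
    series_labels=["run 1", "run 2"],
    filename="rmsd_dots.pdf",
    )
```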
{
"source": "joaometocean/seapy",
"score": 2
} |
#### File: seapy/roms/obs.py
```python
import numpy as np
import netCDF4
import seapy
from collections import namedtuple
from warnings import warn
# Define a named tuple to store raw data for the gridder
raw_data = namedtuple('raw_data', 'type provenance values error min_error')
# Define the observation type
obs_types = {
1: "ZETA",
2: "UBAR",
3: "VBAR",
4: "U",
5: "V",
6: "TEMP",
7: "SALT",
20: "RADIAL"
}
# Define the observation provenances used within my applications
obs_provenance = {
0: "UNKNOWN",
100: "GLIDER",
102: "GLIDER_SG022",
103: "GLIDER_SG023",
114: "GLIDER_SG114",
139: "GLIDER_SG139",
146: "GLIDER_SG146",
147: "GLIDER_SG147",
148: "GLIDER_SG148",
150: "GLIDER_SG500",
151: "GLIDER_SG511",
152: "GLIDER_SG512",
153: "GLIDER_SG513",
154: "GLIDER_SG523",
155: "GLIDER_SG626",
200: "CTD",
210: "CTD_HOT",
220: "CTD_ARGO",
230: "CORA_T",
240: "CORA_S",
300: "SST",
301: "SST_OSTIA",
315: "SST_NAVO_MAP",
317: "SST_AVHRR_17",
318: "SST_AVHRR_18",
330: "SST_MODIS_AQUA",
331: "SST_MODIS_TERRA",
332: "SST_VIIRS",
340: "SST_REMSS",
341: "SST_AMSRE",
342: "SST_TMI",
400: "SSH",
410: "SSH_AVISO_MAP",
411: "SSH_AVISO_TOPEX_POSEIDON",
412: "SSH_AVISO_JASON1",
413: "SSH_AVISO_JASON2",
414: "SSH_AVISO_JASON3",
420: "SSH_AVISO_GFO",
421: "SSH_AVISO_ENVISAT",
422: "SSH_AVISO_ERS1",
423: "SSH_AVISO_ERS2",
430: "SSH_AVISO_ALTIKA",
431: "SSH_AVISO_CRYOSAT2",
432: "SSH_AVISO_HAIYANG",
433: "SSH_AVISO_SENTINEL3A",
450: "SSH_HYCOM",
460: "SSS_AQUARIUS",
500: "DRIFTERS",
600: "RADAR",
610: "RADAR_KOK",
620: "RADAR_KAK",
630: "RADAR_KAL",
640: "RADAR_KAP",
650: "RADAR_KNA",
660: "RADAR_KKH",
670: "RADAR_PPK",
700: "ADCP",
800: "MOORING",
810: "TAO_ARRAY"
}
def _type_from_string(s):
"""
PRIVATE method: Search the type dictionary for the key of the
given the value. If the key isn't a string or properly resolves, try to
return the value as such
"""
try:
return list(obs_types.keys())[
list(obs_types.values()).index(s.upper())]
except AttributeError:
return int(s)
def _provenance_from_string(s):
"""
PRIVATE method: Search the provenance dictionary for the key of the
given the value. If the key isn't a string or properly resolves, try to
return the value as such
"""
try:
return list(obs_provenance.keys())[
list(obs_provenance.values()).index(s.upper())]
except AttributeError:
return int(s)
def asobs(obs):
"""
Return the input as an observation array if possible. If the parameter
is already an observation, just return; otherwise, create a new class.
Parameters
----------
obs: obs class, string, or list
what to cast as observation
Output
------
obs: seapy.roms.obs.obs
"""
if obs is None:
raise AttributeError("No obs were specified")
if isinstance(obs, seapy.roms.obs.obs):
return obs
else:
return seapy.roms.obs.obs(filename=obs)
def astype(otype):
"""
Return the integer type of the given observation array.
Input
-----
type: ndarray,
List of types to put into integer form
Output
------
types: array,
List of integer types
"""
otype = np.atleast_1d(otype)
if otype.dtype.type == np.str_:
return np.array([_type_from_string(s) for s in otype])
else:
return otype
def asprovenance(prov):
"""
Return the integer provenance of the given provenance array.
Input
-----
prov: array,
List of provenances to put into integer form
Output
------
provs: ndarray,
List of integer provenances
"""
prov = np.atleast_1d(prov)
if prov.dtype.type == np.str_:
return np.array([_provenance_from_string(s) for s in prov])
else:
return prov
class obs:
def __init__(self, filename=None, time=None, x=None, y=None, z=None,
lat=None, lon=None, depth=None, value=None, error=None,
type=None, provenance=None, meta=None,
title="ROMS Observations"):
"""
Class to deal with ROMS observations for data assimilation
Parameters
----------
filename : string or list, optional,
if filename is given, the data are loaded from a netcdf file
time : ndarray, optional,
time of observation in days
x : ndarray, optional,
obs location on grid in x (eta)
y : ndarray, optional,
obs location on grid in y (xi)
z : ndarray, optional,
obs location on grid in z (positive layers or negative depth [m])
lat : ndarray, optional,
obs true latitude [deg]
lon : ndarray, optional,
obs true longitude [deg]
depth : ndarray, optional,
obs true depth [m]
value : ndarray, optional,
obs value [units]
error : ndarray, optional,
obs error [units**2]
type : ndarray, optional,
obs type [1-zeta, 2-ubar, 3-vbar, 4-u, 5-v, 6-temp, 7-salt]
provenance : ndarray, optional,
obs provenance
meta : ndarray, optional,
obs additional information
"""
self.title = title
if filename is not None:
nc = seapy.netcdf(filename)
# Construct an array from the data in the file. If obs_meta
# exists in the file, then load it; otherwise, fill with zeros
self.filename = filename
self.time = nc.variables["obs_time"][:]
self.x = nc.variables["obs_Xgrid"][:]
self.y = nc.variables["obs_Ygrid"][:]
self.z = nc.variables["obs_Zgrid"][:]
self.lat = nc.variables["obs_lat"][:]
self.lon = nc.variables["obs_lon"][:]
self.depth = nc.variables["obs_depth"][:]
self.value = nc.variables["obs_value"][:]
self.error = nc.variables["obs_error"][:]
self.type = nc.variables["obs_type"][:]
self.provenance = nc.variables["obs_provenance"][:]
# Update the provenance definitions
try:
obs_provenance.update(dict((int(k.strip()), v.strip())
for v, k in
(it.split(':') for it in
nc.obs_provenance.split(','))))
except (AttributeError, ValueError):
pass
try:
self.meta = nc.variables["obs_meta"][:]
except KeyError:
self.meta = np.zeros(self.value.size)
finally:
nc.close()
else:
self.filename = None
if time is not None:
self.time = np.atleast_1d(time)
if x is not None:
self.x = np.atleast_1d(x)
if y is not None:
self.y = np.atleast_1d(y)
if z is not None:
self.z = np.atleast_1d(z)
if lat is not None:
self.lat = np.atleast_1d(lat)
if lon is not None:
self.lon = np.atleast_1d(lon)
if depth is not None:
self.depth = np.atleast_1d(depth)
if value is not None:
self.value = np.atleast_1d(value)
if error is not None:
self.error = np.atleast_1d(error)
if type is not None:
self.type = astype(type)
if provenance is not None:
self.provenance = asprovenance(provenance)
else:
self.provenance = 0
if meta is not None:
self.meta = np.atleast_1d(meta)
self._consistent()
def _consistent(self):
"""
PRIVATE method: try to make the structure self-consistent. Throw
an exception if not possible.
"""
# Make sure required arrays are a 1d array
self.time = self.time.ravel()
self.x = self.x.ravel()
self.y = self.y.ravel()
self.value = self.value.ravel()
self.error = self.error.ravel()
self.type = astype(self.type.ravel())
lt = self.time.size
if not lt == self.x.size == self.y.size == \
self.value.size == self.error.size == self.type.size:
# If these lengths are not equal, then there is a serious issue
raise ValueError(
"Lengths of observation attributes are not equal.")
else:
# For the others, we can pad the information to ensure
# consistency
def _resizearr(key, n):
arr = getattr(self, key, np.zeros(n))
if arr.size == n:
return arr
return np.resize(arr, n)
self.z = _resizearr('z', lt)
self.lat = _resizearr('lat', lt)
self.lon = _resizearr('lon', lt)
self.depth = _resizearr('depth', lt)
self.provenance = asprovenance(_resizearr('provenance', lt))
self.meta = _resizearr('meta', lt)
# Eliminate bad values
good_vals = np.logical_and.reduce((
np.isfinite(self.value),
np.isfinite(self.x),
np.isfinite(self.y),
np.isfinite(self.error),
np.isfinite(self.time)))
if np.any(~good_vals):
            self.delete(np.where(~good_vals))
# Set the shape parameter
self.shape = self.value.shape
# Ensure consistency in depth and z
self.z[self.depth > 0] = self.depth[self.depth > 0]
def __len__(self):
self.shape = self.value.shape
return self.value.size
def __getitem__(self, l):
return obs(time=self.time[l], x=self.x[l], y=self.y[l],
z=self.z[l], lon=self.lon[l], lat=self.lat[l],
depth=self.depth[l], value=self.value[l],
error=self.error[l], type=self.type[l],
provenance=self.provenance[l], meta=self.meta[l])
def __setitem__(self, l, new_obs):
if not isinstance(new_obs, seapy.roms.obs.obs):
raise TypeError("Trying to assign obs to a non-obs type.")
self.time[l] = new_obs.time
self.x[l] = new_obs.x
self.y[l] = new_obs.y
self.z[l] = new_obs.z
self.lon[l] = new_obs.lon
self.lat[l] = new_obs.lat
self.depth[l] = new_obs.depth
self.value[l] = new_obs.value
self.error[l] = new_obs.error
self.type[l] = new_obs.type
self.provenance[l] = new_obs.provenance
self.meta[l] = new_obs.meta
self._consistent()
def __repr__(self):
return "< {:d} obs: {:.1f} to {:.1f} >".format(self.value.size,
np.min(self.time), np.max(self.time))
def __str__(self):
return "\n".join([repr(self), "\n".join(
"{:.3f}, [{:s}:{:s}] ({:.2f},{:.2f},{:.2f}) = {:.4f} +/- {:.4f}".format(
t, obs_types[self.type[n]],
obs_provenance.get(self.provenance[n], "UNKNOWN"),
self.lon[n], self.lat[n], self.depth[n],
self.value[n], self.error[n])
for n, t in enumerate(self.time))])
def add(self, new_obs):
"""
Add another class of obs into this one
Parameters
----------
new_obs : obs,
Class of obs to append to the existing
Returns
-------
None
Examples
--------
Load a list from netcdf, then append a new set of values
>>> a=obs("test.nc")
>>> b=obs(time=4,x=3.2,y=2.8,z=0,value=23.44,error=0.5,type="temp",
>>> provenance="glider")
>>> a.add(b)
"""
self._consistent()
new_obs._consistent()
self.time = np.append(self.time, new_obs.time)
self.x = np.append(self.x, new_obs.x)
self.y = np.append(self.y, new_obs.y)
self.z = np.append(self.z, new_obs.z)
self.lat = np.append(self.lat, new_obs.lat)
self.lon = np.append(self.lon, new_obs.lon)
self.depth = np.append(self.depth, new_obs.depth)
self.value = np.append(self.value, new_obs.value)
self.error = np.append(self.error, new_obs.error)
self.type = np.append(self.type, new_obs.type)
self.provenance = np.append(self.provenance, new_obs.provenance)
self.meta = np.append(self.meta, new_obs.meta)
def copy(self):
"""
deep copy this class and return the new copy.
Returns
-------
obs : obs,
deep copy of the class
"""
import copy
return copy.deepcopy(self)
def delete(self, obj):
"""
delete observations from the record.
Parameters
----------
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
Returns
-------
Nothing: updates the class arrays
Examples
--------
Delete every other observation
>>> myobs.delete(np.s_[::2])
"""
self.time = np.delete(self.time, obj)
self.x = np.delete(self.x, obj)
self.y = np.delete(self.y, obj)
self.z = np.delete(self.z, obj)
self.lat = np.delete(self.lat, obj)
self.lon = np.delete(self.lon, obj)
self.depth = np.delete(self.depth, obj)
self.value = np.delete(self.value, obj)
self.error = np.delete(self.error, obj)
self.type = np.delete(self.type, obj)
self.provenance = np.delete(self.provenance, obj)
self.meta = np.delete(self.meta, obj)
def create_survey(self, dt=0):
"""
Build the survey structure from the observations
"""
# Generate the sort list
self.sort = np.argsort(self.time, kind='mergesort')
# Build the survey structure
times, counts = np.unique(self.time[self.sort], return_counts=True)
# Make sure everything is within dt
if dt:
delta = np.diff(times)
while np.any(delta < dt):
idx = np.argmin(delta)
self.time[self.time == times[idx + 1]] = times[idx]
times[idx + 1] = times[idx]
times = np.unique(times)
delta = np.diff(times)
# Re-sort the surveys
times, counts = np.unique(self.time[self.sort], return_counts=True)
self.survey_time = times
self.nobs = counts
def to_netcdf(self, filename=None, dt=0, clobber=True):
"""
Write out the observations into the specified netcdf file
Parameters
----------
filename : string, optional
name of file to save. If obs were loaded from a file and filename
is not specified, then write to the same.
dt : float,
ensure data are at least separated in time by dt; otherwise,
make as part of same survey
clobber : bool, optional
if True, any existing file is overwritten
"""
import os
# Check filename
if filename is None and self.filename is not None:
filename = self.filename
if filename is None:
error("No filename given")
# Save out the observations by survey
self._consistent()
self.create_survey(dt)
if not self.value.size:
warn(
"No observations are available to be written to {:s}".format(filename))
return None
if not clobber and os.path.exists(filename):
warn("{:s} exists with no clobber.".format(filename))
return None
state_vars = np.maximum(7, np.max(self.type))
nc = seapy.roms.ncgen.create_da_obs(filename,
survey=self.survey_time.size,
state_variable=state_vars,
provenance=','.join((':'.join(
(obs_provenance.get(v, "UNKNOWN"), str(v)))
for v in np.unique(self.provenance))),
clobber=True, title=self.title)
nc.variables["spherical"][:] = 1
nc.variables["Nobs"][:] = self.nobs
nc.variables["survey_time"][:] = self.survey_time
nc.variables["obs_variance"][:] = np.ones(state_vars) * 0.1
nc.variables["obs_time"][:] = self.time[self.sort]
# setattr(nc.variables['obs_time'], 'units', 'days since ' + self.reftime[0].strftime("%Y-%m-%d %H:%M:%S") + ' GMT')
# setattr(nc.variables['survey_time'], 'units', 'days since ' + self.reftime[0].strftime("%Y-%m-%d %H:%M:%S") + ' GMT')
setattr(nc.variables['obs_time'], 'units', 'days since ' + self.reftime.strftime("%Y-%m-%d %H:%M:%S") + ' GMT')
setattr(nc.variables['survey_time'], 'units', 'days since ' + self.reftime.strftime("%Y-%m-%d %H:%M:%S") + ' GMT')
nc.variables["obs_Xgrid"][:] = self.x[self.sort]
nc.variables["obs_Ygrid"][:] = self.y[self.sort]
nc.variables["obs_Zgrid"][:] = self.z[self.sort]
nc.variables["obs_lat"][:] = self.lat[self.sort]
nc.variables["obs_lon"][:] = self.lon[self.sort]
nc.variables["obs_depth"][:] = self.depth[self.sort]
nc.variables["obs_value"][:] = self.value[self.sort]
nc.variables["obs_error"][:] = self.error[self.sort]
nc.variables["obs_type"][:] = self.type[self.sort]
nc.variables["obs_provenance"][:] = self.provenance[self.sort]
nc.variables["obs_meta"][:] = self.meta[self.sort]
nc.close()
def gridder(grid, time, lon, lat, depth, data, dt, depth_adjust=False,
title='ROMS Observations'):
"""
Construct an observations set from raw observations by placing them
onto a grid.
Parameters
----------
grid : seapy.model.grid or filename string,
Grid to place the raw observations onto
time : ndarray,
Time of the observations. This can be a scalar and all values
will be assigned to the single time; otherwise, there must be a
corresponding time to each value in the data.
lon : ndarray,
longitude of the observations. This can be a scalar and all values
will be assigned to the single location; otherwise, there must be a
corresponding longitude to each value in the data.
lat : ndarray,
latitude of the observations. This can be a scalar and all values
will be assigned to the single location; otherwise, there must be a
corresponding latitude to each value in the data.
depth : ndarray or None,
depth of the observations. If None, then all values are placed on
the surface; otherwise, must be a corresponding depth for each
value in the data.
data : list of named tuples of seapy.roms.obs.raw_data,
This list is comprised of each set of observation data types that
are to be gridded together. If there is only one type (e.g.,
SSH observations, there is only one item). An Argo float would have
two items in the list (temperature and salinity observations).
The list is comprised of named tuples of the raw observations
with the following fields:
"type" : string (or integer) of the type from
seapy.roms.obs.obs_types
"provenance" : string (or integer) of the type from
seapy.roms.obs.obs_provenance
"values" : ndarray of actual observed values in units
for type
"error" : ndarray (or None) of individual observational
uncertainty (same units of values). If not known,
use None
"min_error" : float of the minimum error that should be
prescribed to the observations (typically,
the instrument error) in the same units of
values.
dt : float
The bin size of time for observations to be considered at the
same time. The units must be the same as the provided time.
title : string, optional,
Title to assign the observations structure for output
Returns
-------
obs : seapy.obs class
Resulting observations from the raw data as placed onto grid.
Examples
--------
A profile of temp and salt observations at a given lat/lon:
>>> obs = seapy.obs.gridder(grid, times, lon, lat,
[ seapy.roms.obs.raw_data("TEMP", "CTD_ARGO", temp, None, 0.1),
seapy.roms.obs.raw_data("SALT", "CTD_ARGO", salt, None, 0.05)],
dt = 1/24, title="Argo")
Satellite Data from a number of lat/lons at a single time
>>> obs = seapy.obs.gridder(grid, time, lon, lat,
seapy.roms.obs.raw_data("ZETA", "SSH_AVISO", sla, sla_err, 0.05),
dt = 2/24, title="SSH")
These will generate new observation structures from the raw data.
"""
from numpy_groupies import aggregate
# Make sure the input is of the proper form
grid = seapy.model.asgrid(grid)
time = np.atleast_1d(time)
lon = np.atleast_1d(lon)
lat = np.atleast_1d(lat)
# First, before relying on gridding, extract only the data that are
# encompassed by the grid
region_list = np.where(np.logical_and.reduce((
lat >= np.min(grid.lat_rho), lat <= np.max(grid.lat_rho),
lon >= np.min(grid.lon_rho), lon <= np.max(grid.lon_rho))))
if not np.any(region_list):
warn("No observations were located within grid region_list")
return None
lat = lat[region_list]
lon = lon[region_list]
# Get the appropriate k-dimension depending on whether the data
# are 2-D or 3-D
if depth is None:
# Get the grid locations from the data locations
subsurface_values = False
(j, i) = grid.ij((lon, lat))
#depth = grid.n * np.ones(i.size)
depth = np.zeros(i.size)
k = np.ma.array(np.resize(grid.n, i.size))
else:
# Get the grid locations from the data locations
subsurface_values = True
depth = np.atleast_1d(depth)[region_list]
(k, j, i) = grid.ijk((lon, lat, depth), depth_adjust)
# Sub-select only the points that lie on our grid
valid_list = np.where((~i.mask * ~j.mask * ~k.mask) == True)
i = i[valid_list].compressed()
j = j[valid_list].compressed()
k = k[valid_list].compressed()
depth = depth[valid_list]
# Make sure the times are consistent and in dt-space
if time.size == 1:
time = np.resize(time, valid_list[0].size)
else:
time = time[region_list][valid_list]
dtime = np.floor(time / dt)
# Loop over all time intervals putting everything together. NOTE: The
# preference is to use aggregate over the time-dimension just as we do
# in the spatial-dimension; however, this led to crashing.
ot = list()
ox = list()
oy = list()
oz = list()
odep = list()
olon = list()
olat = list()
oval = list()
oerr = list()
oprov = list()
otype = list()
for t in seapy.progressbar.progress(np.unique(dtime)):
time_list = np.where(dtime == t)
mtime = np.nanmean(time[time_list])
for v in data:
valid_data = np.s_[:]
if isinstance(v.values, np.ma.core.MaskedArray):
valid_data = \
(v.values[region_list][valid_list][time_list].nonzero())[0]
if not valid_data.size:
continue
# Put together the indices based on the type of data we have
if subsurface_values:
idx = (k[time_list][valid_data],
j[time_list][valid_data],
i[time_list][valid_data])
else:
idx = (j[time_list][valid_data],
i[time_list][valid_data])
indices = np.floor(idx).astype(int)
# Grid the data onto our grid and compute the mean and variance
ii = aggregate(indices, i[time_list][valid_data], func='mean')
jj = aggregate(indices, j[time_list][valid_data], func='mean')
binned = np.where(ii * jj > 0)
ii = ii[binned].ravel()
jj = jj[binned].ravel()
(latl, lonl) = grid.latlon((ii, jj))
Nd = ii.size
# Put the co-located values together
nvalues = aggregate(indices,
v.values[region_list][valid_list][
time_list][valid_data],
func='mean')
# Get their variance
vari = aggregate(indices,
v.values[region_list][valid_list][
time_list][valid_data],
func='var')
# Put together the known observation values
if v.error is not None:
errs = aggregate(indices,
v.error[region_list][valid_list][
time_list][valid_data]**2,
func='mean')
errs = errs[binned].flatten()
else:
errs = 0.0
# Build the depth vectors
if subsurface_values:
dd = aggregate(indices, depth[time_list][valid_data],
func='mean')
kk = aggregate(indices, k[time_list][valid_data],
func='mean')
dd = dd[binned].ravel()
# ROMS counts from 1 for depth layers
kk = kk[binned].ravel() + 1
else:
kk = np.resize(grid.n, Nd)
dd = np.zeros(kk.size)
#dd = kk
# Put all of the data from this time into our lists
ot.append(np.resize(mtime, Nd))
ox.append(ii)
oy.append(jj)
oz.append(kk)
odep.append(dd)
olon.append(lonl)
olat.append(latl)
oval.append(nvalues[binned].flatten())
otype.append(np.resize(seapy.roms.obs.astype(v.type), Nd))
oprov.append(np.resize(
seapy.roms.obs.asprovenance(v.provenance), Nd))
oerr.append(np.maximum(v.min_error**2,
np.maximum(vari[binned].flatten(),
errs)))
# Make sure that we have something relevant
if not oval:
return None
# Put everything together and create an observation class
return seapy.roms.obs.obs(time=np.hstack(ot).ravel(),
x=np.hstack(ox).ravel(),
y=np.hstack(oy).ravel(),
z=np.hstack(oz).ravel(),
lat=np.hstack(olat).ravel(),
lon=np.hstack(olon).ravel(),
depth=np.hstack(odep).ravel(),
value=np.hstack(oval).ravel(),
error=np.hstack(oerr).ravel(),
type=np.hstack(otype).ravel(),
provenance=np.hstack(oprov).ravel(),
title=title)
def merge_files(obs_files, out_files, days, dt, reftime, limits=None, clobber=True):
"""
merge together a group of observation files into combined new files
with observations that lie only within the corresponding dates
Parameters
----------
obs_files : list,
List of files to merge together (a single file will work, it will
just be filtered by the dates)
out_files : list or string,
list of the filenames to create for each of the output periods.
If a single string is given, the character '#' will be replaced
by the starting time of the observation (e.g. out_files="out_#.nc"
will become out_03234.nc)
days : list of tuples,
List of starting and ending day numbers for each cycle to process.
The first value is the start day, the second is the end day. The
number of tuples is the number of files to output.
dt : float,
Time separation of observations. Observations that are less than
dt apart in time will be set to the same time.
reftime :
Reference time used to process the observations. The merged files
are now timed in relation to the beginning of the assimilation cycle
limits : dict, optional
Set the limits of the grid points that observations are allowed
within, {'north':i, 'south':i, 'east':i, 'west':i }. As obs near
the boundaries are not advisable, this allows you to specify the
valid grid range to accept obs within.
clobber: bool, optional
If True, output files are overwritten. If False, they are skipped.
Returns
-------
None
Examples
--------
Put together three files into 5 separate files in two day intervals from
day 10 through day 20:
>>> merge_files(["obs_1.nc", "obs_2.nc", "obs_3.nc"], "new_#.nc",
[(i, i+2) for i in range(10, 20, 2)])
Put together same three files into 3 overlapping separate files in five
day intervals with one overlapping day:
>>> merge_files(["obs_1.nc", "obs_2.nc", "obs_3.nc"], "new_#.nc",
[(i, i+5) for i in range(10, 20, 4)])
"""
import re
import os
# Only unique files
obs_files = set().union(seapy.flatten(obs_files))
outtime = False
if isinstance(out_files, str):
outtime = True
        time = re.compile('#')
# Go through the files to determine which periods they cover
myobs = list()
sdays = list()
edays = list()
for file in obs_files:
nc = seapy.netcdf(file)
fdays = nc.variables['survey_time'][:]
nc.close()
l = np.where(np.logical_and(fdays >= np.min(days),
fdays <= np.max(days)))[0]
if not l.size:
continue
myobs.append(file)
sdays.append(fdays[0])
edays.append(fdays[-1])
sdays = np.asarray(sdays)
edays = np.asarray(edays)
# Loop over the dates in pairs
for n, t in enumerate(seapy.progressbar.progress(days)):
# Set output file name
if outtime:
outfile = time.sub("{:05d}".format(t[0]), out_files)
else:
outfile = out_files[n]
if os.path.exists(outfile) and not clobber:
continue
# Find the files that cover the current period
fidx = np.where(np.logical_and(sdays <= t[1], edays >= t[0]))[0]
if not fidx.size:
continue
# Create new observations for this time period
nobs = obs(myobs[fidx[0]])
l = np.where(np.logical_or(nobs.time < t[0], nobs.time > t[1]))
nobs.delete(l)
for idx in fidx[1:]:
o = obs(myobs[idx])
l = np.where(np.logical_and(o.time >= t[0], o.time <= t[1]))
nobs.add(o[l])
# Remove any limits
if limits is not None:
l = np.where(np.logical_or.reduce((
nobs.x < limits['west'],
nobs.x > limits['east'],
nobs.y < limits['south'],
nobs.y > limits['north'])))
nobs.delete(l)
# Make time relative to the assimilation window
nobs.reftime = reftime
#nobs.reftime = seapy.day2date(t[0],epoch=reftime)
#nobs.time = abs(abs(nobs.time) - abs(t[0]))
# Save out the new observations
nobs.to_netcdf(outfile, dt=dt)
pass
``` |
{
"source": "joaomheusi/TCCJose",
"score": 2
} |
#### File: joaomheusi/TCCJose/clean.py
```python
import matplotlib.pyplot as plt
from scipy.io import wavfile
import argparse
import os
from glob import glob
import numpy as np
import pandas as pd
from librosa.core import resample, to_mono
from tqdm import tqdm
import wavio
ORIGINPATH = os.path.realpath(__file__)
def envelope(y, rate, threshold):
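    # Build a boolean mask marking samples whose rolling-window (rate/20 samples)
    # maximum magnitude exceeds `threshold`; the rolling envelope is returned as well.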
mask = []
y = pd.Series(y).apply(np.abs)
y_mean = y.rolling(window=int(rate/20),
min_periods=1,
center=True).max()
for mean in y_mean:
if mean > threshold:
mask.append(True)
else:
mask.append(False)
return mask, y_mean
def downsample_mono(path, sr):
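    # Read a wav file with wavio, collapse it to mono, resample it to `sr` Hz
    # and return the new rate together with the int16 samples.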
obj = wavio.read(path)
wav = obj.data.astype(np.float32, order='F')
rate = obj.rate
try:
channel = wav.shape[1]
if channel == 2:
wav = to_mono(wav.T)
elif channel == 1:
wav = to_mono(wav.reshape(-1))
except IndexError:
wav = to_mono(wav.reshape(-1))
pass
except Exception as exc:
raise exc
wav = resample(wav, rate, sr)
wav = wav.astype(np.int16)
return sr, wav
def save_sample(sample, rate, target_dir, fn, ix):
fn = fn.split('.wav')[0]
dst_path = os.path.join(target_dir.split('.')[0], fn+'_{}.wav'.format(str(ix)))
if os.path.exists(dst_path):
return
wavfile.write(dst_path, rate, sample)
def check_dir(path):
if os.path.exists(path) is False:
os.mkdir(path)
def split_wavs(args):
src_root = args.src_root
dst_root = args.dst_root
dt = args.delta_time
wav_paths = glob('{}/**'.format(src_root), recursive=True)
wav_paths = [x for x in wav_paths if '.wav' in x]
dirs = os.listdir(src_root)
check_dir(dst_root)
classes = os.listdir(src_root)
for _cls in classes:
target_dir = os.path.join(dst_root, _cls)
check_dir(target_dir)
src_dir = os.path.join(src_root, _cls).replace("\\\\","\\")
for fn in tqdm(os.listdir(src_dir)):
src_fn = os.path.join(src_dir, fn)
rate, wav = downsample_mono(src_fn, args.sr)
mask, y_mean = envelope(wav, rate, threshold=args.threshold)
wav = wav[mask]
delta_sample = int(dt*rate)
# cleaned audio is less than a single sample
# pad with zeros to delta_sample size
if wav.shape[0] < delta_sample:
sample = np.zeros(shape=(delta_sample,), dtype=np.int16)
sample[:wav.shape[0]] = wav
save_sample(sample, rate, target_dir, fn, 0)
# step through audio and save every delta_sample
# discard the ending audio if it is too short
else:
trunc = wav.shape[0] % delta_sample
for cnt, i in enumerate(np.arange(0, wav.shape[0]-trunc, delta_sample)):
start = int(i)
stop = int(i + delta_sample)
sample = wav[start:stop]
save_sample(sample, rate, target_dir, fn, cnt)
def test_threshold(args):
src_root = args.src_root
wav_paths = glob('{}/**'.format(src_root), recursive=True)
wav_path = [x for x in wav_paths if args.fn in x]
if len(wav_path) != 1:
print('audio file not found for sub-string: {}'.format(args.fn))
return
rate, wav = downsample_mono(wav_path[0], args.sr)
mask, env = envelope(wav, rate, threshold=args.threshold)
plt.style.use('ggplot')
plt.title('Signal Envelope, Threshold = {}'.format(str(args.threshold)))
plt.plot(wav[np.logical_not(mask)], color='r', label='remove')
plt.plot(wav[mask], color='c', label='keep')
plt.plot(env, color='m', label='envelope')
plt.grid(False)
plt.legend(loc='best')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Cleaning audio data')
parser.add_argument('--src_root', type=str, default='wavfiles',
help='directory of audio files in total duration')
parser.add_argument('--dst_root', type=str, default='clean',
help='directory to put audio files split by delta_time')
parser.add_argument('--delta_time', '-dt', type=float, default=1.0,
help='time in seconds to sample audio')
parser.add_argument('--sr', type=int, default=16000,
help='rate to downsample audio')
parser.add_argument('--fn', type=str, default='1ACuiOPY_x8_1-firetruck.wav',
help='file to plot over time to check magnitude')
    parser.add_argument('--threshold', type=int, default=20,
help='threshold magnitude for np.int16 dtype')
args, _ = parser.parse_known_args()
print(args)
#test_threshold(args)
split_wavs(args)
``` |
{
"source": "joaomh/hacker-rank-python",
"score": 4
} |
#### File: data_structures/linked_list/insertNodeAtPosition.py
```python
class Node:
# Constructor to create a new node
def __init__(self, data):
self.data = data # Pointer to data
self.next = None # Initialize next as null
def insertNodeAtPosition(head, data, position):
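    # Walk the list and splice a new node in at index `position`
    # (this version assumes position >= 1).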
i = 0
current_head = head
previous_head = head
while i <= position:
if i == position:
new_head = Node(data)
previous_head.next = new_head
new_head.next = current_head
return head
else:
i += 1
previous_head = current_head
current_head = current_head.next
return head
```
#### File: data_structures/linked_list/printLinkedList.py
```python
def printLinkedList(head):
if head:
current = head
while current:
print(current.data)
current = current.next
```
#### File: data_structures/linked_list/removeDuplicates.py
```python
class Node:
# Constructor to create a new node
def __init__(self, data):
self.data = data # Pointer to data
self.next = None # Initialize next as null
def removeDuplicates(head):
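    # Drop duplicates by skipping equal adjacent nodes, so the list is assumed sorted.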
if head is None:
return None
current = head
while current.next:
if current.data == current.next.data:
nextNext = current.next.next
current.next = nextNext
else:
current = current.next
return head
```
#### File: data_structures/linked_list/reverseLinkedList.py
```python
class Node:
# Constructor to create a new node
def __init__(self, data):
self.data = data # Pointer to data
self.next = None # Initialize next as null
def reverse(head):
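    # Iteratively re-point each node's next pointer at its predecessor and return the new head.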
previous_node = None
current_node = head
while current_node is not None:
next = current_node.next
current_node.next = previous_node
previous_node = current_node
current_node = next
head = previous_node
return head
```
#### File: data_structures/linked_list/sortedInsert.py
```python
class Node:
# Constructor to create a new node
def __init__(self, data):
self.data = data
self.next = None # Reference to next node
        self.prev = None # Reference to the previous node
def sortedInsert(head, data):
node = Node(data)
if head == None:
return node
if data <= head.data:
node.next, head.prev = head, node
return node
current = head
while current.next != None and data >= current.next.data:
current = current.next
    node.prev = current
    node.next = current.next
    if current.next is not None:
        current.next.prev = node
    current.next = node
    return head
``` |
{
"source": "joaomh/Python-Data-Structures-and-Algorithms",
"score": 4
} |
#### File: data_structures/arrays/reverse_array.py
```python
def reverseArray_1(a):
b = []
for i in a:
b.insert(0, i)
return b
# Method 2
# Copies the list prior to reversing it
def reverseArray_2(a):
return a[::-1]
# Method 3
# The fastest way to reverse a long list
def reverseArray_3(a):
    a.reverse()  # list.reverse() works in place and returns None
a = [1, 4, 3, 2]
print (reverseArray_1(a))
print (reverseArray_2(a))
reverseArray_3(a)
print(a)
```
#### File: data_structures/queue/queues_using_linked_list.py
```python
class Queue:
def __init__(self):
self.head = None
self.tail = None
self.num_elements = 0
def enqueue(self, value):
new_node = Node(value)
if self.head is None:
self.head = new_node
self.tail = self.head
else:
self.tail.next = new_node # add data to the next attribute of the tail (i.e. the end of the queue)
self.tail = self.tail.next # shift the tail (i.e., the back of the queue)
self.num_elements += 1
def dequeue(self):
if self.is_empty():
return None
value = self.head.value # copy the value to a local variable
self.head = self.head.next # shift the head (i.e., the front of the queue)
self.num_elements -= 1
return value
def size(self):
return self.num_elements
def is_empty(self):
return self.num_elements == 0
```
#### File: src/efficiency/big_o_efficiency.py
```python
import matplotlib.pyplot as plt
from scipy.special import gamma
import math
import numpy as np
n = np.linspace(1,101,100)
O1 = gamma(n)
O2 = 2**n
O3 = n**2
O4 = n*np.log(n) / np.log(2)
O5 = n
O6 = np.sqrt(n)
O7 = np.log(n) / np.log(2)
plt.plot(n, O1, '--k', label='n!')
plt.plot(n, O2, '--r', label='2^n')
plt.plot(n, O3, '--g', label='n^2')
plt.plot(n, O4, 'y', label='nlog(n)')
plt.plot(n, O5, 'c', label='n')
plt.plot(n, O6, '--m', label='sqrt(n)')
plt.plot(n, O7, 'b', label='log(n)')
axes = plt.gca()
axes.set(xlim=(0, 100), ylim=(0, 100))
leg = axes.legend()
plt.show()
# O(N!)
# This is the Heap's algorithm, which is used for generating all possible permutation of n objects
# Another example could be the Travelling Salesman Problem
def Permutation(data, n):
if n == 1:
print(data)
return
for i in range(n):
Permutation(data, n - 1)
if n % 2 == 0:
data[i], data[n-1] = data[n-1], data[i]
else:
data[0], data[n-1] = data[n-1], data[0]
data = [1, 2]
Permutation(data,len(data))
get_ipython().run_line_magic('time', '')
# O(2^n)
# Recursive calculation of Fibonacci numbers
def fibonacci(n):
if n <= 1:
return n
return fibonacci(n-1) + fibonacci(n-2)
print(fibonacci(20))
get_ipython().run_line_magic('time', '')
# O(N^2)
# Print pair of numbers in the data
def Print_Pair(some_list):
for i in some_list:
for j in some_list:
print("Items: {}, {}".format(i,j))
Print_Pair([1, 2, 3, 4])
get_ipython().run_line_magic('time', '')
# O(nlog(n))
# Mergesort algorithm
def Merge_Sort(data):
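    # Recursive merge sort: split `data`, sort each half, then merge the halves back in place.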
if len(data) <= 1:
return
mid = len(data) // 2
left_data = data[:mid]
right_data = data[mid:]
Merge_Sort(left_data)
Merge_Sort(right_data)
left_index = 0
right_index = 0
data_index = 0
while left_index < len(left_data) and right_index < len(right_data):
if left_data[left_index] < right_data[right_index]:
data[data_index] = left_data[left_index]
left_index += 1
else:
data[data_index] = right_data[right_index]
right_index += 1
data_index += 1
if left_index < len(left_data):
del data[data_index:]
data += left_data[left_index:]
elif right_index < len(right_data):
del data[data_index:]
data += right_data[right_index:]
data = [9, 0, 8, 6, 2, 5, 7, 3, 4, 1]
Merge_Sort(data)
print(data)
get_ipython().run_line_magic('time', '')
# O(n)
# Just print some itens
def Print_Item(data):
for i in data:
print(i)
Print_Item([1, 2, 3, 4])
get_ipython().run_line_magic('time', '')
# Linear search
def Linear_Search(data, value):
for index in range(len(data)):
if value == data[index]:
return index
raise ValueError('Value not found in the list')
data = [1, 3, 7, 4, 5, 9, 0, 11]
print(Linear_Search(data,9))
get_ipython().run_line_magic('time', '')
# O(log(n))
# This algorithms with logarithmic time complexity are commonly found on binary trees
for idx in range(0, len(data), 3):
print(data[idx])
get_ipython().run_line_magic('time', '')
# Binary search
def binary_search(data, value):
n = len(data)
left = 0
right = n - 1
while left <= right:
middle = (left + right) // 2
if value < data[middle]:
right = middle - 1
elif value > data[middle]:
left = middle + 1
else:
return middle
raise ValueError('Value is not in the list')
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(binary_search(data, 8))
# O(0n + 1)
def First_Idx(data):
return data[0]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(First_Idx(data))
get_ipython().run_line_magic('time', '')
``` |
{
"source": "Joaomlg/multilayer-perceptron-mnist",
"score": 3
} |
#### File: multilayer-perceptron-mnist/data/__init__.py
```python
import numpy as np
import gzip
import pickle
import os
import urllib.request
class MNIST:
host = 'http://yann.lecun.com/exdb/mnist/'
filenames = {
'train': ('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz'),
'test': ('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'),
}
dataset_filename = 'mnist.pkl.gz'
train_samples = 50000
validation_samples = 10000
test_samples = 10000
def __init__(self):
self.current_dir = os.path.dirname(__file__)
if not self.is_dataset_available():
print('Dataset not available! It will be downloaded and decoded, and can be take a while, please wait!')
datasets = self.get_base_datasets_filenames()
for dataset in datasets:
if not self.is_base_dataset_downloaded(dataset):
print(f'Downloading {dataset}...')
self.download_dataset(dataset)
print('Decoding files and saving it...')
self.decode_and_save()
print('Deleting base files (downloaded)...')
for dataset in datasets:
self.delete_dataset(dataset)
print('Done.')
def is_dataset_available(self):
return os.path.exists(os.path.join(self.current_dir, self.dataset_filename))
def get_base_datasets_filenames(self):
return self.filenames['train'] + self.filenames['test']
def is_base_dataset_downloaded(self, filename):
return os.path.exists(os.path.join(self.current_dir, filename))
def download_dataset(self, filename):
url = self.host + filename
dest = os.path.join(self.current_dir, filename)
urllib.request.urlretrieve(url, dest)
def delete_dataset(self, filename):
os.remove(os.path.join(self.current_dir, filename))
def decode_and_save(self):
data = {}
for key, (images_filename, labels_filename) in self.filenames.items():
with gzip.open(os.path.join(self.current_dir, images_filename), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16).reshape(-1, 28*28)
with gzip.open(os.path.join(self.current_dir, labels_filename), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
data[key] = (images, labels)
training = tuple(x[:self.train_samples] for x in data['train'])
validation = tuple(x[self.train_samples:] for x in data['train'])
test = tuple(data['test'])
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'wb') as file:
pickle.dump((training, validation, test), file)
def load(self):
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'rb') as file:
training, validation, test = pickle.load(file)
return training, validation, test
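# Example usage (a minimal sketch):
# mnist = MNIST()  # downloads and decodes the raw files on first use
# (train_x, train_y), (val_x, val_y), (test_x, test_y) = mnist.load()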
``` |
{
"source": "Joaomlg/Python-Modbus",
"score": 3
} |
#### File: Joaomlg/Python-Modbus/modbus_exceptions.py
```python
class ModbusException(Exception):
@staticmethod
def fromExceptionCode(exception_code: int):
if exception_code == 1:
return IllegalFunctionError
elif exception_code == 2:
return IllegalDataAddressError
elif exception_code == 3:
return IllegalDataValueError
elif exception_code == 4:
return SlaveDeviceFailureError
elif exception_code == 5:
return AcknowledgeError
elif exception_code == 6:
return SlaveDeviceBusyError
elif exception_code == 7:
return NegativeAcknowledgeError
elif exception_code == 8:
return MemoryParityError
elif exception_code == 10:
return GatewayPathUnavailableError
elif exception_code == 11:
return GatewayTargetDeviceFailedToRespondError
else:
return Exception(f'Slave reported a unknown error, exception code: {exception_code}')
class IllegalFunctionError(ModbusException):
pass
class IllegalDataAddressError(ModbusException):
pass
class IllegalDataValueError(ModbusException):
pass
class SlaveDeviceFailureError(ModbusException):
pass
class AcknowledgeError(ModbusException):
pass
class SlaveDeviceBusyError(ModbusException):
pass
class NegativeAcknowledgeError(ModbusException):
pass
class MemoryParityError(ModbusException):
pass
class GatewayPathUnavailableError(ModbusException):
pass
class GatewayTargetDeviceFailedToRespondError(ModbusException):
pass
```
#### File: Joaomlg/Python-Modbus/modbus_message.py
```python
import struct
from modbus_utils import calculateDataCRC16, verifyPayloadCRC16
from modbus_constants import LITTLE_ENDIAN, BIG_ENDIAN
class ModbusMessageBuilder:
def __init__(self, endian=BIG_ENDIAN):
self.__message_endian = endian
self.__struct_format = bytes()
self.__payload_buffer = list()
def setPayload(self, payload: bytes):
self.__payload = payload
def addChar(self, char: int):
self.__struct_format += 'b'
self.__payload_buffer.append(char)
def addUnsignedChar(self, char: int):
self.__struct_format += 'B'
self.__payload_buffer.append(char)
def addShort(self, short: int):
self.__struct_format += 'h'
self.__payload_buffer.append(short)
def addUnsignedShort(self, short: int):
self.__struct_format += 'H'
self.__payload_buffer.append(short)
def build(self) -> bytes:
struct_format_with_endian = self.__message_endian + self.__struct_format
message_payload_data = struct.pack(struct_format_with_endian, *self.__payload_buffer)
message_payload_crc16 = calculateDataCRC16(message_payload_data)
return message_payload_data + message_payload_crc16
class ReadMessageBuilder:
def __init__(self, slave_address, function_code, start_register, number_of_registers):
self.__slave_address = slave_address
self.__function_code = function_code
self.__start_register = start_register
self.__number_of_registers = number_of_registers
self.__assertDeviceAddress()
self.__assertFunctionCode()
self.__assertRegisterAddress()
self.__assertRegisterCount()
@property
def slave_address(self) -> int:
return self.__slave_address
def __assertDeviceAddress(self):
assert self.slave_address > 0, ValueError('Device address must be greater than zero for read functions.')
@property
def function_code(self) -> int:
return self.__function_code
def __assertFunctionCode(self):
assert self.function_code > 0, ValueError('Function code must be greater than zero.')
@property
def start_register(self) -> int:
return self.__start_register
def __assertRegisterAddress(self):
assert self.start_register >= 0, ValueError('Register address must be greater or equal than zero.')
@property
def number_of_registers(self) -> int:
return self.__number_of_registers
def __assertRegisterCount(self):
assert self.number_of_registers > 0, ValueError('Register count must be at least one.')
def build(self) -> bytes:
message_builder = ModbusMessageBuilder()
message_builder.addUnsignedChar(self.slave_address)
message_builder.addUnsignedChar(self.function_code)
message_builder.addUnsignedShort(self.start_register)
message_builder.addUnsignedShort(self.number_of_registers)
return message_builder.build()
class ReadCoilStatusMessageBuilder(ReadMessageBuilder):
def __init__(self, slave_address, start_coil, number_of_coils):
super().__init__(slave_address=slave_address, function_code=1, start_register=start_coil, number_of_registers=number_of_coils)
class ReadHoldingRegisterMessageBuilder(ReadMessageBuilder):
def __init__(self, slave_address, start_register, number_of_registers):
super().__init__(slave_address=slave_address, function_code=3, start_register=start_register, number_of_registers=number_of_registers)
class ReadInputRegisterMessageBuilder(ReadMessageBuilder):
def __init__(self, slave_address, start_register, number_of_registers):
super().__init__(slave_address=slave_address, function_code=4, start_register=start_register, number_of_registers=number_of_registers)
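# Example usage (a minimal sketch; the slave address and register numbers are hypothetical):
# builder = ReadHoldingRegisterMessageBuilder(slave_address=1, start_register=0, number_of_registers=2)
# frame = builder.build()  # 8-byte Modbus RTU read request: address, function, start, count, CRC16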
``` |
{
"source": "Joaomlg/RPi-Manage",
"score": 3
} |
#### File: RPi-Manage/RPi/service.py
```python
import os
import re
def get_status(service_name):
command = '/etc/init.d/%s status | grep Active' % (str(service_name))
regex = r'Active: ([a-zA-Z]*)'
response = os.popen(command).read()
if response:
status = re.search(regex, response)
if status:
return str(status.group(1))
return None
def is_loaded(service_name):
command = '/etc/init.d/%s status | grep Loaded' % (str(service_name))
regex = r'Loaded: ([a-zA-Z]*)'
response = os.popen(command).read()
if response:
status = re.search(regex, response)
if status:
if 'loaded' in status.group(1):
return True
return False
return None
def unmask_service(service_name):
command = 'sudo systemctl unmask %s' % (str(service_name))
os.popen(command)
``` |
{
"source": "joaomlneto/tuenti-challenge-2020",
"score": 4
} |
#### File: tuenti-challenge-2020/05/main.py
```python
import argparse
import icu
import os
import re
import sys
parser = argparse.ArgumentParser(description="Tuenti Challenge 2020 - Problem 05")
parser.add_argument("-f", "--file", dest="filename", type=str,
help="the text file to analyze", required=True)
args = parser.parse_args()
def is_tuentistic_sum_impossible(n):
return (n < 20) or (30 <= n < 40) or (n == 59);
def tuentistic_sum(n):
return 'IMPOSSIBLE' if is_tuentistic_sum_impossible(n) else n // 20
# compute how often each word appears
with open(args.filename, 'r') as file:
num_cases = int(next(file))
for i in range(0, num_cases):
n = int(next(file))
print('Case #%d: %s' % (i + 1, tuentistic_sum(n)))
```
#### File: tuenti-challenge-2020/06/main.py
```python
import argparse
import icu
import os
import re
import socket
import sys
# We consider the following axis:
# y
# ^
# |
# |
# +--------> x
# Princess is at (1, 0)
# We start at (0, 0)
# A certain position on the map (or ∆positions)
class Position:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "(%d %d)" % (self.x, self.y)
def __eq__(self, other):
if not isinstance(other, Position):
return TypeError
return (self.x == other.x) and (self.y == other.y)
def distance_to(self, pos):
xdiff = abs(self.x - pos.x)
ydiff = abs(self.y - pos.y)
return max(xdiff, ydiff)
class Displacement(Position):
def __init(self, x, y):
super().__init__(x, y)
# A knight move
class Move:
def __init__(self, displacement):
self.displacement = displacement
def __str__(self):
return "%s" % (self.command())
def apply(self, position):
return Position(position.x + self.displacement.x, position.y + self.displacement.y)
def getTile(self, tiles):
return tiles[2 - self.displacement.y][2 + self.displacement.x]
def command(self):
command = ''
command += str(abs(self.displacement.x))
command += 'R' if self.displacement.x > 0 else 'L'
command += str(abs(self.displacement.y))
command += 'U' if self.displacement.y > 0 else 'D'
return command
def getReverseMove(self):
return Move(Displacement(-self.displacement.x, -self.displacement.y))
# A "map" (the current context)
class Map:
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.current_position = Position(0, 0)
self.princess_position = Position(1, 0)
self.tiles = None
def __str__(self):
result = '----- MAP -----\n'
for i in range(0, 5):
result += "%s\n" % self.tiles[i]
result += 'Pos: %s\n' % (str(self.current_position))
result += 'Available moves: %s\n' % ', '.join(str(x) for x in self.available_moves())
result += 'Visited: %d\n' % len(self.visited)
result += 'Path: %s\n' % ' '.join([str(x) for x in self.path])
result += '---------------'
return result
def update_tiles(self):
data = str(self.sock.recv(4096), encoding='utf-8').split('\n')
print(*data, sep='\n')
tiles = data[0:5]
assert len(tiles) == 5
for i in range(0, 5): assert len(tiles[i]) == 5
self.tiles = tiles
print(self)
def available_moves(self):
moves = []
for move in KNIGHT_MOVES:
if move.getTile(self.tiles) != '#': moves.append(move)
        moves.sort(key = lambda x: x.apply(self.current_position).distance_to(self.princess_position))
return moves
def apply_move(self, move):
self.sock.send(move.command().encode())
self.current_position = move.apply(self.current_position)
if self.found_princess():
print('yay!')
data = str(self.sock.recv(4096), encoding='utf-8').split('\n')
print(*data, sep='\n')
sys.exit()
self.update_tiles()
print(self)
def found_princess(self):
return (self.current_position.x == self.princess_position.x) and \
(self.current_position.y == self.princess_position.y)
def search_princess(self):
self.visited = []
self.path = []
map.update_tiles()
while not self.found_princess():
self.do_search_step()
def do_search_step(self):
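        # Depth-first search with backtracking: try each legal knight move towards an
        # unvisited square, recurse, then play the reverse move to undo it.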
if self.found_princess():
print("FOUND IT!")
sys.exit()
self.visited.append(self.current_position)
self.path.append(self.current_position)
available_moves = self.available_moves()
for move in available_moves:
if move.apply(self.current_position) not in self.visited:
self.apply_move(move)
self.do_search_step()
self.apply_move(move.getReverseMove())
self.path.pop()
host = "172.16.31.10"
port = 2003
# compute valid moves by a knight
KNIGHT_MOVES = [
Move(Displacement(-1, +2)),
Move(Displacement(+1, +2)),
Move(Displacement(-2, +1)),
Move(Displacement(+2, +1)),
Move(Displacement(-2, -1)),
Move(Displacement(+2, -1)),
Move(Displacement(-1, -2)),
Move(Displacement(+1, -2)),
]
map = Map()
map.search_princess()
```
#### File: tuenti-challenge-2020/14/main.py
```python
import argparse
import icu
import os
import re
import socket
import sys
import time
import concurrent.futures
host = '172.16.31.10'
port = '2092'
master_running = True
master_id = 9
sybil_ids = []
# parse list of servers from line
def extract_server_list(line_str):
regex_servers = 'servers: \\[[0-9]+(,[0-9]+)*\\]'
m = re.search(regex_servers, line_str)
if m is None:
print('why is this none? -->', line_str)
assert m is not None
return [int(x) for x in line_str[m.start():m.end()][10:-1].split(',')]
# parse secret owner from line
def extract_secret_owner(line_str):
regex_owner = 'secret_owner: [0-9]+'
m = re.search(regex_owner, line_str)
if m is None: return None
return int(line_str[m.start():m.end()][14:])
def extract_own_id(line_str):
regex_serverid = 'SERVER ID: [0-9]+'
m = re.search(regex_serverid, line_str)
if m is None: return None
return int(line_str[m.start():m.end()][11:])
def extract_msg_sender(line_str):
regex_sender = ': [0-9]+ ->'
m = re.search(regex_sender, line_str)
if m is None: sender = None
else: sender = int(line_str[m.start():m.end()][2:-3])
return sender
def extract_prepare_id(line_str):
regex_id = 'PREPARE {[0-9]+,[0-9]+}'
m = re.search(regex_id, line_str)
assert m is not None
return line_str[m.start():m.end()][8:]
def prepare_msg(seq_number, my_id):
return 'PREPARE {%d,%d}' % (seq_number, my_id)
def accept_msg(seq_number, my_id, next_servers, next_owner):
return 'ACCEPT {id: {%d,%d}, value: {servers: %s, secret_owner: %d}}' % \
(seq_number, my_id, str(next_servers).replace(' ', ''), next_owner)
def promise_msg(proposal_id, prev_proposal):
return 'PROMISE %s %s' % (proposal_id, prev_proposal)
def broadcast_message(sock, id, msg, servers):
print('broadcasting %s to %s' % (msg, servers))
for server in servers:
if server is not id:
sock.send(('%s -> %d\n' % (msg, server)).encode('ascii'))
def send_message(sock, id, msg, receiver):
broadcast_message(sock, id, msg, [receiver])
def get_node_to_remove(servers, owner, master, sybils):
removables = set(servers)
removables = removables.difference(set(sybils))
removables = removables.difference([owner, master])
if len(removables) == 0: return None
return removables.pop()
def get_node_to_add(servers, owner, master, sybils):
missing_sybils = set(sybils).difference(servers)
if len(missing_sybils) == 0: return None
return missing_sybils.pop()
def get_next_membership(servers, owner, master, sybils):
# check if we have sybils of ours missing
node_to_add = get_node_to_add(servers, owner, master, sybils)
if node_to_add is not None:
print('Decision - Membership Change - Add', node_to_add)
return list(set(servers).union([node_to_add]))
# check if we can shoot down other nodes
node_to_remove = get_node_to_remove(servers, owner, master, sybils)
if node_to_remove is not None:
print('Decision - Membership Change - Remove' ,node_to_remove)
return list(set(servers).difference([node_to_remove]))
return None
def we_have_majority(servers, sybils):
return 2 * (len(sybils) + 1) > len(servers)
def run_sybil_node(id, sock):
global sybil_ids
sybil_ids.append(id)
sock.settimeout(1)
while master_running:
try:
data = sock.recv(1048576)
if not data: break
except socket.timeout: continue
sock.close()
def run_master_node(id, sock):
global master_running
proposal_id = 10
promises = []
step = 0
while master_running:
data = sock.recv(1048576)
if not data:
break
lines = data.decode('ascii').splitlines()
for line in lines:
#print('[%2d] %s' % (id, line))
# broadcast the proposal everyone has ever seen
if step == 0 and 'ROUND FINISHED' in line:
promises = []
servers = extract_server_list(line)
owner = extract_secret_owner(line)
msg = prepare_msg(proposal_id, id)
broadcast_message(sock, id, msg, servers)
step = 1
# collect promises -- if we have enough, do something nasty
if step == 1 and 'PROMISE' in line:
#print(line)
promises.append(extract_msg_sender(line))
#print('got promises:', promises, 'from servers:', servers)
# check if we have promises from majority of acceptors
if 2 * len(promises) > len(servers):
# check if we are already a majority of the member nodes
if we_have_majority(servers, sybil_ids):
msg = accept_msg(proposal_id, id, servers, id)
broadcast_message(sock, id, msg, servers)
# we are not the majority (yet) - lets move closer to that goal
else:
membership = get_next_membership(servers, owner, id, sybil_ids)
assert membership is not None
msg = accept_msg(proposal_id, id, membership, owner)
broadcast_message(sock, id, msg, servers)
proposal_id += 10
step = 0
if 'SECRET IS' in line:
print(line)
step = 0
master_running = False
break
master_running = False
sock.close()
def spawn_node():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, int(port)))
data = sock.recv(15)
lines = data.decode('ascii').splitlines()
id = extract_own_id(data.decode('ascii').splitlines()[0])
assert id is not None
if id == master_id: run_master_node(id, sock)
else: run_sybil_node(id, sock)
num_nodes = 4
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for i in range(0, num_nodes):
futures.append(executor.submit(spawn_node))
for future in concurrent.futures.as_completed(futures):
future.result()
``` |
{
"source": "joaommartins/ProtNets",
"score": 2
} |
#### File: ProtNets/test/test_generator_infer.py
```python
import sys
sys.path.append('../')
from utils.SparseGenerator import SparseGenerator
import glob
import os
from timeit import default_timer as timer
import numpy as np
def test_load_sample_data():
aa1 = "ACDEFGHIKLMNPQRSTVWYX"
data_dir = '/Users/jmartins/PycharmProjects/ProtNets/data/infer'
# fixed_params must be a string to be passed in the shell, let's use JSON
high_res_protein_feature_filenames = sorted(
glob.glob(os.path.join(data_dir, "*protein_features.npz")))
high_res_grid_feature_filenames = sorted(
glob.glob(os.path.join(data_dir, "*residue_features.npz")))
infer_data = SparseGenerator()
infer_data.load_data(high_res_protein_feature_filenames,
high_res_grid_feature_filenames)
diff = []
for index in range(1, 805):
start = timer()
indices, values, hots = infer_data.infer(index)
print index, aa1[np.argmax(hots)]
# print indices
# print values.shape
# print hots.shape
end = timer()
diff.append(end-start)
print 'Average loop: {}'.format(np.average(diff))
``` |
{
"source": "joaomonteirof/dcase",
"score": 2
} |
#### File: dcase/fuse_spec/train_loop.py
```python
import torch
import torch.nn.functional as F
import numpy as np
import random
import os
from tqdm import tqdm
from losses import LabelSmoothingLoss
class TrainLoop(object):
def __init__(self, model, optimizer, train_loader, valid_loader, max_gnorm, label_smoothing, verbose=-1, cp_name=None, save_cp=False, checkpoint_path=None, checkpoint_epoch=None, cuda=True, logger=None):
if checkpoint_path is None:
# Save to current directory
self.checkpoint_path = os.getcwd()
else:
self.checkpoint_path = checkpoint_path
if not os.path.isdir(self.checkpoint_path):
os.mkdir(self.checkpoint_path)
self.save_epoch_fmt = os.path.join(self.checkpoint_path, cp_name) if cp_name else os.path.join(self.checkpoint_path, 'checkpoint_{}ep.pt')
self.cuda_mode = cuda
self.model = model
self.optimizer = optimizer
self.max_gnorm = max_gnorm
self.train_loader = train_loader
self.valid_loader = valid_loader
self.total_iters = 0
self.cur_epoch = 0
self.verbose = verbose
self.save_cp = save_cp
self.device = next(self.model.parameters()).device
self.logger = logger
self.history = {'train_loss': [], 'train_loss_batch': []}
self.disc_label_smoothing = label_smoothing
self.best_er = np.inf
if label_smoothing>0.0:
self.ce_criterion = LabelSmoothingLoss(label_smoothing, lbl_set_size=self.model.n_classes)
else:
self.ce_criterion = torch.nn.CrossEntropyLoss()
if self.valid_loader is not None:
self.history['ER'] = []
if checkpoint_epoch is not None:
self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
def train(self, n_epochs=1, save_every=1, eval_every=1000):
while (self.cur_epoch < n_epochs):
self.cur_epoch += 1
np.random.seed()
if self.verbose>0:
print(' ')
print('Epoch {}/{}'.format(self.cur_epoch, n_epochs))
train_iter = tqdm(enumerate(self.train_loader), total=len(self.train_loader))
else:
train_iter = enumerate(self.train_loader)
self.save_epoch_cp = False
train_loss_epoch=0.0
for t, batch in train_iter:
train_loss = self.train_step(batch)
self.history['train_loss_batch'].append(train_loss)
train_loss_epoch+=train_loss
self.total_iters += 1
if self.logger:
self.logger.add_scalar('Train/Train Loss', train_loss, self.total_iters)
self.logger.add_scalar('Info/LR', self.optimizer.param_groups[0]['lr'], self.total_iters)
if self.total_iters % eval_every == 0:
self.evaluate()
if self.save_cp and ( self.history['ER'][-1] < np.min([np.inf]+self.history['ER'][:-1]) ):
self.checkpointing()
self.save_epoch_cp = True
self.history['train_loss'].append(train_loss_epoch/(t+1))
if self.verbose>0:
print(' ')
print('Total train loss: {:0.4f}'.format(self.history['train_loss'][-1]))
print('Current LR: {}'.format(self.optimizer.param_groups[0]['lr']))
print(' ')
if self.save_cp and self.cur_epoch % save_every == 0 and not self.save_epoch_cp:
self.checkpointing()
if self.verbose>0:
print('Training done!')
if self.valid_loader is not None:
if self.verbose>0:
print('Best error rate and corresponding epoch and iteration: {:0.4f}, {}, {}'.format(np.min(self.history['ER']), self.best_er_epoch, self.best_er_iteration))
return np.min(self.history['ER'])
else:
return [np.min(self.history['train_loss'])]
def train_step(self, batch):
self.model.train()
self.optimizer.zero_grad()
x_spec, x_mod, y = batch
x_spec = x_spec.to(self.device, non_blocking=True)
x_mod = x_mod.to(self.device, non_blocking=True)
y = y.to(self.device, non_blocking=True)
out = self.model.forward(x_spec, x_mod)
loss = self.ce_criterion(out, y)
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_gnorm)
self.optimizer.step()
if self.logger:
self.logger.add_scalar('Info/Grad_norm', grad_norm, self.total_iters)
return loss.item()
def valid(self, batch):
self.model.eval()
with torch.no_grad():
x_spec, x_mod, y = batch
x_spec = x_spec.to(self.device, non_blocking=True)
x_mod = x_mod.to(self.device, non_blocking=True)
y = y.to(self.device, non_blocking=True)
out = self.model.forward(x_spec, x_mod)
_, pred = out.topk(1, 1, True, True)
pred = pred.t()
correct = pred.eq(y.view(1, -1).expand_as(pred))
correct = correct[:1].view(-1).float().sum(0, keepdim=True).item()
return correct
def evaluate(self):
if self.verbose>0:
print('\n\nIteration - Epoch: {} - {}'.format(self.total_iters, self.cur_epoch))
total_correct, total = 0, 0
for t, batch in enumerate(self.valid_loader):
correct = self.valid(batch)
total_correct += correct
total += batch[0].size(0)
self.history['ER'].append(1.-total_correct/total)
if self.history['ER'][-1]<self.best_er:
self.best_er = self.history['ER'][-1]
self.best_er_epoch = self.cur_epoch
self.best_er_iteration = self.total_iters
if self.logger:
self.logger.add_scalar('Valid/ER', self.history['ER'][-1], self.total_iters)
self.logger.add_scalar('Valid/Best ER', np.min(self.history['ER']), self.total_iters)
if self.verbose>0:
print(' ')
print('Current ER, best ER, and epoch - iteration: {:0.4f}, {:0.4f}, {}, {} \n'.format(self.history['ER'][-1], np.min(self.history['ER']), self.best_er_epoch, self.best_er_iteration))
def checkpointing(self):
# Checkpointing
if self.verbose>0:
print('Checkpointing...')
ckpt = {'model_state': self.model.state_dict(),
'n_classes': self.model.n_classes,
'optimizer_state': self.optimizer.state_dict(),
'history': self.history,
'total_iters': self.total_iters,
'cur_epoch': self.cur_epoch}
try:
torch.save(ckpt, self.save_epoch_fmt.format(self.cur_epoch))
except:
torch.save(ckpt, self.save_epoch_fmt)
def load_checkpoint(self, ckpt):
if os.path.isfile(ckpt):
ckpt = torch.load(ckpt, map_location = lambda storage, loc: storage)
# Load model state
self.model.load_state_dict(ckpt['model_state'])
# Load optimizer state
self.optimizer.load_state_dict(ckpt['optimizer_state'])
# Load history
self.history = ckpt['history']
self.total_iters = ckpt['total_iters']
self.cur_epoch = ckpt['cur_epoch']
if self.cuda_mode:
self.model = self.model.cuda(self.device)
else:
print('No checkpoint found at: {}'.format(ckpt))
```
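A minimal smoke test for the loop above, assuming the `fuse_spec` directory (with `train_loop.py` and `losses.py`) is importable; the tiny fusion model, feature dimensions and hyper-parameters below are hypothetical and only illustrate the expected `(x_spec, x_mod, y)` batch format:
```python
# Hypothetical smoke test for TrainLoop (not part of the repository).
import torch
from torch.utils.data import TensorDataset, DataLoader
from train_loop import TrainLoop  # assumes fuse_spec/ is on the path

class DummyFusion(torch.nn.Module):
    # Stand-in for the spectrogram/modulation fusion model.
    def __init__(self, feat_dim=40, n_classes=10):
        super().__init__()
        self.n_classes = n_classes
        self.proj = torch.nn.Linear(2 * feat_dim, n_classes)
    def forward(self, x_spec, x_mod):
        return self.proj(torch.cat([x_spec, x_mod], dim=1))

x_spec, x_mod = torch.randn(64, 40), torch.randn(64, 40)
y = torch.randint(0, 10, (64,))
loader = DataLoader(TensorDataset(x_spec, x_mod, y), batch_size=16)

model = DummyFusion()
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
# label_smoothing=0.0 falls back to plain cross-entropy; save_cp=False avoids checkpoint writes.
trainer = TrainLoop(model, opt, loader, valid_loader=None, max_gnorm=10.0,
                    label_smoothing=0.0, verbose=-1, save_cp=False, cuda=False)
print(trainer.train(n_epochs=1, eval_every=10**6))  # evaluation skipped: no valid_loader
```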
#### File: dcase/kaldi_features/combine_scores.py
```python
from __future__ import print_function
import argparse
import torch
import torch.nn.functional as F
import numpy as np
import os
import sys
from tqdm import tqdm
from utils import parse_csv
import glob
def get_header(path):
with open(path, 'r') as file:
data = file.readlines()
return data[0].strip().split('\t')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Score level fusion')
parser.add_argument('--data-path', type=str, default='./data/', metavar='Path', help='Path to data')
parser.add_argument('--out-path', type=str, default='./out.csv', metavar='Path', help='Path to output scores')
args = parser.parse_args()
file_list = glob.glob(args.data_path + '*.csv')
print(file_list)
assert len(file_list)>1, 'Not enough files found in the specified folder. At least two files with score should be available in the folder.'
score_files = []
for score_file in file_list:
score_files.append(parse_csv(score_file))
out_data = [get_header(file_list[0])]
classes = out_data[0][2:]
idx_to_class = {}
for i, clss in enumerate(classes):
idx_to_class[str(i)] = clss
with torch.no_grad():
iterator = tqdm(score_files[0], total=len(score_files))
for filename in iterator:
out = 0.0
for score_dict in score_files:
out += score_dict[filename]
out /= len(score_files)
scores = {}
for index in idx_to_class:
scores[idx_to_class[index]] = out[0, int(index)].item()
pred_idx = str(out.max(1)[1].long().item())
pred = idx_to_class[pred_idx]
out_data.append([filename, pred, *[str(scores[class_name]) for class_name in classes]])
print('Storing scores in output file:')
print(args.out_path)
with open(args.out_path, 'w') as f:
for line in out_data:
f.write("%s" % '\t'.join(line)+'\n')
```
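The fusion step itself is a per-utterance average of the systems' score vectors followed by an argmax over classes; a toy sketch of that step with hypothetical in-memory scores (the real script reads them via `parse_csv` from the repo's `utils`, not shown here):
```python
# Toy illustration of the score-averaging step (hypothetical data).
import torch

classes = ['airport', 'bus', 'metro']
system_a = {'utt1': torch.tensor([[0.7, 0.2, 0.1]])}
system_b = {'utt1': torch.tensor([[0.5, 0.4, 0.1]])}

for utt in system_a:
    fused = (system_a[utt] + system_b[utt]) / 2   # average over systems
    pred = classes[fused.max(1)[1].item()]        # same argmax rule as above
    print(utt, pred, fused.tolist())
```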
#### File: mel_spec/models/vgg.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
def __init__(self, vgg_name, n_classes=1000):
super(VGG, self).__init__()
self.n_classes = n_classes
self.features = self._make_layers(cfg[vgg_name])
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.lin_proj = nn.Sequential(nn.Linear(512 * 7 * 7, n_classes))
def forward(self, x):
x = self.avgpool(self.features(x))
x = x.view(x.size(0), -1)
x = self.lin_proj(x)
return x
def _make_layers(self, cfg):
layers = []
in_channels = 1
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
```
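A quick shape check for the single-channel VGG above, assuming the `mel_spec` directory is on the path; the input size is arbitrary since the adaptive pooling fixes the classifier input:
```python
# Hypothetical shape check (input dimensions chosen arbitrarily).
import torch
from models.vgg import VGG  # assumes mel_spec/ is on the path

net = VGG('VGG11', n_classes=10)
spec = torch.randn(2, 1, 64, 250)   # (batch, 1 channel, mel bins, frames)
print(net(spec).shape)              # -> torch.Size([2, 10])
```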
#### File: dcase/modmel_spec/losses.py
```python
import math
import torch
from torch import nn
from scipy.special import binom
import torch.nn.functional as F
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, lbl_set_size, dim=1):
super(LabelSmoothingLoss, self).__init__()
self.confidence = 1.0 - label_smoothing
self.smoothing = label_smoothing
self.cls = lbl_set_size
self.dim = dim
def forward(self, pred, target):
pred = pred.log_softmax(dim=self.dim)
with torch.no_grad():
# true_dist = pred.data.clone()
true_dist = torch.zeros_like(pred)
true_dist.fill_(self.smoothing / (self.cls - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
``` |
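A small sanity check of the smoothed cross-entropy above, assuming `losses.py` is importable as in the training loops; the logits and targets are random placeholders:
```python
# Hypothetical sanity check for LabelSmoothingLoss.
import torch
from losses import LabelSmoothingLoss  # same import used by the training loops above

criterion = LabelSmoothingLoss(label_smoothing=0.1, lbl_set_size=10)
logits = torch.randn(8, 10)
targets = torch.randint(0, 10, (8,))
print(criterion(logits, targets))  # scalar loss; approaches plain CE as label_smoothing -> 0
```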
{
"source": "joaomonteirof/e2e_antispoofing",
"score": 3
} |
#### File: joaomonteirof/e2e_antispoofing/data_load.py
```python
import h5py
import numpy as np
import glob
import torch
from torch.utils.data import Dataset
import os
import random
def augment_spec(example):
with torch.no_grad():
if random.random()>0.5:
example = freq_mask(example, F=50, dim=1)
if random.random()>0.5:
example = freq_mask(example, F=50, dim=2)
if random.random()>0.5:
example += torch.randn_like(example)*random.choice([1e-1, 1e-2, 1e-3])
return example
def freq_mask(spec, F=100, num_masks=1, replace_with_zero=False, dim=1):
"""Frequency masking
adapted from https://espnet.github.io/espnet/_modules/espnet/utils/spec_augment.html
:param torch.Tensor spec: input tensor with shape (T, dim)
:param int F: maximum width of each mask
:param int num_masks: number of masks
:param bool replace_with_zero: if True, masked parts will be filled with 0,
if False, filled with mean
:param int dim: 1 or 2 indicating to which axis the mask corresponds
"""
assert dim==1 or dim==2, 'Only 1 or 2 are valid values for dim!'
F = min(F, spec.size(dim))
with torch.no_grad():
cloned = spec.clone()
num_bins = cloned.shape[dim]
for i in range(0, num_masks):
f = random.randrange(0, F)
f_zero = random.randrange(0, num_bins - f)
# avoids randrange error if values are equal and range is empty
if f_zero == f_zero + f:
return cloned
mask_end = random.randrange(f_zero, f_zero + f)
if replace_with_zero:
if dim==1:
cloned[:, f_zero:mask_end, :] = 0.0
elif dim==2:
cloned[:, :, f_zero:mask_end] = 0.0
else:
if dim==1:
cloned[:, f_zero:mask_end, :] = cloned.mean()
elif dim==2:
cloned[:, :, f_zero:mask_end] = cloned.mean()
return cloned
class Loader(Dataset):
def __init__(self, hdf5_clean, hdf5_attack, max_nb_frames, n_cycles=1, augment=False, label_smoothing=0.0):
super(Loader, self).__init__()
self.hdf5_1 = hdf5_clean
self.hdf5_2 = hdf5_attack
self.n_cycles = n_cycles
self.max_nb_frames = max_nb_frames
file_1 = h5py.File(self.hdf5_1, 'r')
self.idxlist_1 = list(file_1.keys())
self.len_1 = len(self.idxlist_1)
file_1.close()
file_2 = h5py.File(self.hdf5_2, 'r')
self.idxlist_2 = list(file_2.keys())
self.len_2 = len(self.idxlist_2)
file_2.close()
self.open_file_1 = None
self.open_file_2 = None
self.augment = augment
self.label_smoothing = label_smoothing>0.0
self.label_dif = label_smoothing
print('Number of genuine and spoofing recordings: {}, {}'.format(self.len_1, self.len_2))
def __getitem__(self, index):
if not self.open_file_1: self.open_file_1 = h5py.File(self.hdf5_1, 'r')
if not self.open_file_2: self.open_file_2 = h5py.File(self.hdf5_2, 'r')
index_1 = index % self.len_1
utt_clean = self.prep_utterance( self.open_file_1[self.idxlist_1[index_1]][0] )
index_2 = index % self.len_2
utt_attack = self.prep_utterance( self.open_file_2[self.idxlist_2[index_2]][0] )
if self.label_smoothing:
return utt_clean, utt_attack, torch.rand(1)*self.label_dif, torch.rand(1)*self.label_dif+(1.-self.label_dif)
else:
return utt_clean, utt_attack, torch.zeros(1), torch.ones(1)
def __len__(self):
return self.n_cycles*np.maximum(self.len_1, self.len_2)
def prep_utterance(self, data):
data = np.expand_dims(data, 0)
if data.shape[-1]>self.max_nb_frames:
ridx = np.random.randint(0, data.shape[-1]-self.max_nb_frames)
data_ = data[:, :, ridx:(ridx+self.max_nb_frames)]
else:
mul = int(np.ceil(self.max_nb_frames/data.shape[-1]))
data_ = np.tile(data, (1, 1, mul))
data_ = data_[:, :, :self.max_nb_frames]
data_ = torch.from_numpy(data_).float().contiguous()
if self.augment:
data_ = augment_spec(data_)
return data_
class Loader_all(Dataset):
def __init__(self, hdf5_la_clean, hdf5_la_attack, hdf5_pa, hdf5_mix, max_nb_frames, label_smoothing=0.0, n_cycles=1):
super(Loader_all, self).__init__()
self.hdf5_la_clean = hdf5_la_clean
self.hdf5_la_attack = hdf5_la_attack
self.hdf5_pa = hdf5_pa
self.hdf5_mix = hdf5_mix
self.n_cycles = n_cycles
self.max_nb_frames = max_nb_frames
file_1 = h5py.File(self.hdf5_la_clean, 'r')
self.idxlist_1 = list(file_1.keys())
self.len_1 = len(self.idxlist_1)
file_1.close()
file_2 = h5py.File(self.hdf5_la_attack, 'r')
self.idxlist_2 = list(file_2.keys())
self.len_2 = len(self.idxlist_2)
file_2.close()
self.open_file_la_clean = None
self.open_file_la_attack = None
self.open_file_pa = None
self.open_file_mix = None
self.label_smoothing = label_smoothing>0.0
self.label_dif = label_smoothing
print('Number of genuine, spoofing, and total recordings: {}, {}, {}'.format(self.len_1, self.len_2, self.len_1+self.len_2))
def __getitem__(self, index):
if not self.open_file_la_clean: self.open_file_la_clean = h5py.File(self.hdf5_la_clean, 'r')
if not self.open_file_la_attack: self.open_file_la_attack = h5py.File(self.hdf5_la_attack, 'r')
if not self.open_file_pa: self.open_file_pa = h5py.File(self.hdf5_pa, 'r')
if not self.open_file_mix: self.open_file_mix = h5py.File(self.hdf5_mix, 'r')
index_1 = index % self.len_1
utt_clean = self.idxlist_1[index_1]
utt_clean_la = self.prep_utterance( self.open_file_la_clean[utt_clean][0] )
utt_clean_pa = self.prep_utterance( self.open_file_pa[utt_clean][0] )
utt_clean_mix = self.prep_utterance( self.open_file_mix[utt_clean][0] )
index_2 = index % self.len_2
utt_attack = self.idxlist_2[index_2]
utt_attack_la = self.prep_utterance( self.open_file_la_attack[utt_attack][0] )
utt_attack_pa = self.prep_utterance( self.open_file_pa[utt_attack][0] )
utt_attack_mix = self.prep_utterance( self.open_file_mix[utt_attack][0] )
if self.label_smoothing:
return utt_clean_la, utt_clean_pa, utt_clean_mix, utt_attack_la, utt_attack_pa, utt_attack_mix, torch.rand(1)*self.label_dif, torch.rand(1)*self.label_dif+(1.-self.label_dif), self.get_label(utt_clean), self.get_label(utt_attack)
else:
return utt_clean_la, utt_clean_pa, utt_clean_mix, utt_attack_la, utt_attack_pa, utt_attack_mix, torch.zeros(1), torch.ones(1), self.get_label(utt_clean), self.get_label(utt_attack)
def __len__(self):
return self.n_cycles*np.maximum(self.len_1, self.len_2)
def prep_utterance(self, data):
data = np.expand_dims(data, 0)
if data.shape[-1]>self.max_nb_frames:
ridx = np.random.randint(0, data.shape[-1]-self.max_nb_frames)
data_ = data[:, :, ridx:(ridx+self.max_nb_frames)]
else:
mul = int(np.ceil(self.max_nb_frames/data.shape[-1]))
data_ = np.tile(data, (1, 1, mul))
data_ = data_[:, :, :self.max_nb_frames]
data_ = torch.from_numpy(data_).float().contiguous()
data_ = augment_spec(data_)
return data_
def get_label(self, utt):
prefix = utt.split('-_-')[0]
assert (prefix=='LA' or prefix=='PA' or prefix=='CLEAN')
if prefix=='LA':
if self.label_smoothing:
return torch.rand(1)*self.label_dif+(1.-self.label_dif)
else:
return torch.ones(1)
elif prefix=='PA':
if self.label_smoothing:
return torch.rand(1)*self.label_dif
else:
return torch.zeros(1)
elif prefix=='CLEAN':
if self.label_smoothing:
return 0.5*torch.ones(1) + torch.rand(1)*self.label_dif-self.label_dif*0.5
else:
return 0.5*torch.ones(1)
class Loader_all_valid(Dataset):
def __init__(self, hdf5_la_clean, hdf5_la_attack, hdf5_pa, hdf5_mix, max_nb_frames, n_cycles=1):
super(Loader_all_valid, self).__init__()
self.hdf5_la_clean = hdf5_la_clean
self.hdf5_la_attack = hdf5_la_attack
self.hdf5_pa = hdf5_pa
self.hdf5_mix = hdf5_mix
self.n_cycles = n_cycles
self.max_nb_frames = max_nb_frames
file_1 = h5py.File(self.hdf5_la_clean, 'r')
self.idxlist_1 = list(file_1.keys())
self.len_1 = len(self.idxlist_1)
file_1.close()
file_2 = h5py.File(self.hdf5_la_attack, 'r')
self.idxlist_2 = list(file_2.keys())
self.len_2 = len(self.idxlist_2)
file_2.close()
self.open_file_la_clean = None
self.open_file_la_attack = None
self.open_file_pa = None
self.open_file_mix = None
print('Number of genuine, spoofing, and total recordings: {}, {}, {}'.format(self.len_1, self.len_2, self.len_1+self.len_2))
def __getitem__(self, index):
if not self.open_file_la_clean: self.open_file_la_clean = h5py.File(self.hdf5_la_clean, 'r')
if not self.open_file_la_attack: self.open_file_la_attack = h5py.File(self.hdf5_la_attack, 'r')
if not self.open_file_pa: self.open_file_pa = h5py.File(self.hdf5_pa, 'r')
if not self.open_file_mix: self.open_file_mix = h5py.File(self.hdf5_mix, 'r')
index_1 = index % self.len_1
utt_clean = self.idxlist_1[index_1]
utt_clean_la = self.prep_utterance( self.open_file_la_clean[utt_clean][0] )
utt_clean_pa = self.prep_utterance( self.open_file_pa[utt_clean][0] )
utt_clean_mix = self.prep_utterance( self.open_file_mix[utt_clean][0] )
index_2 = index % self.len_2
utt_attack = self.idxlist_2[index_2]
utt_attack_la = self.prep_utterance( self.open_file_la_attack[utt_attack][0] )
utt_attack_pa = self.prep_utterance( self.open_file_pa[utt_attack][0] )
utt_attack_mix = self.prep_utterance( self.open_file_mix[utt_attack][0] )
return utt_clean_la, utt_clean_pa, utt_clean_mix, utt_attack_la, utt_attack_pa, utt_attack_mix, torch.zeros(1), torch.ones(1)
def __len__(self):
return self.n_cycles*np.maximum(self.len_1, self.len_2)
def prep_utterance(self, data):
data = np.expand_dims(data, 0)
if data.shape[-1]>self.max_nb_frames:
ridx = np.random.randint(0, data.shape[-1]-self.max_nb_frames)
data_ = data[:, :, ridx:(ridx+self.max_nb_frames)]
else:
mul = int(np.ceil(self.max_nb_frames/data.shape[-1]))
data_ = np.tile(data, (1, 1, mul))
data_ = data_[:, :, :self.max_nb_frames]
data_ = torch.from_numpy(data_).float().contiguous()
return data_
class Loader_mcc(Dataset):
def __init__(self, hdf5_clean, hdf5_attack, max_nb_frames, file_lists_path, n_cycles=1):
super(Loader_mcc, self).__init__()
self.labels_dict = {'AA':1, 'AB':2, 'AC':3, 'BA':4, 'BB':5, 'BC':6, 'CA':7, 'CB':8, 'CC':9}
self.utt2att = self.read_files_lists(file_lists_path)
self.hdf5_1 = hdf5_clean
self.hdf5_2 = hdf5_attack
self.n_cycles = n_cycles
self.max_nb_frames = max_nb_frames
file_1 = h5py.File(self.hdf5_1, 'r')
self.idxlist_1 = list(file_1.keys())
self.len_1 = len(self.idxlist_1)
file_1.close()
file_2 = h5py.File(self.hdf5_2, 'r')
self.idxlist_2 = list(file_2.keys())
self.len_2 = len(self.idxlist_2)
file_2.close()
self.open_file_1 = None
self.open_file_2 = None
print('Number of genuine and spoofing recordings: {}, {}'.format(self.len_1, self.len_2))
def __getitem__(self, index):
if not self.open_file_1: self.open_file_1 = h5py.File(self.hdf5_1, 'r')
if not self.open_file_2: self.open_file_2 = h5py.File(self.hdf5_2, 'r')
index_1 = index % self.len_1
utt_clean = self.prep_utterance( self.open_file_1[self.idxlist_1[index_1]][0] )
index_2 = index % self.len_2
utt_attack = self.prep_utterance( self.open_file_2[self.idxlist_2[index_2]][0] )
if np.random.rand() > 0.5:
return utt_clean, utt_attack, torch.zeros(1).long(), (torch.ones(1)*self.labels_dict[self.utt2att[self.idxlist_2[index_2]]]).long()
else:
return utt_attack, utt_clean, (torch.ones(1)*self.labels_dict[self.utt2att[self.idxlist_2[index_2]]]).long(), torch.zeros(1).long()
def __len__(self):
return self.n_cycles*np.maximum(self.len_1, self.len_2)
def prep_utterance(self, data):
data = np.expand_dims(data, 0)
if data.shape[-1]>self.max_nb_frames:
ridx = np.random.randint(0, data.shape[-1]-self.max_nb_frames)
data_ = data[:, :, ridx:(ridx+self.max_nb_frames)]
else:
mul = int(np.ceil(self.max_nb_frames/data.shape[-1]))
data_ = np.tile(data, (1, 1, mul))
data_ = data_[:, :, :self.max_nb_frames]
data_ = torch.from_numpy(data_).float().contiguous()
return data_
def read_files_lists(self, files_path):
files_list = glob.glob(files_path + '*.lst')
utt2att = {}
for file_ in files_list:
attack_type = file_.split('_')[-1].split('.')[0]
utts_list = self.read_utts(file_)
for utt in utts_list:
utt2att[utt] = attack_type
return utt2att
def read_utts(self, file_):
with open(file_, 'r') as file:
utt_attacks = file.readlines()
utt_list = []
for line in utt_attacks:
utt = line.split('/')[-1].split('.')[0]
utt_list.append(utt)
return utt_list
```
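A short illustration of the SpecAugment-style masking used by the loaders above, assuming `data_load.py` (and its `h5py` dependency) is importable; the tensor shape mirrors what `prep_utterance` produces (channel, frequency bins, frames):
```python
# Hypothetical illustration of freq_mask on a random "spectrogram".
import torch
from data_load import freq_mask

spec = torch.randn(1, 257, 400)   # (channel, freq bins, frames)
masked = freq_mask(spec, F=50, num_masks=1, replace_with_zero=True, dim=1)
print(spec.shape, masked.shape)   # shapes are unchanged
print((masked == 0).any().item()) # a frequency band is usually zeroed out
```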
#### File: joaomonteirof/e2e_antispoofing/detect_features.py
```python
import argparse
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import pickle
from kaldi_io import read_vec_flt_ark, read_vec_flt_scp
import glob
import os
def compute_eer(y, y_score):
fpr, tpr, thresholds = metrics.roc_curve(y, y_score, pos_label=1)
fnr = 1 - tpr
eer_threshold = thresholds[np.nanargmin(np.abs(fnr-fpr))]
eer = fpr[np.nanargmin(np.abs(fnr-fpr))]
return eer
def get_data_old(path1, path2):
files_list_1 = glob.glob(path1 + 'ivector.*.ark')
if not len(files_list_1)>0:
raise ValueError('Nothing found at {}'.format(path1))
features_1 = {}
for file_ in files_list_1:
features_1.update( { k:m for k,m in read_vec_flt_ark(file_) } )
if path2 is not None:
files_list_2 = glob.glob(path2 + 'ivector.*.ark')
features_2 = {}
for file_ in files_list_2:
features_2.update( { k:m for k,m in read_vec_flt_ark(file_) } )
x, y = [], []
for k, m in features_1.items():
utt_type = k.split('-')[-1]
y.append(0 if utt_type=='spoof' else 1)
if path2 is not None and k in features_2:
x.append(np.concatenate([m, features_2[k]], 0))
else:
x.append(m)
return np.asarray(x), np.asarray(y)
def get_data(path1, path2):
file_ = path1 + 'ivector.scp'
features_1 = { k:m for k,m in read_vec_flt_scp(file_) }
if path2 is not None:
file_ = path2 + 'ivector.scp'
features_2 = { k:m for k,m in read_vec_flt_scp(file_) }
x, y = [], []
for k, m in features_1.items():
utt_type = k.split('-')[-1]
y.append(0 if utt_type=='spoof' else 1)
if path2 is not None and k in features_2:
x.append(np.concatenate([m, features_2[k]], 0))
else:
x.append(m)
return np.asarray(x), np.asarray(y)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute embeddings')
parser.add_argument('--path-to-data', type=str, default='./data/', metavar='Path', help='Path to set of ark files containing features with naming following: ivectors.*.ark')
parser.add_argument('--path2-to-data', type=str, default=None, metavar='Path', help='extra set of features for bi-model setting')
parser.add_argument('--out-path', type=str, default='./trained_model.p', metavar='Path', help='Path to output trained model')
parser.add_argument('--eval-dev', action='store_true', default=False, help='Enables evaluation on dev data')
parser.add_argument('--no-out', action='store_true', default=False, help='Disables saving of best model')
args = parser.parse_args()
###############################################################################
# Read data
print('Loading train data')
X, Y = get_data(path1=args.path_to_data+'ivectors_train/', path2=args.path2_to_data+'ivectors_train/' if args.path2_to_data is not None else None)
print(X.shape, Y.shape)
###############################################################################
# Set up and run grid search
print('Training model')
forest = RandomForestClassifier()
parameters = {'criterion':['gini', 'entropy'], 'n_estimators':[50, 100, 200, 400, 600]}
clf = GridSearchCV(forest, parameters, cv=5, scoring='roc_auc', n_jobs=-1, verbose=2)
clf.fit(X, Y)
print('Training done!')
###############################################################################
# Printing results
print('Random Forest')
print('Best AUC: {}'.format(clf.best_score_) )
print('Parameters yielding best AUC: {}'.format(clf.best_params_) )
print('All results:')
print(clf.cv_results_)
###############################################################################
# Saving final model
if not args.no_out:
print('Saving model')
if os.path.isfile(args.out_path):
os.remove(args.out_path)
pickle.dump(clf.best_estimator_, open(args.out_path, 'wb'))
###############################################################################
# Evaluation on final data if provided
if args.eval_dev:
print('Evaluating EER on development data')
X, Y = get_data(path1=args.path_to_data+'ivectors_dev/', path2=args.path2_to_data+'ivectors_dev/' if args.path2_to_data is not None else None)
eer = compute_eer(Y, clf.best_estimator_.predict_proba(X)[:,1])
print('EER: {}'.format(eer))
```
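The EER helper above can be exercised on synthetic scores; a hedged sketch assuming `detect_features.py` is importable (its CLI parsing only runs under `__main__`, so importing is safe) and that `kaldi_io` and scikit-learn are installed:
```python
# Hypothetical check of compute_eer with synthetic, well-separated scores.
import numpy as np
from detect_features import compute_eer

rng = np.random.RandomState(0)
y = np.concatenate([np.ones(500), np.zeros(500)])        # 1 = bona fide, 0 = spoof
scores = np.concatenate([rng.normal(1.0, 0.5, 500),      # genuine scores higher
                         rng.normal(-1.0, 0.5, 500)])
print('EER: {:.3f}'.format(compute_eer(y, scores)))      # small value for separated classes
```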
#### File: joaomonteirof/e2e_antispoofing/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
class SelfAttention(nn.Module):
def __init__(self, hidden_size, mean_only=False):
super(SelfAttention, self).__init__()
#self.output_size = output_size
self.hidden_size = hidden_size
self.att_weights = nn.Parameter(torch.Tensor(1, hidden_size),requires_grad=True)
self.mean_only = mean_only
init.kaiming_uniform_(self.att_weights)
def forward(self, inputs):
batch_size = inputs.size(0)
weights = torch.bmm(inputs, self.att_weights.permute(1, 0).unsqueeze(0).repeat(batch_size, 1, 1))
if inputs.size(0)==1:
attentions = F.softmax(torch.tanh(weights),dim=1)
weighted = torch.mul(inputs, attentions.expand_as(inputs))
else:
attentions = F.softmax(torch.tanh(weights.squeeze()),dim=1)
weighted = torch.mul(inputs, attentions.unsqueeze(2).expand_as(inputs))
if self.mean_only:
return weighted.sum(1)
else:
noise = 1e-5*torch.randn(weighted.size())
if inputs.is_cuda:
noise = noise.to(inputs.device)
avg_repr, std_repr = weighted.sum(1), (weighted+noise).std(1)
representations = torch.cat((avg_repr,std_repr),1)
return representations
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride, *args, **kwargs):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride, *args, **kwargs):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
def conv3x3(in_planes, out_planes, stride=1, groups=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class SEBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,
*, reduction=16, **kwargs):
super(SEBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.se = SELayer(planes, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,
*, reduction=16, **kwargs):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEModule(nn.Module):
def __init__(self, channels, reduction=16):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, input):
x = self.avg_pool(input)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return input * x
class Res2NetBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, scales=4, groups=1, se=False, norm_layer=None, **kwargs):
super(Res2NetBottleneck, self).__init__()
if planes % scales != 0:
raise ValueError('Planes must be divisible by scales')
if norm_layer is None:
norm_layer = nn.BatchNorm2d
bottleneck_planes = groups * planes
self.conv1 = conv1x1(inplanes, bottleneck_planes, stride)
self.bn1 = norm_layer(bottleneck_planes)
self.conv2 = nn.ModuleList([conv3x3(bottleneck_planes // scales, bottleneck_planes // scales, groups=groups) for _ in range(scales-1)])
self.bn2 = nn.ModuleList([norm_layer(bottleneck_planes // scales) for _ in range(scales-1)])
self.conv3 = conv1x1(bottleneck_planes, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.se = SEModule(planes * self.expansion) if se else None
self.downsample = downsample
self.stride = stride
self.scales = scales
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
xs = torch.chunk(out, self.scales, 1)
ys = []
for s in range(self.scales):
if s == 0:
ys.append(xs[s])
elif s == 1:
ys.append(self.relu(self.bn2[s-1](self.conv2[s-1](xs[s]))))
else:
ys.append(self.relu(self.bn2[s-1](self.conv2[s-1](xs[s] + ys[-1]))))
out = torch.cat(ys, 1)
out = self.conv3(out)
out = self.bn3(out)
if self.se is not None:
out = self.se(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class cnn_lstm(nn.Module):
def __init__(self, n_z=256, nclasses=-1):
super(cnn_lstm, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=(5,5), padding=(1,2), dilation=(1,2), stride=(2,3), bias=False),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=(5,5), padding=(1,2), dilation=(1,2), stride=(2,2), bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=(5,5), padding=(1,2), dilation=(1,1), stride=(2, 1), bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=(5,5), padding=(1,2), dilation=(1,1), stride=(2, 1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU() )
self.conv_fin = nn.Conv2d(256, 256, kernel_size=(15,3), stride=(1,1), padding=(0,1), bias=False)
self.bn_fin = nn.BatchNorm2d(256)
self.lstm = nn.LSTM(256, 512, 2, bidirectional=True, batch_first=False)
self.fc_mu = nn.Linear(512*2, nclasses) if nclasses>2 else nn.Linear(512*2, 1)
self.initialize_params()
def forward(self, x):
x = self.features(x)
x = self.conv_fin(x)
feats = F.relu(self.bn_fin(x)).squeeze(2)
feats = feats.permute(2,0,1)
batch_size = feats.size(1)
seq_size = feats.size(0)
h0 = torch.zeros(2*2, batch_size, 512)
c0 = torch.zeros(2*2, batch_size, 512)
if x.is_cuda:
h0 = h0.to(x.device)
c0 = c0.to(x.device)
out_seq, h_c = self.lstm(feats, (h0, c0))
out_end = out_seq.mean(0)
mu = self.fc_mu(out_end)
return mu
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight)
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
RESNET_CONFIGS = {'18':[[2,2,2,2], PreActBlock],
'28':[[3,4,6,3], PreActBlock],
'34':[[3,4,6,3], PreActBlock],
'50':[[3,4,6,3], PreActBottleneck],
'101':[[3,4,23,3], PreActBottleneck],
'se_18':[[2,2,2,2], SEBasicBlock],
'se_28':[[3,4,6,3], SEBasicBlock],
'se_34':[[3,4,6,3], SEBasicBlock],
'se_50':[[3,4,6,3], SEBottleneck],
'se_101':[[3,4,23,3], SEBottleneck],
'2net_18':[[2,2,2,2], Res2NetBottleneck],
'2net_se_18':[[2,2,2,2], Res2NetBottleneck]}
class ResNet(nn.Module):
def __init__(self, resnet_type='18', nclasses=-1, groups=1, width=16, scales=4):
self.in_planes = 16
super(ResNet, self).__init__()
layers, block = RESNET_CONFIGS[resnet_type]
self._norm_layer = nn.BatchNorm2d
self.conv1 = nn.Conv2d(1, 16, kernel_size=(9,3), stride=(3,1), padding=(1,1), bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.activation = nn.ReLU()
self.layer1 = self._make_layer(block, 64, layers[0], stride=1, scales=scales, groups=groups, se='se_' in resnet_type)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, scales=scales, groups=groups, se='se_' in resnet_type)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, scales=scales, groups=groups, se='se_' in resnet_type)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, scales=scales, groups=groups, se='se_' in resnet_type)
self.conv5 = nn.Conv2d(512*block.expansion, 256, kernel_size=(11,3), stride=(1,1), padding=(0,1), bias=False)
self.bn5 = nn.BatchNorm2d(256)
self.fc = nn.Linear(256*2,256)
self.lbn = nn.BatchNorm1d(256)
self.fc_mu = nn.Linear(256, nclasses) if nclasses>2 else nn.Linear(256, 1)
self.initialize_params()
self.attention = SelfAttention(256)
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride=1, **kwargs):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
downsample = nn.Sequential( conv1x1(self.in_planes, planes * block.expansion, stride), norm_layer(planes * block.expansion) )
layers = []
layers.append(block(self.in_planes, planes, stride, downsample, 1, 64, 1, norm_layer))
self.in_planes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(block(self.in_planes, planes, 1, groups=1, base_width=64, dilation=False, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.activation(self.bn1(x))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv5(x)
x = self.activation(self.bn5(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
fc = F.relu(self.lbn(self.fc(stats)))
mu = self.fc_mu(fc)
#embs = torch.div(mu, torch.norm(mu, 2, 1).unsqueeze(1).expand_as(mu))
return mu
class ResNet_pca(nn.Module):
def __init__(self, resnet_type='18', nclasses=-1):
self.in_planes = 16
super(ResNet_pca, self).__init__()
layers, block = RESNET_CONFIGS[resnet_type]
self._norm_layer = nn.BatchNorm2d
self.conv1 = nn.Conv2d(1, 16, kernel_size=(9,3), stride=(3,1), padding=(1,1), bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.activation = nn.ReLU()
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.conv5 = nn.Conv2d(512*block.expansion, 256, kernel_size=(5,3), stride=(1,1), padding=(0,1), bias=False)
self.bn5 = nn.BatchNorm2d(256)
self.fc = nn.Linear(256*2,256)
self.lbn = nn.BatchNorm1d(256)
self.fc_mu = nn.Linear(256, nclasses) if nclasses>2 else nn.Linear(256, 1)
self.initialize_params()
self.attention = SelfAttention(256)
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride=1):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
downsample = nn.Sequential( conv1x1(self.in_planes, planes * block.expansion, stride), norm_layer(planes * block.expansion) )
layers = []
layers.append(block(self.in_planes, planes, stride, downsample, 1, 64, 1, norm_layer))
self.in_planes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(block(self.in_planes, planes, 1, groups=1, base_width=64, dilation=False, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.activation(self.bn1(x))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv5(x)
x = self.activation(self.bn5(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
fc = F.elu(self.lbn(self.fc(stats)))
mu = self.fc_mu(fc)
#embs = torch.div(mu, torch.norm(mu, 2, 1).unsqueeze(1).expand_as(mu))
return mu
class ResNet_CC(nn.Module):
def __init__(self, n_z=256, resnet_type='18', nclasses=-1, ncoef=90, init_coef=0):
self.in_planes = 16
super(ResNet_CC, self).__init__()
layers, block = RESNET_CONFIGS[resnet_type]
self._norm_layer = nn.BatchNorm2d
self.ncoef=ncoef
self.init_coef=init_coef
self.conv1 = nn.Conv2d(1, 16, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.activation = nn.ReLU()
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.fc_1 = nn.Linear(block.expansion*512*2,256)
self.lbn = nn.BatchNorm1d(256)
self.fc_2 = nn.Linear(256, nclasses) if nclasses>2 else nn.Linear(256, 1)
self.initialize_params()
self.attention = SelfAttention(block.expansion*512)
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride=1):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
downsample = nn.Sequential( conv1x1(self.in_planes, planes * block.expansion, stride), norm_layer(planes * block.expansion) )
layers = []
layers.append(block(self.in_planes, planes, stride, downsample, 1, 64, 1, norm_layer))
self.in_planes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(block(self.in_planes, planes, 1, groups=1, base_width=64, dilation=False, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = x[:,:,self.init_coef:,:]
x = self.conv1(x)
x = self.activation(self.bn1(x))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
fc = F.relu(self.lbn(self.fc_1(stats)))
out = self.fc_2(fc)
return out
class BasicBlock(nn.Module):
def __init__(self, inplane, outplane, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(inplane)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(inplane, outplane, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(outplane)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(outplane, outplane, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (inplane == outplane)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(inplane, outplane, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i==0 and in_planes or out_planes, out_planes, i==0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth=28, widen_factor=10, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(1, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
#global average pooling
self.relu = nn.ReLU(inplace=True)
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.conv_out = nn.Conv2d(nChannels[3], 256, kernel_size=(7,3), stride=(1,1), padding=(0,1), bias=False)
self.bn_out = nn.BatchNorm2d(256)
self.attention = SelfAttention(256)
self.out = nn.Linear(256*2, 1)
# normal weight init
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.relu(self.bn1(x))
x = F.avg_pool2d(x, 4)
x = F.relu(self.bn_out(self.conv_out(x))).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.out(stats)
return x
class mfm(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):
super(mfm, self).__init__()
self.out_channels = out_channels
if type == 1:
self.filter = nn.Conv2d(in_channels, 2*out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.filter = nn.Linear(in_channels, 2*out_channels)
def forward(self, x):
x = self.filter(x)
out = torch.split(x, self.out_channels, 1)
return torch.max(out[0], out[1])
class group(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(group, self).__init__()
self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)
self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)
def forward(self, x):
x = self.conv_a(x)
x = self.conv(x)
return x
class resblock(nn.Module):
def __init__(self, in_channels, out_channels):
super(resblock, self).__init__()
self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
res = x
out = self.conv1(x)
out = self.conv2(out)
out = out + res
return out
class lcnn_9layers(nn.Module):
def __init__(self, nclasses=-1):
super(lcnn_9layers, self).__init__()
self.features = nn.Sequential(
mfm(1, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) )
self.conv_final = nn.Conv2d(128, 128, kernel_size=(17,3), stride=(1,1), padding=(0,1), bias=False)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = mfm(128, 128, type=0)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def forward(self, x):
x = self.features(x)
x = F.relu(self.conv_final(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out
class lcnn_9layers_CC(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0):
super(lcnn_9layers_CC, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.activation = nn.ReLU()
self.ncoef=ncoef
self.init_coef=init_coef
self.features = nn.Sequential(
mfm(16, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) )
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = mfm(128, 128, type=0)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def forward(self, x):
x = x[:,:,self.init_coef:,:]
x = self.conv1(x)
x = self.activation(self.bn1(x))
x = self.features(x).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out
class lcnn_29layers_CC(nn.Module):
def __init__(self, block=resblock, layers=[1, 2, 3, 4], nclasses=-1, ncoef=90, init_coef=0):
super(lcnn_29layers_CC, self).__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.conv1_ = nn.Conv2d(1, 32, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.activation = nn.ReLU()
self.conv1 = mfm(32, 48, 5, 1, 2)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = nn.Linear(128, 128)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = x[:,:,self.init_coef:,:]
x = self.conv1_(x)
x = self.activation(self.bn1(x))
x = self.conv1(x)
x = F.max_pool2d(x, 2, ceil_mode=True) + F.avg_pool2d(x, 2, ceil_mode=True)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2, ceil_mode=True) + F.avg_pool2d(x, 2, ceil_mode=True)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2, ceil_mode=True) + F.avg_pool2d(x, 2, ceil_mode=True)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2, ceil_mode=True) + F.avg_pool2d(x, 2, ceil_mode=True)
x = x.squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
fc = self.fc1(x)
x = F.dropout(fc, training=self.training)
out = self.fc2(x)
return out
class lcnn_29layers_v2(nn.Module):
def __init__(self, block=resblock, layers=[1, 2, 3, 4], nclasses=-1):
super(lcnn_29layers_v2, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.conv_final = nn.Conv2d(128, 128, kernel_size=(16,3), stride=(1,1), padding=(0,1), bias=False)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = nn.Linear(128, 128)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = F.relu(self.conv_final(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
fc = self.fc1(x)
x = F.dropout(fc, training=self.training)
out = self.fc2(x)
return out
class lcnn_9layers_pca(nn.Module):
def __init__(self, nclasses=-1):
super(lcnn_9layers_pca, self).__init__()
self.features = nn.Sequential(
mfm(1, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) )
self.conv_final = nn.Conv2d(128, 128, kernel_size=(8,3), stride=(1,1), padding=(0,1), bias=False)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = mfm(128, 128, type=0)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def forward(self, x):
x = self.features(x)
x = F.relu(self.conv_final(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out
class lcnn_9layers_prodspec(nn.Module):
def __init__(self, nclasses=-1):
super(lcnn_9layers_prodspec, self).__init__()
self.features = nn.Sequential(
mfm(1, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) )
self.conv_final = nn.Conv2d(128, 128, kernel_size=(17,3), stride=(1,1), padding=(0,1), bias=False)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = mfm(128, 128, type=0)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def forward(self, x):
x = self.features(x)
x = F.relu(self.conv_final(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out
class lcnn_9layers_icqspec(nn.Module):
def __init__(self, nclasses=-1):
super(lcnn_9layers_icqspec, self).__init__()
self.features = nn.Sequential(
mfm(1, 48, 5, 1, 2),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(48, 96, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(96, 192, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
group(192, 128, 3, 1, 1),
group(128, 128, 3, 1, 1),
nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) )
self.conv_final = nn.Conv2d(128, 128, kernel_size=(16,3), stride=(1,1), padding=(0,1), bias=False)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = mfm(128, 128, type=0)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def forward(self, x):
x = self.features(x)
x = F.relu(self.conv_final(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
x = self.fc1(x)
x = F.dropout(x, training=self.training)
out = self.fc2(x)
return out
class lcnn_29layers_v2_pca(nn.Module):
def __init__(self, block=resblock, layers=[1, 2, 3, 4], nclasses=-1):
super(lcnn_29layers_v2_pca, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.group1 = group(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.group2 = group(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.group3 = group(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.group4 = group(128, 128, 3, 1, 1)
self.conv_final = nn.Conv2d(128, 128, kernel_size=(7,3), stride=(1,1), padding=(0,1), bias=False)
self.attention = SelfAttention(128)
self.fc = nn.Linear(2*128,128)
self.fc1 = nn.Linear(128, 128)
self.fc2 = nn.Linear(128, nclasses) if nclasses>2 else nn.Linear(128, 1)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.group1(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.group2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.group3(x)
x = self.block4(x)
x = self.group4(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = F.relu(self.conv_final(x)).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.fc(stats)
fc = self.fc1(x)
x = F.dropout(fc, training=self.training)
out = self.fc2(x)
return out
class StatisticalPooling(nn.Module):
def forward(self, x):
# x is 3-D with axis [B, feats, T]
mu = x.mean(dim=2, keepdim=False)
std = (x+torch.randn_like(x)*1e-6).std(dim=2, keepdim=False)
return torch.cat((mu, std), dim=1)
class TDNN(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0):
super(TDNN, self).__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.model = nn.Sequential( nn.Conv1d(ncoef, 512, 5, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 3, dilation=2, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 3, dilation=3, padding=3),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 1500, 1),
nn.BatchNorm1d(1500),
nn.ReLU(inplace=True) )
self.pooling = StatisticalPooling()
self.post_pooling = nn.Sequential(nn.Linear(3000, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, nclasses) if nclasses>2 else nn.Linear(512, 1) )
def forward(self, x):
x = x[:,:,self.init_coef:,:].squeeze(1)
x = self.model(x)
x = self.pooling(x)
out = self.post_pooling(x)
return out
class TDNN_multipool(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0, n_heads=16):
super().__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.model_1 = nn.Sequential( nn.Conv1d(ncoef, 512, 5, padding=2),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_2 = nn.Sequential( nn.Conv1d(512, 512, 5, padding=2),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_3 = nn.Sequential( nn.Conv1d(512, 512, 5, padding=3),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_4 = nn.Sequential( nn.Conv1d(512, 512, 7),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_5 = nn.Sequential( nn.Conv1d(512, 512, 1),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.stats_pooling = StatisticalPooling()
self.multihead_pooling = nn.TransformerEncoderLayer(d_model=1024, nhead=n_heads, dim_feedforward=512, dropout=0.1)
self.post_pooling_1 = nn.Sequential(nn.Linear(1024, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.post_pooling_2 = nn.Sequential(nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, nclasses) if nclasses>2 else nn.Linear(512, 1) )
def forward(self, x):
x_pool = []
x = x.squeeze(1)
x_1 = self.model_1(x)
x_pool.append(self.stats_pooling(x_1).unsqueeze(-1))
x_2 = self.model_2(x_1)
x_pool.append(self.stats_pooling(x_2).unsqueeze(-1))
x_3 = self.model_3(x_2)
x_pool.append(self.stats_pooling(x_3).unsqueeze(-1))
x_4 = self.model_4(x_3)
x_pool.append(self.stats_pooling(x_4).unsqueeze(-1))
x_5 = self.model_5(x_4)
x_pool.append(self.stats_pooling(x_5).unsqueeze(-1))
x_pool = torch.cat(x_pool, -1).permute(2,0,1)
x_pool = self.multihead_pooling(x_pool)
x_pool = x_pool.permute(1,2,0).mean(-1)
x = self.post_pooling_1(x_pool)
out = self.post_pooling_2(x)
return out
class TDNN_ablation(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0):
super().__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.model_1 = nn.Sequential( nn.Conv1d(ncoef, 512, 5, padding=2),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_2 = nn.Sequential( nn.Conv1d(512, 512, 5, padding=2),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_3 = nn.Sequential( nn.Conv1d(512, 512, 5, padding=3),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_4 = nn.Sequential( nn.Conv1d(512, 512, 7),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.model_5 = nn.Sequential( nn.Conv1d(512, 512, 1),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.stats_pooling = StatisticalPooling()
self.post_pooling_1 = nn.Sequential(nn.Linear(1024, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512) )
self.post_pooling_2 = nn.Sequential(nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, nclasses) if nclasses>2 else nn.Linear(512, 1) )
def forward(self, x):
x_pool = []
x = x.squeeze(1)
x_1 = self.model_1(x)
x_pool.append(self.stats_pooling(x_1).unsqueeze(-1))
x_2 = self.model_2(x_1)
x_pool.append(self.stats_pooling(x_2).unsqueeze(-1))
x_3 = self.model_3(x_2)
x_pool.append(self.stats_pooling(x_3).unsqueeze(-1))
x_4 = self.model_4(x_3)
x_pool.append(self.stats_pooling(x_4).unsqueeze(-1))
x_5 = self.model_5(x_4)
x_pool.append(self.stats_pooling(x_5).unsqueeze(-1))
x_pool = torch.cat(x_pool, -1).mean(-1)
x = self.post_pooling_1(x_pool)
out = self.post_pooling_2(x)
return out
class TDNN_LSTM(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0):
super(TDNN_LSTM, self).__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.model_1 = nn.Sequential( nn.Conv1d(ncoef, 512, 5, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True) )
self.lstm = nn.LSTM(input_size=512, hidden_size=512, num_layers=1, bidirectional=False, batch_first=False)
self.model_2 = nn.Sequential(
nn.Conv1d(512, 512, 3, dilation=2, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 3, dilation=3, padding=3),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 1500, 1),
nn.BatchNorm1d(1500),
nn.ReLU(inplace=True) )
self.pooling = StatisticalPooling()
self.post_pooling = nn.Sequential(nn.Linear(3000, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, nclasses) if nclasses>2 else nn.Linear(512, 1) )
def forward(self, x):
x = x[:,:,self.init_coef:,:].squeeze(1)
batch_size = x.size(0)
h0 = torch.zeros(1, batch_size, 512).to(x.device)
c0 = torch.zeros(1, batch_size, 512).to(x.device)
x = self.model_1(x)
x = x.permute(2,0,1)
x_rec, h_c = self.lstm(x, (h0, c0))
x = (x_rec+x).permute(1,2,0)
x = self.model_2(x)
x = self.pooling(x)
out = self.post_pooling(x)
return out
class Linear(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0):
super(Linear, self).__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.pooling = StatisticalPooling()
self.post_pooling = nn.Sequential( nn.Linear(2*(ncoef-init_coef), nclasses) if nclasses>2 else nn.Linear(2*(ncoef-init_coef), 1) )
def forward(self, x):
x = x[:,:,self.init_coef:,:].squeeze(1)
x = self.pooling(x)
out = self.post_pooling(x)
return out
class SOrthConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, padding_mode='zeros'):
'''
		Conv1d with a method for stepping towards semi-orthogonality
http://danielpovey.com/files/2018_interspeech_tdnnf.pdf
'''
super(SOrthConv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False, padding_mode=padding_mode)
self.reset_parameters()
def forward(self, x):
x = self.conv(x)
return x
def step_semi_orth(self):
with torch.no_grad():
M = self.get_semi_orth_weight(self.conv)
self.conv.weight.copy_(M)
def reset_parameters(self):
# Standard dev of M init values is inverse of sqrt of num cols
nn.init._no_grad_normal_(self.conv.weight, 0.,
self.get_M_shape(self.conv.weight)[1]**-0.5)
def orth_error(self):
return self.get_semi_orth_error(self.conv).item()
@staticmethod
def get_semi_orth_weight(conv1dlayer):
# updates conv1 weight M using update rule to make it more semi orthogonal
# based off ConstrainOrthonormalInternal in nnet-utils.cc in Kaldi src/nnet3
# includes the tweaks related to slowing the update speed
# only an implementation of the 'floating scale' case
with torch.no_grad():
update_speed = 0.125
orig_shape = conv1dlayer.weight.shape
# a conv weight differs slightly from TDNN formulation:
# Conv weight: (out_filters, in_filters, kernel_width)
# TDNN weight M is of shape: (in_dim, out_dim) or [rows, cols]
# the in_dim of the TDNN weight is equivalent to in_filters * kernel_width of the Conv
M = conv1dlayer.weight.reshape(
orig_shape[0], orig_shape[1]*orig_shape[2]).T
# M now has shape (in_dim[rows], out_dim[cols])
mshape = M.shape
if mshape[0] > mshape[1]: # semi orthogonal constraint for rows > cols
M = M.T
P = torch.mm(M, M.T)
PP = torch.mm(P, P.T)
trace_P = torch.trace(P)
trace_PP = torch.trace(PP)
ratio = trace_PP * P.shape[0] / (trace_P * trace_P)
# the following is the tweak to avoid divergence (more info in Kaldi)
assert ratio > 0.99
if ratio > 1.02:
update_speed *= 0.5
if ratio > 1.1:
update_speed *= 0.5
scale2 = trace_PP/trace_P
update = P - (torch.matrix_power(P, 0) * scale2)
alpha = update_speed / scale2
update = (-4.0 * alpha) * torch.mm(update, M)
updated = M + update
# updated has shape (cols, rows) if rows > cols, else has shape (rows, cols)
# Transpose (or not) to shape (cols, rows) (IMPORTANT, s.t. correct dimensions are reshaped)
# Then reshape to (cols, in_filters, kernel_width)
return updated.reshape(*orig_shape) if mshape[0] > mshape[1] else updated.T.reshape(*orig_shape)
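	# In matrix terms, with P = M M^T and scale2 = tr(P P^T) / tr(P), the step above computes
	# M <- M - 4 * (update_speed / scale2) * (P - scale2 * I) M, i.e. a gradient step pushing
	# M M^T towards a scaled identity (the "floating scale" semi-orthogonal constraint).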
@staticmethod
def get_M_shape(conv_weight):
orig_shape = conv_weight.shape
return (orig_shape[1]*orig_shape[2], orig_shape[0])
@staticmethod
def get_semi_orth_error(conv1dlayer):
with torch.no_grad():
orig_shape = conv1dlayer.weight.shape
M = conv1dlayer.weight.reshape(
orig_shape[0], orig_shape[1]*orig_shape[2]).T
mshape = M.shape
if mshape[0] > mshape[1]: # semi orthogonal constraint for rows > cols
M = M.T
P = torch.mm(M, M.T)
PP = torch.mm(P, P.T)
trace_P = torch.trace(P)
trace_PP = torch.trace(PP)
scale2 = torch.sqrt(trace_PP/trace_P) ** 2
update = P - (torch.matrix_power(P, 0) * scale2)
return torch.norm(update, p='fro')
class SharedDimScaleDropout(nn.Module):
def __init__(self, alpha: float = 0.5, dim=1):
'''
		Continuous scaled dropout that is constant over the chosen dim (usually across time).
		Multiplies inputs by a random mask drawn from Uniform(1 - 2*alpha, 1 + 2*alpha).
'''
super(SharedDimScaleDropout, self).__init__()
if alpha > 0.5 or alpha < 0:
raise ValueError("alpha must be between 0 and 0.5")
self.alpha = alpha
self.dim = dim
self.register_buffer('mask', torch.tensor(0.))
def forward(self, X):
if self.training:
if self.alpha != 0.:
# sample mask from uniform dist with dim of length 1 in self.dim and then repeat to match size
tied_mask_shape = list(X.shape)
tied_mask_shape[self.dim] = 1
repeats = [1 if i != self.dim else X.shape[self.dim]
for i in range(len(X.shape))]
return X * self.mask.repeat(tied_mask_shape).uniform_(1 - 2*self.alpha, 1 + 2*self.alpha).repeat(repeats)
# expected value of dropout mask is 1 so no need to scale outputs like vanilla dropout
return X
class FTDNNLayer(nn.Module):
def __init__(self, in_dim, out_dim, bottleneck_dim, context_size=2, dilations=None, paddings=None, alpha=0.0):
'''
3 stage factorised TDNN http://danielpovey.com/files/2018_interspeech_tdnnf.pdf
'''
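		# factor1 and factor2 are semi-orthogonal bottleneck convolutions, factor3 maps back to
		# out_dim; ReLU, BatchNorm and shared-dim scaled dropout are applied on top.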
super(FTDNNLayer, self).__init__()
paddings = [1, 1, 1] if not paddings else paddings
dilations = [2, 2, 2] if not dilations else dilations
assert len(paddings) == 3
assert len(dilations) == 3
self.factor1 = SOrthConv(
in_dim, bottleneck_dim, context_size, padding=paddings[0], dilation=dilations[0])
self.factor2 = SOrthConv(bottleneck_dim, bottleneck_dim,
context_size, padding=paddings[1], dilation=dilations[1])
self.factor3 = nn.Conv1d(bottleneck_dim, out_dim, context_size,
padding=paddings[2], dilation=dilations[2], bias=False)
self.nl = nn.ReLU()
self.bn = nn.BatchNorm1d(out_dim)
self.dropout = SharedDimScaleDropout(alpha=alpha, dim=1)
def forward(self, x):
''' input (batch_size, seq_len, in_dim) '''
assert (x.shape[-1] == self.factor1.conv.weight.shape[1])
x = self.factor1(x.transpose(1, 2))
x = self.factor2(x)
x = self.factor3(x)
x = self.nl(x)
x = self.bn(x).transpose(1, 2)
x = self.dropout(x)
return x
def step_semi_orth(self):
for layer in self.children():
if isinstance(layer, SOrthConv):
layer.step_semi_orth()
def orth_error(self):
orth_error = 0
for layer in self.children():
if isinstance(layer, SOrthConv):
orth_error += layer.orth_error()
return orth_error
class DenseReLU(nn.Module):
def __init__(self, in_dim, out_dim):
super(DenseReLU, self).__init__()
self.fc = nn.Linear(in_dim, out_dim)
self.bn = nn.BatchNorm1d(out_dim)
self.nl = nn.ReLU()
def forward(self, x):
x = self.fc(x)
x = self.nl(x)
if len(x.shape) > 2:
x = self.bn(x.transpose(1, 2)).transpose(1, 2)
else:
x = self.bn(x)
return x
class StatsPool(nn.Module):
def __init__(self, floor=1e-10, bessel=False):
super(StatsPool, self).__init__()
self.floor = floor
self.bessel = bessel
def forward(self, x):
means = torch.mean(x, dim=1)
_, t, _ = x.shape
if self.bessel:
t = t - 1
residuals = x - means.unsqueeze(1)
numerator = torch.sum(residuals**2, dim=1)
stds = torch.sqrt(torch.clamp(numerator, min=self.floor)/t)
x = torch.cat([means, stds], dim=1)
return x
class TDNN_(nn.Module):
def __init__(
self,
input_dim=23,
output_dim=512,
context_size=5,
stride=1,
dilation=1,
batch_norm=True,
dropout_p=0.0,
padding=0
):
super(TDNN_, self).__init__()
self.context_size = context_size
self.stride = stride
self.input_dim = input_dim
self.output_dim = output_dim
self.dilation = dilation
self.dropout_p = dropout_p
self.padding = padding
self.kernel = nn.Conv1d(self.input_dim,
self.output_dim,
self.context_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation)
self.nonlinearity = nn.ReLU()
self.batch_norm = batch_norm
if batch_norm:
self.bn = nn.BatchNorm1d(output_dim)
self.drop = nn.Dropout(p=self.dropout_p)
def forward(self, x):
'''
input: size (batch, seq_len, input_features)
		output: size (batch, new_seq_len, output_features)
'''
_, _, d = x.shape
assert (d == self.input_dim), 'Input dimension was wrong. Expected ({}), got ({})'.format(
self.input_dim, d)
x = self.kernel(x.transpose(1, 2))
x = self.nonlinearity(x)
x = self.drop(x)
if self.batch_norm:
x = self.bn(x)
return x.transpose(1, 2)
class FTDNN(nn.Module):
def __init__(self, nclasses=-1, ncoef=90, init_coef=0):
'''
The FTDNN architecture from
"State-of-the-art speaker recognition with neural network embeddings in
NIST SRE18 and Speakers in the Wild evaluations"
https://www.sciencedirect.com/science/article/pii/S0885230819302700
'''
super(FTDNN, self).__init__()
self.ncoef=ncoef
self.init_coef=init_coef
self.layer01 = TDNN_(input_dim=self.ncoef, output_dim=512, context_size=5, padding=2)
self.layer02 = FTDNNLayer(512, 1024, 256, context_size=2, dilations=[ 2, 2, 2], paddings=[1, 1, 1])
self.layer03 = FTDNNLayer(1024, 1024, 256, context_size=1, dilations=[1, 1, 1], paddings=[0, 0, 0])
self.layer04 = FTDNNLayer(1024, 1024, 256, context_size=2, dilations=[3, 3, 2], paddings=[2, 1, 1])
self.layer05 = FTDNNLayer(2048, 1024, 256, context_size=1, dilations=[1, 1, 1], paddings=[0, 0, 0])
self.layer06 = FTDNNLayer(1024, 1024, 256, context_size=2, dilations=[3, 3, 2], paddings=[2, 1, 1])
self.layer07 = FTDNNLayer(3072, 1024, 256, context_size=2, dilations=[3, 3, 2], paddings=[2, 1, 1])
self.layer08 = FTDNNLayer(1024, 1024, 256, context_size=2, dilations=[3, 3, 2], paddings=[2, 1, 1])
self.layer09 = FTDNNLayer(3072, 1024, 256, context_size=1, dilations=[1, 1, 1], paddings=[0, 0, 0])
self.layer10 = DenseReLU(1024, 2048)
self.layer11 = StatsPool()
self.layer12 = DenseReLU(4096, 512)
self.out_layer = nn.Linear(512, nclasses) if nclasses>2 else nn.Linear(512, 1)
def forward(self, x):
'''
Input must be (batch_size, seq_len, in_dim)
'''
x = x.squeeze(1).transpose(1,-1)
x = self.layer01(x)
x_2 = self.layer02(x)
x_3 = self.layer03(x_2)
x_4 = self.layer04(x_3)
skip_5 = torch.cat([x_4, x_3], dim=-1)
x = self.layer05(skip_5)
x_6 = self.layer06(x)
skip_7 = torch.cat([x_6, x_4, x_2], dim=-1)
x = self.layer07(skip_7)
x_8 = self.layer08(x)
skip_9 = torch.cat([x_8, x_6, x_4], dim=-1)
x = self.layer09(skip_9)
x = self.layer10(x)
x = self.layer11(x)
x = self.layer12(x)
x = self.out_layer(x)
return x
def step_ftdnn_layers(self):
for layer in self.children():
if isinstance(layer, FTDNNLayer):
layer.step_semi_orth()
def set_dropout_alpha(self, alpha):
for layer in self.children():
if isinstance(layer, FTDNNLayer):
layer.dropout.alpha = alpha
def get_orth_errors(self):
errors = 0.
with torch.no_grad():
for layer in self.children():
if isinstance(layer, FTDNNLayer):
errors += layer.orth_error()
return errors
class hswish(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
class SeModule(nn.Module):
def __init__(self, in_size, reduction=4):
super(SeModule, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
)
def forward(self, x):
return x * self.se(x)
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
super(Block, self).__init__()
self.stride = stride
self.se = semodule
self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(expand_size)
self.nolinear1 = nolinear
self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=expand_size, bias=False)
self.bn2 = nn.BatchNorm2d(expand_size)
self.nolinear2 = nolinear
self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_size)
self.shortcut = nn.Sequential()
if stride == 1 and in_size != out_size:
self.shortcut = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_size),
)
def forward(self, x):
out = self.nolinear1(self.bn1(self.conv1(x)))
out = self.nolinear2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
		if self.se is not None:
out = self.se(out)
out = out + self.shortcut(x) if self.stride==1 else out
return out
class MobileNetV3_Small(nn.Module):
def __init__(self):
super(MobileNetV3_Small, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
)
self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(576)
self.hs2 = hswish()
self.conv_out = nn.Conv2d(576, 256, kernel_size=(9,3), stride=(1,1), padding=(0,1), bias=False)
self.bn_out = nn.BatchNorm2d(256)
self.attention = SelfAttention(256)
self.out = nn.Linear(256*2, 1)
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
x = self.hs1(self.bn1(self.conv1(x)))
x = self.bneck(x)
x = self.hs2(self.bn2(self.conv2(x)))
x = F.relu(self.bn_out(self.conv_out(x))).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.out(stats)
return x
class densenet_Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(densenet_Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4*growth_rate)
self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out,x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, block=densenet_Bottleneck, nblocks=[6,12,24,16], growth_rate=12, reduction=0.5):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2*growth_rate
self.conv1 = nn.Conv2d(1, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3]*growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.conv_out = nn.Conv2d(num_planes, 256, kernel_size=(8,3), stride=(1,1), padding=(0,1), bias=False)
self.bn_out = nn.BatchNorm2d(256)
self.attention = SelfAttention(256)
self.out = nn.Linear(256*2, 1)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.trans1(self.dense1(x))
x = self.trans2(self.dense2(x))
x = self.trans3(self.dense3(x))
x = self.dense4(x)
x = F.avg_pool2d(F.relu(self.bn(x)), 4)
x = F.relu(self.bn_out(self.conv_out(x))).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.out(stats)
return x
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
def __init__(self, vgg_name):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.conv_out = nn.Conv2d(512, 256, kernel_size=(7,3), stride=(1,1), padding=(0,1), bias=False)
self.bn_out = nn.BatchNorm2d(256)
self.attention = SelfAttention(256)
self.out = nn.Linear(256*2, 1)
def forward(self, x):
x = self.avgpool(self.features(x))
x = F.relu(self.bn_out(self.conv_out(x))).squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
x = self.out(stats)
return x
def _make_layers(self, cfg):
layers = []
in_channels = 1
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
``` |
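A minimal smoke test for the TDNN classifier defined above (illustrative only: the batch size, `nclasses=10` and the 200-frame input are assumed, and the classes must already be in scope):
```python
import torch

# Random batch shaped (batch, channel, coefficients, frames); the forward pass drops the
# channel axis, pools first- and second-order statistics over time and returns class logits.
model = TDNN(nclasses=10, ncoef=90)
model.eval()
with torch.no_grad():
    out = model(torch.randn(4, 1, 90, 200))
print(out.shape)  # expected: torch.Size([4, 10])
```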
{
"source": "joaomonteirof/e2e_verification",
"score": 2
} |
#### File: e2e_verification/asv/embedd.py
```python
import argparse
import numpy as np
import glob
import torch
import os
import sys
import pathlib
from kaldi_io import read_mat_scp, open_or_fd, write_vec_flt
import model as model_
import scipy.io as sio
from utils.utils import *
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return torch.device('cuda:'+str(np.argmax(memory_available)))
def prep_feats(data_):
#data_ = ( data_ - data_.mean(0) ) / data_.std(0)
features = data_.T
if features.shape[1]<50:
mul = int(np.ceil(50/features.shape[1]))
features = np.tile(features, (1, mul))
features = features[:, :50]
return torch.from_numpy(features[np.newaxis, np.newaxis, :, :]).float()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute embeddings')
parser.add_argument('--path-to-data', type=str, default='./data/', metavar='Path', help='Path to input data')
parser.add_argument('--path-to-more-data', type=str, default=None, metavar='Path', help='Path to input data')
parser.add_argument('--utt2spk', type=str, default=None, metavar='Path', help='Optional path for utt2spk')
parser.add_argument('--more-utt2spk', type=str, default=None, metavar='Path', help='Optional path for utt2spk')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')
parser.add_argument('--out-path', type=str, default='./', metavar='Path', help='Path to output hdf file')
parser.add_argument('--model', choices=['resnet_stats', 'resnet_mfcc', 'resnet_lstm', 'resnet_small', 'resnet_large', 'TDNN'], default='resnet_mfcc', help='Model arch according to input type')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--eps', type=float, default=0.0, metavar='eps', help='Add noise to embeddings')
parser.add_argument('--inner', action='store_true', default=True, help='Inner layer as embedding')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
if args.cp_path is None:
raise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')
pathlib.Path(args.out_path).mkdir(parents=True, exist_ok=True)
print('Cuda Mode is: {}'.format(args.cuda))
if args.cuda:
device = get_freer_gpu()
ckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)
if args.model == 'resnet_mfcc':
model = model_.ResNet_mfcc(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_lstm':
model = model_.ResNet_lstm(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_stats':
model = model_.ResNet_stats(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_small':
model = model_.ResNet_small(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_large':
model = model_.ResNet_large(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'TDNN':
model = model_.TDNN(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
try:
model.load_state_dict(ckpt['model_state'], strict=True)
except RuntimeError as err:
print("Runtime Error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
model.eval()
if args.cuda:
model = model.to(device)
scp_list = glob.glob(args.path_to_data + '*.scp')
if len(scp_list)<1:
print('Nothing found at {}.'.format(args.path_to_data))
exit(1)
if args.path_to_more_data:
more_scp_list = glob.glob(args.path_to_more_data + '*.scp')
if len(more_scp_list)<1:
print('Nothing found at {}.'.format(args.path_to_more_data))
exit(1)
else:
scp_list = scp_list + more_scp_list
if args.utt2spk:
utt2spk = read_utt2spk(args.utt2spk)
if args.more_utt2spk:
utt2spk.update(read_utt2spk(args.more_utt2spk))
print('Start of data embeddings computation')
embeddings = {}
with torch.no_grad():
for file_ in scp_list:
data = { k:m for k,m in read_mat_scp(file_) }
for i, utt in enumerate(data):
if args.utt2spk:
if not utt in utt2spk:
print('Skipping utterance '+ utt)
continue
feats = prep_feats(data[utt])
try:
if args.cuda:
feats = feats.to(device)
model = model.to(device)
emb_2 = model.forward(feats)
except:
feats = feats.cpu()
model = model.cpu()
emb_2 = model.forward(feats)
emb = emb_2[1] if args.inner else emb_2[0]
embeddings[utt] = emb.detach().cpu().numpy().squeeze()
if args.eps>0.0:
embeddings[utt] += args.eps*np.random.randn(embeddings[utt].shape[0])
print('Storing embeddings in output file')
out_name = args.path_to_data.split('/')[-2] if not args.utt2spk else args.utt2spk.split('/')[-2]
file_name = args.out_path+out_name+'.ark'
if os.path.isfile(file_name):
os.remove(file_name)
print(file_name + ' Removed')
with open_or_fd(file_name,'wb') as f:
for k,v in embeddings.items(): write_vec_flt(f, v, k)
print('End of embeddings computation.')
```
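A quick sketch of what `prep_feats` above does with a short utterance (the 20-frame, 90-coefficient input is illustrative; anything shorter than 50 frames is tiled up to exactly 50):
```python
import numpy as np

# Kaldi features arrive as (frames, coefficients); prep_feats transposes, tiles short
# utterances to at least 50 frames and adds batch and channel axes.
feats = prep_feats(np.random.randn(20, 90))
print(feats.shape)  # torch.Size([1, 1, 90, 50])
```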
#### File: e2e_verification/asv/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from utils.losses import AMSoftmax, Softmax
class SelfAttention(nn.Module):
def __init__(self, hidden_size):
super(SelfAttention, self).__init__()
#self.output_size = output_size
self.hidden_size = hidden_size
self.att_weights = nn.Parameter(torch.Tensor(1, hidden_size), requires_grad=True)
init.kaiming_uniform_(self.att_weights)
def forward(self, inputs):
batch_size = inputs.size(0)
weights = torch.bmm(inputs, self.att_weights.permute(1, 0).unsqueeze(0).repeat(batch_size, 1, 1))
attentions = F.softmax(torch.tanh(weights.squeeze(2)),dim=1)
weighted = torch.mul(inputs, attentions.unsqueeze(2).expand_as(inputs))
noise = 1e-5*torch.randn(weighted.size())
if inputs.is_cuda:
noise = noise.to(inputs.get_device())
avg_repr, std_repr = weighted.sum(1), (weighted+noise).std(1)
representations = torch.cat((avg_repr,std_repr),1)
return representations
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes) )
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class ResNet_stats(nn.Module):
def __init__(self, n_z=256, nh=1, n_h=512, layers=[3,4,6,3], block=PreActBottleneck, proj_size=100, ncoef=23, dropout_prob=0.25, sm_type='softmax', ndiscriminators=1, r_proj_size=0):
self.in_planes = 32
super(ResNet_stats, self).__init__()
self.ndiscriminators = ndiscriminators
self.r_proj_size = r_proj_size
self.classifier = nn.ModuleList()
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.latent_size = n_z
self.sm_type = sm_type
self.ncoef = ncoef
self.conv1 = nn.Conv2d(1, 32, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.fc = nn.Linear(block.expansion*512*2,512)
self.lbn = nn.BatchNorm1d(512)
self.fc_mu = nn.Linear(512, n_z)
self.initialize_params()
if ndiscriminators>1:
for i in range(self.ndiscriminators):
self.classifier.append(self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob))
else:
self.classifier = self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
if sm_type=='softmax':
self.out_proj=Softmax(input_features=n_z, output_features=proj_size)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=n_z, output_features=proj_size)
else:
raise NotImplementedError
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight.div_(torch.norm(projection.weight, keepdim=True))
				projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.squeeze(2)
x = torch.cat([x.mean(-1), x.std(-1)], dim=1)
fc = F.relu(self.lbn(self.fc(x)))
mu = self.fc_mu(fc)
return mu, fc
def forward_bin(self, z):
if self.ndiscriminators>1:
out = []
for disc in self.classifier:
z_ = z
for l in disc:
z_ = l(z_)
out.append(z_)
return out
else:
for l in self.classifier:
z = l(z)
return z
class ResNet_mfcc(nn.Module):
def __init__(self, n_z=256, nh=1, n_h=512, layers=[3,4,6,3], block=PreActBottleneck, proj_size=100, ncoef=23, dropout_prob=0.25, sm_type='softmax', ndiscriminators=1, r_proj_size=0):
self.in_planes = 32
super(ResNet_mfcc, self).__init__()
self.ndiscriminators = ndiscriminators
self.r_proj_size = r_proj_size
self.classifier = nn.ModuleList()
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.latent_size = n_z
self.sm_type = sm_type
self.ncoef = ncoef
self.conv1 = nn.Conv2d(1, 32, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.fc = nn.Linear(block.expansion*512*2,512)
self.lbn = nn.BatchNorm1d(512)
self.fc_mu = nn.Linear(512, n_z)
self.initialize_params()
if ndiscriminators>1:
for i in range(self.ndiscriminators):
self.classifier.append(self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob))
else:
self.classifier = self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
self.attention = SelfAttention(block.expansion*512)
if sm_type=='softmax':
self.out_proj=Softmax(input_features=n_z, output_features=proj_size)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=n_z, output_features=proj_size)
else:
raise NotImplementedError
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight.div_(torch.norm(projection.weight, keepdim=True))
				projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
fc = F.relu(self.lbn(self.fc(stats)))
mu = self.fc_mu(fc)
return mu, fc
def forward_bin(self, z):
if self.ndiscriminators>1:
out = []
for disc in self.classifier:
z_ = z
for l in disc:
z_ = l(z_)
out.append(z_)
return out
else:
for l in self.classifier:
z = l(z)
return z
class ResNet_lstm(nn.Module):
def __init__(self, n_z=256, nh=1, n_h=512, layers=[3,4,6,3], block=PreActBottleneck, proj_size=100, ncoef=23, dropout_prob=0.25, sm_type='softmax', ndiscriminators=1, r_proj_size=0):
self.in_planes = 32
super(ResNet_lstm, self).__init__()
self.ndiscriminators = ndiscriminators
self.r_proj_size = r_proj_size
self.classifier = nn.ModuleList()
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.latent_size = n_z
self.sm_type = sm_type
self.ncoef = ncoef
self.conv1 = nn.Conv2d(1, 32, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.lstm = nn.LSTM(block.expansion*512, 256, 2, bidirectional=True, batch_first=False)
self.fc = nn.Linear(2*512+256,512)
self.lbn = nn.BatchNorm1d(512)
self.fc_mu = nn.Linear(512, n_z)
self.initialize_params()
if ndiscriminators>1:
for i in range(self.ndiscriminators):
self.classifier.append(self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob))
else:
self.classifier = self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
self.attention = SelfAttention(512)
if sm_type=='softmax':
self.out_proj=Softmax(input_features=n_z, output_features=proj_size)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=n_z, output_features=proj_size)
else:
raise NotImplementedError
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight.div_(torch.norm(projection.weight, keepdim=True))
				projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.squeeze(2).permute(2,0,1)
batch_size = x.size(1)
seq_size = x.size(0)
h0 = torch.zeros(2*2, batch_size, 256)
c0 = torch.zeros(2*2, batch_size, 256)
if x.is_cuda:
h0 = h0.to(x.get_device())
c0 = c0.to(x.get_device())
out_seq, (h_, c_) = self.lstm(x, (h0, c0))
stats = self.attention(out_seq.permute(1,0,2).contiguous())
x = torch.cat([stats,h_.mean(0)],dim=1)
fc = F.relu(self.lbn(self.fc(x)))
emb = self.fc_mu(fc)
return emb, fc
def forward_bin(self, z):
if self.ndiscriminators>1:
out = []
for disc in self.classifier:
z_ = z
for l in disc:
z_ = l(z_)
out.append(z_)
return out
else:
for l in self.classifier:
z = l(z)
return z
class ResNet_small(nn.Module):
def __init__(self, n_z=256, nh=1, n_h=512, layers=[2,2,2,2], block=PreActBlock, proj_size=0, ncoef=23, dropout_prob=0.25, sm_type='none', ndiscriminators=1, r_proj_size=0):
self.in_planes = 32
super(ResNet_small, self).__init__()
self.ndiscriminators = ndiscriminators
self.r_proj_size = r_proj_size
self.classifier = nn.ModuleList()
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.latent_size = n_z
self.sm_type = sm_type
self.ncoef = ncoef
self.conv1 = nn.Conv2d(1, 32, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.fc = nn.Linear(block.expansion*512*2,512)
self.lbn = nn.BatchNorm1d(512)
self.fc_mu = nn.Linear(512, n_z)
self.initialize_params()
if ndiscriminators>1:
for i in range(self.ndiscriminators):
self.classifier.append(self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob))
else:
self.classifier = self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
self.attention = SelfAttention(block.expansion*512)
if proj_size>0 and sm_type!='none':
if sm_type=='softmax':
self.out_proj=Softmax(input_features=n_z, output_features=proj_size)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=n_z, output_features=proj_size)
else:
raise NotImplementedError
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight.div_(torch.norm(projection.weight, keepdim=True))
				projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
fc = F.relu(self.lbn(self.fc(stats)))
mu = self.fc_mu(fc)
return mu, fc
def forward_bin(self, z):
if self.ndiscriminators>1:
out = []
for disc in self.classifier:
z_ = z
for l in disc:
z_ = l(z_)
out.append(z_)
return out
else:
for l in self.classifier:
z = l(z)
return z
class ResNet_large(nn.Module):
def __init__(self, n_z=256, nh=1, n_h=512, layers=[3,4,23,3], block=PreActBottleneck, proj_size=100, ncoef=23, dropout_prob=0.25, sm_type='softmax', ndiscriminators=1, r_proj_size=0):
self.in_planes = 32
super(ResNet_large, self).__init__()
self.ndiscriminators = ndiscriminators
self.r_proj_size = r_proj_size
self.classifier = nn.ModuleList()
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.latent_size = n_z
self.sm_type = sm_type
self.ncoef = ncoef
self.conv1 = nn.Conv2d(1, 32, kernel_size=(ncoef,3), stride=(1,1), padding=(0,1), bias=False)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.fc = nn.Linear(block.expansion*512*2,512)
self.lbn = nn.BatchNorm1d(512)
self.fc_mu = nn.Linear(512, n_z)
self.initialize_params()
if ndiscriminators>1:
for i in range(self.ndiscriminators):
self.classifier.append(self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob))
else:
self.classifier = self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
self.attention = SelfAttention(block.expansion*512)
if sm_type=='softmax':
self.out_proj=Softmax(input_features=n_z, output_features=proj_size)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=n_z, output_features=proj_size)
else:
raise NotImplementedError
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight.div_(torch.norm(projection.weight, keepdim=True))
				projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.squeeze(2)
stats = self.attention(x.permute(0,2,1).contiguous())
fc = F.relu(self.lbn(self.fc(stats)))
mu = self.fc_mu(fc)
return mu, fc
def forward_bin(self, z):
if self.ndiscriminators>1:
out = []
for disc in self.classifier:
z_ = z
for l in disc:
z_ = l(z_)
out.append(z_)
return out
else:
for l in self.classifier:
z = l(z)
return z
class StatisticalPooling(nn.Module):
def forward(self, x):
# x is 3-D with axis [B, feats, T]
noise = torch.rand(x.size()).to(x.device)*1e-6
x = x + noise
mu = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
return torch.cat((mu, std), dim=1)
class TDNN(nn.Module):
# Architecture taken from https://github.com/santi-pdp/pase/blob/master/pase/models/tdnn.py
def __init__(self, n_z=256, nh=1, n_h=512, proj_size=0, ncoef=23, sm_type='none', dropout_prob=0.25, ndiscriminators=1, r_proj_size=0):
super(TDNN, self).__init__()
self.ndiscriminators = ndiscriminators
self.r_proj_size = r_proj_size
self.classifier = nn.ModuleList()
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.latent_size = n_z
self.sm_type = sm_type
self.ncoef = ncoef
self.model = nn.Sequential( nn.Conv1d(ncoef, 512, 5, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 5, padding=2),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 5, padding=3),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, 7),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 1500, 1),
nn.BatchNorm1d(1500),
nn.ReLU(inplace=True) )
self.pooling = StatisticalPooling()
self.post_pooling_1 = nn.Sequential(nn.Conv1d(3000, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True) )
self.post_pooling_2 = nn.Sequential(nn.Conv1d(512, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, n_z, 1) )
if ndiscriminators>1:
for i in range(self.ndiscriminators):
self.classifier.append(self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob))
else:
self.classifier = self.make_bin_layers(n_in=2*512, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
if proj_size>0 and sm_type!='none':
if sm_type=='softmax':
self.out_proj=Softmax(input_features=n_z, output_features=proj_size)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=n_z, output_features=proj_size)
else:
raise NotImplementedError
# get output features at affine after stats pooling
# self.model = nn.Sequential(*list(self.model.children())[:-5])
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight /= torch.norm(projection.weight.squeeze()).item()
				projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.model(x.squeeze(1))
x = self.pooling(x)
fc = self.post_pooling_1(x)
x = self.post_pooling_2(fc)
return x.squeeze(-1), fc.squeeze(-1)
def forward_bin(self, z):
if self.ndiscriminators>1:
out = []
for disc in self.classifier:
z_ = z
for l in disc:
z_ = l(z_)
out.append(z_)
return out
else:
for l in self.classifier:
z = l(z)
return z
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d) or isinstance(layer, torch.nn.Conv1d):
init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
```
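A minimal end-to-end verification sketch with the classes above (the hyper-parameters, utterance lengths and the choice of `ResNet_small` are illustrative, and `utils.losses` must be importable for the module to load):
```python
import torch

# Embed two utterances with the shared encoder, then score the pair with the binary
# classification head, mirroring what the evaluation scripts in this repo do.
model = ResNet_small(ncoef=23)
model.eval()
with torch.no_grad():
    emb_enroll = model(torch.randn(1, 1, 23, 200))[1]  # inner (fc) embedding, 512-d
    emb_test = model(torch.randn(1, 1, 23, 200))[1]
    score = model.forward_bin(torch.cat([emb_enroll, emb_test], dim=1))
print(score.item())  # sigmoid output in [0, 1]
```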
#### File: e2e_verification/asv/symmetry_check.py
```python
import argparse
import numpy as np
import torch
from kaldi_io import read_mat_scp
from sklearn import metrics
import scipy.io as sio
import model as model_
import glob
import pickle
import os
import sys
from utils.utils import *
def prep_feats(data_, min_nb_frames=100):
features = data_.T
if features.shape[1]<min_nb_frames:
mul = int(np.ceil(min_nb_frames/features.shape[1]))
features = np.tile(features, (1, mul))
features = features[:, :min_nb_frames]
return torch.from_numpy(features[np.newaxis, np.newaxis, :, :]).float()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluation')
parser.add_argument('--test-data', type=str, default='./data/test/', metavar='Path', help='Path to input data')
parser.add_argument('--trials-path', type=str, default=None, help='Path to trials file. If None, will be created from spk2utt')
parser.add_argument('--spk2utt', type=str, default=None, metavar='Path', help='Path to spk2utt file. Will be used in case no trials file is provided')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')
parser.add_argument('--model', choices=['resnet_stats', 'resnet_mfcc', 'resnet_lstm', 'resnet_small', 'resnet_large', 'TDNN'], default='resnet_lstm', help='Model arch according to input type')
parser.add_argument('--out-path', type=str, default='./', metavar='Path', help='Path for saving outputs')
	parser.add_argument('--out-prefix', type=str, default='', metavar='Path', help='Prefix to be added to output file name')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--inner', action='store_true', default=True, help='Inner layer as embedding')
parser.add_argument('--no-histogram', action='store_true', default=False, help='Disables histogram plot')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
if args.cp_path is None:
raise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')
print('Cuda Mode is: {}'.format(args.cuda))
if args.cuda:
device = get_freer_gpu()
ckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)
if args.model == 'resnet_mfcc':
model = model_.ResNet_mfcc(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_lstm':
model = model_.ResNet_lstm(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_stats':
model = model_.ResNet_stats(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_small':
model = model_.ResNet_small(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'resnet_large':
model = model_.ResNet_large(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
elif args.model == 'TDNN':
model = model_.TDNN(n_z=ckpt['latent_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], proj_size=ckpt['r_proj_size'], ncoef=ckpt['ncoef'], ndiscriminators=ckpt['ndiscriminators'])
try:
model.load_state_dict(ckpt['model_state'], strict=True)
except RuntimeError as err:
print("Runtime Error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
model.eval()
if args.cuda:
model = model.to(device)
test_data = None
files_list = glob.glob(args.test_data+'*.scp')
for file_ in files_list:
if test_data is None:
test_data = { k:v for k,v in read_mat_scp(file_) }
else:
for k,v in read_mat_scp(file_):
test_data[k] = v
if args.trials_path:
utterances_enroll, utterances_test, labels = read_trials(args.trials_path)
else:
spk2utt = read_spk2utt(args.spk2utt)
utterances_enroll, utterances_test, labels = create_trials(spk2utt)
print('\nAll data ready. Start of scoring')
scores_dif = []
mem_embeddings = {}
model.eval()
with torch.no_grad():
for i in range(len(labels)):
enroll_utt = utterances_enroll[i]
try:
emb_enroll = mem_embeddings[enroll_utt]
except KeyError:
enroll_utt_data = prep_feats(test_data[enroll_utt])
if args.cuda:
enroll_utt_data = enroll_utt_data.to(device)
emb_enroll = model.forward(enroll_utt_data)[1].detach() if args.inner else model.forward(enroll_utt_data)[0].detach()
mem_embeddings[enroll_utt] = emb_enroll
test_utt = utterances_test[i]
try:
emb_test = mem_embeddings[test_utt]
except KeyError:
test_utt_data = prep_feats(test_data[test_utt])
if args.cuda:
test_utt_data = test_utt_data.to(device)
emb_test = model.forward(test_utt_data)[1].detach() if args.inner else model.forward(test_utt_data)[0].detach()
mem_embeddings[test_utt] = emb_test
pred_1 = model.forward_bin(torch.cat([emb_enroll, emb_test],1))
pred_2 = model.forward_bin(torch.cat([emb_test, emb_enroll],1))
if model.ndiscriminators>1:
score_1 = torch.cat(pred_1, 1).mean(1).squeeze().item()
score_2 = torch.cat(pred_2, 1).mean(1).squeeze().item()
else:
score_1 = pred_1.squeeze().item()
score_2 = pred_2.squeeze().item()
scores_dif.append( abs(score_1 - score_2) )
print('\nScoring done')
print('Avg: {}'.format(np.mean(scores_dif)))
print('Std: {}'.format(np.std(scores_dif)))
print('Median: {}'.format(np.median(scores_dif)))
print('Max: {}'.format(np.max(scores_dif)))
print('Min: {}'.format(np.min(scores_dif)))
if not args.no_histogram:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.hist(scores_dif, density=True, bins=30)
plt.savefig(args.out_path+args.out_prefix+'sym_hist_vox.pdf', bbox_inches='tight')
```
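The loop above only quantifies how asymmetric the trained pair scorer is (it reports statistics of |score_1 - score_2|). If the same model were used for actual verification, a simple way to remove the dependence on argument order is to average the two directions. A minimal sketch reusing the `forward_bin` interface shown above (the helper name is illustrative, not part of the repository):
```python
import torch

def symmetric_score(model, emb_enroll, emb_test):
    # Average the two orderings of the concatenated pair; assumes a single discriminator head.
    s1 = model.forward_bin(torch.cat([emb_enroll, emb_test], 1)).squeeze().item()
    s2 = model.forward_bin(torch.cat([emb_test, emb_enroll], 1)).squeeze().item()
    return 0.5 * (s1 + s2)
```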
#### File: e2e_verification/cifar/train.py
```python
from __future__ import print_function
import argparse
import torch
from torch.utils.data import DataLoader
from train_loop import TrainLoop
import torch.optim as optim
from torchvision import datasets, transforms
from models import vgg, resnet, densenet
import numpy as np
from time import sleep
import os
import sys
def set_np_randomseed(worker_id):
np.random.seed(np.random.get_state()[1][0]+worker_id)
def get_freer_gpu(trials=10):
sleep(2)
for j in range(trials):
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
dev_ = torch.device('cuda:'+str(np.argmax(memory_available)))
try:
a = torch.rand(1).cuda(dev_)
return dev_
except:
pass
print('NO GPU AVAILABLE!!!')
exit(1)
# Training settings
parser = argparse.ArgumentParser(description='Cifar10 Classification')
parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
parser.add_argument('--valid-batch-size', type=int, default=16, metavar='N', help='input batch size for validation (default: 16)')
parser.add_argument('--epochs', type=int, default=500, metavar='N', help='number of epochs to train (default: 500)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.1)')
parser.add_argument('--l2', type=float, default=5e-4, metavar='lambda', help='L2 weight decay coefficient (default: 0.0005)')
parser.add_argument('--smoothing', type=float, default=0.2, metavar='l', help='Label smoothing (default: 0.2)')
parser.add_argument('--patience', type=int, default=10, metavar='S', help='Epochs to wait before decreasing LR by a factor of 0.5 (default: 10)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='lambda', help='Momentum (default: 0.9)')
parser.add_argument('--checkpoint-epoch', type=int, default=None, metavar='N', help='epoch to load for checkpointing. If None, training starts from scratch')
parser.add_argument('--checkpoint-path', type=str, default=None, metavar='Path', help='Path for checkpointing')
parser.add_argument('--data-path', type=str, default='./data/', metavar='Path', help='Path to data')
parser.add_argument('--valid-data-path', type=str, default='./data/', metavar='Path', help='Path to data')
parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)')
parser.add_argument('--n-workers', type=int, default=4, metavar='N', help='Workers for data loading. Default is 4')
parser.add_argument('--model', choices=['vgg', 'resnet', 'densenet'], default='resnet')
parser.add_argument('--softmax', choices=['softmax', 'am_softmax'], default='softmax', help='Softmax type')
parser.add_argument('--hidden-size', type=int, default=512, metavar='S', help='latent layer dimension (default: 512)')
parser.add_argument('--n-hidden', type=int, default=1, metavar='N', help='number of hidden layers in the binary classification head (default: 1)')
parser.add_argument('--dropout-prob', type=float, default=0.25, metavar='p', help='Dropout probability (default: 0.25)')
parser.add_argument('--save-every', type=int, default=1, metavar='N', help='how many epochs to wait before logging training status. Default is 1')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--no-cp', action='store_true', default=False, help='Disables checkpointing')
parser.add_argument('--verbose', type=int, default=1, metavar='N', help='Verbose is activated if > 0')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
if args.cuda:
torch.backends.cudnn.benchmark=True
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([x / 255 for x in [125.3, 123.0, 113.9]], [x / 255 for x in [63.0, 62.1, 66.7]])])
transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize([x / 255 for x in [125.3, 123.0, 113.9]], [x / 255 for x in [63.0, 62.1, 66.7]])])
#trainset = Loader(args.data_path)
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers, worker_init_fn=set_np_randomseed)
#validset = Loader(args.valid_data_path)
validset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
valid_loader = torch.utils.data.DataLoader(validset, batch_size=args.valid_batch_size, shuffle=False, num_workers=args.n_workers)
if args.model == 'vgg':
model = vgg.VGG('VGG16', nh=args.n_hidden, n_h=args.hidden_size, dropout_prob=args.dropout_prob, sm_type=args.softmax)
elif args.model == 'resnet':
model = resnet.ResNet18(nh=args.n_hidden, n_h=args.hidden_size, dropout_prob=args.dropout_prob, sm_type=args.softmax)
elif args.model == 'densenet':
model = densenet.densenet_cifar(nh=args.n_hidden, n_h=args.hidden_size, dropout_prob=args.dropout_prob, sm_type=args.softmax)
if args.verbose >0:
print(model)
if args.cuda:
device = get_freer_gpu()
model = model.cuda(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.l2, momentum=args.momentum)
trainer = TrainLoop(model, optimizer, train_loader, valid_loader, patience=args.patience, label_smoothing=args.smoothing, verbose=args.verbose, save_cp=(not args.no_cp), checkpoint_path=args.checkpoint_path, checkpoint_epoch=args.checkpoint_epoch, cuda=args.cuda)
if args.verbose >0:
print('\nCuda Mode is: {}'.format(args.cuda))
print('Selected model: {}'.format(args.model))
print('Batch size: {}'.format(args.batch_size))
print('LR: {}'.format(args.lr))
print('Momentum: {}'.format(args.momentum))
print('l2: {}'.format(args.l2))
print('Label smoothing: {}'.format(args.smoothing))
print('Patience: {}'.format(args.patience))
print('Dropout rate: {}'.format(args.dropout_prob))
print('Softmax Mode is: {}'.format(args.softmax))
trainer.train(n_epochs=args.epochs, save_every=args.save_every)
```
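`TrainLoop` receives `label_smoothing=args.smoothing`, but its internals are not included in this excerpt. As a rough illustration of what a smoothing value such as 0.2 does, the standard label-smoothed cross-entropy can be written as below; this is a generic sketch, not necessarily the exact code inside train_loop.py:
```python
import torch
import torch.nn.functional as F

def smoothed_cross_entropy(logits, targets, smoothing=0.2):
    # Mix the one-hot target with a uniform distribution over the classes.
    log_probs = F.log_softmax(logits, dim=1)
    nll = -log_probs.gather(1, targets.unsqueeze(1)).squeeze(1)   # standard cross-entropy term
    uniform = -log_probs.mean(dim=1)                              # uniform (smoothing) term
    return ((1.0 - smoothing) * nll + smoothing * uniform).mean()
```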
#### File: imagenet/models/resnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.losses import AMSoftmax, Softmax
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, nh, n_h, z_size, sm_type, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, dropout_prob=0.25, rproj_size=0):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.dropout_prob = dropout_prob
self.n_hidden = nh
self.hidden_size = n_h
self.sm_type = sm_type
self.n_classes = num_classes
self.emb_size = z_size
self.r_proj_size = rproj_size
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.lin_proj = nn.Sequential(nn.Linear(512*block.expansion, self.emb_size))
if sm_type=='softmax':
self.out_proj=Softmax(input_features=self.emb_size, output_features=num_classes)
elif sm_type=='am_softmax':
self.out_proj=AMSoftmax(input_features=self.emb_size, output_features=num_classes)
else:
raise NotImplementedError
self.classifier = self.make_bin_layers(n_in=2*self.emb_size, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
if self.r_proj_size>0:
projection = nn.Linear(n_in, self.r_proj_size, bias=False)
with torch.no_grad():
projection.weight.div_(torch.norm(projection.weight, keepdim=True))
            projection.weight.requires_grad = False
classifier = nn.ModuleList([projection, nn.Linear(self.r_proj_size, h_size), nn.LeakyReLU(0.1)])
else:
classifier = nn.ModuleList([nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)])
for i in range(n_h_layers-1):
classifier.append(nn.Linear(h_size, h_size))
classifier.append(nn.LeakyReLU(0.1))
classifier.append(nn.Dropout(p=dropout_p))
classifier.append(nn.Linear(h_size, 1))
classifier.append(nn.Sigmoid())
return classifier
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x_out = torch.flatten(x, 1)
x_emb = self.lin_proj(x_out)
return x_emb, x_out
def forward_bin(self, z):
for l in self.classifier:
z = l(z)
return z
def ResNet18(nh=1, n_h=512, emb_size=128, dropout_prob=0.25, sm_type='softmax', n_classes=1000, r_proj_size=0):
return ResNet(BasicBlock, [2,2,2,2], nh=nh, n_h=n_h, z_size=emb_size, sm_type=sm_type, dropout_prob=dropout_prob, num_classes=n_classes, rproj_size=r_proj_size)
def ResNet34(nh=1, n_h=512, emb_size=128, dropout_prob=0.25, sm_type='softmax', n_classes=1000, r_proj_size=0):
return ResNet(BasicBlock, [3,4,6,3], nh=nh, n_h=n_h, z_size=emb_size, sm_type=sm_type, dropout_prob=dropout_prob, num_classes=n_classes, rproj_size=r_proj_size)
def ResNet50(nh=1, n_h=512, emb_size=128, dropout_prob=0.25, sm_type='softmax', n_classes=1000, r_proj_size=0):
return ResNet(Bottleneck, [3,4,6,3], nh=nh, n_h=n_h, z_size=emb_size, sm_type=sm_type, dropout_prob=dropout_prob, num_classes=n_classes, rproj_size=r_proj_size)
def ResNet101(nh=1, n_h=512, emb_size=128, dropout_prob=0.25, sm_type='softmax', n_classes=1000, r_proj_size=0):
return ResNet(Bottleneck, [3,4,23,3], nh=nh, n_h=n_h, z_size=emb_size, sm_type=sm_type, dropout_prob=dropout_prob, num_classes=n_classes, rproj_size=r_proj_size)
def ResNet152(nh=1, n_h=512, emb_size=128, dropout_prob=0.25, sm_type='softmax', n_classes=1000, r_proj_size=0):
return ResNet(Bottleneck, [3,8,36,3], nh=nh, n_h=n_h, z_size=emb_size, sm_type=sm_type, dropout_prob=dropout_prob, num_classes=n_classes, rproj_size=r_proj_size)
```
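A quick shape check of how the pieces defined above fit together: the network returns both the projected embedding and the pooled features, and `forward_bin` scores a concatenated pair of embeddings. Batch size and input resolution below are illustrative:
```python
import torch

model = ResNet18(nh=1, n_h=512, emb_size=128, n_classes=1000)
x = torch.randn(4, 3, 224, 224)
emb, feats = model(x)                          # emb: [4, 128], feats: [4, 512]
pair = torch.cat([emb[:2], emb[2:]], dim=1)    # two embedding pairs -> [2, 256]
scores = model.forward_bin(pair)               # sigmoid outputs in [0, 1], shape [2, 1]
print(emb.shape, feats.shape, scores.shape)
```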
#### File: e2e_verification/retrieval/average_cps.py
```python
import argparse
import collections
import torch
import os
import re
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state['model_state']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
averaged_params[k].div_(num_models)
new_state['model'] = averaged_params
return new_state
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r'checkpoint_(\d+)it_(\d+)ep.pt')
else:
pt_regexp = re.compile(r'checkpoint_(\d+)ep.pt')
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception('Found {} checkpoint files but need at least {}'.format(len(entries), n))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(description='Average the params of input checkpoints to produce a new checkpoint')
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int, help='this will set an upper bound on which checkpoint to use, e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.')
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.num_epoch_checkpoints is None or args.num_update_checkpoints is None, \
'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound,
)
print('averaging checkpoints: ', args.inputs)
new_state = average_checkpoints(args.inputs)
torch.save(new_state, args.output)
print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
main()
``` |
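A hypothetical invocation (paths and counts are illustrative): average the last five per-epoch checkpoints found in a directory and write the result to a single file. The directory is expected to contain files named like `checkpoint_<N>ep.pt`, matching the regular expression used in `last_n_checkpoints`.
```python
# python average_cps.py --inputs /path/to/checkpoints/ \
#     --num-epoch-checkpoints 5 --output /path/to/averaged.pt
```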
{
"source": "joaomonteirof/multitask_asv",
"score": 2
} |
#### File: multitask_asv/cifar10/train_loop.py
```python
import torch
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
import os
from tqdm import tqdm
from harvester import HardestNegativeTripletSelector, AllTripletSelector
from utils import compute_eer
class TrainLoop(object):
def __init__(self, model, optimizer, train_loader, valid_loader, margin, lambda_, patience, verbose=-1, cp_name=None, save_cp=False, checkpoint_path=None, checkpoint_epoch=None, swap=False, cuda=True):
if checkpoint_path is None:
# Save to current directory
self.checkpoint_path = os.getcwd()
else:
self.checkpoint_path = checkpoint_path
if not os.path.isdir(self.checkpoint_path):
os.mkdir(self.checkpoint_path)
self.save_epoch_fmt = os.path.join(self.checkpoint_path, cp_name) if cp_name else os.path.join(self.checkpoint_path, 'checkpoint_{}ep.pt')
self.cuda_mode = cuda
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.valid_loader = valid_loader
self.history = {'train_loss': [], 'train_loss_batch': [], 'triplet_loss': [], 'triplet_loss_batch': [], 'ce_loss': [], 'ce_loss_batch': [],'ErrorRate': [], 'EER': []}
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=0.5, patience=patience, verbose=True if verbose>0 else False, threshold=1e-4, min_lr=1e-8)
self.total_iters = 0
self.cur_epoch = 0
self.lambda_ = lambda_
self.swap = swap
self.margin = margin
self.harvester = HardestNegativeTripletSelector(margin=0.1, cpu=not self.cuda_mode)
self.harvester_val = AllTripletSelector()
self.verbose = verbose
self.save_cp = save_cp
self.device = next(self.model.parameters()).device
if checkpoint_epoch is not None:
self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
def train(self, n_epochs=1, save_every=1):
while self.cur_epoch < n_epochs:
np.random.seed()
if self.verbose>0:
print(' ')
print('Epoch {}/{}'.format(self.cur_epoch+1, n_epochs))
train_iter = tqdm(enumerate(self.train_loader))
else:
train_iter = enumerate(self.train_loader)
ce=0.0
triplet_loss=0.0
train_loss=0.0
# Train step
for t, batch in train_iter:
ce_batch, triplet_loss_batch = self.train_step(batch)
ce += ce_batch
triplet_loss += triplet_loss_batch
train_loss += ce_batch + triplet_loss_batch
self.history['train_loss_batch'].append(ce_batch + triplet_loss_batch)
self.history['triplet_loss_batch'].append(triplet_loss_batch)
self.history['ce_loss_batch'].append(ce_batch)
self.total_iters += 1
self.history['train_loss'].append(train_loss/(t+1))
self.history['triplet_loss'].append(triplet_loss/(t+1))
self.history['ce_loss'].append(ce/(t+1))
if self.verbose>0:
print(' ')
print('Total train loss, Triplet loss, and Cross-entropy: {:0.4f}, {:0.4f}, {:0.4f}'.format(self.history['train_loss'][-1], self.history['triplet_loss'][-1], self.history['ce_loss'][-1]))
# Validation
tot_correct = 0
tot_ = 0
scores, labels = None, None
for t, batch in enumerate(self.valid_loader):
correct, total, scores_batch, labels_batch = self.valid(batch)
try:
scores = np.concatenate([scores, scores_batch], 0)
labels = np.concatenate([labels, labels_batch], 0)
except:
scores, labels = scores_batch, labels_batch
tot_correct += correct
tot_ += total
self.history['EER'].append(compute_eer(labels, scores))
self.history['ErrorRate'].append(1.-float(tot_correct)/tot_)
if self.verbose>0:
print(' ')
print('Current, best validation error rate, and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['ErrorRate'][-1], np.min(self.history['ErrorRate']), 1+np.argmin(self.history['ErrorRate'])))
print(' ')
print('Current, best validation EER, and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['EER'][-1], np.min(self.history['EER']), 1+np.argmin(self.history['EER'])))
self.scheduler.step(self.history['ErrorRate'][-1])
if self.verbose>0:
print(' ')
print('Current LR: {}'.format(self.optimizer.param_groups[0]['lr']))
if self.save_cp and (self.cur_epoch % save_every == 0 or (self.history['ErrorRate'][-1] < np.min([np.inf]+self.history['ErrorRate'][:-1])) or (self.history['EER'][-1] < np.min([np.inf]+self.history['EER'][:-1]))):
self.checkpointing()
self.cur_epoch += 1
if self.verbose>0:
print('Training done!')
if self.valid_loader is not None:
print('Best error rate and corresponding epoch: {:0.4f}, {}'.format(np.min(self.history['ErrorRate']), 1+np.argmin(self.history['ErrorRate'])))
print('Best EER and corresponding epoch: {:0.4f}, {}'.format(np.min(self.history['EER']), 1+np.argmin(self.history['EER'])))
return np.min(self.history['ErrorRate'])
def train_step(self, batch):
self.model.train()
self.optimizer.zero_grad()
x, y = batch
if self.cuda_mode:
x = x.to(self.device)
y = y.to(self.device)
embeddings = self.model.forward(x)
embeddings_norm = F.normalize(embeddings, p=2, dim=1)
loss_class = torch.nn.CrossEntropyLoss()(self.model.out_proj(embeddings_norm, y), y)
triplets_idx, entropy_indices = self.harvester.get_triplets(embeddings_norm.detach(), y)
if self.cuda_mode:
triplets_idx = triplets_idx.to(self.device)
emb_a = torch.index_select(embeddings_norm, 0, triplets_idx[:, 0])
emb_p = torch.index_select(embeddings_norm, 0, triplets_idx[:, 1])
emb_n = torch.index_select(embeddings_norm, 0, triplets_idx[:, 2])
loss_metric = self.triplet_loss(emb_a, emb_p, emb_n)
loss = loss_class + loss_metric
entropy_regularizer = torch.nn.functional.pairwise_distance(embeddings_norm, embeddings_norm[entropy_indices,:]).mean()
loss -= entropy_regularizer*self.lambda_
loss.backward()
self.optimizer.step()
return loss_class.item(), loss_metric.item()
def valid(self, batch):
self.model.eval()
x, y = batch
if self.cuda_mode:
x = x.to(self.device)
y = y.to(self.device)
with torch.no_grad():
embeddings = self.model.forward(x)
embeddings_norm = F.normalize(embeddings, p=2, dim=1)
out=self.model.out_proj(embeddings_norm, y)
pred = F.softmax(out, dim=1).max(1)[1].long()
correct = pred.squeeze().eq(y.squeeze()).detach().sum().item()
triplets_idx = self.harvester_val.get_triplets(embeddings, y)
embeddings = embeddings.cpu()
emb_a = torch.index_select(embeddings, 0, triplets_idx[:, 0])
emb_p = torch.index_select(embeddings, 0, triplets_idx[:, 1])
emb_n = torch.index_select(embeddings, 0, triplets_idx[:, 2])
scores_p = F.cosine_similarity(emb_a, emb_p)
scores_n = F.cosine_similarity(emb_a, emb_n)
return correct, x.size(0), np.concatenate([scores_p.detach().cpu().numpy(), scores_n.detach().cpu().numpy()], 0), np.concatenate([np.ones(scores_p.size(0)), np.zeros(scores_n.size(0))], 0)
def triplet_loss(self, emba, embp, embn, reduce_=True):
loss_ = torch.nn.TripletMarginLoss(margin=self.margin, p=2.0, eps=1e-06, swap=self.swap, reduction='mean' if reduce_ else 'none')(emba, embp, embn)
return loss_
def checkpointing(self):
# Checkpointing
if self.verbose>0:
print(' ')
print('Checkpointing...')
ckpt = {'model_state': self.model.state_dict(),
'optimizer_state': self.optimizer.state_dict(),
'scheduler_state': self.scheduler.state_dict(),
'history': self.history,
'total_iters': self.total_iters,
'cur_epoch': self.cur_epoch}
try:
torch.save(ckpt, self.save_epoch_fmt.format(self.cur_epoch))
except:
torch.save(ckpt, self.save_epoch_fmt)
def load_checkpoint(self, ckpt):
if os.path.isfile(ckpt):
ckpt = torch.load(ckpt)
# Load model state
self.model.load_state_dict(ckpt['model_state'])
# Load optimizer state
self.optimizer.load_state_dict(ckpt['optimizer_state'])
# Load scheduler state
self.scheduler.load_state_dict(ckpt['scheduler_state'])
# Load history
self.history = ckpt['history']
self.total_iters = ckpt['total_iters']
self.cur_epoch = ckpt['cur_epoch']
else:
print('No checkpoint found at: {}'.format(ckpt))
def print_grad_norms(self):
norm = 0.0
for params in list(self.model.parameters()):
norm+=params.grad.norm(2).data[0]
print('Sum of grads norms: {}'.format(norm))
def check_nans(self):
for params in list(self.model.parameters()):
if np.any(np.isnan(params.data.cpu().numpy())):
print('params NANs!!!!!')
if np.any(np.isnan(params.grad.data.cpu().numpy())):
print('grads NANs!!!!!!')
def initialize_params(self):
for layer in self.model.modules():
if isinstance(layer, torch.nn.Conv2d):
init.kaiming_normal(layer.weight.data)
elif isinstance(layer, torch.nn.BatchNorm2d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
```
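`compute_eer`, imported from utils, is not shown in this excerpt. A common way to compute the equal error rate from the pooled scores and 0/1 labels collected above is via the ROC curve; the following is a generic sketch under that assumption, not necessarily the repository's exact implementation:
```python
import numpy as np
from sklearn.metrics import roc_curve

def compute_eer_sketch(labels, scores):
    # EER is the operating point where the false positive and false negative rates meet.
    fpr, tpr, _ = roc_curve(labels, scores, pos_label=1)
    fnr = 1.0 - tpr
    idx = np.nanargmin(np.abs(fnr - fpr))
    return 0.5 * (fpr[idx] + fnr[idx])
```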
#### File: joaomonteirof/multitask_asv/embedd.py
```python
import argparse
import numpy as np
import glob
import torch
import torch.nn.functional as F
import os
import sys
import pathlib
from kaldi_io import read_mat_scp, open_or_fd, write_vec_flt
import model as model_
import scipy.io as sio
from utils.utils import *
from librosa.feature import delta as delta_
def prep_feats(data_, max_dur, max_nchunks, delta=False):
'''
data_ : [T, ncoef]
'''
features = (data_.T)[np.newaxis, np.newaxis, :, :]
if features.shape[-1]<50:
mul = int(np.ceil(50/features.shape[-1]))
features = np.tile(features, (1, 1, 1, mul))
features = features[..., :50]
elif features.shape[-1]>max_dur:
sliced_data = []
idxs = strided_app(np.arange(features.shape[-1]), max_dur, max_dur//2)
for idx in idxs:
sliced_data.append(features[...,idx])
features = np.concatenate(sliced_data, axis=0)
if delta:
features = np.concatenate([features, delta_(features, width=3, order=1), delta_(features, width=3, order=2)], axis=1)
return torch.from_numpy(features).float()[:min(features.shape[0], max_nchunks),...]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute embeddings')
parser.add_argument('--path-to-data', type=str, default='./data/', metavar='Path', help='Path to input data')
parser.add_argument('--path-to-more-data', type=str, default=None, metavar='Path', help='Path to input data')
parser.add_argument('--utt2spk', type=str, default=None, metavar='Path', help='Optional path for utt2spk')
parser.add_argument('--more-utt2spk', type=str, default=None, metavar='Path', help='Optional path for utt2spk')
parser.add_argument('--max-dur', type=int, default=800, metavar='S', help='Max duration in frames (default: 800)')
parser.add_argument('--max-nchunks', type=int, default=10, metavar='S', help='Max number of chunks for long files (default: 10)')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')
parser.add_argument('--out-path', type=str, default='./', metavar='Path', help='Path to output hdf file')
parser.add_argument('--model', choices=['resnet_mfcc', 'resnet_34', 'resnet_lstm', 'resnet_qrnn', 'resnet_stats', 'resnet_large', 'resnet_small', 'resnet_2d', 'TDNN', 'TDNN_att', 'TDNN_multihead', 'TDNN_lstm', 'TDNN_aspp', 'TDNN_mod', 'TDNN_multipool', 'transformer'], default='resnet_mfcc', help='Model arch according to input type')
parser.add_argument('--latent-size', type=int, default=200, metavar='S', help='latent layer dimension (default: 200)')
parser.add_argument('--ncoef', type=int, default=23, metavar='N', help='number of MFCCs (default: 23)')
parser.add_argument('--delta', action='store_true', default=False, help='Enables extra data channels')
parser.add_argument('--eps', type=float, default=0.0, metavar='eps', help='Add noise to embeddings')
parser.add_argument('--inner', action='store_true', default=False, help='Get embeddings from inner layer')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
pathlib.Path(args.out_path).mkdir(parents=True, exist_ok=True)
if args.cp_path is None:
raise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')
print('Cuda Mode is: {}'.format(args.cuda))
if args.cuda:
device = get_freer_gpu()
if args.model == 'resnet_qrnn':
import cupy
cupy.cuda.Device(int(str(device).split(':')[-1])).use()
if args.model == 'resnet_mfcc':
model = model_.ResNet_mfcc(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_34':
model = model_.ResNet_34(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_lstm':
model = model_.ResNet_lstm(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_qrnn':
model = model_.ResNet_qrnn(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_stats':
model = model_.ResNet_stats(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_large':
model = model_.ResNet_large(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_small':
model = model_.ResNet_small(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'resnet_2d':
model = model_.ResNet_2d(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN':
model = model_.TDNN(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN_att':
model = model_.TDNN_att(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN_multihead':
model = model_.TDNN_multihead(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN_lstm':
model = model_.TDNN_lstm(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN_aspp':
model = model_.TDNN_aspp(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN_mod':
model = model_.TDNN_mod(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'TDNN_multipool':
model = model_.TDNN_multipool(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
elif args.model == 'transformer':
model = model_.transformer_enc(n_z=args.latent_size, proj_size=0, ncoef=args.ncoef, delta = args.delta)
ckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)
print('\n', model.load_state_dict(ckpt['model_state'], strict=False), '\n')
model.eval()
if args.cuda:
model = model.to(device)
scp_list = glob.glob(args.path_to_data + '*.scp')
if len(scp_list)<1:
print('Nothing found at {}.'.format(args.path_to_data))
exit(1)
if args.path_to_more_data:
more_scp_list = glob.glob(args.path_to_more_data + '*.scp')
if len(more_scp_list)<1:
print('Nothing found at {}.'.format(args.path_to_more_data))
exit(1)
else:
scp_list = scp_list + more_scp_list
if args.utt2spk:
utt2spk = read_utt2spk(args.utt2spk)
if args.more_utt2spk:
utt2spk.update(read_utt2spk(args.more_utt2spk))
print('Start of data embeddings computation')
embeddings = {}
with torch.no_grad():
for file_ in scp_list:
data = { k:m for k,m in read_mat_scp(file_) }
for i, utt in enumerate(data):
if args.utt2spk:
if not utt in utt2spk:
print('Skipping utterance '+ utt)
continue
feats = prep_feats(data[utt], args.max_dur, args.max_nchunks, args.delta)
try:
if args.cuda:
feats = feats.to(device)
model = model.to(device)
emb = model.forward(feats)
except:
feats = feats.cpu()
model = model.cpu()
emb = model.forward(feats)
emb = emb[1] if args.inner else emb[0]
emb_enroll = F.normalize(emb, p=2, dim=1)
embeddings[utt] = emb.mean(0).detach().cpu().numpy().squeeze()
if args.eps>0.0:
embeddings[utt] += args.eps*np.random.randn(embeddings[utt].shape[0])
print('Storing embeddings in output file')
out_name = args.path_to_data.split('/')[-2] if not args.utt2spk else args.utt2spk.split('/')[-2]
file_name = args.out_path+out_name+'.ark'
if os.path.isfile(file_name):
os.remove(file_name)
print(file_name + ' Removed')
with open_or_fd(file_name,'wb') as f:
for k,v in embeddings.items(): write_vec_flt(f, v, k)
print('End of embeddings computation.')
```
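`strided_app` and `read_utt2spk` come from utils.utils and are not part of this excerpt. For chunking long utterances, `strided_app` is typically an overlapping-window view over an index array; the sketch below is written under that assumption (the `_sketch` suffix marks it as illustrative):
```python
import numpy as np

def strided_app_sketch(a, window, step):
    # Overlapping windows of length `window`, starting every `step` elements of a 1-D array.
    n_windows = 1 + (len(a) - window) // step
    stride = a.strides[0]
    return np.lib.stride_tricks.as_strided(a, shape=(n_windows, window), strides=(step * stride, stride))

print(strided_app_sketch(np.arange(10), 4, 2))  # rows: [0..3], [2..5], [4..7], [6..9]
```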
#### File: joaomonteirof/multitask_asv/hp_search_slurm.py
```python
import nevergrad as ng
import argparse
import subprocess
import shlex
import numpy as np
from time import sleep
import pickle
import glob
import torch
import os
import shutil
from concurrent import futures
def get_file_name(dir_):
idx = np.random.randint(1)
fname = dir_ + str(np.random.randint(1,999999999,1)[0]) + '.p'
while os.path.isfile(fname):
fname = dir_ + str(np.random.randint(1,999999999,1)[0]) + '.p'
file_ = open(fname, 'wb')
pickle.dump(None, file_)
file_.close()
return fname
def kill_job(id_):
try:
status = subprocess.check_output('scancel ' + id_, shell=True)
print(' ')
print('Job {} killed'.format(id_))
print(' ')
except:
pass
def remove_err_out_files(id_):
files_list = glob.glob('*'+id_+'.*')
for file_ in files_list:
os.remove(file_)
# Training settings
parser=argparse.ArgumentParser(description='HP search for ASV')
parser.add_argument('--batch-size', type=int, default=24, metavar='N', help='input batch size for training (default: 24)')
parser.add_argument('--valid-batch-size', type=int, default=64, metavar='N', help='input batch size for valid (default: 64)')
parser.add_argument('--epochs', type=int, default=200, metavar='N', help='number of epochs to train (default: 200)')
parser.add_argument('--budget', type=int, default=30, metavar='N', help='Maximum training runs')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--slurm-sub-file', type=str, default='./run_hp.sh', metavar='Path', help='Path to slurm submission file')
parser.add_argument('--train-hdf-file', type=str, default='./data/train.hdf', metavar='Path', help='Path to hdf data')
parser.add_argument('--valid-hdf-file', type=str, default=None, metavar='Path', help='Path to hdf data')
parser.add_argument('--model', choices=['resnet_mfcc', 'resnet_34', 'resnet_lstm', 'resnet_qrnn', 'resnet_stats', 'resnet_large', 'resnet_small', 'resnet_2d', 'TDNN', 'TDNN_att', 'TDNN_multihead', 'TDNN_lstm', 'TDNN_aspp', 'TDNN_mod', 'TDNN_multipool', 'transformer', 'all'], default='resnet_mfcc', help='Model arch according to input type')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--hp-workers', type=int, help='number of search workers', default=1)
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--save-every', type=int, default=1, metavar='N', help='how many epochs to wait before logging training status. Default is 1')
parser.add_argument('--ncoef', type=int, default=23, metavar='N', help='number of MFCCs (default: 23)')
parser.add_argument('--temp-folder', type=str, default='temp', metavar='Path', help='Temp folder for pickle files')
parser.add_argument('--checkpoint-path', type=str, default=None, metavar='Path', help='Path for checkpointing')
parser.add_argument('--logdir', type=str, default=None, metavar='Path', help='Path for checkpointing')
args=parser.parse_args()
args.cuda=True if not args.no_cuda else False
def train(lr, l2, max_gnorm, momentum, margin, lambda_, swap, latent_size, n_frames, model, ncoef, epochs,
batch_size, valid_batch_size, n_workers, cuda, train_hdf_file, valid_hdf_file, slurm_submission_file,
tmp_dir, cp_path, softmax, delta, logdir):
file_name = get_file_name(tmp_dir)
np.random.seed()
command = 'sbatch' + ' ' + slurm_submission_file + ' ' + str(lr) + ' ' + str(l2) + ' ' + str(max_gnorm) + ' ' + str(momentum) + ' ' + str(margin) + ' ' + str(lambda_) + ' ' + str(swap) + ' ' + str(int(latent_size)) + ' ' + str(int(n_frames)) + ' ' + str(model) + ' ' + str(ncoef) + ' ' + str(epochs) + ' ' + str(batch_size) + ' ' + str(valid_batch_size) + ' ' + str(n_workers) + ' ' + str(cuda) + ' ' + str(train_hdf_file) + ' ' + str(valid_hdf_file) + ' ' + str(file_name) + ' ' + str(cp_path) + ' ' + str(file_name.split('/')[-1]+'t') + ' ' + str(softmax) + ' ' + str(delta) + ' ' + str(logdir)
for j in range(10):
sleep(np.random.randint(10,120,1)[0])
result=None
p=subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
out = p.communicate()
job_id = out[0].decode('utf-8').split(' ')[3]
result_file = open(file_name, 'rb')
result = pickle.load(result_file)
result_file.close()
if result is not None:
remove_err_out_files(job_id)
os.remove(file_name)
print(' ')
print('Best EER in result file ' + file_name.split('/')[-1].split('.p')[0] + ' was: {}'.format(result))
print(' ')
print('With hyperparameters:')
print('Model: {}'.format(model))
print('N frames: {}'.format(int(n_frames)))
print('Embeddings size: {}'.format(int(latent_size)))
print('LR: {}'.format(lr))
print('momentum: {}'.format(momentum))
print('l2: {}'.format(l2))
print('Max. grad norm: {}'.format(max_gnorm))
print('lambda: {}'.format(lambda_))
print('Margin: {}'.format(margin))
print('Swap: {}'.format(swap))
print('Softmax Mode: {}'.format(softmax))
print('Delta features: {}'.format(delta))
print(' ')
return result
return 0.5
tmp_dir = os.getcwd() + '/' + args.temp_folder + '/'
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
parametrization = ng.p.Instrumentation(lr=ng.p.Choice([2.0, 1.0, 0.1, 0.01]),
l2=ng.p.Choice([0.001, 0.0005, 0.0001, 0.00005, 0.00001]),
max_gnorm=ng.p.Choice([10.0, 30.0, 100.0, 1000.0]),
momentum=ng.p.Choice([0.1, 0.3, 0.5, 0.7, 0.9]),
margin=ng.p.Choice([0.1, 0.01, 0.001, 0.0001, 0.00001]),
lambda_=ng.p.Choice([0.0, 0.0001, 0.001, 0.01, 0.1]),
swap=ng.p.Choice([True, False]),
latent_size=ng.p.Choice([64, 128, 256, 512]),
n_frames=ng.p.Choice([300, 400, 600, 800]),
model=ng.p.Choice(['resnet_mfcc', 'resnet_34', 'resnet_lstm', 'resnet_qrnn', 'resnet_stats', 'resnet_large', 'resnet_small', 'TDNN', 'TDNN_att', 'TDNN_multihead', 'TDNN_lstm', 'TDNN_aspp', 'TDNN_mod', 'TDNN_multipool', 'transformer']) if args.model=='all' else args.model,
ncoef=args.ncoef,
epochs=args.epochs,
batch_size=args.batch_size,
valid_batch_size=args.valid_batch_size,
n_workers=args.workers,
cuda=args.cuda,
train_hdf_file=args.train_hdf_file,
valid_hdf_file=args.valid_hdf_file,
slurm_submission_file=args.slurm_sub_file,
tmp_dir=tmp_dir,
cp_path=args.checkpoint_path,
softmax=ng.p.Choice(['softmax', 'am_softmax']),
delta=ng.p.Choice([True, False]),
logdir=args.logdir)
hp_optimizer=ng.optimizers.RandomSearch(parametrization=parametrization, budget=args.budget, num_workers=args.hp_workers)
with futures.ThreadPoolExecutor(max_workers=args.hp_workers) as executor:
print(hp_optimizer.minimize(train, executor=executor, verbosity=2))
shutil.rmtree(tmp_dir)
``` |
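The nevergrad interface used above (`Instrumentation`, `Choice`, `RandomSearch`, `minimize`) can be exercised on a toy objective without SLURM or GPUs. The objective and the candidate values below are made up purely for illustration:
```python
import nevergrad as ng

def toy_objective(lr, l2):
    # Pretend the best validation error is reached at lr=0.1, l2=1e-4.
    return (lr - 0.1) ** 2 + (l2 - 1e-4) ** 2

parametrization = ng.p.Instrumentation(
    lr=ng.p.Choice([1.0, 0.1, 0.01]),
    l2=ng.p.Choice([1e-3, 1e-4, 1e-5]),
)
optimizer = ng.optimizers.RandomSearch(parametrization=parametrization, budget=20, num_workers=1)
recommendation = optimizer.minimize(toy_objective)
print(recommendation.kwargs)
```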
{
"source": "joaomoreno/electron",
"score": 2
} |
#### File: electron/script/install-sysroot.py
```python
import hashlib
import platform
import optparse
import os
import re
import shutil
import subprocess
import sys
from lib.util import get_host_arch
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://github.com'
URL_PATH = 'atom/debian-sysroot-image-creator/releases/download'
REVISION_AMD64 = 'v0.5.0'
REVISION_I386 = 'v0.5.0'
REVISION_ARM = 'v0.5.0'
TARBALL_AMD64 = 'debian_wheezy_amd64_sysroot.tgz'
TARBALL_I386 = 'debian_wheezy_i386_sysroot.tgz'
TARBALL_ARM = 'debian_wheezy_arm_sysroot.tgz'
TARBALL_AMD64_SHA1SUM = '981b2440d446156801c6fdecffb5edcadf27593c'
TARBALL_I386_SHA1SUM = '2e4e43c1e8718595e37c6b6ab89256dae53adf23'
TARBALL_ARM_SHA1SUM = '448e635f38e99d6d860db538a9db85ac74d36e41'
SYSROOT_DIR_AMD64 = 'debian_wheezy_amd64-sysroot'
SYSROOT_DIR_I386 = 'debian_wheezy_i386-sysroot'
SYSROOT_DIR_ARM = 'debian_wheezy_arm-sysroot'
valid_archs = ('arm', 'i386', 'amd64')
def GetSha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
def DetectArch(gyp_defines):
# Check for optional target_arch and only install for that architecture.
# If target_arch is not specified, then only install for the host
# architecture.
if 'target_arch=x64' in gyp_defines:
return 'amd64'
elif 'target_arch=ia32' in gyp_defines:
return 'i386'
elif 'target_arch=arm' in gyp_defines:
return 'arm'
detected_host_arch = get_host_arch()
if detected_host_arch == 'x64':
return 'amd64'
elif detected_host_arch == 'ia32':
return 'i386'
elif detected_host_arch == 'arm':
return 'arm'
else:
print "Unknown host arch: %s" % detected_host_arch
return None
def main():
if options.linux_only:
# This argument is passed when run from the gclient hooks.
# In this case we return early on non-linux platforms.
if not sys.platform.startswith('linux'):
return 0
gyp_defines = os.environ.get('GYP_DEFINES', '')
if options.arch:
target_arch = options.arch
else:
target_arch = DetectArch(gyp_defines)
if not target_arch:
    print('Unable to detect host architecture')
return 1
if options.linux_only and target_arch != 'arm':
# When run from runhooks, only install the sysroot for an Official Chrome
# Linux build, except on ARM where we always use a sysroot.
defined = ['branding=Chrome', 'buildtype=Official']
undefined = ['chromeos=1']
for option in defined:
if option not in gyp_defines:
return 0
for option in undefined:
if option in gyp_defines:
return 0
# The sysroot directory should match the one specified in build/common.gypi.
# TODO(thestig) Consider putting this else where to avoid having to recreate
# it on every build.
linux_dir = os.path.join(SCRIPT_DIR, '..', 'vendor')
if target_arch == 'amd64':
sysroot = os.path.join(linux_dir, SYSROOT_DIR_AMD64)
tarball_filename = TARBALL_AMD64
tarball_sha1sum = TARBALL_AMD64_SHA1SUM
revision = REVISION_AMD64
elif target_arch == 'arm':
sysroot = os.path.join(linux_dir, SYSROOT_DIR_ARM)
tarball_filename = TARBALL_ARM
tarball_sha1sum = TARBALL_ARM_SHA1SUM
revision = REVISION_ARM
elif target_arch == 'i386':
sysroot = os.path.join(linux_dir, SYSROOT_DIR_I386)
tarball_filename = TARBALL_I386
tarball_sha1sum = TARBALL_I386_SHA1SUM
revision = REVISION_I386
else:
    print('Unknown architecture: %s' % target_arch)
assert(False)
url = '%s/%s/%s/%s' % (URL_PREFIX, URL_PATH, revision, tarball_filename)
stamp = os.path.join(sysroot, '.stamp')
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
        print('Debian Wheezy %s root image already up-to-date: %s' %
            (target_arch, sysroot))
return 0
  print('Installing Debian Wheezy %s root image: %s' % (target_arch, sysroot))
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, tarball_filename)
  print('Downloading %s' % url)
sys.stdout.flush()
sys.stderr.flush()
subprocess.check_call(['curl', '--fail', '-L', url, '-o', tarball])
sha1sum = GetSha1(tarball)
if sha1sum != tarball_sha1sum:
    print('Tarball sha1sum is wrong.')
    print('Expected %s, actual: %s' % (tarball_sha1sum, sha1sum))
return 1
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
return 0
if __name__ == '__main__':
parser = optparse.OptionParser('usage: %prog [OPTIONS]')
parser.add_option('--linux-only', action='store_true',
default=False, help='Only install sysroot for official '
'Linux builds')
parser.add_option('--arch', type='choice', choices=valid_archs,
help='Sysroot architecture: %s' % ', '.join(valid_archs))
options, _ = parser.parse_args()
sys.exit(main())
``` |
{
"source": "joaomota59/compiladorLexicoSintaticoSematinco",
"score": 4
} |
#### File: compiladorLexicoSintaticoSematinco/compilador/codigoObjeto.py
```python
from goto import with_goto
@with_goto
def main():
j=9
k=10
_t1=j%1.9
k=_t1
print("tk")
_t2=5 > 4
_t3=5 < 1
_t4=_t2 and _t3
if _t4 == False: goto ._l3
_t5=3+2
_t6=_t5+1
_t7=_t6+3
_t8=_t7+9
_t9=4 < 3
print(_t8,"oi",_t9)
print("nice")
_t10=3+5
x=_t10
a=input()
a=float(a)
print(a)
_t11=5 > 2
_t12= not _t11
if _t12 == False: goto ._l1
print("compiladores ou jubiladores?")
label ._l1
_t13=2 < 1
if _t13 == False: goto ._l2
print("ss")
label ._l2
label ._l3
if msm == False: goto ._l4
print("verdadeiro, deu bom")
msm=True
print(msm)
goto ._l5
label ._l4
print("falso deu ruim")
print("falso deu ruim")
label ._l5
label ._l7
_t14=j <= 10
_t15=5 > 2
_t16=_t14 and _t15
if _t16 == False: goto ._l6
print(j)
_t17=2+1
j=_t17
goto ._l7
label ._l6
main()
``` |
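The file above is object code emitted by the compiler; it relies on the `goto` package's `with_goto` decorator and its `label .name` / `goto .name` syntax. A minimal hand-written example of the same mechanism (function and label names are illustrative):
```python
from goto import with_goto

@with_goto
def count_to(n):
    i = 0
    label .loop
    if i >= n:
        goto .end
    print(i)
    i += 1
    goto .loop
    label .end

count_to(3)  # prints 0, 1, 2
```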
{
"source": "joaomota59/diversos",
"score": 3
} |
#### File: joaomota59/diversos/BS4 IN JS PAGE.py
```python
from get_html import create_renderer  # loads the page only after it has been rendered with JS
#from get_html import HtmlRenderer
#import asyncio
from bs4 import BeautifulSoup
#import requests
async def manipulate(page):  # function used to process something before the page is rendered
return
# https://github.com/miyakogi/pyppeteer/issues/205#issuecomment-470886682
page2=page
#await page.cookie({'timeframeSelect':'1'})
await page.select('select#timeframeSelect','M5')
#await page.select('#pares', 'Todos')
#await page.setCookie({'timeframeSelect':'1'})
#await page.evaluate('{window.scrollBy(0, document.body.scrollHeight);}')
with create_renderer() as renderer:
    # use the renderer. The underlying browser will be instantiated on first call to render
#cookies = dict([cname='timeframeSelect',cvalue='1',exdays='7')
#s = requests.Session()
#s.cookies.update({'timeframeSelect':'1'})
#response = s.get('https://catalogador.ml/') # Automatically uses the session cookies
#response = renderer.render(url='https://catalogador.ml/', cookies=s.cookies,keep_page=True)
response = renderer.render(url='https://catalogador.ml/', manipulate_page_func=manipulate)
#s = requests.Session()
#s.cookies.update({'timeframeSelect':'1'})
#response = s.get('https://catalogador.ml/') # Automatically uses the session cookies
soup = BeautifulSoup(response.text, 'html.parser')
#print(soup.prettify())  # prints the HTML rendered by JavaScript
def getEstrategias():  # selects the strategy names that live in nested DOM tags
for i in soup.select("div.estrategiaContainer div.parInfo div.estrategiaInfo p.estrategiaName"):
print(i.get_text())
getEstrategias()
``` |
{
"source": "joaomota59/messagePassingInterface-MPI",
"score": 3
} |
#### File: messagePassingInterface-MPI/cloud/MPI2.py
```python
from mpi4py import MPI
arquivo = open("etapa2.txt","a")
def mpiPI(nroProcesso, rank):  # function that computes the approximate value of pi
N = 840
i = int(1 + (N/nroProcesso)*rank)
k = int((N/nroProcesso)*(rank+1))
somatorio = 0
for j in range(i,k+1):
somatorio += 1/(1+((j-0.5)/N)**2)
    #print(i,k)  # intervals
    #print((somatorio/N)*4)  # partial sum of each interval
return (somatorio/N)*4
if __name__ == "__main__": #main -- Segunda versão
comm = MPI.COMM_WORLD
rank = comm.Get_rank()#rank do processo atual
numDeProcessos = comm.Get_size()#numero de processos
idmaquina = MPI.Get_processor_name()#hostname damaquina
comm.Barrier()#barreira inicio
tinicial = MPI.Wtime()
res1 = mpiPI(comm.Get_size(),rank)
    comm.Barrier()  # end barrier
tfinal=MPI.Wtime()
k = ("Resposta do processo [" + str(rank) + "] = " + str(res1) + " ID Máquina = "+str(idmaquina))
#print("-"*len(k)+"\n"+k+"\n")
if rank == 0:
bufferAux = [tfinal-tinicial]
for i in range(1,numDeProcessos):
bufferAux.append(comm.recv(source = i))
arquivo.write(str(max(bufferAux))+"\n")
arquivo.close()
#print("Tempo de execução:",max(bufferAux))#tempo do processo que durou mais
else:
comm.send(tfinal-tinicial,dest = 0)
``` |
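Each rank sums its own slice of the 840-term midpoint rule for ∫₀¹ 4/(1+x²) dx, and rank 0 gathers the per-rank timings; the script is normally launched with something like `mpiexec -n 4 python MPI2.py` (the process count is illustrative). A serial sanity check of the same sum:
```python
N = 840
pi_approx = sum(1.0 / (1.0 + ((j - 0.5) / N) ** 2) for j in range(1, N + 1)) * 4.0 / N
print(pi_approx)  # ~3.141592
```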
{
"source": "joaomota59/redesNeuraisArtificiais",
"score": 4
} |
#### File: joaomota59/redesNeuraisArtificiais/adalineTreinamento.py
```python
def degrau(u):
if u>=0:
return 1
else:
return 0
if __name__ == '__main__':
quantAmostras = int(input("Quantidade de amostras que possui o conjunto de treinamento -> "))
precisaoRequerida = float(input("Precisão Requerida(ε) -> "))
numMaxEpocas = int(input("Número máximo de épocas -> "))
amostras = []
for i in range(quantAmostras):
print("\nAmostra "+str(i+1))
x1 = float(input("x1: "))
x2 = float(input("x2: "))
dk = float(input("d(k): "))
amostras.append([x1,x2,dk])
print('''\nDigite os valores do vetor contendo limiar e os pesos,
separados por vírgula!
exemplo: θ,w1,w2,w3
exemplo: 0.5,0.4,0.3,0.1
''')
w = [float(i) for i in input("-> ").split(",")]
taxaDeAprendizagem = float(input("Taxa de aprendizagem(η) -> "))
epoca = 0
    vetPotAtv = []  # list that stores the activation potential values
while True:
print("\nÉpoca:",epoca+1)
if epoca == 0:
for indiceAmostra in range(quantAmostras):
                entradas = [-1] + amostras[indiceAmostra][:-1]  # w0 = -1 as in the perceptron ... change the 0 here later
u = 0
print("u = ",end="")
for i in range(len(entradas)):
u+= entradas[i]*w[i]
if(i!=len(entradas) - 1):
print(entradas[i],"*",w[i],"+",end="")
continue
print(entradas[i],"*",w[i],"=",u,end="\n")
#epoca += 1
vetPotAtv.append(u)
print("W =",w)
print("Eqm_Anterior(w) = ",end="")
equacao = "1/"+str(len(vetPotAtv))+"*("
        for index,potencial in enumerate(vetPotAtv):  # previous Eqm (mean squared error)
if(index != len(vetPotAtv) - 1):
equacao += "("+str(amostras[index][-1])+"-"+str(potencial)+")**2 + "
continue
equacao += "("+str(amostras[index][-1])+"-"+str(potencial)+")**2 )"
print(equacao,"=",eval(equacao))
vetPotAtv = []
for indiceAmostra in range(quantAmostras):
entradas = [-1] + amostras[indiceAmostra][:-1]#wo = -1 no perceptron ...trocar dps o 0 aqui
u = 0
#print("Época:",epoca+1)
print("u = ",end="")
for i in range(len(entradas)):
u+= entradas[i]*w[i]
if(i!=len(entradas) - 1):
print(entradas[i],"*",w[i],"+",end="")
continue
print(entradas[i],"*",w[i],"=",u,end="\n")
for i in range(len(w)):
w[i] = w[i] + taxaDeAprendizagem*(amostras[indiceAmostra][-1]-u)*entradas[i]
vetPotAtv.append(u)
print("Eqm_Atual(w) = ",end="")
equacao2 = "1/"+str(len(vetPotAtv))+"*("
        for index,potencial in enumerate(vetPotAtv):  # current Eqm (mean squared error)
if(index != len(vetPotAtv) - 1):
equacao2 += "("+str(amostras[index][-1])+"-"+str(potencial)+")**2 + "
continue
equacao2 += "("+str(amostras[index][-1])+"-"+str(potencial)+")**2 )"
print(equacao2,"=",eval(equacao2))
print("W =",w)
epoca+=1
diferencaAtualeAnterior = abs(eval(equacao) - eval(equacao2))
print("|Eqm_Atual(w) - Eqm_Anterior(w)| =",diferencaAtualeAnterior)
if diferencaAtualeAnterior <= precisaoRequerida or numMaxEpocas == epoca:
break
```
#### File: joaomota59/redesNeuraisArtificiais/convolucao-stride-padding-kernel.py
```python
from scipy import signal
import numpy as np
def convolucao (entrada,filtro,stride=1,modo="valid"):
filtro = np.array(filtro,np.float32)
entrada = np.array(entrada,np.float32)
saida = signal.convolve2d(entrada, filtro[::-1, ::-1], mode=modo)[::stride, ::stride]
print(saida)
if __name__ == "__main__":
filtro = [[0.1,0.2,0.6],
[0.6,0.8,0.7],
[0.5,0.4,0.3]]
entrada = [
[0.0,0.0,0.0,0.0,0.0],
[0.0,0.2,0.4,0.3,0.0],
[0.0,0.3,0.9,0.6,0.0],
[0.0,0.9,0.1,0.2,0.0],
[0.0,0.0,0.0,0.0,0.0]
]
stride = 1
modo = "valid"
    #entrada = np.pad(entrada, (1, 1), 'constant', constant_values=(0, 0))  # if the input needs zero padding around it, uncomment this line
convolucao(entrada=entrada,filtro=filtro,stride=stride,modo=modo)
```
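Because `convolve2d` flips the kernel internally, passing `filtro[::-1, ::-1]` makes the function compute a cross-correlation, which is what convolutional layers in neural networks actually do. A quick equivalence check with illustrative values:
```python
import numpy as np
from scipy import signal

k = np.array([[0.1, 0.2], [0.3, 0.4]])
x = np.arange(9, dtype=float).reshape(3, 3)
a = signal.convolve2d(x, k[::-1, ::-1], mode="valid")
b = signal.correlate2d(x, k, mode="valid")
print(np.allclose(a, b))  # True
```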
#### File: joaomota59/redesNeuraisArtificiais/feedforward-camada-simples-random.py
```python
from random import random
import numpy as np
from math import e
def degrau(u):
if u>=0:
return 1
else:
return 0
def degrauBipolar(u):
if u>0:
return 1
elif u==0:
return 0
else:
return -1
def linear(u):
return u
def logistica(u,beta):
return 1/(1 + e**(-beta*u))
def tangenteHiperbolica(u,beta):
return (1 - e**(-beta*u))/(1 + e**(-beta*u))
def camadaSimples(entrada_e_peso=[],entrada_e_peso_prod=[],limiarDeAtivacao =[],funcaoDeAtivacao=[]):  # inputs, weights and thresholds
    pontencialDeAtv = []  # activation potential of each neuron
    saidas = []  # output of each neuron, g(u)/y
print("\nPasso 1 [u = Σ - θ]:")
for indexEntrada,i in enumerate(entrada_e_peso_prod):
print("u"+str(indexEntrada+1)+" = ",end="")
for indexCamada,j in enumerate(i):
if indexCamada != len(i)-1:#se nao for o ultimo elemento nao dá \n
print("x"+str(indexCamada+1)+"*w("+str(indexCamada+1)+","+str(indexEntrada+1)+") + ",end="")
continue
print("x"+str(indexCamada+1)+"*w("+str(indexCamada+1)+","+str(indexEntrada+1)+") - θ"+str(indexEntrada+1))#se for o ultimo elemento dá \n
print("\nPasso 2 [u = Σ - θ]:")
for indexEntrada,i in enumerate(entrada_e_peso_prod):
print("u"+str(indexEntrada+1)+" = ",end="")
for indexCamada,j in enumerate(i):
if indexCamada != len(i)-1:#se nao for o ultimo elemento nao dá \n
print(str(entrada_e_peso[indexCamada][indexEntrada][0])+"*"+str(entrada_e_peso[indexCamada][indexEntrada][1])+" + ",end="")
continue
u = sum(i)-limiarDeAtivacao[indexEntrada]
print(str(entrada_e_peso[indexCamada][indexEntrada][0])+"*"+str(entrada_e_peso[indexCamada][indexEntrada][1]),
"-",limiarDeAtivacao[indexEntrada],"=>",u)#se for o ultimo elemento dá \n
pontencialDeAtv.append(u)
k = 0
print("\nPasso 3 g(u):")
for indicePotencial,potencial in enumerate(pontencialDeAtv):
if funcaoDeAtivacao[indicePotencial] == 1:
k = degrau(potencial)
elif funcaoDeAtivacao[indicePotencial] == 2:
k = linear(potencial)
elif funcaoDeAtivacao[indicePotencial] == 3:
k = logistica(potencial,beta)
elif funcaoDeAtivacao[indicePotencial] == 4:
k = tangenteHiperbolica(potencial,beta)
elif funcaoDeAtivacao[indicePotencial] == 5:
k = degrauBipolar(potencial)
saidas.append(k)
print("g(u"+str(indicePotencial+1)+") =",k)
return saidas
if __name__ == '__main__':
    # samples with their corresponding weights and desired outputs d(x)
amostras = [[0.9,0.1,1],
[0.6,0.5,1],
[0.2,0.8,-1],
[0.7,0.2,1],
[0.5,0.4,-1],
[0.4,0.6,1],
[0.25,0.8,-1],
[0.1,0.9,-1],
[0.3,0.7,-1],
[0.0,1.0,-1]]
for indexAm in range(len(amostras)):
erro = False
print("\n\nAmostra",indexAm+1)
for epoca in range(1,101): #100 epochs were adopted
quantidadeDeSaidas = 1 #there will always be a single output for the given samples
entrada_e_peso = []
entrada_e_peso_prod = []#product of each input with its weight
limiarDeAtivacao = [] #activation threshold of each neuron
funcaoDeAtivacao = []#vector storing the activation function of each neuron
for entrada in amostras[indexAm][:2]:
aux = []
aux.append((entrada,random()))#sample input and a random weight
entrada_e_peso.append(aux)
for linha in entrada_e_peso: #multiplies the input by the weight for each neuron
entrada_e_peso_prod.append(np.prod(linha,axis=1))
for i in range(quantidadeDeSaidas):
limiarDeAtivacao.append(random())
funcaoDeAtivacao.append(5)#the bipolar step function was adopted for the random tests
beta = None
if ((3 in funcaoDeAtivacao) or (4 in funcaoDeAtivacao)):
beta = 1#the beta value adopted was always 1
entrada_e_peso = np.array(entrada_e_peso)
entrada_e_peso_prod = np.array(entrada_e_peso_prod).transpose()
y = camadaSimples(entrada_e_peso,entrada_e_peso_prod,limiarDeAtivacao,funcaoDeAtivacao)[0]
if y != amostras[indexAm][-1]:#compares the obtained output with the desired output
continue
print("Solução encontrada na época",epoca)
erro = True
break
``` |
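A quick numeric check of the logistic and hyperbolic-tangent style activations defined above, reproduced standalone so the snippet runs on its own (beta = 1, input values made up):
```python
from math import e

def logistica(u, beta):
    # logistic (sigmoid) activation, as in the file above
    return 1 / (1 + e ** (-beta * u))

def tangente_hiperbolica(u, beta):
    # hyperbolic-tangent-shaped activation, as in the file above
    return (1 - e ** (-beta * u)) / (1 + e ** (-beta * u))

print(logistica(0.0, 1))             # 0.5, the sigmoid midpoint
print(tangente_hiperbolica(0.0, 1))  # 0.0
print(logistica(2.0, 1))             # ~0.88, saturating towards 1
```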
{
"source": "joaomoura1996/TransQuest",
"score": 3
} |
#### File: common/util/postprocess.py
```python
def format_submission(df, method, index, path):
predictions = df['predictions']
with open(path, 'w') as f:
for number, prediction in zip(index, predictions):
text = method + "\t" + str(number) + "\t" + str(prediction) + "\t" + str(0)
f.write("%s\n" % text)
```
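The helper writes one tab-separated line per segment: method name, segment index, predicted score, and a trailing 0. A small usage sketch with made-up predictions, assuming `format_submission` above is importable:
```python
import pandas as pd

df = pd.DataFrame({'predictions': [0.812, 0.435, 0.901]})
index = [0, 1, 2]  # segment ids, made up for illustration

format_submission(df, method="TransQuest", index=index, path="predictions.txt")
# predictions.txt now contains lines such as:
# TransQuest	0	0.812	0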
#### File: common/util/download.py
```python
from google_drive_downloader import GoogleDriveDownloader as gdd
def download_from_google_drive(file_id, path):
gdd.download_file_from_google_drive(file_id=file_id,
dest_path=path + "/model.zip",
unzip=True)
```
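Usage is a single call: the archive is saved as `model.zip` under `path` and unzipped in place. The file id below is a placeholder, not a real released model:
```python
# placeholder Google Drive id; substitute the id of an actual released model
download_from_google_drive(file_id="FILE_ID_PLACEHOLDER", path="temp/model")
```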
#### File: siamese_transformers/losses/triplet_loss.py
```python
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from transquest.algo.siamese_transformers.run_model import SiameseTransQuestModel
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(self, model: SiameseTransQuestModel, distance_metric=TripletDistanceMetric.EUCLIDEAN,
triplet_margin=1):
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
```
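The forward pass above computes the standard triplet objective, mean(relu(d(a,p) − d(a,n) + margin)). A tiny numeric sketch of that formula on dummy embeddings, using the same Euclidean distance as `TripletDistanceMetric.EUCLIDEAN`; no model is involved, so this only exercises the loss arithmetic:
```python
import torch
import torch.nn.functional as F

anchor   = torch.tensor([[1.0, 0.0]])
positive = torch.tensor([[0.9, 0.1]])   # close to the anchor
negative = torch.tensor([[-1.0, 0.5]])  # far from the anchor

margin = 1.0
d_pos = F.pairwise_distance(anchor, positive, p=2)
d_neg = F.pairwise_distance(anchor, negative, p=2)
loss = F.relu(d_pos - d_neg + margin).mean()
print(loss)  # tensor(0.) here: the negative is already further away than the margin requires
```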
#### File: algo/transformers/model_args.py
```python
import json
import os
import sys
from dataclasses import dataclass, field, asdict
from multiprocessing import cpu_count
def get_default_process_count():
process_count = cpu_count() - 2 if cpu_count() > 2 else 1
if sys.platform == "win32":
process_count = min(process_count, 61)
return process_count
def get_special_tokens():
return ["<s>", "<pad>", "</s>", "<unk>", "<mask>"]
@dataclass
class ModelArgs:
adam_epsilon: float = 1e-8
best_model_dir: str = "outputs/best_model"
cache_dir: str = "cache_dir/"
custom_layer_parameters: list = field(default_factory=list)
custom_parameter_groups: list = field(default_factory=list)
train_custom_parameters_only: bool = False
config: dict = field(default_factory=dict)
do_lower_case: bool = False
early_stopping_consider_epochs: bool = False
early_stopping_delta: float = 0
early_stopping_metric: str = "eval_loss"
early_stopping_metric_minimize: bool = True
early_stopping_patience: int = 3
encoding: str = None
eval_batch_size: int = 8
evaluate_during_training: bool = False
evaluate_during_training_silent: bool = True
evaluate_during_training_steps: int = 2000
evaluate_during_training_verbose: bool = False
fp16: bool = False
fp16_opt_level: str = "O1"
gradient_accumulation_steps: int = 1
learning_rate: float = 4e-5
local_rank: int = -1
logging_steps: int = 50
manual_seed: int = None
max_grad_norm: float = 1.0
max_seq_length: int = 128
multiprocessing_chunksize: int = 500
n_gpu: int = 1
no_cache: bool = False
no_save: bool = False
num_train_epochs: int = 1
output_dir: str = "outputs/"
overwrite_output_dir: bool = False
process_count: int = field(default_factory=get_default_process_count)
reprocess_input_data: bool = True
save_best_model: bool = True
save_eval_checkpoints: bool = True
save_model_every_epoch: bool = True
save_recent_only: bool = False
save_steps: int = 2000
save_optimizer_and_scheduler: bool = True
silent: bool = False
tensorboard_dir: str = None
train_batch_size: int = 8
use_cached_eval_features: bool = False
use_early_stopping: bool = False
use_multiprocessing: bool = True
wandb_kwargs: dict = field(default_factory=dict)
wandb_project: str = None
warmup_ratio: float = 0.06
warmup_steps: int = 0
weight_decay: int = 0
def update_from_dict(self, new_values):
if isinstance(new_values, dict):
for key, value in new_values.items():
setattr(self, key, value)
else:
raise (TypeError(f"{new_values} is not a Python dict."))
def save(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model_args.json"), "w") as f:
json.dump(asdict(self), f)
def load(self, input_dir):
if input_dir:
model_args_file = os.path.join(input_dir, "model_args.json")
if os.path.isfile(model_args_file):
with open(model_args_file, "r") as f:
model_args = json.load(f)
self.update_from_dict(model_args)
@dataclass
class ClassificationArgs(ModelArgs):
"""
Model args for a ClassificationModel
"""
labels_list: list = field(default_factory=list)
labels_map: dict = field(default_factory=dict)
lazy_delimiter: str = "\t"
lazy_labels_column: int = 1
lazy_loading: bool = False
lazy_loading_start_line: int = 1
lazy_text_a_column: bool = None
lazy_text_b_column: bool = None
lazy_text_column: int = 0
regression: bool = True
sliding_window: bool = False
stride: float = 0.8
tie_value: int = 1
``` |
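`update_from_dict`, `save` and `load` give a simple round trip through `model_args.json`. A minimal sketch, assuming the dataclass above is importable (the import path is guessed from the file location and may differ):
```python
from transquest.algo.transformers.model_args import ClassificationArgs  # path assumed

args = ClassificationArgs()
args.update_from_dict({"learning_rate": 2e-5, "num_train_epochs": 3, "regression": True})

args.save("outputs/")             # writes outputs/model_args.json
restored = ClassificationArgs()
restored.load("outputs/")         # reads the JSON back into the dataclass
assert restored.learning_rate == 2e-5
```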
{
"source": "joaompereira/adahessian",
"score": 3
} |
#### File: image_classification/models/resnet.py
```python
from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) <NAME>
'''
import torch.nn as nn
import math
from copy import deepcopy
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
residual_not,
batch_norm_not,
stride=1,
downsample=None):
super(BasicBlock, self).__init__()
self.residual_not = residual_not
self.batch_norm_not = batch_norm_not
self.conv1 = conv3x3(inplanes, planes, stride)
if self.batch_norm_not:
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
if self.batch_norm_not:
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.batch_norm_not:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.batch_norm_not:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.residual_not:
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
residual_not,
batch_norm_not,
stride=1,
downsample=None):
super(Bottleneck, self).__init__()
self.residual_not = residual_not
self.batch_norm_not = batch_norm_not
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
if self.batch_norm_not:
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
if self.batch_norm_not:
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
if self.batch_norm_not:
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.batch_norm_not:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.batch_norm_not:
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.batch_norm_not:
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.residual_not:
out += residual
out = self.relu(out)
return out
ALPHA_ = 1
class ResNet(nn.Module):
def __init__(
self,
depth,
residual_not=True,
batch_norm_not=True,
base_channel=16,
num_classes=10):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
n = (depth - 2) // 6
# block = Bottleneck if depth >=44 else BasicBlock
block = BasicBlock
self.base_channel = int(base_channel)
self.residual_not = residual_not
self.batch_norm_not = batch_norm_not
self.inplanes = self.base_channel * ALPHA_
self.conv1 = nn.Conv2d(
3,
self.base_channel *
ALPHA_,
kernel_size=3,
padding=1,
bias=False)
if self.batch_norm_not:
self.bn1 = nn.BatchNorm2d(self.base_channel * ALPHA_)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(
block,
self.base_channel *
ALPHA_,
n,
self.residual_not,
self.batch_norm_not)
self.layer2 = self._make_layer(
block,
self.base_channel *
2 *
ALPHA_,
n,
self.residual_not,
self.batch_norm_not,
stride=2)
self.layer3 = self._make_layer(
block,
self.base_channel *
4 *
ALPHA_,
n,
self.residual_not,
self.batch_norm_not,
stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(
self.base_channel *
4 *
ALPHA_ *
block.expansion,
num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(
self,
block,
planes,
blocks,
residual_not,
batch_norm_not,
stride=1):
downsample = None
if (stride != 1 or self.inplanes != planes *
block.expansion) and (residual_not):
if batch_norm_not:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
else:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
)
layers = nn.ModuleList()
layers.append(
block(
self.inplanes,
planes,
residual_not,
batch_norm_not,
stride,
downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
residual_not,
batch_norm_not))
# return nn.Sequential(*layers)
return layers
def forward(self, x):
output_list = []
x = self.conv1(x)
if self.batch_norm_not:
x = self.bn1(x)
x = self.relu(x) # 32x32
output_list.append(x.view(x.size(0), -1))
for layer in self.layer1:
x = layer(x) # 32x32
output_list.append(x.view(x.size(0), -1))
for layer in self.layer2:
x = layer(x) # 16x16
output_list.append(x.view(x.size(0), -1))
for layer in self.layer3:
x = layer(x) # 8x8
output_list.append(x.view(x.size(0), -1))
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
output_list.append(x.view(x.size(0), -1))
# return output_list, x
return x
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs)
```
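`resnet(**kwargs)` simply forwards to the `ResNet` constructor, so `depth` must satisfy depth = 6n + 2 (20, 32, 44, ...). A quick shape check on a CIFAR-sized batch; the import path is a placeholder for wherever this module lives:
```python
import torch
from resnet import resnet  # import path assumed

model = resnet(depth=20, num_classes=10, base_channel=16)
x = torch.randn(4, 3, 32, 32)   # a CIFAR-10 style batch
logits = model(x)
print(logits.shape)             # torch.Size([4, 10])
```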
#### File: adahessian/instruction/adahessian.py
```python
import math
import torch
from torch.optim.optimizer import Optimizer
from copy import deepcopy
import numpy as np
class Adahessian(Optimizer):
"""Implements Adahessian algorithm.
It has been proposed in `ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning`.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 0.15)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-4)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
hessian_power (float, optional): Hessian power (default: 1)
"""
def __init__(self, params, lr=0.15, betas=(0.9, 0.999), eps=1e-4,
weight_decay=0, block_length=1, hessian_power=1):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(
betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(
betas[1]))
if not 0.0 <= hessian_power <= 1.0:
raise ValueError("Invalid Hessian power value: {}".format(hessian_power))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, hessian_power=hessian_power)
super(Adahessian, self).__init__(params, defaults)
self.block_length = block_length
def get_trace(self, gradsH):
"""
compute the Hessian vector product with a random vector v, at the current gradient point,
i.e., compute the gradient of <gradsH,v>.
:param gradsH: a list of torch variables
:return: a list of torch tensors
"""
params = self.param_groups[0]['params']
v = [torch.randint_like(p, high=2, device='cuda') for p in params]
for v_i in v:
v_i[v_i == 0] = -1
hvs = torch.autograd.grad(
gradsH,
params,
grad_outputs=v,
only_inputs=True,
retain_graph=True)
hutchinson_trace = []
for hv, vi in zip(hvs, v):
param_size = hv.size()
if len(param_size) <= 1:
# For a 1D tensor, e.g., bias, BatchNorm, LayerNorm etc.
# Usually, you do not need to set spatial averaging for it, i.e., the Hessian diagonal block size is 1 here.
tmp_output = torch.abs(hv * vi)
hutchinson_trace.append(tmp_output)
# Of course, you can also use the same way as 2D tensor does to average your 1D tensor.
# tmp_output1 = torch.abs((hv * vi + 0.)).view(-1, self.block_length) # flatten to N times self.block_length
# tmp_output2 = torch.abs(torch.sum(tmp_output1, dim=[1])).view(-1) / float(self.block_length)
# tmp_output3 = tmp_output2.repeat_interleave(self.block_length).view(param_size)
# hutchinson_trace.append(tmp_output3)
elif len(param_size) == 2:
# For 2D tensor, e.g., the matrix in the fully-connected layer.
# This is a normal case for MLP, Transformer models.
# Usually, a spatial averaging needs to be used here to get the best result.
# If you are not looking for the absolute best config, you may set it to be 1.
# In all of our experiments, we still get pretty good performance.
tmp_output1 = torch.abs((hv * vi + 0.)).view(-1, self.block_length) # flatten to N times self.block_length
tmp_output2 = torch.abs(torch.sum(tmp_output1, dim=[1])).view(-1) / float(self.block_length)
tmp_output3 = tmp_output2.repeat_interleave(self.block_length).view(param_size)
hutchinson_trace.append(tmp_output3)
elif len(param_size) == 3:
# For 3D tensor, e.g., the 1D Conv layer.
# This layer is usually used for Char-LM.
# First Way:
# Usually, you can set it to be the conv kernel size: in more detail, for instance, if your input/output channels are 20 and your kernel size is 5,
# then the 1D Conv kernel has size 20x20x5 and you can average along the final dim, i.e., the block_length = 5
tmp_output = torch.abs(torch.sum(torch.abs(
hv * vi), dim=[2], keepdim=True)) / vi[0, 1].numel() # torch.sum() reduces the dim 2, i.e. the size 5
hutchinson_trace.append(tmp_output) # keep the trace aligned with the parameter list
# Second way:
# Of course, you can also use the same self.block_length to average the spatial Hessian of the 3D kernel.
# tmp_output1 = torch.abs((hv * vi + 0.)).view(-1, self.block_length) # flatten to N times self.block_length
# tmp_output2 = torch.abs(torch.sum(tmp_output1, dim=[1])).view(-1) / float(self.block_length)
# tmp_output3 = tmp_output2.repeat_interleave(self.block_length).view(param_size)
# hutchinson_trace.append(tmp_output3)
elif len(param_size) == 4:
# For 4D tensor, e.g, the 2D Conv layer
# This layer is usually used for CV tasks.
# First Way:
# Usually, you can set it to be the conv kernel size: in more detail, for instance, if your input/output channels are 256 and your kernel size is 3x3,
# then the 2D Conv kernel has size 256x256x3x3 and you can average along the last two dims, i.e., the block_length = 9
tmp_output = torch.abs(torch.sum(torch.abs(
hv * vi), dim=[2, 3], keepdim=True)) / vi[0, 1].numel() # torch.sum() reduces the dim 2/3.
hutchinson_trace.append(tmp_output)
# Second way:
# Of course, you can also use the same self.block_length to average the spatial Hessian of the 4D kernel.
# tmp_output1 = torch.abs((hv * vi + 0.)).view(-1, self.block_length) # flatten to N times self.block_length
# tmp_output2 = torch.abs(torch.sum(tmp_output1, dim=[1])).view(-1) / float(self.block_length)
# tmp_output3 = tmp_output2.repeat_interleave(self.block_length).view(param_size)
# hutchinson_trace.append(tmp_output3)
return hutchinson_trace
def step(self, gradsH, closure=None):
"""Performs a single optimization step.
Arguments:
gradsH: The gradient used to compute Hessian vector product.
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
# get the Hessian diagonal
hut_trace = self.get_trace(gradsH)
for group in self.param_groups:
for i, p in enumerate(group['params']):
if p.grad is None:
continue
grad = deepcopy(gradsH[i].data)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of Hessian diagonal square values
state['exp_hessian_diag_sq'] = torch.zeros_like(p.data)
exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_hessian_diag_sq.mul_(beta2).addcmul_(
1 - beta2, hut_trace[i], hut_trace[i])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# make the square root, and the Hessian power
k = group['hessian_power']
denom = (
(exp_hessian_diag_sq.sqrt() ** k) /
math.sqrt(bias_correction2) ** k).add_(
group['eps'])
# make update
p.data = p.data - \
group['lr'] * (exp_avg / bias_correction1 / denom + group['weight_decay'] * p.data)
return loss
```
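Because `get_trace` differentiates the gradients themselves, the gradients passed to `step` have to be produced with `create_graph=True`; `step` also skips any parameter whose `.grad` is `None`, so the sketch mirrors the gradients into `.grad` before stepping. This is a minimal sketch, not the repository's training loop: the model and data are placeholders, and a GPU is assumed because the Rademacher vectors are sampled with `device='cuda'`.
```python
import torch
import torch.nn as nn

model = nn.Linear(10, 1).cuda()   # placeholder model
criterion = nn.MSELoss()
optimizer = Adahessian(model.parameters(), lr=0.15, block_length=1)

x = torch.randn(32, 10, device='cuda')   # placeholder batch
y = torch.randn(32, 1, device='cuda')

loss = criterion(model(x), y)
# keep the graph so get_trace() can take a second derivative of the gradients
grads = torch.autograd.grad(loss, model.parameters(), create_graph=True)
for p, g in zip(model.parameters(), grads):
    p.grad = g.detach()   # step() only updates parameters whose .grad is set
optimizer.step(gradsH=grads)
```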
#### File: fairseq/optim/adahessian.py
```python
import math
import types
import torch
import torch.optim
import torch.distributed as dist
from copy import deepcopy
import numpy as np
from . import FairseqOptimizer, register_optimizer
@register_optimizer('adahessian')
class FairseqAdahess(FairseqOptimizer):
"""Adam optimizer for fairseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adahess(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
help='epsilon for Adam optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--block-length', default=1, type=int,
help='We use this number for length of the hessian average block')
parser.add_argument('--hessian-power', type=float, default=1, metavar='H',
help='Hessian power')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'betas': eval(self.args.adam_betas),
'eps': self.args.adam_eps,
'weight_decay': self.args.weight_decay,
'block_length': self.args.block_length,
'single_gpu': self.args.single_gpu,
'hessian_power': self.args.hessian_power
}
def average_params(self):
"""Reduce Params is only used during BMUF distributed training."""
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adahess(torch.optim.Optimizer):
"""Implements AdamHess algorithm.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, block_length=1, hessian_power=1, single_gpu=False):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
super(Adahess, self).__init__(params, defaults)
self.block_length = block_length
self.single_gpu = single_gpu
self.hessian_power = hessian_power
def get_trace(self, gradsH):
"""
compute the Hessian vector product with a random vector v, at the current gradient point,
i.e., compute the gradient of <gradsH, v>.
:param gradsH: a list of torch variables
:return: a list of torch tensors
"""
params = self.param_groups[0]['params']
params = list(filter(lambda x: x.requires_grad, params) )
v = [torch.randint_like(p, high = 2) for p in params]
# this is for distributed setting
if not self.single_gpu:
for v1 in v:
dist.all_reduce(v1)
for v_i in v:
v_i[v_i < 0.5] = -1
v_i[v_i >= 0.5] = 1
hvs = torch.autograd.grad(gradsH, params, grad_outputs=v, only_inputs=True, retain_graph=True)
hutchinson_trace = []
for hv, vi in zip(hvs, v):
param_size = hv.size()
if len(param_size) <= 1: # for Bias and LN
tmp_output = torch.abs( hv * vi) + 0.
hutchinson_trace.append( tmp_output )
elif len(param_size) == 2: # Matrix
tmp_output1 = torch.abs((hv * vi + 0.)).view(-1, self.block_length) # flatten to N times self.block_length
tmp_output2 = torch.abs(torch.sum(tmp_output1, dim=[1])).view(-1) / float(self.block_length)
tmp_output3 = tmp_output2.repeat_interleave(self.block_length).view(param_size)
hutchinson_trace.append(tmp_output3)
# this is for distributed setting
if not self.single_gpu:
for output1 in hutchinson_trace:  # all-reduce the averaged Hessian traces across workers
dist.all_reduce(output1)
return hutchinson_trace
def step(self, gradsH=None, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
hut_trace = self.get_trace(gradsH)
for group in self.param_groups:
for i, p in enumerate(group['params']):
if p.grad is None:
continue
# grad = p.grad.data.float()
grad = deepcopy(gradsH[i].data.float())
if grad.is_sparse:
raise RuntimeError('AdaHessian does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state['exp_hessian_diag_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_hessian_diag_sq'] = state['exp_hessian_diag_sq'].type_as(p_data_fp32)
exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_hessian_diag_sq.mul_(beta2).addcmul_(1 - beta2, hut_trace[i] , hut_trace[i])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if self.hessian_power < 1:
denom = ((exp_hessian_diag_sq.sqrt() / math.sqrt(bias_correction2)) ** self.hessian_power).add_(group['eps'])
else:
denom = (exp_hessian_diag_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
# do weight decay
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss
``` |
{
"source": "joaompereira/SPM",
"score": 2
} |
#### File: SPM/python/compiler_options.py
```python
import numpy as np
try:
from numba import njit
compiler_decorator = njit
NUMBA_COMPILER = True
except ModuleNotFoundError:
def compiler_decorator(fun):
return fun
NUMBA_COMPILER = False  # numba is unavailable, so fall back to the BLAS/NumPy implementations below
BLAS_DOT = False
if not NUMBA_COMPILER:
try:
from scipy.linalg.blas import dnrm2 as norm
from scipy.linalg.blas import ddot as dot
BLAS_DOT = True
except ModuleNotFoundError:
pass
if not BLAS_DOT:
dot = np.dot
@compiler_decorator
def norm(v):
return np.sqrt(np.dot(v, v))
``` |
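Whichever backend gets selected (numba, SciPy BLAS, or plain NumPy), `dot` and `norm` behave the same way. A trivial check, assuming the module above has been imported into the current namespace:
```python
import numpy as np

v = np.array([3.0, 4.0])
print(norm(v))     # 5.0
print(dot(v, v))   # 25.0
```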
{
"source": "joaompinto/2DExplorer",
"score": 3
} |
#### File: data/items/Item-BronzeAxe.py
```python
import Item
def setter():
#name,ID,weaponPicID,damage,power,skill
x = Item.tool('Bronze Axe',5,4,5,5,'axe','Woodcutting')
return x
```
#### File: data/items/Item-Copper.py
```python
import Item
def setter():
x = Item.item('Copper',13)
return x
```
#### File: data/items/Item-IronSword.py
```python
import Item
def setter():
x = Item.weaponItem('Iron Sword',1,0,10,"Swing")
return x
```
#### File: data/items/Item-StoneBrick.py
```python
import Item
class stoneBrick(Item.itemTile):
def __init__(self):
super().__init__('Stone Brick',0,0)
self.recipe = [(12,2)]
self.craftAmount = 1
def setter():
x = stoneBrick()
return x
```
#### File: data/items/Item-WoodenBack.py
```python
import Item
class woodenBack(Item.itemBackTile):
def __init__(self):
super().__init__('Wooden Back',8,16)
self.recipe = [(6,1)]
self.craftAmount = 2
def setter():
x = woodenBack()
return x
```
#### File: data/items/Item-WoodenPickaxe.py
```python
import Item
class woodenPickaxe(Item.tool):
def __init__(self):
super().__init__('Wooden Pickaxe',14,5,3,4,'pick','Mining')
self.recipe = [(6,3),(11,2)]
self.craftingAmount = 1
def setter():
#name,itemID,weaponPictureID,damage,power,type,skill
x = woodenPickaxe()
return x
```
#### File: data/tiles/Tile-Coal.py
```python
from Tile import tile
class coal (tile):
def __init__(self,parentWorld,colRow):
super().__init__(parentWorld,colRow,14,1,255)
self.durability = 40
self.drop = 7
def setter(parentWorld, colRow):
x = coal(parentWorld, colRow)
return x
```
#### File: data/tiles/Tile-Copper.py
```python
from Tile import tile
class copper (tile):
def __init__(self,parentWorld,colRow):
super().__init__(parentWorld,colRow,25,1)#parentWorld,colRow,tileID,z
self.durability = 45
self.drop = 13
self.lightBlock = 25
def setter(parentWorld, colRow):
x = copper(parentWorld, colRow)
return x
```
#### File: data/tiles/Tile-Grass.py
```python
from Tile import tile
class grassTile (tile):
def __init__(self,parentWorld,colRow):
super().__init__(parentWorld,colRow,2,1,255)
self.durability = 15
def special(self):
if self.parentWorld.physics.touchingTile((self.column*32,(self.row-1)*32),1) == True:
self.changeTile(4)
else:
if self.row <= self.parentWorld.rows // 4:
self.changeTile(8)
def setter(parentWorld, colRow):
x = grassTile(parentWorld, colRow)
return x
```
#### File: data/tiles/Tile-WoodenDoorMiddleClose.py
```python
from Tile import tile
class woodenDoorMiddleClose (tile):
def __init__(self,parentWorld,colRow):
super().__init__(parentWorld,colRow,21,1,255)
self.durability = 20
self.drop = 10
self.physical = True
self.drawBack = True
self.lightBlock = 25
self.updatePic()
self.tool = "axe"
def special(self):
if self.parentWorld.tiles[self.column][self.row + 1][self.z] == None:
self.parentWorld.removeTile(self.column,self.row,self.z)
elif self.parentWorld.tiles[self.column][self.row + 1][self.z].tileType != 22:
self.parentWorld.removeTile(self.column,self.row,self.z)
if self.parentWorld.tiles[self.column][self.row - 1][self.z] == None:
self.parentWorld.removeTile(self.column,self.row,self.z)
elif self.parentWorld.tiles[self.column][self.row - 1][self.z].tileType != 20:
self.parentWorld.removeTile(self.column,self.row,self.z)
def onClick(self,obj):
self.changeTile(18)
self.parentWorld.tiles[self.column][self.row - 1][self.z].changeTile(17)
self.parentWorld.tiles[self.column][self.row + 1][self.z].changeTile(19)
self.relight()
def setter(parentWorld, colRow):
x = woodenDoorMiddleClose(parentWorld, colRow)
return x
```
#### File: 2DExplorer/Game/Main.py
```python
import sys,random, time, math, os, os.path
import pygame
import ParticleEngine#the particle engine
from pygame.locals import *
import AI, operator, Spells, Camera, Overlay, World, Item,copy,Inventory, Mining, Woodcutting,CharacterList, Character, BodyPart, Physics,ItemList
import ctypes
activeLighting = True
# size of the screen
pygame.init()# initialize pygame
screen = pygame.display.set_mode((640,480))#create the screen
#screen = pygame.display.set_mode((1920,1080),FULLSCREEN|HWSURFACE|DOUBLEBUF)
screenSize = screen.get_size()
spawners = []#global list containing the spawners
def simpleDist(xy1,xy2):#returns the distance between two points
dist = abs(math.sqrt((xy2[0] - xy1[0])**2 + (xy2[1] - xy1[1])**2))
return dist
def touchingEntity(x,y,length):#returns any worldBack.entities within length distance of a point
touching = []#a list, since more than one entity may be touching the point
for i in range(len(worldBack.entities)):
dist = simpleDist((worldBack.entities[i].x,worldBack.entities[i].y),(x,y))#gets the distance
if dist <= length:#if the distance is less than the length
touching.append(i)#add it to the list
return touching
#below I attempted a binary search algorithm to do the same thing, it failed
def getCoordOnObject(deg,r):#returns where a object must be placed to be on the end of an object rotated deg degress with r length
theta = math.radians(deg)################################MUST CONVERT TO RADIANS WHEN ROTATING IN PYTHON################################
y = math.cos(theta) * r
x = math.sin(theta) * r
return (x,y)
def quitGame ():#exits the game
pygame.quit()
sys.exit()
def blit_alpha(target, source, location, opacity):#blits with an alpha value (opacity), target being the destination surface and source the surface to be blitted
x = location[0]
y = location[1]
temp = pygame.Surface((source.get_width(), source.get_height())).convert()# creates a temporary surface
temp.blit(target, (-x, -y))
temp.blit(source, (0, 0))
temp.set_alpha(opacity)#sets the opacity
target.blit(temp, location)
def loadPic(imgName):#loads a picture with name imgName
try:
img = pygame.image.load(imgName)
img = img.convert_alpha()#convert makes it draw faster
return img
except:
print('Image Load Error:',imgName)
return False
class projectile:#this class is for any projectiles shot in the game world
gravity = 2#gravity modifier
#http://www.sauropodstudio.com/dev-diary-number-eight/
def __init__(self,pic, xy, vel, damage, owner = None, goal= None, impactRad = 10, duration = 100, opacity = 255, grav=False, collisionCheck = True):#constructor, contains plenty of options
self.pic = pic
self.orig = pic
self.size = self.pic.get_size()
self.x = xy[0]
self.y = xy[1]
self.xVel = vel[0]
self.yVel = vel[1]
self.speed = vel[0]
self.owner = owner#owner being the character that shot the projectile
self.damage = damage
self.goal = goal#the goal of the projectile
self.impactRad = impactRad#impact radius
self.duration = duration#duration
self.opacity = opacity
top = 0
for i in range(len(projectiles)):
if projectiles[i].ID >= top:#if the ID is greater than or equal to the highest ID
top = projectiles[i].ID + 1#make top higher than it
self.ID = top
self.faceRight = False
self.grav = grav#gravity boolean
self.collisionCheck = collisionCheck#if collisions with tiles are allowed
projectiles.append(self)
if self.goal != None and self.grav == True:
try:#this is failed projectile physics, ignore
x = abs((self.x + self.size[0]) - goal[0])
y = abs(self.y - goal[1])
g = projectile.gravity
#angle = math.atan(self.speed**2 + math.sqrt(abs((self.speed**4-g*(g*x**2+2*y*self.speed**2))/g*x)))
power = math.sqrt(0.5*projectile.gravity*x**2*(math.tan(math.radians(45))**2+1) / x*math.tan(math.radians(45)) + y)
self.xVel = power* math.cos(math.radians(45))
if self.goal[0] < self.x:
self.xVel = -self.xVel
self.yVel = power* math.sin(math.radians(45))
except:
pass
def killSelf(self):#destroys current projectile
for i in range(len(projectiles)):
if projectiles[i].ID == self.ID:
projectiles.remove(projectiles[i])
break
def applyPhysics(self):#apply physics to itself
if self.grav == True:
self.yVel -= projectile.gravity
self.x += self.xVel
self.y -= self.yVel
if self.collisionCheck == True:
if physics.touchingTile((self.x,self.y)):#die if touching a tile
self.killSelf()
def draw(self):#draw to the camera
mainCamera.drawToCamera(self.pic,self.size,(self.x, self.y),self.opacity)
def reflectY(self):#flip
if self.faceRight == False:
self.pic = pygame.transform.flip(self.pic, True, False)
self.faceRight = True
else:
self.pic = pygame.transform.flip(self.pic, True, False)
self.faceRight = False
def main(self):#main function
self.applyPhysics()
if self.xVel > 0 and self.faceRight == False:
self.reflectY()
if self.xVel < 0 and self.faceRight == True:
self.reflectY()
touch = touchingEntity(self.x, self.y,self.impactRad)
if len(touch) != 0:#below is the same as character impact
for i in range(len(touch)):
if worldBack.entities[touch[i]].ID != self.owner.ID and worldBack.entities[touch[i]].team != self.owner.team and worldBack.entities[touch[i]].invincible == False:
if self.faceRight == False:
distance = -15
else:
distance = 15
worldBack.entities[touch[i]].takeDamage(self.damage,distance=distance)
if worldBack.entities[touch[i]].health <=0 and self.owner != None:
self.owner.rewardFromKill(worldBack.entities[touch[i]])
self.killSelf()#except for this line of course
self.duration -= 1
if self.duration <= 0:
self.killSelf()
self.draw()
def makeText(font,text,col=(255,255,255)):#returns a rendered font
toDraw = font.render(text, 1, col)
return toDraw
class button:#creates a button for use in menus
def __init__(self,screen,font,text, xy, col, func, hoverCol = (250,250,0)):#construct requires a surface, font, text, color, string saying what to do when clicked
#and the option of what color the text is when hovered default yellow
self.screen= screen
self.font = font
self.text = text
self.col = col
self.hoverCol = hoverCol
self.default = makeText(self.font,text, col)#rendered font without hovered
self.hovered = makeText(self.font,text, hoverCol)#rendered font when hovered
self.size = self.default.get_size()#size
self.xy = (xy[0] - (self.size[0]//2),xy[1] - (self.size[1]//2))
self.hover = False#boolean for being hovered or not
self.func = func#click function is a string
def changeText(self,newText):
self.text = newText
self.default = makeText(self.font,newText, self.col)#rendered font without hovered
self.hovered = makeText(self.font,newText, self.hoverCol)#rendered font when hovered
def draw(self):#draw a button
if self.hover == False:#if not hovered
self.screen.blit(self.default, self.xy)#draw normal
else:
self.screen.blit(self.hovered, self.xy)#draw hovered
class mainMenu:#just a menu
def __init__(self,screen):#construct requires a surface
self.screen = screen
self.toDraw = []#what needs to be drawn
self.buttons = []#any buttons
def newTitle(self, font, text, xy, col = (255,255,255)):#creates a title and places it at xy
toDraw = makeText(font,text,col)
size = toDraw.get_size()
newXY= (xy[0] - (size[0]//2),xy[1] - (size[1]//2))#places on its center
toDraw = {'Draw':toDraw, 'XY':newXY}#dictionary of what to draw and where is appended to the list
self.toDraw.append(toDraw)
def newButton(self, font, text, xy, col = (255,255,255), func = None, hoverCol = (250,250,0)):#creates a button
self.buttons.append(button(self.screen,font,text,xy,col,func))#adds the new button to the button list
def draw(self):#draws the menu
for i in range(len(self.toDraw)):
self.screen.blit(self.toDraw[i]['Draw'],self.toDraw[i]['XY'])#draws everything that needs drawing, excluding buttons
for i in range(len(self.buttons)):#draws buttons
self.buttons[i].draw()
def buttonHovers(self, mxy, click):#checks for button hovers and adjusts them accordingly, mxy is the mouse coord and click is a boolean for clicking
for i in range(len(self.buttons)):
if physics.colliding(mxy,(1,1),self.buttons[i].xy,self.buttons[i].size) == True:#checks if the mouse is colliding
self.buttons[i].hover = True
if click == True:
exec(self.buttons[i].func)#if they clicked run the function stored in the button
else:
self.buttons[i].hover = False
def loadPictures(path):#loads every picture in a directory
files = os.listdir(path)#gets the direcotry
images = []#init empty image list
for i in range(len(files)):
imagePath = path + files[i]#gets the path of each file
pic = loadPic(imagePath)
if pic != False:
images.append(pic)#loads the picture and adds it to a list
return images
itemList = ItemList.itemList(loadPictures('data/images/Items/'))
weaponPics = loadPictures('data/images/Weapons/')#empty list of weapons
armorPics = loadPictures('data/images/Armor/')
play = False#play is a boolean for whether the player has pressed play
pygame.display.set_caption('Game2')#adds caption
pygame.mouse.set_visible(1)#makes mouse visible
#pygame.key.set_repeat(1,10)#allows keys to be held down
font = pygame.font.Font(pygame.font.match_font('bitstreamverasans'), 24)#creates a default font
buttonFont = pygame.font.Font(pygame.font.match_font('bitstreamverasans'), 36)#creates a button font
clock = pygame.time.Clock()#creates a game clock
background = pygame.Surface(screen.get_size())#creates a background
background = background.convert()
tileCracks = []
tileCracks.append(loadPic('data/images/Effects/Crack1.gif'))
tileCracks.append(loadPic('data/images/Effects/Crack2.gif'))
tileCracks.append(loadPic('data/images/Effects/Crack3.gif'))
mainCamera = Camera.camera(screen,screenSize[0]//2,screenSize[1]//2,screenSize)#creates a camera for viewing the menu
background.fill((150, 150, 150))#gray background
menBack = World.world(screenSize,mainCamera,loadPictures('data/images/tiles/'),itemList,tileCracks,weaponPics,armorPics,None,32,False,special=False)#creates a mini world for the menu
backFill = random.randint(0,len(menBack.tilePics)-1)#chooses a random num from 0,1
menBack.fill(backFill)#fill with the block
physics =Physics.physics(menBack)
def toggle(mainVar):
var = mainVar
if var == True:
var = False
elif var == False:
var = True
return var
men = mainMenu(screen)#creates a menu
titleFont = pygame.font.Font(pygame.font.match_font('impact'), 55)#creates a title font
titleFont.set_underline(True)#underlines the title
title = men.newTitle(titleFont,'Game 2',(screenSize[0]//2,(screenSize[1]//2) - 100))#creates a title
b = men.newButton(buttonFont,'Play',(screenSize[0]//2,(screenSize[1]//2) + 100), func='global play; play = True;')#creates a button that makes play true
b2 = men.newButton(buttonFont,'Controls',(screenSize[0]//2,(screenSize[1]//2) + 100 + buttonFont.get_height() + 10), func='global controls; controls = True;')
b3 = men.newButton(buttonFont,'Options',(screenSize[0]//2,(screenSize[1]//2) + 100 + buttonFont.get_height() + 10 + buttonFont.get_height()), func='global options; options = True;')
controlMen = mainMenu(screen)#menu for the controls
titleFont = pygame.font.Font(pygame.font.match_font('georgia'), 24)#creates a title font
title = controlMen.newTitle(titleFont,'Controls',(screenSize[0]//2,(screenSize[1]//2) - 100))#creates the title
sizeDown = titleFont.get_height()#size down is how much to move down for the next sentence
l1 = controlMen.newTitle(titleFont,'Use A and D to move left and right, respectively.',(screenSize[0]//2,(screenSize[1]//2) - sizeDown - 20))#creates more instructions
sizeDown = sizeDown - titleFont.get_height()
l2 = controlMen.newTitle(titleFont,'Left click to attack, right click to place a tile.',(screenSize[0]//2,(screenSize[1]//2) - sizeDown - 20))
sizeDown = sizeDown - titleFont.get_height()
l3 = controlMen.newTitle(titleFont,'Use Spacebar to jump.',(screenSize[0]//2,(screenSize[1]//2) - sizeDown - 20))
sizeDown = sizeDown - titleFont.get_height()
b3 = controlMen.newButton(buttonFont,'Back',(screenSize[0]//2,(screenSize[1]//2) + 50), func='global controls;controls = False;')#button that goes back to the menu
lightText = 'global activeLighting;activeLighting = toggle(activeLighting);optionMen.buttons[len(optionMen.buttons)-2].changeText("Lighting: "+str(activeLighting));'
optionMen = mainMenu(screen)
title= optionMen.newTitle(titleFont,'Options',(screenSize[0]//2,(screenSize[1]//2) - 100))
sizeDown = titleFont.get_height()
b1 = optionMen.newButton(buttonFont,'Lighting: '+str(activeLighting),(screenSize[0]//2,(screenSize[1]//2)), func=lightText)
sizeDown = sizeDown + buttonFont.get_height()
b2 = optionMen.newButton(buttonFont,'Back',(screenSize[0]//2,(screenSize[1]//2)+ sizeDown), func='global options;options = False;')
clicked = False#clicked is false
controls = False#not on controls screen
options = False
while play == False:
mx = pygame.mouse.get_pos()#find mouse
my = mx[1]
mx = mx[0]
for event in pygame.event.get():#finds events
if event.type == pygame.QUIT:#if pressed red x
quitGame()#exit
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:#if pressed escape
quitGame()#quit
elif event.type == MOUSEBUTTONDOWN:#if clicked
if event.button == 1:
clicked = True#adjust click accordingly
elif event.type == MOUSEBUTTONUP:#if released click
if event.button == 1:
clicked = False#not clicking
screen.blit(background,(0,0))#draws background
menBack.draw()#draws tiles
if controls == False and options == False:#not on controls
men.buttonHovers((mx,my),clicked)#checks for hovers
men.draw()#draws menu
elif controls == True:
controlMen.buttonHovers((mx,my),clicked)#checks for hovers
controlMen.draw()#draws controls
elif options == True:
optionMen.buttonHovers((mx,my),clicked)#checks for hovers
optionMen.draw()#draws controls
pygame.display.flip()#refresh
clock.tick(60)#set framerate
if play == True:
background.fill((173, 216, 230))#sets background to sky color
mainCamera = Camera.camera(screen,0,0,screenSize)#creates camera
over = Overlay.overlay(screen,font,ItemList.itemList(loadPictures('data/images/Items/')))#creates overlay focussed on player
worldBack = World.world((10000, 5000),mainCamera,loadPictures('data/images/tiles/'),itemList,tileCracks,weaponPics,armorPics,over,lighting=activeLighting)#creates a world
worldBack.worldGen('Grassland')#generates the world
AIOn = worldBack.AIOn#global variable controlling whether the AI is active
charList = CharacterList.characterList(armorPics)
player = worldBack.addCharacter(0,True)#creates the player on team player
player.changeXY(500)#move him/her right 500px
bottomRow = worldBack.size[1]//32#finds bottom row
def displayFPS(font):#displays the frames per second
fps = int(clock.get_fps())#gets fps
fpsFont = font.render(str(fps), 1, (250, 250, 250))#font renders it
fpsPos = fpsFont.get_rect()
fpsPos.centerx = 50
fpsPos.centery = 50
screen.blit(fpsFont,fpsPos)
def setNight():#sets time to night, unused
background.fill((0,0,0))
worldBack.baseLightLevel = 0
worldBack.lighting.lightSection(0,worldBack.columns,0,worldBack.rows)
ParticleEngine.setGravity(True)#turns on particle engine gravity
projectiles = []#makes empty list of projectiles
over.updateHealthbar()#update the healthbar
#player.changeWeapon(weapons[0])#gives player a weapon
player.inv.addToInventory(1,1)
player.inv.addToInventory(3,1)
player.inv.addToInventory(5,1)
player.inv.addToInventory(0,80)
player.inv.addToInventory(2,80)
player.inv.addToInventory(10,1)
player.inv.addToInventory(8,80)
toFocus = 0#unused, kinda
mouseInv = (None,None)
clickDel = 0.5
clickTime = time.time()
while True:
mx = pygame.mouse.get_pos()
my = mx[1]
mx = mx[0]
relMX = mx + (mainCamera.centX - (mainCamera.fov[0]//2))#gets the mouse coordinates in the gameworld, not the screen
relMY = my + (mainCamera.centY - (mainCamera.fov[1]//2))
playerHover = False
otherHover = False
craftHover = False
if over.fullInv == True and over.obj == player:
for i in range(len(over.slots)):
if physics.colliding((mx,my),(1,1),over.slots[i],over.slotSize) == True:
player.selectedSlot = i
playerHover = True
if player.openInv != False and over.obj == player:
for i in range(len(over.otherSlots)):
if physics.colliding((mx,my),(1,1),over.otherSlots[i],over.slotSize) == True:
player.openInv.selectedSlot = i
otherHover = True
if over.fullInv == True and player.openInv == False and over.obj == player:
for i in range(len(over.craftSlots)):
if physics.colliding((mx,my),(1,1),over.craftSlots[i],over.slotSize) == True:
over.craftHighlight = i
craftHover = True
if craftHover == False:
over.craftHighlight = None
pressedKeys = pygame.key.get_pressed()#makes a list of pressed keys
for key in range(len(pressedKeys)):
if pressedKeys[key] == True:
if key == K_SPACE:#if space
if player.control == True:
player.jump()#jump
elif key == K_a:#if a
if player.control == True:
player.addSpeed(-2)#move left
elif key == K_d:# if d
if player.control == True:
player.addSpeed(2)#move right
elif key == K_RETURN:#if enter
player.speak('Hello!',5)#say hello for 5 seconds
elif key == K_o:#if o
AIOn = True#turn AI on
elif key == K_f:#if f
AIOn = False#turn Ai off
elif key == K_u:#if pressed u
player.setLevel(player.level+1)#level up player for testing of course
elif key == K_n:#if pressed n
player.mining.addExp(500)
elif key == K_g:
user32 = ctypes.windll.user32
screen = pygame.display.set_mode((user32.GetSystemMetrics(0),user32.GetSystemMetrics(1)),FULLSCREEN)
screenSize = screen.get_size()
mainCamera.changeScreen(screen,(screenSize[0],screenSize[1]))
background = pygame.Surface(screen.get_size())#creates a background
background = background.convert()
background.fill((173, 216, 230))
over.changeScreen(screen,screenSize)
for event in pygame.event.get():
if event.type == pygame.QUIT:
quitGame()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
if over.fullInv == False:
quitGame()
else:
if over.fullInv == True:
if mouseInv[0] != None:
player.inv.addToInventory(mouseInv[0].ID,mouseInv[1])
mouseInv = (None,None)
over.toggleInv()
elif event.key == K_i:
if over.fullInv == True:
if mouseInv[0] != None:
player.inv.addToInventory(mouseInv[0].ID,mouseInv[1])
mouseInv = (None,None)
over.toggleInv()
elif event.key == K_UP:
if player.selectedSlot > 0:
player.selectedSlot -= 1
elif event.key == K_DOWN:
if over.fullInv == False:
if player.selectedSlot < over.slotAmount:
player.selectedSlot += 1
if player.selectedSlot > over.slotAmount:
player.selectedSlot -= over.slotAmount
else:
if player.selectedSlot < player.inv.inventorySize:
player.selectedSlot += 1
elif event.type == MOUSEBUTTONDOWN:#if click
if event.button == 1:
if player.control == True:
#player.attack()#if left click attack
#player.castSpell('Fireblast')
#player.shootArrow(1,(mx,my),7)
#worldBack.removeTile(relMX//32,relMY//32,1)#remove any clicked on tiles
if over.fullInv == False:
player.activateHeldItem((relMX,relMY))
if time.time() - clickTime >= clickDel:
clickTime = time.time()
if playerHover == True:
if mouseInv[0] == None:
if player.inv.inventory[player.selectedSlot][0] != None:
mouseInv = player.inv.inventory[player.selectedSlot]
player.inv.removeFromInventory(player.selectedSlot,player.inv.inventory[player.selectedSlot][1])
else:
if player.inv.inventory[player.selectedSlot][0] == None:
player.inv.inventory[player.selectedSlot] = mouseInv
mouseInv = (None,None)
elif player.inv.inventory[player.selectedSlot][0].ID == mouseInv[0].ID:
player.inv.addToInventory(mouseInv[0].ID,mouseInv[1])
mouseInv = (None,None)
elif otherHover == True:
if mouseInv[0] == None:
if player.openInv.inv.inventory[player.openInv.selectedSlot][0] != None:
mouseInv = player.openInv.inv.inventory[player.openInv.selectedSlot]
player.openInv.inv.removeFromInventory(player.openInv.selectedSlot,player.openInv.inv.inventory[player.openInv.selectedSlot][1])
else:
if player.openInv.inv.inventory[player.openInv.selectedSlot][0] == None:
player.openInv.inv.inventory[player.openInv.selectedSlot] = mouseInv
mouseInv = (None,None)
elif player.openInv.inv.inventory[player.openInv.selectedSlot][0].ID == mouseInv[0].ID:
player.openInv.inv.addToInventory(mouseInv[0].ID,mouseInv[1])
mouseInv = (None,None)
elif craftHover == True:
player.crafting.craftItem(player.craftable[over.craftHighlight])
if over.fullInv == True and playerHover == False and otherHover == False:
mouseInv = (None,None)
elif event.button == 3:#if right click
col = relMX // 32
row = relMY // 32
if worldBack.tiles[col][row][1] != None:
worldBack.tiles[col][row][1].onClick(player)
if worldBack.tiles[col][row][1].inv != None:
player.openInv = worldBack.tiles[col][row][1]
#worldBack.addTile(relMX//32,relMY//32, 11, 1,lightLevel = 255, physical = False)#place torch
#worldBack.addTile(relMX//32,relMY//32, 0, 1)
#player.inv.removeFromInventory(0)
#worldBack.placeBlueprint('data/blueprints/House1.bprt',(relMX,relMY))
#worldBack.tree((relMX,relMY))
elif event.button == 4:
if player.selectedSlot > 0:
player.selectedSlot -= 1
elif event.button == 5:
if over.fullInv == False:
if player.selectedSlot < over.slotAmount:
player.selectedSlot += 1
if player.selectedSlot > over.slotAmount:
player.selectedSlot -= over.slotAmount
else:
if player.selectedSlot < player.inv.inventorySize:
player.selectedSlot += 1
elif event.button == 2:
toFocus += 1
if toFocus == len(worldBack.entities):
toFocus = 0
selectEnemy = False#if hovering an enemy
for i in range(len(worldBack.entities)):
xy = mainCamera.getRelative((worldBack.entities[i].x,worldBack.entities[i].y))#gets the relative position of the characters on the camera
if worldBack.entities[i].ID != over.obj.ID:#not hovering self
if physics.colliding((mx,my),(1,1),xy,worldBack.entities[i].size) == True:#and colliding
over.hoverEnemy(worldBack.entities[i])#print enemy info
selectEnemy = True
if selectEnemy == False:
over.unhover()#no enemy info if no hover
screen.blit(background, (0,0))#draw background
worldBack.draw()
mainCamera.focus((worldBack.entities[toFocus].x + (worldBack.entities[toFocus].size[0] // 2),worldBack.entities[toFocus].y + (worldBack.entities[toFocus].size[1]//2)))
#mainCamera.focus((player.x + (player.size[0] // 2),player.y + (player.size[1]//2)))#focuses the camera on the player
over.obj = worldBack.entities[toFocus]
for i in range(len(worldBack.entities)):
try:
worldBack.entities[i].main()#runs entity main functions
except:
pass
for i in range(len(projectiles)):
try:
projectiles[i].main()#runs projectile main functions
except:
pass
if over.obj.dead == True:#if the overlay object is dead
over.setMessage('You have died :(',100,(255,0,0))#show condolences
for i in range(len(worldBack.spawners)):
worldBack.spawners[i].main()#runs spawners
for town in worldBack.towns:
town.main()
ParticleEngine.main(screen,mainCamera,worldBack.size)#runs particle engine
over.updateHealthbar()#update healthbar
over.draw()#draw overlay
if mouseInv[0] != None:
over.drawItem(mouseInv,(mx,my))
displayFPS(font)#shows fps
pygame.display.update((0,0,screenSize[0],screenSize[1]))#updates screen
clock.tick(60)#constrain fps
```
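`getCoordOnObject` in the file above converts a rotation angle (in degrees) and an arm length into an (x, y) offset via sin/cos after converting to radians. A standalone numeric check of that formula, without any of the pygame setup:
```python
import math

def coord_on_object(deg, r):
    # same formula as getCoordOnObject above
    theta = math.radians(deg)
    return (math.sin(theta) * r, math.cos(theta) * r)

print(coord_on_object(0, 10))    # (0.0, 10.0)
print(coord_on_object(90, 10))   # (10.0, ~0.0)
```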
#### File: 2DExplorer/Game/Overlay.py
```python
import pygame, time,math
pygame.init()
class overlay:#overlay is the heads up display that appears onscreen containing information for the user to read
def __init__(self,screen,font,itemList,obj=None):#constructor requires a screen, a font, an item list, and a character object
self.obj = obj
self.enemy = None#enemy is a character object whose info will be displayed upon mouse hover
self.screen = screen
self.items = itemList
self.font = font
self.size = screen.get_size()
self.healthbarLen = 200#length of the health bar
self.EXPbarLen = 200#length of the experience bar
self.EXPbarX = (self.size[0] // 2) - (self.healthbarLen // 2)#x coordinate of the exp bar
self.healthbarX = (self.size[0] // 2) - (self.healthbarLen // 2)#x coordinate of the players health bar
self.currentHealthLen = 0#length of the player's current health
self.hovernameColor = (250, 250, 250)#color of the hovered enemy's name
self.message = None#the message being displayed on screen
self.slotSize = (32,32)
self.slotAmount = 0
self.fullInv = False
self.invToggleTime = time.time()
self.invToggleWait = 1
self.messageQue = []
self.slots = []
self.otherSlots = []
self.maxCrafts = (self.size[0]//2)//self.slotSize[0]
self.craftSlots = []
self.craftHighlight = None
def getHealthbarLen(self,obj):#returns the length of the health bar
units = self.healthbarLen /obj.maxHealth
barLen = obj.health * units
return barLen
def updateHealthbar(self):#updates the healthbar with the object's health
self.currentHealthLen = self.getHealthbarLen(self.obj)
def drawHealthbar(self):#draws the health bar on the screen
redBar = (self.healthbarX,self.size[1]-30,self.healthbarLen + 4,20)#redbar is a rect object
greenBar = (self.healthbarX + 2, self.size[1] - 28, self.currentHealthLen, 16)#green bar is a rect object showing the player's current health
pygame.draw.rect(self.screen, (200,0,0), redBar)#draws both
pygame.draw.rect(self.screen, (0,200,0), greenBar)
def hoverEnemy(self,obj):#assigns enemy to an object
self.enemy = obj
def unhover(self):#unassigns enemy
self.enemy = None
def drawEnemyHover(self):#draw information about the enemy
if self.enemy != None:
greenLen = self.getHealthbarLen(self.enemy)
name = self.font.render(str.capitalize(self.enemy.name)+' Level: '+str(self.enemy.level), 1, self.hovernameColor)#creates a rendered font of the character's health and level
nameLoc = name.get_rect()
nameLoc.centerx = self.size[0]//2#sets the center x and y coordinates of the enemy healthbar
nameLoc.centery = name.get_size()[1]//2
name= name.convert_alpha()
redBar = (self.healthbarX,name.get_size()[1] + 10,self.healthbarLen + 4,20)
greenBar = (self.healthbarX + 2, name.get_size()[1] + 12, greenLen, 16)
self.screen.blit(name,nameLoc)#draws the name and the health bars
pygame.draw.rect(self.screen, (200,0,0), redBar)
pygame.draw.rect(self.screen, (0,200,0), greenBar)
def drawLevel(self):#draws the characters level
level = 'Level: '+str(self.obj.level)
toDraw = self.font.render(level, 1, (255,255,255))#renders the font
pos = toDraw.get_rect()
pos.centerx = self.EXPbarX + (toDraw.get_size()[0]//2)#puts the level in the appropriate position
pos.centery = (self.size[1] -47) - (toDraw.get_size()[1]//2)
toDraw = toDraw.convert_alpha()#converts
self.screen.blit(toDraw, pos)
def getEXPLen(self):#finds the length of the exp bar
units = self.EXPbarLen / self.obj.expToLevel
barLen = self.obj.exp * units
return barLen
def drawExpBar(self):#draws the exp bar
blackBar = (self.EXPbarX,self.size[1]-45,self.EXPbarLen + 4,10)
yellowBar = (self.EXPbarX + 2, self.size[1] - 43, self.getEXPLen(), 6)#yellow bar is the amount of exp
pygame.draw.rect(self.screen, (0,0,0), blackBar)
pygame.draw.rect(self.screen, (250,250,0), yellowBar)
def setMessage(self,msg,dur=3,col=(255,255,255)):#sets a message to be drawn on the screen, msg is the message, dur is the duration, and col is the color
toDraw = self.font.render(msg, 1, col)#renders
pos = toDraw.get_rect()
pos.centerx = (self.size[0]//2)#puts in middle of screen
pos.centery = (self.size[1]//2) - (toDraw.get_size()[1]//2)
self.message = {'Draw':toDraw, 'Pos':pos, 'Dur':dur, 'Start':time.time()}#sets message attribute
def addToQue(self,message,dur=3,col=(255,255,255)):
self.messageQue.append((message,dur,col))
def drawMoney(self):#draws the character's money
toDraw = self.font.render('$'+str(self.obj.money),1, (250,250,0))
pos = toDraw.get_rect()
pos.bottomleft = (5,self.screen.get_height() - 5)#sets bottom left corner of the font to just above the bottom left corner of the screen
toDraw.convert_alpha()
self.screen.blit(toDraw,pos)
def drawItem(self,itemTup,xy):
if itemTup[0] != None:
font = pygame.font.Font(None,18)
pic = self.items.getPictures()[itemTup[0].ID]
self.screen.blit(pic,xy)
if itemTup[1] > 1:
toDraw = font.render(str(itemTup[1]),1,(255,255,255))
self.screen.blit(toDraw,(xy[0]+toDraw.get_width()+2,xy[1] + toDraw.get_height()))
def drawSlot(self,slotNum,xy,font,obj):
i = slotNum
slot = pygame.Surface(self.slotSize)
slot.fill((0,125,200))
border = (0,0,self.slotSize[0],self.slotSize[1])
if obj.selectedSlot != i:
pygame.draw.rect(slot,(0,0,0),border,1)
else:
pygame.draw.rect(slot,(255,255,255),border,2)
if obj.inv.inventory[i][0] != None:
pic = self.items.getPictures()[obj.inv.inventory[i][0].ID]
slot.blit(pic,((self.slotSize[0]//2)-(pic.get_width()//2),(self.slotSize[1]//2)-(pic.get_height()//2)))
if obj.inv.inventory[i][1] > 1:
toDraw = font.render(str(obj.inv.inventory[i][1]),1,(255,255,255))
slot.blit(toDraw,(self.slotSize[0]-toDraw.get_width() - 2,self.slotSize[1]-toDraw.get_height()))
self.screen.blit(slot,xy)#(slot,(self.screen.get_width()-self.slotSize[0],halfScreen+(i*self.slotSize[1])))
def drawQuickinventory(self):
halfScreen = self.screen.get_height() // 2
slotAmount = halfScreen
slotAmount = slotAmount//self.slotSize[1]
self.slotAmount = slotAmount - 1
if self.slotAmount > 9:
self.slotAmount = 9
font = pygame.font.Font(None,18)
for i in range(slotAmount):
self.drawSlot(i,(self.screen.get_width()-self.slotSize[0],halfScreen+(i*self.slotSize[1])),font,self.obj)
def drawFullInventory(self,obj, xStart = None):
slot = -1
yStart = self.slotSize[1]
width = math.ceil(obj.inv.inventorySize/self.slotAmount)*self.slotSize[0]
if xStart == None:
xStart = self.screen.get_width()-width-self.slotSize[0]
font = pygame.font.Font(None,18)
itemName = obj.selectedSlot
try:
if obj.inv.inventory[itemName][0] != None:
itemName = obj.inv.inventory[itemName][0].name
self.screen.blit(font.render(itemName,1,(255,255,255)),(xStart,yStart//3))
except:
pass
addSlot = False
addOtherSlot = False
if self.slots == []:
addSlot = True
if self.otherSlots == []:
addOtherSlot = True
for c in range(math.ceil(obj.inv.inventorySize/self.slotAmount)):
for r in range(self.slotAmount+1):
slot += 1
if slot <= obj.inv.inventorySize:
self.drawSlot(slot,(((c*self.slotSize[0]))+xStart,(r*self.slotSize[1])+yStart),font,obj)
if addSlot == True and obj == self.obj:
self.slots.append(((c*self.slotSize[0])+xStart,(r*self.slotSize[1])+yStart))
elif addOtherSlot == True and obj != self.obj:
self.otherSlots.append(((c*self.slotSize[0])+xStart,(r*self.slotSize[1])+yStart))
def toggleInv(self):
if time.time() - self.invToggleTime > self.invToggleWait:
self.invToggleTime = time.time()
if self.fullInv == True:
self.fullInv = False
self.obj.openInv = False
self.otherSlots = []
if self.obj.selectedSlot > self.slotAmount:
self.obj.selectedSlot = 0
else:
self.fullInv = True
def drawCraftSlot(self,craftNum,xy,highLight=False):
slot=pygame.Surface(self.slotSize)
slot.fill((0,125,200))
border = (0,0,self.slotSize[0],self.slotSize[1])
pic = self.items.getPictures()[self.obj.craftable[craftNum]]
slot.blit(pic,((self.slotSize[0]//2)-(pic.get_width()//2),(self.slotSize[1]//2)-(pic.get_height()//2)))
if highLight == True:
pygame.draw.rect(slot,(255,255,255),border,2)
else:
pygame.draw.rect(slot,(0,0,0),border,1)
self.screen.blit(slot,xy)
def drawCrafts(self):
xStart = self.size[0]//2
yStart = self.slotAmount * self.slotSize[1]
yStart += self.slotSize[1]*3
draws=0
self.craftSlots = []
for i in range(len(self.obj.craftable)):
item = self.obj.craftable[i]
if i < self.maxCrafts:
if self.craftHighlight == i:
self.drawCraftSlot(i,(xStart+(draws*self.slotSize[0]),yStart),True)
else:
self.drawCraftSlot(i,(xStart+(draws*self.slotSize[0]),yStart))
self.craftSlots.append((xStart+(draws*self.slotSize[0]),yStart))
draws += 1
if self.craftHighlight != None:
try:
name = self.items.getItemByID(self.obj.craftable[self.craftHighlight]).name
font = pygame.font.Font(None,18)
name = font.render(name,1,(255,255,255))
self.screen.blit(name,(xStart,yStart-(self.slotSize[1]//2)))
except:
pass
def draw(self):#draws everything
self.drawHealthbar()
self.drawEnemyHover()
self.drawLevel()
self.drawExpBar()
self.drawMoney()
if self.fullInv == False:
self.drawQuickinventory()
else:
self.drawFullInventory(self.obj)
if self.obj.openInv == False:
self.drawCrafts()
if self.obj.openInv != False:
self.drawFullInventory(self.obj.openInv,self.slotSize[0])
self.fullInv = True
if self.message == None:
if len(self.messageQue) > 0:
if self.messageQue[0] != None:
self.setMessage(*self.messageQue[0])
if self.message != None:#if there is a message
if time.time() - self.message['Start'] < self.message['Dur']:#and duration is greater than 0
self.screen.blit(self.message['Draw'],self.message['Pos'])#draws the message
else:
self.message = None
if len(self.messageQue) > 0:
if self.messageQue[0] != None:
del(self.messageQue[0])
def changeScreen(self,screen,size):
self.screen = screen
self.size = size
self.EXPbarX = (self.size[0] // 2) - (self.healthbarLen // 2)#x coordinate of the exp bar
self.healthbarX = (self.size[0] // 2) - (self.healthbarLen // 2)#x coordinate of the players health bar
```
#### File: 2DExplorer/Game/Physics.py
```python
class physics:#universal physics(excluding projectiles because i hate them)
def __init__(self,world,gravity = 2):
self.gravity = gravity
self.world = world
def isAtScreenBottom(self,obj):#unused
if obj.y + obj.size[1] <= screenSize[1]:
return True
else:
return False
def colliding(self,xy1, size1, xy2, size2):#returns true if two rectangles are touching
if xy1[0] + size1[0] > xy2[0] and xy1[0] < xy2[0] + size2[0] and xy1[1] + size1[1] > xy2[1] and xy1[1] < xy2[1] + size2[1]:
return True
def touchingTile(self,xy,z=1,filtered = True):
try:
if self.world.tiles[xy[0]//32][xy[1]//32][z] != None:
if self.world.tiles[xy[0]//32][xy[1]//32][z].tileType != 24:
if filtered == True:
return self.world.tiles[xy[0]//32][xy[1]//32][z].physical
else:
return True
return False
except:
return False
def applyPhys(self,obj):#applies physics to an object
below = False
beside = False
above = False
if self.touchingTile((obj.x + (obj.legLeft.size[0] * 2),(obj.y + obj.size[1]) + obj.yVel)) == True or self.touchingTile((obj.x + obj.armLeft.size[0],(obj.y + obj.size[1]) + obj.yVel)) == True:
obj.yVel = 0
below = True
if self.touchingTile(((obj.x + obj.size[0] + obj.xVel),obj.y)) == True or self.touchingTile(((obj.x + obj.size[0] + obj.xVel),obj.y + obj.legLeft.size[1])) == True or self.touchingTile(((obj.x + obj.size[0] + obj.xVel),obj.y + obj.size[1] - 2)) == True :
obj.xVel = 0
beside = True
if self.touchingTile(((obj.x + obj.armLeft.size[0]) + obj.xVel,obj.y)) == True or self.touchingTile(((obj.x + obj.armLeft.size[0]) + obj.xVel,obj.y + obj.legLeft.size[1])) == True or self.touchingTile(((obj.x + obj.armLeft.size[0]) + obj.xVel,obj.y + obj.size[1] - 2)) == True :
obj.xVel = 0
beside = True
if self.touchingTile((obj.x + (obj.legLeft.size[0] * 2), obj.y + obj.yVel)) == True or self.touchingTile((obj.x + obj.armLeft.size[0], obj.y + obj.yVel)) == True:
obj.yVel = 0
above = True
if below == False:
obj.addSpeed(0,self.gravity)
else:
if self.touchingTile((obj.x + (obj.legLeft.size[0] * 2), obj.y + obj.size[1] - 1)) == True or self.touchingTile((obj.x + obj.armLeft.size[0],obj.y + obj.size[1] - 1)) == True:
obj.changeXY(0,-1)
if beside == True:
obj.faceTile = True
else:
obj.faceTile = False
if obj.xVel != 0:
obj.changeXY(obj.xVel)
obj.walkAnim()
if obj.xVel > 0:
obj.xVel -= 1
if obj.faceRight == False:
obj.flip()
else:
obj.xVel += 1
if obj.faceRight == True:
obj.flip()
if obj.yVel != 0:
obj.changeXY(0,obj.yVel)
objType = str(type(obj))#find the object type
objType = objType.split("'")
objType = objType[1]
objType = objType.split('.')[1]
if objType == 'character':
if obj.isJump == True:
if below == True:#moving down
obj.isJump = False#no longer jumping
obj.y -= 1#lower the y coordinate to prevent some bad stuff probably
obj.limbReset()#reset limbs
obj.attacking = False#no attack
else:
obj.fallAnim()#fall
```
#### File: 2DExplorer/Game/Spawner.py
```python
import time
class spawner:#spawners spawn characters
    def __init__(self,world,charID,location,level=1, limit=1, delay=5):#constructor requires the world, the character ID to spawn, its location and other options
    #like the starting level, the maximum number of children and the delay between spawns
self.location = location
self.level = level
self.limit = limit
self.charID = charID
self.chardata = world.charList.getList()[charID]
self.children = []
self.spawnTime = time.time()
self.delay = delay
self.world = world
self.over = world.over
def spawn(self):#spawn creates a character
child = self.world.addCharacter(self.charID)
child.changeXY(self.location[0],self.location[1])
child.setLevel(self.level)
        self.children.append(child.ID)#it also appends the character to its list of children
def main(self):#main is the main function for a spawner
if len(self.children) < self.limit:#if it has less than the limit of children
if time.time() - self.spawnTime >= self.delay:
self.spawn()#make another
self.spawnTime = time.time()
def killChild(self,ID):
toKill = None
for i in range(len(self.children)):
if self.children[i] == ID:
toKill = i
break
if toKill != None:
del(self.children[toKill])
return True
else:
return False
```
#### File: 2DExplorer/Game/Tile.py
```python
import pygame
class tile:
def __init__(self,parentWorld,colRow, tileType, z = 0,opacity = 255, lightBlock=25, lightLevel = 0, physical = True):
self.tileType = tileType
self.origPic = parentWorld.tilePics[tileType]
self.pic = parentWorld.tilePics[tileType]
dark = pygame.Surface((parentWorld.unit,parentWorld.unit))
dark.fill((0,0,0))
dark = dark.convert()
self.dark = dark
self.z = z
self.opacity = opacity
self.column = colRow[0]
self.row = colRow[1]
self.parentWorld = parentWorld
self.lightBlock = lightBlock
self.lightLevel = lightLevel
self.physical = physical
self.drop = None
self.dropAmount = 1
self.inv = None
self.selectedSlot = None
self.drawBack = False
self.canBreak = True
self.tool = 'pick'
def changeTile(self,tileType):
self.parentWorld.addTile(self.column,self.row,tileType)
self.updatePic()
def gravity(self):
if self.parentWorld.tiles[self.column][self.row + 1][self.z] == None:
self.parentWorld.addTile(self.column,self.row+1,self.tileType,self.z)
self.parentWorld.removeTile(self.column,self.row,self.z)
def special(self):
pass
def updatePic(self):
if self.physical == False or self.drawBack == True:
if self.parentWorld.tiles[self.column][self.row][0] != None and self.z == 1:
if self.parentWorld.tiles[self.column][self.row][0].tileType != 24:
self.pic = pygame.Surface((self.parentWorld.unit,self.parentWorld.unit))
self.pic.blit(self.parentWorld.tiles[self.column][self.row][0].pic,(0,0))
self.pic.blit(self.origPic,(0,0))
def relight(self):
self.parentWorld.lighting.lightSection(self.column -10,self.column+10,self.row-10,self.row+10)
def onClick(self,obj):
pass
``` |
{
"source": "joaompinto/argscall",
"score": 3
} |
#### File: argscall/tests/test_some_arguments.py
```python
from argscall import argsCaller
from argscall.exceptions import MissingArgument, TooManyArgument
import pytest
def do_something(name):
return name.upper()
def do_something_default(name, family="Black"):
return f"{name} {family}"
def test_single_argument():
assert argsCaller(do_something, "a").call() == "A"
def test_single_default_argument():
assert argsCaller(do_something_default, "Joe").call() == "<NAME>"
def test_missing_argument():
with pytest.raises(MissingArgument) as pytest_wrapped_e:
argsCaller(do_something)
assert pytest_wrapped_e.value.argument_name == "name"
# Call the function with an extra arguments
def test_too_many_arguments():
with pytest.raises(TooManyArgument) as pytest_wrapped_e:
argsCaller(do_something, "a", "b")
assert pytest_wrapped_e.value.argument_value == "b"
def test_args_str_single():
assert argsCaller(do_something, "a").args_str() == "<name>"
def test_args_str_default():
assert argsCaller(do_something_default, "a").args_str() == "<name> [<family> = Black]"
``` |
{
"source": "joaompinto/buildc",
"score": 2
} |
#### File: buildc/buildc/rootfs.py
```python
import tarfile
import magic
import re
import os
import shutil
from os.path import islink, join, dirname, exists, isfile
from tempfile import NamedTemporaryFile
EXCLUDE_PREFIXES = [".", "/dev/", "/home/", "/proc/", "/sys/", "/etc/", "/run/"]
def create_tar(file_list):
tmp_tar_file = NamedTemporaryFile(delete=False, suffix=".tgz")
with tarfile.open(tmp_tar_file.name, "w:gz") as tar:
while len(file_list) > 0:
name = file_list[0]
file_list.remove(name)
skip_item = False
for prefix in EXCLUDE_PREFIXES:
if name.startswith(prefix):
skip_item = True
break
if skip_item:
continue
tar.add(name)
# Follow symbolic links
if islink(name):
link_target = os.readlink(name)
link_target = join(dirname(name), link_target)
if link_target not in file_list:
file_list.append(link_target)
else:
if isfile(name):
with open(name, "r") as f:
magic_name = magic.detect_from_fobj(f).name
dl = re.findall(
"dynamically linked, interpreter ([^,]*)", magic_name
)
                        if len(dl) == 0 or dl[0] in file_list:
continue
dl = dl[0]
file_list.append(dl)
return tmp_tar_file.name
def extract_tar(tar_filename, output_dir, force_overwrite):
rootfs_dir = join(output_dir, "rootfs")
if force_overwrite and exists(rootfs_dir):
shutil.rmtree(rootfs_dir)
os.makedirs(rootfs_dir)
with tarfile.open(tar_filename, "r") as tar:
tar.extractall(rootfs_dir)
return output_dir
def create_config_json(bundle_dir, command_args):
""" Create an OCI config file using a templatete """
config_template = join(dirname(__file__), "config.json")
with open(config_template) as template_file:
template_data = template_file.read()
quoted_args = [f'"{arg}"' for arg in command_args]
quoted_args = "[{}]".format(",".join(quoted_args))
template_data = template_data.replace('"%ARGS%"', quoted_args)
bundle_config = join(bundle_dir, "config.json")
with open(bundle_config, "w") as config_file:
config_file.write(template_data)
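# --- Hedged usage sketch (not part of the original file) ---
# Chains the helpers above on a Linux host; the binary path and output
# directory are illustrative assumptions, create_tar() needs the file-magic
# dependency imported at the top, and create_config_json() expects a
# config.json template next to this module.
if __name__ == "__main__":
    tar_path = create_tar(["/bin/ls"])
    bundle_dir = extract_tar(tar_path, "/tmp/buildc-demo", force_overwrite=True)
    create_config_json(bundle_dir, ["/bin/ls", "-l"])
    print("OCI bundle written to", bundle_dir)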
``` |
{
"source": "joaompinto/ciman",
"score": 3
} |
#### File: ciman/cli/info.py
```python
import json
import typer
from typing import Optional
from ciman.registry import DockerRegistryClient
from ciman.view.info import print_image_info
def info(
image_name: str = typer.Argument(..., help="The name of the image"),
output_format: Optional[str] = typer.Option(
None, "-o", "--output-format", help="Ouput format [json]"
),
):
"""
Show information for an image stored in a docker registry
"""
registry, repository, tag = DockerRegistryClient.parse_image_url(image_name)
drc = DockerRegistryClient(registry)
manifest = drc.get_manifest(repository, tag)
if output_format == "json":
print(json.dumps(manifest, indent=4))
else:
print_image_info(manifest)
```
#### File: ciman/view/history.py
```python
import json
from ciman.view.console import console
from rich.pretty import pprint
def print_layer_info(layer_json: dict):
container_config = layer_json["container_config"]
cmd = container_config["Cmd"]
full_cmd = " ".join(cmd)
if len(cmd) == 1:
cmd = cmd[0]
if full_cmd.startswith("/bin/sh -c #(nop) "):
cmd = full_cmd[18:]
if isinstance(cmd, str):
cmd = cmd.replace("\t", "").strip()
print(str(cmd))
console.print("---", style="bold green")
def find_child(layers: list, current_layer: dict):
    for item in layers:
        if item.get("parent") == current_layer["id"]:
            return item
def print_image_history(image_json: dict):
    history = image_json["history"]
    for i, item in enumerate(history):
        history[i] = json.loads(item["v1Compatibility"])
    top_layer = [i for i in history if not i.get("parent")]
    assert len(top_layer) == 1
    current_layer = top_layer[0]
    while current_layer:
        print_layer_info(current_layer)
        current_layer = find_child(history, current_layer)
``` |
{
"source": "joaompinto/cmdliner",
"score": 3
} |
#### File: cmdliner/tests/test_optional_posargs.py
```python
from cmdliner import cli
from unittest.mock import patch
@cli("0.0.1")
def main(*args):
print(f"Hello {'+'.join(args)}")
def test_zero_parameters(capsys):
with patch("sys.argv", ["program_name"]):
main()
assert capsys.readouterr().out == "Hello \n"
def test_one_parameter(capsys):
with patch("sys.argv", ["program_name", "abc"]):
main()
assert capsys.readouterr().out == "Hello abc\n"
def test_two_parameters(capsys):
with patch("sys.argv", ["program_name", "abc", "123"]):
main()
assert capsys.readouterr().out == "Hello abc+123\n"
if __name__ == "__main__":
cli()
``` |
{
"source": "joaompinto/container-trust",
"score": 2
} |
#### File: container-trust/ctinspector/layer.py
```python
from __future__ import print_function
import os
import re
import ctinspector.tar
from os.path import dirname, basename, join, getsize, exists
from ctinspector.utils import print_list, human_size
def show_info(image_content_path, layer_path):
info_dict = {}
layer_id = basename(dirname(layer_path))[:12]
layer_full_path = join(image_content_path, layer_path)
info_dict["Size"] = ('%s' % human_size(getsize(layer_full_path)))
layer_tar = ctinspector.tar.extract_layer(image_content_path, layer_path)
file_count = 0
dir_count = 0
dir_list = []
file_list = []
base_len = len(layer_tar)
for root, dirs, files in os.walk(layer_tar, topdown=False):
relevant_root = root[base_len:]
for name in files:
file_list.append(relevant_root+"/"+name)
file_count += 1
# Skip when dir is parent of a previously found dir
for name in dirs:
dir_name = relevant_root+"/"+name
must_add = True
for i in file_list:
if i.startswith(dir_name):
must_add = False
break
for i in dir_list:
if i.startswith(dir_name):
must_add = False
break
if must_add:
dir_list.append(dir_name)
dir_count += 1
if file_count <= 10:
info_dict["Files"] = ' '.join(file_list)
else:
info_dict["File Count"] = file_count
if dir_count <= 10:
info_dict["Dirs"] = ' '.join(dir_list)
else:
info_dict["Dir Count"] = dir_count
info_dict["Distro"] = identify_distro(layer_tar)
print_list("Layer %s" % layer_id, info_dict)
def identify_distro(layer_path):
distro_map = {
'os-release' : ['etc/os-release', r'^NAME="([^"]+)', r'^VERSION_ID="?([\w\.]+)']
}
for detection_type, items in distro_map.items(): # pylint: disable=W0612
(file_name, name_regex, version_regex) = items
full_file_name = join(layer_path, file_name)
if exists(full_file_name):
with open(full_file_name) as release_file:
release_data = release_file.read()
name = re.findall(name_regex, release_data, re.MULTILINE)
if name:
name = name[0]
version = re.findall(version_regex, release_data, re.MULTILINE)
if version:
name += " "+version[0]
return name
return None
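# --- Hedged usage sketch (not part of the original file) ---
# identify_distro() only needs a directory containing etc/os-release, so the
# host root works as an illustrative input; running this file directly still
# assumes the ctinspector package (imported above) is installed.
if __name__ == "__main__":
    print(identify_distro("/"))  # e.g. "Ubuntu 20.04", or None if not detected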
``` |
{
"source": "joaompinto/doreg",
"score": 2
} |
#### File: doreg/doreg/arg_parser.py
```python
import sys
from optparse import OptionParser
from .version import version
def arg_parse():
usage = f"{sys.argv[0]}"
parser = OptionParser(usage, version=version)
(options, args) = parser.parse_args()
return (options, args)
``` |
{
"source": "joaompinto/eprint",
"score": 3
} |
#### File: eprint/eprint/eprint.py
```python
from colored import fg, attr
from enum import Enum, auto
RESET = attr('reset')
class MsgClass(Enum):
PLAIN = auto()
OK = auto()
ERROR = auto()
COLOR_MAP = {
MsgClass.PLAIN : fg("white"),
MsgClass.OK : fg("green"),
MsgClass.ERROR : fg("red")
}
class eprint:
def __init__(self, *args, **kwargs):
eprint.raw(MsgClass.PLAIN, *args, **kwargs)
@staticmethod
def raw(msg_class: MsgClass, *args, **kwargs):
color = COLOR_MAP[msg_class]
if len(args) == 1:
msg = color + args[0] + RESET
else:
colored_args = [color + str(x) + RESET for x in args[1:]]
msg = args[0].format(*colored_args)
print(msg)
@staticmethod
def ok(*args, **kwargs):
eprint.raw(MsgClass.OK, *args, **kwargs)
@staticmethod
def error(*args, **kwargs):
eprint.raw(MsgClass.ERROR, *args, **kwargs)
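# --- Hedged usage sketch (not part of the original file) ---
# Plain messages go through the constructor; ok()/error() colour either the
# whole message or, when a format string is given, only the extra arguments.
if __name__ == "__main__":
    eprint("plain message")
    eprint.ok("Build {} in {}s", "succeeded", 12)
    eprint.error("Build failed")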
``` |
{
"source": "joaompinto/filecontent",
"score": 3
} |
#### File: filecontent/extractors/tar.py
```python
from tarfile import TarFile
class Extractor:
needs_file = True
match_content = "x-tar"
def __init__(self, metadata, fileobj):
# Open a stream of tar blocks for reading with transparent compression.
self._tarfile = TarFile.open(fileobj=fileobj, mode="r|*")
def get_content(self):
for member in self._tarfile.getmembers():
print(member)
files = []
return files
```
#### File: filecontent/handlers/file.py
```python
from os.path import getsize, getmtime
from ..mime import guess_type
class FileHandler:
def __init__(self, url):
self._url = url
def get_metadata(self):
metadata = {
"url": self._url,
"size": getsize(self._url),
"date": int(getmtime(self._url)),
}
guess_type(metadata)
return metadata
def get_fileobj(self):
fileobj = open(self._url, "rb")
return fileobj
``` |
{
"source": "joaompinto/filetrace",
"score": 2
} |
#### File: filetrace/filetrace/__main__.py
```python
from .tracer import FileRunTracer
from .cli import parse_cmd_line
def main():
options, args = parse_cmd_line()
FileRunTracer(options, args).run()
if __name__ == "__main__":
main()
``` |
{
"source": "joaompinto/kubesh",
"score": 2
} |
#### File: kubesh/commands/help.py
```python
class Command:
Name = ".help"
Description = "List all available commands"
Aliases = [".h"]
def run(self, console, api):
for cmd in console.cmd_handler.all_commands:
print(f"{cmd.Name} - {cmd.Description}")
```
#### File: kubesh/commands/pods.py
```python
from ..mapper import table_from_list
class Command:
    Name = ".pods"
    Description = "List the pods"
    default_fields = [
        {"Namespace": "metadata.namespace"},
        {"Name": "metadata.name"},
    ]
def run(self, console, api, argv):
if len(argv) == 0:
response = api.list_pod_for_all_namespaces()
response_data = table_from_list(response, self.default_fields)
console.table(response_data)
```
#### File: kubesh/kubesh/mapper.py
```python
def find_item(root_item, field_spec, keys_only=False):
if field_spec is None:
return root_item.to_dict()
tokens = field_spec.split(".")
cursor = root_item
for tokenName in tokens:
if tokenName[0] == "[":
assert isinstance(cursor, list)
tokenName = tokenName.strip("[]]")
search_key, search_value = tokenName.split("=")
match_item = [x for x in cursor if getattr(x, search_key) == search_value]
if match_item:
cursor = match_item[0]
else:
return None
else:
cursor = getattr(cursor, tokenName)
    if not isinstance(cursor, (str, int, float, tuple, list)):
        return cursor.to_dict()
    return cursor
def table_from_list(data, fields):
input_data = data.items
field_label_list = []
field_spec_list = []
for field_spec in fields:
label, spec = list(field_spec.items())[0]
field_label_list.append(label)
field_spec_list.append(spec)
response_data = [field_label_list]
for list_item in input_data:
row = []
for field_spec in field_spec_list:
value = find_item(list_item, field_spec)
row.append(value)
response_data.append(row)
return response_data
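# --- Hedged usage sketch (not part of the original file) ---
# find_item() walks dotted attribute paths (and "[key=value]" list selectors);
# SimpleNamespace stands in for the kubernetes client models here.
if __name__ == "__main__":
    from types import SimpleNamespace
    pod = SimpleNamespace(metadata=SimpleNamespace(name="web-1", namespace="default"))
    print(find_item(pod, "metadata.name"))       # web-1
    print(find_item(pod, "metadata.namespace"))  # default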
```
#### File: kubesh/kubesh/plugins.py
```python
import io
import yaml
import pkgutil
import importlib
from os.path import dirname
from pathlib import Path
import inspect
import traceback
from wasabi import TracebackPrinter
from .mapper import table_from_list
# https://packaging.python.org/guides/creating-and-discovering-plugins/
class YAMLCommand:
def __init__(self, yaml_data):
self.yaml = yaml_data
clone_fields = ["Name", "Aliases", "Description", "When"]
for field in clone_fields:
if field in yaml_data:
setattr(self, field, yaml_data[field])
def run(self, console, api):
api_func = self.yaml["API"]
api_call = f"api.{api_func}"
response = eval(api_call)
output_type = self.yaml["Output"]["Type"]
content = self.yaml["Output"]["Content"]
if output_type.lower() == "list":
response_data = table_from_list(response, content)
console.table(response_data)
def iter_namespace(ns_pkg):
return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
def yaml2cmds(filename):
cmd_list = []
with io.open(filename, encoding="utf8") as yaml_file:
yaml_data = yaml.load_all(yaml_file, Loader=yaml.FullLoader)
for cmd_data in yaml_data:
cmd = YAMLCommand(cmd_data)
cmd_list.append(cmd)
return cmd_list
def load_yaml_commands():
yaml_cmd_list = []
commands_dir = Path(dirname((__file__))).joinpath("commands")
for cmd_yaml_filename in Path(commands_dir).glob("*.yaml"):
cmd_list = yaml2cmds(cmd_yaml_filename)
yaml_cmd_list.extend(cmd_list)
return yaml_cmd_list
def load_module_commands():
cmd_list = []
from . import commands
for finder, name, ispkg in iter_namespace(commands):
module = importlib.import_module(name)
cls_members = inspect.getmembers(module, inspect.isclass)
cls_members = [c[1] for c in cls_members if c[1].__name__.startswith("Command")]
for cmd in cls_members:
cmd_object = cmd()
required_params = ["Name", "Description"]
for param in required_params:
if not hasattr(cmd_object, param):
tb = TracebackPrinter(
tb_base="kubesh", tb_exclude=("core.py", "runpy.py")
)
error = tb(
f"Command missing required '{param}' attribute",
f"File: {module.__file__}",
highlight="kwargs",
tb=traceback.extract_stack(),
)
raise ValueError(error)
# raise Exception(f"Command {cmd} does must provide a syntax field")
cmd_list.append(cmd_object)
return cmd_list
def load_commands():
yaml_cmds = load_yaml_commands()
module_cmds = load_module_commands()
    return yaml_cmds + module_cmds
``` |
{
"source": "joaompinto/matriosca",
"score": 3
} |
#### File: matriosca/matrioska/argparser.py
```python
from optparse import OptionParser
from .version import version
def arg_parser():
parser = OptionParser(version=version)
parser.add_option(
"-d",
"--decrypt",
action="store_true",
dest="decrypt",
default=False,
help="decrypt file",
)
parser.add_option(
"-g",
"--gen-key",
action="store_true",
dest="gen_key",
default=None,
help="generate a random encryption key",
)
parser.add_option(
"-o",
"--output-file",
action="store",
dest="output_filename",
default=None,
help="write to output file instead of stdout",
)
parser.add_option(
"-k",
"--key-env",
action="store",
dest="key_env",
default=None,
help="environment variable name containing the encryption key",
)
parser.add_option(
"-f",
"--force",
action="store_true",
dest="force",
default=False,
help="overwrite output file if it exists",
)
(options, args) = parser.parse_args()
return (options, args)
```
#### File: matriosca/matrioska/file.py
```python
from sys import stderr
ENC_HEADER = b"AES256"
def write_to_file(output_file, nonce, tag, ciphertext):
output_file.write(ENC_HEADER)
output_file.write(nonce)
output_file.write(tag)
output_file.write(ciphertext)
def read_from_file(input_file):
header, nonce, tag, ciphertext = [
input_file.read(x) for x in (len(ENC_HEADER), 16, 16, -1)
]
    if header != ENC_HEADER:
        print(f"Got unexpected header {header}", file=stderr)
        exit(3)
    return header, nonce, tag, ciphertext
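# --- Hedged round-trip sketch (not part of the original file) ---
# Uses in-memory buffers and dummy nonce/tag values purely for illustration.
if __name__ == "__main__":
    import io
    buf = io.BytesIO()
    write_to_file(buf, b"\x00" * 16, b"\x11" * 16, b"ciphertext")
    buf.seek(0)
    header, nonce, tag, ciphertext = read_from_file(buf)
    assert (header, ciphertext) == (ENC_HEADER, b"ciphertext")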
```
#### File: matriosca/matrioska/__main__.py
```python
from .argparser import arg_parser
from .key import get_encryption_key, generate_random_key
from .encryption import encrypt, decrypt
def main():
options, args = arg_parser()
input_filename = None
encryption_key = None
if options.gen_key:
print(generate_random_key(b64_encoded=True))
return
if len(args) == 1:
input_filename = args[0]
if options.decrypt:
encryption_key = get_encryption_key(options.key_env)
decrypt(input_filename, options.output_filename, options.force, encryption_key)
else:
encryption_key = get_encryption_key(options.key_env)
encrypt(input_filename, options.output_filename, options.force, encryption_key)
if __name__ == "__main__":
main()
``` |
{
"source": "joaompinto/pylibcontainer",
"score": 2
} |
#### File: pylibcontainer/pylibcontainer/image.py
```python
from __future__ import print_function
import os
import shutil
import hashlib
import requests
import click
from tempfile import NamedTemporaryFile
from hashlib import sha256
from os.path import expanduser, join, exists, basename
from .utils import HumanSize
from .tar import extract_layer
from . import trust
from . import container
from .colorhelper import print_info, print_error, print_warn, print_success
from .colorhelper import success
from .image_index import get_url
from clint.textui import progress
from dateutil.parser import parse as parsedate
from datetime import datetime
CACHE_PATH = join(expanduser("~"), ".pylibcontainer", "images_cache")
class Cache(object):
    """ Provides an image caching mechanism on disk """
    cache_dir = CACHE_PATH
def __init__(self):
if not exists(CACHE_PATH):
os.makedirs(CACHE_PATH, 0o700)
def get(self, cache_key, default=None):
""" return info for cached file """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
if exists(cache_fn):
file_stat = os.stat(cache_fn)
last_modified = datetime.fromtimestamp(file_stat.st_mtime)
file_size = file_stat.st_size
return cache_fn, cache_hash, last_modified, file_size
return default
def put(self, filename, cache_key):
""" put a file into cache """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
shutil.move(filename, cache_fn)
return cache_hash, cache_fn
def download(image_url):
""" Download image (if not found in cache) and return it's filename """
response = requests.head(image_url)
file_size = remote_file_size = int(response.headers.get("Content-Length"))
remote_last_modified = parsedate(response.headers.get("Last-Modified")).replace(
tzinfo=None
)
remote_is_valid = response.status_code == 200 and file_size and remote_last_modified
# Check if image is on cache
cache = Cache()
cached_image = cache.get(image_url)
if cached_image:
if remote_is_valid:
cache_fn, cache_hash, last_modified, file_size = cached_image
if remote_file_size == file_size and remote_last_modified < last_modified:
print_info("Using file from cache", CACHE_PATH)
return cache_hash, cache_fn
print_info("Downloading new remote file because an update was found")
else:
print_warn("Unable to check the status for " + image_url)
print_warn("Assuming local cache is valid")
# Not cached, and no valid remote information was found
if not remote_is_valid:
print_error(
"Unable to get file, http_code=%s, size=%s, last_modified=%s"
% (response.status_code, remote_file_size, remote_last_modified)
)
exit(2)
    # Download image
print_info(
"Downloading image... ",
"{0} [{1:.2S}]".format(basename(image_url), HumanSize(file_size)),
)
remote_sha256 = hashlib.sha256()
response = requests.get(image_url, stream=True)
with NamedTemporaryFile(delete=False) as tmp_file:
for chunk in progress.bar(
response.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1
):
if chunk:
remote_sha256.update(chunk)
tmp_file.write(chunk)
tmp_file.flush()
# Verify image integrity
trust_verify = trust.verify(image_url, tmp_file.name, remote_sha256.hexdigest())
if not trust_verify or not trust_verify.valid or not trust_verify.username:
print_error("Integrity/authenticity error - GPG signature mismatch!")
exit(3)
print("{0:>10}: {1}".format("GPG Signer", success(trust_verify.username)))
print("{0:>10}: {1}".format("GPG ID", success(trust_verify.pubkey_fingerprint)))
print("{0:>10}: {1}".format("Creation", success(trust_verify.creation_date)))
return cache.put(tmp_file.name, image_url)
@click.command()
@click.argument("image_url")
@click.option("--as_root", is_flag=True)
@click.option("--overlay", "-o", multiple=True)
@click.argument("command", nargs=-1)
def run(image_url, command, as_root, overlay):
url = get_url(image_url)
image_url = url or image_url
if not image_url:
print_info("No index was found for image", image_url)
exit(5)
is_validate_only = False
if not command:
command = ["/bin/sh"]
image_protocol = image_url.split(":")[0].lower()
if image_protocol in ["http", "https"]:
_, image_fn = download(image_url)
else:
        _, image_fn = sha256(image_url.encode()).hexdigest(), image_url
rootfs = extract_layer(image_fn)
if len(command) == 1 and command[0] == "-":
is_validate_only = True
print("Validating container setup with the rootfs")
else:
print_info("Executing", " ".join(command))
_, exit_code = container.runc(rootfs, command, as_root, overlay)
if exit_code != 0:
print_error("Last command returned an error")
elif is_validate_only:
print_success("OK")
```
#### File: joaompinto/pylibcontainer/setup.py
```python
import os
import io
from setuptools import setup, find_packages
# This file is based on:
# https://github.com/ines/wasabi/blob/master/setup.py
def setup_package():
package_name = "pylibcontainer"
root = os.path.abspath(os.path.dirname(__file__))
# Read in package meta from about.py
about_path = os.path.join(root, package_name, "about.py")
with io.open(about_path, encoding="utf8") as f:
about = {}
exec(f.read(), about)
# Get readme
readme_path = os.path.join(root, "README.md")
with io.open(readme_path, encoding="utf8") as f:
readme = f.read()
# Find packages
all_packages = find_packages()
with io.open("requirements.txt", encoding="utf8") as f:
requirements = f.read()
setup(
name=package_name,
description=about["__summary__"],
long_description=readme,
long_description_content_type="text/markdown",
author=about["__author__"],
author_email=about["__email__"],
url=about["__uri__"],
version=about["__version__"],
license=about["__license__"],
packages=all_packages,
zip_safe=True,
entry_points="""
[console_scripts]
pylibcontainer=pylibcontainer.__main__:pylibcontainer
""",
install_requires=[x for x in requirements.splitlines() if x],
include_package_data=True,
)
if __name__ == "__main__":
setup_package()
``` |
{
"source": "joaompinto/pystrace",
"score": 2
} |
#### File: pystrace/pystrace/tracer.py
```python
from multiprocessing import Process
from tempfile import TemporaryDirectory
from pathlib import Path
from sys import stderr
import os
import subprocess
import re
class Tracer:
def __init__(
self,
command_args,
syscall_callback,
follow_childs=True,
filter_syscalls="",
filter_return="",
debug=False,
):
self.command_args = command_args
self.syscall_callback = syscall_callback
self.follow_childs = follow_childs
self.filter_syscalls = filter_syscalls
self.filter_return = filter_return
self.debug = debug
self.parse_regex_ok = re.compile(r"^(\d+)\s*(\w*)\((.*)\) = (\d+)$")
self.parse_regex_fail = re.compile(
r"^(\d+)\s*(\w*)\((.*)\) = ([-\d]+)\s*(\w+)\s*(.*)$"
)
def _create_log_fifo(self):
self.temp_dir = TemporaryDirectory()
fifo_filename = Path(self.temp_dir.name).joinpath("strace")
self.fifo_filename = fifo_filename
os.mkfifo(fifo_filename, 0o600)
def run(self):
self._create_log_fifo()
strace_process = Process(
target=self._run_strace)
strace_process.start()
rc = self.handle_strace_data()
strace_process.join()
return rc
def handle_strace_data(self):
while True:
with open(self.fifo_filename, 'r') as fifo:
data = fifo.read()
while data:
# When there is an error during strace, it will not open the fifo
                # which means we only get a single FIFO item with the rc
if data.isnumeric():
rc = int(data)
return rc
for line in data.splitlines():
syscall_data = self.parse_regex_ok.findall(line)
if syscall_data:
pid, syscall, arguments, result = syscall_data[0]
syscall_dict = {
"pid": int(pid),
"syscall": syscall,
"arguments": arguments,
"result": int(result),
}
self.syscall_callback(syscall_dict)
else:
syscall_data = self.parse_regex_fail.findall(line)
if syscall_data:
(
pid,
syscall,
arguments,
result,
errno,
errdesc,
) = syscall_data[0]
syscall_dict = {
"pid": int(pid),
"syscall": syscall,
"arguments": arguments,
"result": int(result),
"errno": errno,
"errdesc": errdesc,
}
self.syscall_callback(syscall_dict)
data = fifo.read()
def _run_strace(self):
strace_args = []
if self.follow_childs:
strace_args.append("-f")
if self.filter_return:
strace_args += ["-e", f"status={self.filter_return}"]
if self.filter_syscalls:
strace_args += ["-e", f"trace={self.filter_syscalls}"]
strace_args += ["-o", self.fifo_filename]
if self.debug:
print("DEBUG ARGS:", strace_args)
try:
run_result = subprocess.run(
["strace"] + strace_args + self.command_args, capture_output=True
)
except FileNotFoundError:
print("Could not execute 'strace', is it installed?")
rc = 1
else:
rc = run_result.returncode
if run_result.stdout:
print(run_result.stdout.strip().decode().strip("\n"))
if rc != 0:
print((run_result.stderr.decode().strip("\n")), file=stderr)
with open(self.fifo_filename, "w") as output:
output.write(str(rc))
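# --- Hedged usage sketch (not part of the original file) ---
# Runs a command under strace and prints each parsed syscall dict; the traced
# command and syscall filter are illustrative, and strace on a Linux host is
# assumed to be available.
if __name__ == "__main__":
    def show(syscall):
        print(syscall["pid"], syscall["syscall"], syscall["result"])
    tracer = Tracer(["/bin/true"], show, filter_syscalls="openat,execve")
    exit_code = tracer.run()
    print("traced command exited with", exit_code)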
``` |
{
"source": "joaompinto/python3-visitor-pattern",
"score": 3
} |
#### File: python3-visitor-pattern/class-name-expr-visitor/ast.py
```python
class Expr(object):
def accept(self, visitor):
method_name = "visit_{}".format(self.__class__.__name__.lower())
visit = getattr(visitor, method_name)
return visit(self)
class Int(Expr):
def __init__(self, value):
self.value = value
class Add(Expr):
def __init__(self, left, right):
self.left = left
self.right = right
class Mul(Expr):
def __init__(self, left, right):
self.left = left
self.right = right
```
#### File: python3-visitor-pattern/singledispatch-expr-visitor/main.py
```python
from ast import Add, Int, Mul
from visitors import Print, Eval
def main():
expr = Add(Add(Int(4), Int(3)), Mul(Int(10), Add(Int(1), Int(1))))
print("PRINT:", expr.accept(Print()))
print("EVAL:", expr.accept(Eval()))
if __name__ == "__main__":
main()
```
#### File: python3-visitor-pattern/singledispatch-expr-visitor/visitors.py
```python
from ast import *
from functools import singledispatchmethod
class Visitor(object):
pass
class Eval(Visitor):
# Default handler, will handle the Int
@singledispatchmethod
def visit(self, a):
return a.value
@visit.register
def _(self, a: Add):
return a.left.accept(self) + a.right.accept(self)
@visit.register
def _(self, a: Mul):
return a.left.accept(self) * a.right.accept(self)
class Print(Visitor):
# Default handler, will handle the Int
@singledispatchmethod
    def visit(self, a):
        return a.value
@visit.register
def _(self, a: Add):
return "(+ {} {})".format(a.left.accept(self), a.right.accept(self))
@visit.register
def _(self, a: Mul):
return "(* {} {})".format(a.left.accept(self), a.right.accept(self))
``` |
{
"source": "joaompinto/quickweb",
"score": 3
} |
#### File: features/web/controllers.py
```python
from os.path import basename, join, splitext, dirname, exists, sep
from fnmatch import fnmatch
from importlib.machinery import SourceFileLoader
import cherrypy
from quickweb import controller
from quickweb.events import on_event
class Feature(object):
""" templates """
def setup(self):
on_event("content_file_found", lambda x, y: self.on_found_content_file(x, y))
def on_found_content_file(self, content_root, content_name):
""" A file was found """
on_file_name = "*.py"
# We only care if it matches our selection pattern
if not fnmatch(basename(content_name), on_file_name):
return
# Ignore if there is a matching .html because it's a composed controller
html_fn = splitext(content_root + sep + content_name)[0] + ".html"
if exists(html_fn):
return
if basename(content_name) == "index.py":
url = "/" + dirname(content_name)
else:
url = "/" + content_name
noext_name, ext = splitext(url)
url = noext_name
module_fname = join(content_root, content_name)
controller_module = SourceFileLoader(module_fname, module_fname).load_module()
controller.attach(url, controller_module.Controller())
cherrypy.engine.autoreload.files.add(module_fname)
```
#### File: quickweb/quickweb/modules.py
```python
from fnmatch import fnmatch
def fnmatch_list(filename, pattern_list):
""" Check filename against a list of patterns using fnmatch """
if type(pattern_list) != list:
pattern_list = [pattern_list]
for pattern in pattern_list:
if fnmatch(filename, pattern):
return True
return False
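# --- Hedged usage sketch (not part of the original file) ---
if __name__ == "__main__":
    print(fnmatch_list("index.py", ["*.html", "*.py"]))  # True
    print(fnmatch_list("index.py", "*.html"))            # False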
```
#### File: quickweb/quickweb/startup.py
```python
import os
import sys
import importlib
from os.path import abspath, join, dirname, basename
from glob import glob
import cherrypy
import quickweb
from quickweb import controller, data_provider
from quickweb.colorhelper import info, print_error, print_warn
web_app_config = {}
def setup_app(app_name, app_directory, no_logs):
""" setup the application initial configuration """
test_mode = os.getenv("TEST_MODE")
if test_mode:
print_warn("Running in TEST mode")
app_directory = abspath(app_directory)
controller.load_app_modules(app_directory)
os.chdir(app_directory)
run_boot(app_directory)
set_engine_config(test_mode, no_logs)
load_tools(app_directory)
setup_features()
data_provider.set_base_dir(app_directory)
cherrypy.tree.mount(controller.get_app_root(), config=web_app_config)
def load_tools(app_directory):
tools_dir = join(app_directory, "tools")
tools_glob = join(tools_dir, "*.py")
for tool_filename in glob(tools_glob):
tool_name = basename(tool_filename).split(".")[0]
print(f"** Loading tool {tool_filename}")
spec = importlib.util.spec_from_file_location(
"tools_" + tool_name, tool_filename
)
tool = importlib.util.module_from_spec(spec)
spec.loader.exec_module(tool)
app_config = {f"tools.{tool_name}.on": True}
cherrypy.config.update(app_config)
cherrypy.engine.autoreload.files.add(tool_filename)
cherrypy.engine.autoreload.files.add(tools_dir)
def run_boot(app_directory):
boot_dir = join(app_directory, "boot")
boot_log = join(boot_dir, "*.py")
for boot_filename in glob(boot_log):
boot_name = basename(boot_filename).split(".")[0]
print(f"** Running boot script {boot_filename}")
spec = importlib.util.spec_from_file_location(
"boot_" + boot_name, boot_filename
)
boot_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(boot_module)
boot_module.start()
cherrypy.engine.autoreload.files.add(boot_filename)
cherrypy.engine.autoreload.files.add(boot_dir)
def setup_features():
""" Call the features setup function """
core_features = {"web": ["content_directory", "controllers", "templates"]}
imported_features = []
for feature_type, feature_list in core_features.items():
features_list_names = ", ".join(feature_list)
print(
"** Setting up {0} features {1}".format(
info(feature_type), info(features_list_names)
)
)
for feature_name in feature_list:
script_dir = dirname(abspath(__file__))
module_fname = join(
script_dir, "features", feature_type, feature_name + ".py"
)
feature_dict = {}
with open(module_fname) as source_file:
exec(compile(source_file.read(), module_fname, "exec"), feature_dict)
try:
feature = feature_dict["Feature"]()
except KeyError:
print_error(
"Feature module '%s' does not provide a Feature class!"
% feature_name
)
sys.exit(1)
try:
feature.setup()
except: # NOQA: E722
print_error("Failed setting up feature '%s' !" % feature_name)
raise
imported_features.append(feature)
for feature in imported_features:
if hasattr(feature, "activate"):
feature.activate()
def set_engine_config(test_mode, no_logs):
""" Set engine global config options """
quickweb.is_production = os.getenv("QUICKWEB_PRODUCTION", False)
# Enforce utf8 encoding
app_config = {
"tools.encode.on": True,
"tools.encode.encoding": "utf-8",
"tools.encode.errors": "replace",
"tools.trailing_slash.on": False,
"checker.on": False,
}
if test_mode:
app_config.update(
{
"log.screen": False,
"log.access_file": "access.log",
"log.error_file": "error.log",
"engine.autoreload.on": True,
}
)
# Disable logging when running with --no-logs
if no_logs:
app_config.update(
{"log.screen": False, "log.access_file": None, "log.error_file": None}
)
cherrypy.config.update(app_config)
```
#### File: quickweb/quickweb/template_manager.py
```python
from html.parser import HTMLParser
from os.path import basename, join, exists, dirname
from quickweb._tempfile import TemporaryDirectory
import os
import sys
import shutil
import io
import requests
import zipfile
from quickweb.colorhelper import info, print_error
TEMPLATES_REPO = "https://github.com/OpenPipe/QuickWeb-templates/archive/master.zip"
class MyHTMLParser(HTMLParser):
def __init__(self, url, template_directory):
HTMLParser.__init__(self)
self.template_directory = template_directory
self.url = url
self.replacement_map = {}
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == "link":
self.fetch_static_asset(attrs["href"])
if tag == "script" and "src" in attrs:
self.fetch_static_asset(attrs["src"])
# print("Encountered a start tag:", tag)
def handle_endtag(self, tag):
return
print("Encountered an end tag :", tag)
def handle_data(self, data):
data = data.strip(" \r\n")
if data:
print("Encountered some data :", data)
def fetch_static_asset(self, url):
print(self.template_directory, url)
if url.split(":")[0] in ["http", "https"]: # External resource
print("WARNING: External resource", url, "Skipping")
return
url = url.strip("\r\n ")
fetch_url = self.url + url
output_filename = basename(fetch_url)
resource_extension = url.split(".")[-1]
resource_map = {
"js": join("static", "js"),
"css": join("static", "css"),
"ico": None,
}
resource_type = resource_map.get(resource_extension, None)
if resource_type is None:
print("WARNING: Ignoring resource", url)
return
output_dir = join(self.template_directory, resource_map[resource_extension])
if output_dir is None: # We don't fetch icons
return
print("Fetching %s -> %s/%s" % (fetch_url, output_dir, output_filename))
os.makedirs(output_dir, mode=0o755, exist_ok=True)
resource = requests.get(fetch_url)
with open(join(output_dir, output_filename), "w") as static_file:
static_file.write(resource.text)
self.replacement_map[url] = "/" + join(resource_extension, output_filename)
class TemplateMaker:
def __init__(self, url, template_directory):
self.url = url
if template_directory is None:
template_directory = url.strip("/").split("/")[-1]
self.template_directory = template_directory
def make(self, force):
if exists(self.template_directory):
if force:
shutil.rmtree(self.template_directory)
else:
sys.stderr.write(
self.template_directory
+ " already exists!\n\
Please use --force if you want to overwrite.\n"
)
sys.exit(2)
# Fetch the page
page = requests.get(self.url)
parser = MyHTMLParser(self.url, self.template_directory)
html = page.text
# Fetch any static elements
parser.feed(page.text)
# Replace resource urls with our uniformed static locations
for old, new in parser.replacement_map.items():
print(old, new)
html = html.replace(old, new)
# Create the index.html
webroot_dir = join(self.template_directory, "webroot")
if not exists(webroot_dir):
os.makedirs(webroot_dir, mode=0o755)
index_filename = join(webroot_dir, "index.html")
print("Creating ", index_filename)
with open(index_filename, "w") as index_file:
index_file.write(html)
print("\nMake completed.")
print("You can now execute:\n\t quickweb run", self.template_directory)
def download_archive():
print("** Downloading templates archive from", info(TEMPLATES_REPO))
page = requests.get(TEMPLATES_REPO)
file_like_object = io.BytesIO(page.content)
templates_archive = zipfile.ZipFile(file_like_object)
return templates_archive
def download_template(template_name, app_directory):
templates_archive = download_archive()
with TemporaryDirectory() as tmpdirname:
templates_archive.extractall(tmpdirname)
templates_archive_tmp = join(tmpdirname, "QuickWeb-templates-master")
template_dirs = os.listdir(templates_archive_tmp)
if template_name not in template_dirs:
print_error("Unable to find template %s !" % template_name)
sys.exit(2)
template_root = join(templates_archive_tmp, template_name)
template_provides = [x for x in os.listdir(template_root)]
print("** The template provides: %s" % info(str(template_provides)))
shutil.copytree(template_root, app_directory)
def list():
templates_archive = download_archive()
print("** The following templates are available:")
for x in templates_archive.infolist():
if x.filename.count("/") == 2 and x.filename.endswith("/"):
print(" " + info(basename(dirname(x.filename))))
print("**")
```
#### File: joaompinto/quickweb/ssl_test.py
```python
import cherrypy
class RootServer:
@cherrypy.expose
def index(self, **keywords):
return "it works!"
if __name__ == "__main__":
server_config = {
"server.socket_host": "0.0.0.0",
"server.socket_port": 8443,
"server.ssl_certificate": "cert.pem",
"server.ssl_private_key": "privkey.pem",
}
cherrypy.config.update(server_config)
cherrypy.quickstart(RootServer())
``` |
{
"source": "joaompinto/sassh",
"score": 3
} |
#### File: sassh/sassh/sshforward.py
```python
import socket
import select
import SocketServer
import threading
import errno
from timeout import timeout
g_verbose = False
class ForwardServer (SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
class Handler (SocketServer.BaseRequestHandler):
def handle(self):
try:
chan = self.ssh_transport.open_channel('direct-tcpip',
(self.chain_host, self.chain_port),
self.request.getpeername())
except Exception, e:
verbose('Incoming request to %s:%d failed: %s' % (self.chain_host,
self.chain_port,
repr(e)))
return
if chan is None:
verbose('Incoming request to %s:%d was rejected by the SSH server.' %
(self.chain_host, self.chain_port))
return
verbose('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
chan.getpeername(), (self.chain_host, self.chain_port)))
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
try:
data = self.request.recv(1024)
except IOError, e:
if e.errno == errno.ECONNRESET: # Connection reset by peer
break
raise
if len(data) == 0:
break
try:
chan.send(data)
except: # Ignore errors sending to the proxy process
pass
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
try:
self.request.send(data)
except IOError, e:
if e.errno != errno.ECONNRESET:
raise
try:
chan.close()
except EOFError:
pass # Already closed
self.request.close()
class FordwardTunnel (threading.Thread):
def __init__(self, local_port, remote_host, remote_port, transport):
self.assigned_port = None
self.local_port = local_port
self.remote_host = remote_host
self.remote_port = remote_port
self.transport = transport
self.server = None
threading.Thread.__init__(self)
def run(self):
# this is a little convoluted, but lets me configure things for the Handler
# object. (SocketServer doesn't give Handlers any way to access the outer
# server normally.)
class SubHander (Handler):
chain_host = self.remote_host
chain_port = self.remote_port
ssh_transport = self.transport
retry_count = 3
while retry_count > 0:
try:
self.server = ForwardServer(('', self.local_port), SubHander)
except socket.error as err:
if err.errno == 98:
print "Trying next port"
pass
else:
raise
else:
break
retry_count -= 1
self.local_port += 1
if retry_count == 0:
self.assigned_port = 0
else:
self.assigned_port = self.local_port
self.server.serve_forever()
def shutdown(self):
if self.server:
self.server.shutdown()
def verbose(s):
if g_verbose:
print s
``` |
{
"source": "joaompinto/shutit",
"score": 2
} |
#### File: joaompinto/shutit/shutit_class.py
```python
from __future__ import print_function
from distutils.dir_util import mkpath
from distutils import spawn
try:
from StringIO import StringIO
except ImportError: # pragma: no cover
from io import StringIO
import argparse
import base64
import codecs
import getpass
import glob
import hashlib
import imp
import json
import logging
import operator
import os
import tarfile
import re
import readline
import string
import sys
import subprocess
import time
import uuid
import texttable
import pexpect
import shutit
import shutit_util
import shutit_global
import shutit_skeleton
import shutit_exam
try:
import ConfigParser
except ImportError: # pragma: no cover
import configparser as ConfigParser
from shutit_sendspec import ShutItSendSpec
from shutit_module import ShutItFailException, ShutItModule
from shutit_pexpect import ShutItPexpectSession
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility/35804945
DEBUG_LEVELV_NUM = 9
DEBUG_LEVELV_NAME = "DEBUGV"
logging.addLevelName(DEBUG_LEVELV_NUM, "DEBUGV")
def debugv(self, message, *args, **kws):
if self.isEnabledFor(DEBUG_LEVELV_NUM):
# Yes, logger takes its '*args' as 'args'.
self._log(DEBUG_LEVELV_NUM, message, args, **kws)
logging.Logger.debugv = debugv
setattr(logging, "DEBUGV", DEBUG_LEVELV_NUM)
logging.DEBUGV
def get_module_file(shutit, module):
shutit.shutit_file_map[module.module_id] = module.__module_file
return shutit.shutit_file_map[module.module_id]
def do_finalize():
"""Runs finalize phase; run after all builds are complete and all modules
have been stopped.
"""
def _finalize(shutit):
# Stop all the modules
shutit.stop_all()
# Finalize in reverse order
shutit.log('PHASE: finalizing object ' + str(shutit), level=logging.DEBUG)
# Login at least once to get the exports.
for module_id in shutit.module_ids(rev=True):
# Only finalize if it's thought to be installed.
if shutit.is_installed(shutit.shutit_map[module_id]):
shutit.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)
if not shutit.shutit_map[module_id].finalize(shutit):
shutit.fail(module_id + ' failed on finalize', shutit_pexpect_child=shutit.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
shutit.logout(echo=False)
for fshutit in shutit_global.shutit_global_object.shutit_objects:
_finalize(fshutit)
class LayerConfigParser(ConfigParser.RawConfigParser):
def __init__(self):
ConfigParser.RawConfigParser.__init__(self)
self.layers = []
def read(self, filenames):
if not isinstance(filenames, list):
filenames = [filenames]
for filename in filenames:
cp = ConfigParser.RawConfigParser()
cp.read(filename)
self.layers.append((cp, filename, None))
return ConfigParser.RawConfigParser.read(self, filenames)
def readfp(self, fp, filename=None):
cp = ConfigParser.RawConfigParser()
fp.seek(0)
cp.readfp(fp, filename)
self.layers.append((cp, filename, fp))
fp.seek(0)
ret = ConfigParser.RawConfigParser.readfp(self, fp, filename)
return ret
def whereset(self, section, option):
for cp, filename, fp in reversed(self.layers):
fp = fp # pylint
if cp.has_option(section, option):
return filename
raise ShutItFailException('[%s]/%s was never set' % (section, option)) # pragma: no cover
def get_config_set(self, section, option):
"""Returns a set with each value per config file in it.
"""
values = set()
for cp, filename, fp in self.layers:
filename = filename # pylint
fp = fp # pylint
if cp.has_option(section, option):
values.add(cp.get(section, option))
return values
def reload(self):
"""
Re-reads all layers again. In theory this should overwrite all the old
values with any newer ones.
It assumes we never delete a config item before reload.
"""
oldlayers = self.layers
self.layers = []
for cp, filename, fp in oldlayers:
cp = cp # pylint
if fp is None:
self.read(filename)
else:
self.readfp(fp, filename)
def remove_section(self, *args, **kwargs):
raise NotImplementedError('''Layer config parsers aren't directly mutable''') # pragma: no cover
def remove_option(self, *args, **kwargs):
raise NotImplementedError('''Layer config parsers aren't directly mutable''') # pragma: no cover
def set(self, *args, **kwargs):
raise NotImplementedError('''Layer config parsers aren\'t directly mutable''') # pragma: no cover
class ShutItInit(object):
"""Object used to initialise a shutit object.
"""
def __init__(self,
action,
logfile='',
loglevel='',
nocolor=False,
delivery='bash',
accept=False,
shutitfiles=None,
script=None,
base_image='ubuntu:16.04',
depends='shutit.tk.setup',
name='',
domain='',
pattern='',
output_dir=False,
vagrant_ssh_access=False,
vagrant_num_machines=None,
vagrant_machine_prefix=None,
vagrant_docker=None,
vagrant_snapshot=None,
vagrant_upload=None,
vagrant_image_name=None,
push=False,
export=False,
save=False,
distro='',
mount_docker=False,
walkthrough=False,
walkthrough_wait=-1,
training=False,
choose_config=False,
config=[],
set=[],
ignorestop=False,
ignoreimage=False,
imageerrorok=False,
tag_modules=False,
image_tag='',
video=-1,
deps_only=False,
echo=False,
history=False,
long_modules=False,
sort='id',
interactive=1,
trace=False,
shutit_module_path=None,
exam=False):
assert isinstance(action,str), shutit_util.print_debug()
assert isinstance(loglevel,str), shutit_util.print_debug()
self.action = action
self.logfile = logfile
self.loglevel = loglevel
self.nocolor = nocolor
if self.action == 'version':
return
elif self.action == 'skeleton':
self.accept = accept
self.shutitfiles = shutitfiles
self.script = script
self.base_image = base_image
self.depends = depends
self.name = name
self.domain = domain
self.pattern = pattern
self.output_dir = output_dir
self.vagrant_ssh_access = vagrant_ssh_access
self.vagrant_num_machines = vagrant_num_machines
self.vagrant_machine_prefix = vagrant_machine_prefix
self.vagrant_docker = vagrant_docker
self.vagrant_snapshot = vagrant_snapshot
self.vagrant_upload = vagrant_upload
self.vagrant_image_name = vagrant_image_name
self.delivery = delivery
assert self.accept in (True,False,None), shutit_util.print_debug()
assert not (self.shutitfiles and self.script), shutit_util.print_debug(msg='Cannot have any two of script, -d/--shutitfiles <files> as arguments')
assert isinstance(self.base_image,str), shutit_util.print_debug()
assert isinstance(self.depends,str), shutit_util.print_debug()
#assert isinstance(self.shutitfiles,list)
assert isinstance(self.name,str), shutit_util.print_debug()
assert isinstance(self.domain,str), shutit_util.print_debug()
assert isinstance(self.pattern,str), shutit_util.print_debug()
assert isinstance(self.output_dir,bool), shutit_util.print_debug()
assert isinstance(self.vagrant_ssh_access,bool), shutit_util.print_debug()
#assert isinstance(self.delivery,str), shutit_util.print_debug()
elif self.action == 'run':
self.shutitfiles = shutitfiles
self.delivery = delivery
self.echo = echo
#assert isinstance(self.delivery,str), shutit_util.print_debug()
#assert isinstance(self.shutitfiles,list), shutit_util.print_debug()
elif self.action == 'build' or self.action == 'list_configs' or self.action == 'list_modules':
self.push = push
self.export = export
self.save = save
self.distro = distro
self.mount_docker = mount_docker
self.walkthrough = walkthrough
self.walkthrough_wait = walkthrough_wait
self.training = training
self.choose_config = choose_config
self.config = config
self.set = set
self.ignorestop = ignorestop
self.ignoreimage = ignoreimage
self.imageerrorok = imageerrorok
self.tag_modules = tag_modules
self.image_tag = image_tag
self.video = video
self.deps_only = deps_only
self.echo = echo
self.delivery = delivery
self.interactive = interactive
self.trace = trace
self.shutit_module_path = shutit_module_path
self.exam = exam
self.history = history
self.sort = sort
self.long = long_modules
# Video/exam/training logic
if self.exam and not self.training:
shutit_global.shutit_global_object.shutit_print('Exam starting up')
self.training = True
if (self.exam or self.training) and not self.walkthrough:
if not self.exam:
shutit_global.shutit_global_object.shutit_print('--training or --exam implies --walkthrough, setting --walkthrough on!')
self.walkthrough = True
if isinstance(self.video, list) and self.video[0] >= 0:
self.walkthrough = True
self.walkthrough_wait = self.video[0]
self.video = True
if (self.video != -1 and self.video) and self.training:
shutit_global.shutit_global_object.shutit_print('--video and --training mode incompatible')
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if (self.video != -1 and self.video) and self.exam:
shutit_global.shutit_global_object.shutit_print('--video and --exam mode incompatible')
shutit_global.shutit_global_object.handle_exit(exit_code=1)
#assert isinstance(self.delivery,str), shutit_util.print_debug()
# If the image_tag has been set then ride roughshod over the ignoreimage value if not supplied
if self.image_tag != '' and self.ignoreimage is None:
self.ignoreimage = True
# If ignoreimage is still not set, then default it to False
if self.ignoreimage is None:
self.ignoreimage = False
if self.delivery in ('bash',):
if self.image_tag != '': # pragma: no cover
shutit_global.shutit_global_object.shutit_print('delivery method specified (' + self.delivery + ') and image_tag argument make no sense')
shutit_global.shutit_global_object.handle_exit(exit_code=1)
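# Illustrative sketch (assumed usage, not from the original source): constructing
# a ShutItInit object for a plain 'run' action. The ShutItFile path is
# hypothetical; argument names follow the signature above.
#
#   init = ShutItInit('run',
#                     shutitfiles=['./ShutItFile'],
#                     delivery='bash',
#                     loglevel='INFO')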
class ShutIt(object):
"""ShutIt build class.
Represents an instance of a ShutIt run/session/build with associated config.
"""
def __init__(self,
session_type,
standalone):
"""Constructor.
Sets up:
- shutit_modules - representation of loaded shutit modules
- shutit_main_dir - directory in which shutit is located
- cfg - dictionary of configuration of build
- shutit_map - maps module_ids to module objects
standalone - Whether this is a shutit object created dynamically (True)
within a python script, or as part of a shutit invocation (False).
If it's created dynamically, then this can make a difference to
how the configuration is collected.
"""
self.standalone = standalone
# Store the root directory of this application.
# http://stackoverflow.com/questions/5137497
self.build = {}
self.build['report'] = ''
self.build['mount_docker'] = False
self.build['distro_override'] = ''
self.build['shutit_command_history'] = []
self.build['walkthrough'] = False # Whether to honour 'walkthrough' requests
self.build['walkthrough_wait'] = -1 # mysterious problems setting this to 1 with fixterm
self.build['log_config_path'] = None
self.build['step_through'] = False
self.build['ctrlc_stop'] = False
self.build['ctrlc_passthrough'] = False
self.build['have_read_config_file'] = False
self.build['vagrant_run_dir'] = None
self.build['this_vagrant_run_dir'] = None
self.build['accept_defaults'] = None
self.build['exam'] = False
# Host information - move to global?
self.host = {}
self.host['shutit_path'] = sys.path[0]
self.host['calling_path'] = os.getcwd()
self.build['asciinema_session'] = None
self.build['asciinema_session_file'] = None
# These used to be in shutit_global, so we pass them in as args so
# the original reference can be put in shutit_global
self.repository = {}
self.expect_prompts = {}
self.list_configs = {}
self.target = {}
self.action = {}
# TODO: do we ever have more than one of these? YES
self.shutit_pexpect_sessions = {}
self.shutit_map = {}
self.shutit_file_map = {}
self.list_modules = {} # list_modules' options
self.current_shutit_pexpect_session = None
self.config_parser = None
self.shutit_modules = set()
		# These are new members we don't have to provide compatibility for
self.conn_modules = set()
self.shutit_main_dir = os.path.abspath(os.path.dirname(__file__))
self.shutit_global_object = shutit_global.shutit_global_object
# Needed for patterns
self.cfg = {} # used to store module information
self.shutitfile = {}
self.cfg['shutitfile'] = self.shutitfile # required for patterns
self.cfg['skeleton'] = {} # required for patterns
# Session type
self.session_type = session_type
# Unique id
self.uuid_str = str(uuid.uuid4())
# Logging
self.loglevel = None
self.logfile = None
self.last_log_time = time.time()
self.logging_setup_done = False
self.nocolor = False
# Vagrant machine data
self.vagrant_machines = None
def __str__(self):
string = '\n======= SHUTIT OBJECT BEGIN ========'
string += '\nstandalone=' + str(self.standalone)
string += '\nbuild=' + str(self.build)
string += '\nhost=' + str(self.host)
string += '\nrepository=' + str(self.repository)
string += '\texpect_prompts=' + str(self.expect_prompts)
string += '\tlist_configs=' + str(self.list_configs)
string += '\ttarget=' + str(self.target)
string += '\taction=' + str(self.action)
string += '\tshutit_pexpect_sessions=' + str(self.shutit_pexpect_sessions)
string += '\tshutit_map=' + str(self.shutit_map)
string += '\tshutit_file_map=' + str(self.shutit_file_map)
string += '\tlist_modules=' + str(self.list_modules)
string += '\tcurrent_shutit_pexpect_session=' + str(self.current_shutit_pexpect_session)
string += '\tconfig_parser=' + str(self.config_parser)
string += '\tshutit_modules=' + str(self.shutit_modules)
string += '\tconn_modules=' + str(self.conn_modules)
string += '\tshutit_main_dir=' + str(self.shutit_main_dir)
string += '\tcfg=' + str(self.cfg)
string += '\tcurrent_shutit_pexpect_session=' + str(self.current_shutit_pexpect_session)
string += '\tuuid_str=' + str(self.uuid_str)
string += '\tnocolor=' + str(self.nocolor)
string += '\tlogging_setup_done=' + str(self.logging_setup_done)
string += '\tlast_log_time=' + str(self.last_log_time)
string += '\tlogfile=' + str(self.logfile)
string += '\tloglevel=' + str(self.loglevel)
string += '\tshutitfile=' + str(self.shutitfile)
string += '\tsession_type=' + str(self.session_type)
if self.current_shutit_pexpect_session:
string += '\tlogin_stack=' + str(self.current_shutit_pexpect_session.login_stack)
string += '\n======= SHUTIT OBJECT DONE ========'
return string
	def setup_logging(self):
		logformat='%(asctime)s %(levelname)s: %(message)s'
		logobj = logging.getLogger(self.uuid_str)
		# Decide where log output goes: the managed-panes stream, stdout, or a logfile.
		if self.shutit_global_object.managed_panes:
			# Set up logging for https://stackoverflow.com/questions/31999627/storing-logger-messages-in-a-string
			logging.basicConfig(format=logformat, stream=self.shutit_global_object.logstream)
		elif self.logfile == '':
			logging.basicConfig(format=logformat)
		else:
			logging.basicConfig(format=logformat, filename=self.logfile)
		# Map the configured loglevel string to a logging level, defaulting to DEBUG.
		self.loglevel = self.loglevel.upper()
		if self.loglevel == 'DEBUGV':
			logobj.level = logging.DEBUGV
		elif self.loglevel == 'DEBUG':
			logobj.level = logging.DEBUG
		elif self.loglevel == 'ERROR':
			logobj.level = logging.ERROR
		elif self.loglevel in ('WARN','WARNING'):
			logobj.level = logging.WARNING
		elif self.loglevel == 'CRITICAL':
			logobj.level = logging.CRITICAL
		elif self.loglevel == 'INFO':
			logobj.level = logging.INFO
		else:
			logobj.level = logging.DEBUG
		self.loglevel = logobj.getEffectiveLevel()
		self.logging_setup_done = True
def get_shutit_pexpect_session_environment(self, environment_id):
"""Returns the first shutit_pexpect_session object related to the given
environment-id
"""
if not isinstance(environment_id, str):
self.fail('Wrong argument type in get_shutit_pexpect_session_environment') # pragma: no cover
for env in shutit_global.shutit_global_object.shutit_pexpect_session_environments:
if env.environment_id == environment_id:
return env
return None
def get_current_shutit_pexpect_session_environment(self, note=None):
"""Returns the current environment from the currently-set default
pexpect child.
"""
self.handle_note(note)
current_session = self.get_current_shutit_pexpect_session()
if current_session is not None:
res = current_session.current_environment
else:
res = None
self.handle_note_after(note)
return res
def get_current_shutit_pexpect_session(self, note=None):
"""Returns the currently-set default pexpect child.
@return: default shutit pexpect child object
"""
self.handle_note(note)
res = self.current_shutit_pexpect_session
self.handle_note_after(note)
return res
def get_shutit_pexpect_sessions(self, note=None):
"""Returns all the shutit_pexpect_session keys for this object.
@return: list of all shutit_pexpect_session keys (pexpect_session_ids)
"""
self.handle_note(note)
sessions = []
for key in self.shutit_pexpect_sessions:
			sessions.append(self.shutit_pexpect_sessions[key])
self.handle_note_after(note)
return sessions
def get_default_shutit_pexpect_session_expect(self):
"""Returns the currently-set default pexpect string (usually a prompt).
@return: default pexpect string
"""
return self.current_shutit_pexpect_session.default_expect
def get_default_shutit_pexpect_session_check_exit(self):
"""Returns default value of check_exit. See send method.
@rtype: boolean
@return: Default check_exit value
"""
return self.current_shutit_pexpect_session.check_exit
def set_default_shutit_pexpect_session(self, shutit_pexpect_session):
"""Sets the default pexpect child.
@param shutit_pexpect_session: pexpect child to set as default
"""
assert isinstance(shutit_pexpect_session, ShutItPexpectSession), shutit_util.print_debug()
self.current_shutit_pexpect_session = shutit_pexpect_session
return True
def set_default_shutit_pexpect_session_expect(self, expect=None):
"""Sets the default pexpect string (usually a prompt).
Defaults to the configured root prompt if no
argument is passed.
@param expect: String to expect in the output
@type expect: string
"""
if expect is None:
self.current_shutit_pexpect_session.default_expect = self.expect_prompts['ORIGIN_ENV']
else:
self.current_shutit_pexpect_session.default_expect = expect
return True
def fail(self, msg, shutit_pexpect_child=None, throw_exception=False):
"""Handles a failure, pausing if a pexpect child object is passed in.
@param shutit_pexpect_child: pexpect child to work on
@param throw_exception: Whether to throw an exception.
@type throw_exception: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# Note: we must not default to a child here
if shutit_pexpect_child is not None:
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
shutit_util.print_debug(sys.exc_info())
shutit_pexpect_session.pause_point('Pause point on fail: ' + msg, color='31')
if throw_exception:
sys.stderr.write('Error caught: ' + msg + '\n')
sys.stderr.write('\n')
shutit_util.print_debug(sys.exc_info())
raise ShutItFailException(msg)
else:
# This is an "OK" failure, ie we don't need to throw an exception.
# However, it's still a "failure", so return 1
shutit_global.shutit_global_object.handle_exit(exit_code=1,msg=msg)
shutit_global.shutit_global_object.yield_to_draw()
def get_current_environment(self, note=None):
"""Returns the current environment id from the current
shutit_pexpect_session
"""
shutit_global.shutit_global_object.yield_to_draw()
self.handle_note(note)
res = self.get_current_shutit_pexpect_session_environment().environment_id
self.handle_note_after(note)
return res
def multisend(self,
send,
send_dict,
expect=None,
shutit_pexpect_child=None,
timeout=shutit_global.shutit_global_object.default_timeout,
check_exit=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
escape=False,
echo=None,
note=None,
secret=False,
nonewline=False,
loglevel=logging.DEBUG):
"""Multisend. Same as send, except it takes multiple sends and expects in a dict that are
processed while waiting for the end "expect" argument supplied.
@param send_dict: see shutit_sendspec
@param expect: String or list of strings of final expected output that returns from this function. See send()
@param send: See send()
@param shutit_pexpect_child: See send()
@param timeout: See send()
@param check_exit: See send()
@param fail_on_empty_before: See send()
@param record_command: See send()
@param exit_values: See send()
@param echo: See send()
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
assert isinstance(send_dict, dict), shutit_util.print_debug()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
expect = expect or self.get_current_shutit_pexpect_session().default_expect
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.multisend(ShutItSendSpec(shutit_pexpect_session,
send=send,
send_dict=send_dict,
expect=expect,
timeout=timeout,
check_exit=check_exit,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
escape=escape,
echo=echo,
note=note,
loglevel=loglevel,
secret=secret,
nonewline=nonewline))
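	# Illustrative sketch (assumed usage, not from the original source): 'shutit'
	# is a ShutIt instance with a live pexpect session; multisend answers
	# interactive prompts by mapping expected prompt fragments to replies.
	#
	#   shutit.multisend('apt-get install -y postfix',
	#                    {'Postfix Configuration': '',
	#                     'General type of mail configuration': 'Internet Site'})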
def send_and_require(self,
send,
regexps,
not_there=False,
shutit_pexpect_child=None,
echo=None,
note=None,
loglevel=logging.INFO):
"""Send string and require the item in the output.
See send_until
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_and_require(send,
regexps,
not_there=not_there,
echo=echo,
note=note,
loglevel=loglevel)
def send_until(self,
send,
regexps,
not_there=False,
shutit_pexpect_child=None,
cadence=5,
retries=100,
echo=None,
note=None,
debug_command=None,
pause_point_on_fail=True,
nonewline=False,
loglevel=logging.INFO):
"""Send string on a regular cadence until a string is either seen, or the timeout is triggered.
@param send: See send()
@param regexps: List of regexps to wait for.
		@param not_there: If True, wait until the regexp is not seen in the output. If False
wait until a regexp is seen in the output (default)
@param shutit_pexpect_child: See send()
@param echo: See send()
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_until(send,
regexps,
not_there=not_there,
cadence=cadence,
retries=retries,
echo=echo,
note=note,
loglevel=loglevel,
debug_command=debug_command,
nonewline=nonewline,
pause_point_on_fail=pause_point_on_fail)
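	# Illustrative sketch (assumed usage): poll until a hypothetical service
	# reports itself active, checking every 5 seconds for up to 60 retries.
	#
	#   shutit.send_until('systemctl is-active myservice || true',
	#                     ['^active'],
	#                     cadence=5,
	#                     retries=60)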
def challenge(self,
task_desc,
expect=None,
hints=None,
congratulations='OK',
failed='FAILED',
expect_type='exact',
challenge_type='command',
shutit_pexpect_child=None,
timeout=None,
check_exit=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
echo=True,
escape=False,
pause=1,
loglevel=logging.DEBUG,
follow_on_context=None,
num_stages=None):
"""Set the user a task to complete, success being determined by matching the output.
Either pass in regexp(s) desired from the output as a string or a list, or an md5sum of the output wanted.
@param follow_on_context On success, move to this context. A dict of information about that context.
context = the type of context, eg docker, bash
ok_container_name = if passed, send user to this container
reset_container_name = if resetting, send user to this container
@param challenge_type Behaviour of challenge made to user
command = check for output of single command
golf = user gets a pause point, and when leaving, command follow_on_context['check_command'] is run to check the output
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.challenge(self,
task_desc=task_desc,
expect=expect,
hints=hints,
congratulations=congratulations,
failed=failed,
expect_type=expect_type,
challenge_type=challenge_type,
timeout=timeout,
check_exit=check_exit,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
echo=echo,
escape=escape,
pause=pause,
loglevel=loglevel,
follow_on_context=follow_on_context,
num_stages=num_stages)
# Alternate names
practice = challenge
golf = challenge
def send(self,
send,
expect=None,
shutit_pexpect_child=None,
timeout=None,
check_exit=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
echo=None,
escape=False,
retry=3,
note=None,
assume_gnu=True,
follow_on_commands=None,
searchwindowsize=None,
maxread=None,
delaybeforesend=None,
secret=False,
nonewline=False,
background=False,
wait=True,
block_other_commands=True,
loglevel=logging.INFO):
"""Send string as a shell command, and wait until the expected output
is seen (either a string or any from a list of strings) before
returning. The expected string will default to the currently-set
default expected string (see get_default_shutit_pexpect_session_expect)
Returns the pexpect return value (ie which expected string in the list
matched)
@param send: See shutit.ShutItSendSpec
@param expect: See shutit.ShutItSendSpec
@param shutit_pexpect_child: See shutit.ShutItSendSpec
@param timeout: See shutit.ShutItSendSpec
@param check_exit: See shutit.ShutItSendSpec
@param fail_on_empty_before:See shutit.ShutItSendSpec
@param record_command:See shutit.ShutItSendSpec
@param exit_values:See shutit.ShutItSendSpec
@param echo: See shutit.ShutItSendSpec
@param escape: See shutit.ShutItSendSpec
@param retry: See shutit.ShutItSendSpec
@param note: See shutit.ShutItSendSpec
@param assume_gnu: See shutit.ShutItSendSpec
@param wait: See shutit.ShutItSendSpec
@param block_other_commands: See shutit.ShutItSendSpec.block_other_commands
@return: The pexpect return value (ie which expected string in the list matched)
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
ignore_background = not wait
return shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
send,
expect=expect,
timeout=timeout,
check_exit=check_exit,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
echo=echo,
escape=escape,
retry=retry,
note=note,
assume_gnu=assume_gnu,
loglevel=loglevel,
follow_on_commands=follow_on_commands,
searchwindowsize=searchwindowsize,
maxread=maxread,
delaybeforesend=delaybeforesend,
secret=secret,
nonewline=nonewline,
run_in_background=background,
ignore_background=ignore_background,
block_other_commands=block_other_commands))
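	# Illustrative sketch (assumed usage): run shell commands on the target and
	# wait for the default expected prompt. Paths and commands are hypothetical.
	#
	#   shutit.send('mkdir -p /opt/myapp && cd /opt/myapp')
	#   shutit.send('make install', timeout=3600, note='Build and install the app')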
def send_and_return_status(self,
send,
expect=None,
shutit_pexpect_child=None,
timeout=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
echo=None,
escape=False,
retry=3,
note=None,
assume_gnu=True,
follow_on_commands=None,
loglevel=logging.INFO):
"""Returns true if a good exit code was received (usually 0)
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
send=send,
expect=expect,
timeout=timeout,
check_exit=False,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
echo=echo,
escape=escape,
retry=retry,
note=note,
assume_gnu=assume_gnu,
loglevel=loglevel,
follow_on_commands=follow_on_commands))
return shutit_pexpect_session.check_last_exit_values(send,
check_exit=True,
expect=expect,
exit_values=exit_values,
retry=retry,
retbool=True)
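	# Illustrative sketch (assumed usage): branch on the exit status of a command
	# rather than failing the build when it returns nonzero.
	#
	#   if not shutit.send_and_return_status('grep -q docker /proc/1/cgroup'):
	#       shutit.log('Not running inside a Docker container', level=logging.INFO)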
def handle_note(self, note, command='', training_input=''):
"""Handle notes and walkthrough option.
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
if self.build['walkthrough'] and note != None and note != '':
assert isinstance(note, str), shutit_util.print_debug()
wait = self.build['walkthrough_wait']
wrap = '\n' + 80*'=' + '\n'
message = wrap + note + wrap
if command != '':
message += 'Command to be run is:\n\t' + command + wrap
if wait >= 0:
self.pause_point(message, color=31, wait=wait)
else:
if training_input != '' and self.build['training']:
if len(training_input.split('\n')) == 1:
shutit_global.shutit_global_object.shutit_print(shutit_util.colorise('31',message))
while shutit_util.util_raw_input(prompt=shutit_util.colorise('32','Enter the command to continue (or "s" to skip typing it in): ')) not in (training_input,'s'):
shutit_global.shutit_global_object.shutit_print('Wrong! Try again!')
shutit_global.shutit_global_object.shutit_print(shutit_util.colorise('31','OK!'))
else:
self.pause_point(message + '\nToo long to use for training, so skipping the option to type in!\nHit CTRL-] to continue', color=31)
else:
self.pause_point(message + '\nHit CTRL-] to continue', color=31)
return True
def handle_note_after(self, note, training_input=''):
shutit_global.shutit_global_object.yield_to_draw()
if self.build['walkthrough'] and note != None:
wait = self.build['walkthrough_wait']
if wait >= 0:
time.sleep(wait)
if training_input != '' and self.build['training']:
			self.pause_point('Training mode - pause point.\nDo what you like, but try not to disturb state too much,\neg by moving directories or exiting/entering a new shell.\nHit CTRL-] to continue.')
return True
def expect_allow_interrupt(self,
shutit_pexpect_child,
expect,
timeout,
iteration_s=1):
"""This function allows you to interrupt the run at more or less any
point by breaking up the timeout into interactive chunks.
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
accum_timeout = 0
if isinstance(expect, str):
expect = [expect]
if timeout < 1:
timeout = 1
if iteration_s > timeout:
iteration_s = timeout - 1
if iteration_s < 1:
iteration_s = 1
timed_out = True
iteration_n = 0
while accum_timeout < timeout:
iteration_n+=1
res = shutit_pexpect_session.expect(expect, timeout=iteration_s, iteration_n=iteration_n)
if res == len(expect):
if self.build['ctrlc_stop']:
timed_out = False
self.build['ctrlc_stop'] = False
break
accum_timeout += iteration_s
else:
return res
if timed_out and not shutit_global.shutit_global_object.determine_interactive():
self.log('Command timed out, trying to get terminal back for you', level=logging.DEBUG)
self.fail('Timed out and could not recover') # pragma: no cover
else:
if shutit_global.shutit_global_object.determine_interactive():
shutit_pexpect_child.send('\x03')
res = shutit_pexpect_session.expect(expect,timeout=1)
if res == len(expect):
shutit_pexpect_child.send('\x1a')
res = shutit_pexpect_session.expect(expect,timeout=1)
if res == len(expect):
self.fail('CTRL-C sent by ShutIt following a timeout, and could not recover') # pragma: no cover
shutit_pexpect_session.pause_point('CTRL-C sent by ShutIt following a timeout; the command has been cancelled')
return res
else:
if timed_out:
self.fail('Timed out and interactive, but could not recover') # pragma: no cover
else:
self.fail('CTRL-C hit and could not recover') # pragma: no cover
self.fail('Should not get here (expect_allow_interrupt)') # pragma: no cover
return True
def run_script(self,
script,
shutit_pexpect_child=None,
in_shell=True,
echo=None,
note=None,
loglevel=logging.DEBUG):
"""Run the passed-in string as a script on the target's command line.
@param script: String representing the script. It will be de-indented
and stripped before being run.
@param shutit_pexpect_child: See send()
@param in_shell: Indicate whether we are in a shell or not. (Default: True)
@param note: See send()
@type script: string
@type in_shell: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.run_script(script,
in_shell=in_shell,
echo=echo,
note=note,
loglevel=loglevel)
def send_file(self,
path,
contents,
shutit_pexpect_child=None,
truncate=False,
note=None,
user=None,
echo=False,
group=None,
loglevel=logging.INFO,
encoding=None):
"""Sends the passed-in string as a file to the passed-in path on the
target.
@param path: Target location of file on target.
@param contents: Contents of file as a string.
@param shutit_pexpect_child: See send()
@param note: See send()
@param user: Set ownership to this user (defaults to whoami)
@param group: Set group to this user (defaults to first group in groups)
@type path: string
@type contents: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_file(path,
contents,
truncate=truncate,
note=note,
echo=echo,
user=user,
group=group,
loglevel=loglevel,
encoding=encoding)
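	# Illustrative sketch (assumed usage): write a small configuration file on the
	# target. The path, contents and ownership are hypothetical.
	#
	#   shutit.send_file('/etc/myapp/myapp.conf',
	#                    'listen_port=8080\nlog_level=info\n',
	#                    user='root',
	#                    group='root')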
def chdir(self,
path,
shutit_pexpect_child=None,
timeout=shutit_global.shutit_global_object.default_timeout,
note=None,
loglevel=logging.DEBUG):
"""How to change directory will depend on whether we are in delivery mode bash or docker.
@param path: Path to send file to.
@param shutit_pexpect_child: See send()
@param timeout: Timeout on response
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.chdir(path,timeout=timeout,note=note,loglevel=loglevel)
def send_host_file(self,
path,
hostfilepath,
expect=None,
shutit_pexpect_child=None,
note=None,
user=None,
group=None,
loglevel=logging.INFO):
"""Send file from host machine to given path
@param path: Path to send file to.
@param hostfilepath: Path to file from host to send to target.
@param expect: See send()
@param shutit_pexpect_child: See send()
@param note: See send()
@param user: Set ownership to this user (defaults to whoami)
@param group: Set group to this user (defaults to first group in groups)
@type path: string
@type hostfilepath: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
expect = expect or self.get_current_shutit_pexpect_session().default_expect
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
self.handle_note(note, 'Sending file from host: ' + hostfilepath + ' to target path: ' + path)
self.log('Sending file from host: ' + hostfilepath + ' to: ' + path, level=loglevel)
if user is None:
user = shutit_pexpect_session.whoami()
if group is None:
group = self.whoarewe()
# TODO: use gz for both
if os.path.isfile(hostfilepath):
shutit_pexpect_session.send_file(path,
codecs.open(hostfilepath,mode='rb',encoding='iso-8859-1').read(),
user=user,
group=group,
loglevel=loglevel,
encoding='iso-8859-1')
elif os.path.isdir(hostfilepath):
# Need a binary type encoding for gzip(?)
self.send_host_dir(path,
hostfilepath,
user=user,
group=group,
loglevel=loglevel)
else:
self.fail('send_host_file - file: ' + hostfilepath + ' does not exist as file or dir. cwd is: ' + os.getcwd(), shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
self.handle_note_after(note=note)
return True
def send_host_dir(self,
path,
hostfilepath,
expect=None,
shutit_pexpect_child=None,
note=None,
user=None,
group=None,
loglevel=logging.DEBUG):
"""Send directory and all contents recursively from host machine to
given path. It will automatically make directories on the target.
@param path: Path to send directory to (places hostfilepath inside path as a subfolder)
@param hostfilepath: Path to file from host to send to target
@param expect: See send()
@param shutit_pexpect_child: See send()
@param note: See send()
@param user: Set ownership to this user (defaults to whoami)
@param group: Set group to this user (defaults to first group in groups)
@type path: string
@type hostfilepath: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
expect = expect or self.get_current_shutit_pexpect_session().default_expect
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
self.handle_note(note, 'Sending host directory: ' + hostfilepath + ' to target path: ' + path)
self.log('Sending host directory: ' + hostfilepath + ' to: ' + path, level=logging.INFO)
shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
send=' command mkdir -p ' + path,
echo=False,
loglevel=loglevel))
if user is None:
user = shutit_pexpect_session.whoami()
if group is None:
group = self.whoarewe()
# Create gzip of folder
#import pdb
#pdb.set_trace()
if shutit_pexpect_session.command_available('tar'):
gzipfname = '/tmp/shutit_tar_tmp.tar.gz'
with tarfile.open(gzipfname, 'w:gz') as tar:
tar.add(hostfilepath, arcname=os.path.basename(hostfilepath))
shutit_pexpect_session.send_file(gzipfname,
codecs.open(gzipfname,mode='rb',encoding='iso-8859-1').read(),
user=user,
group=group,
loglevel=loglevel,
encoding='iso-8859-1')
shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
send=' command mkdir -p ' + path + ' && command tar -C ' + path + ' -zxf ' + gzipfname))
else:
# If no gunzip, fall back to old slow method.
for root, subfolders, files in os.walk(hostfilepath):
subfolders.sort()
files.sort()
for subfolder in subfolders:
shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
send=' command mkdir -p ' + path + '/' + subfolder,
echo=False,
loglevel=loglevel))
self.log('send_host_dir recursing to: ' + hostfilepath + '/' + subfolder, level=logging.DEBUG)
self.send_host_dir(path + '/' + subfolder,
hostfilepath + '/' + subfolder,
expect=expect,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel)
for fname in files:
hostfullfname = os.path.join(root, fname)
targetfname = os.path.join(path, fname)
self.log('send_host_dir sending file ' + hostfullfname + ' to ' + 'target file: ' + targetfname, level=logging.DEBUG)
shutit_pexpect_session.send_file(targetfname,
codecs.open(hostfullfname,mode='rb',encoding='iso-8859-1').read(),
user=user,
group=group,
loglevel=loglevel,
encoding='iso-8859-1')
self.handle_note_after(note=note)
return True
def file_exists(self,
filename,
shutit_pexpect_child=None,
directory=False,
note=None,
loglevel=logging.DEBUG):
"""Return True if file exists on the target host, else False
@param filename: Filename to determine the existence of.
@param shutit_pexpect_child: See send()
@param directory: Indicate that the file is a directory.
@param note: See send()
@type filename: string
@type directory: boolean
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.file_exists(filename=filename,directory=directory,note=note,loglevel=loglevel)
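	# Illustrative sketch (assumed usage): only create a (hypothetical) directory
	# on the target if it is not already there.
	#
	#   if not shutit.file_exists('/etc/myapp', directory=True):
	#       shutit.send('mkdir -p /etc/myapp')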
def get_file_perms(self,
filename,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Returns the permissions of the file on the target as an octal
string triplet.
@param filename: Filename to get permissions of.
@param shutit_pexpect_child: See send()
@param note: See send()
@type filename: string
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.get_file_perms(filename,note=note,loglevel=loglevel)
def remove_line_from_file(self,
line,
filename,
shutit_pexpect_child=None,
match_regexp=None,
literal=False,
note=None,
loglevel=logging.DEBUG):
"""Removes line from file, if it exists.
Must be exactly the line passed in to match.
Returns True if there were no problems, False if there were.
@param line: Line to remove.
@param filename Filename to remove it from.
@param shutit_pexpect_child: See send()
@param match_regexp: If supplied, a regexp to look for in the file
instead of the line itself,
handy if the line has awkward characters in it.
@param literal: If true, then simply grep for the exact string without
bash interpretation. (Default: False)
@param note: See send()
@type line: string
@type filename: string
@type match_regexp: string
@type literal: boolean
@return: True if the line was matched and deleted, False otherwise.
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.remove_line_from_file(line,filename,match_regexp=match_regexp,literal=literal,note=note,loglevel=loglevel)
def change_text(self,
text,
fname,
pattern=None,
expect=None,
shutit_pexpect_child=None,
before=False,
force=False,
delete=False,
note=None,
replace=False,
line_oriented=True,
create=True,
loglevel=logging.DEBUG):
"""Change text in a file.
Returns None if there was no match for the regexp, True if it was matched
and replaced, and False if the file did not exist or there was some other
problem.
@param text: Text to insert.
@param fname: Filename to insert text to
@param pattern: Regexp for a line to match and insert after/before/replace.
If none, put at end of file.
@param expect: See send()
@param shutit_pexpect_child: See send()
@param before: Whether to place the text before or after the matched text.
@param force: Force the insertion even if the text is in the file.
@param delete: Delete text from file rather than insert
@param replace: Replace matched text with passed-in text. If nothing matches, then append.
@param note: See send()
@param line_oriented: Consider the pattern on a per-line basis (default True).
Can match any continuous section of the line, eg 'b.*d' will match the line: 'abcde'
		                     If not line_oriented, the regexp is applied with the flags re.DOTALL and
		                     re.MULTILINE enabled
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
expect = expect or self.get_current_shutit_pexpect_session().default_expect
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.change_text(text,
fname,
pattern=pattern,
before=before,
force=force,
delete=delete,
note=note,
replace=replace,
line_oriented=line_oriented,
create=create,
loglevel=loglevel)
def insert_text(self,
text,
fname,
pattern=None,
expect=None,
shutit_pexpect_child=None,
before=False,
force=False,
note=None,
replace=False,
line_oriented=True,
create=True,
loglevel=logging.DEBUG):
"""Insert a chunk of text at the end of a file, or after (or before) the first matching pattern
in given file fname.
See change_text"""
shutit_global.shutit_global_object.yield_to_draw()
return self.change_text(text=text,
fname=fname,
pattern=pattern,
expect=expect,
shutit_pexpect_child=shutit_pexpect_child,
before=before,
force=force,
note=note,
line_oriented=line_oriented,
create=create,
replace=replace,
delete=False,
loglevel=loglevel)
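	# Illustrative sketch (assumed usage): insert a line before the first matching
	# pattern in a (hypothetical) config file; with no pattern the text would be
	# appended to the end of the file.
	#
	#   shutit.insert_text('ServerName example.com\n',
	#                      '/etc/apache2/apache2.conf',
	#                      pattern='^<VirtualHost',
	#                      before=True)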
def delete_text(self,
text,
fname,
pattern=None,
expect=None,
shutit_pexpect_child=None,
note=None,
before=False,
force=False,
line_oriented=True,
loglevel=logging.DEBUG):
"""Delete a chunk of text from a file.
See insert_text.
"""
shutit_global.shutit_global_object.yield_to_draw()
return self.change_text(text,
fname,
pattern,
expect,
shutit_pexpect_child,
before,
force,
note=note,
delete=True,
line_oriented=line_oriented,
loglevel=loglevel)
def replace_text(self,
text,
fname,
pattern=None,
expect=None,
shutit_pexpect_child=None,
note=None,
before=False,
force=False,
line_oriented=True,
loglevel=logging.DEBUG):
"""Replace a chunk of text from a file.
See insert_text.
"""
shutit_global.shutit_global_object.yield_to_draw()
return self.change_text(text,
fname,
pattern,
expect,
shutit_pexpect_child,
before,
force,
note=note,
line_oriented=line_oriented,
replace=True,
loglevel=loglevel)
def add_line_to_file(self, line, filename, expect=None, shutit_pexpect_child=None, match_regexp=None, loglevel=logging.DEBUG):
"""Deprecated.
Use replace/insert_text instead.
Adds line to file if it doesn't exist (unless Force is set, which it is not by default).
Creates the file if it doesn't exist.
Must be exactly the line passed in to match.
Returns True if line(s) added OK, False if not.
If you have a lot of non-unique lines to add, it's a good idea to have a sentinel value to add first, and then if that returns true, force the remainder.
@param line: Line to add. If a list, processed per-item, and match_regexp ignored.
@param filename: Filename to add it to.
@param expect: See send()
@param shutit_pexpect_child: See send()
@param match_regexp: If supplied, a regexp to look for in the file instead of the line itself, handy if the line has awkward characters in it.
@type line: string
@type filename: string
@type match_regexp: string
"""
shutit_global.shutit_global_object.yield_to_draw()
if isinstance(line, str):
lines = [line]
elif isinstance(line, list):
lines = line
match_regexp = None
fail = False
for fline in lines:
if match_regexp is None:
this_match_regexp = fline
else:
this_match_regexp = match_regexp
if not self.replace_text(fline,
filename,
pattern=this_match_regexp,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
loglevel=loglevel):
fail = True
if fail:
return False
return True
def add_to_bashrc(self, line, shutit_pexpect_child=None, match_regexp=None, note=None, loglevel=logging.DEBUG):
"""Takes care of adding a line to everyone's bashrc
(/etc/bash.bashrc, /etc/profile).
@param line: Line to add.
@param shutit_pexpect_child: See send()
@param match_regexp: See add_line_to_file()
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
shutit_pexpect_session.add_to_bashrc(line,match_regexp=match_regexp,note=note,loglevel=loglevel)
return True
def get_url(self,
filename,
locations,
command='curl',
shutit_pexpect_child=None,
timeout=shutit_global.shutit_global_object.default_timeout,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
retry=3,
note=None,
loglevel=logging.DEBUG):
"""Handles the getting of a url for you.
Example:
get_url('somejar.jar', ['ftp://loc.org','http://anotherloc.com/jars'])
@param filename: name of the file to download
@param locations: list of URLs whence the file can be downloaded
		@param command: program to use to download the file (Default: curl)
@param shutit_pexpect_child: See send()
@param timeout: See send()
@param fail_on_empty_before: See send()
@param record_command: See send()
@param exit_values: See send()
@param retry: How many times to retry the download
in case of failure. Default: 3
@param note: See send()
@type filename: string
@type locations: list of strings
@type retry: integer
@return: True if the download was completed successfully, False otherwise.
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.get_url(filename,
locations,
send=command,
timeout=timeout,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
retry=retry,
note=note,
loglevel=loglevel)
def user_exists(self,
user,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Returns true if the specified username exists.
@param user: username to check for
@param shutit_pexpect_child: See send()
@param note: See send()
@type user: string
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
		return shutit_pexpect_session.user_exists(user,note=note,loglevel=loglevel)
def package_installed(self,
package,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Returns True if we can be sure the package is installed.
@param package: Package as a string, eg 'wget'.
@param shutit_pexpect_child: See send()
@param note: See send()
@rtype: boolean
"""
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
		return shutit_pexpect_session.package_installed(package,note=note,loglevel=loglevel)
def command_available(self,
command,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.command_available(command,note=note,loglevel=loglevel)
def is_shutit_installed(self,
module_id,
note=None,
loglevel=logging.DEBUG):
"""Helper proc to determine whether shutit has installed already here by placing a file in the db.
@param module_id: Identifying string of shutit module
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.is_shutit_installed(module_id,note=note,loglevel=loglevel)
def ls(self,
directory,
note=None,
loglevel=logging.DEBUG):
"""Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
		return shutit_pexpect_session.ls(directory,note=note,loglevel=loglevel)
def get_file(self,
target_path,
host_path,
note=None,
loglevel=logging.DEBUG):
"""Copy a file from the target machine to the host machine
@param target_path: path to file in the target
@param host_path: path to file on the host machine (e.g. copy test)
@param note: See send()
@type target_path: string
@type host_path: string
@return: boolean
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
self.handle_note(note)
# Only handle for docker initially, return false in case we care
if self.build['delivery'] != 'docker':
return False
# on the host, run:
#Usage: docker cp [OPTIONS] CONTAINER:PATH LOCALPATH|-
# Need: host env, container id, path from and path to
shutit_pexpect_child = self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = self.expect_prompts['ORIGIN_ENV']
self.send('docker cp ' + self.target['container_id'] + ':' + target_path + ' ' + host_path,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
check_exit=False,
echo=False,
loglevel=loglevel)
self.handle_note_after(note=note)
return True
def prompt_cfg(self, msg, sec, name, ispass=False):
"""Prompt for a config value, optionally saving it to the user-level
cfg. Only runs if we are in an interactive mode.
@param msg: Message to display to user.
@param sec: Section of config to add to.
@param name: Config item name.
@param ispass: If True, hide the input from the terminal.
Default: False.
@type msg: string
@type sec: string
@type name: string
@type ispass: boolean
@return: the value entered by the user
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
cfgstr = '[%s]/%s' % (sec, name)
config_parser = self.config_parser
usercfg = os.path.join(self.host['shutit_path'], 'config')
self.log('\nPROMPTING FOR CONFIG: %s' % (cfgstr,),transient=True,level=logging.INFO)
self.log('\n' + msg + '\n',transient=True,level=logging.INFO)
if not shutit_global.shutit_global_object.determine_interactive():
self.fail('ShutIt is not in a terminal so cannot prompt for values.', throw_exception=False) # pragma: no cover
if config_parser.has_option(sec, name):
whereset = config_parser.whereset(sec, name)
if usercfg == whereset:
self.fail(cfgstr + ' has already been set in the user config, edit ' + usercfg + ' directly to change it', throw_exception=False) # pragma: no cover
for subcp, filename, _ in reversed(config_parser.layers):
# Is the config file loaded after the user config file?
if filename == whereset:
self.fail(cfgstr + ' is being set in ' + filename + ', unable to override on a user config level', throw_exception=False) # pragma: no cover
elif filename == usercfg:
break
else:
# The item is not currently set so we're fine to do so
pass
if ispass:
val = getpass.getpass('>> ')
else:
val = shutit_util.util_raw_input(prompt='>> ')
is_excluded = (
config_parser.has_option('save_exclude', sec) and
name in config_parser.get('save_exclude', sec).split()
)
# TODO: ideally we would remember the prompted config item for this invocation of shutit
if not is_excluded:
usercp = [
subcp for subcp, filename, _ in config_parser.layers
if filename == usercfg
][0]
if shutit_util.util_raw_input(prompt=shutit_util.colorise('32', 'Do you want to save this to your user settings? y/n: '),default='y') == 'y':
sec_toset, name_toset, val_toset = sec, name, val
else:
# Never save it
if config_parser.has_option('save_exclude', sec):
excluded = config_parser.get('save_exclude', sec).split()
else:
excluded = []
excluded.append(name)
excluded = ' '.join(excluded)
sec_toset, name_toset, val_toset = 'save_exclude', sec, excluded
if not usercp.has_section(sec_toset):
usercp.add_section(sec_toset)
usercp.set(sec_toset, name_toset, val_toset)
usercp.write(open(usercfg, 'w'))
config_parser.reload()
return val
def step_through(self, msg='', shutit_pexpect_child=None, level=1, print_input=True, value=True):
"""Implements a step-through function, using pause_point.
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
if (not shutit_global.shutit_global_object.determine_interactive() or not shutit_global.shutit_global_object.interactive or
shutit_global.shutit_global_object.interactive < level):
return True
self.build['step_through'] = value
shutit_pexpect_session.pause_point(msg, print_input=print_input, level=level)
return True
def interact(self,
msg='SHUTIT PAUSE POINT',
shutit_pexpect_child=None,
print_input=True,
level=1,
resize=True,
color='32',
default_msg=None,
wait=-1):
"""Same as pause_point, but sets up the terminal ready for unmediated
interaction."""
shutit_global.shutit_global_object.yield_to_draw()
self.pause_point(msg=msg,
shutit_pexpect_child=shutit_pexpect_child,
print_input=print_input,
level=level,
resize=resize,
color=color,
default_msg=default_msg,
interact=True,
wait=wait)
def pause_point(self,
msg='SHUTIT PAUSE POINT',
shutit_pexpect_child=None,
print_input=True,
level=1,
resize=True,
color='32',
default_msg=None,
interact=False,
wait=-1):
"""Inserts a pause in the build session, which allows the user to try
things out before continuing. Ignored if we are not in an interactive
mode, or the interactive level is less than the passed-in one.
Designed to help debug the build, or drop to on failure so the
situation can be debugged.
@param msg: Message to display to user on pause point.
@param shutit_pexpect_child: See send()
@param print_input: Whether to take input at this point (i.e. interact), or
simply pause pending any input.
Default: True
@param level: Minimum level to invoke the pause_point at.
Default: 1
@param resize: If True, try to resize terminal.
		               Default: True
@param color: Color to print message (typically 31 for red, 32 for green)
@param default_msg: Whether to print the standard blurb
@param wait: Wait a few seconds rather than for input
@type msg: string
@type print_input: boolean
@type level: integer
@type resize: boolean
@type wait: decimal
@return: True if pause point handled ok, else false
"""
shutit_global.shutit_global_object.yield_to_draw()
if (not shutit_global.shutit_global_object.determine_interactive() or shutit_global.shutit_global_object.interactive < 1 or
shutit_global.shutit_global_object.interactive < level):
return True
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
# Don't log log traces while in interactive
log_trace_when_idle_original_value = shutit_global.shutit_global_object.log_trace_when_idle
shutit_global.shutit_global_object.log_trace_when_idle = False
if shutit_pexpect_child:
if shutit_global.shutit_global_object.pane_manager is not None:
shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type='clearscreen')
shutit_global.shutit_global_object.pane_manager.do_render = False
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
# TODO: context added to pause point message
shutit_pexpect_session.pause_point(msg=msg,
print_input=print_input,
resize=resize,
color=color,
default_msg=default_msg,
wait=wait,
interact=interact)
else:
self.log(msg,level=logging.DEBUG)
self.log('Nothing to interact with, so quitting to presumably the original shell',level=logging.DEBUG)
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if shutit_pexpect_child:
if shutit_global.shutit_global_object.pane_manager is not None:
shutit_global.shutit_global_object.pane_manager.do_render = True
shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type='clearscreen')
self.build['ctrlc_stop'] = False
# Revert value of log_trace_when_idle
shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
return True
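	# Illustrative sketch (assumed usage): drop to an interactive pause point so
	# the state of the target can be inspected mid-build (ignored when not
	# running interactively).
	#
	#   shutit.pause_point('Check the generated config before services start')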
def send_and_match_output(self,
send,
matches,
shutit_pexpect_child=None,
retry=3,
strip=True,
note=None,
echo=None,
loglevel=logging.DEBUG):
"""Returns true if the output of the command matches any of the strings in
the matches list of regexp strings. Handles matching on a per-line basis
and does not cross lines.
@param send: See send()
@param matches: String - or list of strings - of regexp(s) to check
@param shutit_pexpect_child: See send()
@param retry: Number of times to retry command (default 3)
@param strip: Whether to strip output (defaults to True)
@param note: See send()
@type send: string
@type matches: list
@type retry: integer
@type strip: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_and_match_output(send,
matches,
retry=retry,
strip=strip,
note=note,
echo=echo,
loglevel=loglevel)
def send_and_get_output(self,
send,
shutit_pexpect_child=None,
timeout=None,
retry=3,
strip=True,
preserve_newline=False,
note=None,
record_command=False,
echo=None,
fail_on_empty_before=True,
nonewline=False,
wait=False,
loglevel=logging.INFO):
"""Returns the output of a command run. send() is called, and exit is not checked.
@param send: See send()
@param shutit_pexpect_child: See send()
@param retry: Number of times to retry command (default 3)
@param strip: Whether to strip output (defaults to True). Strips whitespace
and ansi terminal codes
@param note: See send()
@param echo: See send()
@type retry: integer
@type strip: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
ignore_background = not wait
return shutit_pexpect_session.send_and_get_output(send,
timeout=timeout,
retry=retry,
strip=strip,
preserve_newline=preserve_newline,
note=note,
record_command=record_command,
echo=echo,
fail_on_empty_before=fail_on_empty_before,
nonewline=nonewline,
ignore_background=ignore_background,
loglevel=loglevel)
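	# Illustrative sketch (assumed usage): capture command output as a string for
	# later use, without checking the exit code.
	#
	#   kernel = shutit.send_and_get_output('uname -r')
	#   current_user = shutit.send_and_get_output('whoami')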
def install(self,
package,
shutit_pexpect_child=None,
options=None,
timeout=shutit_global.shutit_global_object.default_timeout,
force=False,
check_exit=True,
echo=None,
reinstall=False,
background=False,
wait=False,
block_other_commands=True,
note=None,
loglevel=logging.INFO):
"""Distro-independent install function.
Takes a package name and runs the relevant install function.
@param package: Package to install, which is run through package_map
@param shutit_pexpect_child: See send()
@param timeout: Timeout (s) to wait for finish of install. Defaults to 3600.
@param options: Dictionary for specific options per install tool.
Overrides any arguments passed into this function.
@param force: Force if necessary. Defaults to False
@param check_exit: If False, failure to install is ok (default True)
@param reinstall: Advise a reinstall where possible (default False)
@param note: See send()
@type package: string
@type timeout: integer
@type options: dict
@type force: boolean
@type check_exit: boolean
@type reinstall: boolean
@return: True if all ok (ie it's installed), else False.
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# If separated by spaces, install separately
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
ignore_background = not wait
return shutit_pexpect_session.install(package,
options=options,
timeout=timeout,
force=force,
check_exit=check_exit,
reinstall=reinstall,
echo=echo,
note=note,
run_in_background=background,
ignore_background=ignore_background,
block_other_commands=block_other_commands,
loglevel=loglevel)
def remove(self,
package,
shutit_pexpect_child=None,
options=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
note=None):
"""Distro-independent remove function.
Takes a package name and runs relevant remove function.
@param package: Package to remove, which is run through package_map.
@param shutit_pexpect_child: See send()
@param options: Dict of options to pass to the remove command,
mapped by install_type.
@param timeout: See send(). Default: 3600
@param note: See send()
@return: True if all ok (i.e. the package was successfully removed),
False otherwise.
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
		# If separated by spaces, remove each package separately
		if package.find(' ') != -1:
			for p in package.split(' '):
				self.remove(p,shutit_pexpect_child=shutit_pexpect_child,options=options,timeout=timeout,note=note)
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.remove(package,
echo=echo,
options=options,
timeout=timeout,
note=note)
def get_env_pass(self,
user=None,
msg=None,
shutit_pexpect_child=None,
note=None):
"""Gets a password from the user if one is not already recorded for this environment.
@param user: username we are getting password for
		@param msg: message to display when prompting for the password
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.get_env_pass(user=user,
msg=msg,
note=note)
def whoarewe(self,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Returns the current group.
@param shutit_pexpect_child: See send()
@param note: See send()
@return: the first group found
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.whoarewe(note=note,
loglevel=loglevel)
def login(self,
command='su -',
user=None,
password=<PASSWORD>,
prompt_prefix=None,
expect=None,
timeout=shutit_global.shutit_global_object.default_timeout,
escape=False,
echo=None,
note=None,
go_home=True,
fail_on_fail=True,
is_ssh=True,
check_sudo=True,
loglevel=logging.DEBUG):
"""Logs user in on default child.
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.login(ShutItSendSpec(shutit_pexpect_session,
user=user,
send=command,
password=password,
prompt_prefix=prompt_prefix,
expect=expect,
timeout=timeout,
escape=escape,
echo=echo,
note=note,
go_home=go_home,
fail_on_fail=fail_on_fail,
is_ssh=is_ssh,
check_sudo=check_sudo,
loglevel=loglevel))
def logout_all(self,
command='exit',
note=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
nonewline=False,
loglevel=logging.DEBUG):
"""Logs the user out of all pexpect sessions within this ShutIt object.
@param command: Command to run to log out (default=exit)
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
for key in self.shutit_pexpect_sessions:
shutit_pexpect_session = self.shutit_pexpect_sessions[key]
shutit_pexpect_session.logout_all(ShutItSendSpec(shutit_pexpect_session,
send=command,
note=note,
timeout=timeout,
nonewline=nonewline,
loglevel=loglevel,
echo=echo))
return True
def logout(self,
command='exit',
note=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
nonewline=False,
loglevel=logging.DEBUG):
"""Logs the user out. Assumes that login has been called.
If login has never been called, throw an error.
@param command: Command to run to log out (default=exit)
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.logout(ShutItSendSpec(shutit_pexpect_session,
send=command,
note=note,
timeout=timeout,
nonewline=nonewline,
loglevel=loglevel,
echo=echo))
exit_shell = logout
def wait(self, cadence=2):
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.wait(cadence=cadence)
def get_memory(self,
shutit_pexpect_child=None,
note=None):
"""Returns memory available for use in k as an int"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.get_memory(note=note)
def get_distro_info(self,
shutit_pexpect_child=None,
loglevel=logging.DEBUG):
"""Get information about which distro we are using, placing it in the environment object.
Fails if distro could not be determined.
		Should be called once the container is started up, and uses as little core info
		as possible.
Note: if the install type is apt, it issues the following:
- apt-get update
- apt-get install -y -qq lsb-release
@param shutit_pexpect_child: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.get_distro_info(loglevel=loglevel)
def lsb_release(self,
shutit_pexpect_child=None,
loglevel=logging.DEBUG):
"""Get distro information from lsb_release.
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.lsb_release(loglevel=loglevel)
def set_password(self,
password,
user='',
shutit_pexpect_child=None,
note=None):
"""Sets the password for the current user or passed-in user.
		As a side effect, installs the "passwd" package.
@param user: username to set the password for. Defaults to '' (i.e. current user)
@param password: <PASSWORD> the user
@param shutit_pexpect_child: See send()
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.set_password(password,user=user,note=note)
def whoami(self,
note=None,
shutit_pexpect_child=None,
loglevel=logging.DEBUG):
"""Returns the current user by executing "whoami".
@param note: See send()
@return: the output of "whoami"
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.whoami(note=note,loglevel=loglevel)
def is_user_id_available(self,
user_id,
shutit_pexpect_child=None,
note=None,
loglevel=logging.DEBUG):
"""Determine whether the specified user_id available.
@param user_id: User id to be checked.
@param shutit_pexpect_child: See send()
@param note: See send()
@type user_id: integer
@rtype: boolean
		@return: True if the specified user id is not used yet, False if it's already been assigned to a user.
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.is_user_id_available(user_id,
note=note,
loglevel=loglevel)
def push_repository(self,
repository,
docker_executable='docker',
shutit_pexpect_child=None,
expect=None,
note=None,
loglevel=logging.INFO):
"""Pushes the repository.
@param repository: Repository to push.
@param docker_executable: Defaults to 'docker'
@param expect: See send()
@param shutit_pexpect_child: See send()
@type repository: string
@type docker_executable: string
"""
shutit_global.shutit_global_object.yield_to_draw()
self.handle_note(note)
shutit_pexpect_child = shutit_pexpect_child or self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = expect or self.expect_prompts['ORIGIN_ENV']
send = docker_executable + ' push ' + self.repository['user'] + '/' + repository
timeout = 99999
self.log('Running: ' + send,level=logging.INFO)
self.multisend(docker_executable + ' login',
{'Username':self.repository['user'], 'Password':self.repository['password'], 'Email':self.repository['email']},
shutit_pexpect_child=shutit_pexpect_child,
expect=expect)
self.send(send,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
timeout=timeout,
check_exit=False,
fail_on_empty_before=False,
loglevel=loglevel)
self.handle_note_after(note)
return True
def do_repository_work(self,
repo_name,
repo_tag=None,
docker_executable='docker',
password=<PASSWORD>,
force=None,
loglevel=logging.DEBUG,
note=None,
tag=None,
push=None,
export=None,
save=None):
"""Commit, tag, push, tar a docker container based on the configuration we have.
@param repo_name: Name of the repository.
@param docker_executable: Defaults to 'docker'
@param password:
@param force:
@type repo_name: string
@type docker_executable: string
@type password: string
@type force: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# TODO: make host and client configurable
self.handle_note(note)
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
if tag is None:
tag = self.repository['tag']
if push is None:
push = self.repository['push']
if export is None:
export = self.repository['export']
if save is None:
save = self.repository['save']
if not (push or export or save or tag):
# If we're forcing this, then tag as a minimum
if force:
tag = True
else:
return True
shutit_pexpect_child = self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = self.expect_prompts['ORIGIN_ENV']
server = self.repository['server']
repo_user = self.repository['user']
if repo_tag is None:
repo_tag = self.repository['tag_name']
if repo_user and repo_name:
repository = '%s/%s' % (repo_user, repo_name)
repository_tar = '%s%s' % (repo_user, repo_name)
elif repo_user:
repository = repository_tar = repo_user
elif repo_name:
repository = repository_tar = repo_name
else:
repository = repository_tar = ''
if not repository:
self.fail('Could not form valid repository name', shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
if (export or save) and not repository_tar:
self.fail('Could not form valid tar name', shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
if server != '':
repository = '%s/%s' % (server, repository)
if self.build['deps_only']:
repo_tag += '_deps'
if self.repository['suffix_date']:
suffix_date = time.strftime(self.repository['suffix_format'])
repository = '%s%s' % (repository, suffix_date)
repository_tar = '%s%s' % (repository_tar, suffix_date)
if repository != '' and len(repository.split(':')) > 1:
repository_with_tag = repository
repo_tag = repository.split(':')[1]
elif repository != '':
repository_with_tag = repository + ':' + repo_tag
# Commit image
# Only lower case accepted
repository = repository.lower()
repository_with_tag = repository_with_tag.lower()
if server == '' and len(repository) > 30 and push:
self.fail("""repository name: '""" + repository + """' too long to push. If using suffix_date consider shortening, or consider adding "-s repository push no" to your arguments to prevent pushing.""", shutit_pexpect_child=shutit_pexpect_child, throw_exception=False) # pragma: no cover
if self.send(docker_executable + ' commit ' + self.target['container_id'] + ' ' + repository_with_tag,
expect=[expect,' assword'],
shutit_pexpect_child=shutit_pexpect_child,
timeout=99999,
check_exit=False,
loglevel=loglevel) == 1:
self.send(self.host['password'],
expect=expect,
check_exit=False,
record_command=False,
shutit_pexpect_child=shutit_pexpect_child,
echo=False,
loglevel=loglevel)
# Tag image, force it by default
self.build['report'] += '\nBuild tagged as: ' + repository_with_tag
if export or save:
shutit_pexpect_session.pause_point('We are now exporting the container to a bzipped tar file, as configured in\n[repository]\ntar:yes', print_input=False, level=3)
if export:
bzfile = (repository_tar + 'export.tar.bz2')
self.log('Depositing bzip2 of exported container into ' + bzfile,level=logging.DEBUG)
if self.send(docker_executable + ' export ' + self.target['container_id'] + ' | bzip2 - > ' + bzfile,
expect=[expect, 'assword'],
timeout=99999,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel) == 1:
self.send(password,
expect=expect,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel)
self.log('Deposited bzip2 of exported container into ' + bzfile, level=loglevel)
self.log('Run: bunzip2 -c ' + bzfile + ' | sudo docker import - to get this imported into docker.', level=logging.DEBUG)
self.build['report'] += ('\nDeposited bzip2 of exported container into ' + bzfile)
self.build['report'] += ('\nRun:\n\nbunzip2 -c ' + bzfile + ' | sudo docker import -\n\nto get this imported into docker.')
if save:
bzfile = (repository_tar + 'save.tar.bz2')
self.log('Depositing bzip2 of exported container into ' + bzfile,level=logging.DEBUG)
if self.send(docker_executable + ' save ' + self.target['container_id'] + ' | bzip2 - > ' + bzfile,
expect=[expect, 'assword'],
timeout=99999,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel) == 1:
self.send(password,
expect=expect,
shutit_pexpect_child=shutit_pexpect_child,
loglevel=loglevel)
self.log('Deposited bzip2 of exported container into ' + bzfile, level=logging.DEBUG)
self.log('Run: bunzip2 -c ' + bzfile + ' | sudo docker import - to get this imported into docker.', level=logging.DEBUG)
self.build['report'] += ('\nDeposited bzip2 of exported container into ' + bzfile)
self.build['report'] += ('\nRun:\n\nbunzip2 -c ' + bzfile + ' | sudo docker import -\n\nto get this imported into docker.')
if self.repository['push']:
# Pass the child explicitly as it's the host child.
self.push_repository(repository, docker_executable=docker_executable, expect=expect, shutit_pexpect_child=shutit_pexpect_child)
self.build['report'] = (self.build['report'] + '\nPushed repository: ' + repository)
self.handle_note_after(note)
return True
def get_config(self,
module_id,
option,
default=None,
boolean=False,
secret=False,
forcedefault=False,
forcenone=False,
hint=None):
"""Gets a specific config from the config files, allowing for a default.
Handles booleans vs strings appropriately.
@param module_id: module id this relates to, eg com.mycorp.mymodule.mymodule
@param option: config item to set
@param default: default value if not set in files
@param boolean: whether this is a boolean value or not (default False)
@param secret: whether the config item is a secret
@param forcedefault: if set to true, allows you to override any value already set (default False)
@param forcenone: if set to true, allows you to set the value to None (default False)
@param hint: if we are interactive, then show this prompt to help the user input a useful value
@type module_id: string
@type option: string
@type default: string
@type boolean: boolean
@type secret: boolean
@type forcedefault: boolean
@type forcenone: boolean
@type hint: string
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
if module_id not in cfg.keys():
cfg[module_id] = {}
if not self.config_parser.has_section(module_id):
self.config_parser.add_section(module_id)
if not forcedefault and self.config_parser.has_option(module_id, option):
if boolean:
cfg[module_id][option] = self.config_parser.getboolean(module_id, option)
else:
cfg[module_id][option] = self.config_parser.get(module_id, option)
else:
if not forcenone:
if shutit_global.shutit_global_object.interactive > 0:
if self.build['accept_defaults'] is None:
answer = None
# util_raw_input may change the interactive level, so guard for this.
while answer not in ('yes','no','') and shutit_global.shutit_global_object.interactive > 1:
answer = shutit_util.util_raw_input(prompt=shutit_util.colorise('32', 'Do you want to accept the config option defaults? ' + '(boolean - input "yes" or "no") (default: yes): \n'),default='yes',ispass=secret)
# util_raw_input may change the interactive level, so guard for this.
self.build['accept_defaults'] = answer in ('yes','') or shutit_global.shutit_global_object.interactive < 2
if self.build['accept_defaults'] and default != None:
cfg[module_id][option] = default
else:
# util_raw_input may change the interactive level, so guard for this.
prompt = '\n\nPlease input a value for ' + module_id + '.' + option
if default != None:
prompt = prompt + ' (default: ' + str(default) + ')'
if hint != None:
prompt = prompt + '\n\n' + hint
answer = None
if boolean:
while answer not in ('yes','no'):
answer = shutit_util.util_raw_input(prompt=shutit_util.colorise('32',prompt + ' (boolean - input "yes" or "no"): \n'),ispass=secret)
if answer == 'yes':
answer = True
elif answer == 'no':
answer = False
else:
if re.search('assw',option) is None:
answer = shutit_util.util_raw_input(prompt=shutit_util.colorise('32',prompt) + ': \n',ispass=secret)
else:
answer = shutit_util.util_raw_input(ispass=True,prompt=shutit_util.colorise('32',prompt) + ': \n')
if answer == '' and default != None:
answer = default
cfg[module_id][option] = answer
else:
if default != None:
cfg[module_id][option] = default
else:
self.fail('Config item: ' + option + ':\nin module:\n[' + module_id + ']\nmust be set!\n\nOften this is a deliberate requirement to place in your ~/.shutit/config file, or you can pass in with:\n\n-s ' + module_id + ' ' + option + ' yourvalue\n\nto the build command', throw_exception=False) # pragma: no cover
else:
cfg[module_id][option] = default
return True
def begin_asciinema_session(self,
title=None,
max_pause=None,
filename=None,
shutit_pexpect_child=None):
shutit_global.shutit_global_object.yield_to_draw()
assert self.build['asciinema_session'] is None, shutit_util.print_debug()
self.build['asciinema_session'] = True
self.build['asciinema_session_file'] = False
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
if not self.command_available('asciinema'):
self.install('asciinema')
version = self.send_and_get_output("""asciinema --version | awk '{print $2}'""")
if max_pause:
max_pause_str = ' -w ' + str(max_pause)
else:
max_pause_str = ' -w 5.0'
opts = '-y'
if title:
opts += ' -t "' + str(title) + '"'
if version < '1.3':
self.login(command='asciinema rec -c "' + shutit_global.shutit_global_object.bash_startup_command + '" ' + opts, go_home=False)
elif filename != None:
self.login(command='asciinema rec -c "' + shutit_global.shutit_global_object.bash_startup_command + '" ' + opts + ' ' + max_pause_str + ' ' + filename, go_home=False)
else:
self.login(command='asciinema rec -c "' + shutit_global.shutit_global_object.bash_startup_command + '" ' + opts + ' ' + max_pause_str, go_home=False)
return True
def end_asciinema_session(self,
shutit_pexpect_child=None):
shutit_global.shutit_global_object.yield_to_draw()
assert self.build['asciinema_session'] is True, shutit_util.print_debug()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
self.logout(timeout=shutit_global.shutit_global_object.default_timeout)
self.build['asciinema_session'] = None
self.build['asciinema_session_file'] = None
return True
def get_emailer(self, cfg_section):
"""Sends an email using the mailer
"""
shutit_global.shutit_global_object.yield_to_draw()
import emailer
return emailer.Emailer(cfg_section, self)
# eg sys.stdout or None
def divert_output(self, output):
shutit_global.shutit_global_object.yield_to_draw()
for key in self.shutit_pexpect_sessions:
self.shutit_pexpect_sessions[key].pexpect_child.logfile = output
return True
def get_shutit_pexpect_session_from_child(self, shutit_pexpect_child):
"""Given a pexpect/child object, return the shutit_pexpect_session object.
"""
shutit_global.shutit_global_object.yield_to_draw()
if not isinstance(shutit_pexpect_child, pexpect.pty_spawn.spawn):
self.fail('Wrong type in get_shutit_pexpect_session_child: ' + str(type(shutit_pexpect_child)),throw_exception=True) # pragma: no cover
for key in self.shutit_pexpect_sessions:
if self.shutit_pexpect_sessions[key].pexpect_child == shutit_pexpect_child:
return self.shutit_pexpect_sessions[key]
return self.fail('Should not get here in get_shutit_pexpect_session',throw_exception=True) # pragma: no cover
def get_shutit_pexpect_session_id(self, shutit_pexpect_child):
"""Given a pexpect child object, return the shutit_pexpect_session_id object.
"""
shutit_global.shutit_global_object.yield_to_draw()
if not isinstance(shutit_pexpect_child, pexpect.pty_spawn.spawn):
self.fail('Wrong type in get_shutit_pexpect_session_id',throw_exception=True) # pragma: no cover
for key in self.shutit_pexpect_sessions:
if self.shutit_pexpect_sessions[key].pexpect_child == shutit_pexpect_child:
return key
return self.fail('Should not get here in get_shutit_pexpect_session_id',throw_exception=True) # pragma: no cover
def get_shutit_pexpect_session_from_id(self, shutit_pexpect_id):
"""Get the pexpect session from the given identifier.
"""
shutit_global.shutit_global_object.yield_to_draw()
for key in self.shutit_pexpect_sessions:
if self.shutit_pexpect_sessions[key].pexpect_session_id == shutit_pexpect_id:
return self.shutit_pexpect_sessions[key]
return self.fail('Should not get here in get_shutit_pexpect_session_from_id',throw_exception=True) # pragma: no cover
def print_session_state(self):
shutit_global.shutit_global_object.yield_to_draw()
ret = '\n'
for key in self.shutit_pexpect_sessions:
ret += '===============================================================================\n'
session_id = self.shutit_pexpect_sessions[key].pexpect_session_id
session = self.shutit_pexpect_sessions[key]
ret += 'KEY: ' + key + '\n'
ret += 'SESSION_ID: ' + session_id + '\n'
ret += 'SESSION: ' + str(session) + '\n'
ret += 'DEFAULT_EXP: ' + session.default_expect + '\n'
ret += 'LOGIN_STACK: ' + str(session.login_stack) + '\n'
ret += 'CURRENT_ENVIRONMENT: ' + str(session.current_environment) + '\n'
ret += '===============================================================================\n'
return ret
# TODO: walkthrough and exam at global level? but see handle_note - looks like that is shutit-specific
# given a shutit object and an echo value, return the appropriate echo
# value for the given context.
def get_echo_override(self, echo):
shutit_global.shutit_global_object.yield_to_draw()
# Should we echo the output?
if shutit_global.shutit_global_object.managed_panes:
# Never echo if in managed panes
return False
if self.build['always_echo'] is True or self.loglevel <= logging.DEBUG:
# Yes if it's set to always echo or is in debug
echo = True
if echo is None and self.build['walkthrough']:
# Yes if it's in walkthrough and was not explicitly passed in
echo = True
if echo is None:
# No if it was not explicitly passed in
echo = False
if self.build['exam'] and self.loglevel not in ('DEBUG',):
# No if we are in exam mode
echo = False
return echo
def check_sudo(self, shutit_pexpect_session=None):
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = shutit_pexpect_session or self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.check_sudo()
def get_sudo_pass_if_needed(self, shutit, ignore_brew=False):
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.get_sudo_pass_if_needed(shutit, ignore_brew=ignore_brew)
def get_commands(self):
"""Gets command that have been run and have not been redacted.
"""
shutit_global.shutit_global_object.yield_to_draw()
s = ''
for c in self.build['shutit_command_history']:
if isinstance(c, str):
#Ignore commands with leading spaces
if c and c[0] != ' ':
s += c + '\n'
return s
# Build report
def build_report(self, msg=''):
"""Resposible for constructing a report to be output as part of the build.
Returns report as a string.
"""
shutit_global.shutit_global_object.yield_to_draw()
s = '\n'
s += '################################################################################\n'
s += '# COMMAND HISTORY BEGIN ' + shutit_global.shutit_global_object.build_id + '\n'
s += self.get_commands()
s += '# COMMAND HISTORY END ' + shutit_global.shutit_global_object.build_id + '\n'
s += '################################################################################\n'
s += '################################################################################\n'
s += '# BUILD REPORT FOR BUILD BEGIN ' + shutit_global.shutit_global_object.build_id + '\n'
s += '# ' + msg + '\n'
if self.build['report'] != '':
s += self.build['report'] + '\n'
else:
s += '# Nothing to report\n'
if 'container_id' in self.target:
s += '# CONTAINER_ID: ' + self.target['container_id'] + '\n'
s += '# BUILD REPORT FOR BUILD END ' + shutit_global.shutit_global_object.build_id + '\n'
s += '###############################################################################\n'
s += '# INVOKING COMMAND WAS: ' + sys.executable
for arg in sys.argv:
s += ' ' + arg
s += '\n'
s += '###############################################################################\n'
return s
def match_string(self, string_to_match, regexp):
"""Get regular expression from the first of the lines passed
in in string that matched. Handles first group of regexp as
a return value.
@param string_to_match: String to match on
@param regexp: Regexp to check (per-line) against string
@type string_to_match: string
@type regexp: string
Returns None if none of the lines matched.
Returns True if there are no groups selected in the regexp.
		Otherwise returns the matching group (i.e. non-None).
"""
shutit_global.shutit_global_object.yield_to_draw()
if not isinstance(string_to_match, str):
return None
		lines = string_to_match.split('\r\n')
		# sometimes they're separated by just a carriage return...
		new_lines = []
		for line in lines:
			new_lines = new_lines + line.split('\r')
		# and sometimes they're separated by just a newline...
		lines = []
		for line in new_lines:
			lines = lines + line.split('\n')
if not shutit_util.check_regexp(regexp):
self.fail('Illegal regexp found in match_string call: ' + regexp) # pragma: no cover
for line in lines:
match = re.match(regexp, line)
if match is not None:
if match.groups():
return match.group(1)
return True
return None
def module_ids(self, rev=False):
"""Gets a list of module ids guaranteed to be sorted by run_order, ignoring conn modules
(run order < 0).
"""
shutit_global.shutit_global_object.yield_to_draw()
ids = sorted(list(self.shutit_map.keys()),key=lambda module_id: self.shutit_map[module_id].run_order)
if rev:
return list(reversed(ids))
return ids
def is_to_be_built_or_is_installed(self, shutit_module_obj):
"""Returns true if this module is configured to be built, or if it is already installed.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
if cfg[shutit_module_obj.module_id]['shutit.core.module.build']:
return True
return self.is_installed(shutit_module_obj)
def is_installed(self, shutit_module_obj):
"""Returns true if this module is installed.
Uses cache where possible.
"""
shutit_global.shutit_global_object.yield_to_draw()
# Cache first
if shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_installed:
return True
if shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_not_installed:
return False
# Is it installed?
if shutit_module_obj.is_installed(self):
self.get_current_shutit_pexpect_session_environment().modules_installed.append(shutit_module_obj.module_id)
return True
# If not installed, and not in cache, add it.
else:
if shutit_module_obj.module_id not in self.get_current_shutit_pexpect_session_environment().modules_not_installed:
self.get_current_shutit_pexpect_session_environment().modules_not_installed.append(shutit_module_obj.module_id)
return False
return False
def determine_compatibility(self, module_id):
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# Allowed images
if (cfg[module_id]['shutit.core.module.allowed_images'] and self.target['docker_image'] not in cfg[module_id]['shutit.core.module.allowed_images']) and not self.allowed_image(module_id):
return 1
# Build methods
if cfg[module_id]['shutit.core.module.build'] and self.build['delivery'] not in self.shutit_map[module_id].ok_delivery_methods:
return 2
return 0
def allowed_image(self, module_id):
"""Given a module id, determine whether the image is allowed to be built.
"""
shutit_global.shutit_global_object.yield_to_draw()
self.log("In allowed_image: " + module_id,level=logging.DEBUG)
cfg = self.cfg
if self.build['ignoreimage']:
self.log("ignoreimage == true, returning true" + module_id,level=logging.DEBUG)
return True
self.log(str(cfg[module_id]['shutit.core.module.allowed_images']),level=logging.DEBUG)
if cfg[module_id]['shutit.core.module.allowed_images']:
# Try allowed images as regexps
for regexp in cfg[module_id]['shutit.core.module.allowed_images']:
if not shutit_util.check_regexp(regexp):
self.fail('Illegal regexp found in allowed_images: ' + regexp) # pragma: no cover
if re.match('^' + regexp + '$', self.target['docker_image']):
return True
return False
def print_modules(self):
"""Returns a string table representing the modules in the ShutIt module map.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
module_string = ''
module_string += 'Modules: \n'
module_string += ' Run order Build Remove Module ID\n'
for module_id in self.module_ids():
module_string += ' ' + str(self.shutit_map[module_id].run_order) + ' ' + str(
cfg[module_id]['shutit.core.module.build']) + ' ' + str(
cfg[module_id]['shutit.core.module.remove']) + ' ' + module_id + '\n'
return module_string
def load_shutit_modules(self):
"""Responsible for loading the shutit modules based on the configured module
paths.
"""
shutit_global.shutit_global_object.yield_to_draw()
if self.loglevel <= logging.DEBUG:
self.log('ShutIt module paths now: ',level=logging.DEBUG)
self.log(self.host['shutit_module_path'],level=logging.DEBUG)
for shutit_module_path in self.host['shutit_module_path']:
self.load_all_from_path(shutit_module_path)
def get_command(self, command):
"""Helper function for osx - return gnu utils rather than default for
eg head and md5sum where possible and needed.
"""
shutit_global.shutit_global_object.yield_to_draw()
if command in ('md5sum','sed','head'):
if self.get_current_shutit_pexpect_session_environment().distro == 'osx':
return 'g' + command
return command
def get_send_command(self, send):
"""Internal helper function to get command that's really sent
"""
shutit_global.shutit_global_object.yield_to_draw()
if send is None:
return send
cmd_arr = send.split()
if cmd_arr and cmd_arr[0] in ('md5sum','sed','head'):
newcmd = self.get_command(cmd_arr[0])
send = send.replace(cmd_arr[0],newcmd)
return send
def load_configs(self):
"""Responsible for loading config files into ShutIt.
Recurses down from configured shutit module paths.
"""
shutit_global.shutit_global_object.yield_to_draw()
# Get root default config.
# TODO: change default_cnf so it places whatever the values are at this stage of the build.
configs = [('defaults', StringIO(default_cnf)), os.path.expanduser('~/.shutit/config'), os.path.join(self.host['shutit_path'], 'config'), 'configs/build.cnf']
# Add the shutit global host- and user-specific config file.
# Add the local build.cnf
# Get passed-in config(s)
for config_file_name in self.build['extra_configs']:
run_config_file = os.path.expanduser(config_file_name)
if not os.path.isfile(run_config_file):
shutit_global.shutit_global_object.shutit_print('Did not recognise ' + run_config_file + ' as a file - do you need to touch ' + run_config_file + '?')
shutit_global.shutit_global_object.handle_exit(exit_code=0)
configs.append(run_config_file)
# Image to use to start off. The script should be idempotent, so running it
# on an already built image should be ok, and is advised to reduce diff space required.
if self.action['list_configs'] or self.loglevel <= logging.DEBUG:
msg = ''
for c in configs:
if isinstance(c, tuple):
c = c[0]
msg = msg + ' \n' + c
self.log(' ' + c,level=logging.DEBUG)
# Interpret any config overrides, write to a file and add them to the
# list of configs to be interpreted
if self.build['config_overrides']:
# We don't need layers, this is a temporary configparser
override_cp = ConfigParser.RawConfigParser()
for o_sec, o_key, o_val in self.build['config_overrides']:
if not override_cp.has_section(o_sec):
override_cp.add_section(o_sec)
override_cp.set(o_sec, o_key, o_val)
override_fd = StringIO()
override_cp.write(override_fd)
override_fd.seek(0)
configs.append(('overrides', override_fd))
self.config_parser = self.get_configs(configs)
self.get_base_config()
# Manage config settings, returning a dict representing the settings
# that have been sanity-checked.
# Should only be called from load_configs above.
def get_base_config(self):
"""Responsible for getting core configuration from config files.
"""
shutit_global.shutit_global_object.yield_to_draw()
cp = self.config_parser
# TODO: what happens if a command-line arg was already set?
# BEGIN Read from config files
# build - details relating to the build
self.build['privileged'] = cp.getboolean('build', 'privileged')
self.build['base_image'] = cp.get('build', 'base_image')
self.build['dotest'] = cp.get('build', 'dotest')
self.build['net'] = cp.get('build', 'net')
# Take a command-line arg if given, else default.
if self.build['conn_module'] is None:
self.build['conn_module'] = cp.get('build', 'conn_module')
# target - the target of the build, ie the container
self.target['hostname'] = cp.get('target', 'hostname')
self.target['ports'] = cp.get('target', 'ports')
self.target['volumes'] = cp.get('target', 'volumes')
self.target['volumes_from'] = cp.get('target', 'volumes_from')
self.target['name'] = cp.get('target', 'name')
self.target['rm'] = cp.getboolean('target', 'rm')
# host - the host on which the shutit script is run
self.host['add_shutit_to_path'] = cp.getboolean('host', 'add_shutit_to_path')
self.host['docker_executable'] = cp.get('host', 'docker_executable')
self.host['dns'] = cp.get('host', 'dns')
self.host['password'] = cp.get('host', 'password')
if isinstance(self.host['password'],str):
shutit_global.shutit_global_object.secret_words_set.add(self.host['password'])
self.logfile = cp.get('host', 'logfile')
self.host['shutit_module_path'] = cp.get('host', 'shutit_module_path').split(':')
# repository - information relating to docker repository/registry
self.repository['name'] = cp.get('repository', 'name')
self.repository['server'] = cp.get('repository', 'server')
self.repository['push'] = cp.getboolean('repository', 'push')
self.repository['tag'] = cp.getboolean('repository', 'tag')
self.repository['export'] = cp.getboolean('repository', 'export')
self.repository['save'] = cp.getboolean('repository', 'save')
self.repository['suffix_date'] = cp.getboolean('repository', 'suffix_date')
self.repository['suffix_format'] = cp.get('repository', 'suffix_format')
self.repository['user'] = cp.get('repository', 'user')
self.repository['password'] = cp.get('repository', 'password')
if isinstance(self.repository['password'],str):
shutit_global.shutit_global_object.secret_words_set.add(self.repository['password'])
self.repository['email'] = cp.get('repository', 'email')
self.repository['tag_name'] = cp.get('repository', 'tag_name')
# END Read from config files
# BEGIN tidy configs up
if self.target['docker_image'] == '':
self.target['docker_image'] = self.build['base_image']
# END tidy configs up
# FAILS begins
# rm is incompatible with repository actions
if self.target['rm'] and (self.repository['tag'] or self.repository['push'] or self.repository['save'] or self.repository['export']): # pragma: no cover
shutit_global.shutit_global_object.shutit_print("Can't have [target]/rm and [repository]/(push/save/export) set to true")
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if self.target['hostname'] != '' and self.build['net'] != '' and self.build['net'] != 'bridge': # pragma: no cover
shutit_global.shutit_global_object.shutit_print('\n\ntarget/hostname or build/net configs must be blank\n\n')
shutit_global.shutit_global_object.handle_exit(exit_code=1)
# FAILS ends
def load_all_from_path(self, path):
"""Dynamically imports files within the same directory (in the end, the path).
"""
shutit_global.shutit_global_object.yield_to_draw()
#111: handle expanded paths
path = os.path.abspath(path)
#http://stackoverflow.com/questions/301134/dynamic-module-import-in-python
if os.path.abspath(path) == self.shutit_main_dir:
return
if not os.path.exists(path):
return
if os.path.exists(path + '/STOPBUILD') and not self.build['ignorestop']:
self.log('Ignoring directory: ' + path + ' as it has a STOPBUILD file in it. Pass --ignorestop to shutit run to override.',level=logging.DEBUG)
return
for sub in glob.glob(os.path.join(path, '*')):
subpath = os.path.join(path, sub)
if os.path.isfile(subpath):
self.load_mod_from_file(subpath)
elif os.path.isdir(subpath):
self.load_all_from_path(subpath)
def load_mod_from_file(self, fpath):
"""Loads modules from a .py file into ShutIt if there are no modules from
this file already.
We expect to have a callable 'module/0' which returns one or more module
objects.
If this doesn't exist we assume that the .py file works in the old style
(automatically inserting the module into shutit_global) or it's not a shutit
module.
"""
shutit_global.shutit_global_object.yield_to_draw()
fpath = os.path.abspath(fpath)
file_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]
if file_ext.lower() != '.py':
return
with open(fpath) as f:
content = f.read().splitlines()
ok = False
for line in content:
if line.strip() == 'from shutit_module import ShutItModule':
ok = True
break
if not ok:
self.log('Rejected file: ' + fpath,level=logging.DEBUG)
return
		# Note that this attribute will only be set for 'new style' module loading;
		# this should be ok because 'old style' loading checks for duplicate
		# existing modules.
# TODO: this is quadratic complexity
existingmodules = [
m for m in self.shutit_modules
if getattr(m, '__module_file', None) == fpath
]
if existingmodules:
self.log('Module already seen: ' + fpath,level=logging.DEBUG)
return
# Looks like it's ok to load this file
self.log('Loading source for: ' + fpath,level=logging.DEBUG)
# Add this directory to the python path iff not already there.
directory = os.path.dirname(fpath)
if directory not in sys.path:
sys.path.append(os.path.dirname(fpath))
# TODO: use bytearray to encode?
mod_name = base64.b32encode(fpath.encode()).decode().replace('=', '')
pymod = imp.load_source(mod_name, fpath)
# Got the python module, now time to pull the shutit module(s) out of it.
targets = [
('module', self.shutit_modules), ('conn_module', self.conn_modules)
]
self.build['source'] = {}
for attr, target in targets:
modulefunc = getattr(pymod, attr, None)
# Old style or not a shutit module, nothing else to do
if not callable(modulefunc):
return
modules = modulefunc()
if not isinstance(modules, list):
modules = [modules]
for module in modules:
setattr(module, '__module_file', fpath)
ShutItModule.register(module.__class__)
target.add(module)
self.build['source'][fpath] = open(fpath).read()
def config_collection_for_built(self, throw_error=True,silent=False):
"""Collect configuration for modules that are being built.
When this is called we should know what's being built (ie after
dependency resolution).
"""
shutit_global.shutit_global_object.yield_to_draw()
self.log('In config_collection_for_built',level=logging.DEBUG)
cfg = self.cfg
for module_id in self.module_ids():
# Get the config even if installed or building (may be needed in other hooks, eg test).
if (self.is_to_be_built_or_is_installed(self.shutit_map[module_id]) and
not self.shutit_map[module_id].get_config(self)):
self.fail(module_id + ' failed on get_config') # pragma: no cover
# Collect the build.cfg if we are building here.
# If this file exists, process it.
if cfg[module_id]['shutit.core.module.build'] and not self.build['have_read_config_file']:
# TODO: __module_file not accessible when within object - look to get this elsewhere and re-read in, then move this function into shutit object.
cfg_file = os.path.dirname(self.shutit_file_map[module_id]) + '/configs/build.cnf'
if os.path.isfile(cfg_file):
self.build['have_read_config_file'] = True
# use self.get_config, forcing the passed-in default
config_parser = ConfigParser.ConfigParser()
config_parser.read(cfg_file)
for section in config_parser.sections():
if section == module_id:
for option in config_parser.options(section):
override = False
for mod, opt, val in self.build['config_overrides']:
val = val # pylint
# skip overrides
if mod == module_id and opt == option:
override = True
if override:
continue
is_bool = isinstance(cfg[module_id][option], bool)
if is_bool:
value = config_parser.getboolean(section,option)
else:
value = config_parser.get(section,option)
if option == 'shutit.core.module.allowed_images':
value = json.loads(value)
self.get_config(module_id, option, value, forcedefault=True)
# Check the allowed_images against the base_image
passed = True
for module_id in self.module_ids():
if (cfg[module_id]['shutit.core.module.build'] and
(cfg[module_id]['shutit.core.module.allowed_images'] and
self.target['docker_image'] not in cfg[module_id]['shutit.core.module.allowed_images'])):
if not self.allowed_image(module_id):
passed = False
if not silent:
shutit_global.shutit_global_object.shutit_print('\n\nWARNING!\n\nAllowed images for ' + module_id + ' are: ' + str(cfg[module_id]['shutit.core.module.allowed_images']) + ' but the configured image is: ' + self.target['docker_image'] + '\n\nIs your shutit_module_path set correctly?\n\nIf you want to ignore this, pass in the --ignoreimage flag to shutit.\n\n')
if not passed:
if not throw_error:
return False
if self.build['imageerrorok']:
# useful for test scripts
shutit_global.shutit_global_object.shutit_print('Exiting on allowed images error, with return status 0')
shutit_global.shutit_global_object.handle_exit(exit_code=1)
else:
raise ShutItFailException('Allowed images checking failed') # pragma: no cover
return True
def config_collection(self):
"""Collect core config from config files for all seen modules.
"""
shutit_global.shutit_global_object.yield_to_draw()
self.log('In config_collection',level=logging.DEBUG)
cfg = self.cfg
for module_id in self.module_ids():
# Default to None so we can interpret as ifneeded
self.get_config(module_id, 'shutit.core.module.build', None, boolean=True, forcenone=True)
self.get_config(module_id, 'shutit.core.module.remove', False, boolean=True)
self.get_config(module_id, 'shutit.core.module.tag', False, boolean=True)
# Default to allow any image
self.get_config(module_id, 'shutit.core.module.allowed_images', [".*"])
module = self.shutit_map[module_id]
cfg_file = os.path.dirname(get_module_file(self,module)) + '/configs/build.cnf'
if os.path.isfile(cfg_file):
# use self.get_config, forcing the passed-in default
config_parser = ConfigParser.ConfigParser()
config_parser.read(cfg_file)
for section in config_parser.sections():
if section == module_id:
for option in config_parser.options(section):
if option == 'shutit.core.module.allowed_images':
override = False
for mod, opt, val in self.build['config_overrides']:
val = val # pylint
# skip overrides
if mod == module_id and opt == option:
override = True
if override:
continue
value = config_parser.get(section,option)
if option == 'shutit.core.module.allowed_images':
value = json.loads(value)
self.get_config(module_id, option, value, forcedefault=True)
# ifneeded will (by default) only take effect if 'build' is not
# specified. It can, however, be forced to a value, but this
# should be unusual.
if cfg[module_id]['shutit.core.module.build'] is None:
self.get_config(module_id, 'shutit.core.module.build_ifneeded', True, boolean=True)
cfg[module_id]['shutit.core.module.build'] = False
else:
self.get_config(module_id, 'shutit.core.module.build_ifneeded', False, boolean=True)
def do_list_modules(self, long_output=None,sort_order=None):
"""Display a list of loaded modules.
Config items:
- shutit.list_modules['long']
If set, also print each module's run order value
- shutit.list_modules['sort']
Select the column by which the list is ordered:
- id: sort the list by module id
- run_order: sort the list by module run order
The output is also saved to ['build']['log_config_path']/module_order.txt
Dependencies: operator
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# list of module ids and other details
# will also contain column headers
table_list = []
if long_output is None:
long_output = self.list_modules['long']
if sort_order is None:
sort_order = self.list_modules['sort']
if long_output:
# --long table: sort modules by run order
table_list.append(["Order","Module ID","Description","Run Order","Built","Compatible"])
#table_list.append(["Order","Module ID","Description","Run Order","Built"])
else:
# "short" table ==> sort module by module_id
#table_list.append(["Module ID","Description","Built"])
table_list.append(["Module ID","Description","Built","Compatible"])
if sort_order == 'run_order':
d = {}
for m in self.shutit_modules:
d.update({m.module_id:m.run_order})
# sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
b = sorted(d.items(), key=operator.itemgetter(1))
count = 0
# now b is a list of tuples (module_id, run_order)
for pair in b:
# module_id is the first item of the tuple
k = pair[0]
for m in self.shutit_modules:
if m.module_id == k:
count += 1
compatible = True
if not cfg[m.module_id]['shutit.core.module.build']:
cfg[m.module_id]['shutit.core.module.build'] = True
compatible = self.determine_compatibility(m.module_id) == 0
cfg[m.module_id]['shutit.core.module.build'] = False
if long_output:
table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])
else:
table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
elif sort_order == 'id':
l = []
for m in self.shutit_modules:
l.append(m.module_id)
l.sort()
for k in l:
for m in self.shutit_modules:
if m.module_id == k:
count = 1
compatible = True
if not cfg[m.module_id]['shutit.core.module.build']:
cfg[m.module_id]['shutit.core.module.build'] = True
compatible = self.determine_compatibility(m.module_id) == 0
if long_output:
table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])
else:
#table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])])
table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
# format table for display
table = texttable.Texttable()
table.add_rows(table_list)
# Base length of table on length of strings
colwidths = []
for item in table_list:
for n in range(0,len(item)):
# default to 10 chars
colwidths.append(10)
break
for item in table_list:
for n in range(0,len(item)-1):
if len(str(item[n])) > colwidths[n]:
colwidths[n] = len(str(item[n]))
table.set_cols_width(colwidths)
msg = table.draw()
shutit_global.shutit_global_object.shutit_print('\n' + msg)
def print_config(self, cfg, hide_password=True, history=False, module_id=None):
"""Returns a string representing the config of this ShutIt run.
"""
shutit_global.shutit_global_object.yield_to_draw()
cp = self.config_parser
s = ''
keys1 = list(cfg.keys())
if keys1:
keys1.sort()
for k in keys1:
if module_id is not None and k != module_id:
continue
if isinstance(k, str) and isinstance(cfg[k], dict):
s += '\n[' + k + ']\n'
keys2 = list(cfg[k].keys())
if keys2:
keys2.sort()
for k1 in keys2:
line = ''
line += k1 + ':'
							# If we want to hide passwords, we do so using a sha512
							# hash applied an arbitrary number of times (27).
							if hide_password and (k1 == 'password' or k1 == 'passphrase'):
								p = hashlib.sha512(cfg[k][k1].encode()).hexdigest()
								i = 27
								while i > 0:
									i -= 1
									p = hashlib.sha512(p.encode()).hexdigest()
								line += p
							else:
								if isinstance(cfg[k][k1], str):
									line += cfg[k][k1]
								else:
									line += str(cfg[k][k1])
if history:
try:
line += (30-len(line)) * ' ' + ' # ' + cp.whereset(k, k1)
except Exception:
# Assume this is because it was never set by a config parser.
line += (30-len(line)) * ' ' + ' # ' + "defaults in code"
s += line + '\n'
return s
def process_args(self, args):
"""Process the args we have. 'args' is always a ShutItInit object.
"""
shutit_global.shutit_global_object.yield_to_draw()
assert isinstance(args,ShutItInit), shutit_util.print_debug()
if args.action == 'version':
shutit_global.shutit_global_object.shutit_print('ShutIt version: ' + shutit.shutit_version)
shutit_global.shutit_global_object.handle_exit(exit_code=0)
# What are we asking shutit to do?
self.action['list_configs'] = args.action == 'list_configs'
self.action['list_modules'] = args.action == 'list_modules'
self.action['list_deps'] = args.action == 'list_deps'
self.action['skeleton'] = args.action == 'skeleton'
self.action['build'] = args.action == 'build'
self.action['run'] = args.action == 'run'
# Logging
if not self.logging_setup_done:
self.logfile = args.logfile
self.loglevel = args.loglevel
if self.loglevel is None or self.loglevel == '':
self.loglevel = 'INFO'
self.setup_logging()
shutit_global.shutit_global_object.setup_panes(action=args.action)
# This mode is a bit special - it's the only one with different arguments
if self.action['skeleton']:
self.handle_skeleton(args)
shutit_global.shutit_global_object.handle_exit()
elif self.action['run']:
self.handle_run(args)
sys.exit(0)
elif self.action['build'] or self.action['list_configs'] or self.action['list_modules']:
self.handle_build(args)
else:
self.fail('Should not get here: action was: ' + str(self.action))
self.nocolor = args.nocolor
def handle_skeleton(self, args):
shutit_global.shutit_global_object.yield_to_draw()
delivery_method = args.delivery
accept_defaults = args.accept
shutitfiles = args.shutitfiles
module_directory = args.name
domain = args.domain
pattern = args.pattern
base_image = args.base_image
depends = args.depends
script = args.script
		vagrant_num_machines = args.vagrant_num_machines
vagrant_ssh_access = args.vagrant_ssh_access
vagrant_machine_prefix = args.vagrant_machine_prefix
vagrant_docker = args.vagrant_docker
vagrant_snapshot = args.vagrant_snapshot
vagrant_upload = args.vagrant_upload
vagrant_image_name = args.vagrant_image_name
default_pattern = 'bash'
# Looks through the arguments given for valid shutitfiles, and adds their names to _new_shutitfiles.
_new_shutitfiles = None
if shutitfiles:
cwd = os.getcwd()
_new_shutitfiles = []
_delivery_methods_seen = set()
for shutitfile in shutitfiles:
if shutitfile[0] != '/':
shutitfile = cwd + '/' + shutitfile
if os.path.isfile(shutitfile):
candidate_shutitfile_fh = open(shutitfile,'r')
candidate_shutitfile_contents = candidate_shutitfile_fh.read()
candidate_shutitfile_fh.close()
try:
shutitfile_representation, ok = shutit_skeleton.process_shutitfile(self, candidate_shutitfile_contents)
if not ok or candidate_shutitfile_contents.strip() == '':
shutit_global.shutit_global_object.shutit_print('Ignoring file (failed to parse candidate shutitfile): ' + shutitfile)
else:
_new_shutitfiles.append(shutitfile)
if shutitfile_representation['shutitfile']['delivery']:
_delivery_methods_seen.add(shutitfile_representation['shutitfile']['delivery'][0][1])
except Exception as e:
shutit_global.shutit_global_object.shutit_print('')
shutit_global.shutit_global_object.shutit_print(e)
shutit_global.shutit_global_object.shutit_print('Ignoring file (failed to parse candidate shutitfile): ' + shutitfile)
elif os.path.isdir(shutitfile):
for root, subfolders, files in os.walk(shutitfile):
subfolders.sort()
files.sort()
for fname in files:
candidate_shutitfile = os.path.join(root, fname)
try:
if os.path.isfile(candidate_shutitfile):
candidate_shutitfile_fh = open(candidate_shutitfile,'r')
candidate_shutitfile_contents = candidate_shutitfile_fh.read()
candidate_shutitfile_fh.close()
shutitfile_representation, ok = shutit_skeleton.process_shutitfile(shutit, candidate_shutitfile_contents)
if not ok or candidate_shutitfile_contents.strip() == '':
shutit_global.shutit_global_object.shutit_print('Ignoring file (failed to parse candidate shutitfile): ' + candidate_shutitfile)
else:
_new_shutitfiles.append(candidate_shutitfile)
if shutitfile_representation['shutitfile']['delivery']:
_delivery_methods_seen.add(shutitfile_representation['shutitfile']['delivery'][0][1])
else:
shutit_global.shutit_global_object.shutit_print('Ignoring filename (not a normal file): ' + fname)
except:
shutit_global.shutit_global_object.shutit_print('Ignoring file (failed to parse candidate shutitfile): ' + candidate_shutitfile)
if _new_shutitfiles:
if not _delivery_methods_seen and delivery_method is None:
delivery_method = 'bash'
elif not _delivery_methods_seen:
pass
elif len(_delivery_methods_seen) == 1 and delivery_method is None:
delivery_method = _delivery_methods_seen.pop()
elif len(_delivery_methods_seen) == 1:
shutitfile_delivery_method = _delivery_methods_seen.pop()
if delivery_method != shutitfile_delivery_method:
shutit_global.shutit_global_object.shutit_print('Conflicting delivery methods passed in vs. from shutitfile.\nPassed-in: ' + delivery_method + '\nShutitfile: ' + shutitfile_delivery_method)
shutit_global.shutit_global_object.handle_exit(exit_code=1)
else:
shutit_global.shutit_global_object.shutit_print('Too many delivery methods seen in shutitfiles: ' + str(_new_shutitfiles))
shutit_global.shutit_global_object.shutit_print('Delivery methods: ' + str(_delivery_methods_seen))
shutit_global.shutit_global_object.shutit_print('Delivery method passed in: ' + delivery_method)
shutit_global.shutit_global_object.handle_exit(exit_code=1)
else:
shutit_global.shutit_global_object.shutit_print('ShutItFiles: ' + str(_new_shutitfiles) + ' appear to not exist.')
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if module_directory == '':
default_dir = self.host['calling_path'] + '/shutitskel_' + shutit_util.random_word()
if accept_defaults:
module_directory = default_dir
else:
# TODO: Python3?
#def hook():
# readline.insert_text(default_dir)
#readline.redisplay()
#readline.set_pre_input_hook(hook)
readline.set_startup_hook(lambda: readline.insert_text(default_dir))
shutit_global.shutit_global_object.shutit_print('Input a path for this module')
module_directory = shutit_util.util_raw_input(prompt='\n>> ', default=default_dir)
readline.set_startup_hook()
if module_directory[0] != '/':
module_directory = self.host['calling_path'] + '/' + module_directory
module_name = module_directory.split('/')[-1].replace('-','_')
if domain == '':
default_domain_name = os.getcwd().split('/')[-1] + '.' + module_name
domain = default_domain_name
# Figure out defaults.
# If no pattern supplied, then assume it's the same as delivery.
if pattern == '':
if accept_defaults or _new_shutitfiles:
if _new_shutitfiles:
default_pattern = delivery_method
pattern = default_pattern
else:
pattern = ''
choices = ('bash','docker','vagrant','docker_tutorial','shutitfile')
for choice in choices:
readline.add_history(choice)
while pattern not in choices:
table = texttable.Texttable()
rows = [['Choice','Description'],
['bash','Shell script'],
['docker','Builds a Docker image'],
['vagrant','Builds a cluster of Vagrant machines'],
['docker_tutorial','Creates a Docker-based tutorial environment'],
['shutitfile','A ShutItFile based project (can be docker-, bash-, or vagrant-based)']]
table.add_rows(rows)
shutit_global.shutit_global_object.shutit_print(table.draw() + '\n')
shutit_global.shutit_global_object.shutit_print('Choose, but choose wisely: ')
readline.set_startup_hook(lambda: readline.insert_text('bash'))
pattern = shutit_util.util_raw_input(prompt='\n>> ')
readline.set_startup_hook()
# Sort out delivery method.
if delivery_method is None:
take_this_default = False
default_delivery = 'bash'
if pattern in ('docker','docker_tutorial', 'shutitfile'):
if pattern in ('docker','docker_tutorial'):
take_this_default = True
default_delivery = 'docker'
elif pattern in ('vagrant','bash'):
take_this_default = True
default_delivery = 'bash'
else:
default_delivery = 'bash'
if accept_defaults or take_this_default:
delivery = default_delivery
else:
delivery = ''
while delivery not in shutit_global.shutit_global_object.allowed_delivery_methods:
readline.set_startup_hook(lambda: readline.insert_text('bash'))
table = texttable.Texttable()
rows = [['Choice','Description'],
['docker','Build within a Docker container'],
['bash','Run commands directly within bash'],
['vagrant','Build an n-node Vagrant cluster']]
table.add_rows(rows)
shutit_global.shutit_global_object.shutit_print(table.draw() + '\n')
shutit_global.shutit_global_object.shutit_print('Input a delivery method')
readline.set_startup_hook(lambda: readline.insert_text(default_delivery))
delivery = shutit_util.util_raw_input(prompt='>> ')
readline.set_startup_hook()
else:
delivery = delivery_method
self.cfg['skeleton'] = {
'path': module_directory,
'module_name': module_name,
'base_image': base_image,
'domain': domain,
'domain_hash': str(shutit_util.get_hash(domain)),
'depends': depends,
'script': script,
'shutitfiles': _new_shutitfiles,
'output_dir': args.output_dir,
'delivery': delivery,
'pattern': pattern,
'vagrant_num_machines': vagrant_num_machines,
'vagrant_ssh_access': vagrant_ssh_access,
'vagrant_machine_prefix': vagrant_machine_prefix,
'vagrant_docker': vagrant_docker,
'vagrant_snapshot': vagrant_snapshot,
'vagrant_upload': vagrant_upload,
'vagrant_image_name': vagrant_image_name
}
shutit_skeleton.create_skeleton(self)
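# handle_run: generate a throwaway skeleton module (bash pattern) from the given shutitfiles under /tmp/shutit_built, then execute its run.sh.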
def handle_run(self, args):
shutit_global.shutit_global_object.yield_to_draw()
module_name = shutit_util.random_id(chars=string.ascii_letters)
module_dir = "/tmp/shutit_built/" + module_name
module_domain = module_name + '.' + module_name
shutitfiles = args.shutitfiles
echo = args.echo
loglevel = args.loglevel
argv_new = [sys.argv[0],'skeleton','--shutitfile'] + shutitfiles + ['--name', module_dir,'--domain',module_domain,'--pattern','bash']
retdir = os.getcwd()
subprocess.call(argv_new)
os.chdir(module_dir)
run_cmd = ['./run.sh']
if echo:
run_cmd.append('--echo')
if loglevel and isinstance(loglevel,str):
run_cmd.append('-l')
run_cmd.append(loglevel)
subprocess.call(run_cmd)
os.chdir(retdir)
# TODO: rationalise/tidy
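# handle_build: ensure the user-level ~/.shutit storage area exists, then copy the parsed 'build' command-line arguments into the shutit object's build/target/repository settings.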
def handle_build(self, args):
shutit_global.shutit_global_object.yield_to_draw()
# We're not creating a skeleton, so make sure we have the infrastructure
# in place for a user-level storage area
shutit_home = self.host['shutit_path'] = os.path.expanduser('~/.shutit')
if not os.path.isdir(shutit_home):
mkpath(shutit_home, 0o700)
if not os.path.isfile(os.path.join(shutit_home, 'config')):
f = os.open(os.path.join(shutit_home, 'config'), os.O_WRONLY | os.O_CREAT, 0o600)
if shutit_global.shutit_global_object.ispy3:
os.write(f,bytes(default_cnf,shutit_global.shutit_global_object.default_encoding))
else:
os.write(f,default_cnf)
os.close(f)
self.list_configs['cfghistory'] = False
self.list_modules['long'] = False
self.list_modules['sort'] = None
self.build['exam_object'] = None
if self.action['list_configs']:
self.list_configs['cfghistory'] = args.history
elif self.action['list_modules']:
self.list_modules['long'] = args.long
self.list_modules['sort'] = args.sort
# Default this to False as it's not always set (mostly for debug logging).
self.build['video'] = args.video
self.build['training'] = args.training
self.build['distro_override'] = args.distro
self.build['mount_docker'] = args.mount_docker
self.build['walkthrough'] = args.walkthrough
self.build['walkthrough_wait'] = args.walkthrough_wait
self.nocolor = args.nocolor
self.build['training'] = args.training
self.build['exam'] = args.exam
self.build['choose_config'] = args.choose_config
self.build['extra_configs'] = args.config
self.build['config_overrides'] = args.set
self.build['ignorestop'] = args.ignorestop
self.build['ignoreimage'] = args.ignoreimage
self.build['imageerrorok'] = args.imageerrorok
self.build['tag_modules'] = args.tag_modules
self.build['deps_only'] = args.deps_only
self.build['always_echo'] = args.echo
self.target['docker_image'] = args.image_tag
self.repository['push'] = args.push
self.repository['export'] = args.export
self.repository['save'] = args.save
# Create a test session object if needed.
if self.build['exam']:
self.build['exam_object'] = shutit_exam.ShutItExamSession(self)
# What are we building on? Convert arg to conn_module we use.
self.build['delivery'] = args.delivery
if args.delivery == 'docker' or args.delivery is None:
self.build['conn_module'] = 'shutit.tk.conn_docker'
elif args.delivery == 'bash' or args.delivery == 'dockerfile':
self.build['conn_module'] = 'shutit.tk.conn_bash'
else:
assert False, shutit_util.print_debug(msg='Build must have a delivery method')
# Get these early for this part of the build.
# These should never be config arguments, since they are needed before config is passed in.
if args.shutit_module_path is not None:
module_paths = args.shutit_module_path.split(':')
if '.' not in module_paths:
module_paths.append('.')
args.set.append(('host', 'shutit_module_path', ':'.join(module_paths)))
shutit_global.shutit_global_object.interactive = int(args.interactive)
# Finished parsing args.
# Sort out config path
if self.action['list_configs'] or self.action['list_modules'] or self.action['list_deps'] or self.loglevel == logging.DEBUG:
self.build['log_config_path'] = shutit_global.shutit_global_object.shutit_state_dir + '/config'
if not os.path.exists(self.build['log_config_path']):
os.makedirs(self.build['log_config_path'])
os.chmod(self.build['log_config_path'],0o777)
def get_configs(self, configs):
"""Reads config files in, checking their security first
(in case passwords/sensitive info is in them).
"""
shutit_global.shutit_global_object.yield_to_draw()
cp = LayerConfigParser()
fail_str = ''
files = []
for config_file in configs:
if isinstance(config_file, tuple):
continue
if not shutit_util.is_file_secure(config_file):
fail_str = fail_str + '\nchmod 0600 ' + config_file
files.append(config_file)
if fail_str != '':
if shutit_global.shutit_global_object.interactive > 1:
fail_str = 'Files are not secure, mode should be 0600. Running the following commands to correct:\n' + fail_str + '\n'
# Actually show this to the user before failing...
self.log(fail_str,level=logging.INFO)
self.log('Do you want me to run this for you? (input y/n)',level=logging.INFO)
if shutit_global.shutit_global_object.interactive == 0 or shutit_util.util_raw_input(default='y') == 'y':
for f in files:
self.log('Correcting insecure file permissions on: ' + f,level=logging.INFO)
os.chmod(f,0o600)
# recurse
return self.get_configs(configs)
else:
for f in files:
self.log('Correcting insecure file permissions on: ' + f,level=logging.INFO)
os.chmod(f,0o600)
# recurse
return self.get_configs(configs)
self.fail(fail_str) # pragma: no cover
for config in configs:
if isinstance(config, tuple):
cp.readfp(config[1], filename=config[0])
else:
cp.read(config)
# Treat allowed_images as a special, additive case
self.build['shutit.core.module.allowed_images'] = cp.get_config_set('build', 'shutit.core.module.allowed_images')
return cp
# Returns the config dict
def parse_args(self):
r"""Responsible for parsing arguments from the command line.
Environment variables:
SHUTIT_OPTIONS:
Loads command line options from the environment (if set).
Behaves like GREP_OPTIONS:
- space separated list of arguments
- backslash before a space escapes the space separation
- backslash before a backslash is interpreted as a single backslash
- all other backslashes are treated literally
eg ' a\ b c\\ \\d \\\e\' becomes '', 'a b', 'c\', '\d', '\\e\'
SHUTIT_OPTIONS is ignored if we are creating a skeleton
"""
shutit_global.shutit_global_object.yield_to_draw()
# These are in order of their creation
actions = ['build', 'run', 'list_configs', 'list_modules', 'list_deps', 'skeleton', 'version']
# COMPAT 2014-05-15 - build is the default if there is no action specified
# and we've not asked for help and we've called via 'shutit.py'
if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] not in actions
and '-h' not in sys.argv and '--help' not in sys.argv):
sys.argv.insert(1, 'build')
parser = argparse.ArgumentParser(description='ShutIt - a tool for managing complex Docker deployments.\n\nTo view help for a specific subcommand, type ./shutit <subcommand> -h',prog="ShutIt")
subparsers = parser.add_subparsers(dest='action', help='''Action to perform - build=deploy to target, skeleton=construct a skeleton module, list_configs=show configuration as read in, list_modules=show modules available, list_deps=show dep graph ready for graphviz. Defaults to 'build'.''')
sub_parsers = dict()
for action in actions:
sub_parsers[action] = subparsers.add_parser(action)
args_list = sys.argv[1:]
# Applies to all
for action in ['build', 'list_configs', 'list_modules', 'list_deps','run','skeleton']:
sub_parsers[action].add_argument('--delaybeforesend', help='Delay before send setting (see pexpect)', default='0.05')
sub_parsers[action].add_argument('--promptcommand', help='Prompt command to set', default="'sleep .05||sleep 1'")
sub_parsers[action].add_argument('-o','--logfile',default='', help='Log output to this file')
sub_parsers[action].add_argument('-l','--log',default='', help='Log level (DEBUGV, DEBUG, INFO (default), WARNING, ERROR, CRITICAL)',choices=('DEBUG','INFO','WARNING','ERROR','CRITICAL','debugv','debug','info','warning','error','critical'))
sub_parsers[action].add_argument('--nocolor', help='Remove colorization from ShutIt', default=False, action='store_const', const=True)
sub_parsers[action].add_argument('-d','--delivery', help='Delivery method, aka target. "docker" container (default)', default=None, choices=('docker','dockerfile','bash'))
sub_parsers[action].add_argument('--echo', help='Always echo output', const=True, default=False, action='store_const')
# All except run and skeleton
for action in ['build', 'list_configs', 'list_modules', 'list_deps']:
sub_parsers[action].add_argument('--config', help='Config file for setup config. Must be with perms 0600. Multiple arguments allowed; config files considered in order.', default=[], action='append')
sub_parsers[action].add_argument('-s', '--set', help='Override a config item, e.g. "-s target rm no". Can be specified multiple times.', default=[], action='append', nargs=3, metavar=('SEC', 'KEY', 'VAL'))
sub_parsers[action].add_argument('--image_tag', help='Build container from specified image - if there is a symbolic reference, please use that, eg localhost.localdomain:5000/myref', default='')
sub_parsers[action].add_argument('--tag_modules', help='''Tag each module after it's successfully built regardless of the module config and based on the repository config.''', default=False, const=True, action='store_const')
sub_parsers[action].add_argument('-m', '--shutit_module_path', default=None, help='List of shutit module paths, separated by colons. ShutIt registers modules by running all .py files in these directories.')
sub_parsers[action].add_argument('--trace', help='Trace function calls', const=True, default=False, action='store_const')
sub_parsers[action].add_argument('--interactive', help='Level of interactive. 0 = none, 1 = honour pause points and config prompting, 2 = query user on each module, 3 = tutorial mode', default='1')
sub_parsers[action].add_argument('--ignorestop', help='Ignore STOP files', const=True, default=False, action='store_const')
sub_parsers[action].add_argument('--ignoreimage', help='Ignore disallowed images', const=True, default=None, action='store_const')
sub_parsers[action].add_argument('--imageerrorok', help='Exit without error if allowed images fails (used for test scripts)', const=True, default=False, action='store_const')
sub_parsers[action].add_argument('--deps_only', help='build deps only, tag with suffix "_deps"', const=True, default=False, action='store_const')
# Just run
sub_parsers['run'].add_argument('shutitfiles', nargs='*', default=['ShutItFile','Shutitfile','ShutItfile','ShutitFile','shutitfile'])
# Just skeleton
sub_parsers['skeleton'].add_argument('--name', help='Absolute path to new directory for module. Last part of path is taken as the module name.',default='')
sub_parsers['skeleton'].add_argument('--domain', help='Arbitrary but unique domain for namespacing your module, eg com.mycorp',default='')
sub_parsers['skeleton'].add_argument('--depends', help='Module id to depend on, default shutit.tk.setup (optional)', default='shutit.tk.setup')
sub_parsers['skeleton'].add_argument('--base_image', help='FROM image, default ubuntu:16.04 (optional)', default='ubuntu:16.04')
sub_parsers['skeleton'].add_argument('--script', help='Pre-existing shell script to integrate into module (optional)', nargs='?', default=None)
sub_parsers['skeleton'].add_argument('--output_dir', help='Just output the created directory', default=False, const=True, action='store_const')
sub_parsers['skeleton'].add_argument('--shutitfiles', nargs='+', default=None)
sub_parsers['skeleton'].add_argument('--vagrant_num_machines', default=None)
sub_parsers['skeleton'].add_argument('--vagrant_ssh_access', default=False, const=True, action='store_const')
sub_parsers['skeleton'].add_argument('--vagrant_machine_prefix', default=None)
sub_parsers['skeleton'].add_argument('--vagrant_docker', default=None, const=True, action='store_const')
sub_parsers['skeleton'].add_argument('--vagrant_snapshot', default=None, const=True, action='store_const')
sub_parsers['skeleton'].add_argument('--vagrant_upload', default=None, const=True, action='store_const')
sub_parsers['skeleton'].add_argument('--vagrant_image_name', default=None, const=True, action='store_const')
sub_parsers['skeleton'].add_argument('--pattern', help='Pattern to use', default='')
sub_parsers['skeleton'].add_argument('-a','--accept', help='Accept defaults', const=True, default=False, action='store_const')
# Just build
sub_parsers['build'].add_argument('--export', help='Perform docker export to a tar file', const=True, default=False, action='store_const')
sub_parsers['build'].add_argument('--save', help='Perform docker save to a tar file', const=True, default=False, action='store_const')
sub_parsers['build'].add_argument('--push', help='Push to a repo', const=True, default=False, action='store_const')
sub_parsers['build'].add_argument('--distro', help='Specify the distro type', default='', choices=('ubuntu','debian','alpine','steamos','red hat','centos','fedora','shutit'))
sub_parsers['build'].add_argument('--mount_docker', help='Mount the docker socket', default=False, action='store_const', const=True)
sub_parsers['build'].add_argument('-w','--walkthrough', help='Run in walkthrough mode', default=False, action='store_const', const=True)
sub_parsers['build'].add_argument('-c','--choose_config', help='Choose configuration interactively', default=False, action='store_const', const=True)
sub_parsers['build'].add_argument('--video', help='Run in video mode. Same as walkthrough, but waits n seconds rather than for input', nargs=1, default=-1)
sub_parsers['build'].add_argument('--training', help='Run in "training" mode, where correct input is required at key points', default=False, action='store_const', const=True)
sub_parsers['build'].add_argument('--exam', help='Run in "exam" mode, where correct input is required at key points and progress is tracked', default=False, action='store_const', const=True)
sub_parsers['build'].add_argument('--pane', help='Pane-based output', const=True, default=False, action='store_const')
# Just list_configs
sub_parsers['list_configs'].add_argument('--history', help='Show config with history', const=True, default=False, action='store_const')
# Just list_modules
sub_parsers['list_modules'].add_argument('--long', help='Show extended module info, including ordering', const=True, default=False, action='store_const')
sub_parsers['list_modules'].add_argument('--sort', help='Order the modules seen, default to module id', default='id', choices=('id','run_order'))
if os.environ.get('SHUTIT_OPTIONS', None) and args_list[0] != 'skeleton':
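# For example, SHUTIT_OPTIONS='--echo -l\ DEBUG' is expanded to the extra arguments ['--echo', '-l DEBUG'].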
env_args = os.environ['SHUTIT_OPTIONS'].strip()
# Split escaped backslashes
env_args_split = re.split(r'(\\\\)', env_args)
# Split non-escaped spaces
env_args_split = [re.split(r'(?<!\\)( )', item) for item in env_args_split]
# Flatten
env_args_split = [item for sublist in env_args_split for item in sublist]
# Split escaped spaces
env_args_split = [re.split(r'(\\ )', item) for item in env_args_split]
# Flatten
env_args_split = [item for sublist in env_args_split for item in sublist]
# Trim empty strings
env_args_split = [item for item in env_args_split if item != '']
# We know we don't have to deal with an empty env argument string
env_args_list = ['']
# Interpret all of the escape sequences
for item in env_args_split:
if item == ' ':
env_args_list.append('')
elif item == '\\ ':
env_args_list[-1] += ' '
elif item == '\\\\':
env_args_list[-1] += '\\'
else:
env_args_list[-1] += item
args_list[1:1] = env_args_list
args = parser.parse_args(args_list)
# Set up shutit_global
if args.action == 'version':
self.process_args(ShutItInit(args.action))
elif args.action == 'skeleton':
shutit_global.shutit_global_object.delaybeforesend = float(args.delaybeforesend)
shutit_global.shutit_global_object.prompt_command = args.promptcommand
self.process_args(ShutItInit(args.action,
logfile=args.logfile,
loglevel=args.log,
nocolor=args.nocolor,
delivery=args.delivery,
shutitfiles=args.shutitfiles,
script=args.script,
base_image=args.base_image,
depends=args.depends,
name=args.name,
domain=args.domain,
pattern=args.pattern,
output_dir=args.output_dir,
vagrant_ssh_access=args.vagrant_ssh_access,
vagrant_num_machines=args.vagrant_num_machines,
vagrant_machine_prefix=args.vagrant_machine_prefix,
vagrant_docker=args.vagrant_docker,
vagrant_snapshot=args.vagrant_snapshot,
vagrant_upload=args.vagrant_upload,
vagrant_image_name=args.vagrant_image_name))
elif args.action == 'run':
shutit_global.shutit_global_object.delaybeforesend = float(args.delaybeforesend)
shutit_global.shutit_global_object.prompt_command = args.promptcommand
self.process_args(ShutItInit(args.action,
logfile=args.logfile,
loglevel=args.log,
nocolor=args.nocolor,
shutitfiles=args.shutitfiles,
echo=args.echo,
delivery = args.delivery))
elif args.action == 'build':
shutit_global.shutit_global_object.delaybeforesend = float(args.delaybeforesend)
shutit_global.shutit_global_object.prompt_command = args.promptcommand
# If we are in panes mode, tell the global object.
shutit_global.shutit_global_object.managed_panes = args.pane
# If we are in panes mode, set up logIO.
if shutit_global.shutit_global_object.managed_panes:
shutit_global.shutit_global_object.logstream = StringIO()
if not shutit_global.shutit_global_object.determine_interactive():
self.log('You cannot have panes if you are not in an interactive shell',level=logging.WARNING)
shutit_global.shutit_global_object.managed_panes = False
self.process_args(ShutItInit(args.action,
logfile=args.logfile,
loglevel=args.log,
nocolor=args.nocolor,
push=args.push,
export=args.export,
save=args.save,
distro=args.distro,
mount_docker=args.mount_docker,
walkthrough=args.walkthrough,
training=args.training,
choose_config=args.choose_config,
config=args.config,
set=args.set,
ignorestop=args.ignorestop,
ignoreimage=args.ignoreimage,
imageerrorok=args.imageerrorok,
tag_modules=args.tag_modules,
image_tag=args.image_tag,
video=args.video,
deps_only=args.deps_only,
echo=args.echo,
delivery=args.delivery,
interactive=args.interactive,
trace=args.trace,
shutit_module_path=args.shutit_module_path,
exam=args.exam))
# Set up trace ASAP.
if args.trace:
def tracefunc(frame, event, arg, indent=[0]):
indent = indent # pylint
arg = arg # pylint
if event == 'call':
self.log('-> call function: ' + frame.f_code.co_name + ' ' + str(frame.f_code.co_varnames),level=logging.DEBUG)
elif event == 'return':
self.log('<- exit function: ' + frame.f_code.co_name,level=logging.DEBUG)
return tracefunc
sys.settrace(tracefunc)
elif args.action == 'list_configs':
self.process_args(ShutItInit(args.action,
logfile=args.logfile,
nocolor=args.nocolor,
history=args.history))
elif args.action == 'list_modules':
self.process_args(ShutItInit(args.action,
logfile=args.logfile,
nocolor=args.nocolor,
sort=args.sort,
long_modules=args.long))
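# conn_docker_start_container: assemble the 'docker run' command from the target/build config, spawn it in a pexpect session, wait for a shell prompt (answering any password request), and read the new container id back from the cidfile.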
def conn_docker_start_container(self, shutit_session_name):
shutit_global.shutit_global_object.yield_to_draw()
docker = self.host['docker_executable'].split(' ')
# Always-required options
if not os.path.exists(shutit_global.shutit_global_object.shutit_state_dir + '/cidfiles'):
os.makedirs(shutit_global.shutit_global_object.shutit_state_dir + '/cidfiles')
self.build['cidfile'] = shutit_global.shutit_global_object.shutit_state_dir + '/cidfiles/' + shutit_global.shutit_global_object.username + '_cidfile_' + shutit_global.shutit_global_object.build_id
cidfile_arg = '--cidfile=' + self.build['cidfile']
# Singly-specified options
privileged_arg = ''
name_arg = ''
hostname_arg = ''
rm_arg = ''
net_arg = ''
mount_docker_arg = ''
shell_arg = '/bin/bash'
if self.build['privileged']:
privileged_arg = '--privileged=true'
if self.target['name'] != '':
name_arg = '--name=' + self.target['name']
if self.target['hostname'] != '':
hostname_arg = '-h=' + self.target['hostname']
if self.build['net'] != '':
net_arg = '--net="' + self.build['net'] + '"'
if self.build['mount_docker']:
mount_docker_arg = '-v=/var/run/docker.sock:/var/run/docker.sock'
# Incompatible with do_repository_work
if self.target['rm']:
rm_arg = '--rm=true'
if self.build['base_image'] in ('alpine','busybox'):
shell_arg = '/bin/ash'
# Multiply-specified options
port_args = []
dns_args = []
volume_args = []
volumes_from_args = []
volumes_list = self.target['volumes'].strip().split()
volumes_from_list = self.target['volumes_from'].strip().split()
ports_list = self.target['ports'].strip().split()
dns_list = self.host['dns'].strip().split()
for portmap in ports_list:
port_args.append('-p=' + portmap)
for dns in dns_list:
dns_args.append('--dns=' + dns)
for volume in volumes_list:
volume_args.append('-v=' + volume)
for volumes_from in volumes_from_list:
volumes_from_args.append('--volumes-from=' + volumes_from)
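# Assemble the final 'docker run' argument list, dropping any option strings that were left empty.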
docker_command = docker + [
arg for arg in [
'run',
cidfile_arg,
privileged_arg,
name_arg,
hostname_arg,
rm_arg,
net_arg,
mount_docker_arg,
] + volume_args + volumes_from_args + port_args + dns_args + [
'-t',
'-i',
self.target['docker_image'],
shell_arg
] if arg != ''
]
self.build['docker_command'] = ' '.join(docker_command)
# docker run happens here
self.log('Startup command is: ' + self.build['docker_command'],level=logging.DEBUG)
self.log('Downloading context, please be patient',level=logging.INFO)
shutit_pexpect_session = ShutItPexpectSession(self, shutit_session_name, docker_command[0], docker_command[1:])
target_child = shutit_pexpect_session.pexpect_child
expect = ['assword', shutit_global.shutit_global_object.base_prompt.strip(), 'Waiting', 'ulling', 'endpoint', 'Download','o such file']
res = shutit_pexpect_session.expect(expect, timeout=shutit_global.shutit_global_object.default_timeout)
while True:
if target_child.before == type(pexpect.exceptions.EOF):
self.fail('EOF exception seen') # pragma: no cover
try:
self.log(target_child.before + target_child.after,level=logging.DEBUG)
except Exception:
pass
if res == 0:
res = self.send(self.host['password'], shutit_pexpect_child=target_child, expect=expect, timeout=shutit_global.shutit_global_object.default_timeout, check_exit=False, fail_on_empty_before=False, echo=False)
elif res == 1:
self.log('Prompt found, breaking out',level=logging.DEBUG)
break
elif res == 6:
self.fail('Docker not installed.') # pragma: no cover
break
elif res == 7:
self.log('Initial command timed out, assuming OK to continue.',level=logging.WARNING)
break
elif res == 8:
self.fail('EOF seen.') # pragma: no cover
else:
res = shutit_pexpect_session.expect(expect, timeout=shutit_global.shutit_global_object.default_timeout)
continue
self.log('Getting cid',level=logging.DEBUG)
# Get the cid, to determine whether the container started up ok.
# pexpect.spawn does not give us an easy way to determine the success of the run without closing the stream.
while True:
try:
cid = open(self.build['cidfile']).read()
break
except Exception:
time.sleep(1)
if cid == '' or re.match('^[a-z0-9]+$', cid) is None:
self.fail('Could not get container_id - quitting. Check whether other containers may be clashing on port allocation or name.\nYou might want to try running: sudo docker kill ' + self.target['name'] + '; sudo docker rm ' + self.target['name'] + '\nto resolve a name clash or: ' + self.host['docker_executable'] + ' ps -a | grep ' + self.target['ports'] + " | awk '{print $1}' | " + 'xargs ' + self.host['docker_executable'] + ' kill\nto ' + 'resolve a port clash\n') # pragma: no cover
self.log('cid: ' + cid,level=logging.DEBUG)
self.target['container_id'] = cid
return target_child
def conn_docker_destroy_container(self, host_shutit_session_name, container_shutit_session_name, container_id, loglevel=logging.DEBUG):
shutit_global.shutit_global_object.yield_to_draw()
# Close connection.
self.get_shutit_pexpect_session_from_id(container_shutit_session_name).pexpect_child.close()
host_child = self.get_shutit_pexpect_session_from_id(host_shutit_session_name).pexpect_child
self.send(' command docker rm -f ' + container_id + ' && rm -f ' + self.build['cidfile'],shutit_pexpect_child=host_child,expect=self.expect_prompts['ORIGIN_ENV'],loglevel=loglevel)
def setup_target_child_environment(self, target_child, target_child_id='target_child',prefix='root'):
shutit_global.shutit_global_object.yield_to_draw()
# Some pexpect settings
shutit_pexpect_session = self.get_shutit_pexpect_session_from_id(target_child_id)
shutit_pexpect_session.pexpect_child = target_child
self.set_default_shutit_pexpect_session_expect(shutit_global.shutit_global_object.base_prompt)
# target child
self.set_default_shutit_pexpect_session(shutit_pexpect_session)
shutit_pexpect_session.setup_prompt(prefix,prefix=prefix)
shutit_pexpect_session.login_stack.append(prefix)
def setup_host_child_environment(self):
shutit_global.shutit_global_object.yield_to_draw()
# Now let's have a host_child
self.log('Spawning host child',level=logging.DEBUG)
shutit_pexpect_session = ShutItPexpectSession(self, 'host_child', '/bin/bash')
# Set up prompts and let the user do things before the build
self.set_default_shutit_pexpect_session(shutit_pexpect_session)
self.set_default_shutit_pexpect_session_expect(shutit_global.shutit_global_object.base_prompt)
# ORIGIN_ENV is a special case of the prompt maintained for performance reasons, don't change.
prefix = 'ORIGIN_ENV'
shutit_pexpect_session.setup_prompt('ORIGIN_ENV', prefix=prefix)
shutit_pexpect_session.login_stack.append(prefix)
def do_exam_output(self):
shutit_global.shutit_global_object.yield_to_draw()
if self.build['exam_object']:
test = self.build['exam_object']
test.calculate_score()
test_output = str(test)
self.log(test_output,level=logging.CRITICAL)
f = open('/tmp/shutit_exam_output', 'w')
f.write(test_output)
f.close()
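# do_lists: for the list_deps/list_configs actions, write the dependency digraphs and the collected configuration to the log config path, then exit.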
def do_lists(self):
shutit_global.shutit_global_object.yield_to_draw()
if self.action['list_deps']:
cfg = self.cfg
# Show dependency graph
digraph = 'digraph depgraph {\n'
digraph += '\n'.join([ make_dep_graph(module) for module_id, module in self.shutit_map.items() if module_id in cfg and cfg[module_id]['shutit.core.module.build'] ])
digraph += '\n}'
f = open(self.build['log_config_path'] + '/digraph.txt','w')
f.write(digraph)
f.close()
digraph_all = 'digraph depgraph {\n'
digraph_all += '\n'.join([ make_dep_graph(module) for module_id, module in self.shutit_map.items() ])
digraph_all += '\n}'
fname = self.build['log_config_path'] + '/digraph_all.txt'
f = open(fname,'w')
f.write(digraph_all)
f.close()
self.log('\n================================================================================\n' + digraph_all,level=logging.INFO)
self.log('\nAbove is the digraph for ALL MODULES SEEN in this ShutIt invocation. Use graphviz to render into an image, eg\n\n\tcat ' + fname + ' | dot -Tpng -o depgraph.png\n',level=logging.INFO)
self.log('\n================================================================================\n',level=logging.INFO)
fname = self.build['log_config_path'] + '/digraph_this.txt'
f = open(fname,'w')
f.write(digraph)
f.close()
self.log('\n\n' + digraph,level=logging.INFO)
self.log('\n================================================================================\n' + digraph,level=logging.INFO)
self.log('\nAbove is the digraph for all modules configured to be built IN THIS ShutIt invocation. Use graphviz to render into an image, eg\n\ncat ' + fname + ' | dot -Tpng -o depgraph.png\n',level=logging.INFO)
self.log('\n================================================================================\n',level=logging.INFO)
# Exit now
shutit_global.shutit_global_object.handle_exit()
# Dependency validation done, now collect configs of those marked for build.
self.config_collection_for_built()
if self.action['list_configs'] or self.loglevel <= logging.DEBUG:
self.log(self.print_config(self.cfg, history=self.list_configs['cfghistory']),level=logging.INFO)
# Set build completed
self.build['completed'] = True
f = open(self.build['log_config_path'] + '/cfg.txt','w')
f.write(self.print_config(self.cfg, history=self.list_configs['cfghistory']))
f.close()
self.log('================================================================================',level=logging.INFO)
self.log('Config details placed in: ' + self.build['log_config_path'],level=logging.INFO)
self.log('================================================================================',level=logging.INFO)
self.log('To render the digraph of this build into an image run eg:\n\ndot -Tgv -o ' + self.build['log_config_path'] + '/digraph.gv ' + self.build['log_config_path'] + '/digraph.txt && dot -Tpdf -o digraph.pdf ' + self.build['log_config_path'] + '/digraph.gv\n\n',level=logging.INFO)
self.log('================================================================================',level=logging.INFO)
self.log('To render the digraph of all visible modules into an image, run eg:\n\ndot -Tgv -o ' + self.build['log_config_path'] + '/digraph_all.gv ' + self.build['log_config_path'] + '/digraph_all.txt && dot -Tpdf -o digraph_all.pdf ' + self.build['log_config_path'] + '/digraph_all.gv\n\n',level=logging.INFO)
self.log('================================================================================',level=logging.INFO)
self.log('\nConfiguration details have been written to the folder: ' + self.build['log_config_path'] + '\n',level=logging.INFO)
self.log('================================================================================',level=logging.INFO)
if self.action['list_configs'] or self.action['list_deps']:
shutit_global.shutit_global_object.handle_exit(exit_code=0)
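# do_interactive_modules: interactive loop that lets the user toggle modules on or off and edit their config values before the build continues.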
def do_interactive_modules(self):
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
errs = []
while True:
self.do_list_modules(long_output=False,sort_order='run_order')
# Which module do you want to toggle?
module_id = shutit_util.util_raw_input(prompt='Which module id do you want to toggle?\n(just hit return to continue with build)\n(you can enter a substring if it is uniquely matching)\n')
if module_id:
try:
_=cfg[module_id]
except KeyError:
matched_to = []
for m in cfg:
if re.match('.*'+module_id+'.*',m):
matched_to.append(m)
if len(matched_to) > 1:
shutit_global.shutit_global_object.shutit_print('Please input a uniquely matchable module id. Matches were: ' + str(matched_to))
continue
elif matched_to:
module_id = matched_to[0]
else:
shutit_global.shutit_global_object.shutit_print('Please input a valid module id')
continue
cfg[module_id]['shutit.core.module.build'] = not cfg[module_id]['shutit.core.module.build']
if not self.config_collection_for_built(throw_error=False):
cfg[module_id]['shutit.core.module.build'] = not cfg[module_id]['shutit.core.module.build']
shutit_util.util_raw_input(prompt='Hit return to continue.\n')
continue
# If true, set up config for that module
if cfg[module_id]['shutit.core.module.build']:
# TODO: does this catch all the ones switched on? Once done, get configs for all those.
newcfg_list = []
while True:
shutit_global.shutit_global_object.shutit_print(self.print_config(cfg,module_id=module_id))
name = shutit_util.util_raw_input(prompt='Above is the config for that module. Hit return to continue, or enter the name of a config item you want to update.\n')
if name:
doing_list = False
while True:
if doing_list:
val_type = shutit_util.util_raw_input(prompt='Input the type for the next list item: b(oolean), s(tring).\n')
if val_type not in ('b','s',''):
continue
else:
val_type = shutit_util.util_raw_input(prompt='Input the type for that config item: b(oolean), s(tring), l(ist).\n')
if val_type not in ('b','s','l',''):
continue
if val_type == 's':
val = shutit_util.util_raw_input(prompt='Input the new value for that config item.\n')
if doing_list:
newcfg_list.append(val)
else:
break
elif val_type == 'b':
val = shutit_util.util_raw_input(prompt='Input the new value for the boolean (t/f).\n')
if doing_list:
if val == 't':
newcfg_list.append(True)
elif val == 'f':
newcfg_list.append(False)
else:
shutit_global.shutit_global_object.shutit_print('Input t or f please')
continue
else:
break
elif val_type == 'l':
doing_list = True
newcfg_list = []
elif val_type == '':
break
# TODO: handle blank/None
if doing_list:
cfg[module_id][name] = newcfg_list
else:
cfg[module_id][name] = val
else:
break
else:
pass
# TODO: if removing, get any that depend on it, and remove those too
else:
break
return errs
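# setup_shutit_obj: top-level flow - parse arguments, load configuration and modules, connect to the target, then run the remove, build, test and finalize phases.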
def setup_shutit_obj(self):
shutit_global.shutit_global_object.yield_to_draw()
# Parse command-line arguments
self.parse_args()
self.load_configs()
# Try and ensure shutit is on the path - makes onboarding easier
# Only do this if we're in a terminal
if shutit_global.shutit_global_object.determine_interactive() and spawn.find_executable('shutit') is None:
self.setup_shutit_path()
self.load_mod_from_file(os.path.join(self.shutit_main_dir, 'shutit_setup.py'))
self.load_shutit_modules()
self.log('ShutIt modules loaded',level=logging.INFO)
self.init_shutit_map()
self.config_collection()
self.log('Configuration loaded',level=logging.INFO)
if self.action['list_modules']:
self.do_list_modules()
shutit_global.shutit_global_object.handle_exit()
if not self.action['list_deps'] and not self.action['list_modules']:
self.conn_target()
self.log('Connected to target',level=logging.INFO)
if shutit_global.shutit_global_object.interactive > 0 and self.build['choose_config']:
errs = self.do_interactive_modules()
else:
errs = []
errs.extend(self.check_deps())
self.do_lists()
# Check for conflicts now.
errs.extend(self.check_conflicts())
# Cache the results of check_ready at the start.
errs.extend(self.check_ready(throw_error=False))
if errs:
self.log(self.print_modules(), level=logging.ERROR)
child = None
for err in errs:
self.log(err[0], level=logging.ERROR)
if not child and len(err) > 1:
child = err[1]
self.fail("Encountered some errors, quitting", shutit_pexpect_child=child) # pragma: no cover
self.do_remove()
self.do_build()
self.do_test()
self.do_finalize()
self.finalize_target()
self.log(self.build_report('#Module: N/A (END)'), level=logging.DEBUG)
self.do_exam_output()
self.log('ShutIt run finished',level=logging.INFO)
shutit_global.shutit_global_object.handle_exit(exit_code=0)
def setup_shutit_path(self):
shutit_global.shutit_global_object.yield_to_draw()
# try the current directory, the parent directory, the home directory, or ~/shutit
if not self.host['add_shutit_to_path']:
return
res = shutit_util.util_raw_input(prompt='shutit appears not to be on your path - should we try to find it and add it to your ~/.bashrc? (Y/n)')
if res in ['n','N']:
with open(os.path.join(self.host['shutit_path'], 'config'), 'a') as f:
f.write('\n[host]\nadd_shutit_to_path: no\n')
return
path_to_shutit = ''
for d in ['.','..','~','~/shutit']:
path = os.path.abspath(d + '/shutit')
if not os.path.isfile(path):
continue
path_to_shutit = path
while path_to_shutit == '':
d = shutit_util.util_raw_input(prompt='cannot auto-find shutit - please input the path to your shutit dir\n')
path = os.path.abspath(d + '/shutit')
if not os.path.isfile(path):
continue
path_to_shutit = path
if path_to_shutit != '':
bashrc = os.path.expanduser('~/.bashrc')
with open(bashrc, "a") as myfile:
#http://unix.stackexchange.com/questions/26676/how-to-check-if-a-shell-is-login-interactive-batch
myfile.write('\nexport PATH="$PATH:' + os.path.dirname(path_to_shutit) + '"\n')
shutit_util.util_raw_input(prompt='\nPath set up - please open a new terminal and re-run the command\n')
shutit_global.shutit_global_object.handle_exit()
def check_deps(self):
"""Dependency checking phase is performed in this method.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
self.log('PHASE: dependencies', level=logging.DEBUG)
self.pause_point('\nNow checking for dependencies between modules', print_input=False, level=3)
# Get modules we're going to build
to_build = [
self.shutit_map[module_id] for module_id in self.shutit_map
if module_id in cfg and cfg[module_id]['shutit.core.module.build']
]
# Add any deps we may need by extending to_build and altering cfg
for module in to_build:
self.resolve_dependencies(to_build, module)
# Dep checking
def err_checker(errs, triples):
"""Collate error information.
"""
new_triples = []
for err, triple in zip(errs, triples):
if not err:
new_triples.append(triple)
continue
found_errs.append(err)
return new_triples
found_errs = []
triples = []
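# Build a (depender, dependee, dependee_id) triple for every declared dependency, then filter out any that fail the existence, build-configuration or ordering checks.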
for depender in to_build:
for dependee_id in depender.depends_on:
triples.append((depender, self.shutit_map.get(dependee_id), dependee_id))
triples = err_checker([ self.check_dependee_exists(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)
triples = err_checker([ self.check_dependee_build(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)
triples = err_checker([ check_dependee_order(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)
if found_errs:
return [(err,) for err in found_errs]
self.log('Modules configured to be built (in order) are: ', level=logging.DEBUG)
for module_id in self.module_ids():
module = self.shutit_map[module_id]
if cfg[module_id]['shutit.core.module.build']:
self.log(module_id + ' ' + str(module.run_order), level=logging.DEBUG)
self.log('\n', level=logging.DEBUG)
return []
def check_conflicts(self):
"""Checks for any conflicts between modules configured to be built.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# Now consider conflicts
self.log('PHASE: conflicts', level=logging.DEBUG)
errs = []
self.pause_point('\nNow checking for conflicts between modules', print_input=False, level=3)
for module_id in self.module_ids():
if not cfg[module_id]['shutit.core.module.build']:
continue
conflicter = self.shutit_map[module_id]
for conflictee in conflicter.conflicts_with:
# If the module id isn't there, there's no problem.
conflictee_obj = self.shutit_map.get(conflictee)
if conflictee_obj is None:
continue
if ((cfg[conflicter.module_id]['shutit.core.module.build'] or
self.is_to_be_built_or_is_installed(conflicter)) and
(cfg[conflictee_obj.module_id]['shutit.core.module.build'] or
self.is_to_be_built_or_is_installed(conflictee_obj))):
errs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))
return errs
def check_ready(self, throw_error=True):
"""Check that all modules are ready to be built, calling check_ready on
each of those configured to be built and not already installed
(see shutit.is_installed).
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
self.log('PHASE: check_ready', level=logging.DEBUG)
errs = []
self.pause_point('\nNow checking whether we are ready to build modules configured to be built', print_input=False, level=3)
# Find out who we are to see whether we need to log in and out or not.
for module_id in self.module_ids():
module = self.shutit_map[module_id]
self.log('considering check_ready (is it ready to be built?): ' + module_id, level=logging.DEBUG)
if cfg[module_id]['shutit.core.module.build'] and module.module_id not in self.get_current_shutit_pexpect_session_environment().modules_ready and not self.is_installed(module):
self.log('checking whether module is ready to build: ' + module_id, level=logging.DEBUG)
self.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)
# Move to the correct directory (eg for checking for the existence of files needed for build)
revert_dir = os.getcwd()
self.get_current_shutit_pexpect_session_environment().module_root_dir = os.path.dirname(self.shutit_file_map[module_id])
self.chdir(self.get_current_shutit_pexpect_session_environment().module_root_dir)
if not self.is_ready(module) and throw_error:
errs.append((module_id + ' not ready to install.\nRead the check_ready function in the module,\nor log messages above to determine the issue.\n\n', self.get_shutit_pexpect_session_from_id('target_child')))
self.logout(echo=False)
self.chdir(revert_dir)
return errs
def do_remove(self, loglevel=logging.DEBUG):
"""Remove modules by calling remove method on those configured for removal.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# Now get the run_order keys in order and go.
self.log('PHASE: remove', level=loglevel)
self.pause_point('\nNow removing any modules that need removing', print_input=False, level=3)
# Login at least once to get the exports.
for module_id in self.module_ids():
module = self.shutit_map[module_id]
self.log('considering whether to remove: ' + module_id, level=logging.DEBUG)
if cfg[module_id]['shutit.core.module.remove']:
self.log('removing: ' + module_id, level=logging.DEBUG)
self.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)
if not module.remove(self):
self.log(self.print_modules(), level=logging.DEBUG)
self.fail(module_id + ' failed on remove', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
else:
if self.build['delivery'] in ('docker','dockerfile'):
# Create a directory and files to indicate this has been removed.
self.send(' command mkdir -p ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + ' && command rm -f ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + '/built && command touch ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + '/removed', loglevel=loglevel, echo=False)
# Remove from "installed" cache
if module.module_id in self.get_current_shutit_pexpect_session_environment().modules_installed:
self.get_current_shutit_pexpect_session_environment().modules_installed.remove(module.module_id)
# Add to "not installed" cache
self.get_current_shutit_pexpect_session_environment().modules_not_installed.append(module.module_id)
self.logout(echo=False)
def build_module(self, module, loglevel=logging.DEBUG):
"""Build passed-in module.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
self.log('Building ShutIt module: ' + module.module_id + ' with run order: ' + str(module.run_order), level=logging.INFO)
self.build['report'] = (self.build['report'] + '\nBuilding ShutIt module: ' + module.module_id + ' with run order: ' + str(module.run_order))
if not module.build(self):
self.fail(module.module_id + ' failed on build', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
else:
if self.build['delivery'] in ('docker','dockerfile'):
# Create a directory and files to indicate this has been built.
self.send(' command mkdir -p ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + ' && command touch ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + '/built && command rm -f ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + '/removed', loglevel=loglevel, echo=False)
# Put it into "installed" cache
self.get_current_shutit_pexpect_session_environment().modules_installed.append(module.module_id)
# Remove from "not installed" cache
if module.module_id in self.get_current_shutit_pexpect_session_environment().modules_not_installed:
self.get_current_shutit_pexpect_session_environment().modules_not_installed.remove(module.module_id)
self.pause_point('\nPausing to allow inspect of build for: ' + module.module_id, print_input=True, level=2)
self.build['report'] = (self.build['report'] + '\nCompleted module: ' + module.module_id)
if cfg[module.module_id]['shutit.core.module.tag']:
self.log(self.build_report('#Module:' + module.module_id), level=logging.DEBUG)
if not cfg[module.module_id]['shutit.core.module.tag'] and shutit_global.shutit_global_object.interactive >= 2:
shutit_global.shutit_global_object.shutit_print("\n\nDo you want to save state now we\'re at the " + "end of this module? (" + module.module_id + ") (input y/n)")
cfg[module.module_id]['shutit.core.module.tag'] = (shutit_util.util_raw_input(default='y') == 'y')
if cfg[module.module_id]['shutit.core.module.tag'] or self.build['tag_modules']:
self.log(module.module_id + ' configured to be tagged, doing repository work',level=logging.INFO)
# Stop all before we tag to avoid file changing errors, and clean up pid files etc..
self.stop_all(module.run_order)
self.do_repository_work(str(module.module_id) + '_' + str(module.run_order), password=self.host['password'], docker_executable=self.host['docker_executable'], force=True)
# Start all after we tag to ensure services are up as expected.
self.start_all(module.run_order)
if shutit_global.shutit_global_object.interactive >= 2:
shutit_global.shutit_global_object.shutit_print("\n\nDo you want to stop interactive mode? (input y/n)\n")
if shutit_util.util_raw_input(default='y') == 'y':
shutit_global.shutit_global_object.interactive = 0
def do_build(self):
"""Runs build phase, building any modules that we've determined
need building.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
self.log('PHASE: build, repository work', level=logging.DEBUG)
module_id_list = self.module_ids()
if self.build['deps_only']:
module_id_list_build_only = list(filter(lambda x: cfg[x]['shutit.core.module.build'], module_id_list))
for module_id in module_id_list:
module = self.shutit_map[module_id]
self.log('Considering whether to build: ' + module.module_id, level=logging.INFO)
if cfg[module.module_id]['shutit.core.module.build']:
if self.build['delivery'] not in module.ok_delivery_methods:
self.fail('Module: ' + module.module_id + ' can only be built with one of these --delivery methods: ' + str(module.ok_delivery_methods) + '\nSee shutit build -h for more info, or try adding: --delivery <method> to your shutit invocation') # pragma: no cover
if self.is_installed(module):
self.build['report'] = (self.build['report'] + '\nBuilt already: ' + module.module_id + ' with run order: ' + str(module.run_order))
else:
# We move to the module directory to perform the build, returning immediately afterwards.
if self.build['deps_only'] and module_id == module_id_list_build_only[-1]:
# If this is the last module, and we are only building deps, stop here.
self.build['report'] = (self.build['report'] + '\nSkipping: ' + module.module_id + ' with run order: ' + str(module.run_order) + '\n\tas this is the final module and we are building dependencies only')
else:
revert_dir = os.getcwd()
self.get_current_shutit_pexpect_session_environment().module_root_dir = os.path.dirname(self.shutit_file_map[module_id])
self.chdir(self.get_current_shutit_pexpect_session_environment().module_root_dir)
self.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)
self.build_module(module)
self.logout(echo=False)
self.chdir(revert_dir)
if self.is_installed(module):
self.log('Starting module',level=logging.DEBUG)
if not module.start(self):
self.fail(module.module_id + ' failed on start', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
def do_test(self):
"""Runs test phase, erroring if any return false.
"""
shutit_global.shutit_global_object.yield_to_draw()
if not self.build['dotest']:
self.log('Tests configured off, not running',level=logging.DEBUG)
return
# Test in reverse order
self.log('PHASE: test', level=logging.DEBUG)
self.stop_all()
self.start_all()
for module_id in self.module_ids(rev=True):
# Only test if it's installed.
if self.is_installed(self.shutit_map[module_id]):
self.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)
self.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)
if not self.shutit_map[module_id].test(self):
self.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
self.logout(echo=False)
def do_finalize(self):
"""Runs finalize phase; run after all builds are complete and all modules
have been stopped.
"""
shutit_global.shutit_global_object.yield_to_draw()
def _finalize(self):
# Stop all the modules
self.stop_all()
# Finalize in reverse order
self.log('PHASE: finalizing object ' + str(self), level=logging.DEBUG)
# Login at least once to get the exports.
for module_id in self.module_ids(rev=True):
# Only finalize if it's thought to be installed.
if self.is_installed(self.shutit_map[module_id]):
self.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)
if not self.shutit_map[module_id].finalize(self):
self.fail(module_id + ' failed on finalize', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
self.logout(echo=False)
_finalize(self)
# run_order of -1 means 'stop everything'
def stop_all(self, run_order=-1):
"""Runs stop method on all modules less than the passed-in run_order.
Used when target is exporting itself mid-build, so we clean up state
before committing run files etc.
"""
shutit_global.shutit_global_object.yield_to_draw()
# sort them so they're stopped in reverse order
for module_id in self.module_ids(rev=True):
shutit_module_obj = self.shutit_map[module_id]
if run_order == -1 or shutit_module_obj.run_order <= run_order:
if self.is_installed(shutit_module_obj):
if not shutit_module_obj.stop(self):
self.fail('failed to stop: ' + module_id, shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
# Start all apps less than the supplied run_order
def start_all(self, run_order=-1):
"""Runs start method on all modules less than the passed-in run_order.
Used when target is exporting itself mid-build, so we can export a clean
target and still have depended-on modules running if necessary.
"""
shutit_global.shutit_global_object.yield_to_draw()
# sort them so they're started in order
for module_id in self.module_ids():
shutit_module_obj = self.shutit_map[module_id]
if run_order == -1 or shutit_module_obj.run_order <= run_order:
if self.is_installed(shutit_module_obj):
if not shutit_module_obj.start(self):
self.fail('failed to start: ' + module_id, shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover
def is_ready(self, shutit_module_obj):
"""Returns true if this module is ready to be built.
Caches the result (as it's assumed not to change during the build).
"""
shutit_global.shutit_global_object.yield_to_draw()
if shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_ready:
self.log('is_ready: returning True from cache',level=logging.DEBUG)
return True
ready = shutit_module_obj.check_ready(self)
if ready:
self.get_current_shutit_pexpect_session_environment().modules_ready.append(shutit_module_obj.module_id)
return True
return False
def init_shutit_map(self):
"""Initializes the module map of shutit based on the modules
we have gathered.
Checks we have core modules
Checks for duplicate module details.
Sets up common config.
Sets up map of modules.
"""
shutit_global.shutit_global_object.yield_to_draw()
modules = self.shutit_modules
# Have we got anything to process outside of special modules?
if len([mod for mod in modules if mod.run_order > 0]) < 1:
self.log(modules,level=logging.DEBUG)
path = ':'.join(self.host['shutit_module_path'])
self.log('\nIf you are new to ShutIt, see:\n\n\thttp://ianmiell.github.io/shutit/\n\nor try running\n\n\tshutit skeleton\n\n',level=logging.INFO)
if path == '':
self.fail('No ShutIt modules aside from core ones found and no ShutIt module path given.\nDid you set --shutit_module_path/-m wrongly?\n') # pragma: no cover
elif path == '.':
self.fail('No modules aside from core ones found and no ShutIt module path given apart from default (.).\n\n- Did you set --shutit_module_path/-m?\n- Is there a STOP* file in your . dir?') # pragma: no cover
else:
self.fail('No modules aside from core ones found and no ShutIt modules in path:\n\n' + path + '\n\nor their subfolders. Check your --shutit_module_path/-m setting and check that there are ShutIt modules below without STOP* files in any relevant directories.') # pragma: no cover
self.log('PHASE: base setup', level=logging.DEBUG)
run_orders = {}
has_core_module = False
for module in modules:
assert isinstance(module, ShutItModule), shutit_util.print_debug()
if module.module_id in self.shutit_map:
self.fail('Duplicated module id: ' + module.module_id + '\n\nYou may want to check your --shutit_module_path setting') # pragma: no cover
if module.run_order in run_orders:
self.fail('Duplicate run order: ' + str(module.run_order) + ' for ' + module.module_id + ' and ' + run_orders[module.run_order].module_id + '\n\nYou may want to check your --shutit_module_path setting') # pragma: no cover
if module.run_order == 0:
has_core_module = True
self.shutit_map[module.module_id] = run_orders[module.run_order] = module
self.shutit_file_map[module.module_id] = get_module_file(self, module)
if not has_core_module:
self.fail('No module with run_order=0 specified! This is required.') # pragma: no cover
def conn_target(self):
"""Connect to the target.
"""
shutit_global.shutit_global_object.yield_to_draw()
conn_module = None
for mod in self.conn_modules:
if mod.module_id == self.build['conn_module']:
conn_module = mod
break
if conn_module is None:
self.fail('Couldn\'t find conn_module ' + self.build['conn_module']) # pragma: no cover
# Set up the target in pexpect.
conn_module.get_config(self)
conn_module.build(self)
def finalize_target(self):
"""Finalize the target using the core finalize method.
"""
shutit_global.shutit_global_object.yield_to_draw()
self.pause_point('\nFinalizing the target module (' + self.shutit_main_dir + '/shutit_setup.py)', print_input=False, level=3)
# Can assume conn_module exists at this point
for mod in self.conn_modules:
if mod.module_id == self.build['conn_module']:
conn_module = mod
break
conn_module.finalize(self)
# Once we have all the modules, then we can look at dependencies.
# Dependency validation begins.
def resolve_dependencies(self, to_build, depender):
"""Add any required dependencies.
"""
shutit_global.shutit_global_object.yield_to_draw()
self.log('In resolve_dependencies',level=logging.DEBUG)
cfg = self.cfg
for dependee_id in depender.depends_on:
dependee = self.shutit_map.get(dependee_id)
# Don't care if module doesn't exist, we check this later
if (dependee and dependee not in to_build
and cfg[dependee_id]['shutit.core.module.build_ifneeded']):
to_build.append(dependee)
cfg[dependee_id]['shutit.core.module.build'] = True
return True
def check_dependee_exists(self, depender, dependee, dependee_id):
"""Checks whether a depended-on module is available.
"""
shutit_global.shutit_global_object.yield_to_draw()
# If the module id isn't there, there's a problem.
if dependee is None:
return 'module: \n\n' + dependee_id + '\n\nnot found in paths: ' + str(self.host['shutit_module_path']) + ' but needed for ' + depender.module_id + '\nCheck your --shutit_module_path setting and ensure that all modules configured to be built are in that path setting, eg "--shutit_module_path /path/to/other/module/:."\n\nAlso check that the module is configured to be built with the correct module id in that module\'s configs/build.cnf file.\n\nSee also help.'
return ''
def check_dependee_build(self, depender, dependee, dependee_id):
"""Checks whether a depended on module is configured to be built.
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# If depender is installed or will be installed, so must the dependee
if not (cfg[dependee.module_id]['shutit.core.module.build'] or
self.is_to_be_built_or_is_installed(dependee)):
return 'depender module id:\n\n[' + depender.module_id + ']\n\nis configured: "build:yes" or is already built but dependee module_id:\n\n[' + dependee_id + ']\n\n is not configured: "build:yes"'
return ''
def get_input(self, msg, default='', valid=None, boolean=False, ispass=False, color=None):
# Don't log log traces while getting input
log_trace_when_idle_original_value = shutit_global.shutit_global_object.log_trace_when_idle
shutit_global.shutit_global_object.log_trace_when_idle = False
shutit_global.shutit_global_object.yield_to_draw()
self = self
res = shutit_util.get_input(msg,
default=default,
valid=valid,
boolean=boolean,
ispass=ispass,
color=color)
# Revert value of log_trace_when_idle
shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
return res
# Pass through log to global function.
def log(self,
msg,
color_code=0,
level=logging.INFO,
mask_password=True,
newline=True,
transient=False):
if mask_password:
for password in shutit_global.shutit_global_object.secret_words_set:
if password in msg:
				msg = msg.replace(password, '<PASSWORD>')
if not self.nocolor:
if color_code == 0:
if level == logging.INFO:
msg = shutit_util.colorise(32,msg)
elif level == logging.WARNING:
msg = shutit_util.colorise(36,msg)
elif level == logging.CRITICAL:
msg = shutit_util.colorise(31,msg)
elif level == logging.ERROR:
msg = shutit_util.colorise(92,msg)
elif level == logging.DEBUG:
msg = shutit_util.colorise(35,msg)
else:
msg = shutit_util.colorise(color_code,msg)
shutit_global.shutit_global_object.yield_to_draw()
if transient:
self.last_log_time = time.time()
if sys.stdout.isatty():
if newline:
msg += '\r\n'
sys.stdout.write(msg)
else:
logobj = logging.getLogger(self.uuid_str)
if logobj.getEffectiveLevel() <= level:
self.last_log_time = time.time()
logobj.log(level,msg)
return True
# Pass through to global object
def create_session(self,
session_type='bash',
docker_image=None,
walkthrough=False,
nocolor=False,
rm=None,
loglevel='WARNING'):
shutit_global.shutit_global_object.yield_to_draw()
self = self # For linters: we want this to be available to shutit object users
return shutit_global.shutit_global_object.create_session(session_type=session_type,
docker_image=docker_image,
walkthrough=walkthrough,
nocolor=nocolor,
rm=rm,
loglevel=loglevel)
def get_os(self):
shutit_global.shutit_global_object.yield_to_draw()
self = self # For linters: we want this to be available to shutit object users
return self.current_shutit_pexpect_session.get_os()
# Returns true if the global session is interactive.
def is_interactive(self):
return shutit_global.shutit_global_object.determine_interactive()
def destroy(self):
"""Finish up a session.
"""
if self.session_type == 'bash':
# TODO: does this work/handle already being logged out/logged in deep OK?
self.logout()
elif self.session_type == 'vagrant':
# TODO: does this work/handle already being logged out/logged in deep OK?
self.logout()
def check_dependee_order(depender, dependee, dependee_id):
"""Checks whether run orders are in the appropriate order.
"""
# If it depends on a module id, then the module id should be higher up
# in the run order.
shutit_global.shutit_global_object.yield_to_draw()
if dependee.run_order > depender.run_order:
return 'depender module id:\n\n' + depender.module_id + '\n\n(run order: ' + str(depender.run_order) + ') ' + 'depends on dependee module_id:\n\n' + dependee_id + '\n\n(run order: ' + str(dependee.run_order) + ') ' + 'but the latter is configured to run after the former'
return ''
def make_dep_graph(depender):
"""Returns a digraph string fragment based on the passed-in module
"""
shutit_global.shutit_global_object.yield_to_draw()
digraph = ''
for dependee_id in depender.depends_on:
digraph = (digraph + '"' + depender.module_id + '"->"' + dependee_id + '";\n')
return digraph
# TODO: change default_cnf - see above
default_cnf = '''
################################################################################
# Default core config file for ShutIt.
################################################################################
# Details relating to the target you are building to (container or bash)
[target]
# Root password for the target - replace with your chosen password
# If left blank, you will be prompted for a password
password:
# Hostname for the target - replace with your chosen target hostname
# (where applicable, eg docker container)
hostname:
# space separated list of ports to expose
# e.g. "ports:2222:22 8080:80" would expose container ports 22 and 80 as the
# host's 2222 and 8080 (where applicable)
ports:
# volume arguments, eg /tmp/postgres:/var/lib/postgres:ro
volumes:
# volumes-from arguments
volumes_from:
# Name to give the docker container (where applicable).
# Empty means "let docker default a name".
name:
# Whether to remove the docker container when finished (where applicable).
rm:no
# Information specific to the host on which the build runs.
[host]
# Ask the user if they want shutit on their path
add_shutit_to_path: yes
# Docker executable on your host machine
docker_executable:docker
# space separated list of dns servers to use
dns:
# Password for the username above on the host (only needed if sudo is needed)
password:
# Log file - will be set to 0600 perms, and defaults to /tmp/<YOUR_USERNAME>_shutit_log_<timestamp>
# A timestamp will be added to the end of the filename.
logfile:
# ShutIt paths to look up modules in separated by ":", eg /path1/here:/opt/path2/there
shutit_module_path:.
# Whether to colorize output
nocolor:no
# Repository information
[repository]
# Whether to tag
tag:yes
# Whether to suffix the date to the tag
suffix_date:no
# Suffix format (default is epoch seconds (%s), but %Y%m%d_%H%M%S is an option if the length is ok with the index)
suffix_format:%s
# tag name
name:my_module
# Whether to tar up the docker image exported
export:no
# Whether to tar up the docker image saved
save:no
# Whether to push to the server
push:no
# User on registry to namespace repo - can be set to blank if not docker.io
user:
#Must be set if push is true/yes and user is not blank
password:<PASSWORD>
#Must be set if push is true/yes and user is not blank
email:YOUR_INDEX_EMAIL_OR_BLANK
# repository server
# make blank if you want this to be sent to the main docker index on docker.io
server:
# tag suffix, defaults to "latest", eg registry/username/repository:latest.
# empty is also "latest"
tag_name:latest
# Root setup script
# Each module should set these in a config
[shutit.tk.setup]
shutit.core.module.build:yes
[shutit.tk.conn_bash]
# None
# Aspects of build process
[build]
# How to connect to target
conn_module:shutit.tk.conn_docker
# Run any docker container in privileged mode
privileged:no
# Base image can be over-ridden by --image_tag defaults to this.
base_image:ubuntu:14.04
# Whether to perform tests.
dotest:yes
# --net argument to docker, eg "bridge", "none", "container:<name|id>" or "host". Empty means use default (bridge).
net:
'''
``` |
{
"source": "joaompinto/sslinfo",
"score": 3
} |
#### File: sslinfo/sslinfo/args.py
```python
import argparse
import json
from urllib.parse import urlparse
from .sslclient import SSLClient
from .view import print_ssl_info
class CommandArgs:
def __init__(self):
        parser = argparse.ArgumentParser(description="Show SSL/TLS certificate information for a host.")
parser.add_argument(
"-k", help="Do not validate certificate", action="store_true"
)
parser.add_argument("-v", help="Set verbose mode", action="store_true")
parser.add_argument(
"-f", help="Select field to print", nargs="+", type=str, action="append"
)
parser.add_argument("hostname", help="Connect to hostname", metavar="HOSTNAME")
parser.add_argument(
"--port", help="Using port", metavar="PORT", type=int, default=443
)
parser.add_argument(
"--timeout",
help="Max connection timeout (secs)",
metavar="TIMEOUT",
type=int,
default=10,
)
parser.add_argument("-j", help="Produce json output", action="store_true")
self.parser = parser
def parse(self):
self.args = self.parser.parse_args()
def run(self):
hostname = self.args.hostname
port = self.args.port
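        # Accept full URLs as well, e.g. "https://example.com:8443/x" yields
        # hostname "example.com" and port 8443 after the parsing below.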
if "//" in hostname:
url_parts = urlparse(hostname)
hostname = url_parts.netloc
if ":" in hostname:
hostname, port = hostname.split(":")
port = int(port)
ssl_client = SSLClient()
ssl_client.connect(hostname, port, self.args.timeout, self.args.k is True)
x509dict = ssl_client.x509dict(add_hints=not self.args.j)
if self.args.f:
for key in self.args.f[0]:
print(x509dict[key])
return
if self.args.j:
print(json.dumps(x509dict, indent=4))
else:
print_ssl_info(x509dict)
``` |
{
"source": "joaompinto/yamlpy",
"score": 3
} |
#### File: yamlpy/yamlfu/functions.py
```python
from copy import deepcopy
from functools import partial
import yaml
def provide_yamlfu_functions(symbols, doc_path):
symbols["render"] = partial(render, doc_path)
symbols["raw_render"] = partial(raw_render, doc_path)
def raw_render(doc_path, template, *args, **kwargs):
load_filename = doc_path.joinpath(template)
with open(load_filename) as yaml_file:
input_data = yaml_file.read()
result = yaml.safe_load(input_data)
if isinstance(result, dict):
print("SET RAW")
result["__raw__"] = True
return result
def render(doc_path, template, *args, **kwargs):
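    # Renders either a standalone YAML file (when `template` is a path string)
    # or an inline template dict whose "_arguments" entry names the positional
    # parameters that get bound to *args before resolving.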
from yamlfu.loader import Loader
loader = Loader(deepcopy(template))
if isinstance(template, str):
from .loader import Loader
load_filename = doc_path.joinpath(template)
loader = Loader(load_filename)
return loader.resolve()[0]
_arguments = template["_arguments"]
template_args = _arguments.split()
assert len(template_args) == len(args)
render_args = {}
for i, value in enumerate(template_args):
render_args[value] = args[i]
result = loader.resolve(render_args)[0]
result["_internal_render"] = True
return result
```
#### File: yamlpy/yamlfu/pretty.py
```python
from pygments import highlight
from pygments.formatters import TerminalTrueColorFormatter
from pygments.lexers import YamlLexer
def pretty_print_yaml(yaml_content):
print(
highlight(
yaml_content, YamlLexer(), TerminalTrueColorFormatter(style="monokai")
)
)
``` |
{
"source": "joaomrcsmartins/MIEIC_IART_2019",
"score": 3
} |
#### File: MIEIC_IART_2019/Class_2/ex2_1.py
```python
from graph import Graph
def validate(state):
return state[0][0] == 1 and state[0][1] == 2 and state[0][2] == 3 \
        and state[1][0] == 4
``` |
{
"source": "joaomsimoes/ANPR-Programming_with_Python",
"score": 3
} |
#### File: ANPR-Programming_with_Python/Arduino/main.py
```python
import time
import cv2
from utils import park
from kerasmodel.model import model_keras
def camera():
# Webcam Video Capture
vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_BUFFERSIZE, 1)
# vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) # set W cam resolution
# vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) # set H cam resolution
while True:
# Return frames
_, frame = vid.read()
# Show video input
cv2.imshow('input', frame)
# Image classification with keras
prediction = model_keras(frame)
        # If the prediction exceeds the threshold, use OCR to read the licence plate
if prediction > .4:
park(prediction=prediction, frame=frame)
# Click 'q' to finish
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Close everything
vid.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
camera()
# img = cv2.imread('plate.jpg')
# predict_plate(prediction=0.99, frame=img)
``` |
{
"source": "joaoms/incRec",
"score": 3
} |
#### File: collaborative_filtering/neighborhood/clustering.py
```python
from random import sample
from algorithms.collaborative_filtering.neighborhood import NeighborhoodCF
from data_structures import DynamicArray
from utils import knn
class Clustering(NeighborhoodCF):
"""
Description
Clustering algorithm which extends NeighborhoodCF.
"""
def __init__(
self, neighbors=[], n_neighbors=5, treshold=0.5, clusters=[],
centroids=[], cluster_map=[]):
"""
Description
Clustering's constructor.
Arguments
:param neighbors: The neighborhood model.
:type neighbors: list
            :param treshold: The minimum similarity an element must share with an
                existing centroid to join that cluster; below it a new cluster is
                created.
:type treshold: float
:param clusters: The cluster model.
:type clusters: list
:param centroids: The centroids model.
:type centroids: list
:param cluster_map: The inverted index of elements to their cluster
:type cluster_map: dictionary
"""
self.th = treshold
self.centroids = self._init_model(centroids, self._init_centroids)
self.clusters = self._init_model(clusters, self._init_clusters)
self.cluster_map = self._init_model(
cluster_map, self._init_cluster_map)
super().__init__(neighbors, n_neighbors)
def _init_centroids(self, elements):
"""
Description
A function which computes and returns an initial centroid.
Arguments
:param elements: The candidates to centroids.
:type elements: set
"""
if len(elements) == 0:
return []
return sample(elements, 1)
def _init_clusters(self, elements):
"""
Description
A function which computes and returns the initial cluster.
Arguments
:param elements: The set to form clusters by.
:type elements: set
"""
clusters = [set() for centroid in self.centroids]
for element in elements:
sims = [self.similarity_between(
element, centroid) for centroid in self.centroids]
max_sim = max(sims)
if max_sim < self.th:
self.centroids.append(element)
clusters.append({element})
else:
centroid_index = sims.index(max_sim)
clusters[centroid_index].add(element)
return clusters
def _init_cluster_map(self, elements):
"""
Description
A function which computes and returns an inverted index
which maps elements to their clusters.
Arguments
:param elements: The set to form the inverted index by.
:type elements: set
"""
cluster_map = dict()
for element in elements:
for index, cluster in enumerate(self.clusters):
if element in cluster:
cluster_map[element] = index
break
return cluster_map
def _init_neighborhood(self):
"""
Description
A function which computes and returns the neighborhood
model which is a DynamicArray object.
"""
neighbors = DynamicArray(
default_value=lambda: DynamicArray(default_value=lambda: list()))
for cluster in self.clusters:
cluster_neighborhood = self._init_neighborhood_cluster(cluster)
neighbors.append(cluster_neighborhood)
return neighbors
def _init_neighborhood_cluster(self, candidate_set):
"""
Description
A function which computes and returns the neighborhood
for a cluster which is a DynamicArray object.
Argument
:param candidate_set: The cluster.
:type candidate_set: DynamicArray
"""
neighbors = DynamicArray(
[self._neighborhood(
ide, candidate_set
) for ide in candidate_set], default_value=lambda: list())
return neighbors
def _neighborhood(self, ident, candidate_set):
"""
Description
A function which computes and returns the neighborhood
of an element inside a cluster which is a DynamicArray object.
Argument
:param ident: The element to calculate the neighborhood for.
:type ident: int
:param candidate_set: The cluster.
:type candidate_set: DynamicArray
"""
candidates = candidate_set.difference({ident})
return knn(ident, candidates, self.n_neighbors,
self.similarity_between)
def neighborhood_of(self, identifier):
"""
Description
A function which returns the neighborhood of an
element.
Argument
            :param identifier: The element whose neighborhood we want to return.
            :type identifier: int
"""
try:
cluster_index = self.cluster_map[identifier]
position = list(self.clusters[cluster_index]).index(identifier)
return self.neighbors[cluster_index][position]
except KeyError:
return []
def increment(self, identifier):
"""
Description
A function which increments the current cluster model
for a new entry.
Arguments
:param identifier: An element of a rating.
:type identifier: int
"""
sims = [self.similarity_between(
identifier, centroid) for centroid in self.centroids]
try:
max_sim = max(sims)
except ValueError:
max_sim = 0
if max_sim < self.th:
self.centroids.append(identifier)
self.clusters.append({identifier})
self.cluster_map[identifier] = len(self.clusters) - 1
else:
centroid_index = sims.index(max_sim)
self.clusters[centroid_index].add(identifier)
self.cluster_map[identifier] = centroid_index
cluster = self.clusters[centroid_index]
self.neighbors[centroid_index] = self._init_neighborhood_cluster(
cluster)
```
#### File: explicit_feedback/user_based/user_based_neighborhood.py
```python
from .user_based_cf import UserBasedExplicitCF
from algorithms.collaborative_filtering.neighborhood import UserNeighborhood
class UserBasedNeighborhood(UserBasedExplicitCF, UserNeighborhood):
"""
Description
A class which implements the classic user-based neighborhood
algorithm. Extends UserBasedExplicitCF and UserClustering.
"""
def __init__(
self, matrix=[], similarities=[], avg_ratings=dict(), co_rated=[],
neighbors=[], n_neighbors=5):
"""
Description
UserBasedNeighborhood's constructor.
Arguments
:param matrix: The ratings matrix.
:type matrix: list
:param similarities: The similarity matrix.
:type similarities: SymmetricMatrix
:param avg_ratings: Users' average ratings.
:type avg_ratings: DynamicArray
:param co_rated: The co-rated items matrix.
:type co_rated: SymmetricMatrix
:param neighbors: The neighborhood model.
:type neighbors: list
:param n_neighbors: Number of neighbors to compute.
:type n_neighbors: int
"""
super().__init__(matrix, similarities, avg_ratings, co_rated)
super(UserNeighborhood, self).__init__(neighbors, n_neighbors)
def new_rating(self, rating):
"""
Description
The function which processes a new iteration. Expects a tuple
(user, item)
Arguments
:param rating: The rating tuple.
:type rating: tuple
"""
super().new_rating(rating)
self.neighbors = self._init_neighborhood()
```
#### File: implicit_feedback/item_based/item_based_cf.py
```python
from data_structures import SymmetricMatrix, DynamicArray
from algorithms.collaborative_filtering import CollaborativeFiltering
from collections import defaultdict
from utils import cosine_similarity
from random import shuffle
class ItemBasedImplicitCF(CollaborativeFiltering):
"""
Description
The implicit item based collaborative filtering class which focuses
on calculating and incrementing similarities.
Extends CollaborativeFiltering.
"""
def __init__(
self, matrix=[], intersections=[], l1=[], inv_index={},
similarities=[]):
"""
Description
ItemBasedImplicitCF's constructor.
Arguments
:param matrix: The ratings matrix.
:type matrix: list
:param intersections: A matrix of item intersections.
:type intersections: SymmetricMatrix
:param l1: An array of items' l1 norms.
:type l1: DynamicArray
:param inv_index: An inverted index of users to items.
:type inv_index: defaultdict(set)
:param similarities: A similarity matrix.
:type similarities: SymmetricMatrix
"""
super().__init__(matrix)
self.inv_index = self._init_model(
inv_index, self._init_inv_index)
self.intersections = self._init_model(
intersections, self._init_intersections)
self.l1_norms = self._init_model(
l1, self._init_l1)
self.similarities = self._init_model(
similarities, self._init_similarities)
def _init_similarities(self):
"""
Description
A function which computes and returns a
similarity matrix. Returns a SymmetricMatrix.
"""
sims = SymmetricMatrix(
len(self.items), value=lambda: 0)
for item in self.items:
for another_item in range(item + 1):
sims[(item, another_item)] = self._init_similarity(
item, another_item)
return sims
def _init_similarity(self, item, another_item):
"""
Description
A function which computes and returns a similarity
between a pair of items.
Arguments
:param item: The first item.
:type item: int
:param another_item: The second item.
:type another_item: int
"""
return cosine_similarity(
self.intersections_between(item, another_item),
self.l1_norm_of(item),
self.l1_norm_of(another_item)
)
def _init_intersections(self):
"""
Description
            The function which computes and returns a SymmetricMatrix of
            item intersection counts.
"""
intersections = SymmetricMatrix(len(self.items), lambda: 0)
for items in self.inv_index.values():
for item in items:
others = set(range(item + 1)).intersection(items)
for another_item in others:
intersections[(item, another_item)] += 1
return intersections
def _init_l1(self):
"""
Description
The function which computes and returns a
DynamicArray which contains items' l1 norms.
"""
l1_norms = DynamicArray(
[0 for _ in self.items], default_value=lambda: 0)
for items in self.inv_index.values():
for item in items:
l1_norms[item] += 1
return l1_norms
def _init_inv_index(self):
"""
Description
The function which computes and returns a
defaultdict(set) inverted index of users to
their rated items.
"""
inv_index = defaultdict(set)
for user in self.users:
for item in self.items:
if self.matrix[user][item] is not None:
inv_index[user].add(item)
return inv_index
def _update_intersections(self, user_id, item_id):
"""
Description
A function which updates the intersection matrix.
Arguments
:param user_id: The user identifier.
:type user_id: int
:param item_id: The item identifier.
:type item_id: int
"""
for another_item_id in self.inv_index_of(user_id):
self.intersections[(item_id, another_item_id)] += 1
def _update_similarities(self, item_id):
"""
Description
A function which updates the similarity matrix.
Arguments
:param item_id: The item identifier.
:type item_id: int
"""
for another_item_id in self.items:
self.similarities[(
item_id, another_item_id)] = self._init_similarity(
item_id, another_item_id)
def new_rating(self, rating):
"""
Description
The function which processes a new iteration. Expects a tuple
(user, item).
Arguments
:param rating: The rating tuple.
:type rating: tuple
"""
user_id, item_id = rating
self.matrix[user_id][item_id] = 1
self.users.add(user_id)
self.items.add(item_id)
if item_id not in self.inv_index_of(user_id):
self.inv_index[user_id].add(item_id)
self.l1_norms[item_id] += 1
self._update_intersections(user_id, item_id)
self._update_similarities(item_id)
def recommend(self, user_id, n_rec, repeated=False):
"""
Description
A function which returns recommendations for a user.
Arguments
:param user_id: The user identifier.
:type user_id: int
:param n_rec: The number of items to recommend.
:type n_rec: int
:param repeated: Variable which defines if already rated products\
can be recommended.
:type repeated: boolean
"""
candidates = {
ident for item in self.items for ident in self.neighborhood_of(
item)}
user_items = self.inv_index_of(user_id)
if not repeated:
candidates = candidates.difference(user_items)
final = list(candidates)
shuffle(final)
return final[0:n_rec]
def intersections_between(self, item, another_item):
"""
Description
A function which returns the item intersections between
two items.
Arguments
:param item: The first item.
:type item: int
:param another_item: The second item.
:type another_item: int
"""
return self.intersections[(item, another_item)]
def l1_norm_of(self, item):
"""
Description
A function which returns an item's l1_norm.
Arguments
:param item: The item identifier.
:type item: int
"""
return self.l1_norms[item]
def inv_index_of(self, user_id):
"""
Description
A function which returns a users' rated items.
Arguments
:param user_id: The user identifier.
:type user_id: int
"""
return self.inv_index[user_id]
```
#### File: implicit_feedback/item_based/item_based_lsh_mh.py
```python
from algorithms.collaborative_filtering.neighborhood.implicit_feedback import \
LSHMinHash
from numpy.random import permutation
class ItemLSHMinHash(LSHMinHash):
"""
Description
A class which implements the item-based locality-sensitive min hashing
algorithm which extends LSHMinHash.
"""
def __init__(self, matrix=[], signature_matrix=[], buckets=[], n_perms=6,
n_bands=2):
"""
Description
ItemLSHMinHash's constructor.
Arguments
:param matrix: A ratings matrix.
:type matrix: list
:param signature_matrix: The signature matrix which contains
elements' signatures in the columns.
:type signature_matrix: DynamicArray
:param buckets: The buckets where elements are hashed into.
:type buckets: defaultdict(set)
:param n_perms: Number of permutations for hashing.
:type n_perms: int
:param n_bands: Number of bands which are used for bucketing.
:type n_bands: int
"""
super().__init__(matrix, signature_matrix, buckets, n_perms, n_bands)
def _permutation(self, matrix):
"""
Description
A function which defines how to permutate a matrix.
Arguments
:param matrix: The matrix to be permutated.
:type matrix: list
"""
return permutation(matrix)
def _elements(self):
"""
Description
A function which defines the elements to be
hashed.
"""
return self.items
def get_vector(self, matrix, pos):
"""
Description
A function which returns a position of a matrix.
Arguments
:param matrix: The matrix to be accessed.
:type matrix: list
:param pos: The index/position.
:type pos: int
"""
return matrix.col(pos)
def new_rating(self, rating):
"""
Description
The function which processes a new iteration. Expects a tuple
(user, item)
Arguments
:param rating: The rating tuple.
:type rating: tuple
"""
super().new_rating(rating)
_, item_id = rating
self._update_signature_matrix(item_id)
self._update_buckets(item_id)
def recommend(self, user_id, n_rec, repeated=False):
"""
Description
A function which returns recommendations for a user.
Arguments
:param user_id: The user identifier.
:type user_id: int
:param n_rec: The number of items to recommend.
:type n_rec: int
:param repeated: Variable which defines if already rated products\
can be recommended.
:type repeated: boolean
"""
row = self.matrix[user_id]
row_filtered = [
item_id for item_id in self.items if row[item_id] == 1]
signatures = [self.get_vector(self.signature_matrix, item_id)
for item_id in row_filtered]
rec = set()
candidates = {item_id: 0 for item_id in self.items}
for sign in signatures:
bands = self._group_by_bands(sign)
for band in bands:
items = self.buckets[band]
for item in items:
candidates[item] += 1
rec = rec.union(items)
if not repeated:
rec = rec.difference(set(row_filtered))
return sorted(rec, key=lambda item_id: candidates[item_id])[-n_rec:]
```
#### File: incRec/graphic/static.py
```python
import matplotlib.pyplot as plt
from progress.bar import Bar
class EvaluationStatic:
"""
Description
A class which displays a graphic which portrays the evolution
of processing and recommendation time as well as accuracy
of an algorithm when a dataset is processed incrementally.
"""
def __init__(self, stream, evaluator):
"""
Description
EvaluationStatic's constructor.
Arguments
:param stream: A stream of ratings.
:type stream: list
:param evaluator: A evaluator object.
:type evaluator: PrequentialEvaluator
"""
self.stream = stream
self.evaluator = evaluator
self.x = range(len(stream))
self.err_rate = []
self.elap_nr = []
self.elap_rec = []
def evaluate(self):
"""
Description
A function which evaluates the data stream.
"""
bar = Bar('Evaluating', max=len(self.x))
for element in self.stream:
err, elap_rec, elap_nr = self.evaluator.new_rating(element)
self.err_rate.append(err)
self.elap_rec.append(elap_rec)
self.elap_nr.append(elap_nr)
bar.next()
bar.finish()
def plot(self):
"""
Description
A function which plots the 3 subplots.
"""
fig, axs = plt.subplots(3)
fig.suptitle('Metrics')
axs[0].plot(self.x, self.err_rate, "r", label="Average error.")
axs[0].legend()
axs[1].plot(self.x, self.elap_nr, "g", label="Rating process time.")
axs[1].legend()
axs[2].plot(self.x, self.elap_rec, "b", label="Recommendation time.")
axs[2].legend()
def export(self, path, show=False):
"""
Description
A function which exports the plots to an image format. Displays
it if show=True.
Arguments
:param path: The output path of the figure.
:type path: string
            :param show: Whether to display the figure before saving.
:type show: boolean
"""
if show:
plt.show()
plt.savefig(path)
def process(self, path, show=False):
"""
Description
A function which processes a data stream and creates a graph.
Arguments
:param path: The output path of the figure.
:type path: string
            :param show: Whether to display the figure before saving.
:type show: boolean
"""
self.evaluate()
self.plot()
self.export(path, show)
```
#### File: stream/file_stream/file_stream.py
```python
class FileStream:
"""
Description
A class which encapsulates common logic for the
implicit and explicit versions, and creates a data
stream with ratings out of a file dataset.
"""
def __init__(self, path, sep=" "):
"""
Description
FileStream's constructor
Arguments
:param path: The path to the file.
:type path: string
:param sep: A character which separates the field in each line.
:type sep: string
"""
self.stream = self._parse_file(path, sep)
def _parse_file(self, path, sep):
"""
Description
A function which parsed a file and returns a
data stream list.
Arguments
:param path: The path to the file.
:type path: string
:param sep: A character which separates the field in each line.
:type sep: string
"""
stream = []
with open(path, "r") as f:
line = f.readline()
while line:
rating_arr = line.split(sep)
rating = self._parse_rating(rating_arr)
stream.append((rating))
line = f.readline()
f.close()
return stream
def process_stream(self, model):
"""
Description
A function which processes a data stream
with a recomendation model.
Arguments
            :param model: A recommendation algorithm or evaluator.
            :type model: CollaborativeFiltering or PrequentialEvaluator.
"""
it = 0
for rating in self.stream:
# print(f"New rating entering: {rating} -> Iter: {it}")
it += 1
model.new_rating(rating)
return model
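
# Illustrative usage sketch (hypothetical path; mirrors the project's tests,
# which use the explicit-feedback subclass):
#
#   fs = FileStreamExplicit("path/to/ratings.data", sep="\t")
#   model = fs.process_stream(MFExplicitSGD())
#
# Subclasses such as FileStreamExplicit are expected to implement
# _parse_rating() for their particular line format.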
```
#### File: explicit_feedback/user_based/user_neighborhood_explicit_test.py
```python
import unittest
from algorithms.collaborative_filtering.neighborhood.\
explicit_feedback.user_based import UserBasedNeighborhood
class UserNeighborhoodTest(unittest.TestCase):
def _test_neighborhood(self, cf, identifier, neighborhood):
self.assertEqual(cf.neighborhood_of(identifier), neighborhood)
self.assertNotIn(identifier, neighborhood)
def _test_neighbors(self, cf, neighbors):
for user, neighborhood in zip(cf.users, neighbors):
self._test_neighborhood(cf, user, neighborhood)
def test_neighborhood(self):
matrix = [
[1, None, None, None, 1, None, None, None, 1],
[1, None, 1, None, 1, None, 1, None, 1],
[None, None, 1, None, None, None, 1, None, None],
[None, 1, 1, None, 1, None, 1, None, None],
[1, None, 1, None, None, None, None, 1, None],
]
cf = UserBasedNeighborhood(matrix, n_neighbors=2)
self._test_neighbors(cf, [[3, 4], [3, 4], [3, 4], [2, 4], [2, 3]])
if __name__ == "__main__":
unittest.main()
```
#### File: implicit_feedback/item_based/item_cf_implicit_test.py
```python
import unittest
from algorithms.collaborative_filtering.\
neighborhood.implicit_feedback.item_based import ItemBasedNeighborhood
class ItemBasedNeighborhoodTest(unittest.TestCase):
def test_asserts(self):
cf = ItemBasedNeighborhood(n_neighbors=10)
self.assertEqual(cf.n_neighbors, 10)
self.assertEqual(len(cf.inv_index), 0)
self.assertEqual(len(cf.l1_norms), 0)
def test_initialization(self):
matrix = [
[1, None, None, None, 1],
[1, None, 1, None, 1],
[None, None, 1, None, None],
[None, 1, 1, None, 1],
[1, None, 1, None, 1],
]
cf = ItemBasedNeighborhood(matrix)
self.assertEqual(cf.l1_norm_of(0), 3)
self.assertEqual(cf.l1_norm_of(3), 0)
self.assertEqual(cf.l1_norm_of(4), 4)
self.assertEqual(cf.inv_index_of(0), {0, 4})
self.assertEqual(cf.inv_index_of(1), {0, 2, 4})
self.assertEqual(cf.inv_index_of(2), {2})
self.assertEqual(cf.intersections_between(0, 4), 3)
self.assertEqual(cf.intersections_between(1, 3), 0)
self.assertEqual(cf.intersections_between(0, 2), 2)
def test_new_rating(self):
matrix = [
[1, None, None, None, 1],
[1, None, 1, None, 1],
[None, None, 1, None, None],
[None, 1, 1, None, 1],
[1, None, 1, None, 1],
]
cf = ItemBasedNeighborhood(matrix)
self.assertAlmostEqual(cf.similarity_between(0, 0), 1.0, delta=0.0001)
self.assertAlmostEqual(cf.similarity_between(1, 1), 1.0, delta=0.0001)
self.assertAlmostEqual(cf.similarity_between(2, 2), 1.0, delta=0.0001)
self.assertAlmostEqual(cf.similarity_between(3, 3), 0, delta=0.0001)
self.assertAlmostEqual(cf.similarity_between(0, 3), 0, delta=0.0001)
cf.new_rating((0, 3))
self.assertAlmostEqual(cf.similarity_between(0, 3), 0.577, delta=0.001)
cf.new_rating((1, 3))
self.assertAlmostEqual(cf.similarity_between(0, 3), 0.816, delta=0.001)
def test_recommendation(self):
matrix = [
[1, None, None, None, 1],
[1, None, 1, None, 1],
[None, None, 1, None, None],
[None, 1, 1, None, 1],
[1, None, 1, None, 1],
]
cf = ItemBasedNeighborhood(matrix, n_neighbors=2)
self.assertIn(2, cf.recommend(0, 3))
self.assertNotIn(0, cf.recommend(0, 3))
if __name__ == "__main__":
unittest.main()
```
#### File: implicit_feedback/user_based/user_clustering_implicit_test.py
```python
import unittest
from algorithms.collaborative_filtering.\
neighborhood.implicit_feedback.user_based import UserBasedClustering
class UserBasedClusteringTest(unittest.TestCase):
def test_clustering(self):
matrix = [
[1, None, None, None, 1, None, None, None, 1],
[1, None, 1, None, 1, None, 1, None, 1],
[None, None, 1, None, None, None, 1, None, None],
[None, 1, 1, None, 1, None, 1, None, None],
[1, None, 1, None, None, None, None, 1, None],
]
cf = UserBasedClustering(matrix, n_neighbors=2, centroids=[1, 3])
self.assertEqual(len(cf.centroids), len(cf.clusters))
self.assertEqual(cf.clusters[0], {0, 1, 4})
self.assertEqual(cf.clusters[1], {2, 3})
self.assertEqual(cf.cluster_map[0], 0)
self.assertEqual(cf.cluster_map[1], 0)
self.assertEqual(cf.cluster_map[2], 1)
self.assertEqual(cf.cluster_map[4], 0)
self.assertEqual(cf.cluster_map[3], 1)
def test_increment(self):
matrix = [
[1, None, None, None, 1, None, None, None, 1],
[1, None, 1, None, 1, None, 1, None, 1],
[None, None, 1, None, None, None, 1, None, None],
[None, 1, 1, None, 1, None, 1, None, None],
[1, None, 1, None, None, None, None, 1, None],
]
cf = UserBasedClustering(matrix, n_neighbors=2, centroids=[1, 3])
cf.new_rating((5, 1))
self.assertEqual(2, len(cf.centroids))
self.assertEqual(cf.cluster_map[5], 1)
if __name__ == "__main__":
unittest.main()
```
#### File: implicit_feedback/user_based/user_neighborhood_implicit_test.py
```python
import unittest
from algorithms.collaborative_filtering.neighborhood.\
implicit_feedback.user_based import UserBasedNeighborhood
class UserNeighborhoodTest(unittest.TestCase):
def _test_neighborhood(self, cf, identifier, neighborhood):
self.assertEqual(cf.neighborhood_of(identifier), neighborhood)
self.assertNotIn(identifier, neighborhood)
def _test_neighbors(self, cf, neighbors):
for user, neighborhood in zip(cf.users, neighbors):
self._test_neighborhood(cf, user, neighborhood)
def test_neighborhood(self):
matrix = [
[1, None, None, None, 1, None, None, None, 1],
[1, None, 1, None, 1, None, 1, None, 1],
[None, None, 1, None, None, None, 1, None, None],
[None, 1, 1, None, 1, None, 1, None, None],
[1, None, 1, None, None, None, None, 1, None],
]
cf = UserBasedNeighborhood(matrix, n_neighbors=2)
self._test_neighbors(cf, [[4, 1], [3, 0], [1, 3], [1, 2], [2, 1]])
if __name__ == "__main__":
unittest.main()
```
#### File: test/evaluators/explicit_prequential_test.py
```python
import unittest
from algorithms.collaborative_filtering.neighborhood.\
explicit_feedback.user_based import UserBasedNeighborhood
from evaluators.prequential.explicit_feedback.\
prequential_evaluator import PrequentialEvaluatorExplicit
class PrequentialEvaluatorExplicitTest(unittest.TestCase):
def test_evaluate(self):
matrix = [
[8, None, None, None, 7],
[7, None, 1, None, 6],
[None, 2, 9, None, 1],
[None, 1, 9, None, None],
[7, None, 1, None, 6],
]
cf = UserBasedNeighborhood(matrix)
evaluator = PrequentialEvaluatorExplicit(cf)
err, _elap = evaluator.evaluate(0, 2, 3)
self.assertEqual(err, 0.4)
def test_new_rating(self):
matrix = [
[1, None, None, None, 1],
[1, None, 1, None, 1],
[None, None, 1, None, None],
[None, 1, 1, None, 1],
[1, None, 1, None, 1],
]
cf = UserBasedNeighborhood(matrix)
evaluator = PrequentialEvaluatorExplicit(cf)
err, _elap, _elap2 = evaluator.new_rating((0, 2, 3))
self.assertEqual(err, 0.4)
if __name__ == "__main__":
unittest.main()
```
#### File: stream/file_stream/file_stream_explicit_test.py
```python
from stream.file_stream.explicit import FileStreamExplicit
from algorithms.collaborative_filtering.matrix_factorization.\
explicit_feedback import MFExplicitSGD
import unittest
class FileStreamTest(unittest.TestCase):
def test_initialization(self):
fs = FileStreamExplicit("test/test_dataset/test.data", sep="\t")
self.assertEqual(len(fs.stream), 10)
def test_process_stream(self):
fs = FileStreamExplicit("test/test_dataset/test.data", sep="\t")
cf = MFExplicitSGD()
model = fs.process_stream(cf)
self.assertEqual(len(model.matrix), 306)
self.assertEqual(type(model), MFExplicitSGD)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JoaoMvchvdo22/Python",
"score": 3
} |
#### File: curso-py-coder/funcoes/basico.py
```python
def saudacao(nome='Pessoa', idade=20):
print(f'Olá {nome}!\nVocê não parece ter {idade} anos')
def soma_e_multi(a, b, x):
return a + b * x
``` |
{
"source": "JoaoMWatson/udemyDjango",
"score": 2
} |
#### File: meuPrimeiroProjeto/clients/views.py
```python
from django.shortcuts import render
from .models import Person
def person_list(request):
persons = Person.objects.all()
return render(request, 'person.html', {"person": persons})
``` |
{
"source": "joaonc/hd_active",
"score": 2
} |
#### File: app/ui/log_dialog.py
```python
from PySide6 import QtGui, QtWidgets
from app.hd_active import HdActive
from app.ui.forms.log_ui import Ui_LogDialog
class LogDialog(QtWidgets.QDialog):
# Using class composition instead of inheritance or `PySide6.QtUiTools.loadUiType`.
def __init__(self, hd_active: HdActive, parent):
super().__init__(parent)
self.hd_active = hd_active
self.ui = Ui_LogDialog()
self.ui.setupUi(self)
# UI bindings
self.ui.ok_button.clicked.connect(self.close)
def showEvent(self, arg__1: QtGui.QShowEvent):
super().showEvent(arg__1)
self.set_text()
def set_text(self):
self.ui.log_textBrowser.setText('\n'.join(self.hd_active.log))
```
#### File: hd_active/app/utils.py
```python
from pathlib import Path
from typing import Any
PROJECT_ROOT = Path(__file__).parents[1].resolve()
ASSETS_ROOT = PROJECT_ROOT / 'assets'
def get_asset(*args) -> Path:
return ASSETS_ROOT / '/'.join(args)
def is_truthy(value: Any) -> bool:
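    # e.g. is_truthy("Yes") -> True, is_truthy(0) -> False,
    # is_truthy("maybe") raises ValueError.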
if isinstance(value, bool):
return value
value_lower = str(value).lower()
if value_lower in ['true', 'yes', 'on'] or (isinstance(value, (int, float)) and value != 0):
return True
elif value_lower in ['false', 'no', 'off'] or (isinstance(value, (int, float)) and value == 0):
return False
raise ValueError(f'Undefined truthy value: {value}')
```
#### File: hd_active/tests/test_hd_active_config.py
```python
from typing import List, Tuple
from unittest.mock import patch
import pytest
import pytest_check as check
from app.hd_active_config import HdActiveConfig
@pytest.fixture
def config_file(request, tmp_path) -> Tuple[str, List[str]]:
"""
Fixture to be called indirectly with the INI file contents as parameter in the ``request``.
The parameter to ``request`` should be a tuple with the contents of the INI file and a list
with the expected drives.
"""
file = tmp_path / 'test.ini'
file.write_text(request.param[0])
return str(file), request.param[1]
@patch('app.hd_active_config.configparser.ConfigParser.read')
def test_defaults(read_mock):
"""
Skip reading file (so defaults are not overwritten) and verify defaults.
"""
config = HdActiveConfig('foo.ini')
check.is_false(config.run)
check.equal(config.wait, 60)
check.equal(config.drive_paths, [])
@pytest.mark.parametrize(
'config_file',
[
pytest.param(
(
'''[HD Active]
drives = e:\\''',
['e:\\'],
),
id='one drive only',
),
pytest.param(
(
'''[HD Active]
drives = e:\\,f, g/''',
['e:\\', 'f', 'g/'],
),
id='multiple drives',
),
pytest.param(
(
'''[HD Active]
drives = 'e',''f'',"g"''',
['e', 'f', 'g'],
),
id='quotes are ignored',
),
],
indirect=True,
)
def test_drives(config_file):
file_name, expected_drives_paths = config_file
config = HdActiveConfig(file_name)
assert config.drive_paths == expected_drives_paths
def test_file_doesnt_exist():
with pytest.raises(FileNotFoundError):
HdActiveConfig('foo_doesnt_exist.ini')
``` |
{
"source": "joaonizer/sLLGS",
"score": 3
} |
#### File: sLLGS/Python/rk4.py
```python
import numpy as np


# Note: dm(m, h_eff, i_s) is assumed to be defined elsewhere in this project;
# it evaluates the time derivative of the magnetization m.
def rk4(m , h_eff , i_s , dt):
k = np.zeros((4,3))
mm = np.zeros((3,3))
m_new = np.zeros((1,3))
# Step 0
k[0 , :] = dm(m , h_eff , i_s)
mm[0 , :] = m + k[0 , :] * dt / 2
# Step 1
k[1 , :] = dm(mm[0 , :] , h_eff , i_s)
mm[1 , :] = m + k[1 , :] * dt / 2
# Step 2
k[2 , :] = dm(mm[1 , :] , h_eff , i_s)
mm[2 , :] = m + k[2 , :] * dt
# Step 3
k[3 , :] = dm(mm[2 , :] , h_eff , i_s)
    # Return the RK4 increment of 'm' (the caller adds it to the current m)
m_new = (k[0 , :] + 2 * (k[1 , :] + k[2 , :]) + k[3 , :])* dt / 6
return m_new
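
# Illustrative usage sketch (hypothetical values; dm() must be available in scope):
#
#   m = np.array([0.0, 0.0, 1.0])            # initial magnetization
#   for _ in range(n_steps):
#       m = m + rk4(m, h_eff, i_s, dt)       # rk4 returns the increment, not the new m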
``` |
{
"source": "joaonlopes/pysud",
"score": 3
} |
#### File: demos/es/demoscript_0.py
```python
import pysud_gm as gameMgr
import pysud_events as ev
import pysud
# EVENTOS DEFINIDOS POR UN USUARIO:
class EventBotiquin(ev.CommandEvent):
def on_success(self, game):
myflag = game.get_user_defined_variable('botiquin_abierto')
if not myflag:
game.set_user_defined_variable('botiquin_abierto', True)
game.iom.show_message("Al abrir el botiquín encuentras una botella de alcohol medicinal casi llena y unas gasas, decides llevártelas...")
game.pc.increase_score(1)
game.set_user_defined_variable('use_alcohol', False)
# Vemos como un evento puede habilitar a otro:
game.add_global_event(EventAlcohol(['usar alcohol']))
else:
game.iom.show_message("Ya has revisado eso.")
class EventHombre(ev.CommandEvent):
def on_success(self, game):
myflag = game.get_user_defined_variable('hombre')
if not myflag:
game.set_user_defined_variable('hombre', True)
game.iom.show_message("El hombre tiene varias heridas profundas, como de mordeduras, en el rostro y ambos brazos. En el bolsillo de su camisa encuentras un encendedor.")
game.pc.increase_score(1)
else:
game.iom.show_message("Ya no tiene nada más...")
class EventFinalMalo(ev.CommandEvent):
def on_success(self, game):
game.iom.show_message("Al abrir la puerta uno de los empleados del lugar se da vuelta, su cara esta cubierta de sangre y tiene los ojos completamente blancos, no habla, sino que emite unos quijidos de dolor y camina hacia ti con movimientos torpes, pero el miedo te impide reaccionar y se abalanza sobre vos... lo último que logras ver es al otro empleado del lugar a unos pocos metros tuyo, tieso en el suelo sobre un charco de sangre...\n")
game.iom.show_message("Así termina la historia de " + game.pc.get_name() + " con un puntaje de:")
game.iom.show_score()
game.quit_game()
class EventAlcohol(ev.CommandEvent):
def on_success(self, game):
myflag = game.get_user_defined_variable('use_alcohol')
if not myflag:
game.set_user_defined_variable('use_alcohol', True)
game.iom.show_message("Con las gasas y la botella de alcohol te fabricas una suerte de bomba casera... guau!")
game.pc.increase_score(1)
else:
game.iom.show_message("Ya tienes tu bomba lista para usar...")
class EventFinalBueno(ev.CommandEvent):
def on_success(self, game) :
encendedor = game.get_user_defined_variable('hombre')
bomba = game.get_user_defined_variable('use_alcohol')
if encendedor and bomba:
game.pc.increase_score(1)
game.iom.show_message("Enciendes las gasas con el encendedor y tras abrir la puerta del puesto de comida arrojas tu bomba casera a aquella extraña criatura, la cual escapa corriendo torpemente unos pocos metros hasta caer a retorcerse de dolor, no estás seguro de que esté muerta, pero al menos no te sientes indefenso.")
game.iom.show_message("Así termina la historia de " + game.pc.get_name() + " con un puntaje de:")
game.iom.show_score()
game.quit_game()
else:
game.iom.show_message("Te está faltando algo... ¿pero qué será?")
if __name__ == '__main__':
# 1 - Inicializaciones:
NOMBRE_USUARIO = input ('Ingrese nombre de personaje:')
JUEGO = pysud.Game(NOMBRE_USUARIO)
# 2 - Habitaciones:
R1 = pysud.Room('Calle', 'Estas en la puerta de tu viejo departamento. En la calle esta cayendo el sol y no ves personas por ningún lado, todo está en absoluto silencio... hacia el norte está la plaza del barrio...', "1")
R2 = pysud.Room('Casa', 'Tu humilde departamento ha visto mejores épocas, eso es por supuesto antes de que vivieras en él, mucho antes. Es pequeño y has acumulado ropa sucia y basura por doquier. Tantas telarañas en las ventanas te impiden ver con claridad lo que sucede en la calle. Has dejado la puerta del baño abierta...', "2")
R3 = pysud.Room('Baño', 'El baño lo tienes a tono con la decoración del resto de tu hogar: mugriento a más no poder. Bajo el lavamanos se encuentra un pequeño botiquín y detras tuyo está la habitación principal (y única) de tu departamento...', "3")
R4 = pysud.Room('Plaza', 'La plaza está desierta; tiene un camino que la atraviesa por el centro de norte a sur y en medio de este ves a un hombre tirado, boca abajo. Hacia el sur está la calle que conduce a tu hogar, y en la otra dirección el único puesto de comida del parque.', "4")
R5 = pysud.Room('Puesto de comida', 'En el puesto de comida las mesas se han tirado al piso como si hubieran tenido lugar serios disturbios, hay algunas manchas de sangre por el suelo, y especialmente cerca de la entrada a la casilla donde se prepara la comida rápida, la puerta de la misma está entreabierta. Se oyen ruidos extraños que vienen desde adentro...', "5")
# 3 - Transiciones (conectando habitaciones):
R1.add_transition(['ir a casa', 'ir a departamento'], R2)
R2.add_transition(['ir a calle', 'ir afuera'], R1)
R2.add_transition(['ir a baño', 'ir al baño'], R3)
R3.add_transition(['ir a living', 'ir al living', 'atras', 'ir a casa', 'ir a sala'], R2)
R1.add_transition(['ir a plaza', 'norte', 'n'], R4)
R4.add_transition(['sur', 'ir a calle', 's'], R1)
R4.add_transition(['norte', 'n', 'seguir camino', 'ir a puesto'], R5)
R5.add_transition(['s', 'plaza', 'sur'], R4)
# 4 - Eventos locales y variables relacionadas:
JUEGO.set_user_defined_variable('botiquin_abierto', False)
R3.add_local_event(EventBotiquin(commands = ['abrir botiquin']))
JUEGO.set_user_defined_variable('hombre', False)
R4.add_local_event(EventHombre(commands = ['mirar hombre']))
R5.add_local_event(
ev.ShowMessageEvent(
['mirar casilla', 'examinar casilla', 'mirar puesto', 'examinar puesto'],
'Asomas la cabeza dentro de la casilla y puedes ver a unos pocos metros tuyo la espalda de uno de los empleados del lugar, agachado junto a su compañero de trabajo quien yace en un charco de sangre... Pareciera estar masticándole un brazo...' )
)
R5.add_local_event(EventFinalMalo(['entrar', 'entrar a casilla', 'ir a casilla']))
R5.add_local_event(EventFinalBueno(['tirar bomba', 'usar bomba']))
# 5 - Se agregan las habitaciones al juego:
JUEGO.add_rooms( [R1, R2, R3, R4, R5] )
# 6 - Por último comenzar el juego:
JUEGO.pc.move_to_room(R1)
GAME_MANAGER = gameMgr.GameManager(JUEGO)
GAME_MANAGER.run_game()
```
#### File: demos/es/demoscript_1.py
```python
import pysud_gm as gameMgr
import pysud
import pysud_events as ev
##################################################################
# Items #
# Heredar de la clase abstracta pysud.Item e implementar use_on()#
##################################################################
class Termo(pysud.Item):
def use_on(self, game):
pass
class Mate (pysud.Item):
def use_on(self, game):
pass
class Telefono(pysud.Item):
def __init__(self):
# La siguiente línea es necesaria si se sobreescribe el
# constructor de pysud.Item :
pysud.Item.__init__(self, "telefono", "Un viejo teléfono celular.")
self.encendido = False
def use_on(self, game):
if not self.encendido:
game.iom.show_message("Enciendes tu teléfono.")
self.encendido = True
else:
game.iom.show_message("Apagas tu teléfono.")
self.encendido = False
class EsferaAmarilla(pysud.Item):
pass
class EsferaAzul(pysud.Item):
pass
class EsferaVerde(pysud.Item):
pass
##########################
# Eventos Personalizados #
##########################
class CebarUnMate(ev.UseItemWithItemEvent):
def on_success(self, game):
game.iom.show_message('Mmmm... te cebas un rico mate...' )
class MirarCajonera(ev.CommandEvent):
def on_success(self, game):
if not game.get_user_defined_variable('visto_cajonera'):
game.set_user_defined_variable('visto_cajonera', True)
game.iom.show_message('En la cajonera encuentras dos esferas de energía: una amarilla y una azul.')
am = EsferaAmarilla("esfera amarilla", "Una bonita esfera de energía amarilla...")
az = EsferaAzul("esfera azul", "Una bonita esfera de energía azul...")
game.pc.current_room.add_item(am)
game.pc.current_room.add_item(az)
game.add_global_event(ev.CombineItemEvent(am, az, EsferaVerde("esfera verde", "Una hermosa esfera de energía verde... parece poderosa...")))
else:
game.iom.show_message('No encuentras nada nuevo en la cajonera.')
########
# Main #
########
def gamescript():
print('pysud minidemo items ...')
juego = pysud.Game("MiniDemoTester")
r1 = pysud.Room(
room_name = "Pequeña habitación",
room_description = "Esta pequeña habitación de unos 4m cuadrados tiene una cama en un rincón, un escritorio junto a ella y una mesa cuadrada en el centro. También hay un armario empotrado y una cajonera junto a la puerta. \n Un cuadro en un muro lee: 'Los nuevos comandos son agarrar ITEM, usar ITEM, usar ITEM1 con ITEM2, combinar ITEM1 ITEM2, mirar ITEM (de tu inventario), i o inventario (para ver tus cosas), stats para ver donde has estado, puntos para ver tu puntaje, save, load'."
)
# agregando items a la habitación:
t = Termo("termo", "Es un termo Lumilagro, me recuerda a <NAME>...")
m = Mate("mate", "Un mate de lata.")
cel = Telefono()
r1.add_item(t)
r1.add_item(m)
r1.add_item(cel)
# agregando eventos locales a la habitación:
r1.add_local_event(MirarCajonera(["mirar cajonera", "examinar cajonera"]))
# agregando eventos globales:
juego.add_global_event(CebarUnMate(t, m))
juego.set_user_defined_variable('visto_cajonera', False)
# comenzando:
juego.add_rooms([r1])
juego.pc.move_to_room(r1)
gm = gameMgr.GameManager(juego)
gm.run_game()
if __name__ == '__main__':
gamescript()
```
#### File: demos/es/demoscript_2.py
```python
import pysud_gm as gameMgr
import pysud
import pysud_events as ev
# Fichero de datos de juego creado con pysud_make:
DATAFILE = 'game.data'
##################################################################
# Items #
# Heredar de la clase abstracta pysud.Item e implementar use_on()#
##################################################################
class LlaveDeTrampilla(pysud.Item):
def __init__(self):
pysud.Item.__init__(self, "llave", "Una pequeña llave de hierro.")
def use_on(self, game):
my_flag = game.pc.current_room == game.get_room_by_id('5')
if my_flag:
game.set_user_defined_variable('trampilla_abierta', True)
game.iom.show_message("Abres la trampilla con la pequeña llave. Ya no la necesitarás.")
game.remove_item_from_player(self)
else:
game.iom.show_message("No creo que sea el lugar indicado para usar esta llave.")
class Mazo(pysud.Item):
def __init__(self):
pysud.Item.__init__(self, "mazo", "Una pesada maza de madera con cabeza de metal.")
def use_on(self, game):
jugador_en_bosque = game.pc.current_room == game.get_room_by_id('2')
my_flag = game.get_user_defined_variable('jarro_destruido')
if jugador_en_bosque:
if not my_flag:
game.iom.show_message("Golpeas con tu mazo el jarro abriendole un gran hueco.")
game.set_user_defined_variable('jarro_destruido', True)
else:
game.iom.show_message("Ya no necesitas seguir castigando al pobre jarro.")
else:
game.iom.show_message("No se te ocurre que cosa deberías machacar...")
##################################################################
# Eventos Personalizados #
# Normalmente heredan de alguna subclase de events.Event e #
# implementan el método on_success(game) #
##################################################################
class RevisarJarro(ev.CommandEvent):
""" Revisar el jarro que se encuentra en el bosque. """
def on_success(self, game):
my_flag = game.get_user_defined_variable('jarro_destruido')
if my_flag:
game.iom.show_message("Extiendes la mano a través del hueco para revisar dentro del jarro y encuentras una pequeña llave.")
# se agrega directamente la llave al inventario del personaje:
game.add_item_to_player(LlaveDeTrampilla())
# NOTA: un objeto agregado de esta forma al inventario no
# suma puntos automáticamente:
game.pc.increase_score(1)
game.set_user_defined_variable('jarro_destruido', True)
else:
game.iom.show_message("Un gran jarro de arcilla, la tapa parece estar sellada.")
class RevisarEstante(ev.CommandEvent):
""" Revisar el estante de armas del puesto de guardia. """
def on_success(self, game):
my_flag = game.get_user_defined_variable('estante_revisado')
if not my_flag:
mazo = Mazo()
game.pc.current_room.add_item(mazo)
game.iom.show_message("En el estante de armas solo queda un mazo en buen estado.")
game.set_user_defined_variable('estante_revisado', True)
else:
game.iom.show_message("No encuentras nada nuevo.")
class SubirEscalera(ev.CommandEvent):
""" Subir por la escalera con trampilla de la habitacion 5. """
def on_success(self, game):
my_flag = game.get_user_defined_variable('trampilla_abierta')
if my_flag:
game.iom.show_message("Pasas por la trampilla abierta y subes al siguiente piso.")
destination_room = game.get_room_by_id('6')
game.pc.move_to_room(destination_room)
game.iom.show_current_room()
game.iom.show_message("Has completado esta demo, espero haya sido de tu agrado.")
else:
game.iom.show_message("Una trampilla cerrada te impide ascender al siguiente piso.")
########
# Main #
########
def gamescript():
print('pysud minidemo pysud_make ...\n')
JUEGO = pysud.Game("")
GM = gameMgr.GameManager(JUEGO)
# usar GM para cargar los datos de un nuevo juego:
GM.load_game_data(DATAFILE)
# una vez hecho esto, el juego ya no esta en la
# variable JUEGO sino en GM.game
# agregar eventos globales / este script no usa:
# agregar variables definidas por el usuario:
GM.game.set_user_defined_variable('estante_revisado' , False)
GM.game.set_user_defined_variable('trampilla_abierta', False)
GM.game.set_user_defined_variable('jarro_destruido' , False)
# recolectar habitaciones del juego (solamente las necesarias):
ROOM1 = GM.game.get_room_by_id('1')
ROOM2 = GM.game.get_room_by_id('2')
ROOM4 = GM.game.get_room_by_id('4')
ROOM5 = GM.game.get_room_by_id('5')
# agregar eventos locales a las habitaciones:
LE1 = RevisarEstante(commands = ['mirar estante','revisar estante','ver estante'])
ROOM4.add_local_event(LE1)
LE2 = RevisarJarro (commands = ['mirar jarro', 'examinar jarro', 'ver jarro'])
ROOM2.add_local_event(LE2)
LE3 = SubirEscalera (commands = ['subir escalera','subir'])
ROOM5.add_local_event(LE3)
# Poner al personaje en el escenario inicial:
GM.game.pc.move_to_room(ROOM1)
# Mostrar algunos mensajes:
GM.game.iom.show_welcome_message()
GM.game.iom.show_help()
# Iniciar el juego:
GM.run_game()
if __name__ == '__main__':
GM = gamescript()
```
#### File: modules/json/pysud_rooms_json_parser.py
```python
import json
import pysud
class RoomsJsonParser():
"""
    Parses pysud Room objects from a rooms JSON definition file.
Attributes:
rooms_json_file: A valid json file path containing pysud rooms.
"""
def __init__(self, rooms_json_file_path):
self.rooms_json_file = json.load(open(rooms_json_file_path, 'r', encoding='utf-8'))
def build_rooms_dict(self):
"""
Parses a rooms json.
Returns:
A dictionary containing all found rooms, indexed by room id.
"""
rooms_dict = dict()
for room_json in self.rooms_json_file:
# rooms found in json must be converted to pysud Room objects:
room = pysud.Room(room_json['name'], room_json['description'], room_json['id'])
rooms_dict[room.get_id()] = room
return rooms_dict
def link_rooms(self, rooms_dict):
""" Links rooms trhough transitions.
Args:
rooms_dict: An id indexed rooms collection.
Returns:
A list containing all rooms found on file, conected between them via transitions.
"""
rooms_list = list()
for room_json in self.rooms_json_file:
origin_room_id = room_json['id']
origin_room = rooms_dict[origin_room_id]
# transitions found will be used to create pysud.Transition objects
# this requires all rooms to be loaded first
if ('transitions' in room_json):
for transition_json in room_json['transitions']:
destination_room_id = transition_json['destination']
destination_room = rooms_dict[destination_room_id]
commands = transition_json['commands']
origin_room.add_transition(commands, destination_room)
rooms_list.append(origin_room)
return rooms_list
def parse(self):
return self.link_rooms(self.build_rooms_dict())
# Testing purposes: run this script in a directory containing a valid rooms.json file
# running python3 in interactive mode might be a good idea
if __name__ == '__main__':
ROOMS_FILE = 'rooms.json'
rooms_json_parser = RoomsJsonParser(ROOMS_FILE)
rooms = rooms_json_parser.parse()
rooms_json_parser = None
print('Rooms found: ' + str(rooms))
```
#### File: modules/xml/pysud_xml.py
```python
import xml.etree.ElementTree as etree
import pysud
class XMLParser():
"""
Abstract class to provide basic xml files handling functionality.
Attributes:
xml_file: A valid xml file path.
xml_root: ElementTree Root class object.
"""
def __init__(self, xml_file):
self.xml_file = xml_file
self.xml_root = None
def parse_file(self):
pass
class ConfigXMLParser(XMLParser):
""" This class handles all config.xml file parsing. """
def parse_file(self):
"""
Parses main configuration xml.
Returns:
A dictionary containing configuration values.
(This configuration values should be mapped to
a pysud.Game class object instance variables)
"""
tree = etree.parse(self.xml_file)
self.xml_root = tree.getroot()
config = dict()
config['ENABLE_JOURNAL'] = self.xml_root.find('ENABLE_JOURNAL').text
config['ENABLE_SAVEGAME'] = self.xml_root.find('ENABLE_SAVEGAME').text
config['HELP_TEXT'] = self.xml_root.find('HELP_TEXT').text
config['INVALID_CMD_TEXT'] = self.xml_root.find('INVALID_CMD_TEXT').text
config['PROMPT_TEXT'] = self.xml_root.find('PROMPT_TEXT').text
config['GAME_START_TEXT'] = self.xml_root.find('GAME_START_TEXT').text
config['GAME_RESUME_TEXT'] = self.xml_root.find('GAME_RESUME_TEXT').text
config['GAME_END_TEXT'] = self.xml_root.find('GAME_END_TEXT').text
config['PLAYER_DEFAULT_NAME'] = self.xml_root.find('PLAYER_DEFAULT_NAME').text
config['ROOM_ITEMS_STR'] = self.xml_root.find('ROOM_ITEMS_STR').text
config['PLAYER_SCORE_STR'] = self.xml_root.find('PLAYER_SCORE_STR').text
config['PLAYER_INVENTORY_STR'] = self.xml_root.find('PLAYER_INVENTORY_STR').text
config['PLAYER_STATS_STR'] = self.xml_root.find('PLAYER_STATS_STR').text
return config
class RoomsXMLParser(XMLParser):
""" This class handles all rooms.xml file parsing. """
def parse_file(self):
"""
Parses rooms definition file.
Returns:
A python List holding pysud.Room class objects
"""
tree = etree.parse(self.xml_file)
self.xml_root = tree.getroot()
rooms_data = dict()
for room_tag in self.xml_root:
temp = self.__parse_room(room_tag)
rooms_data[temp[0].get_id()] = temp
return self.__link_rooms(rooms_data)
def __link_rooms(self, rooms_data):
""" Associates each rooms with its transitions.
Args:
rooms_data: a python Dictionary composed by:
key : a pysud.Room Id
value : a pair ( Room class object, transition List for that room )
Returns:
A python List holding pysud.Room class objects
"""
res = list()
for p in rooms_data.values():
if p[1] != None:
for transition_stub in p[1]:
destination_room = rooms_data.get(transition_stub.destination)[0]
commands = transition_stub.get_commands()
p[0].add_transition(commands, destination_room)
res.append(p[0])
return res
def __parse_room(self, room_tag):
"""
Parses a (single) room xml tag.
Args:
room_tag: The room xml tag to parse
Returns:
A python Tuple containing:
                a pysud.Room object
a python list
"""
room_description = None
room_name = room_tag.get('name')
room_id = room_tag.get('id')
room_transitions = None
for element in room_tag.getchildren():
if element.tag == 'description':
room_description = element.text
elif element.tag == 'transitions':
# self.print_transitions(element, room_id)
room_transitions = self.__parse_transition(element, room_id)
else:
pass # invalid tag found
room = pysud.Room(room_name, room_description, room_id)
        return (room, room_transitions)
def __parse_transition(self, transitions_tag, room_origin_id):
l = list()
# iterates over a given room destinations:
for transition in transitions_tag.getchildren():
room_destination_id = transition.get('destination')
ts = TransitionStub(room_origin_id, room_destination_id)
# iterates over a single destination alias commands:
for command in transition.getchildren():
ts.add_command(command.text)
l.append(ts)
return l
def print_transitions(self, transitions_tag, room_origin_id):
print('from ', room_origin_id, ' you can go to:')
# iterates over a given room destinations:
for transition in transitions_tag.getchildren():
dest = transition.get('destination')
print('\t', dest)
# iterates over a single destination alias commands:
for com in transition.getchildren():
print('\t\t with commands:', com.text)
class TransitionStub:
""" Internal class to hold a single transition data.
An object instance of this class is used exclusively to temporarily hold
parsed data for a room transition."""
def __init__(self, origin_room_id , destination_room_id):
self.origin = origin_room_id
self.destination = destination_room_id
self.__commands = list()
def add_command(self, command):
self.__commands.append(command)
def get_commands(self):
return self.__commands
# @DEBUG - For testing purposes only:
if __name__ == '__main__':
CFG_XML_FILE = 'config.xml'
ROOMS_XML_FILE = 'rooms.xml'
C = ConfigXMLParser(CFG_XML_FILE)
RP = RoomsXMLParser (ROOMS_XML_FILE)
R = RP.parse_file()
E = RP.xml_root
``` |
{
"source": "joaonmatos/advent-of-code-2020",
"score": 3
} |
#### File: advent-of-code-2020/day09/day09-1.py
```python
class BoundedQueue:
def __init__(self, capacity):
self.capacity = capacity
self.size = 0
self.list = [None] * self.capacity
self.start_index = 0
def is_full(self):
return self.size == self.capacity
def is_empty(self):
return self.size == 0
def push(self, element):
index = self.actual_index(self.start_index + self.size)
if not self.is_full():
self.size += 1
else:
self.start_index = self.actual_index(self.start_index + 1)
self.list[index] = element
def pop(self):
if self.is_empty():
return
self.size -= 1
self.start_index = self.actual_index(self.start_index + 1)
def front(self):
if self.is_empty():
return None
return self.list[self.start_index]
def actual_index(self, index):
return index if index < self.capacity else index - self.capacity
def __iter__(self):
if self.start_index + self.size < self.capacity:
return self.list[self.start_index:self.start_index + size]
else:
return self.list[self.start_index:] + self.list[:self.actual_index(self.start_index + self.size)]
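# BoundedQueue behaves as a fixed-size ring buffer: once it is full, each push
# overwrites the oldest element, which gives the 25-number sliding window the
# puzzle needs. Minimal illustration (not part of the original solution):
#   q = BoundedQueue(3)
#   for n in (1, 2, 3, 4):
#       q.push(n)
#   q.front()  # -> 2, because pushing 4 evicted 1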
input_file = open("inputs/day09.input")
our_input = [int(line) for line in input_file]
input_file.close()
last_seen = BoundedQueue(25)
preamble = our_input[:25]
rest = our_input[25:]
for item in preamble:
last_seen.push(item)
for item in rest:
sums = set()
for i, e1 in enumerate(last_seen.__iter__()):
for e2 in last_seen.__iter__()[i:]:
sums.add(e1 + e2)
if item not in sums:
print(item)
break
last_seen.push(item)
``` |
{
"source": "joaonmatos/feup-mssi",
"score": 3
} |
#### File: feup-mssi/src/metricsProcessor.py
```python
from typing import Dict, Optional
import xml.etree.ElementTree as ET
class TripInfoStats:
def __init__(self, avg_travel_time: float, total_distance: float) -> None:
self.att = avg_travel_time
self.d = total_distance
def get_att(self) -> float:
return self.att
def get_total_distance(self) -> float:
return self.d
def __repr__(self) -> str:
        return f'TripInfoStats(ATT: {self.att}, Total Distance: {self.d})'
def __str__(self) -> str:
return self.__repr__()
def process_trip_info(path_to_file: str) -> TripInfoStats:
tree = ET.parse(path_to_file)
tripinfos = tree.getroot()
distance = 0.0
duration = 0.0
counter = 0
for tripinfo in tripinfos:
counter += 1
distance += float(tripinfo.attrib["routeLength"])
duration += float(tripinfo.attrib["duration"])
return TripInfoStats(duration / counter, distance)
class GadStats:
def __init__(self, ground_distance: float, air_distance: float) -> None:
self.gtd = ground_distance
self.atd = air_distance
def get_ground_distance(self) -> float:
return self.gtd
def get_air_distance(self) -> float:
return self.atd
def get_atd(self) -> Optional[float]:
return self.atd / self.gtd if self.gtd != 0.0 else None
def __repr__(self) -> str:
return f'GAD Stats(GAD {self.get_atd()})'
def __str__(self) -> str:
return self.__repr__()
def process_edge_info(path_to_file: str) -> GadStats:
tree = ET.parse(path_to_file)
meandata = tree.getroot()
ground = 0.0
air = 0.0
for interval in meandata:
if interval.attrib["id"][-4:] != "uams":
continue
for edge in interval:
if edge.attrib["sampledSeconds"] == "0.00":
continue
if edge.attrib["id"][0:15] == "TLP_to_TLP_edge":
air += float(edge.attrib["speed"]) * \
float(edge.attrib["sampledSeconds"])
else:
ground += float(edge.attrib["speed"]) * \
float(edge.attrib["sampledSeconds"])
return GadStats(ground, air)
class TlpStats:
def set_num_in(self, n: int) -> None:
self._in = n
def get_num_in(self) -> int:
return self._in
def set_num_out(self, n: int) -> None:
self.out = n
def get_num_out(self) -> int:
return self.out
def __repr__(self) -> str:
return f'TlpStats(in:{self._in}, out:{self.out})'
def __str__(self) -> str:
return self.__repr__()
def process_tlp_uses(path_to_file: str) -> Dict[str, TlpStats]:
tree = ET.parse(path_to_file)
detector_node = tree.getroot()
stats = {}
for interval_node in detector_node:
id = interval_node.attrib["id"]
n = int(interval_node.attrib["nVehEntered"])
if id[-3:] == "_in":
tlp_id = id[4:-3]
tlp = TlpStats()
tlp.set_num_in(n)
stats[tlp_id] = tlp
else:
tlp_id = id[4:-4]
stats[tlp_id].set_num_out(n)
return stats
class MetricsProcessor:
def __init__(self, path_to_folder, experiment_id) -> None:
self.prefix = path_to_folder
self.id = experiment_id
def run(self):
self.trips = process_trip_info(f"{self.prefix}/tripinfo_{self.id}.xml")
self.gad = process_edge_info(f"{self.prefix}/edges_{self.id}.xml")
self.tlp = process_tlp_uses(f"{self.prefix}/detectors_{self.id}.xml")
def get_trip_info(self):
return self.trips
def get_ground_air_distance(self):
return self.gad
def get_tlp_stats(self):
return self.tlp
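# Illustrative usage sketch: the folder and experiment id below are hypothetical,
# and the tripinfo_<id>.xml, edges_<id>.xml and detectors_<id>.xml outputs must
# already exist in that folder.
if __name__ == "__main__":
    mp = MetricsProcessor("results", "exp1")
    mp.run()
    print(mp.get_trip_info())
    print(mp.get_ground_air_distance())
    print(mp.get_tlp_stats())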
``` |
{
"source": "Joao-Nogueira-gh/video-compressin",
"score": 3
} |
#### File: video-compressin/src/Bitstream.py
```python
class BitStream:
def __init__(self, f, mode):
## Initialization function
# @param[in] file_name Name of the file that is going to be manipulated
# @param[in] mode Mode of manipulation (write/read)
self.mode = mode
if mode == "READ":
self.input = open(f, "rb")
elif mode == "WRITE":
self.out = open(f, "wb")
self.write_accumulator = 0
self.write_bcount = 0
self.read_accumulator = 0
self.read_bcount = 0
self.read = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.mode == "WRITE":
self.flush()
def __del__(self):
if self.mode == "WRITE":
try:
self.flush()
except ValueError: # I/O operation on closed file.
pass
## Write a single bit to self.file_name
# @param[in] value of the bit to be written to a file
def _writebit(self, bit):
if self.write_bcount == 8:
self.flush()
if bit > 0:
self.write_accumulator |= 1 << 7-self.write_bcount
self.write_bcount += 1
## Read a single bit from file
# @param[out] value of the bit read
def _readbit(self):
if not self.read_bcount:
a = self.input.read(1)
if a:
self.read_accumulator = ord(a)
self.read_bcount = 8
self.read = len(a)
rv = (self.read_accumulator & (1 << self.read_bcount-1)) >> self.read_bcount-1
self.read_bcount -= 1
return rv
## Write N bits to file
# @param[in] bits to be written to a file
# @param[in] number of bits to be written
def writebits(self, bits, n):
while n > 0:
self._writebit(bits & 1 << n-1)
n -= 1
## Read N bits from a file
# @param[in] number of bits to be read from a file
# @param[out] values of bits read from file
def readbits(self, n):
chars = []
for i in range(0, n):
chars.append(str(self._readbit()))
return ''.join(chars)
## Write a given value using a certain number of bits to a file
# @param[in] value Value that is being written to a file
# @param[in] nbits Number of bits being used to write value
# Throws an error if value cannot be written using only nbits
def write_n_bits(self, value, nbits):
if self.mode != "WRITE":
print("ERROR: Unsupported operation given the current mode (READ)")
exit(1)
b = str(bin(value))[2:]
if nbits < len(b):
print("Error: Cannot write "+str(value)+" with as little as " + str(nbits) + " bits")
exit(0)
for i in range(nbits-1, -1, -1):
if i >= len(b):
self._writebit(0)
else:
self.writebits(int(b[len(b) - i - 1], 2), 1)
## Read the value corresponding to the next N bits of the file
# @param[in] nbits Number of bits that are going to be read from file
def read_n_bits(self, nbits):
if self.mode != "READ":
print("ERROR: Unsupported operation given the current mode (WRITE)")
exit(1)
v = 0
while nbits > 0:
v = (v << 1) | self._readbit()
nbits -= 1
return v
## Auxiliary function to the write operations
# Writes the packaged bits to a file
def flush(self):
self.out.write(bytearray([self.write_accumulator]))
self.write_accumulator = 0
self.write_bcount = 0
## Close files
# Closes the file from where Bitstream is reading if the mode is READ
# Closes the file to where Bitstream is writing if the mode is WRITE
# Importantly, it deletes this object which, in turn, flushes any bits left in buffer
def close(self):
self.__del__()
if self.mode == "WRITE":
self.out.close()
elif self.mode == "READ":
self.input.close()
## Writing text (strings) using the Bitstream
# @param[in] txt String to be written
# Additional method that was not planned but was proven useful
def writeTxt(self,txt):
for c in txt:
self.writebits(ord(c),8)
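# Minimal round-trip demo (illustrative; it creates a scratch file 'demo.bin' in
# the working directory): the value 300 is written using 12 bits and read back.
if __name__ == '__main__':
    out = BitStream('demo.bin', 'WRITE')
    out.write_n_bits(300, 12)
    out.close()
    inp = BitStream('demo.bin', 'READ')
    print(inp.read_n_bits(12))  # expected output: 300
    inp.close()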
```
#### File: video-compressin/src/IntraCodec.py
```python
import numpy as np
import math
from Golomb import *
from Bitstream import *
class IntraCodec:
## Initialization function
# @param[in] filename Path of the file to read
# @param[in] encoded A flag used to indicate if the video in the given path was encoded by this same class
# @param[in] limitFrames Optional parameter to limit the number of frames to considered
# Initializing and setting up some useful parameters and flags
def __init__(self, filename, encoded=False, limitFrames=None):
self.vid = filename
self.encoding='utf-8'
# Array of arrays containing each frame's components
self.frameY=[]
self.frameV=[]
self.frameU=[]
self.encoded=False
self.quantizationStep=None
self.colorSpace=None
np.seterr(over='ignore')
#calls read video on initialization
if not encoded:
self.read_video()
else:
self.encoded=True
self.read_encoded_video(limitFrames=limitFrames)
## read_video function
# Reads YUV video information from file, storing all its data in our structures, calculating different components lengths and shapes
def read_video(self):
f=open(self.vid,"rb")
c=1
for line in f:
# Processing header
if c==1:
line=line.decode(self.encoding)
self.header=line.strip()
self.handleHeader()
# Rest of the video
if c>=2:
frameY=f.read(self.yLength)
frameU=f.read(self.uLength)
frameV=f.read(self.vLength)
y=np.frombuffer(frameY, dtype=np.uint8)
u=np.frombuffer(frameU, dtype=np.uint8)
v=np.frombuffer(frameV, dtype=np.uint8)
y=y.reshape(self.shape)
u=u.reshape(self.other_shape)
v=v.reshape(self.other_shape)
self.frameY+=[y]
self.frameU+=[u]
self.frameV+=[v]
c+=1
self.TotalFrames=len(self.frameY)
f.close()
## read_encoded_video function
# @param[in] limitFrames Optional parameter to limit the number of frames to be decoded
# Reads video information (encoded by this class) from file
# Starts by decoding and interpreting the header, followed by decoding of all the pixel errors and recreating the original pixel based on the predictor that was used
def read_encoded_video(self,limitFrames=None):
bs=BitStream(self.vid,'READ')
headerlen=bs.read_n_bits(8)
chars=[]
for i in range(0,headerlen*8):
chars.append(str(bs._readbit()))
res=''.join(chars)
self.header=self.decode_binary_string(res)
#handle header
self.handleHeader()
g=Golomb(self.golombParam)
bitsResto=int(math.log(self.golombParam,2))
if limitFrames==None:
l=self.TotalFrames
else:
l=limitFrames
#
self.frameY=[None]*l
self.frameU=[None]*l
self.frameV=[None]*l
#
for frame in range(0,l):
print('decoding frame',frame)
y=np.zeros(shape=self.shape,dtype=np.uint8)
u=np.zeros(shape=self.other_shape,dtype=np.uint8)
v=np.zeros(shape=self.other_shape,dtype=np.uint8)
for line in range(0, self.height):
for column in range(0,self.width):
pixel=self.decodeWithBitstream(3,bs,g,bitsResto)
a=self.getYUVPixel(frame,line,column-1, resized=False)
c=self.getYUVPixel(frame,line-1,column-1, resized=False)
b=self.getYUVPixel(frame,line-1,column, resized=False)
x=self.predict(a,c,b)
pixel=self.sum(x,pixel)
pixel=tuple(pixel)
l,c=self.adjustCoord(line,column)
y[line,column]=pixel[0]
u[l,c]=pixel[1]
v[l,c]=pixel[2]
#
self.frameY[frame]=y
self.frameU[frame]=u
self.frameV[frame]=v
            # for each frame
self.frameY+=[y]
self.frameU+=[u]
self.frameV+=[v]
#
bs.close()
## handleHeader function
# Interpreting the header of the file, containing width, height, frames per second and color space, assigning them to class variables
# This header can also contain other parameters added while encoding, such as the parameter for Golomb and the quantization steps used for lossy coding
def handleHeader(self):
print(self.header)
fields=self.header.split(" ")
for field in fields:
c=field[0]
if c=='W':
self.width=int(field[1:])
elif c=='H':
self.height=int(field[1:])
elif c=='F':
self.fps=int(field[1:3])
elif c=='C':
self.colorSpace=int(field[1:])
elif c=='G':
self.golombParam=int(field[-1:])
self.encoded=True
elif c=='z':
self.TotalFrames=int(field[1:])
elif c=='q':
qlist=field[1:]
qsteps=qlist.split(':')
self.quantizationStep=[int(qsteps[0]),int(qsteps[1]),int(qsteps[2])]
self.computeShape()
print('width=',self.width, 'height=',self.height, self.fps, self.colorSpace, self.frameLength)
if self.encoded:
print('g=',self.golombParam, 'totalframes=',self.TotalFrames)
if self.quantizationStep!=None:
print('q=',self.quantizationStep)
## adjustCoord function
# @param[in] line Line where the pixel is located
# @param[in] column Column where the pixel is located
# @param[out] line Adjusted line number
# @param[out] column Adjusted column number
# Adjusts given line and column considering the different array shapes in different color spaces
# Useful when assigning new values to a certain pixel position
def adjustCoord(self,line,column):
if self.colorSpace=='4:2:2':
c=math.floor((column/2))
return line,c
elif self.colorSpace=='4:2:0':
c=math.floor((column/2))
l=math.floor((line/2))
return l,c
else:
return line,column
## computeShape function
# Calculating array shapes for YUV components based on the color space
def computeShape(self):
if self.colorSpace==444:
self.colorSpace='4:4:4'
self.frameLength=int(self.width*self.height*3)
self.yLength=self.uLength=self.vLength=int(self.frameLength/3)
self.shape = (int(self.height), self.width)
self.other_shape = (int(self.height), self.width)
elif self.colorSpace==422:
self.colorSpace='4:2:2'
self.frameLength=int(self.width*self.height*2)
self.yLength=int(self.frameLength/2)
self.vLength=self.uLength=int(self.frameLength/4)
self.shape = (int(self.height), self.width)
self.other_shape = (int(self.height), int(self.width/2))
else:
self.colorSpace='4:2:0'
self.frameLength=int(self.width*self.height*3/2)
self.yLength=int(self.frameLength*(2/3))
self.uLength=self.vLength=int(self.frameLength*(1/6))
self.shape = (int(self.height), self.width)
self.other_shape = (int(self.height/2), int(self.width/2))
## getYUVPixel function
# @param[in] frame Number of the frame from which to read the pixel from
# @param[in] line Line in which the pixel is located
# @param[in] column Column in which the pixel is located
# @param[in] resized A flag used to indicate if the arrays have been resized or not
# @param[out] p The pixel tuple in YUV format
# Returns 0,0,0 for non existent pixels, useful for the Codecs
# Adjust line and column numbers based on the color space (and array shapes)
def getYUVPixel(self, frame, line, column, resized):
yf=self.frameY[frame]
uf=self.frameU[frame]
vf=self.frameV[frame]
if resized==False:
if self.colorSpace=='4:2:2':
c=math.floor((column/2))
if line<0 or column<0 or c<0:
return 0,0,0
p=yf[line,column], uf[line,c], vf[line,c]
elif self.colorSpace=='4:2:0':
c=math.floor((column/2))
l=math.floor((line/2))
if line<0 or column<0 or c<0 or l<0:
return 0,0,0
p=yf[line,column], uf[l,c], vf[l,c]
else:
if line<0 or column<0:
return 0,0,0
p=yf[line,column], uf[line,column], vf[line,column]
else:
if line<0 or column<0:
return 0,0,0
p=yf[line,column], uf[line,column], vf[line,column]
return p
## updateYUVPixel function
# @param[in] compNumb Number of the pixel component to be changed (0=Y,1=U,2=V)
# @param[in] frame Number of the frame where the pixel is located
# @param[in] line Line in which the pixel is located
# @param[in] column Column in which the pixel is located
# @param[in] value New value of the pixel's component
# Used for avoiding error propagation in lossy coding
def updateYUVPixel(self,compNumb,frame,line,column,value):
l,c=self.adjustCoord(line,column)
if compNumb==0:
rf=self.frameY[frame]
rf.setflags(write=1)
rf[line,column]=value
elif compNumb==1:
rf=self.frameU[frame]
rf.setflags(write=1)
rf[l,c]=value
else:
rf=self.frameV[frame]
rf.setflags(write=1)
rf[l,c]=value
## encode_video function
# @param[in] filename Path of file to write with the encoded video information
# @param[in] golombparam Golomb's parameter M (factor)
# @param[in] q Optional parameter for specifying each components quantization steps for lossy coding
# @param[in] limitFrames Optional parameter for limiting number of frames to encode
# Starts by encoding the header, passing additional parameters such as the Golomb factor
# Proceeds to encode each pixel, by calculating each component's error according to the predictor function
def encode_video(self, filename, golombparam, q=None, limitFrames=None):
if limitFrames==None:
l=self.TotalFrames
else:
l=limitFrames
g=Golomb(golombparam)
bs=BitStream(filename,'WRITE')
header='ENCODED '+self.header+' Golomb'+str(golombparam)+' z'+str(self.TotalFrames)
if q!=None:
header+=' q'+str(q[0])+':'+str(q[1])+':'+str(q[2])
self.quantizationStep=q
headerlen=len(header)
bs.write_n_bits(headerlen,8)
bs.writeTxt(header)
for frame in range(0,l):
print('encoding frame',frame)
for line in range(0,self.height):
for column in range(0,self.width):
p=self.getYUVPixel(frame,line,column, resized=False)
a=self.getYUVPixel(frame,line,column-1, resized=False)
c=self.getYUVPixel(frame,line-1,column-1, resized=False)
b=self.getYUVPixel(frame,line-1,column, resized=False)
x=self.predict(a,c,b)
erro=self.diff(p,x)
self.encodeWithBitstream(erro,bs,g,pixel=p,frame=frame,line=line,column=column)
bs.close()
## predict function
# @param[in] a Adjacent pixel in position (line,col-1)
# @param[in] c Adjacent pixel in position (line-1,col-1)
# @param[in] b Adjacent pixel in position (line-1,col)
# @param[out] ret Most similar pixel
# The returned pixel is calculated using the JPEG-LS non-linear predictor formula
def predict(self,a,c,b):
y=[int(a[0]),int(c[0]),int(b[0])]
u=[int(a[1]),int(c[1]),int(b[1])]
v=[int(a[2]),int(c[2]),int(b[2])]
l=[y]+[u]+[v]
ret=[]
for component in l:
if component[1]>=max(component[0],component[2]):
x=min(component[0],component[2])
elif component[1]<=min(component[0],component[2]):
x=max(component[0],component[2])
else:
x=component[0]+component[2]-component[1]
ret.append(x)
return ret
## diff function
# @param[in] p First pixel
# @param[in] x Second pixel
# @param[out] r Pixel result of the difference between the two pixels
# Calculates the result pixel by calculating the difference between each yuv component
def diff(self,p,x):
ey=int(p[0])-int(x[0])
eu=int(p[1])-int(x[1])
ev=int(p[2])-int(x[2])
return(ey,eu,ev)
## sum function
# @param[in] p First pixel
# @param[in] x Second pixel
# @param[out] r Pixel result of the sum between the two pixels
# Calculates the result pixel by calculating the sum between each yuv component
def sum(self,p,x):
ey=p[0]+x[0]
eu=p[1]+x[1]
ev=p[2]+x[2]
return(ey,eu,ev)
## printPixels function
# Function for printing pixels, useful during development
def printPixels(self):
l=self.TotalFrames
l=1
h=self.height
#h=20
w=self.width
#w=20
for frame in range(0,l):
#print('processing frame',frame)
for line in range(0,h):
for column in range(0,w):
if line==0 and w-10<=column<w:
p=self.getYUVPixel(frame,line,column, resized=False)
print(p, end=';')
#print('')
## decode_binary_string function
# @param[in] s String
# @param[out] r Decoded binary string
# Additional function to decode binary strings
def decode_binary_string(self,s):
return ''.join(chr(int(s[i*8:i*8+8],2)) for i in range(len(s)//8))
## getFrames function
# @param[out] frames The data structures with all the frames of each component
# Useful to check data integrity
def getFrames(self):
return self.frameY, self.frameU,self.frameV
## encodeWithBitStream function
# @param[in] value Value to be encoded
# @param[in] bs Bitstream class object
# @param[in] g Golomb class object
# @param[in] pixel Current pixel values being encoded, used for lossy coding
# @param[in] frame Frame where the pixel being encoded is located
# @param[in] line Line where the pixel being encoded is located
# @param[in] column Column where the pixel being encoded is located
# Switches the value to be encoded to positive, writing a 1 or 0 according to the original value
# If using lossy coding functionality, divides color component by quantization step and updates pixel value
# Proceeds to write the encoded value by Golomb with the Bitstream
def encodeWithBitstream(self, value,bs,g, pixel=None, frame=None, line=None, column=None):
for i in range(0,len(value)):
if value[i]<0:
n=value[i]*-1
bs.writebits(1,1)
else:
bs.writebits(0,1)
n=value[i]
if self.quantizationStep!=None and self.quantizationStep[i]!=0:
#newValue=pixel[i]+(n)
n=math.floor(n/self.quantizationStep[i])
#if line!=0 and column!=0:
#self.updateYUVPixel(i,frame,line,column,newValue)
n=g.encode(n)
bs.writebits(int(n,2),len(n))
## decodeWithBitStream function
# @param[in] len Number of values to read
# @param[in] bs Bitstream class object
# @param[in] g Golomb class object
# @param[in] bitsResto Number of bits of the remainder = log(factor,2)
# @param[out] pixel Decoded value
# Starts by reading one bit 0 or 1, determing if number was negative
# Reads the bits from the Bitstream and decodes them with Golomb
# Multiplies by quantization step if using lossy coding
def decodeWithBitstream(self, len,bs,g,bitsResto):
pixel=[]
for i in range(0,len):
ay=bs.read_n_bits(1)
seq=''
while True:
r=str(bs.read_n_bits(1))
seq+=r
if r=='0':
break
seq+=str(bs.readbits(bitsResto))
comp=g.decode(seq)
if ay==1:
comp=comp*-1
if self.quantizationStep!=None and self.quantizationStep[i]!=0:
comp=comp*self.quantizationStep[i]
pixel.append(comp)
return pixel
## verifyData function
# @param[in] video Class containing video for comparison
# @param[in] numberoframes Limits number of frames to check
# Compares data between two videos
def verifyData(self,video,numberoframes):
m1,m2,m3=self.getFrames()
m4,m5,m6=video.getFrames()
for i in range(0,numberoframes):
if (np.array_equal(m1[i],m4[i])):
print('Y-',i,'correct')
for i in range(0,numberoframes):
if (np.array_equal(m2[i],m5[i])):
print('U-',i,'correct')
for i in range(0,numberoframes):
if (np.array_equal(m3[i],m6[i])):
print('V-',i,'correct')
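# Illustrative usage sketch (file names are hypothetical; 'input.y4m' stands for a
# raw YUV video whose header matches the format parsed by handleHeader):
#   codec = IntraCodec('input.y4m')
#   codec.encode_video('encoded.bin', golombparam=4)    # lossless intra coding
#   decoded = IntraCodec('encoded.bin', encoded=True)   # decode it back
#   codec.verifyData(decoded, numberoframes=1)          # check the round trip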
``` |
{
"source": "Joao-Norberto/SBC-XXV-maratona-de-progrmacao",
"score": 4
} |
#### File: aquecimento/python/B.py
```python
def fatorial(n):
fat = 1
for i in range(n, 1, -1):
fat = fat * i
return fat
#retorna o fatorial mais próximo de um número
def fatorial_min(n):
X = int(1)
while fatorial(X) <= N:
X = X + 1
return X - 1
N = int(input())
qtd_fatoriais = int(0)
while N > 0:
N = N - fatorial(fatorial_min(N))
qtd_fatoriais = qtd_fatoriais + 1
print(qtd_fatoriais)
``` |
{
"source": "JoaoNunoAbreu/Cartos",
"score": 2
} |
#### File: app/base/models.py
```python
from bcrypt import gensalt, hashpw
from flask_login import UserMixin
from app import neo4j_db, login_manager
class User(UserMixin):
def __init__(self,username):
self._id = username
def __repr__(self):
return str(self._id)
def get_id(self):
return self._id
@login_manager.user_loader
def user_loader(email):
user = neo4j_db.evaluate('match (x:User) where x.email=$v return x limit 1',v=email)
return User(user) if user else None
```
#### File: app/home/routes.py
```python
from calendar import leapdays
from app.home import blueprint
from flask import render_template
from flask_login import login_required
from app import token_required, neo4j_db
from app import token_required
from bson import json_util
from flasgger import swag_from
from flask_cors import CORS, cross_origin
CORS(blueprint)
@blueprint.route('/index', methods=['GET'])
#@token_required
#@login_required
@swag_from('docs/index-get.yml')
def index():
n_users = neo4j_db.evaluate('match (x:User) return count(x)')
n_elementos = neo4j_db.evaluate('match (x:Elemento) return count(x)')
n_colecoes = neo4j_db.evaluate('match (x:Colecao) return count(x)')
lastEle = neo4j_db.run('MATCH (n:Elemento) RETURN n ORDER BY n.created_at desc LIMIT 6')
# --------------
colecoes = neo4j_db.run('match (x:Colecao) return x.designacao').data()
colecoesContadas = {}
for i in colecoes:
n = neo4j_db.evaluate(f"match(n:Colecao)<-[]-(b) where n.designacao=\"{i['x.designacao']}\" return count(b)")
colecoesContadas[i['x.designacao']] = n
# --------------
editoras = neo4j_db.run('match (x:Editora) return x.designacao').data()
editorasContadas = {}
for i in editoras:
n = neo4j_db.evaluate(f"match(n:Editora)<-[]-(b) where n.designacao=\"{i['x.designacao']}\" return count(b)")
editorasContadas[i['x.designacao']] = n
lElem = [ ]
for elem in lastEle :
colecao = neo4j_db.evaluate(f'match (e:Elemento)-[]->(c:Colecao) where e.id="{elem[0]["id"]}" return c.designacao')
editora = neo4j_db.evaluate(f'match (e:Elemento)-[]->(c:Editora) where e.id="{elem[0]["id"]}" return c.designacao')
lingua = neo4j_db.evaluate(f'match (e:Elemento)-[]->(c:Lingua) where e.id="{elem[0]["id"]}" return c.designacao')
tipo = neo4j_db.evaluate(f'match (e:Elemento)-[]->(c:Tipo) where e.id="{elem[0]["id"]}" return c.designacao')
lElem.append({
"id":elem[0]['id'] ,
"titulo":elem[0]['titulo'] ,
"capa": elem[0]['capa'],
"estado": elem[0]['estado'],
"numero": elem[0]['numero'],
"nr_paginas": elem[0]['nr_paginas'],
"texto": elem[0]['texto'],
"observacoes": elem[0]['observacoes'],
"tamanho": elem[0]['tamanho'],
"personagens": elem[0]['personagens'],
"serie": elem[0]['serie'],
"data_publicacao":elem[0]['data_publicacao'],
"colecao":colecao,
"editora":editora,
"lingua":lingua,
"tipo": tipo
})
data = {
"n_users":n_users,
"n_elementos":n_elementos,
"n_colecoes":n_colecoes,
"colecoesContadas":colecoesContadas,
"editorasContadas":editorasContadas,
"lastElementos": lElem
}
return json_util.dumps(data)
``` |
{
"source": "Joaooh/Villager-Bot",
"score": 2
} |
#### File: src/speedups/__init__.py
```python
import importlib
import sys
import speedups.mixins
import speedups.gateway
import speedups.activity
import speedups.utils
import speedups.message
import speedups.ext.commands.cooldowns as speedups_cooldowns
import speedups.ext.commands.view as speedups_view
def install_module(new_module, old_module):
for thing in new_module.__all__:
if hasattr(old_module, thing):
setattr(old_module, thing, getattr(new_module, thing))
def install():
discord = sys.modules.get("discord")
for new_module in (speedups.mixins, speedups.gateway, speedups.activity, speedups.utils, speedups.message):
install_module(new_module, discord)
install_module(speedups_cooldowns, discord.ext.commands.cooldowns)
install_module(speedups_view, discord.ext.commands.view)
importlib.reload(discord.ext.commands.bot)
```
#### File: src/util/setup.py
```python
from discord.ext import commands
import aiofiles
import discord
import logging
import asyncpg
import json
import os
from util.cj import ClassyDict
def villager_bot_intents() -> discord.Intents:
intents = discord.Intents.default()
intents.guilds = True
intents.members = True
intents.bans = True
intents.emojis = False
intents.integrations = False
intents.webhooks = False
intents.invites = False
intents.voice_states = False
intents.presences = True
intents.messages = True
# intents.guild_messages = True
# intents.dm_messages = True
intents.reactions = True
# intents.guild_reactions = True
# intents.dm_reactions = True
intents.typing = False
# intents.guild_typing = False
# intents.dm_typing = False
return intents
def setup_logging() -> logging.Logger:
logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s: %(message)s")
logging.getLogger("asyncio").setLevel(logging.WARNING) # hide annoying asyncio info
logging.getLogger("discord.gateway").setLevel(logging.WARNING) # hide annoying gateway info
return logging.getLogger("main")
async def setup_database(bot: commands.AutoShardedBot, keys: ClassyDict) -> None: # init pool connection to database
bot.db = await asyncpg.create_pool(
host=keys.database.host, # where db is hosted
database=keys.database.name, # name of database
user=keys.database.user, # database username
password=keys.<PASSWORD>, # password which goes with user
max_size=20,
command_timeout=10,
)
def load_text() -> ClassyDict:
text = {}
for filename in os.listdir("data/text"):
with open(f"data/text/{filename}", "r", encoding="utf8") as f:
text.update(json.load(f))
return ClassyDict(text)
async def load_text_async() -> ClassyDict:
text = {}
for filename in os.listdir("data/text"):
async with aiofiles.open(f"data/text/{filename}", "r", encoding="utf8") as f:
text.update(json.loads(await f.read()))
return ClassyDict(text)
``` |
{
"source": "joaoolavo/crazyflie-kinect-control",
"score": 2
} |
#### File: crazyflie-kinect-control/crazyflie_t/kalman_args_t.py
```python
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class kalman_args_t(object):
__slots__ = ["input_rpy", "input_acc", "input_dt", "valid_vicon", "meas_xyz", "smooth_xyz", "smooth_dxyz"]
def __init__(self):
self.input_rpy = [ 0.0 for dim0 in range(3) ]
self.input_acc = [ 0.0 for dim0 in range(3) ]
self.input_dt = 0.0
self.valid_vicon = False
self.meas_xyz = [ 0.0 for dim0 in range(3) ]
self.smooth_xyz = [ 0.0 for dim0 in range(3) ]
self.smooth_dxyz = [ 0.0 for dim0 in range(3) ]
def encode(self):
buf = BytesIO()
buf.write(kalman_args_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack('>3d', *self.input_rpy[:3]))
buf.write(struct.pack('>3d', *self.input_acc[:3]))
buf.write(struct.pack(">db", self.input_dt, self.valid_vicon))
buf.write(struct.pack('>3d', *self.meas_xyz[:3]))
buf.write(struct.pack('>3d', *self.smooth_xyz[:3]))
buf.write(struct.pack('>3d', *self.smooth_dxyz[:3]))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != kalman_args_t._get_packed_fingerprint():
raise ValueError("Decode error")
return kalman_args_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = kalman_args_t()
self.input_rpy = struct.unpack('>3d', buf.read(24))
self.input_acc = struct.unpack('>3d', buf.read(24))
self.input_dt = struct.unpack(">d", buf.read(8))[0]
self.valid_vicon = bool(struct.unpack('b', buf.read(1))[0])
self.meas_xyz = struct.unpack('>3d', buf.read(24))
self.smooth_xyz = struct.unpack('>3d', buf.read(24))
self.smooth_dxyz = struct.unpack('>3d', buf.read(24))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if kalman_args_t in parents: return 0
tmphash = (0x550a26ef4fa794dd) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if kalman_args_t._packed_fingerprint is None:
kalman_args_t._packed_fingerprint = struct.pack(">Q", kalman_args_t._get_hash_recursive([]))
return kalman_args_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
```
#### File: crazyflie-kinect-control/exlcm/angle_t.py
```python
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class angle_t(object):
__slots__ = ["timestamp", "roll", "pitch", "yaw", "enabled"]
def __init__(self):
self.timestamp = 0
self.roll = 0.0
self.pitch = 0.0
self.yaw = 0.0
self.enabled = False
def encode(self):
buf = BytesIO()
buf.write(angle_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">qdddb", self.timestamp, self.roll, self.pitch, self.yaw, self.enabled))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != angle_t._get_packed_fingerprint():
raise ValueError("Decode error")
return angle_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = angle_t()
self.timestamp, self.roll, self.pitch, self.yaw = struct.unpack(">qddd", buf.read(32))
self.enabled = bool(struct.unpack('b', buf.read(1))[0])
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if angle_t in parents: return 0
tmphash = (0x1052689d368f1671) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if angle_t._packed_fingerprint is None:
angle_t._packed_fingerprint = struct.pack(">Q", angle_t._get_hash_recursive([]))
return angle_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
``` |
{
"source": "JoaoOliveira123/Founse-improved",
"score": 2
} |
#### File: Founse-improved/tests/server.py
```python
from quart import Quart, render_template, redirect, url_for, request, session, flash, get_flashed_messages
from secrets import token_urlsafe
import json
app = Quart('Founse-improved', template_folder='public')
app.secret_key = token_urlsafe(16)
async def define_template_in_post(user_informations, url_args, return_final_value=True,**kwargs):
ur = await read_document_and_verify_infos(user_informations,
**kwargs)
if not return_final_value:
if ur != 'error':
oi = await mkrightdict(user_informations, organize, True)
for i in ('cities', 'districts', 'streets', 'houses'):
if not user_informations.get(i, ''):
url_args[i] = oi[i]
return url_args
return ur
async def mkrightdict(dto, ro=False, continue_dict=False):
wet = exclude_empty(dto)
if ro:
organized = ro(list(wet.keys()))
if continue_dict:
return {i: wet[i] for i in organized}
return organized
return wet
async def read_document_and_verify_infos(user_informations,**kwgs):
bd = await read_json_document(kwgs['master_archive_name'])
rf = await mkrightdict(user_informations)
soo_kwgs = {'main': rf, 'cause_error': kwgs['eth'],
'keys': organize(list(rf.keys()))}
return await solve_recursive_objects(bd, kwgs['future_data'], **soo_kwgs)
def exclude_empty(dct):
for i in list(dct):
if not dct[i]:
del dct[i]
return dct
async def get_form():
pre_form = await request.form
return dict(pre_form)
def organize(keys):
return [i for g in ('cities', 'districts', 'streets', 'numbers') for i in keys if i == g]
async def read_json_document(filename):
with open(filename, 'r', encoding='utf-8') as file:
data = await load(file)
return data
async def load(filename):
return json.load(filename)
async def redirect_to(url, **url_values):
if url_values:
return redirect(url_for(url, **url_values))
return redirect(url_for(url))
def capitalize_thing(obj: str):
return ' '.join(i[0].upper() + i[1:] for i in obj.split())
async def solve_recursive_objects(master, return_args: dict, **kwgs):
"""It get data with a recursive way with the args of this being other dictionary's arguments"""
main = kwgs['main']
cause_error = kwgs['cause_error']
keys = kwgs['keys']
before_key = ''
for e, i in enumerate(keys):
if e > 0:
return_args[before_key] = main[before_key]
try:
obj_list = master[i]
for s in obj_list:
if s["Name"] == capitalize_thing(main[i]):
master = s
break
if master not in obj_list:
return_args[i] = obj_list
before_key = i
continue
raise KeyError
except KeyError:
if cause_error[e]:
return 'error'
break
return return_args
# HERE THE APP START
@app.route('/', methods=['GET', 'POST'])
async def index():
form = await get_form()
url_args = await mkrightdict(form, organize, continue_dict=True)
if request.method == 'POST':
ur = await define_template_in_post(url_args, {}, **{'eth': (True, True, False, False), 'future_data': {}, 'master_archive_name': 'data/houses.json'})
if ur != 'error':
session['user_informations'] = ur
await flash('Search Made')
return await redirect_to('houses', **url_args)
form['error_message'] = 'Por favor insira algo válido'
return await render_template('index.html', **form)
@app.route('/houses', methods=['GET', 'POST'])
async def houses():
user_search = dict(request.args)
if not user_search:
return await redirect_to('index')
form = await get_form()
url_args = await mkrightdict(form, organize, True)
error_hapnd = ''
if request.method == 'POST':
        ur = await define_template_in_post(url_args, {}, **{'eth': (True, True, False, False), 'future_data': {}, 'master_archive_name': 'data/houses.json'})
if ur != 'error':
session['user_informations'] = ur
            await flash('Search made')
return await redirect_to('houses', **url_args)
error_hapnd = ur
elif request.method == 'GET':
if not get_flashed_messages():
ur = await define_template_in_post(user_search, {}, **{'eth': (True, True, False, False), 'future_data': {}, 'master_archive_name': 'data/houses.json'})
else:
ur = session['user_informations']
if ur == 'error':
error_hapnd = ur
else:
if len(list(ur.keys())) < 4:
copy_of_ur = ur.copy()
order_of_possible_keys_htf = ['districts', 'streets', 'houses'] #htf means "hard to find"
for i in ur.keys():
if type(copy_of_ur[i]) == list:
more_specific_values = copy_of_ur.pop(i)
organizer_len = len(list(copy_of_ur.keys())) - 1
final_value = []
useful_order = order_of_possible_keys_htf[organizer_len:]
for i in useful_order:
counter = 0
next_ld = []
for x in more_specific_values:
if i != 'houses':
dict_of_time = dict(copy_of_ur,
**{i: x['Name']}
)
for j in x[useful_order[useful_order.index(i) + 1]]:
next_ld.append(j)
else:
dict_of_time = dict(copy_of_ur,
**{i: x}
)
if useful_order.index(i) == 0:
final_value.append({})
try:
final_value[counter].update(dict_of_time)
except:
final_value.append(dict(final_value[counter-1], **dict_of_time))
counter += 1
more_specific_values = next_ld
if error_hapnd == 'error':
form['error_message'] = 'Por Favor insira algo válido'
return await render_template('houses.html', **form)
return await render_template('houses.html', **dict(form, **{'informations_returned': final_value}))
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "joao/open-marchiver",
"score": 3
} |
#### File: open-marchiver/utils/hocr-to-pdf.py
```python
from __future__ import print_function
import argparse
import base64
import glob
import io
import os.path
import re
import sys
import zlib
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
from lxml import etree, html
from PIL import Image
class StdoutWrapper:
"""
Wrapper around stdout that ensures 'bytes' data is decoded
to 'latin1' (0x00 - 0xff) before writing out. This is necessary for
the invisible font to be injected as bytes but written out as a string.
"""
def write(self, data, *args, **kwargs):
if bytes != str and isinstance(data, bytes):
data = data.decode('latin1')
sys.stdout.write(data)
def export_pdf(playground, default_dpi):
"""Create a searchable PDF from a pile of HOCR + JPEG"""
load_invisible_font()
pdf = Canvas(StdoutWrapper(), pageCompression=1)
pdf.setCreator('hocr-tools')
pdf.setTitle(os.path.basename(playground))
images = sorted(glob.glob(os.path.join(playground, '*.jpg')))
dpi = default_dpi
for image in images:
im = Image.open(image)
w, h = im.size
try:
dpi = im.info['dpi'][0]
except KeyError:
pass
width = w * 72 / dpi
height = h * 72 / dpi
pdf.setPageSize((width, height))
pdf.drawImage(image, 0, 0, width=width, height=height)
add_text_layer(pdf, image, height, dpi)
pdf.showPage()
pdf.save()
def add_text_layer(pdf, image, height, dpi):
"""Draw an invisible text layer for OCR data"""
    p1 = re.compile(r'bbox((\s+\d+){4})')
    p2 = re.compile(r'baseline((\s+[\d\.\-]+){2})')
hocrfile = os.path.splitext(image)[0] + ".hocr"
hocr = etree.parse(hocrfile, html.XHTMLParser())
for line in hocr.xpath('//*[@class="ocr_line"]'):
linebox = p1.search(line.attrib['title']).group(1).split()
try:
baseline = p2.search(line.attrib['title']).group(1).split()
except AttributeError:
baseline = [ 0, 0 ]
linebox = [float(i) for i in linebox]
baseline = [float(i) for i in baseline]
xpath_elements = './/*[@class="ocrx_word"]'
if (not(line.xpath('boolean(' + xpath_elements + ')'))):
#if there are no words elements present,
#we switch to lines as elements
xpath_elements = '.'
for word in line.xpath(xpath_elements):
rawtext = word.text_content().strip()
if rawtext == '':
continue
font_width = pdf.stringWidth(rawtext, 'invisible', 8)
if font_width <= 0:
continue
box = p1.search(word.attrib['title']).group(1).split()
box = [float(i) for i in box]
b = polyval(baseline, (box[0] + box[2]) / 2 - linebox[0]) + linebox[3]
text = pdf.beginText()
text.setTextRenderMode(3) # double invisible
text.setFont('invisible', 8)
text.setTextOrigin(box[0] * 72 / dpi, height - b * 72 / dpi)
box_width = (box[2] - box[0]) * 72 / dpi
text.setHorizScale(100.0 * box_width / font_width)
text.textLine(rawtext)
pdf.drawText(text)
def polyval(poly, x):
return x * poly[0] + poly[1]
# Glyphless variation of vedaal's invisible font retrieved from
# http://www.angelfire.com/pr/pgpf/if.html, which says:
# 'Invisible font' is unrestricted freeware. Enjoy, Improve, Distribute freely
def load_invisible_font():
font = """
<KEY>
""".encode('latin1')
uncompressed = bytearray(zlib.decompress(base64.decodestring(font)))
ttf = io.BytesIO(uncompressed)
setattr(ttf ,"name", "(invisible.ttf)")
pdfmetrics.registerFont(TTFont('invisible', ttf))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a searchable PDF from a pile of hOCR and JPEG")
parser.add_argument("imgdir", help="directory with the hOCR and JPEG files (corresponding JPEG and hOCR files have to have the same name with their respective file ending)")
args = parser.parse_args()
export_pdf(args.imgdir, 300)
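    # Example invocation (illustrative): the searchable PDF is written to stdout,
    # so redirect it to a file, e.g.
    #   python hocr-to-pdf.py scanned_pages/ > book.pdf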
``` |
{
"source": "JoaoOrtigao/search_NCBI",
"score": 3
} |
#### File: search_NCBI/py/Search_NCBI_v02_functions.py
```python
from pandas import DataFrame as pd_DataFrame
from pandas import ExcelWriter as pd_ExcelWriter
from os.path import join as pathjoin
from os.path import exists as pathexists
from os.path import isfile
from os import mkdir as osmkdir
from os import getcwd as osgetcwd
from Bio import Entrez
from re import sub
from glob import glob as gb
import xml.etree.ElementTree as ET
parser = ET.XMLParser(encoding="utf-8")
##############################################################################
def CREATE_DIR(OUTDIR):
if not pathexists(pathjoin(OUTDIR)):
osmkdir(pathjoin(OUTDIR))
for DIR in ["IdListDIR","IdListDIR/disease","IdListDIR/query"]:
if not pathexists(pathjoin(OUTDIR,DIR)):
osmkdir(pathjoin(OUTDIR,DIR))
##############################################################################
def MAKE_DICIONARY(DISEASE_LIST):
DISEASES=[]
DISEASES = [line.rstrip('\n') for line in open(DISEASE_LIST)]
CODES = [s.replace(' ', '_') for s in DISEASES]
CODES = [s.replace('\'', '') for s in CODES]
DIC = dict(zip(DISEASES,CODES))
return(DIC)
##############################################################################
def esearch_disease(DISEASE_LIST,OUTDIR):
CREATE_DIR(OUTDIR)
DISEASE_DIC = MAKE_DICIONARY(DISEASE_LIST)
# data frame to store all Counts
# +2 for one extra line for "COUNTS" and "TOTAL1"
df=pd_DataFrame(index=range(0,len(DISEASE_DIC)+2),columns=range(0,8))
df.columns=["disease","COD","QUERY1","QUERY2","QUERY3","QUERY4",\
"QUERY5","TOTAL2"]
COL1=list(DISEASE_DIC); COL1.append('COUNTS'); COL1.append('TOTAL1')
df['disease']=COL1
# data frame to store all the commands used for each search
COMMAND=pd_DataFrame(index=range(0,len(DISEASE_DIC)),columns=range(0,8))
COMMAND.columns=["disease","COD","QUERY1","QUERY2","QUERY3","QUERY4",\
"QUERY5","END"]
COMMAND["disease"]=COL1[0:len(DISEASE_DIC)]
COMMAND["END"]='.'
# data frameto store the queries' explanations
QUERY_description=pd_DataFrame(index=range(0,5),columns=range(0,1))
QUERY_description.columns=["DESCRIPTION"]
QUERY_description.index=["QUERY1","QUERY2","QUERY3","QUERY4","QUERY5"]
QUERY1_desc='Procura o nome da doença em todos os campos e filtra por'\
' experimentos de expressão gênica feitos com amostras '\
'humanas. Essa é a QUERY mais abrangente.'
QUERY2_desc='Igual a QUERY1 só que também procura por "patient" OU '\
'"patients" em todos os campos'
QUERY3_desc='Igual a QUERY2 só que também filtra por bioprojects '\
'presentes na base de dados SRA'
QUERY4_desc='Procura o nome da doença somente no título do bioproject, '\
'procura por "patient" OU "patients" em todos os campos e '\
'filtra por experimentos de expressão gênica feitos com '\
'amostras humanas'
QUERY5_desc='Igual a QUERY4 só que também filtra por bioprojects '\
'presentes na base de dados SRA'
QUERY_description["DESCRIPTION"]=[QUERY1_desc,QUERY2_desc,QUERY3_desc,\
QUERY4_desc,QUERY5_desc]
IdList_QUERY1=[]
IdList_QUERY2=[]
IdList_QUERY3=[]
IdList_QUERY4=[]
IdList_QUERY5=[]
IdList_total=[]
N=0
for DISEASE in list(DISEASE_DIC):
print(str(N)+'\t'+DISEASE)
COD=DISEASE_DIC[DISEASE]
df["COD"][N]=COD
COMMAND["COD"][N]=COD
QUERY_DIC={'1':'("'+DISEASE+'"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])',
'2':'("'+DISEASE+'"[All Fields]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])',
'3':'("'+DISEASE+'"[All Fields]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter]AND"bioproject sra"[Filter])',
'4':'("'+DISEASE+'"[Title]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])',
'5':'("'+DISEASE+'"[Title]AND'\
'("patient"[All Fields]OR"patients"[All Fields])AND'\
'("transcriptome gene expression"[Filter]AND"org '\
'human"[Filter])AND"bioproject sra"[Filter])'}
Idlist_disease=[]
ROUND=['1','2','3','4','5']
for R in ROUND:
QUERY='QUERY'+R
TERM=QUERY_DIC[R]
# COMMAND[locals[QUERY]][N]=TERM
handle = Entrez.esearch(db="bioproject", retmax=1000,
term=TERM)
record = Entrez.read(handle)
handle.close()
if int(record["Count"]) > 1000:
print('\nATTENTION!\nn'+record["Count"]+' bioprojects are '\
'related to this esearch and only 1000 will be written '\
'to the Idlist for the further analysis.\n\n'+QUERY+\
'for '+DISEASE+'\n\n'+QUERY_DIC[R]+'\n')
exit
            # BUILD THE PER-DISEASE LIST
Idlist_disease+=list(record["IdList"])
IdList_total+=list(record["IdList"])
# ADD IDS TO QUERY AND TOTAL LISTS
# IdList_total+=record["IdList"]
if R == '1':
IdList_QUERY1+=list(record["IdList"])
COMMAND['QUERY1'][N]=TERM
df['QUERY1'][N]=int(record["Count"])
elif R == '2':
IdList_QUERY2+=list(record["IdList"])
COMMAND['QUERY2'][N]=TERM
df['QUERY2'][N]=int(record["Count"])
elif R == '3':
IdList_QUERY3+=list(record["IdList"])
COMMAND['QUERY3'][N]=TERM
df['QUERY3'][N]=int(record["Count"])
elif R == '4':
IdList_QUERY4+=list(record["IdList"])
COMMAND['QUERY4'][N]=TERM
df['QUERY4'][N]=int(record["Count"])
elif R == '5':
IdList_QUERY5+=list(record["IdList"])
COMMAND['QUERY5'][N]=TERM
df['QUERY5'][N]=int(record["Count"])
        # remove duplicates from the list
Idlist_disease=list(set(Idlist_disease))
df['TOTAL2'][N]=len(Idlist_disease)
outfile=pathjoin(OUTDIR,"IdListDIR/disease",COD+".txt")
with open(outfile, 'w') as f:
print( "\n".join(Idlist_disease), file = f)
f.close()
N+=1
    # fill in the totals row
    for COL in list(df)[2:len(df)]: # COL from the third column to the last
df[COL][len(DISEASE_DIC)]=df[COL][0:len(DISEASE_DIC)].sum(axis=0)
    # WRITE THE REMAINING LISTS TO TXT FILES
IdList_total=list(set(IdList_total))
outfile=pathjoin(OUTDIR,"IdListDIR/IdList_total.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_total), file = f)
f.close()
IdList_QUERY1=list(set(IdList_QUERY1))
df.loc[len(DISEASE_DIC)+1,"QUERY1"] = len(IdList_QUERY1)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY1.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY1), file = f)
f.close()
IdList_QUERY2=list(set(IdList_QUERY2))
df.loc[len(DISEASE_DIC)+1,"QUERY2"] = len(IdList_QUERY2)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY2.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY2), file = f)
f.close()
IdList_QUERY3=list(set(IdList_QUERY3))
df.loc[len(DISEASE_DIC)+1,"QUERY3"] = len(IdList_QUERY3)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY3.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY3), file = f)
f.close()
IdList_QUERY4=list(set(IdList_QUERY4))
df.loc[len(DISEASE_DIC)+1,"QUERY4"] = len(IdList_QUERY4)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY4.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY4), file = f)
f.close()
IdList_QUERY5=list(set(IdList_QUERY5))
df.loc[len(DISEASE_DIC)+1,"QUERY5"] = len(IdList_QUERY5)
outfile=pathjoin(OUTDIR,"IdListDIR/query","IdList_QUERY5.txt")
with open(outfile, 'w') as f:
print( "\n".join(IdList_QUERY5), file = f)
f.close()
    # WRITE ALL RESULTS TO AN EXCEL FILE
writer = pd_ExcelWriter(pathjoin(OUTDIR,'search_NCBI_RESULT.xlsx'),
engine='xlsxwriter')
df.to_excel(writer, sheet_name='counts')
COMMAND.to_excel(writer, sheet_name='command_lines')
QUERY_description.to_excel(writer, sheet_name='query_description')
writer.save()
return(pathjoin(osgetcwd(),OUTDIR))
##############################################################################
def efetch_found_bioprojects(OUTDIR):
def printProgressBar (iteration, total, prefix = '', suffix = '', \
decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent \
complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}")\
.format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
"""
    COLLECT INFORMATION ABOUT THE BIOPROJECTS FOUND
"""
if pathexists(OUTDIR):
for DIR in ['Bioprojects','Bioprojects/xml']:
if not pathexists(pathjoin(OUTDIR,DIR)):
osmkdir(pathjoin(OUTDIR,DIR))
path_to_list=pathjoin(OUTDIR,'IdListDIR/IdList_total.txt')
if isfile(path_to_list):
with open(path_to_list,'r') as f:
IdList_total=list(filter(None, f.read().splitlines()))
else:
            print('File '+path_to_list+' was not found. Run esearch_disease(OUTDIR) '\
                  'first to create it.')
exit()
else:
        print('Directory '+pathjoin(OUTDIR)+' is not accessible. Did you run '\
              'esearch_disease() previously? If not, do it and try again.')
exit()
df2=pd_DataFrame(index=range(0,len(IdList_total)),columns=range(0,7))
df2.columns=["ID","accession","GEO","title","abstract","disease","COD"]
df2["ID"]=IdList_total
print("\n\n") # ESSE PRINT SERVE PARA DISTANCIAR A BARRA DE PROCESSAMENTO
# QUE VEM LOGO ABAIXO DENTRO DO LOOPING
# prepare bar progress
l = len(IdList_total)
i=0
printProgressBar(0, l, prefix = 'Download:', suffix = 'Complete',
length = 50)
RECALL=[] # if download fails, the ID is stored in RECALL
DIC_ID={}
for ID in IdList_total:
try:
handle = Entrez.efetch(db="bioproject", id=ID)
except:
RECALL+=[ID]
print('handle = Entrez.efetch(db="bioproject", id='+ID+')\tFAILED')
continue # avoid catastrophic event in case NCBI fails to give
            # the information for one ID
try:
record = handle.read()
root = ET.fromstring(record)
DIC = root.find(".//ProjectID/ArchiveID").attrib
DIC_ID[DIC['accession']] = DIC_ID.get(DIC['accession'],DIC['id'])
outfile=pathjoin(OUTDIR,'Bioprojects/xml',DIC['accession']+\
'_'+DIC['id']+'.xml')
#print(outfile)
with open(outfile, "w", encoding="utf-8") as f:
print(record, file = f)
except:
RECALL+=[ID]
print('FAILED to process '+ID+' during the first trial')
continue
printProgressBar(i+1, l, prefix = 'Download:', suffix = 'Complete',
length = 50)
i+=1
# RECALL for failure IDs
if len(RECALL) > 0:
print("\n\nFailure to download IDs. STARTING RECALL.")
l = len(RECALL)
i=0
printProgressBar(0, l, prefix = 'Download:', suffix = 'Complete',
length = 50)
RECALL2=[]
for ID in RECALL:
try:
handle = Entrez.efetch(db="bioproject", id=ID)
except:
RECALL2+=[ID]
print('handle = Entrez.efetch(db="bioproject", id='+ID+')'\
'\tFAILED in RECALL')
continue
try:
record = handle.read()
root = ET.fromstring(record)
DIC = root.find(".//ProjectID/ArchiveID").attrib
DIC_ID[DIC['accession']] = DIC_ID.get(DIC['accession'],DIC['id'])
outfile=pathjoin(OUTDIR,'Bioprojects/xml',DIC['accession']+\
'_'+DIC['id']+'.xml')
#print(outfile)
with open(outfile, "w", encoding="utf-8") as f:
print(record, file = f)
except:
RECALL2+=[ID]
print('FAILED to process '+ID+' during the RECALL')
continue
printProgressBar(i+1, l, prefix = 'RECALL:', suffix = 'Complete',
length = 50)
i+=1
if len(RECALL2) > 0:
outfile=pathjoin(OUTDIR,'Bioprojects/','RECALL_failure.txt')
open(outfile,'w').write(str(RECALL2))
print("It was not possible to get ID even during the RECALL\nYou"\
"can find the problematic IDs on file:\n"+outfile)
outfile=pathjoin(OUTDIR,'Bioprojects/','dict_ID_ACC.txt')
open(outfile,'w').write(str(DIC_ID))
##############################################################################
def collect_XML_file(OUTDIR):
    # Here the XML files with the description of each BioProject are imported.
files = gb(pathjoin(OUTDIR,'Bioprojects/xml/*.xml'))
df=pd_DataFrame(index=range(0,len(files)),columns=range(0,13))
df.columns=["ID","accession","GEO","title","abstract","disease1",\
"COD1","disease2","COD2","disease3","COD3","disease4","COD4"]
DIC_ID={}
N=0
for file in files:
#with open(file, "r", encoding="utf-8") as f:
#contents = f.read()
#tree = ET.fromstring(contents)
tree = ET.parse(file,parser=ET.XMLParser(encoding="utf-8"))
root = tree.getroot()
try:
GEO = root.find(".//ExternalLink/dbXREF/ID").text
except:
GEO = None # declare empty variable
title = root.find(".//ProjectDescr/Title").text
abstract = root.find(".//ProjectDescr/Description").text
DIC = root.find(".//ProjectID/ArchiveID").attrib
DIC_ID[DIC['accession']] = DIC_ID.get(DIC['accession'],DIC['id'])
accession=DIC['accession']
ID=DIC['id']
for COL in ['ID','accession','GEO','title','abstract']:
df[COL][N]=locals()[COL]
#print(N)
N+=1
return df
##############################################################################
def classify_disease(df2,OUTDIR,DISEASE_LIST):
DATADIR=OUTDIR
COD_DIC = MAKE_DICIONARY(DISEASE_LIST)
COD_DIC = {v: k for k, v in COD_DIC.items()} # invert the dictionary map
files2 = gb(pathjoin(OUTDIR,'IdListDIR/disease/*.txt'))
DISEASE1=[]
DISEASE2=[]
DISEASE3=[]
DISEASE4=[]
for file2 in files2:
COD = sub('.txt','',sub('.*IdListDIR/disease\\\\','',file2))
DISEASE = COD_DIC[COD]
with open(file2,'r') as f:
IDs = filter(None, f.read().splitlines())
f.close()
ROUND=['1','2','3','4']
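        # Each BioProject ID can be linked to up to four diseases: the columns
        # disease1/COD1 .. disease4/COD4 act as slots, and each ID is written
        # to the first slot that has not yet been used for it.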
for ID in IDs:
#print(ID)
for R in ROUND:
if ID not in locals()["DISEASE"+R]:
POS=df2[df2["ID"] == ID].index[0]
df2.loc[[POS],'disease'+R] = DISEASE
df2.loc[[POS],'COD'+R] = COD
locals()["DISEASE"+R].append(ID)
break
return df2
##############################################################################
def writer(df2, OUTDIR):
writer = pd_ExcelWriter(pathjoin(OUTDIR,'db_para_curagem.xlsx'),
engine='xlsxwriter')
df2.to_excel(writer, sheet_name='db_completo_nov18')
writer.save()
df2.to_csv(pathjoin(OUTDIR,'db_para_curagem.tsv'),sep='\t')
``` |
{
"source": "joaootavio93/3DFaceTracking",
"score": 3
} |
#### File: joaootavio93/3DFaceTracking/face_fit.py
```python
import cv2
import inspect
import os
import utils
import numpy as np
import face_alignment as fa
from face3d import bfm
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
morphable_model = bfm.MorphabelModel(current_dir + '/face3d/BFM/BFM.mat') # The default BFM (Base Face Model).
shape = morphable_model.get_shape('random')
expression = morphable_model.get_expression('random')
texture = morphable_model.get_texture('random')
vertices = morphable_model.generate_vertices(shape, expression)
colors = morphable_model.generate_colors(texture)
colors = np.minimum(np.maximum(colors, 0), 1)
def fit_3dmm(rotation, translation, scale, width, height, max_iter=3):
""" Fits a 3D face morphable model on a 2D face.
Parameters:
rotation (ndarray): The 2D face rotation matrix.
translation (list): The 2D face translation vector.
scale (float): The 2D face scale.
width (int): The source image width.
height (int): The source image height.
        max_iter (int): The number of fitting iterations.
Returns:
mat: The fitted 3DMM image with alpha channel.
ndarray: The fitted 3DMM 68 keypoints.
"""
transform_vertices = fa.similarity_transform(vertices, rotation, translation, scale)
keypoints = transform_vertices[morphable_model.kpt_ind, : 2]
keypoints_index = morphable_model.kpt_ind
fit_shape, fit_exp, fit_scale, fit_rotation, fit_t3d = morphable_model.fit(keypoints, keypoints_index, max_iter=max_iter)
fitted_vertices = morphable_model.generate_vertices(fit_shape, fit_exp)
fit_rotation = fa.euler_angles_to_rotation_matrix(fit_rotation)
transform_vertices = fa.similarity_transform(fitted_vertices, fit_rotation, [0, 0, 0], fit_scale)
img_2d_vertices = fa.to_coord_system(transform_vertices, width, height)
fit_kpts = img_2d_vertices[morphable_model.kpt_ind, : 2]
fit_img = fa.render_colors(img_2d_vertices, morphable_model.triangles, colors, width, height)
fit_img = fit_img * 255
fit_img = fit_img.astype('uint8')
fit_img = cv2.cvtColor(fit_img, cv2.COLOR_RGB2BGR)
fit_img = utils.add_alpha_channel(fit_img)
return fit_img, fit_kpts
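# Minimal usage sketch (hypothetical values; rotation, translation and scale
# would normally come from the pose estimated in face_alignment.predict_pose):
#
#   rotation = np.eye(3)
#   fit_img, fit_kpts = fit_3dmm(rotation, [0, 0, 0], 1e-3, width=640, height=480)
#   cv2.imwrite('fitted_face.png', fit_img)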
```
#### File: joaootavio93/3DFaceTracking/face_tracking.py
```python
import cv2
import utils
import face_alignment as fa
import face_detection as fd
import face_fit as ff
def track_faces(img):
""" Computer 3D face tracking.
Parameters:
img (list): The input image.
Returns:
        mat: An image showing the 3D face tracking.
"""
    if img is not None:
        h, w, _ = img.shape
bboxes = fd.detect_faces(img, detector=fd.Face_Detector.DLIB)
if len(bboxes):
new_bboxes = fd.resize_bboxes(bboxes)
crops = fd.crop_faces(img, new_bboxes)
count_crops = 0
for crop_img in crops:
crop_h, crop_w, _ = crop_img.shape
new_img = utils.resize_image(crop_img.copy(), width=fa.std_size, height=fa.std_size)
pitch, yaw, roll, scale, rotation, t3d, cam_matrix, landmarks, factor = fa.predict_pose(new_img, new_bboxes[count_crops], dense=False)
fit_img, fit_kpts = ff.fit_3dmm(rotation, t3d, scale * factor, width=crop_w, height=crop_h)
fore_lmks, back_lmks = utils.landmarks_to_np_array(fit_kpts, landmarks)
homography, mask = cv2.findHomography(fore_lmks, back_lmks, cv2.RANSAC)
warp_img = cv2.warpPerspective(fit_img, homography, (w, h))
img = utils.create_transparent_overlay_faster(warp_img, img, w, h)
count_crops += 1
return img
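# Minimal usage sketch (assumes a working webcam; not part of the original file):
#
#   cap = cv2.VideoCapture(0)
#   ok, frame = cap.read()
#   if ok:
#       cv2.imshow('3D face tracking', track_faces(frame))
#       cv2.waitKey(0)
#   cap.release()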
``` |
{
"source": "joaop-aguiar/DNC-Group",
"score": 3
} |
#### File: DNC-Group/Gerador de senha/generator.py
```python
import PySimpleGUI as sg
import random
import os
char_list_maiu = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
char_list_minu = 'abcdefghijklmnopqrstuvwxyz'
numero_list = '1234567890'
especial = '!@#$%&*'
def nova_senha():
chars = random.choices(char_list_minu+char_list_maiu+numero_list+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_letrasMm_numeros():
chars = random.choices(char_list_minu+char_list_maiu+numero_list,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_letrasMm_especial():
chars = random.choices(char_list_maiu+char_list_minu+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_letrasM_numeros_especial():
chars = random.choices(char_list_maiu+numero_list+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_letrasm_numeros_especial():
chars = random.choices(char_list_minu+numero_list+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_maiu_minu():
chars = random.choices(char_list_minu+char_list_maiu,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_maiu_num():
chars = random.choices(char_list_maiu+numero_list,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_maiu_esp():
chars = random.choices(char_list_maiu+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_minu_num():
chars = random.choices(char_list_minu+numero_list,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_minu_esp():
chars = random.choices(char_list_minu+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_num_esp():
chars = random.choices(numero_list+especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_minu():
chars = random.choices(char_list_minu,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_maiu():
chars = random.choices(char_list_maiu,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_numero():
chars = random.choices(numero_list,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def nova_senha_especial():
chars = random.choices(especial,k=int(values['n_char']))
senha_nova = ''.join(chars)
return senha_nova
def copiar_senha(text):
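    # Uses the Windows-only "clip" utility via os.system, so copying to the
    # clipboard only works on Windows.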
command = 'echo | set /p nul=' + text.strip() + '| clip'
os.system(command)
sg.theme('DarkBlue13')
layout = [ [sg.Text('Gerador de senha')],
[sg.Checkbox(text='Incluir Maiusculas',key='Maiu')],
[sg.Checkbox(text='Incluir Minusculas',key='Minu')],
[sg.Checkbox(text='Incluir Números',key='Num')],
[sg.Checkbox(text='Incluir Caracteres especiais',key='Esp')],
[sg.Combo(values=list(range(1,41)),key='n_char',default_value=1),sg.Text('Números de Caracteres')],
[sg.Output(size=(50,2),key='saida')],
[sg.Button('Gerar senha'), sg.Button('Copiar senha')] ]
window = sg.Window('Gerador de senhas', layout)
senha = ''
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
break
if event == 'Gerar senha':
window.FindElement('saida').Update('')
if values['Minu'] is True and values['Maiu'] is True and values['Num'] is True and values['Esp'] is True:
senha = nova_senha()
print(senha)
elif values['Minu'] is True and values['Maiu'] is True and values['Num'] is True:
senha = nova_senha_letrasMm_numeros()
print(senha)
elif values['Minu'] is True and values['Maiu'] is True and values['Esp'] is True:
senha = nova_senha_letrasMm_especial()
print(senha)
elif values['Maiu'] is True and values['Num'] is True and values['Esp'] is True:
senha = nova_senha_letrasM_numeros_especial()
print(senha)
elif values['Minu'] is True and values['Num'] is True and values['Esp'] is True:
senha = nova_senha_letrasm_numeros_especial()
print(senha)
elif values['Minu'] is True and values['Maiu'] is True:
senha = nova_senha_maiu_minu()
print(senha)
elif values['Num'] is True and values['Maiu'] is True:
senha = nova_senha_maiu_num()
print(senha)
elif values['Esp'] is True and values['Maiu'] is True:
senha = nova_senha_maiu_esp()
print(senha)
elif values['Minu'] is True and values['Num'] is True:
senha = nova_senha_minu_num()
print(senha)
elif values['Minu'] is True and values['Esp'] is True:
senha = nova_senha_minu_esp()
print(senha)
elif values['Num'] is True and values['Esp'] is True:
senha = nova_senha_num_esp()
print(senha)
elif values['Minu'] is True:
senha = nova_senha_minu()
print(senha)
elif values['Maiu'] is True:
senha = nova_senha_maiu()
print(senha)
elif values['Num'] is True:
senha = nova_senha_numero()
print(senha)
elif values['Esp'] is True:
senha = nova_senha_especial()
print(senha)
else:
print('Escolher pelo menos um campo acima')
    if event == 'Copiar senha':
        if senha:
            copiar_senha(senha)
            print('Senha copiada para área de transferência')
        else:
            print('Gere uma senha antes de copiar')
window.close()
``` |
{
"source": "joaopalmeiro/advent-of-code-2019",
"score": 3
} |
#### File: 2019/day-2/part-2.py
```python
def get_sum(x, y):
return(x + y)
def get_mul(x, y):
return(x * y)
with open('input.txt', 'r') as reader:
raw_inp = list(map(int, reader.read().split(',')))
STEP = 4
HALT = 99
OPCODE = {
1: get_sum,
2: get_mul
}
for noun in range(100):
for verb in range(100):
BEGIN = 0
inp = raw_inp.copy()
inp[1] = noun
inp[2] = verb
while inp[BEGIN] != HALT:
op_value = OPCODE[inp[BEGIN]](
inp[inp[BEGIN + 1]], inp[inp[BEGIN + 2]])
inp[inp[BEGIN + 3]] = op_value
BEGIN += STEP
if inp[0] == 19690720:
print(100 * noun + verb)
break
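# Each instruction occupies four cells: [opcode, src1, src2, dst]. For example,
# the program [1, 0, 0, 0, 99] adds inp[0] + inp[0] and stores the result at
# position 0, halting as [2, 0, 0, 0, 99].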
``` |
{
"source": "joaopalmeiro/cookiecutter-templates",
"score": 2
} |
#### File: python-pkg/hooks/post_gen_project.py
```python
import os
PROJECT_DIRECTORY: str = os.path.realpath(os.path.curdir)
def remove_file(filepath: str) -> None:
os.remove(os.path.join(PROJECT_DIRECTORY, filepath))
if __name__ == "__main__":
if "{{ cookiecutter.use_jupyter_notebook }}" != "y":
remove_file("demo.ipynb")
``` |
{
"source": "joaopalmeiro/datavis-python-playground",
"score": 3
} |
#### File: datavis-python-playground/altair-andrews-curve/charts.py
```python
import altair as alt
import pandas as pd
from configs import COLORS
def andrews_curve(
data: pd.DataFrame,
xvar: str = "t",
yvar: str = "curve_value",
targetvar: str = "target",
samplevar: str = "sample",
w: int = 450,
h: int = 300,
) -> alt.LayerChart:
selection = alt.selection_single(fields=[targetvar], bind="legend")
base = alt.Chart(data).properties(width=w, height=h).mark_line()
background_chart = base.encode(
x=alt.X(f"{xvar}:Q", axis=alt.Axis(title=None), scale=alt.Scale(nice=False)),
y=alt.Y(f"{yvar}:Q", axis=alt.Axis(title=None)),
detail=alt.Detail(f"{samplevar}:N"),
color=alt.value(COLORS["light_gray"]),
)
chart = background_chart.encode(
color=alt.condition(
selection,
f"{targetvar}:N",
alt.value("transparent"),
legend=alt.Legend(title=f"{targetvar.title()} (click to highlight)"),
),
).add_selection(selection)
return background_chart + chart
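# Hypothetical usage sketch (assumes `data` already holds the precomputed Andrews
# curve values in long format, with one row per sample and t value):
#
#   chart = andrews_curve(data, targetvar="target")
#   chart.save("andrews_curve.html")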
```
#### File: datavis-python-playground/altair-score-distribution-chart/utils.py
```python
import math
import re
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
CONFUSION_CATEGORIES: Tuple[str, str, str, str] = ("TP", "FP", "FN", "TN")
CONFUSION_CATEGORIES_COL_NAME: str = "confusion_category"
COUNT_COL_NAME: str = "count"
def make_classification_df(
seed: int,
n_samples: int = 10_000,
n_features: int = 25,
n_informative: int = 10,
n_redundant: int = 10,
n_repeated: int = 5,
class_sep: float = 0.2,
flip_y: float = 0.1,
target_col: str = "target",
target_dist: Optional[Sequence[float]] = None,
) -> pd.DataFrame:
"""Source of default values: https://queirozf.com/entries/scikit-learn-examples-making-dummy-dataset."""
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
n_redundant=n_redundant,
n_repeated=n_repeated,
class_sep=class_sep,
flip_y=flip_y,
random_state=seed,
weights=target_dist,
)
df = pd.DataFrame(np.c_[X, y])
df[df.columns[-1]] = df[df.columns[-1]].astype(int)
df = df.rename(columns={df.columns[-1]: target_col})
return df
def compute_confusion_categories(
data: pd.DataFrame,
score_var: str,
target_var: str,
threshold: float,
add_counts: bool = True,
) -> pd.DataFrame:
conditions = [
data[target_var].eq(1) & data[score_var].ge(threshold),
data[target_var].eq(0) & data[score_var].ge(threshold),
data[target_var].eq(1) & data[score_var].lt(threshold),
data[target_var].eq(0) & data[score_var].lt(threshold),
]
data[CONFUSION_CATEGORIES_COL_NAME] = np.select(conditions, CONFUSION_CATEGORIES)
agg_data = (
data[CONFUSION_CATEGORIES_COL_NAME]
.value_counts()
.rename_axis(CONFUSION_CATEGORIES_COL_NAME)
.reset_index(name=COUNT_COL_NAME)
)
return agg_data
# More info: https://numpy.org/devdocs/reference/typing.html
# import numpy.typing as npt
# npt.ArrayLike
def trunc(values: pd.Series, decs: int = 0) -> pd.Series:
return np.trunc(values * 10 ** decs) / (10 ** decs)
def get_order_of_magnitude(number: int) -> int:
return int(math.log10(number))
def compute_bins(
data: pd.DataFrame, xvar: str, target_var: str, nbins: int = 10
) -> pd.DataFrame:
decimal_places = get_order_of_magnitude(nbins)
nbins += 1
bins = np.linspace(0.0, 1.0, nbins)
# binned = data.groupby(
# [target_var, pd.cut(data[xvar], bins=bins, right=False)]
# ).count()
binned = data.copy()
binned["trunc_score_count"] = trunc(binned[xvar], decs=decimal_places)
binned = binned.groupby([target_var, "trunc_score_count"])[
["trunc_score_count"]
].count()
binned = (
binned.reset_index(level=target_var)
.rename_axis("bin_min")
.reset_index(drop=False)
)
full_binned = pd.DataFrame(
{
target_var: np.repeat(binned[target_var].unique(), nbins - 1),
"bin_min": np.tile(bins[:-1], 2),
}
)
binned["bin_min"] = binned["bin_min"].round(decimal_places)
full_binned["bin_min"] = full_binned["bin_min"].round(decimal_places)
full_binned = full_binned.merge(
binned, how="left", on=[target_var, "bin_min"]
).fillna(0)
full_binned["trunc_score_count"] = full_binned["trunc_score_count"].astype("int32")
full_binned["bin_max"] = np.tile(bins[1:], 2)
return full_binned
def prepara_bin_edges_for_silhouette_line(
data: pd.DataFrame, target_var: str, value: int
) -> pd.DataFrame:
# shifted_data = data.copy()
# shifted_data["bin_min"] = shifted_data["bin_min"] + 0.1
# return (
# pd.concat([data, shifted_data], ignore_index=True)
# .rename(columns={"bin_min": "bin"})
# .drop(columns=["bin_max"])
# )
silhouette_data = data.query(f"{target_var} == {value}")
last_row = (
silhouette_data.tail(1)
.rename(columns={"bin_max": "bin"})
.drop(columns=["bin_min"])
)
silhouette_data = silhouette_data.rename(columns={"bin_min": "bin"}).drop(
columns=["bin_max"]
)
return pd.concat([silhouette_data, last_row], ignore_index=True)
```
#### File: datavis-python-playground/bar-chart-confusion-matrix/charts.py
```python
import altair as alt
import pandas as pd
from utils import (
CONFUSION_CATEGORIES,
CONFUSION_CATEGORIES_COL_NAME,
COUNT_COL_NAME,
compute_confusion_categories,
humanize_title,
millify,
)
def bar_chart(
data: pd.DataFrame,
xvar: str,
yvar: str,
excluded_category: str,
w: int = 300,
h: int = 300,
) -> alt.Chart:
return (
alt.Chart(data, width=w, height=h)
.mark_bar()
.encode(
x=alt.X(f"{xvar}:N", axis=alt.Axis(labelAngle=0, title=None), sort="-y"),
y=alt.Y(f"{yvar}:Q", axis=alt.Axis(title=humanize_title(yvar))),
tooltip=[
alt.Tooltip(f"{xvar}:N", title=humanize_title(xvar)),
alt.Tooltip(f"{yvar}:Q", title=humanize_title(yvar)),
],
)
.transform_filter(alt.datum[xvar] != excluded_category)
)
def stacked_bar(
data: pd.DataFrame, xvar: str, yvar: str, main_category: str, w: int = 300
) -> alt.Chart:
main_value = data.query(f"{xvar} == '{main_category}'")[yvar].item()
others_value = data.query(f"{xvar} != '{main_category}'")[yvar].sum()
df = pd.DataFrame(
{
"Confusion Category": [
" + ".join(CONFUSION_CATEGORIES[:-1]),
main_category,
],
"Count": [others_value, main_value],
}
)
base = alt.Chart(df, width=300)
stacked_bar = base.mark_bar(
size=5, strokeWidth=0.5, stroke="black", strokeOpacity=1, tooltip=True
).encode(
x=alt.X("Count:Q", axis=None),
color=alt.Color("Confusion Category:N", legend=None, sort=None),
)
return stacked_bar
def confusion_matrix(
data: pd.DataFrame,
score_var: str,
target_var: str,
threshold: float,
subtitle_sep: str = " • ",
) -> alt.VConcatChart:
data = compute_confusion_categories(data, score_var, target_var, threshold)
tn_name = CONFUSION_CATEGORIES[-1]
total = millify(data[COUNT_COL_NAME].sum())
# print(data)
bar = bar_chart(data, CONFUSION_CATEGORIES_COL_NAME, COUNT_COL_NAME, tn_name)
aux_bar = stacked_bar(data, CONFUSION_CATEGORIES_COL_NAME, COUNT_COL_NAME, tn_name)
return (
alt.vconcat(bar, aux_bar, spacing=0)
.properties(
title={
"text": "Confusion Matrix",
"subtitle": [
f"{total} instances{subtitle_sep}Threshold: {threshold}",
"",
],
}
)
.configure_title(anchor="start")
.configure_view(strokeOpacity=0)
)
```
#### File: datavis-python-playground/pandas-matplotlib-table/script.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def mpl_table(
data,
col_width=3.0,
row_height=0.625,
bbox=[0, 0, 1, 1],
font_size=14,
edge_color="w",
header_color="#44475a",
font_color="#44475a",
row_colors=["#f5f5f5", "w"],
header_columns=0,
**kwargs,
):
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array(
[col_width, row_height]
)
fig, ax = plt.subplots(figsize=size) # (width, height)
ax.axis("off")
mpl_table = ax.table(
cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs,
)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in mpl_table.get_celld().items():
cell.set_edgecolor(edge_color)
cell.get_text().set_color(font_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight="bold", color="w")
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0] % len(row_colors)])
return fig
if __name__ == "__main__":
data = pd.DataFrame(np.random.randint(0, 100, size=(20, 5)), columns=list("ABCDE"))
table = mpl_table(data)
table.savefig("table.png", dpi=300, bbox_inches="tight")
```
#### File: datavis-python-playground/tk-stellar-chart/chart.py
```python
import math
import os
import tkinter as tk
from PIL import Image
class SpiderChart(tk.Canvas):
def __init__(self, master, data, concentrics=10, scale=200, width=500, height=500):
super().__init__(master, width=width, height=height)
self.scale = scale
self.center = width // 2, height // 2 # Tuple
self.labels = tuple(d[0] for d in data)
self.values = tuple(d[1] for d in data)
self.num_pts = len(self.labels)
self.concentrics = [n / (concentrics) for n in range(1, concentrics + 1)]
self.draw()
def position(self, x, y):
cx, cy = self.center
return x + cx, cy - y
def draw_circle_from_radius_center(self, radius):
rad = radius * self.scale
x0, y0 = self.position(-rad, rad)
x1, y1 = self.position(rad, -rad)
return self.create_oval(x0, y0, x1, y1, dash=(1, 3))
def draw_label(self, idx, label):
angle = idx * (2 * math.pi) / self.num_pts
d = self.concentrics[-1] * self.scale
x, y = d * math.cos(angle), d * math.sin(angle)
self.create_line(*self.center, *self.position(x, y), dash=(1, 3))
d *= 1.1
x, y = d * math.cos(angle), d * math.sin(angle)
self.create_text(*self.position(x, y), text=label)
def draw_polygon(self):
points = []
for idx, val in enumerate(self.values):
d = (val / 100) * self.scale
angle = idx * (2 * math.pi) / self.num_pts
x, y = d * math.cos(angle), d * math.sin(angle)
points.append(self.position(x, y))
self.create_polygon(points, fill="dark turquoise")
def draw(self):
self.draw_polygon()
for concentric in self.concentrics:
self.draw_circle_from_radius_center(concentric)
for idx, label in enumerate(self.labels):
self.draw_label(idx, label)
def save_as(self, filename, fmt="png"):
self.update()
self.postscript(
file=f"{filename}.ps", colormode="color",
)
img = Image.open(f"{filename}.ps")
img.save(f"{filename}.{fmt}")
os.remove(f"{filename}.ps")
class StellarChart(SpiderChart):
def draw_polygon(self):
da = math.pi / self.num_pts # To be between labels
b = 0.05 * self.scale
points = []
for idx, val in enumerate(self.values):
d = (val / 100) * self.scale
angle = idx * (2 * math.pi) / self.num_pts
x, y = d * math.cos(angle), d * math.sin(angle)
points.append(self.position(x, y))
xb, yb = b * math.cos(angle + da), b * math.sin(angle + da)
points.append(self.position(xb, yb))
self.create_polygon(points, width=3, outline="red", fill="pink", join=tk.ROUND)
if __name__ == "__main__":
data = [
("stamina", 70),
("python-skill", 100),
("strength", 80),
("break-dance", 66),
("speed", 45),
("health", 72),
("healing", 90),
("energy", 12),
("libido", 100),
]
root = tk.Tk()
stellar = StellarChart(root, data)
stellar.pack(side=tk.LEFT)
spider = SpiderChart(root, data)
spider.pack(side=tk.LEFT)
stellar.save_as("stellar_chart")
root.mainloop()
```
#### File: datavis-python-playground/unicode-sparkline/script.py
```python
from functools import partial
SPARKBAR_CHARS = "▁▂▃▅▆▇"
def get_sparkbar_char(datum, mn, incr, empty_zero, none_value):
"""Source: fastcore (https://fastcore.fast.ai/).
>>> not 0
True
>>> not 1
False
"""
if datum is None:
return none_value
elif empty_zero and not datum:
return " "
else:
bar_idx = int((datum - mn) / incr - 0.5)
return SPARKBAR_CHARS[bar_idx]
def sparkbars(data, empty_zero=True, none_value=" "):
"""Source: fastcore (https://fastcore.fast.ai/)."""
valid = [datum for datum in data if datum is not None]
mn = min(valid)
mx = max(valid)
n = len(SPARKBAR_CHARS) # Number of bars available
extent = mx - mn
bar_incr = extent / n # "Uniform"
res = [
get_sparkbar_char(datum, mn, bar_incr, empty_zero, none_value) for datum in data
]
return "".join(res)
if __name__ == "__main__":
print = partial(print, end="\n\n")
print(sparkbars([9, 6, None, 1, 4, 0, 8, 15, 10]))
print(sparkbars([9, 6, None, 1, 4, 0, 8, 15, 10], empty_zero=False))
print(
sparkbars([9, 6, None, 1, 4, 0, 8, 15, 10], none_value="�")
) # Replacement character
print(sparkbars([1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1]))
print(sparkbars([0, 1, 19, 20]))
``` |
{
"source": "joaopalmeiro/galho",
"score": 4
} |
#### File: galho/galho/main.py
```python
from pathlib import Path
from .utils import clean_terminal
SPACE = " " * 4
ONE_PART = "│ "
TWO_PARTS = "└── "
THREE_PARTS = "├── "
def tree_generator(root, pre_indentation_line):
"""
Based on: https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python
"""
file_and_dir_names = sorted(root.iterdir())
indentation_lines = [THREE_PARTS] * (len(file_and_dir_names) - 1) + [TWO_PARTS]
# Using `zip` (instead of comparing each element to the last one on a list,
# for example) is important to abstract the possible names of files and folders
# from which indentation lines they should have.
branches = zip(indentation_lines, file_and_dir_names)
for indentation_line, path in branches:
yield pre_indentation_line + indentation_line + path.name
if path.is_dir():
new_pre_indentation_line = (
ONE_PART if indentation_line == THREE_PARTS else SPACE
)
yield from tree_generator(
path, pre_indentation_line + new_pre_indentation_line
)
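# For a hypothetical directory containing "a.txt" and a subfolder "b" with
# "c.txt" inside, the generator yields:
#   ├── a.txt
#   └── b
#       └── c.txt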
def get_root_representation(root, root_representation):
if root_representation == "name":
return root.name
elif root_representation == "dot":
return "."
else:
return ""
def main(root=Path.cwd(), pre_indentation_line="", root_representation="dot"):
clean_terminal()
# tree_wo_indentation_lines()
print(get_root_representation(root, root_representation))
for branch in tree_generator(root, ""):
print(branch)
if __name__ == "__main__":
main()
``` |
{
"source": "joaopalmeiro/glone",
"score": 2
} |
#### File: glone/glone/cli.py
```python
from datetime import datetime
from pathlib import Path
from typing import Generator
import click
from fastcore.net import urlsend
from ghapi.all import GH_HOST, GhApi, paged
from humanize import naturalsize
from . import __version__
from .constants import (
ARCHIVE_FILE_FORMATS,
DEFAULT_ENV_VARIABLE,
OUTPUT_FOLDER_PREFIX,
OUTPUT_FOLDER_SEP,
)
from .utils import get_folder_file_count, get_folder_size
@click.command()
@click.argument("username", type=str)
@click.option(
"-o",
"--output",
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
),
default=".",
metavar="PATH",
show_default=True,
)
@click.option(
"-f",
"--file-format",
type=click.Choice(ARCHIVE_FILE_FORMATS, case_sensitive=False),
default=ARCHIVE_FILE_FORMATS[0],
show_default=True,
)
@click.option(
"-t",
"--token",
type=str,
metavar="VALUE",
envvar=DEFAULT_ENV_VARIABLE,
show_envvar=True,
)
@click.version_option(version=__version__)
def main(username: str, output: str, file_format: str, token: str) -> None:
"""A Python CLI to backup all your GitHub repositories."""
# More info:
# - https://ghapi.fast.ai/core.html#GhApi
# - https://ghapi.fast.ai/core.html#Operations
# (if don't pass the token parameter, then your GITHUB_TOKEN environment variable
# will be used, if available)
# - https://ghapi.fast.ai/page.html#paged
api = GhApi(owner=username, token=token)
# click.echo(api.headers)
# click.echo(api.func_dict)
# More info:
# - https://ghapi.fast.ai/fullapi.html#repos
# - https://docs.github.com/en/rest/reference/repos#list-public-repositories
# (it is not for a specific user)
# - https://docs.github.com/en/rest/reference/repos#list-repositories-for-a-user
# (public repositories)
# - https://docs.github.com/en/rest/reference/repos#list-repositories-for-the-authenticated-user
# (all repositories)
# repos = api.repos.list_for_user(username=username, type="all", sort="pushed")
repos: Generator = paged(
api.repos.list_for_authenticated_user,
visibility="all",
affiliation="owner",
sort="full_name",
)
# More info:
# - https://stackoverflow.com/a/50110841
# - https://docs.python.org/3.6/library/pathlib.html#pathlib.Path.mkdir
# - https://stackoverflow.com/a/32490661
# - https://docs.python.org/3.6/library/pathlib.html#pathlib.Path.open
timestamp: str = datetime.today().strftime(f"%Y%m%d{OUTPUT_FOLDER_SEP}%H%M%S")
output_folder = (
Path(output) / f"{OUTPUT_FOLDER_PREFIX}{OUTPUT_FOLDER_SEP}{timestamp}"
)
output_folder.mkdir(parents=False, exist_ok=False)
click.echo(f"Output folder: {output_folder}")
# More info:
# - https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-zip
# - https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-tar
# - https://github.com/fastai/ghapi/issues/22
# - https://github.com/fastai/fastcore/pull/308
# - https://github.com/fastai/fastcore/blob/1.3.27/fastcore/net.py#L203
# - https://stackoverflow.com/a/67964008
# (ref="" for the master/main branch)
# Note: It is not working. Use an alternative. See error message for debugging.
# It would work if the execution was via this if branch, for example:
# https://github.com/fastai/fastcore/blob/1.3.27/fastcore/net.py#L209
# api.repos.download_zipball_archive(repo="glone", ref="")
# api.repos.download_zipball_archive(repo="glone", ref="", archive_format="zip")
# Workaround:
# - https://fastcore.fast.ai/net.html#urlsend
# - https://docs.github.com/en/rest/reference/actions#download-an-artifact
# - https://docs.python.org/3.6/library/functions.html#open
# - https://stackoverflow.com/a/6633693
# - https://click.palletsprojects.com/en/7.x/options/?highlight=choice#choice-options
# zip_url = (
# f"{GH_HOST}/repos/{username}/" + "{repo}/" + f"{file_format}ball" + "/{ref}"
# )
# route = {"repo": "glone", "ref": "", "archive_format": file_format}
# or
# route = {"repo": "glone", "ref": "", "archive_format": "zip"}
# click.echo(zip_url)
# click.echo(route)
# res, headers = urlsend(
# zip_url, "GET", headers=api.headers, route=route, return_headers=True
# )
# click.echo(headers)
# _, _, output_filename = headers["content-disposition"].partition("filename=")
# click.echo(output_filename)
# with open(output_folder / output_filename, "wb") as fh:
# fh.write(res)
zip_url = (
f"{GH_HOST}/repos/{username}/" + "{repo}/" + f"{file_format}ball" + "/{ref}"
)
for page in repos:
# click.echo(len(page))
for repo in page:
click.echo(f"Repo: {repo.name}")
route = {"repo": repo.name, "ref": "", "archive_format": file_format}
res, headers = urlsend(
zip_url, "GET", headers=api.headers, route=route, return_headers=True
)
_, _, output_filename = headers["content-disposition"].partition(
"filename="
)
output_file_path = output_folder / output_filename
with open(output_file_path, "wb") as fh:
fh.write(res)
click.echo(f"Archive file: {output_file_path}")
# break
# break
click.echo(f"Number of archive files/repos: {get_folder_file_count(output_folder)}")
# Compare with:
# du -ch <OUTPUT_FOLDER>/*
# du -sh <OUTPUT_FOLDER>
size = get_folder_size(output_folder)
click.echo(
"Output folder size (approximate): "
f"{naturalsize(size, binary=False, gnu=False)}"
)
click.echo("Done!")
``` |
{
"source": "joaopalmeiro/hackerrank",
"score": 4
} |
#### File: 10-Days-of-Statistics/Python/day-0_weighted_mean.py
```python
N = int(input())
X = list(map(int, input().split()))
W = list(map(int, input().split()))
def weighted_mean(X, W):
numerator = sum([a * b for a, b in zip(X, W)])
denominator = sum(W)
weighted_mean_value = numerator / denominator
return round(weighted_mean_value, 1)
print(weighted_mean(X, W))
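# Worked example: for X = [10, 40, 30, 50, 20] and W = [1, 2, 3, 4, 5] the
# weighted mean is (10*1 + 40*2 + 30*3 + 50*4 + 20*5) / 15 = 480 / 15 = 32.0.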
```
#### File: 10-Days-of-Statistics/Python/day-1_interquartile_range.py
```python
n = int(input())
X = list(map(int, input().split()))
F = list(map(int, input().split()))
S = [x for i, x in enumerate(X) for f in range(F[i])]
S.sort()
n = len(S)
assert sum(F) == n, "The number of elements must equal the total frequency."
def median(n, X):
if n % 2 == 0:
numerator = float(X[int(n / 2)] + X[int(n / 2 - 1)])
median_value = numerator / 2.0
else:
median_value = float(X[int(n / 2)])
return median_value
def IQR(n, X):
Q1 = median(int(n / 2), X[: int(n / 2)])
if n % 2 == 0:
Q3 = median(int(n / 2), X[int(n / 2) :])
else:
Q3 = median(int(n / 2), X[int(n / 2) + 1 :])
return round(Q3 - Q1, 1)
print(IQR(n, S))
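# Worked example: X = [6, 12, 8, 10, 20, 16] with F = [5, 4, 3, 2, 1, 5] expands
# to 20 values; the lower half has median Q1 = 7.0 and the upper half Q3 = 16.0,
# so the printed interquartile range is 9.0.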
```
#### File: Python/01. Introduction/python_if_else.py
```python
def weird_or_not_weird(n):
if n % 2 == 0:
if n <= 5 or n > 20:
return "Not Weird"
else:
return "Weird"
else:
return "Weird"
if __name__ == "__main__":
n = int(input().strip())
print(weird_or_not_weird(n))
```
#### File: Python/03. Strings/capitalize.py
```python
def solve(s: str) -> str:
# Explicitly define `sep` with a whitespace character to ensure that
# only a single whitespace character is used to break the string.
name = " ".join(map(str.capitalize, s.split(sep=" ")))
return name
if __name__ == "__main__":
s = input()
result = solve(s)
print(result)
``` |
{
"source": "joaopalmeiro/istant",
"score": 4
} |
#### File: istant/istant/strings.py
```python
from typing import Any
def is_string(obj: Any) -> bool:
"""Check whether an object is a string."""
return isinstance(obj, str)
def is_mixed_case(string: str) -> bool:
"""Check whether a string contains uppercase and lowercase characters."""
return not string.islower() and not string.isupper()
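# Examples:
#   is_mixed_case("Hello")  -> True
#   is_mixed_case("hello")  -> False
#   is_mixed_case("HELLO")  -> False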
``` |
{
"source": "joaopalmeiro/itils",
"score": 3
} |
#### File: itils/itils/cli.py
```python
from pathlib import Path
import plac
from halo import Halo
from wand.image import Image
from . import __description__, __version__
from .constants import INPUT_HELP, RESIZE_HELP, THRESHOLD, Log
def generate_size_message(img: Image, prefix: str) -> str:
return f"{prefix} size: {img.size} • {img.size[0] * img.size[1]:,} megapixels\n"
# Based on: https://plac.readthedocs.io/en/latest/#implementing-subcommands
class ItilsInterface(object):
commands = ["gslide", "quit"]
def __init__(self):
self.__doc__ = f"{__description__} ({__version__})\n"
@plac.pos("input_img", help=INPUT_HELP, type=Path)
@plac.opt("resize", help=RESIZE_HELP, type=int, metavar="PCT")
def gslide(self, input_img, resize=None):
"""
Resize an image to be smaller than 25 megapixels for Google Slides.
The image can be resized using an explicit percentage as well.
"""
spinner = Halo(text=Log.READ.value, spinner="arc")
spinner.start()
with Image(filename=input_img) as img:
spinner.succeed()
print(generate_size_message(img, "Original"))
spinner.start(Log.RESIZE.value)
if resize is None:
# Resize `img` to have the specified area in pixels.
# Aspect ratio is preserved.
img.transform(resize=f"{THRESHOLD}@")
else:
# -resize X% (70%, for example)
# Option #1:
# scaler = 0.7
# img.resize(int(img.width * scaler), int(img.height * scaler))
# Option #2:
img.transform(resize=f"{resize}%")
spinner.succeed()
print(generate_size_message(img, "New"))
spinner.start(Log.SAVE.value)
img.save(filename=f"{input_img.stem}_output.png")
spinner.succeed()
print(f"Filename: {input_img.stem}_output.png\n")
print("All done! ✨")
def quit(self):
raise plac.Interpreter.Exit
# @plac.pos("input_img", help="The path to the image to be transformed", type=Path)
# def main(input_img):
# pass
# The following entry point definition is for the console_scripts
# keyword option (setuptools) or [tool.poetry.scripts] (Poetry).
# The entry point for console_scripts has to be a function that
# takes zero arguments.
# Source:
# - https://github.com/ialbert/plac/issues/31#issuecomment-572239360
# - https://github.com/caltechlibrary/handprint
def console_scripts_main():
# version: https://github.com/ialbert/plac/blob/master/plac_core.py#L411
# plac.call(main, version=__version__)
# plac.Interpreter(plac.call(ItilsInterface, version=__version__)).interact()
plac.Interpreter.call(ItilsInterface, prompt="itils> ")
``` |
{
"source": "joaopalmeiro/mahou",
"score": 2
} |
#### File: mahou/mahou/__init__.py
```python
from .formatter import Formatter
__package_name__ = "mahou"
__version__ = "0.1.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__description__ = "A package full of tricks... sorry, full of IPython magic commands."
__url__ = "https://github.com/joaopalmeiro/mahou"
def load_ipython_extension(ipython):
ipython.register_magics(Formatter)
``` |
{
"source": "joaopalmeiro/pybites",
"score": 4
} |
#### File: pybites/Beginner-Bites/008.py
```python
from collections import deque
def rotate(string: str, n: int) -> str:
"""Rotate characters in a string.
Expects string and n (int) for number of characters to move.
"""
characters = deque(string)
characters.rotate(-n)
return "".join(characters)
print(rotate("hello", 2))
print(rotate("hello", -2))
``` |
{
"source": "joaopalmeiro/pyrocco",
"score": 3
} |
#### File: pyrocco/pyrocco/cli.py
```python
from math import ceil
import pkg_resources
from PIL import Image
from .constants import DELAY, MEGA_PATH
def layer_images(background, foreground):
canvas = Image.new("RGBA", background.size, color=(0, 0, 0, 0))
offset = background.size[1] - foreground.size[1]
canvas.paste(background, (0, 0), background)
canvas.paste(foreground, (0, offset), foreground)
return canvas
def main():
frames = sorted(pkg_resources.resource_listdir(__name__, MEGA_PATH))
imgs = [
Image.open(pkg_resources.resource_stream(__name__, f"{MEGA_PATH}/{frame}"))
for frame in frames
]
background = Image.open("logo.png")
thumbnail_size = (
imgs[0].size[0],
ceil(imgs[0].size[0] * background.size[1] / background.size[0]),
) # Figma: ceil
background.thumbnail(
(max(thumbnail_size), max(thumbnail_size)), Image.LANCZOS
) # `thumbnail`: floor
# offset = background.size[1] - imgs[0].size[1]
to_gif = [layer_images(background, img) for img in imgs]
to_gif[0].save(
"mega.gif",
format="GIF",
append_images=to_gif[1:],
save_all=True,
duration=DELAY,
loop=0,
optimize=False,
transparency=255,
disposal=2,
)
if __name__ == "__main__":
main()
``` |
{
"source": "joaopalmeiro/pytest-alt-reg",
"score": 3
} |
#### File: pytest-alt-reg/pytest_alt_reg/utils.py
```python
import json
from .constants import JSON_INDENT
def make_location_message(banner, filename):
msg = [banner, f"- {filename}"]
return "\n".join(msg)
def json_loader(filename):
with open(filename) as file:
data = json.load(file)
return data
# More info:
# - https://github.com/altair-viz/altair/blob/master/tools/schemapi/schemapi.py#L340
def altair_dumper(filename, chart):
chart_spec = chart.to_json(indent=JSON_INDENT, sort_keys=False)
with open(filename, "w", encoding="utf-8") as file:
# json.dump(chart_spec, file, indent=JSON_INDENT, sort_keys=False)
file.write(chart_spec)
``` |
{
"source": "joaopalmeiro/pytest-alt-vrt",
"score": 3
} |
#### File: pytest-alt-vrt/pytest_alt_vrt/driver.py
```python
import base64
from io import BytesIO
from pathlib import Path
from PIL import Image
from .image_diff import ImageDiffEngine
DEFAULT_BASELINE_DIR = Path.cwd() / "screenshots" / "baseline"
DEFAULT_OUTPUT_DIR = Path.cwd() / "screenshots"
DEFAULT_VIEWPORT_SIZE = "1024x768"
class AltVRTDriver:
def __init__(self, driver, **options):
self.driver = driver
self.options = options
self.driver.set_window_position(0, 0)
self.set_viewport()
def set_viewport(self):
viewport_dimensions = DEFAULT_VIEWPORT_SIZE.split("x")
self.driver.set_window_size(
*[int(dimension) for dimension in viewport_dimensions]
)
def get_screenshot(self):
stream = BytesIO(
base64.b64decode(self.driver.get_screenshot_as_base64().encode("ascii"))
)
image = Image.open(stream).convert("RGB")
return image
@staticmethod
def _create_dir(directory):
Path(directory).mkdir(parents=True, exist_ok=True)
def generate_html(self, file_path, chart):
self._create_dir(self.baseline_dir)
baseline_html = self.baseline_dir / f"{file_path}.html"
if self.save_baseline:
chart.save(str(baseline_html), format="html")
return baseline_html.as_uri()
self._create_dir(self.output_dir)
fresh_html = self.output_dir / f"{file_path}.html"
chart.save(str(fresh_html), format="html")
return fresh_html.as_uri()
def assert_screenshot(self, file_path, threshold=0):
self._create_dir(self.baseline_dir)
baseline_image = self.baseline_dir / f"{file_path}.png"
if self.save_baseline:
self.get_screenshot().save(baseline_image)
return
self._create_dir(self.output_dir)
fresh_image = self.get_screenshot()
fresh_image_file = self.output_dir / f"{file_path}.png"
fresh_image.save(fresh_image_file)
engine = ImageDiffEngine(baseline_image, fresh_image_file, threshold)
engine.assert_same_images()
@property
def baseline_dir(self):
return self.options["baseline_dir"]
@baseline_dir.setter
def baseline_dir(self, value):
self.options["baseline_dir"] = value
@property
def output_dir(self):
return self.options["output_dir"]
@output_dir.setter
def output_dir(self, value):
self.options["output_dir"] = value
@property
def save_baseline(self):
return self.options["save_baseline"]
@save_baseline.setter
def save_baseline(self, value):
self.options["save_baseline"] = value
```
#### File: pytest-alt-vrt/pytest_alt_vrt/image_diff.py
```python
import math
import pytest
from PIL import Image
from skimage.metrics import mean_squared_error
from .utils import pil2np
class ImageDiffEngine:
def __init__(self, baseline_file, output_file, threshold):
self.baseline_file = baseline_file
self.output_file = output_file
self.threshold = threshold
self.baseline_image = Image.open(baseline_file).convert("RGB")
self.output_image = Image.open(output_file).convert("RGB")
def assert_same_images(self):
diff = self.root_mean_squared_error()
if diff > self.threshold:
pytest.fail(f"New screenshot did not match the baseline ({diff})")
# Version that uses Pillow and Python:
# def root_mean_squared_error(self):
# diff = ImageChops.difference(self.baseline_image, self.output_image)
# squared_values = [d ** 2 for d in flatten(diff.getdata())]
# mse = mean(squared_values)
# rmse = math.sqrt(mse)
# return rmse
# Version that uses NumPy, scikit-image, and Python:
def root_mean_squared_error(self):
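        # RMSE over all pixels and channels: sqrt(mean((baseline - output) ** 2)),
        # with both images converted to NumPy arrays first.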
rmse = math.sqrt(
mean_squared_error(pil2np(self.baseline_image), pil2np(self.output_image))
)
return rmse
```
#### File: pytest-alt-vrt/tests/test_example.py
```python
import altair as alt
import pytest
@pytest.fixture
def example_chart():
source = alt.pd.DataFrame(
{
"a": ["A", "B", "C", "D", "E", "F", "G", "H", "I"],
"b": [28, 55, 43, 91, 81, 53, 19, 87, 52],
}
)
chart = alt.Chart(source).mark_bar().encode(x="a", y="b")
return chart
def test_example_chart(alt_vrt, example_chart):
name = "example_chart"
uri = alt_vrt.generate_html(name, example_chart)
alt_vrt.driver.get(uri)
alt_vrt.assert_screenshot(name)
``` |