| metadata (dict) | text (string, lengths 60 to 3.49M) |
|---|---|
{
"source": "joaovcaetano/Algoritmos-Bioinspirados",
"score": 3
} |
#### File: joaovcaetano/Algoritmos-Bioinspirados/ag.py
```python
from random import randint
from random import uniform
import math
import matplotlib.pyplot as plt
tamanho_populacao = 50
numero_individuos = 3
taxa_mutacao = 0.05
geracoes = 200
alfa = 0.8
melhores = []
individuo = []
def roleta(porcentagem_individual):
roleta = uniform(0.0, 1.0)
soma = 0
j = 0
while(soma <= roleta):
soma = soma + porcentagem_individual[j]
j = j + 1
pos_pai = j - 1
return pos_pai
def selecao(total_fit, fit_uni):
porcentagem_individual = []
maximiza = []
for i in range(0,len(fit_uni)):
maximiza.append(1 / fit_uni[i])
total_fit = sum(maximiza)
for i in range(0,len(fit_uni)):
porcent = maximiza[i] / total_fit
porcentagem_individual.append(porcent)
pai1 = roleta(porcentagem_individual)
pai2 = roleta(porcentagem_individual)
pais = []
pais.append(pai1)
pais.append(pai2)
return pais
def cruzamento(individuo, pais):
pos1 = individuo[pais[0]][0]
pos2 = individuo[pais[1]][0]
pos3 = individuo[pais[0]][1]
pos4 = individuo[pais[1]][1]
pos5 = individuo[pais[0]][2]
pos6 = individuo[pais[1]][2]
media1 = (pos1+pos2)/2
media2 = (pos3+pos4)/2
media3 = (pos5+pos6)/2
filho = []
filho.append(media1)
filho.append(media2)
filho.append(media3)
return filho
def blend_fator(extrapola, fp1, fp2):
d = fp1 - fp2
if (d < 0):
menor = fp1 - extrapola
maior = fp2 + extrapola
else:
menor = fp2 - extrapola
maior = fp1 + extrapola
fator_novo = uniform(float(menor), float(maior))
return fator_novo
def cruzamento_blend(individuo, pais):
d1 = individuo[pais[0]][0] - individuo[pais[1]][0]
d2 = individuo[pais[0]][1] - individuo[pais[1]][1]
d3 = individuo[pais[0]][2] - individuo[pais[1]][2]
extrapola1 = (abs(d1) * alfa)
extrapola2 = (abs(d2) * alfa)
extrapola3 = (abs(d3) * alfa)
f1 = blend_fator(extrapola1, individuo[pais[0]][0], individuo[pais[1]][0])
f2 = blend_fator(extrapola2, individuo[pais[0]][1], individuo[pais[1]][1])
f3 = blend_fator(extrapola3, individuo[pais[0]][2], individuo[pais[1]][2])
filho = []
filho.append(f1)
filho.append(f2)
filho.append(f3)
return filho
def popInicial():
individuo = []
for i in range(0,tamanho_populacao):
individuo.append([])
for j in range(0,numero_individuos):
a = uniform(-2.0,2.0)
individuo[i].append(a)
return individuo
def plota(melhores):
x = []
y = []
for k in range(0,geracoes):
x.append(k)
y.append(melhores[k])
fig,ax = plt.subplots()
ax.plot(x, y)
plt.show()
y = 0
m = 0
analise = 100
vetor_analise = []
while(m<analise):
y = 0
individuo = popInicial()
melhores = []
while(y<geracoes):
z = []
for i in range(0,len(individuo)):
x = 0
for j in range(0,numero_individuos):
x += (1.0/(individuo[i][j])**2)
x = math.sqrt(x)
z.append(x)
random = []
ind = []
total_fit = 0
for i in range(0, len(z)):
total_fit = total_fit + z[i]
filhos = []
        for i in range(0, tamanho_populacao // 2):  # two children per selected pair
pais = selecao(total_fit, z)
fi = cruzamento_blend(individuo, pais)
filhos.append(fi)
fi2 = cruzamento_blend(individuo, pais)
filhos.append(fi2)
fitfilhos = []
for i in range(0,len(filhos)):
for j in range(0,numero_individuos):
x = uniform(0.0,1.0)
if(taxa_mutacao>x):
filhos[i][j] = uniform(-2.0,2.0)
for i in range(0,len(filhos)):
x = 0
for j in range(0,numero_individuos):
x += (1.0/(filhos[i][j])**2)
x = math.sqrt(x)
fitfilhos.append(x)
test1 = sorted(z)
indice_melhor = z.index(min(z))
aux = individuo[indice_melhor]
individuo = []
individuo = filhos
individuo[0] = aux
melhores.append(test1[0])
y = y + 1
vetor_analise.append(min(melhores))
m = m + 1
#plota(melhores)
    print(m)
    print(vetor_analise)
```
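For context, `blend_fator` above implements a BLX-α style blend: each child gene is drawn uniformly from the parents' interval widened by `alfa` times its length on both sides. A minimal standalone sketch of that single step, using the same `alfa = 0.8` (the parent genes below are made-up values, not taken from a run):
```python
from random import uniform

alfa = 0.8  # same blend factor as in ag.py


def blend_gene(p1, p2, alfa):
    """Draw one child gene from the BLX-alpha interval of two parent genes."""
    extrapola = abs(p1 - p2) * alfa  # how far the interval is widened on each side
    menor, maior = min(p1, p2) - extrapola, max(p1, p2) + extrapola
    return uniform(menor, maior)


# Hypothetical parent genes: the interval [-0.5, 1.0] is widened by 1.2 on each
# side, so the child gene lands somewhere in [-1.7, 2.2].
print(blend_gene(-0.5, 1.0, alfa))
```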
#### File: joaovcaetano/Algoritmos-Bioinspirados/mochila_ed.py
```python
from random import randint
from random import uniform
import math
import matplotlib.pyplot as plt
import random
tamanho_populacao = 200
numero_individuos = 8
cr = 0.9
f = 1.0 #0.6
geracoes = 100
melhores = []
individuo = []
def mochila():
produtos = []
produtos.append([11,1])
produtos.append([21,11])
produtos.append([31,21])
produtos.append([33,23])
produtos.append([43,33])
produtos.append([53,43])
produtos.append([55,45])
produtos.append([65,55])
return produtos
def popInicial():
for i in range(0,tamanho_populacao):
individuo.append([])
for j in range(0,numero_individuos):
if(random.random()< 0.5):
individuo[i].append(0)
else:
individuo[i].append(1)
return individuo
def geraPeso(individuo):
peso_individuo = 0
utilidade_mochila = 0
for i in range(0,len(individuo)):
if(individuo[i] == 1):
peso_individuo = peso_individuo + mochila[i][0]
utilidade_mochila = utilidade_mochila + mochila[i][1]
mochila_individuo = []
mochila_individuo.append(peso_individuo)
mochila_individuo.append(utilidade_mochila)
return mochila_individuo
def fitness(individuo):
fit_ind = []
peso_uti = geraPeso(individuo)
if(peso_uti[0] > 100):
if(peso_uti[0] >= 200):
peso_uti[0] = 199
extrapola = peso_uti[0] - 100
porcentagem_extrapolada = float(extrapola) / float(100)
reduz_uti = porcentagem_extrapolada * float(peso_uti[1])
peso_uti[0] = 100
peso_uti[1] = peso_uti[1] - reduz_uti
fit_ind.append(peso_uti)
else:
fit_ind.append(peso_uti)
return fit_ind
def mutacao(individuo):
    # draw three mutually distinct random indices from the population
    x1 = x2 = x3 = 0
    while (x1 == x2) or (x1 == x3) or (x2 == x3):
        x1 = randint(0, tamanho_populacao - 1)
        x2 = randint(0, tamanho_populacao - 1)
        x3 = randint(0, tamanho_populacao - 1)
ind1 = individuo[x1]
ind2 = individuo[x2]
ind3 = individuo[x3]
for i in range(0,numero_individuos):
ind1[i] = ind1[i] +(ind3[i] * ind2[i])
if ind1[i] > 1:
ind1[i] = 1
return ind1
def cruzamento(individuo, pop_inter):
filhos = []
for i in range(0,tamanho_populacao):
filho = []
for j in range(0, numero_individuos):
random = uniform(0.0, 1.0)
if(random < cr):
filho.append(individuo[i][j])
else:
filho.append(pop_inter[i][j])
filhos.append(filho)
return filhos
def prox_geracao(individuo, filhos, fit_pai):
novo_ind = []
for i in range(0, len(individuo)):
fit_filho = fitness(filhos[i])
        if (fit_pai[i][0][1] > fit_filho[0][1]):
novo_ind.append(individuo[i])
else:
novo_ind.append(filhos[i])
return novo_ind
def fit_geral(populacao):
fit_geral = [0] * tamanho_populacao
for i in range(0, len(populacao)):
fit_i = fitness(populacao[i])
fit_geral[i] = fit_i
return fit_geral
mochila = mochila()
individuo = popInicial()
k = 0
melhor = []
fit_melhor = fit_geral(individuo)
while(k<geracoes):
pop_int = []
for i in range(0,tamanho_populacao):
ind_int = mutacao(individuo)
pop_int.append(ind_int)
filhos = cruzamento(individuo, pop_int)
novo_ind = prox_geracao(individuo, filhos, fit_melhor)
util = []
for i in range(0,len(fit_melhor)):
util.append(fit_melhor[i][0][1])
index = util.index(max(util))
melhor.append(max(util))
novo_ind[0] = individuo[index]
k = k + 1
individuo = novo_ind
print(melhor)
``` |
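The `fitness` function above penalizes overweight knapsacks: whatever exceeds the capacity of 100 cuts the utility by the same percentage as the overshoot. A small worked example of that penalty with the same product list (the chosen item mask is illustrative):
```python
# Same (weight, utility) pairs returned by mochila() above.
produtos = [[11, 1], [21, 11], [31, 21], [33, 23],
            [43, 33], [53, 43], [55, 45], [65, 55]]

# Hypothetical individual: carry items 4, 5 and 6 only.
individuo = [0, 0, 0, 0, 1, 1, 1, 0]

peso = utilidade = 0
for escolhido, (p, u) in zip(individuo, produtos):
    if escolhido:
        peso += p          # 43 + 53 + 55 = 151
        utilidade += u     # 33 + 43 + 45 = 121

if peso > 100:                                        # 51 units over the limit
    porcentagem_extrapolada = (peso - 100) / 100      # 0.51
    utilidade -= porcentagem_extrapolada * utilidade  # 121 - 61.71 ≈ 59.29
    peso = 100

print(peso, utilidade)   # 100 59.29 (approximately)
```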
{
"source": "JoaoVicente129/GamestonkTerminal",
"score": 3
} |
#### File: gamestonk_terminal/discovery/disc_controller.py
```python
__docformat__ = "numpy"
import argparse
from typing import List
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import get_flair
from gamestonk_terminal.menu import session
from gamestonk_terminal.discovery import (
alpha_vantage_view,
ark_view,
fidelity_view,
finviz_view,
seeking_alpha_view,
short_interest_view,
simply_wallst_view,
spachero_view,
unusual_whales_view,
yahoo_finance_view,
)
class DiscoveryController:
""" Discovery Controller """
# Command choices
CHOICES = [
"help",
"q",
"quit",
"map",
"sectors",
"gainers",
"losers",
"orders",
"ark_orders",
"up_earnings",
"high_short",
"low_float",
"simply_wallst",
"spachero",
"uwhales",
]
def __init__(self):
"""Constructor"""
self.disc_parser = argparse.ArgumentParser(add_help=False, prog="disc")
self.disc_parser.add_argument(
"cmd",
choices=self.CHOICES,
)
@staticmethod
def print_help():
""" Print help """
print("\nDiscovery Mode:")
print(" help show this discovery menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print("")
print(" map S&P500 index stocks map [Finviz]")
print(" sectors show sectors performance [Alpha Vantage]")
print(" gainers show latest top gainers [Yahoo Finance]")
print(" losers show latest top losers [Yahoo Finance]")
print(" orders orders by Fidelity Customers [Fidelity]")
print(" ark_orders orders by ARK Investment Management LLC")
print(" up_earnings upcoming earnings release dates [Seeking Alpha]")
print(
" high_short show top high short interest stocks of over 20% ratio [www.highshortinterest.com]"
)
print(
" low_float show low float stocks under 10M shares float [www.lowfloat.com]"
)
print(" simply_wallst Simply Wall St. research data [Simply Wall St.]")
print(" spachero great website for SPACs research [SpacHero]")
print(" uwhales good website for SPACs research [UnusualWhales]")
print("")
def switch(self, an_input: str):
"""Process and dispatch input
Returns
-------
True, False or None
False - quit the menu
True - quit the program
None - continue in the menu
"""
(known_args, other_args) = self.disc_parser.parse_known_args(an_input.split())
return getattr(
self, "call_" + known_args.cmd, lambda: "Command not recognized!"
)(other_args)
def call_help(self, _):
"""Process Help command"""
self.print_help()
def call_q(self, _):
"""Process Q command - quit the menu"""
return False
def call_quit(self, _):
"""Process Quit command - quit the program"""
return True
def call_map(self, other_args: List[str]):
"""Process map command"""
finviz_view.map_sp500_view(other_args)
def call_sectors(self, other_args: List[str]):
"""Process sectors command"""
alpha_vantage_view.sectors_view(other_args)
def call_gainers(self, other_args: List[str]):
"""Process gainers command"""
yahoo_finance_view.gainers_view(other_args)
def call_losers(self, other_args: List[str]):
"""Process losers command"""
yahoo_finance_view.losers_view(other_args)
def call_orders(self, other_args: List[str]):
"""Process orders command"""
fidelity_view.orders_view(other_args)
def call_ark_orders(self, other_args: List[str]):
"""Process ark_orders command"""
ark_view.ark_orders_view(other_args)
def call_up_earnings(self, other_args: List[str]):
"""Process up_earnings command"""
seeking_alpha_view.earnings_release_dates_view(other_args)
def call_high_short(self, other_args: List[str]):
"""Process high_short command"""
short_interest_view.high_short_interest_view(other_args)
def call_low_float(self, other_args: List[str]):
"""Process low_float command"""
short_interest_view.low_float_view(other_args)
def call_simply_wallst(self, other_args: List[str]):
"""Process simply_wallst command"""
simply_wallst_view.simply_wallst_view(other_args)
def call_spachero(self, other_args: List[str]):
"""Process spachero command"""
spachero_view.spachero_view(other_args)
def call_uwhales(self, other_args: List[str]):
"""Process uwhales command"""
unusual_whales_view.unusual_whales_view(other_args)
def menu():
"""Discovery Menu"""
disc_controller = DiscoveryController()
disc_controller.call_help(None)
# Loop forever and ever
while True:
# Get input command from user
if session and gtff.USE_PROMPT_TOOLKIT:
completer = NestedCompleter.from_nested_dict(
{c: None for c in disc_controller.CHOICES}
)
an_input = session.prompt(
f"{get_flair()} (disc)> ",
completer=completer,
)
else:
an_input = input(f"{get_flair()} (disc)> ")
try:
process_input = disc_controller.switch(an_input)
if process_input is not None:
return process_input
except SystemExit:
print("The command selected doesn't exist\n")
continue
``` |
{
"source": "joaovicentesouto/INE5416",
"score": 3
} |
#### File: paradigma_funcional/roteiro8/main.py
```python
from vector_operations import *
from matrix_operations import *
def main():
    # Part 1
    v = [1, 2, 3]
    w = [10, 10, 10]
    print("v: ", v)
    print("w: ", w)
    print("\n||v||: ", norma(v))
    print("2 * v: ", multiplyByScalar(v, 2))
    print("v + w: ", additionVectors(v, w))
    print("v . w: ", scalarProduct(v, w))
    print("v x w: ", crossProduct(v, w))
    # Part 2
    A = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
    B = [[3, 3, 2], [1, 3, 1], [3, 3, 1]]
    print("\nMatriz A:\n", A)
    print("\nMatriz B:\n", B)
    print("\nTransposta de A:\n", transpose(A))
    print("\n10 * A:\n", multiplyMatrixByScalar(A, 10))
    print("\nA + B:\n", addMatrices(A, B))
    print("\nA * B:\n", multiplyMatrices(A, B))
    print("\nMatriz A:\n", A)
    print("\nDeterminante de A: ", determinante(A))
    print("\nInversa de A: \n", matrixInversa(A))
if __name__ == "__main__":
main()
```
#### File: paradigma_funcional/roteiro8/matrix_operations.py
```python
def transpose(A):
B = [[0] * len(A) for i in range(len(A[0]))]
for i in range(len(A[0])):
for j in range(len(A)):
B[i][j] = A[j][i]
return B
def multiplyMatrixByScalar(A, scalar):
B = [[] for i in range(len(A))]
for i in range(len(A)):
        B[i] = list(map(lambda x: x * scalar, A[i]))
return B
def addMatrices(A, B):
C = [[0] * len(A[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B)):
C[i][j] = A[i][j] + B[i][j]
return C
def multiplyMatrices(A, B):
C = [[0] * len(B[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
C[i][j] = 0
for k in range(len(B)):
C[i][j] += A[i][k] * B[k][j]
return C
def determinante(A):
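    # Wrap-around diagonal (rule of Sarrus) expansion: only valid for 3x3
    # matrices, which is what roteiro8's main.py uses.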
positive = 0
negative = 0
for k in range(len(A)):
j = k
aux = 1
for i in range(len(A)):
aux = aux * A[i][j]
j = (j+1) % len(A)
positive += aux
i = k
aux = 1
for j in range(len(A)-1, -1, -1):
aux = aux * A[i][j]
i = (i+1) % len(A)
negative += aux
return (positive - negative)
def cofator3x3(i, j, A):
i1 = i2 = j1 = j2 = 0
if (i == 0):
i1 = i+1
i2 = i+2
elif (i == 1):
i1 = i-1
i2 = i+1
else:
i1 = i-2
i2 = i-1
if (j == 0):
j1 = j+1
j2 = j+2
elif (j == 1):
j1 = j-1
j2 = j+1
else:
j1 = j-2
j2 = j-1
return A[i1][j1] * A[i2][j2] - A[i1][j2] * A[i2][j1]
def matrixInversa(A):
Cofatores = [[0] * len(A[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A)):
Cofatores[i][j] = cofator3x3(i, j, A)
Adjunta = transpose(Cofatores)
mult = 1.0/determinante(A)
Inversa = [[0.0] * len(Adjunta[0]) for i in range(len(Adjunta))]
for i in range(len(Adjunta)):
for j in range(len(Adjunta[0])):
Inversa[i][j] = Adjunta[i][j] * mult
return Inversa
``` |
{
"source": "joaovicentesouto/INE5452",
"score": 3
} |
#### File: lists/two/test.py
```python
import functools
def givemaior(m):
def mabs(x, y):
if x < 0: return -(abs(x) % y)
return x % y
def maior(a, b):
if mabs(a, m) == mabs(b, m):
if a == b: return 0
if a % 2 == 0 and b % 2 == 1: return 1
if a % 2 == 0 and b % 2 == 0: return 1 if a > b else -1
return 1 if a < b else -1
if mabs(a, m) > mabs(b, m): return 1
return -1
return maior
while True:
n, k = [int(k) for k in input().split()]
if n == k == 0: break
m = abs(k)
xs = []
for i in range(n):
x = int(input())
xs.append(x)
xs = sorted(xs, key = functools.cmp_to_key(givemaior(m)))
print(str(n) + ' ' + str(k))
for i in range(n):
print(xs[i])
print('0 0')
``` |
{
"source": "joaovicentesouto/template-ine5410-t2",
"score": 4
} |
#### File: joaovicentesouto/template-ine5410-t2/aeroporto.py
```python
from time import sleep
import simulacao as sim
class Pista:
'''
    Definition of a single runway.
'''
def __init__(self, id = -1):
self.nome = "Pista " + str(id)
def overview(self):
return self.nome
class Portao:
'''
    Definition of a single boarding gate.
'''
def __init__(self, id = -1):
self.nome = "Portão " + str(id)
def overview(self):
return self.nome
class Esteira:
'''
    Definition of a single baggage carousel.
'''
def __init__(self, id = -1):
self.quant_avioes = 0
self.nome = "Esteira " + str(id)
def overview(self):
return self.nome + " (" + str(self.quant_avioes) + ")"
class Aeroporto:
'''
    The Aeroporto owns the resources used to serve the airplanes that
    travel through it.
    It is its responsibility to CREATE those resources and to CONTROL
    their allocation/release by the different airplanes.
    Resource creation must follow the global variables defined in
    simulacao.py. You can use them like this: 'sim.quant_pistas'
    Only one instance of the airport may exist (see the end of this
    file); it is already created.
    IMPORTANT: resources must be unique and two airplanes may never use
    the same resource at the same time, apart from special cases such as
    the baggage carousel, which can serve a limited number of airplanes
    at once.
'''
def __init__(self):
self.nome = "Aeroporto"
def overview(self):
n_entrou = sim.contadores["entrando"]
n_aproximando = sim.contadores["aproximando"]
n_pousando = sim.contadores["pousando"]
n_acoplando = sim.contadores["acoplando"]
n_descarregando = sim.contadores["descarregando"]
n_carregando = sim.contadores["carregando"]
n_desacoplando = sim.contadores["desacoplando"]
n_decolando = sim.contadores["decolando"]
n_saindo = sim.contadores["saindo"]
descricao = self.nome + " ("
descricao += str(n_entrou) + ", "
descricao += str(n_aproximando) + ", "
descricao += str(n_pousando) + ", "
descricao += str(n_acoplando) + ", "
descricao += str(n_descarregando) + ", "
descricao += str(n_carregando) + ", "
descricao += str(n_desacoplando) + ", "
descricao += str(n_decolando) + ", "
descricao += str(n_saindo) + ")"
return descricao
# Global variable that stores the Aeroporto object.
# Use this variable inside an airplane's functions.
aeroporto = Aeroporto()
``` |
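The class docstring above leaves the actual bookkeeping to the student. One common way to satisfy the "two airplanes never use the same resource" rule is to guard each pool with a counting semaphore plus a lock, sized from the globals in simulacao.py. A rough sketch of that idea, assuming `sim.quant_pistas` exists as the docstring says (the count below is a placeholder, not part of the template):
```python
import threading

quant_pistas = 3  # placeholder for sim.quant_pistas

# Pool of free runway ids; the semaphore counts free slots, the lock protects the list.
pistas_livres = list(range(quant_pistas))
pool_lock = threading.Lock()
vagas = threading.Semaphore(quant_pistas)


def aloca_pista():
    vagas.acquire()            # blocks while every runway is in use
    with pool_lock:
        return pistas_livres.pop()


def libera_pista(pista_id):
    with pool_lock:
        pistas_livres.append(pista_id)
    vagas.release()
```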
{
"source": "joaovictorfonseca/OptionsPricing",
"score": 3
} |
#### File: dx/derivatives/constant_short_rate.py
```python
from derivatives import get_year_deltas
import numpy as np
class ConstantShortRate:
""" Class to generate constant short rate from a datetime list."""
def __init__(self, name, short_rate):
self.name = name
self.short_rate = short_rate
if short_rate < 0:
raise ValueError('Short rate negative.')
def get_discount_factors(self, date_list, datetime_objects=True):
if datetime_objects is True:
datetime_list = get_year_deltas(date_list)
else:
datetime_list = np.array(date_list)
disc_factor_list = np.exp(self.short_rate * np.sort(-datetime_list))
return np.array((date_list, disc_factor_list)).T
```
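`get_discount_factors` applies continuous discounting, D(t) = exp(-r·t), to each year fraction; the `np.sort(-datetime_list)` step just evaluates the factors from the longest maturity to the shortest. A quick sanity check (the rate and horizons are made up):
```python
import numpy as np

short_rate = 0.05                          # hypothetical 5% constant short rate
year_fractions = np.array([0.0, 0.5, 1.0, 2.0])

discount_factors = np.exp(short_rate * np.sort(-year_fractions))
# exp(-0.05 * t) for t = 2.0, 1.0, 0.5, 0.0:
# [0.9048..., 0.9512..., 0.9753..., 1.0]
print(discount_factors)
```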
#### File: dx/derivatives/plot_option_stats.py
```python
import matplotlib.pyplot as plt
def plot_option_stats(s_list, p_list, d_list, v_list):
plt.figure(figsize=(9, 7))
sub1 = plt.subplot(311)
plt.plot(s_list, p_list, 'ro', label='present value')
plt.plot(s_list, p_list, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(312)
plt.plot(s_list, d_list, 'go', label='Delta')
plt.plot(s_list, d_list, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.ylim(min(d_list) - 0.1, max(d_list) + 0.1)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(313)
plt.plot(s_list, v_list, 'yo', label='Vega')
plt.plot(s_list, v_list, 'b')
plt.xlabel('initial value of underlying')
plt.grid(True)
plt.legend(loc=0)
```
#### File: dx/derivatives/simulation_class.py
```python
import numpy as np
import pandas as pd
class SimulationClass:
""" Providing base methods for simulation classes."""
def __init__(self, name, mar_env):
try:
self.name = name
self.pricing_date = mar_env.pricing_date
self.initial_value = mar_env.get_constant('initial_value')
self.volatility = mar_env.get_constant('volatility')
self.final_date = mar_env.get_constant('final_date')
self.currency = mar_env.get_constant('currency')
self.frequency = mar_env.get_constant('frequency')
self.paths = mar_env.get_constant('paths')
self.discount_curve = mar_env.get_curve('discount_curve')
self.time_grid = mar_env.get_list('time_grid')
self.special_dates = mar_env.get_list('special_dates')
if self.special_dates is None:
self.special_dates = []
self.instrument_values = None
except Exception as error:
print(f"Error parsing market environment.{error}")
def generate_time_grid(self):
start = self.pricing_date
end = self.final_date
# pandas date_range function
time_grid = pd.date_range(start=start, end=end, freq=self.frequency).to_pydatetime()
time_grid = list(time_grid)
# enhance time_grid by start, end, and special_dates
if start not in time_grid:
time_grid.insert(0, start)
# insert start date if not in list
if end not in time_grid:
time_grid.append(end)
# insert end date if not in list
if len(self.special_dates) > 0:
# add all special dates
time_grid.extend(self.special_dates)
# delete duplicates
time_grid = list(set(time_grid))
# sort list
time_grid.sort()
self.time_grid = np.array(time_grid)
def get_instrument_values(self, fixed_seed=True):
if self.instrument_values is None:
# only initiate simulation if there are no instrument values
self.generate_paths(fixed_seed=fixed_seed, day_count=365.)
elif fixed_seed is False:
# also initiate re-simulation when fixed_seed is False
self.generate_paths(fixed_seed=fixed_seed, day_count=365.)
return self.instrument_values
``` |
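`generate_time_grid` is essentially `pandas.date_range` plus some patching: it forces the pricing date, the final date and any special dates into the grid, then deduplicates and sorts. A condensed sketch of that sequence outside the class (the dates and frequency below are illustrative):
```python
import datetime as dt

import numpy as np
import pandas as pd

start = dt.datetime(2024, 1, 1)
end = dt.datetime(2024, 3, 15)
special_dates = [dt.datetime(2024, 2, 14)]   # e.g. a fixing date that must be on the grid

grid = list(pd.date_range(start=start, end=end, freq='W').to_pydatetime())
if start not in grid:
    grid.insert(0, start)     # a weekly grid may not contain the exact start date
if end not in grid:
    grid.append(end)          # ...nor the exact end date
grid.extend(special_dates)
time_grid = np.array(sorted(set(grid)))   # drop duplicates, restore chronological order

print(time_grid[0], time_grid[-1], len(time_grid))
```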
{
"source": "JoaoVictorLacerda/SafeLanguage",
"score": 2
} |
#### File: SafeLanguage/telas/descript.py
```python
import PySimpleGUI as tela
import os
import platform
var =os.path.expanduser('~')
cwd = os.getcwd()
if platform.system() == 'Windows':
img=f'{cwd}\\img\\01.ico'
elif platform.system() =='Linux':
img = f'{cwd}/img/01.png'
def chama(cont):
tela.change_look_and_feel('DarkTeal4')
ini=[
[tela.Text('Seja bem-vindo', font=('Consolas',20) ,text_color='#000000', justification='center', size=(53, 0))],
[tela.Text(' Cole o código que será descriptografado:', font=('Consolas',20),text_color='#000000', justification='center', size=(53, 0))],
[tela.Button('Clique Aqui',size=(27, 1), font=('Consolas', 20),key='bot2',button_color=['#fff','#1a1a1a']), tela.Text(' ←', font=('Consolas', 25),text_color='#33cc33'),tela.Text('Para Criptografar', font=('Consolas', 20),text_color='#33cc33')],
[tela.Multiline(cont,font=("Consolas", 12), size=(90, 23), key='codigo1',background_color='#1a1a1a',text_color='#33cc33')],
[tela.Button('Descriptografia', font=('Consolas',14),button_color=['#fff','#1a1a1a']),
tela.Button('Limpar',font=('Consolas',14), key= 'bot_limpar',button_color=['#fff','#1a1a1a']),tela.Button('Colar', font =('Consolas', 14),button_color=['#fff','#1a1a1a'])]
]
return tela.Window(
'Descriptografia',
return_keyboard_events=True,
layout=ini,
location=(250,20),
icon=img,
finalize=True)
def descript(valor):
tela.change_look_and_feel('DarkTeal4')
init =[
[tela.Multiline(valor,size=(90, 23),font=("Consolas", 14),background_color='#000000',text_color='#33cc33')],
[tela.Button('Copiar', font=('Consolas',14 ),button_color=['#fff','#1a1a1a'])]
]
return tela.Window(
'Resultado',
return_keyboard_events=True,
layout=init,
icon=img,
finalize=True )
def pergunta():
tela.change_look_and_feel('DarkTeal4')
init=[
[tela.Text('Deseja salvar sua conta? ',text_color='#33cc33', font=('Consolas, 14'),justification='center')],
[tela.Button('SIM', font=('Consolas, 12'),button_color=['#fff','#1a1a1a']),tela.Button('NÃO', font=('Consolas, 12'),button_color=['#fff','#1a1a1a'])]
]
return tela.Window(
'Salvar',
return_keyboard_events=True,
layout=init,
icon=img,
finalize=True)
``` |
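These functions only build the windows; the caller still has to drive the PySimpleGUI event loop. A minimal sketch of how the decrypt window above would typically be consumed (the event handling shown is an assumption about the missing caller, not code from the repository):
```python
# Assumes the chama() factory from descript.py above is importable.
janela = chama('')                       # build the decrypt window with an empty text box
while True:
    evento, valores = janela.read()      # blocks until a button is pressed or the window closes
    if evento is None:                   # window closed
        break
    if evento == 'Descriptografia':
        codigo = valores['codigo1']      # contents of the Multiline element
        # ... decrypt `codigo` here and show the result with descript(resultado)
        break
janela.close()
```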
{
"source": "joaovictor-loureiro/data-science",
"score": 4
} |
#### File: livro-introducao-a-programacao-com-python/capitulo-8/exercicio8-1.py
```python
def maior(x, y):
if x > y:
print('\nMaior(%d, %d) == %d\n' % (x, y, x))
else:
print('\nMaior(%d, %d) == %d\n' % (x, y, y))
maior(5,6)
maior(2,1)
maior(7,7)
```
#### File: livro-introducao-a-programacao-com-python/capitulo-8/exercicio8-5.py
```python
def pesquisa(lista, valor):
if valor in lista:
n = lista.index(valor)
print('\n%d encontrado na posição %d.\n' % (valor, n))
else:
print('\n%d não encontrado.\n' % valor)
lista = [10, 20, 25, 30]
pesquisa(lista, 25)
pesquisa(lista, 27)
``` |
{
"source": "joaovictorsantos/array",
"score": 3
} |
#### File: array/ArrayComprehension/list_comprehensions.py
```python
def fibo(n):
return n if n <= 1 else (fibo(n-1) + fibo(n-2))
nums = [1,2,3,4,5,6]
[fibo(x) for x in nums]
# [1, 1, 2 ,3 ,5, 8]
[y for x in nums if (y:= fibo(x)) % 2 == 0]
# [2, 8]
```
#### File: array/ArrayComprehension/main.py
```python
import math
from time import sleep
import re
#from classsum import sumAll
def sort_given_list():
arr = []
    num = int(input('How many numbers: '))  # how many values the series will have
    for n in range(num):
        numbers = int(input('Enter number: '))
        arr.append(numbers)  # append each number entered for the series
    arr.sort()
    sleep(0.1)
    print('The sorted given list is: ' + str(arr))
    sm = sum(arr)
    print('The sum of the list is: ' + str(sm))
sort_given_list()
```
#### File: array/QuickSort/quick.py
```python
def quicksort(arr):
if len(arr) < 2:
return arr
else:
pivot = arr[0]
less = [i for i in arr[1:] if i <= pivot]
greater = [i for i in arr[1:] if i > pivot]
return quicksort(less) + [pivot] + quicksort(greater)
print('The sorted given list: ' + str(quicksort([90,7,85,13,71,22,67,36,59,45])))
``` |
{
"source": "joaovictortr/pypicloud",
"score": 2
} |
#### File: pypicloud/access/aws_secrets_manager.py
```python
import boto3
import json
from botocore.exceptions import ClientError
from .base_json import IMutableJsonAccessBackend
from pypicloud.util import get_settings
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
class AWSSecretsManagerAccessBackend(IMutableJsonAccessBackend):
"""
This backend allows you to store all user and package permissions in
AWS Secret Manager
"""
def __init__(self, request=None, secret_id=None, kms_key_id=None,
client=None, **kwargs):
super(AWSSecretsManagerAccessBackend, self).__init__(request, **kwargs)
self.secret_id = secret_id
self.kms_key_id = kms_key_id
self.client = client
self.dirty = False
@classmethod
def configure(cls, settings):
kwargs = super(AWSSecretsManagerAccessBackend, cls).configure(settings)
kwargs['secret_id'] = settings['auth.secret_id']
kwargs['kms_key_id'] = settings.get('auth.kms_key_id')
session = boto3.session.Session(**get_settings(
settings,
'auth.',
region_name=str,
aws_access_key_id=str,
aws_secret_access_key=str,
aws_session_token=str,
profile_name=str,
))
kwargs['client'] = session.client('secretsmanager')
return kwargs
def _get_db(self):
""" Hit a server endpoint and return the json response """
try:
response = self.client.get_secret_value(
SecretId=self.secret_id
)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return {}
elif e.response['Error']['Code'] == 'InvalidRequestException':
raise Exception("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
raise Exception("The request had invalid params:", e)
raise
try:
return json.loads(response['SecretString'])
except JSONDecodeError as e:
raise Exception('Invalid json detected: {}'.format(e))
def _save(self):
if not self.dirty:
self.dirty = True
self.request.tm.get().addAfterCommitHook(self._do_save)
def _do_save(self, succeeded):
""" Save the auth data to the backend """
if not succeeded:
return
kwargs = {
'SecretString': json.dumps(self._db),
}
if self.kms_key_id is not None:
kwargs['KmsKeyId'] = self.kms_key_id
try:
self.client.update_secret(
SecretId=self.secret_id,
**kwargs
)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
self.client.create_secret(
Name=self.secret_id,
**kwargs
)
raise
```
#### File: pypicloud/access/__init__.py
```python
from functools import partial
from pyramid.path import DottedNameResolver
from .aws_secrets_manager import AWSSecretsManagerAccessBackend
from .config import ConfigAccessBackend
from .base import (IAccessBackend, IMutableAccessBackend, get_pwd_context,
DEFAULT_ROUNDS)
from .remote import RemoteAccessBackend
from .sql import SQLAccessBackend
def includeme(config):
""" Configure the app """
settings = config.get_settings()
resolver = DottedNameResolver(__name__)
dotted_name = settings.get('pypi.auth', 'config')
if dotted_name == 'config':
dotted_name = ConfigAccessBackend
elif dotted_name == 'remote':
dotted_name = RemoteAccessBackend
elif dotted_name == 'sql':
dotted_name = SQLAccessBackend
elif dotted_name == 'ldap':
dotted_name = "pypicloud.access.ldap_.LDAPAccessBackend"
elif dotted_name == 'aws_secrets_manager':
dotted_name = AWSSecretsManagerAccessBackend
access_backend = resolver.maybe_resolve(dotted_name)
kwargs = access_backend.configure(settings)
config.add_request_method(partial(access_backend, **kwargs), name='access',
reify=True)
config.add_postfork_hook(partial(access_backend.postfork, **kwargs))
```
#### File: pypicloud/pypicloud/models.py
```python
import re
import pkg_resources
import six
from datetime import datetime
from functools import total_ordering
from .util import normalize_name
@six.python_2_unicode_compatible
@total_ordering
class Package(object):
"""
Representation of a versioned package
Parameters
----------
name : str
The name of the package (will be normalized)
version : str
The version number of the package
filename : str
The name of the package file
last_modified : datetime, optional
The datetime when this package was uploaded (default now)
summary : str, optional
The summary of the package
**kwargs : dict
Metadata about the package
"""
def __init__(self, name, version, filename, last_modified=None,
summary=None, **kwargs):
self.name = normalize_name(name)
self.version = version
self._parsed_version = None
self.filename = filename
if last_modified is not None:
self.last_modified = last_modified
else:
self.last_modified = datetime.utcnow()
self.summary = summary
self.data = kwargs
def get_url(self, request):
""" Create path to the download link """
return request.db.get_url(self)
@property
def parsed_version(self):
""" Parse and cache the version using pkg_resources """
# Use getattr because __init__ isn't called by some ORMs.
if getattr(self, '_parsed_version', None) is None:
self._parsed_version = pkg_resources.parse_version(self.version)
return self._parsed_version
@property
def is_prerelease(self):
""" Returns True if the version is a prerelease version """
return re.match(r'^\d+(\.\d+)*$', self.version) is None
def __hash__(self):
return hash(self.name) + hash(self.version)
def __eq__(self, other):
return self.name == other.name and self.version == other.version
def __lt__(self, other):
return ((self.name, self.parsed_version) <
(other.name, other.parsed_version))
def __repr__(self):
return self.__str__()
def __str__(self):
return u'Package(%s)' % (self.filename)
def __json__(self, request):
return {
'name': self.name,
'filename': self.filename,
'last_modified': self.last_modified,
'version': self.version,
'url': self.get_url(request),
'summary': self.summary,
}
def search_summary(self):
""" Data to return from a pip search """
return {
'name': self.name,
'summary': self.summary or '', # May be None
'version': self.version,
}
```
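The `parsed_version` property exists because version strings do not order correctly as plain text; `pkg_resources.parse_version` provides the PEP 440 ordering that `__lt__` relies on, and the `is_prerelease` regex only accepts plain dotted integers. A small illustration (the version strings are arbitrary):
```python
import re

import pkg_resources

# Plain string comparison gets multi-digit components wrong:
print('1.10' > '1.9')                                     # False
print(pkg_resources.parse_version('1.10') >
      pkg_resources.parse_version('1.9'))                 # True

# is_prerelease: anything that is not plain dotted integers counts as a prerelease.
pattern = r'^\d+(\.\d+)*$'
print(re.match(pattern, '1.2.3') is None)                 # False -> final release
print(re.match(pattern, '1.0b1') is None)                 # True  -> prerelease
```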
#### File: pypicloud/tests/test_scripts.py
```python
from mock import patch
from pypicloud import scripts
from pypicloud.access import get_pwd_context
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
class TestScripts(unittest.TestCase):
""" Tests for commandline scripts """
@patch.object(scripts, 'getpass')
def test_gen_password(self, getpass):
""" Generate a password """
pwd_context = get_pwd_context()
passwds = ['foo', 'foo', 'bar', 'baz']
getpass.getpass.side_effect = passwds.pop
ret = scripts._gen_password()
self.assertEqual(len(passwds), 0)
self.assertTrue(pwd_context.verify('foo', ret))
@patch.object(scripts, '_gen_password')
def test_cli_gen_password(self, genpass):
""" Commandline prints generated password """
scripts.gen_password([])
self.assertTrue(genpass.called)
@patch('pypicloud.scripts.wrapped_input', return_value='')
def test_prompt_default(self, _):
""" If user hits 'enter', return default value """
ret = scripts.prompt('', default='abc')
self.assertEqual(ret, 'abc')
@patch('pypicloud.scripts.wrapped_input')
def test_prompt_no_default(self, stdin):
""" If no default, require a value """
invals = ['', 'foo']
stdin.side_effect = lambda x: invals.pop(0)
ret = scripts.prompt('')
self.assertEqual(ret, 'foo')
@patch('pypicloud.scripts.wrapped_input')
def test_prompt_validate(self, stdin):
""" Prompt user until return value passes validation check """
invals = ['foo', 'bar']
stdin.side_effect = lambda x: invals.pop(0)
ret = scripts.prompt('', validate=lambda x: x == 'bar')
self.assertEqual(ret, 'bar')
@patch('pypicloud.scripts.prompt')
def test_prompt_choice(self, prompt):
""" Prompt the user to choose from a list """
prompt.return_value = 2
ret = scripts.prompt_option('', ['a', 'b', 'c'])
self.assertEqual(ret, 'b')
@patch('pypicloud.scripts.prompt')
def test_prompt_choice_bad_int(self, prompt):
""" Bad ints require user to re-input value """
invals = ['a', 'b', 1]
prompt.side_effect = lambda *_, **__: invals.pop(0)
ret = scripts.prompt_option('', ['a', 'b', 'c'])
self.assertEqual(ret, 'a')
@patch('pypicloud.scripts.prompt')
def test_prompt_choice_index_error(self, prompt):
""" Out-of-range ints require user to re-input value """
invals = [44, 4, 0, -1, 3]
prompt.side_effect = lambda *_, **__: invals.pop(0)
ret = scripts.prompt_option('', ['a', 'b', 'c'])
self.assertEqual(ret, 'c')
@patch('pypicloud.scripts.prompt')
def test_promptyn_yes(self, prompt):
""" Prompt user for y/n user says yes """
prompt.return_value = 'y'
ret = scripts.promptyn('')
self.assertTrue(ret)
@patch('pypicloud.scripts.prompt')
def test_promptyn_no(self, prompt):
""" Prompt user for y/n user says no """
prompt.return_value = 'n'
ret = scripts.promptyn('')
self.assertFalse(ret)
@patch('pypicloud.scripts.prompt')
def test_promptyn_no_default(self, prompt):
""" Prompt user for y/n requires an answer """
invals = ['', '42', 'yeees', 'wat', '1', 'no']
prompt.side_effect = lambda *_, **__: invals.pop(0)
ret = scripts.promptyn('')
self.assertEqual(len(invals), 0)
self.assertFalse(ret)
@patch('pypicloud.scripts.prompt')
def test_promptyn_default(self, prompt):
""" Prompt user for y/n user default on no input """
prompt.return_value = ''
ret = scripts.promptyn('', True)
self.assertTrue(ret)
ret = scripts.promptyn('', False)
self.assertFalse(ret)
def test_bucket_validate(self):
""" Validate bucket name """
ret = scripts.bucket_validate('bucketname')
self.assertTrue(ret)
ret = scripts.bucket_validate('bucket.name')
self.assertTrue(ret)
ret = scripts.bucket_validate('bucketname.')
self.assertFalse(ret)
ret = scripts.bucket_validate('.bucketname')
self.assertFalse(ret)
ret = scripts.bucket_validate('bucket..name')
self.assertFalse(ret)
``` |
{
"source": "joaovitocn/pybar",
"score": 3
} |
#### File: pybar/pybaf/__init__.py
```python
import pandas as pd
import requests
import json
class pybaf():
def __init__(self, key: 'api key str' = None):
if key is None:
raise ValueError('Key must be inserted')
self.api_key = key
    def __version__(self):
        version = '2.0.4'
        print(version)
# checks that will ensure correct variable type is passed
def _check_error(self, df_destination, df_origin, destination_id, origin_id):
self._check_if_df(df_destination)
self._check_if_df(df_origin)
self._check_if_column_in_df(df_destination, destination_id, 'df_destination')
self._check_if_column_in_df(df_origin, origin_id, 'df_origin')
self._check_if_column_in_df(df_destination, 'latitude', 'df_destination')
self._check_if_column_in_df(df_origin, 'latitude', 'df_origin')
self._check_if_column_in_df(df_destination, 'longitude', 'df_destination')
self._check_if_column_in_df(df_origin, 'longitude', 'df_origin')
def _check_if_df(self, df):
if not isinstance(df, pd.DataFrame):
raise ValueError("Value is not a pd.DataFrame")
def _check_if_column_in_df(self, df: 'data frame', column_name: 'Column Name', df_name=None):
if not column_name in df.columns:
print(df.columns)
raise ValueError("Column {} is not on DataFrame : {}".format(str(column_name), df_name))
# transforms data received from bing to pd.DataFrame()
def _data_to_df(self, data):
number = len(data['resourceSets'][0]['resources'][0]['results'])
df_data = pd.DataFrame(columns=[ self.origin_id,self.destination_id, "distancia", "duracao"])
for x in range(0, (number)):
id_origem = data['resourceSets'][0]['resources'][0]['results'][x]['originIndex']
id_destination = data['resourceSets'][0]['resources'][0]['results'][x]['destinationIndex']
distance = data['resourceSets'][0]['resources'][0]['results'][x]['travelDistance']
duration = data['resourceSets'][0]['resources'][0]['results'][x]['travelDuration']
df_data.loc[x] = [id_origem, id_destination, distance, duration]
df_data.reset_index(inplace=True, drop=True)
return df_data
# get latitude and longitude from pd.DataFrame to API format .
def _get_lat_lon(self, df):
rows = len(df.index)
lat_long = []
lat_long_list = []
for y in range(0, (rows)):
lat = df.iloc[y]['latitude']
lon = df.iloc[y]['longitude']
lat_long.append([lat, lon])
return lat_long
# post request to API
def _post_request(self, payload):
url = 'https://dev.virtualearth.net/REST/v1/Routes/DistanceMatrix?key={}'.format(self.api_key)
# Adding empty header as parameters are being sent in payload
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(payload), headers=headers)
return (json.loads(r.text))
# will split pd.DataFrame to ensure API limit of combinations is respect
def _split_origin_destination(self, origins, destinations,lista_origin):
number = self.api_limit
len_origins = len(origins)
len_destinations = len(destinations)
combinations = len_origins * len_destinations
if combinations < number:
return True, origins, destinations, lista_origin
else:
n_origins = number // len_destinations
teste = [origins[x:(x + n_origins)] for x in range(0, len(origins), n_origins)]
names = [lista_origin[x:(x + n_origins)] for x in range(0, len(lista_origin), n_origins)]
print('loops: {} || origens: {} || destinations: {} || limite ={}'
.format(len(teste), len(origins), len(destinations), number))
return False, teste, destinations,names
# create text that will be used on api request
def _create_text(self, origins, destinations):
origins_rows = len(origins)
rows = len(destinations)
if self.starttime == 0:
if len(origins) == 1:
teste3 = {
"origins": [{
"latitude": origins[0][0],
"longitude": origins[0][1]
}
],
"destinations": [{
"latitude": destinations[0][0],
"longitude": destinations[0][1]
},
],
"travelMode": 'driving',
}
else:
teste3 = {
"origins": [{
"latitude": origins[0][0],
"longitude": origins[0][1]
}
],
"destinations": [{
"latitude": destinations[0][0],
"longitude": destinations[0][1]
},
],
"travelMode": 'driving',
}
for x in range(1, (origins_rows)):
teste3['origins'].append(
{'latitude': origins[x][0],
'longitude': origins[x][1]})
for x in range(1, rows):
teste3['destinations'].append(
{'latitude': destinations[x][0],
'longitude': destinations[x][1]}
)
else:
if len(origins) == 1:
teste3 = {
"origins": [{
"latitude": origins[0][0],
"longitude": origins[0][1]
}
],
"destinations": [{
"latitude": destinations[0][0],
"longitude": destinations[0][1]
},
],
"travelMode": 'driving',
"startTime" : self.starttime,
}
else:
teste3 = {
"origins": [{
"latitude": origins[0][0],
"longitude": origins[0][1]
}
],
"destinations": [{
"latitude": destinations[0][0],
"longitude": destinations[0][1]
},
],
"travelMode": 'driving',
"startTime" : self.starttime,
}
for x in range(1, (origins_rows)):
teste3['origins'].append(
{'latitude': origins[x][0],
'longitude': origins[x][1]})
for x in range(1, rows):
teste3['destinations'].append(
{'latitude': destinations[x][0],
'longitude': destinations[x][1]}
)
return teste3
# attach the api results to the IDs from original pd.DataFrames
def _attach_ids(self, df_final, lista_origin, lista_destination):
for x in range(0, len(lista_destination)):
df_final[self.destination_id] = df_final[self.destination_id].replace(x, lista_destination[x])
for x in range(0, len(lista_origin)):
df_final[self.origin_id] = df_final[self.origin_id].replace(x, lista_origin[x])
return df_final
# distance matrix will return the combinations of distance between df_destination and df_origin
def distance_matrix(self, df_destination, df_origin, destination_id, origin_id,start_time=0,limit = 2500):
self.api_limit = limit
self.destination_id = destination_id
self.origin_id = origin_id
self.starttime = start_time
df_final = pd.DataFrame()
check = self._check_error(df_destination, df_origin, destination_id, origin_id)
lista_destination = list(df_destination[self.destination_id])
lista_origin = list(df_origin[self.origin_id])
origins = [x for x in self._get_lat_lon(df_origin)]
destinations = self._get_lat_lon(df_destination)
var1, origins, destinations,lista_origin = self._split_origin_destination(origins, destinations,lista_origin)
try:
if var1:
payload = self._create_text(origins, destinations)
json_text = self._post_request(payload)
df = self._data_to_df(json_text)
df_final = df_final.append(df)
df_final = self._attach_ids(df_final, lista_origin, lista_destination)
else:
for origin in origins:
print('loop number {}'.format(origins.index(origin)))
payload = self._create_text(origin, destinations)
json_text = self._post_request(payload)
df = self._data_to_df(json_text)
df_final = df_final.append(df)
origin_name = lista_origin[origins.index(origin)]
df_final = self._attach_ids(df_final, origin_name, lista_destination)
except:
print("An error has eccured, here's the json output for debug")
print(json_text)
return df_final
# n_smallest will return the n smallest of calculated distance matrix
def n_smallest(self, df_final, origin_id: 'Origin id', destination_id: 'Destination Id',
value: 'value that will be considered', n=1):
self._check_if_df(df_final)
if value is None or origin_id is None or destination_id is None:
raise ValueError('id and value must be column names')
self._check_if_column_in_df(df_final, origin_id)
self._check_if_column_in_df(df_final, value)
df_final = df_final.sort_values(by=origin_id)
df_groupby = df_final.groupby([origin_id, destination_id])[value].nsmallest(n).reset_index()
df_groupby = df_groupby.drop(columns='level_2')
        result = pd.merge(df_groupby, df_final, on=[origin_id, value], how='inner')
        return result
``` |
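Putting the class together: both DataFrames need `latitude`/`longitude` columns plus an id column, and `distance_matrix` returns one row per origin-destination pair with `distancia` and `duracao` columns. A usage sketch (the API key and coordinates are placeholders):
```python
import pandas as pd
# from pybaf import pybaf   # assuming the module above is importable as a package

origins = pd.DataFrame({
    'store_id': ['A', 'B'],
    'latitude': [-23.561, -23.570],
    'longitude': [-46.655, -46.649],
})
destinations = pd.DataFrame({
    'client_id': [1, 2, 3],
    'latitude': [-23.550, -23.565, -23.580],
    'longitude': [-46.633, -46.640, -46.660],
})

baf = pybaf(key='YOUR-BING-MAPS-KEY')          # placeholder key
matriz = baf.distance_matrix(
    df_destination=destinations,
    df_origin=origins,
    destination_id='client_id',
    origin_id='store_id',
)
# Closest client to each store by travel time:
mais_proximos = baf.n_smallest(matriz, 'store_id', 'client_id', 'duracao', n=1)
```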
{
"source": "joaovitor123jv/rontext",
"score": 2
} |
#### File: rontext/filesystem_listener/localization.py
```python
import time
import threading
import subprocess
import settings
import database
import helpers
def point_inside_circle(point, circle):
a = (point["latitude"] - circle["latitude"]) * (point["latitude"] - circle["latitude"])
b = (point["longitude"] - circle["longitude"]) * (point["longitude"] - circle["longitude"])
precision = float(settings.loaded["localization_precision"])
return (a + b) < (precision * precision)
def already_in_database(cursor, localization):
stored_localizations = database.get_localizations(cursor)
if stored_localizations != None:
for stored_localization in stored_localizations:
temp_localization = {
'latitude': stored_localization[1],
'longitude': stored_localization[2]
}
if point_inside_circle(localization, temp_localization):
return True
return False
def listener():
# print("**** SIDE_THREAD ID == ", threading.get_ident())
time.sleep(1)
connection = database.connect()
cursor = connection.cursor()
while True:
return_data = subprocess.run([settings.loaded['localization_bin']], stdout=subprocess.PIPE)
parsed_return = helpers.parse_yaml_string(return_data.stdout.decode('utf8'))
settings.add_runtime('localization', parsed_return)
if already_in_database(cursor, settings.runtime['localization']):
# print("Localization already in database, skipping")
pass
else:
# print("Localization is not in database, inserting")
with connection:
database.store_localization(cursor, settings.runtime['localization'])
        time.sleep(settings.loaded['localization_plugin_wait_time'])  # wait the configured interval before the next localization check
def start_plugin():
try:
thread = threading.Thread(target=listener)
thread.start()
except:
print("Failed to start localization plugin")
```
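`point_inside_circle` works directly in decimal degrees: a reading is considered already known when its squared degree distance to a stored point is below `localization_precision` squared (0.001 degrees is roughly 100 m near the equator). A tiny worked check with illustrative coordinates:
```python
precision = 0.001   # illustrative value for settings.loaded["localization_precision"]

stored = {"latitude": -27.6000, "longitude": -48.5000}
probe = {"latitude": -27.6004, "longitude": -48.5006}

a = (probe["latitude"] - stored["latitude"]) ** 2      # (0.0004)^2 = 1.6e-07
b = (probe["longitude"] - stored["longitude"]) ** 2    # (0.0006)^2 = 3.6e-07
print((a + b) < precision ** 2)                        # 5.2e-07 < 1.0e-06 -> True
```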
#### File: rontext/filesystem_listener/operations.py
```python
import subprocess
import settings
import time_parser
import time
import database
import helpers
import os
def isProjectRelated(path):
return (
path.startswith( os.environ['HOME'] + "/Documentos/UFG-CDC/PFC/PFC2/Sistema" )
or
path.startswith( os.environ['HOME'] + "/.ctxt_search-")
)
def shouldIgnore(path):
if isProjectRelated(path):
return True
for ignored_substring in settings.loaded['ignore_occurrences']:
if ignored_substring in path:
return True
return False
def created_something(type_names, path):
return True if (type_names[0] in ["IN_MOVED_TO", "IN_CREATE"]) and not shouldIgnore(path) else False
def accessed_something(type_names, path):
return True if (type_names[0] == "IN_OPEN") and not shouldIgnore(path) else False
def deleted_something(type_names, path):
return True if (type_names[0] in ["IN_MOVED_FROM", "IN_DELETE"]) and not shouldIgnore(path) else False
def store_events(connection, events):
if events != None:
normalized_events = []
for event in events:
normalized_events.append((
time_parser.convert_to_python_datetime(event[':start']),
time_parser.convert_to_python_datetime(event[':end']),
event[':summary']
))
database.store_events(connection, normalized_events)
def call_ics_plugin(connection, file):
print("PARSING ICS FILE: ", file)
return_data = subprocess.run([settings.loaded['ics_parser_bin'], file], stdout=subprocess.PIPE)
parsed_return = helpers.parse_yaml_string(return_data.stdout.decode('utf8'))
with connection:
store_events(connection, parsed_return)
print("FILE PARSED")
def handle_access(path, filename):
file = path + '/' + filename
if filename == 'START':
print("Starting time monitoring")
settings.add_runtime('start_timestamp', time.time())
if os.path.isfile(file):
file_id = database.store_file(file)
def handle_file_created(connection, path, filename):
file = path + '/' + filename
if filename.endswith('.ics'):
# print("ICS File detected")
call_ics_plugin(connection, file)
elif file == (settings.loaded['database'] + '-journal'):
return
else:
file_id = database.store_file(file)
def handle_file_deleted(connection, path, filename):
file = path + '/' + filename
if(filename.endswith('.ics')):
print("File calendar deleted: ", file)
# callIcsPlugin(file)
elif file == (settings.loaded['database'] + '-journal'):
return
else:
# print("Deleted file ", file)
database.delete_file_reference(connection.cursor(), file)
```
#### File: rontext/localization/settings.py
```python
import os
def init():
global loaded
loaded = {}
global runtime
runtime = {}
global default
default = {
'use_mock': True,
'mock': {
'actual_localization': {
'latitude': 50.21, # RANGE: -90 ~ 90
'longitude': -51.11 # RANGE: -180 ~ 180
},
'precision': 0.001 # In decimal degrees (see table below). Precision relative to neighborhood, street
}
}
# REFERENCE: Decimal degrees -> precision at equator -> precision at 23N/S (mexico, cuba...)
# 1.0 111.32km 102.47km
# 0.1 11.132km 10.247km
# 0.01 1.1132km 1.0247km
# 0.001 111.32m 102.47m
``` |
{
"source": "joaovitor2020/sadchat",
"score": 3
} |
#### File: joaovitor2020/sadchat/buffer.py
```python
import state
# Keyboard buffer module.
buffer = ""
def get_buffer():
global buffer
return buffer
def set_buffer(new):
global buffer
buffer = new
    process_buffer()  # "Fixes" the buffer contents (accented characters)
state.update_buffer()
def backspace():
global buffer
set_buffer(buffer[0: state.get("cursor_position")-1] + buffer[state.get("cursor_position"):])
state.dec_cursor_position()
def append(char):
global buffer
set_buffer(buffer[0: state.get("cursor_position")] + char + buffer[state.get("cursor_position"):])
state.inc_cursor_position()
def clear_buffer():
global buffer
set_buffer("")
state.zero_cursor_position()
# Fixes the buffer by replacing sequences such as
# "~a", "~o", "`a", etc. with "ã", "õ", "à"
def process_buffer():
global buffer
equivalences = [
["~a", "ã"],
["´a", "á"],
["`a", "ã"],
["^a", "â"],
["¨a", "ä"],
["~A", "Ã"],
["´A", "Á"],
["`A", "À"],
["^A", "Â"],
["¨A", "Ä"],
["~e", "ẽ"],
["´e", "é"],
["`e", "è"],
["^e", "ê"],
["¨e", "ë"],
["~E", "Ẽ"],
["´E", "É"],
["`E", "È"],
["^E", "Ê"],
["¨E", "Ë"],
["~i", "ĩ"],
["´i", "í"],
["`i", "ì"],
["^i", "î"],
["¨i", "ï"],
["~I", "Ĩ"],
["´I", "Í"],
["`I", "Ì"],
["^I", "Î"],
["¨I", "Ï"],
["~o", "õ"],
["´o", "ó"],
["`o", "ò"],
["^o", "ô"],
["¨o", "ö"],
["~O", "Õ"],
["´O", "Ó"],
["`O", "Ò"],
["^O", "Ô"],
["¨O", "Ö"],
["~u", "ũ"],
["´u", "ú"],
["`u", "Ù"],
["^u", "û"],
["¨u", "ü"],
["~U", "Ũ"],
["´U", "Ú"],
["`U", "Ù"],
["^U", "Û"],
["¨U", "Ü"]
]
# processed_buffer = buffer
for equivalence in equivalences:
buffer = buffer.replace(equivalence[0], equivalence[1])
# return processed_buffer
```
#### File: joaovitor2020/sadchat/hmac.py
```python
import hashlib
md5_block_size = hashlib.md5().block_size
ENCODING = 'latin1'
INNER = 0
OUTER = 1
def resize_string(s):
m = md5_block_size - len(s)
for t in range(0,m):
s+= chr(0)
return s
def xor_pad(key,pad):
if pad not in [INNER, OUTER]: raise Exception('Param pad must be either INNER or OUTER')
x = 0
xored_bytes = []
for c in key:
y = (0x36 if pad == INNER else 0x5c)
x = ord(c)^ y
xored_bytes.append(x)
return xored_bytes
def convert_int_char(v):
result = ""
for l in range(0,len(v)):
caracter = chr(v[l])
result += caracter
return result
def hmac_md5(key,text):
if(len(key) < md5_block_size):
key = resize_string(key)
result1 = convert_int_char(xor_pad(key,INNER))
result2 = result1 + text
chave1 = hashlib.md5(result2.encode(ENCODING))
result3 = convert_int_char(xor_pad(key,OUTER))
chave2 = hashlib.md5(result3.encode(ENCODING) + chave1.digest())
return (chave2).digest()
def verify_hmac(key,text,expected_hash):
return hmac_md5(key,text) == expected_hash
``` |
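This is the textbook HMAC construction (inner pad 0x36, outer pad 0x5c, MD5 block size of 64 bytes), so for short latin-1 keys its output should match the standard library. A sanity-check sketch, written standalone here since the module's own file name (hmac.py) would shadow the stdlib `hmac` import:
```python
import hashlib
import hmac as std_hmac

ENCODING = 'latin1'
key, text = 'segredo', 'mensagem de teste'   # illustrative inputs

# Reference HMAC-MD5 from the standard library:
reference = std_hmac.new(key.encode(ENCODING), text.encode(ENCODING), hashlib.md5).digest()

# The same construction spelled out the way hmac_md5 above does it:
padded = key.encode(ENCODING).ljust(hashlib.md5().block_size, b'\x00')
inner = hashlib.md5(bytes(b ^ 0x36 for b in padded) + text.encode(ENCODING)).digest()
outer = hashlib.md5(bytes(b ^ 0x5c for b in padded) + inner).digest()

print(outer == reference)   # True
```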
{
"source": "joaovitor3/readthedocs.org",
"score": 2
} |
#### File: rtd_tests/tests/test_doc_building.py
```python
from itertools import zip_longest
import os
import tempfile
import uuid
from unittest import mock
from unittest.mock import Mock, PropertyMock, patch
import pytest
from django.test import TestCase, override_settings
from django_dynamic_fixture import get
from docker.errors import APIError as DockerAPIError
from readthedocs.builds.models import Version
from readthedocs.doc_builder.config import load_yaml_config
from readthedocs.doc_builder.environments import (
BuildCommand,
DockerBuildCommand,
DockerBuildEnvironment,
LocalBuildEnvironment,
)
from readthedocs.doc_builder.exceptions import BuildAppError
from readthedocs.doc_builder.python_environments import Conda, Virtualenv
from readthedocs.projects.models import Project
from readthedocs.rtd_tests.mocks.paths import fake_paths_lookup
from readthedocs.rtd_tests.tests.test_config_integration import create_load
DUMMY_BUILD_ID = 123
SAMPLE_UNICODE = 'HérÉ îß sömê ünïçó∂é'
SAMPLE_UTF8_BYTES = SAMPLE_UNICODE.encode('utf-8')
# TODO: these tests need to be re-written to make usage of the Celery handlers
# properly to check not recorded/recorded as success. For now, they are
# minimally updated to keep working, but they could be improved.
class TestLocalBuildEnvironment(TestCase):
@patch('readthedocs.doc_builder.environments.api_v2')
def test_command_not_recorded(self, api_v2):
build_env = LocalBuildEnvironment()
with build_env:
build_env.run('true', record=False)
self.assertEqual(len(build_env.commands), 0)
api_v2.command.post.assert_not_called()
@patch('readthedocs.doc_builder.environments.api_v2')
def test_record_command_as_success(self, api_v2):
project = get(Project)
build_env = LocalBuildEnvironment(
project=project,
build={
'id': 1,
},
)
with build_env:
build_env.run('false', record_as_success=True)
self.assertEqual(len(build_env.commands), 1)
command = build_env.commands[0]
self.assertEqual(command.exit_code, 0)
api_v2.command.post.assert_called_once_with({
'build': mock.ANY,
'command': command.get_command(),
'output': command.output,
'exit_code': 0,
'start_time': command.start_time,
'end_time': command.end_time,
})
# TODO: translate these tests into
# `readthedocs/projects/tests/test_docker_environment.py`. I've started the
# work there but it requires a good amount of work to mock it properly and
# reliably. I think we can skip these tests (3) for now since we are raising
# BuildAppError on these cases which we are already handling in other test
# cases.
#
# Once we mock the DockerBuildEnvironment properly, we could also translate the
# new tests from `readthedocs/projects/tests/test_build_tasks.py` to use this
# mocks.
@pytest.mark.skip
class TestDockerBuildEnvironment(TestCase):
"""Test docker build environment."""
fixtures = ['test_data', 'eric']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version, bulk=False)
def test_container_already_exists(self):
"""Docker container already exists."""
self.mocks.configure_mock(
'docker_client', {
'inspect_container.return_value': {'State': {'Running': True}},
'exec_create.return_value': {'Id': b'container-foobar'},
'exec_start.return_value': b'This is the return',
'exec_inspect.return_value': {'ExitCode': 0},
},
)
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
def _inner():
with build_env:
build_env.run('echo', 'test', cwd='/tmp')
self.assertRaises(BuildAppError, _inner)
self.assertEqual(self.mocks.docker_client.exec_create.call_count, 0)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The build failed before executing any command
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': '',
'exit_code': 1,
'length': 0,
'error': 'A build environment is currently running for this version',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
def test_container_timeout(self):
"""Docker container timeout and command failure."""
response = Mock(status_code=404, reason='Container not found')
self.mocks.configure_mock(
'docker_client', {
'inspect_container.side_effect': [
DockerAPIError(
'No container found',
response,
'No container found',
),
{'State': {'Running': False, 'ExitCode': 42}},
],
'exec_create.return_value': {'Id': b'container-foobar'},
'exec_start.return_value': b'This is the return',
'exec_inspect.return_value': {'ExitCode': 0},
},
)
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
build_env.run('echo', 'test', cwd='/tmp')
self.assertEqual(self.mocks.docker_client.exec_create.call_count, 1)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The command was saved
command = build_env.commands[0]
self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
'build': DUMMY_BUILD_ID,
'command': command.get_command(),
'description': command.description,
'output': command.output,
'exit_code': 0,
'start_time': command.start_time,
'end_time': command.end_time,
})
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': '',
'exit_code': 1,
'length': 0,
'error': 'Build exited due to time out',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
# NOTE: these tests should be migrated to not use `LocalBuildEnvironment`
# behind the scenes and mock the execution of the command itself by using
# `DockerBuildEnvironment`.
#
# They should be merged with the following test suite `TestDockerBuildCommand`.
#
# Also note that we require a Docker setting here for the tests to pass, but we
# are not using Docker at all.
@override_settings(RTD_DOCKER_WORKDIR='/tmp')
class TestBuildCommand(TestCase):
"""Test build command creation."""
def test_command_env(self):
"""Test build command env vars."""
env = {'FOOBAR': 'foobar', 'BIN_PATH': 'foobar'}
cmd = BuildCommand('echo', environment=env)
for key in list(env.keys()):
self.assertEqual(cmd._environment[key], env[key])
def test_result(self):
"""Test result of output using unix true/false commands."""
cmd = BuildCommand('true')
cmd.run()
self.assertTrue(cmd.successful)
cmd = BuildCommand('false')
cmd.run()
self.assertTrue(cmd.failed)
def test_missing_command(self):
"""Test missing command."""
path = os.path.join('non-existant', str(uuid.uuid4()))
self.assertFalse(os.path.exists(path))
cmd = BuildCommand(path)
cmd.run()
self.assertEqual(cmd.exit_code, -1)
# There is no stacktrace here.
self.assertIsNone(cmd.output)
self.assertIsNone(cmd.error)
def test_output(self):
"""Test output command."""
cmd = BuildCommand(['/bin/bash', '-c', 'echo -n FOOBAR'])
        # Mock BuildCommand.sanitize_output just to count the number of calls,
        # but keep the original behavior via side_effect
original_sanitized_output = cmd.sanitize_output
with patch('readthedocs.doc_builder.environments.BuildCommand.sanitize_output') as sanitize_output: # noqa
sanitize_output.side_effect = original_sanitized_output
cmd.run()
self.assertEqual(cmd.output, 'FOOBAR')
# Check that we sanitize the output
self.assertEqual(sanitize_output.call_count, 2)
def test_error_output(self):
"""Test error output from command."""
cmd = BuildCommand(['/bin/bash', '-c', 'echo -n FOOBAR 1>&2'])
cmd.run()
self.assertEqual(cmd.output, 'FOOBAR')
self.assertIsNone(cmd.error)
def test_sanitize_output(self):
cmd = BuildCommand(['/bin/bash', '-c', 'echo'])
checks = (
(b'Hola', 'Hola'),
(b'H\x00i', 'Hi'),
(b'H\x00i \x00\x00\x00You!\x00', 'Hi You!'),
)
for output, sanitized in checks:
self.assertEqual(cmd.sanitize_output(output), sanitized)
@patch('subprocess.Popen')
def test_unicode_output(self, mock_subprocess):
"""Unicode output from command."""
mock_process = Mock(**{
'communicate.return_value': (SAMPLE_UTF8_BYTES, b''),
})
mock_subprocess.return_value = mock_process
cmd = BuildCommand(['echo', 'test'], cwd='/tmp/foobar')
cmd.run()
self.assertEqual(
cmd.output,
'H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9',
)
# TODO: translate these tests once we have DockerBuildEnvironment properly
# mocked. These can be done together with `TestDockerBuildEnvironment`.
@pytest.mark.skip
class TestDockerBuildCommand(TestCase):
"""Test docker build commands."""
def test_wrapped_command(self):
"""Test shell wrapping for Docker chdir."""
cmd = DockerBuildCommand(
['pip', 'install', 'requests'],
cwd='/tmp/foobar',
)
self.assertEqual(
cmd.get_wrapped_command(),
"/bin/sh -c 'pip install requests'",
)
cmd = DockerBuildCommand(
['python', '/tmp/foo/pip', 'install', 'Django>1.7'],
cwd='/tmp/foobar',
bin_path='/tmp/foo',
)
self.assertEqual(
cmd.get_wrapped_command(),
(
'/bin/sh -c '
"'PATH=/tmp/foo:$PATH "
r"python /tmp/foo/pip install Django\>1.7'"
),
)
def test_unicode_output(self):
"""Unicode output from command."""
self.mocks.configure_mock(
'docker_client', {
'exec_create.return_value': {'Id': b'container-foobar'},
'exec_start.return_value': SAMPLE_UTF8_BYTES,
'exec_inspect.return_value': {'ExitCode': 0},
},
)
cmd = DockerBuildCommand(['echo', 'test'], cwd='/tmp/foobar')
cmd.build_env = Mock()
cmd.build_env.get_client.return_value = self.mocks.docker_client
type(cmd.build_env).container_id = PropertyMock(return_value='foo')
cmd.run()
self.assertEqual(
cmd.output,
'H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9',
)
self.assertEqual(self.mocks.docker_client.exec_start.call_count, 1)
self.assertEqual(self.mocks.docker_client.exec_create.call_count, 1)
self.assertEqual(self.mocks.docker_client.exec_inspect.call_count, 1)
def test_command_oom_kill(self):
"""Command is OOM killed."""
self.mocks.configure_mock(
'docker_client', {
'exec_create.return_value': {'Id': b'container-foobar'},
'exec_start.return_value': b'Killed\n',
'exec_inspect.return_value': {'ExitCode': 137},
},
)
cmd = DockerBuildCommand(['echo', 'test'], cwd='/tmp/foobar')
cmd.build_env = Mock()
cmd.build_env.get_client.return_value = self.mocks.docker_client
type(cmd.build_env).container_id = PropertyMock(return_value='foo')
cmd.run()
self.assertIn(
'Command killed due to timeout or excessive memory consumption\n',
str(cmd.output),
)
class TestPythonEnvironment(TestCase):
def setUp(self):
self.project_sphinx = get(Project, documentation_type='sphinx')
self.version_sphinx = get(Version, project=self.project_sphinx)
self.project_mkdocs = get(Project, documentation_type='mkdocs')
self.version_mkdocs = get(Version, project=self.project_mkdocs)
self.build_env_mock = Mock()
self.base_requirements = [
'mock',
'pillow',
'alabaster',
]
self.base_conda_requirements = [
'mock',
'pillow',
]
self.pip_install_args = [
mock.ANY, # python path
'-m',
'pip',
'install',
'--upgrade',
'--no-cache-dir',
]
def assertArgsStartsWith(self, args, call):
"""
Assert that each element of the mock call's positional args starts
with the corresponding element of ``args``.
"""
args_mock, _ = call
for arg, arg_mock in zip_longest(args, args_mock):
if arg is not mock.ANY:
self.assertIsNotNone(arg_mock)
self.assertTrue(arg_mock.startswith(arg))
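# Illustrative (hypothetical) example: if the mocked run() was invoked as
# run('/venv/bin/python3.8', '-m', 'pip', 'install'), then
# self.assertArgsStartsWith([mock.ANY, '-m', 'pip', 'install'], calls[0])
# passes, since every actual argument starts with the expected prefix
# (mock.ANY entries are skipped).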
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_core_requirements_sphinx(self, checkout_path):
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
python_env = Virtualenv(
version=self.version_sphinx,
build_env=self.build_env_mock,
)
python_env.install_core_requirements()
requirements_sphinx = [
'commonmark',
'recommonmark',
'sphinx',
'sphinx-rtd-theme',
'readthedocs-sphinx-ext',
]
self.assertEqual(self.build_env_mock.run.call_count, 2)
calls = self.build_env_mock.run.call_args_list
core_args = self.pip_install_args + ['pip', 'setuptools<58.3.0']
self.assertArgsStartsWith(core_args, calls[0])
requirements = self.base_requirements + requirements_sphinx
args = self.pip_install_args + requirements
self.assertArgsStartsWith(args, calls[1])
@mock.patch('readthedocs.doc_builder.config.load_config')
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_core_requirements_sphinx_system_packages_caps_setuptools(self, checkout_path, load_config):
config_data = {
'python': {
'use_system_site_packages': True,
},
}
load_config.side_effect = create_load(config_data)
config = load_yaml_config(self.version_sphinx)
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
python_env = Virtualenv(
version=self.version_sphinx,
build_env=self.build_env_mock,
config=config,
)
python_env.install_core_requirements()
requirements_sphinx = [
'commonmark',
'recommonmark',
'sphinx',
'sphinx-rtd-theme',
'readthedocs-sphinx-ext',
'setuptools<58.3.0',
]
self.assertEqual(self.build_env_mock.run.call_count, 2)
calls = self.build_env_mock.run.call_args_list
core_args = self.pip_install_args + ['pip', 'setuptools<58.3.0']
self.assertArgsStartsWith(core_args, calls[0])
requirements = self.base_requirements + requirements_sphinx
args = self.pip_install_args + ['-I'] + requirements
self.assertArgsStartsWith(args, calls[1])
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_core_requirements_mkdocs(self, checkout_path):
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
python_env = Virtualenv(
version=self.version_mkdocs,
build_env=self.build_env_mock,
)
python_env.install_core_requirements()
requirements_mkdocs = [
'commonmark',
'recommonmark',
'mkdocs',
]
self.assertEqual(self.build_env_mock.run.call_count, 2)
calls = self.build_env_mock.run.call_args_list
core_args = self.pip_install_args + ['pip', 'setuptools<58.3.0']
self.assertArgsStartsWith(core_args, calls[0])
requirements = self.base_requirements + requirements_mkdocs
args = self.pip_install_args + requirements
self.assertArgsStartsWith(args, calls[1])
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_user_requirements(self, checkout_path):
"""
If a project does not specify a requirements file,
RTD will choose one automatically,
first searching under the docs/ directory and then under the root.
The files can be named as:
- ``pip_requirements.txt``
- ``requirements.txt``
"""
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
self.build_env_mock.project = self.project_sphinx
self.build_env_mock.version = self.version_sphinx
python_env = Virtualenv(
version=self.version_sphinx,
build_env=self.build_env_mock,
)
checkout_path = python_env.checkout_path
docs_requirements = os.path.join(
checkout_path, 'docs', 'requirements.txt',
)
root_requirements = os.path.join(
checkout_path, 'requirements.txt',
)
paths = {
os.path.join(checkout_path, 'docs'): True,
}
args = [
mock.ANY, # python path
'-m',
'pip',
'install',
'--exists-action=w',
'--no-cache-dir',
'-r',
'requirements_file',
]
# One requirements file on the docs/ dir
# should be installed
paths[docs_requirements] = True
paths[root_requirements] = False
with fake_paths_lookup(paths):
python_env.install_requirements()
args[-1] = 'docs/requirements.txt'
self.build_env_mock.run.assert_called_with(
*args, cwd=mock.ANY, bin_path=mock.ANY
)
# One requirements file on the root dir
# should be installed
paths[docs_requirements] = False
paths[root_requirements] = True
with fake_paths_lookup(paths):
python_env.install_requirements()
args[-1] = 'requirements.txt'
self.build_env_mock.run.assert_called_with(
*args, cwd=mock.ANY, bin_path=mock.ANY
)
# Two requirements files on the root and docs/ dirs
# the one on docs/ should be installed
paths[docs_requirements] = True
paths[root_requirements] = True
with fake_paths_lookup(paths):
python_env.install_requirements()
args[-1] = 'docs/requirements.txt'
self.build_env_mock.run.assert_called_with(
*args, cwd=mock.ANY, bin_path=mock.ANY
)
# No requirements file
# no requirements should be installed
self.build_env_mock.run.reset_mock()
paths[docs_requirements] = False
paths[root_requirements] = False
with fake_paths_lookup(paths):
python_env.install_requirements()
self.build_env_mock.run.assert_not_called()
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_core_requirements_sphinx_conda(self, checkout_path):
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
python_env = Conda(
version=self.version_sphinx,
build_env=self.build_env_mock,
)
python_env.install_core_requirements()
conda_sphinx = [
'sphinx',
'sphinx_rtd_theme',
]
conda_requirements = self.base_conda_requirements + conda_sphinx
pip_requirements = [
'recommonmark',
'readthedocs-sphinx-ext',
]
args_pip = [
mock.ANY, # python path
'-m',
'pip',
'install',
'-U',
'--no-cache-dir',
]
args_pip.extend(pip_requirements)
args_conda = [
'conda',
'install',
'--yes',
'--quiet',
'--name',
self.version_sphinx.slug,
]
args_conda.extend(conda_requirements)
self.build_env_mock.run.assert_has_calls([
mock.call(*args_conda, cwd=mock.ANY),
mock.call(*args_pip, bin_path=mock.ANY, cwd=mock.ANY),
])
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_core_requirements_mkdocs_conda(self, checkout_path):
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
python_env = Conda(
version=self.version_mkdocs,
build_env=self.build_env_mock,
)
python_env.install_core_requirements()
conda_requirements = self.base_conda_requirements
pip_requirements = [
'recommonmark',
'mkdocs',
]
args_pip = [
mock.ANY, # python path
'-m',
'pip',
'install',
'-U',
'--no-cache-dir',
]
args_pip.extend(pip_requirements)
args_conda = [
'conda',
'install',
'--yes',
'--quiet',
'--name',
self.version_mkdocs.slug,
]
args_conda.extend(conda_requirements)
self.build_env_mock.run.assert_has_calls([
mock.call(*args_conda, cwd=mock.ANY),
mock.call(*args_pip, bin_path=mock.ANY, cwd=mock.ANY),
])
@patch('readthedocs.projects.models.Project.checkout_path')
def test_install_user_requirements_conda(self, checkout_path):
tmpdir = tempfile.mkdtemp()
checkout_path.return_value = tmpdir
python_env = Conda(
version=self.version_sphinx,
build_env=self.build_env_mock,
)
python_env.install_requirements()
self.build_env_mock.run.assert_not_called()
``` |
{
"source": "JoaoVitorBernardino/Sistema-de-locadora-de-filmes",
"score": 2
} |
#### File: LocadoraDeFilmes/DataBase/dados_aluguel.py
```python
from DataBase.gerenc_arquivos import *
__aluguel_list = list(lerArquivo(ALUGUEL_PATH))
def add_alugar(alugar):
__aluguel_list.append(alugar)
salvar_lista(ALUGUEL_PATH, __aluguel_list)
def novo_aluguel(alugador):
return {
"alugador": alugador,
"aluguel": []
}
def add_aluguel(alugar, aluguel):
alugar["aluguel"].append(aluguel)
```
#### File: LocadoraDeFilmes/DataBase/dados_clientes.py
```python
from DataBase.gerenc_arquivos import *
__clientes_cadastrados = list(lerArquivo(CLIENTES_PATH))
def add_cliente(cliente):
__clientes_cadastrados.append(cliente)
salvar()
def novo_cliente(nome, idade, cpf):
return{
'nome': str(nome),
'idade': int(idade),
'CPF': int(cpf)
}
def salvar():
salvar_lista(CLIENTES_PATH, __clientes_cadastrados)
def get_cliente():
return lerArquivo(CLIENTES_PATH)
def set_cliente(endereco, cliente):
__clientes_cadastrados[endereco] = cliente
salvar()
def apagar(endereco):
__clientes_cadastrados.pop(int(endereco))
salvar()
```
#### File: Sistema-de-locadora-de-filmes/LocadoraDeFilmes/funcoes_dos_filmes.py
```python
import os
from DataBase.dados_filmes import *
from DataBase.dados_aluguel import *
def cadastrar_filme():
os.system('cls' if os.name == 'nt' else 'clear')
nome = input('Digite o nome do filme: ')
ano = input('Digite o ano de lançamento do filme: ')
codigo = input('Digite o código do filme: ')
filmeAlugado = ""
filme = novo_filme(nome, ano, codigo, filmeAlugado)
add_filme(filme)
print(f'O filme {filme} foi cadastrado.')
def mostrar_catalogo():
os.system('cls' if os.name == 'nt' else 'clear')
filmes = get_filme()
for f in filmes:
print('-'*80)
print(f'{f["nome"]} - {f["ano"]} - {f["codigo"]} - {f["filmeAlugado"]}')
print('-'*80)
def ver_posicao_Filme():
i = 0
for pos in get_filme():
print('-'*80)
print(f'Posição {i}º - {pos["nome"]} - {pos["ano"]} - {pos["codigo"]} - {pos["filmeAlugado"]}')
i += 1
print('-' * 80)
def alugar():
alugar = novo_aluguel(str(input("Digite seu nome: ")))
resposta = "Sim"
filme= get_filme()
while resposta == 'sim' or resposta == 'Sim':
os.system('cls' if os.name == 'nt' else 'clear')
ver_posicao_Filme()
endereco = int(input("Digite a posição do filme desejado: "))
if filme[endereco]["filmeAlugado"] != "":
print("Este filme já foi alugado")
else:
filme[endereco]["filmeAlugado"] = alugar["alugador"]
add_aluguel(alugar, filme[endereco])
set_filme(endereco, filme[endereco])
resposta = input("Deseja alugar outro filme ? (Sim ou Não): ")
add_alugar(alugar)
def devolver():
devolver = novo_aluguel(str(input("Digite seu nome: ")))
resposta = "Sim"
filme= get_filme()
while resposta == 'sim' or resposta == 'Sim':
os.system('cls' if os.name == 'nt' else 'clear')
ver_posicao_Filme()
endereco = int(input("Digite a posição do filme desejado: "))
if filme[endereco]["filmeAlugado"] == "":
print("Este filme ainda não foi alugado")
else:
filme[endereco]["filmeAlugado"] = ""
add_aluguel(devolver, filme[endereco])
set_filme(endereco, filme[endereco])
resposta = input("Você possui outro filme para devolver ? (Sim ou Não): ")
add_alugar(devolver)
def tabela_de_preco():
preco_por_quant = {"01 Filme": 15,
"02 Filmes": 26,
"03 Filmes": 33,
"04 Filmes": 42,
"05 Filmes": 50}
print("Tabela de preços da quantidade de filmes alugados: ")
print(preco_por_quant)
def apagar_Filme():
ver_posicao_Filme()
endereco = int(input("Digite a posição do filme desejado: "))
# Assumes DataBase.dados_filmes exposes apagar(endereco), mirroring dados_clientes
apagar(endereco)
``` |
{
"source": "joaovitorblabres/sumo-rl",
"score": 3
} |
#### File: joaovitorblabres/sumo-rl/results.py
```python
import statistics
import copy
import pandas as pd
import argparse
import glob
from matplotlib import pyplot as plt
import os
import numpy as np
def moving_average(interval, window_size):
if window_size == 1:
return interval
window = np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
def bests(results):
sums = []
means = []
mins = []
moving = []
avg = []
params = {}
i = 0
for alpha in results.keys():
for alphaG in results[alpha].keys():
for gamma in results[alpha][alphaG].keys():
for gammaG in results[alpha][alphaG][gamma].keys():
for decay in results[alpha][alphaG][gamma][gammaG].keys():
sums.append(results[alpha][alphaG][gamma][gammaG][decay]['sum'])
mins.append(min(results[alpha][alphaG][gamma][gammaG][decay]['values']))
means.append(results[alpha][alphaG][gamma][gammaG][decay]['mean'])
avg.append(results[alpha][alphaG][gamma][gammaG][decay]['avgs'])
moving.append(sum(results[alpha][alphaG][gamma][gammaG][decay]['values'][-10:])/10)
params[i] = "-".join([str(alpha), str(alphaG), str(gamma), str(gammaG), str(decay)])
# print(alpha, alphaG, gamma, gammaG, decay, results[alpha][alphaG][gamma][gammaG][decay]['mean'], results[alpha][alphaG][gamma][gammaG][decay]['sum'], results[alpha][alphaG][gamma][gammaG][decay]['values'][-20:], i)
i += 1
original = copy.deepcopy(moving)
print(min(moving), moving.index(min(moving)))
moving.sort()
print(moving[:10], [params[original.index(val)] for val in moving[:10]], [original.index(val) for val in moving[:10]])
print(min(sums), params[sums.index(min(sums))])
# print(avg)
print(min(mins), params[mins.index(min(mins))])
print(min(means), params[means.index(min(means))])
prs = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Plot Traffic Signal Metrics""")
prs.add_argument('-f', nargs='+', required=True, help="Measures files\n")
args = prs.parse_args()
results = {}
for file in args.f:
print(file)
for alphas in glob.glob(file+"/*"):
main_df = pd.DataFrame()
params = alphas.split("_")
alpha = params[1].split('/')[-1][5:]
# alpha = params[1].split('/')[-1][5:]
alphaG = 0
# alphaG = params[3][6:]
gamma = params[2][5:]
# gamma = params[2][5:]
gammaG = 0
# gammaG = params[4][6:]
eps = params[3][3:]
# eps = params[5][3:]
print(alpha, gamma, eps, alphaG, gammaG)
if alpha not in results.keys():
results[alpha] = {}
if alphaG not in results[alpha].keys():
results[alpha][alphaG] = {}
if gamma not in results[alpha][alphaG].keys():
results[alpha][alphaG][gamma] = {}
if gammaG not in results[alpha][alphaG][gamma].keys():
results[alpha][alphaG][gamma][gammaG] = {}
if eps not in results[alpha][alphaG][gamma][gammaG].keys():
results[alpha][alphaG][gamma][gammaG][eps] = {'sum':[],'values': [], 'mean':[], 'avgs': []}
for data in glob.glob(alphas+"/*"):
for hora in glob.glob(data+"/*"):
print(hora)
for f in sorted(glob.glob(hora+"/_r*"), key=os.path.getmtime):
df = pd.read_csv(f, sep=',')
if main_df.empty:
main_df = df
else:
main_df = pd.concat((main_df, df))
# all = df.groupby('step_time').sum()['average_wait_time']
all = df.groupby('step_time').sum()['flow']*-1
results[alpha][alphaG][gamma][gammaG][eps]['values'].append(sum(all))
results[alpha][alphaG][gamma][gammaG][eps]['avgs'].append(statistics.mean(all))
results[alpha][alphaG][gamma][gammaG][eps]['mean'].append(statistics.mean(results[alpha][alphaG][gamma][gammaG][eps]['values']))
results[alpha][alphaG][gamma][gammaG][eps]['sum'].append(sum(results[alpha][alphaG][gamma][gammaG][eps]['values']))
# if any([val < 10000 for val in results[alpha][alphaG][gamma][gammaG][eps]['avgs']]):
# meanDF = pd.DataFrame(results[alpha][alphaG][gamma][gammaG][eps]['avgs'][:])
# plt.xlim([-10,910])
# plt.plot(range(0,len(results[alpha][alphaG][gamma][gammaG][eps]['avgs'][:])), moving_average(results[alpha][alphaG][gamma][gammaG][eps]['avgs'][:], 1), 'ro-')
# for i in range(0, 3):
# plt.axvspan((3*i)*100, (1+(3*i))*100-1, facecolor='lightblue', alpha=0.5)
# plt.axvspan((1+(3*i))*100, (2+(3*i))*100-1, facecolor='coral', alpha=0.5)
# plt.axvspan((2+(3*i))*100, (3+(3*i))*100-1, facecolor='lawngreen', alpha=0.5)
# plt.vlines([100-1, 200-1, 300-1, 400-1, 500-1, 600-1, 700-1, 800-1], -1000, min(results[alpha][alphaG][gamma][gammaG][eps]['avgs'])+1000)
# plt.xlabel("Simulated Days")
# plt.ylabel("Average Waited Time")
# plt.title(f)
# plt.show()
# yes = input()
# if yes == 's':
# meanDF.to_csv(hora+"merged.csv")
# results[alpha][alphaG][gamma][gammaG][eps] = {'sum':[],'values': [], 'mean':[], 'avgs': []}
bests(results)
```
#### File: sumo_rl/agents/pql_agent_non.py
```python
import numpy as np
from pygmo import hypervolume
import random
from sumo_rl.exploration.epsilon_greedy import MOSelection
def get_non_dominated(solutions):
is_efficient = np.ones(solutions.shape[0], dtype=bool)
for i, c in enumerate(solutions):
if is_efficient[i]:
# Remove dominated points, will also remove itself
is_efficient[is_efficient] = np.any(solutions[is_efficient] > c, axis=1)
# keep this solution as non-dominated
is_efficient[i] = 1
return solutions[is_efficient]
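# Example (maximization): for solutions [[1, 2], [2, 1], [0, 0]] the point
# [0, 0] is dominated by both others, so the function returns [[1, 2], [2, 1]].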
def compute_hypervolume(q_set, nA, ref):
q_values = np.zeros(nA)
for i in range(nA):
# pygmo uses hv minimization,
# negate rewards to get costs
points = np.array(q_set[i]) * -1.
hv = hypervolume(points)
# use negative ref-point for minimization
q_values[i] = hv.compute(ref*-1)
return q_values
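# Usage sketch (illustrative shapes only): with nA actions and 2 objectives,
#   q_values = compute_hypervolume(q_set, nA, ref=np.array([-10., -10.]))
# returns one hypervolume score per action; larger scores mean the action's
# estimated Q-set covers more of the objective space relative to the reference
# point, and the scores can be used to rank actions during selection.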
class mPQLAgent:
def __init__(self, starting_state, state_space, action_space, ref_point=[-10000, -10000], alpha=0.3, gamma=0.95, exploration_strategy=MOSelection(), groupRecommendation=0.2, number_obejctives=2):
self.state = starting_state
self.state_space = 1
for space in state_space:
self.state_space *= space.n
self.action_space = action_space
self.action = None
self.gamma = gamma
self.alpha = alpha
self.groupAction = None
self.groupActing = False
self.groupRecommendation = groupRecommendation
self.decayGroup = 1
self.minEpsilonGroup = 0.05
self.q_table = {self.state: [[np.zeros(number_obejctives)] for _ in range(action_space.n)]}
self.exploration = exploration_strategy
self.acc_reward = np.zeros(number_obejctives)
self.followed = False
self.nO = number_obejctives
self.non_dominated = [[[np.zeros(number_obejctives)] for _ in range(self.action_space.n)] for _ in range(self.state_space)]
self.avg_r = np.zeros((self.state_space, self.action_space.n, number_obejctives))
self.n_visits = np.zeros((self.state_space, self.action_space.n))
self.ref_point = ref_point
# print(self.state_space, self.action_space.n)
def compute_q_set(self, s):
q_set = []
for a in range(self.action_space.n):
nd_sa = self.non_dominated[s][a]
rew = self.avg_r[s, a]
set = nd_sa * rew
# print(set)
q = random.choice(set)
q_set.append([q + self.alpha*(rew + self.gamma*nd - q) for nd in nd_sa])
return np.array(q_set)
def update_non_dominated(self, s, a, s_n):
q_set_n = self.compute_q_set(s_n)
# update for all actions, flatten
solutions = np.concatenate(q_set_n, axis=0)
# append to current pareto front
# solutions = np.concatenate([solutions, self.non_dominated[s][a]])
# compute pareto front
self.non_dominated[s][a] = get_non_dominated(solutions)
def act(self):
if self.groupActing:
# print(self.groupAction, self.state, self.action_space, self.groupRecommendation)
if self.followGroup:
self.followed = True
self.action = self.groupAction
# print("GROUP", self.action, self.groupAction)
else:
self.followed = False
self.action = self.exploration.choose(self.q_table, self.state, self.action_space)
# print("GREEDY", self.action)
self.groupRecommendation = max(self.groupRecommendation*self.decayGroup, self.minEpsilonGroup)
else:
q_set = self.compute_q_set(self.state)
# q_set = self.q_table[self.state]
# print(self.compute_q_set(self.state), q_set, self.state, self.action_space)
self.action = self.exploration.choose(np.array(q_set), self.state, self.action_space)
return self.action
def learn(self, next_state, reward, done=False):
if next_state not in self.q_table:
self.q_table[next_state] = [np.zeros(self.nO) for _ in range(self.action_space.n)]
s = self.state
s1 = next_state
a = self.action
self.update_non_dominated(s, a, s1)
self.n_visits[s, a] += 1
self.avg_r[s, a] += (reward - self.avg_r[s, a]) / self.n_visits[s, a]
# print(s, a, s1, self.q_table[s], self.compute_q_set(self.state), self.action_space.n, self.q_table[s] + self.alpha*(self.compute_q_set(self.state) - self.q_table[s]))
# self.q_table[s] = np.array([self.q_table[s] + self.alpha*(self.compute_q_set(self.state) - self.q_table[s])])
self.state = s1
self.acc_reward += reward
```
#### File: sumo_rl/agents/ql_agent.py
```python
import numpy as np
import random
from sumo_rl.exploration.epsilon_greedy import EpsilonGreedy
class QLAgent:
def __init__(self, starting_state, state_space, action_space, alpha=0.1, gamma=0.95, exploration_strategy=EpsilonGreedy(), groupRecommendation=0.2):
self.state = starting_state
self.state_space = state_space
self.action_space = action_space
self.action = None
self.alpha = alpha
self.gamma = gamma
self.groupAction = None
self.groupActing = False
self.groupRecommendation = groupRecommendation
self.decayGroup = 1
self.minEpsilonGroup = 0.05
self.q_table = {self.state: [0 for _ in range(action_space.n)]}
self.exploration = exploration_strategy
self.acc_reward = 0
self.followed = False
def act(self):
if self.groupActing:
# print(self.groupAction, self.state, self.action_space, self.groupRecommendation)
if self.followGroup:
self.followed = True
self.action = self.groupAction
# print("GROUP", self.action, self.groupAction)
else:
self.followed = False
self.action = self.exploration.choose(self.q_table, self.state, self.action_space)
# print("GREEDY", self.action)
self.groupRecommendation = max(self.groupRecommendation*self.decayGroup, self.minEpsilonGroup)
else:
self.action = self.exploration.choose(self.q_table, self.state, self.action_space)
return self.action
def learn(self, next_state, reward, done=False):
if next_state not in self.q_table:
self.q_table[next_state] = [random.uniform(0, 0) for _ in range(self.action_space.n)]
s = self.state
s1 = next_state
a = self.action
# print(s, a, s1, self.action_space.n)
self.q_table[s][a] = self.q_table[s][a] + self.alpha*(reward[0] + self.gamma*max(self.q_table[s1]) - self.q_table[s][a])
self.state = s1
self.acc_reward += reward[0]
``` |
{
"source": "JoaoVitorChaves-05/Netflix-bot",
"score": 3
} |
#### File: JoaoVitorChaves-05/Netflix-bot/netflixBot.py
```python
import time
import requests
import pandas as pd
#from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import json
def Catch_User(line):
mail = line.split(' ')
return mail[len(mail) - 1]
def Try_User(user, password):
inputUser = driver.find_element_by_name('userLoginId')
inputPassword = driver.find_element_by_name('password')
inputUser.clear()
inputPassword.clear()
inputUser.send_keys(user)
inputPassword.send_keys(password)
time.sleep(5)
driver.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/div/div[1]/form/button').click()
time.sleep(2)
fileName = '5000x Netflix Premium.txt'
file = open(fileName, 'r', encoding="utf8")
url = 'https://www.netflix.com/br/login?nextpage=https%3A%2F%2Fwww.netflix.com%2Fbrowse'
option = Options()
option.headless = True
driver = webdriver.Chrome("chromedriver.exe")
driver.get(url)
time.sleep(4)
while True:
line = file.readline()
if line.startswith('║ Mail :'):
mail = Catch_User(line)
if line.startswith('║ Şifre :'):
password = Catch_User(line)
Try_User(mail, password)
``` |
{
"source": "JoaoVitorFernandesVitor/Rede_social",
"score": 3
} |
#### File: Rede_social/Button_with_variable/__init__.py
```python
from tkinter import *
from Chat import *
class Bt_contact_chat(Button):
def __init__(self, user, parent,
variable = 0,
text = '',
font = 0,
font_color = 0,
image = 0):
super().__init__()
self['text'] = text
if font != 0:
self['font'] = font
if font_color != 0:
self['fg'] = font_color
if image != 0:
self['image'] = image
self['width'] = 60
def call_chat():
Chat(user, variable).place(in_ = parent, x = 0, y = 0)
self['command'] = call_chat
``` |
{
"source": "JoaoVitorLeite/CodeWars",
"score": 4
} |
#### File: CodeWars/7_kyu/Find_the_divisors!.py
```python
def divisors(integer):
aux = [i for i in range(2, integer) if integer % i == 0]
if len(aux) == 0:
return "{} is prime".format(integer)
else:
return aux
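# For example: divisors(12) -> [2, 3, 4, 6]; divisors(13) -> '13 is prime'.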
```
#### File: CodeWars/7_kyu/Valid_Spacing.py
```python
def valid_spacing(s):
if s == '':
return True
elif len(s.strip()) != len(s):
return False
else:
return not any([len(w.strip()) != len(w) or w == '' for w in s.split(' ')])
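# For example: valid_spacing('Hello world') -> True,
# valid_spacing(' Hello world') -> False (leading space),
# valid_spacing('Hello  world') -> False (the double space yields an empty token).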
```
#### File: CodeWars/8_kyu/Exclamation_marks_series_#_:_Remove_an_exclamation_mark_from_the_end_of_string.py
```python
def remove(s):
return s[:-1] if s.endswith("!") else s
```
#### File: CodeWars/8_kyu/Reversed_Words.py
```python
def reverse_words(s):
return " ".join(reversed(s.split()))
``` |
{
"source": "joaovitorsilvestre/graphene-mongodb",
"score": 3
} |
#### File: tests/fields/test_fields_query.py
```python
from graphene_mongodb import MongoSchema
def test_string_field(schema_builder, mock_person):
p = mock_person(name="John")
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
name
}
}""")
assert not result.errors
assert result.data == {'person': {'name': p.name}}
def test_boolean_field(mock_person, schema_builder):
p = mock_person(active=True)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
active
}
}""")
assert result.data == {'person': {'active': p.active}}
def test_int_field(mock_person, schema_builder):
p = mock_person(age=20)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
age
}
}""")
assert result.data == {'person': {'age': p.age}}
def test_float_field(mock_person, schema_builder):
p = mock_person(score=9.5)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
score
}
}""")
assert result.data == {'person': {'score': p.score}}
def test_datetime_field(mock_person, schema_builder):
from datetime import datetime
birth = datetime(2017, 1, 1)
p = mock_person(birthday=birth).save()
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
birthday
}
}""")
assert result.data == {'person': {'birthday': birth.isoformat()}}
assert result.data == {'person': {'birthday': p.birthday.isoformat()}}
def test_id_field(mock_person, schema_builder):
p = mock_person()
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
id
}
}""")
assert result.data == {'person': {'id': str(p.id)}}
def test_url_field(mock_person, schema_builder):
site_url = "https://github.com/joaovitorsilvestre/MongographQL"
p = mock_person(site_url=site_url)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
siteUrl
}
}""")
assert result.data == {'person': {'siteUrl': p.site_url}}
assert result.data == {'person': {'siteUrl': site_url}}
def test_dict_field(mock_person, schema_builder):
info = {
"author": "João",
"date": "2017-01-01"
}
p = mock_person(book_info=info)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
bookInfo
}
}""")
assert result.data == {'person': {'bookInfo': info}}
assert result.data == {'person': {'bookInfo': p.book_info}}
def test_email_field(mock_person, schema_builder):
p = mock_person(email="<EMAIL>")
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
email
}
}""")
assert result.data == {'person': {'email': p.email}}
def test_long_field(mock_person, schema_builder):
long = pow(2, 63) - 1
p = mock_person(super_id=long)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
superId
}
}""")
assert result.data == {'person': {'superId': long}}
assert result.data == {'person': {'superId': p.super_id}}
def test_decimal_field(mock_person, schema_builder):
p = mock_person(remember_pi=3.14159265359)
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
rememberPi
}
}""")
assert result.data == {'person': {'rememberPi': float(p.remember_pi)}}
def test_binary_field(mock_person, schema_builder):
p = mock_person(nickname=b"<NAME>")
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
nickname
}
}""")
assert result.data == {'person': {'nickname': p.nickname.decode('utf-8')}}
def test_point_field(mock_person, schema_builder):
p = mock_person(location=[29.977291, 31.132493])
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
location
}
}""")
assert result.data == {'person':
{'location': {
"type": "Point",
"coordinates": [29.977291, 31.132493]
}}
}
def test_list_field(mock_person, schema_builder):
p = mock_person(favourite_colors=['blue', 'red'])
p.save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
favouriteColors
}
}""")
assert result.data == {'person': {'favouriteColors': p.favourite_colors}}
def test_list_reference_field(mock_person, mock_post, schema_builder):
post1 = mock_post(text="<NAME>")
post2 = mock_post(text="Say my name")
post1.save()
post2.save()
mock_person(posts=[post1, post2]).save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
posts {
text
}
}
}""")
assert result.data == {'person': {
'posts': [
{"text": post1.text},
{"text": post2.text}
]
}}
def test_reference_field(mock_person, mock_post, schema_builder):
post = mock_post(text="<NAME>")
post.save()
mock_person(best_post=post).save()
PersonSchema = MongoSchema(mock_person)
schema = schema_builder([(PersonSchema, PersonSchema.single)])
result = schema.execute(""" query testQuery {
person {
bestPost {
text
}
}
}""")
assert result.data == {'person': {
'bestPost': {
'text': post.text
}
}}
```
#### File: tests/mutation/test_mutations_query.py
```python
import pytest
def test_gen_mutation_user_mutation_func(schema_builder, mock_person):
from graphene_mongodb import MongoSchema
assert mock_person.objects().count() == 0
def mutation_func(args, context):
PersonSchema._called_custom_mutate = True
p = mock_person(**args)
p.save()
return p
class PersonSchema(MongoSchema):
model = mock_person
mutate = staticmethod(mutation_func)
_called_custom_mutate = False # just to test if the user function mutate was called
schema = schema_builder(mutations=[PersonSchema])
result = schema.execute("""mutation testMutation {
createPerson(name:"Test") {
person {
name
}
}
}""")
assert not result.errors
assert result.data == {'createPerson': {'person': {'name': 'Test'}}}
assert mock_person.objects().count() == 1
assert mock_person.objects().first().name == 'Test'
assert PersonSchema._called_custom_mutate
def test_gen_mutation_generic_mutate(schema_builder, mock_person):
from graphene_mongodb import MongoSchema
assert mock_person.objects().count() == 0
class PersonSchema(MongoSchema):
model = mock_person
schema = schema_builder(mutations=[PersonSchema])
result = schema.execute("""mutation testMutation {
createPerson(name:"Test") {
person {
name
}
}
}""")
assert not result.errors
assert result.data == {'createPerson': {'person': {'name': 'Test'}}}
assert mock_person.objects().count() == 1
assert mock_person.objects().first().name == 'Test'
def test_gen_mutation_user_mutate_wrong_return(mock_person):
import graphene
from graphql.execution.base import ResolveInfo
from graphene_mongodb.mutation import gen_mutation
from graphene_mongodb.model import ModelSchema
def mutate(args, context):
return False
model_schema = ModelSchema(mock_person, mock_person._fields, mutate, None)
result = gen_mutation(mock_person, model_schema.schema, model_schema.operators_mutation,
model_schema.fields_mutation, model_schema.mutate_func, None)
assert issubclass(result, graphene.Mutation)
assert hasattr(result, 'mutate')
with pytest.raises(Exception) as e_info:
context = graphene.types.Context()
info = ResolveInfo('name', *[None for i in range(8)], context)
result.mutate(None, info, **{'name': "Test"})
assert str(e_info.value) == 'Failed to resolve mutation of the schema {}' \
' because mutate function must return a instance of {}, and the return type was {}.'\
.format(model_schema.schema.__name__, model_schema.model.__name__, type(False))
``` |
{
"source": "joao-vitor-viera-rodacki/Interfase_Grafica_Em_Python",
"score": 3
} |
#### File: Interfase_Grafica_Em_Python/interfase_materiais/interfase.py
```python
import PySimpleGUI as sg
class Interfase :
def __init__(self):
self.produtos = {'bolacha' : 'bolacha',
'leite' : 'leite'
}
layuot = [
[sg.Text('testando'),sg.Input(size=(20,0))],
[self.filtraItens()]
]
#janela
self.janela_usuario = sg.Window('Dados Bancarios').layout(layuot)
def update_usuario(self):
while True:
#extraindo dados em tempo real do layout
self.botton, self.values = self.janela_usuario.read()
if self.values == sg.WIN_CLOSED:
break
def filtraItens (self):
for item in self.produtos.keys():
print(item)
def list_produtos (self):
layout = [
[sg.T('Lista de produtos')],
]
self.janela_produtos = sg.Window('Lista').layout(layout)
def update_listaPrecos (self):
while True:
button , values = self.janela_produtos.Read()
if values == sg.WINDOW_CLOSED:
break
janela = Interfase()
janela.list_produtos()
janela.update_listaPrecos()
janela.update_usuario()
#---------------------
``` |
{
"source": "joaovmalheiros/Exercicios-Python-Curso-Em-Video",
"score": 4
} |
#### File: Exercicios-Python-Curso-Em-Video/ex069/ex069.py
```python
def verificaNumero():
pass
class Pessoa():
idade = 0
sexo = None
def __init__(self, i, s):
self.idade = i
self.sexo = s
maiorDeIdade = 0
menorDeVinte = 0
homens = 0
continua = 'S'
listasDePessoas = list()
while continua in 'Ss':
idade = int(input('Idade: '))
if idade < 20:
menorDeVinte = menorDeVinte + 1
if idade > 18:
maiorDeIdade = maiorDeIdade + 1
else:
maiorDeIdade = maiorDeIdade + 1
sexo = str(input('Sexo: ')).upper().strip()[0]
if sexo == 'M':
homens = homens + 1
p = Pessoa(idade, sexo)
listasDePessoas.append(p)
while True:
continua = str(input('Quer continuar? [S/N]')).upper().strip()[0]
print(continua)
if continua not in 'Ss' and continua not in 'Nn':
print('Opção inválida!')
else:
break
print(f'Total de pessoas com mais de 18 anos: {maiorDeIdade}.')
print(f'Ao todo temos {homens} homens cadastrados.')
print(f'E temos {menorDeVinte} mulheres com menos de 20 anos.')
``` |
{
"source": "joao-voltarelli/botcity-framework-core-python",
"score": 3
} |
#### File: botcity/core/mouse.py
```python
import pyautogui
import random
from .utils import ensure_state, only_if_element
from . import config
@ensure_state
@only_if_element
def click_on(label, *, state=None):
"""
Click on the element.
Args:
label (str): The image identifier
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
from .display import get_element_coords_centered
x, y = get_element_coords_centered(label, state=state)
pyautogui.click(x, y)
def get_last_x():
"""
Get the last X position for the mouse.
Returns:
x (int): The last x position for the mouse.
"""
return pyautogui.position().x
def get_last_y():
"""
Get the last Y position for the mouse.
Returns:
y (int): The last y position for the mouse.
"""
return pyautogui.position().y
def mouse_move(x, y):
"""
Mouse the move to the coordinate defined by x and y
Args:
x (int): The X coordinate
y (int): The Y coordinate
"""
pyautogui.moveTo(x, y)
def click_at(x, y):
"""
Click at the coordinate defined by x and y
Args:
x (int): The X coordinate
y (int): The Y coordinate
"""
pyautogui.click(x, y)
@ensure_state
@only_if_element
def click(wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
clicks=1, interval_between_clicks=0, button='left', state):
"""
Click on the last found element.
Args:
wait_after (int, optional): Interval to wait after clicking on the element.
clicks (int, optional): Number of times to click. Defaults to 1.
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
button (str, optional): One of 'left', 'right', 'middle'. Defaults to 'left'
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
from .misc import sleep
x, y = state.center()
pyautogui.click(x, y, clicks=clicks, button=button, interval=interval_between_clicks)
sleep(wait_after)
@ensure_state
@only_if_element
def click_relative(x, y, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
clicks=1, interval_between_clicks=0, button='left', state):
"""
Click Relative on the last found element.
Args:
x (int): Horizontal offset
y (int): Vertical offset
wait_after (int, optional): Interval to wait after clicking on the element.
clicks (int, optional): Number of times to click. Defaults to 1.
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
button (str, optional): One of 'left', 'right', 'middle'. Defaults to 'left'
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
from .misc import sleep
x = state.x() + x
y = state.y() + y
pyautogui.click(x, y, clicks=clicks, button=button, interval=interval_between_clicks)
sleep(wait_after)
@ensure_state
@only_if_element
def double_click(wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, state):
"""
Double Click on the last found element.
Args:
wait_after (int, optional): Interval to wait after clicking on the element.
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
click(wait_after=wait_after, clicks=2, state=state)
@ensure_state
@only_if_element
def double_click_relative(x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, state):
"""
Double Click Relative on the last found element.
Args:
x (int): Horizontal offset
y (int): Vertical offset
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
wait_after (int, optional): Interval to wait after clicking on the element.
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
click_relative(x, y, wait_after=wait_after, clicks=2,
interval_between_clicks=interval_between_clicks, state=state)
@ensure_state
@only_if_element
def triple_click(wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, state):
"""
Triple Click on the last found element.
Args:
wait_after (int, optional): Interval to wait after clicking on the element.
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
click(wait_after=wait_after, clicks=3, state=state)
@ensure_state
@only_if_element
def triple_click_relative(x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, state):
"""
Triple Click Relative on the last found element.
Args:
x (int): Horizontal offset
y (int): Vertical offset
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
wait_after (int, optional): Interval to wait after clicking on the element.
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
click_relative(x, y, wait_after=wait_after, clicks=3,
interval_between_clicks=interval_between_clicks, state=state)
def scroll_down(clicks):
"""
Scroll Down n clicks
Args:
clicks (int): Number of times to scroll down.
"""
pyautogui.scroll(-1*clicks)
def scroll_up(clicks):
"""
Scroll Up n clicks
Args:
clicks (int): Number of times to scroll up.
"""
pyautogui.scroll(clicks)
@ensure_state
@only_if_element
def move(*, state):
"""
Move to the center position of last found item.
Args:
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
x, y = state.center()
pyautogui.moveTo(x, y)
def move_relative(x, y):
"""
Move the mouse relative to its current position.
Args:
x (int): Horizontal offset
y (int): Vertical offset
"""
x = get_last_x() + x
y = get_last_y() + y
pyautogui.moveTo(x, y)
def move_random(range_x, range_y):
"""
Move randomly along the given x, y range.
Args:
range_x (int): Horizontal range
range_y (int): Vertical range
"""
x = int(random.random()*range_x)
y = int(random.random()*range_y)
pyautogui.moveTo(x, y)
@ensure_state
@only_if_element
def right_click(wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *,
clicks=1, interval_between_clicks=0, state):
"""
Right click on the last found element.
Args:
wait_after (int, optional): Interval to wait after clicking on the element.
clicks (int, optional): Number of times to click. Defaults to 1.
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
from .misc import sleep
x, y = state.center()
pyautogui.click(x, y, clicks=clicks, button='right', interval=interval_between_clicks)
sleep(wait_after)
def right_click_at(x, y):
"""
Right click at the coordinate defined by x and y
Args:
x (int): The X coordinate
y (int): The Y coordinate
"""
pyautogui.click(x, y, button='right')
@ensure_state
@only_if_element
def right_click_relative(x, y, interval_between_clicks=0, wait_after=config.DEFAULT_SLEEP_AFTER_ACTION, *, state):
"""
Right Click Relative on the last found element.
Args:
x (int): Horizontal offset
y (int): Vertical offset
interval_between_clicks (int, optional): The interval between clicks in ms. Defaults to 0.
wait_after (int, optional): Interval to wait after clicking on the element.
state (State, optional): An instance of BaseState. If not provided, the singleton State is used.
"""
click_relative(x, y, wait_after=wait_after, clicks=1,
interval_between_clicks=interval_between_clicks, button='right', state=state)
# Java API compatibility
clickOn = click_on
getLastX = get_last_x
getLastY = get_last_y
mouseMove = mouse_move
clickAt = click_at
doubleclick = double_click
doubleClick = double_click
doubleClickRelative = double_click_relative
tripleClick = triple_click
tripleClickRelative = triple_click_relative
scrollDown = scroll_down
scrollUp = scroll_up
moveTo = mouse_move
moveRelative = move_relative
moveRandom = move_random
moveAndClick = click
rightClick = right_click
rightClickAt = right_click_at
rightClickRelative = right_click_relative
moveAndRightClick = right_click
```
#### File: botcity/core/utils.py
```python
from functools import wraps
import platform
import subprocess
from .base import SingleState
def is_retina():
"""
Check whether or not the system is running in "retina" display mode.
Returns:
(bool)
"""
if platform.system() == 'Darwin':
check = subprocess.call("system_profiler SPDisplaysDataType | grep -i 'retina'", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if check == 0:
return True
return False
def ensure_state(func):
"""
Decorator to ensure that a State instance is being provided.
If no State instance is provided it uses the singleton SingleState.
Args:
func (callable): The function to be wrapped
Returns:
wrapper (callable): The decorated function
"""
@wraps(func)
def wrapper(*args, **kwargs):
if 'state' not in kwargs:
kwargs['state'] = SingleState()
return func(*args, **kwargs)
return wrapper
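# Hypothetical usage sketch: any keyword-only `state` parameter is filled in
# with the SingleState() singleton when the caller omits it.
#
#     @ensure_state
#     def centered(*, state=None):
#         return state.center()
#
#     centered()                 # uses the shared SingleState()
#     centered(state=my_state)   # uses an explicit State instance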
def only_if_element(func):
"""
Decorator which raises if element is None.
Args:
func (callable): The function to be wrapped
Returns:
wrapper (callable): The decorated function
"""
@wraps(func)
def wrapper(*args, **kwargs):
state = kwargs.get('state', SingleState())
if state.element is None:
raise ValueError(f'Element not available. Cannot invoke {func.__name__}.')
return func(*args, **kwargs)
return wrapper
def find_bot_class(module):
"""
Args:
module (module): The module in which to search for the BaseBot class.
Returns:
klass (type): A class that inherits from BaseBot.
"""
import inspect
from botcity.core import BaseBot
klass = [obj for name, obj in inspect.getmembers(module) if
inspect.isclass(obj) and issubclass(obj, BaseBot) and 'botcity.core' not in obj.__module__
and module.__name__ in obj.__module__]
if not klass:
raise ValueError('No BaseBot class could be found. Please add at least one class'
'inheriting from DesktopBot or similar.')
return klass
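# Illustrative usage (hypothetical module): for a module `bot` defining
# `class Bot(DesktopBot): ...`, find_bot_class(bot) returns the list [Bot].
# Note that the return value is a list of matching classes, not a single class.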
``` |
{
"source": "joaovpassos/USP-Programs",
"score": 2
} |
#### File: USP-Programs/files/agentes.py
```python
import random
# para mt.init_matriz() usada na função sorteie_cidade()
import matriz as mt
#------------------------------------------------------------------
# CONSTANTES
VAZIA = ' ' # para casa vazia
AZUL = 'a' # para casa habitada por azul
VERMELHO = 'v' # para casa habitada por vermelho
# valores para teste/debug
DEBUG = False
SEMENTE = 10 # para random.seed()
TAMANHO = 6 # para a dimensão da cidade
PORCENT_A = 40 # porcentagem agentes azuis
PORCENT_V = 40 # porcentagem agentes vermelhos
DESEJADO = 3 # número de vizinhos que tornam um agente feliz
######################################################################
def main():
# inicialização: teste seu programa inicialmente com
# DEBUG = True e depois com Debug = False para
# usar outros valores
if DEBUG:
semente = SEMENTE
tamanho = TAMANHO
porc_A = PORCENT_A
porc_V = PORCENT_V
min_viz_desejado = DESEJADO
else:
print("Bem vindo ao simulador do modelo de segregação de Schelling\n")
semente = int(input("Digite a semente para o gerador de números pseudo-aleatórios: "))
tamanho = int(input("Digite o tamanho da matriz cidade: "))
porc_A = int(input("Digite a porcentagem da população azul: "))
porc_V = int(input("Digite a porcentagem da população vermelha: "))
min_viz_desejado = int(input("Digite o número mínimo de vizinhos desejado: "))
# inicialização do gerador de números pseudo-aleatórios
random.seed( semente )
# configuração inicial das moradias na cidade
mapa = sorteie_cidade( tamanho, porc_A, porc_V )
print()
print("Configuração Inicial")
for lin in range(tamanho):
print(f"linha {lin:3d} - {mapa[lin]}")
# simulação do processo
fim = False
iteracao = 0
while not fim:
muda_a, muda_v = ache_infelizes( mapa, min_viz_desejado )
print()
print(f"{len(muda_a):3d} moradores azuis querem mudar")
print(f"{len(muda_v):3d} moradores vermelhos querem mudar")
print(f"Origem dos azuis : {muda_a}")
print(f"Origem dos vermelhos: {muda_v}")
origem = muda_a + muda_v
destino = atualize_mapa(mapa, origem)
for i in range(len(origem)):
print(f"Morador em {origem[i]} se moveu para {destino[i]}")
iteracao += 1
print(f"\nEstado do mapa após {iteracao} iterações")
for lin in range(tamanho):
print(f"linha {lin:3d} - {mapa[lin]}")
if len(muda_a) == 0 and len(muda_v) == 0:
print("Todos os moradores estão felizes e ninguém quer se mudar!")
fim = True
else:
op = input("\nDigite fim para terminar, ou enter para mais uma iteração: ")
if op == 'fim':
fim = True
######################################################################
def conte_vizinhos(M, lin, col):
''' (matriz, int, int) -> int
RECEBE uma matriz M que representa uma cidade e um par [lin, col] que
indica uma casa [lin][col] de M.
RETORNA o número de casas vizinhas da casa [lin][col] de M em que
habitam agentes iguais a M[lin][col].
'''
# modifique o código abaixo para conter a sua solução.
vizinhos = 0
if (lin-1) >=0 and (col-1) >= 0:
if M[lin-1][col-1] == M[lin][col]:
vizinhos += 1
if (lin-1) >= 0:
if M[lin-1][col] == M[lin][col]:
vizinhos += 1
if (col+1) < len(M) and (lin-1) >= 0:
if M[lin-1][col+1] == M[lin][col]:
vizinhos += 1
if (col-1) >= 0:
if M[lin][col-1] == M[lin][col]:
vizinhos += 1
if (col+1) < len(M):
if M[lin][col+1] == M[lin][col]:
vizinhos += 1
if (lin+1) < len(M[0]) and (col-1) >= 0:
if M[lin+1][col-1] == M[lin][col]:
vizinhos += 1
if (lin+1) < len(M[0]):
if M[lin+1][col] == M[lin][col]:
vizinhos += 1
if (lin+1) < len(M[0]) and (col+1) < len(M):
if M[lin+1][col+1] == M[lin][col]:
vizinhos += 1
return vizinhos
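# Example: for M = [['a', 'a', ' '],
#                   ['v', 'a', ' '],
#                   [' ', ' ', ' ']]
# conte_vizinhos(M, 1, 1) returns 2, since only the two 'a' cells adjacent to
# M[1][1] match its value.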
######################################################################
def ache_infelizes(M, minviz):
''' (matriz, int) -> lista de listas, lista de listas
RECEBE uma matriz M que representa uma cidade e um inteiro minviz.
RETORNA duas listas. Cada cada item destas listas é um par
[lin, col] que indica uma casa [lin][col] de M habitada por
um agente infeliz.
A PRIMEIRA lista contém TODOS os pares [lin, col] tais que o agente que
habita a casa [lin][col] é AZUL e está INFELIZ.
A SEGUNDA lista contém TODOS os pares [lin, col] tais que o agente que
habita a casa [lin][col] é VERMELHO e está INFELIZ.
Um AGENTE está INFELIZ se o número de vizinhos iguais a ele é
menor que minviz.
'''
# modifique o código abaixo para conter a sua solução.
azuis = []
vermelhos = []
for i in range(len(M)):
for j in range(len(M[i])):
if conte_vizinhos(M,i,j) < minviz:
if M[i][j] == 'a':
azuis += [[i, j]]
elif M[i][j] == 'v':
vermelhos += [[i, j]]
return azuis, vermelhos
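# Example: with M = [['a', 'v'], [' ', 'a']] and minviz = 1, each 'a' agent has
# one like neighbor and stays happy, while the 'v' at [0, 1] has none, so the
# function returns ([], [[0, 1]]).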
######################################################################
def atualize_mapa(M, origem):
''' (matriz, list) -> lista de listas
RECEBE uma matriz M que representa uma cidade e uma lista origem em
que cada item é um par [lin, col] que indica uma casa [lin][col]
de M.
MODIFICA a matriz M mudando, cada morador de uma casa
[lin][col] indicada por um par [lin, col] em origem, para uma
casa vazia escolhida de forma aleatória. Para essa escolha
aleatoria utilize a função sorteie_casa_vazia() que está totalmente
feita mais adiante.
Após cada mudança a casa origem fica desocupada/vazia.
RETORNA uma lista em que cada item é um par [lin, col] que indica
uma casa [lin][col] de M. A lista destino deve ser tal que o
agente que habitava a casa origem[i] mudou-se para a casa
destino[i].
Estude o enunciado e os exemplos fornecidos no enunciado.
A função sortei_casa_vazia também pode ser fonte de inspiração.
'''
# modifique o código abaixo para conter a sua solução.
new_positions = []
for i in origem:
x = sorteie_casa_vazia(M)
M[x[0]][x[1]] = M[i[0]][i[1]]
M[i[0]][i[1]] = VAZIA
new_positions += [[x[0],x[1]]]
return new_positions
######################################################################
######################################################################
##
## Utilidades para você usar caso desejar
##
######################################################################
def sorteie_cidade(nlins, pa, pv):
''' (int, int, int) -> matriz
RECEBE inteiros nlins, pa e pv.
RETORNA uma matriz M que representa uma cidade de dimensão
nlins x nlins em que o número (provavelmente) de
- moradores azuis é (pa/100)*nlins*nlins,
- moradores vermelhos é (pv/100)*nlins*nlins, e
- casa desabitadas é (1-(pa+pv)/100)*nlins*nlins
'''
ncols = nlins # matriz é quadrada
# 1 inicialmente todas as casas estão desabitadas = VAZIA
M = mt.init_matriz(nlins, ncols, VAZIA)
# 2 pecorra a cidade povoando cada casa com um agente azul ou
# um agente vermelho ou deixando desabitada
for lin in range(nlins):
for col in range(ncols):
# faça um sorteio para decidir se a casa [lin][col]
# será habitada por um agente azul, ou por um agente vermelho
# ou ficará desabitada
sorteio = random.randrange(100)
if sorteio < pa: # sorteio em {0,1,...,pa-1}
M[lin][col] = AZUL
elif sorteio < pa+pv: # sorteio em {pa,pa+1,...,pa+pv-1}
M[lin][col] = VERMELHO
return M
######################################################################
def sorteie_casa_vazia( M ):
''' (matriz) -> [int, int]
RECEBE uma matriz M que representa uma cidade.
RETORNA um par [lin, col] que indica uma casa vazia [lin][col]
de M escohida aleatoriamente.
'''
# 1 pegue a dimensão da cidade
nlins = len(M)
ncols = nlins # = len(M[0]), nlins == ncols
# 2 crie um lista para armazenar os pares [lin, col] que indicam as
# casas [lin][col] de M que estão desabitadas
casaVazia = []
# 3 pecorra a cidade a procura de casas desabitadas
for lin in range(nlins):
for col in range(ncols):
# a casa esta desabitada, vazia?
if M[lin][col] == VAZIA:
# insira o para [lin, col] na nossa lista
casaVazia += [ [lin, col] ]
# 4 sorteie uma das casas desabitadas aleatoriamente
sorteada = random.randrange( len(casaVazia) )
# retorne a casa sorteda.
return casaVazia[sorteada]
######################################################################
if __name__ == "__main__":
main()
```
#### File: USP-Programs/files/area.py
```python
EPSILON = 1.0e-6
#------------------------------------------------------------------
# O import abaixo permite que o programa utilize todas as funções do módulo math,
# como por exemplo, math.exp e math.sin.
import math
#------------------------------------------------------------------
def main():
'''() -> None
Modifique essa função, escrevendo outros testes.
'''
# escolha a função que desejar e atribuia a f_x
f_x = math.cos
# f_x = math.sin
# f_x = math.exp # etc, para integração com outras funções.
# f_x = identidade # identidade() definidas mais adiante
# f_x = circunferencia # circunferencia() definida mais adiante
# f_x = exp # exp() definida mais adiante
print("Início dos testes.")
# Testes da f_x
nome = f_x.__name__ # nome da f_x usada
print(f"A função f_x usada nos testes é {nome}()")
print(f"Valor de f_x(0.0)= {f_x( 0.0 )}")
print(f"Valor de f_x(0.5)= {f_x( 0.5 )}")
print(f"Valor de f_x(1.0)= {f_x( 1.0 )}")
# testes da função área_por_retangulos
print()
print("Área por retângulos:")
a, b = 0, 1 # intervalo [a,b]
k = 1 # número de retângulos
n = 3 # número de iterações
i = 0
while i < n:
print(f"teste {i+1}: para {k} retângulos no intervalo [{a}, {b}]:")
print(f" área aproximada = {area_por_retangulos(f_x, a, b, k):g}")
k *= 10
i += 1
    # tests of the area_aproximada function
print()
print("Área aproximada:")
a, b = 0, 1 # intervalo
k, area = area_aproximada(f_x, a, b) # número de retângulos e aproximação
print(f"teste 1: para eps = {EPSILON:g} e intervalo [{a}, {b}]:")
print(f" com {k} retângulo a área é aproximadamente = {area:g}")
eps = 1e-6 # erro relativo aceitável
i = 1
n = 4
while i < n:
eps *= 10 # aumenta o erro relativo aceitável
k, area = area_aproximada(f_x, a, b, eps)
print(f"teste {i+1}: para eps = {eps:g} e intervalo [{a}, {b}]:")
print(f" com {k} retângulos a área é aproximadamente = {area:g}")
i += 1
print("Fim dos testes.")
#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = x
def identidade( x ):
    ''' (float) -> float
    RECEIVES a value x.
    RETURNS the value received.
    EXAMPLES:
    In [6]: identidade(3.14)
    Out[6]: 3.14
    In [7]: identidade(1)
    Out[7]: 1
    In [8]: identidade(-3)
    Out[8]: -3
    '''
return x
#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = sqrt(1 - x*x)
def circunferencia( x ):
    ''' (float) -> float
    RECEIVES a value x.
    RETURNS a value y >= 0 such that (x,y) is a point on the circle of radius 1 centered at (0,0).
    PRE-CONDITION: the function assumes that x is a value such that -1 <= x <= 1.
    EXAMPLES:
    In [9]: circunferencia(-1)
    Out[9]: 0.0
    In [10]: circunferencia(0)
    Out[10]: 1.0
    In [11]: circunferencia(1)
    Out[11]: 0.0
    '''
y = math.sqrt( 1 - x*x )
return y
#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = e^x
def exp( x ):
    ''' (float) -> float
    RECEIVES a value x.
    RETURNS (an approximation of) exp(x).
    EXAMPLES:
    In [12]: exp(1)
    Out[12]: 2.718281828459045
    In [13]: exp(0)
    Out[13]: 1.0
    In [14]: exp(-1)
    Out[14]: 0.36787944117144233
    '''
y = math.exp( x )
return y # return math.exp( x )
#------------------------------------------------------------------
#
def erro_rel(y, x):
    ''' (float, float) -> float
    RECEIVES two numbers x and y.
    RETURNS the relative error between them.
    EXAMPLES:
    In [1]: erro_rel(0, 0)
    Out [1]: 0.0
    In [2]: erro_rel(0.01, 0)
    Out [2]: 1.0
    In [3]: erro_rel(1.01, 1.0)
    Out [3]: 0.01
    '''
if x == 0 and y == 0:
return 0.0
elif x == 0:
return 1.0
erro = (y-x)/x
if erro < 0:
return -erro
return erro
#------------------------------------------------------------------
def area_por_retangulos(f, a, b, k):
    '''(function, float, float, int) -> float
    RECEIVES a function f, two numbers a and b, and an integer k.
    RETURNS an approximation of the area under the function f on the interval [a,b]
    using k rectangles.
    PRE-CONDITION: the function assumes that f is continuous on the interval [a,b] and that
    f(x) >= 0 for every x, a <= x <= b.
    EXAMPLES:
    In [15]: area_por_retangulos(identidade, 0, 1, 1)
    Out[15]: 0.5
    In [16]: area_por_retangulos(circunferencia, -1, 0, 1)
    Out[16]: 0.8660254037844386
    '''
    # write your solution below
    # remove or modify the line below as you wish
base = (b-a)/k
i = 0
x_meio = ((b-a)/(2*k)) + a
soma = 0
while i < k:
area = f(x_meio)*base
x_meio += base
i += 1
soma += area
return soma
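# A small worked example of the midpoint rule above: with f = identidade, a = 0, b = 1
# and k = 2, the base is 0.5 and the midpoints are 0.25 and 0.75, so the returned sum
# is 0.25*0.5 + 0.75*0.5 = 0.5, which matches the exact area under f(x) = x on [0, 1].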
#------------------------------------------------------------------
def area_aproximada(f, a, b, eps=EPSILON):
    '''(function, float, float, float) -> int, float
    RECEIVES a function f and numbers a, b and eps.
    RETURNS an integer k and an approximation of the area under the function f on
    the interval [a,b] using k rectangles.
    The value of k must be the __smallest power__ of 2 such that the relative error
    of the returned approximation is smaller than eps.
    Thus, the possible values of k are 1, 2, 4, 8, 16, 32, 64, ...
    PRE-CONDITION: the function assumes that f is continuous on the interval [a,b] and that
    f(x) >= 0 for every x, a <= x <= b.
    EXAMPLES:
    In [22]: area_aproximada(identidade, 1, 2)
    Out[22]: (2, 1.5)
    In [23]: area_aproximada(exp, 1, 2, 16)
    Out[23]: (2, 4.6224728167337865)
    '''
    # write the body of the function
    # remove or modify the line below as you wish
k = 1
sub = eps + 1
while sub >= eps:
sub = erro_rel(area_por_retangulos(f,a,b,k*2),area_por_retangulos(f,a,b,k))
k *= 2
    return k, area_por_retangulos(f,a,b,k)  # to return an int and a float,
                                            # just separate them with a comma
#######################################################
###                         END                         ###
#######################################################
#
# DO NOT MODIFY THE LINES BELOW
#
# This if statement makes the main() function run only when
# this is the module from which execution was started.
if __name__ == '__main__':
main()
``` |
{
"source": "joaovs2004/Simple-Calculator",
"score": 2
} |
#### File: Simple-Calculator/interface/mainUi.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 600)
MainWindow.setMinimumSize(QtCore.QSize(400, 600))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 43, 43))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(58, 58, 58))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 48, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 26, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 43, 43))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 39, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(77, 77, 77))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 43, 43))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(58, 58, 58))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 48, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 26, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 43, 43))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 39, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(77, 77, 77))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 43, 43))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(58, 58, 58))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(48, 48, 48))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(26, 26, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(19, 19, 19))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 39, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 39, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(31, 32, 42))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 39, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
MainWindow.setPalette(palette)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.pushButton_10 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_10.sizePolicy().hasHeightForWidth())
self.pushButton_10.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_10.setFont(font)
self.pushButton_10.setObjectName("pushButton_10")
self.buttonGroup = QtWidgets.QButtonGroup(MainWindow)
self.buttonGroup.setObjectName("buttonGroup")
self.buttonGroup.addButton(self.pushButton_10)
self.gridLayout.addWidget(self.pushButton_10, 3, 1, 1, 1)
self.pushButton_13 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_13.sizePolicy().hasHeightForWidth())
self.pushButton_13.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_13.setFont(font)
self.pushButton_13.setObjectName("pushButton_13")
self.buttonGroup.addButton(self.pushButton_13)
self.gridLayout.addWidget(self.pushButton_13, 4, 0, 1, 1)
self.pushButton_12 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_12.sizePolicy().hasHeightForWidth())
self.pushButton_12.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_12.setFont(font)
self.pushButton_12.setObjectName("pushButton_12")
self.buttonGroup.addButton(self.pushButton_12)
self.gridLayout.addWidget(self.pushButton_12, 4, 3, 1, 1)
self.btnDelete = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.btnDelete.sizePolicy().hasHeightForWidth())
self.btnDelete.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.btnDelete.setFont(font)
self.btnDelete.setObjectName("btnDelete")
self.gridLayout.addWidget(self.btnDelete, 1, 1, 1, 2)
self.pushButton_8 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_8.sizePolicy().hasHeightForWidth())
self.pushButton_8.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_8.setFont(font)
self.pushButton_8.setObjectName("pushButton_8")
self.buttonGroup.addButton(self.pushButton_8)
self.gridLayout.addWidget(self.pushButton_8, 3, 3, 1, 1)
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_5.sizePolicy().hasHeightForWidth())
self.pushButton_5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_5.setFont(font)
self.pushButton_5.setObjectName("pushButton_5")
self.buttonGroup.addButton(self.pushButton_5)
self.gridLayout.addWidget(self.pushButton_5, 2, 0, 1, 1)
self.pushButton_7 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_7.sizePolicy().hasHeightForWidth())
self.pushButton_7.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_7.setFont(font)
self.pushButton_7.setObjectName("pushButton_7")
self.buttonGroup.addButton(self.pushButton_7)
self.gridLayout.addWidget(self.pushButton_7, 2, 2, 1, 1)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_4.sizePolicy().hasHeightForWidth())
self.pushButton_4.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_4.setFont(font)
self.pushButton_4.setObjectName("pushButton_4")
self.buttonGroup.addButton(self.pushButton_4)
self.gridLayout.addWidget(self.pushButton_4, 2, 3, 1, 1)
self.btnClear = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.btnClear.sizePolicy().hasHeightForWidth())
self.btnClear.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.btnClear.setFont(font)
self.btnClear.setObjectName("btnClear")
self.gridLayout.addWidget(self.btnClear, 1, 0, 1, 1)
self.pushButton_17 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_17.sizePolicy().hasHeightForWidth())
self.pushButton_17.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_17.setFont(font)
self.pushButton_17.setObjectName("pushButton_17")
self.buttonGroup.addButton(self.pushButton_17)
self.gridLayout.addWidget(self.pushButton_17, 6, 0, 1, 2)
self.pushButton_15 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_15.sizePolicy().hasHeightForWidth())
self.pushButton_15.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_15.setFont(font)
self.pushButton_15.setObjectName("pushButton_15")
self.buttonGroup.addButton(self.pushButton_15)
self.gridLayout.addWidget(self.pushButton_15, 4, 2, 1, 1)
self.Res = QtWidgets.QLineEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(8)
sizePolicy.setHeightForWidth(self.Res.sizePolicy().hasHeightForWidth())
self.Res.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(36, 36, 36))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(36, 36, 36))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 39, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.Res.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(16)
self.Res.setFont(font)
self.Res.setText("")
self.Res.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Res.setReadOnly(True)
self.Res.setObjectName("Res")
self.gridLayout.addWidget(self.Res, 0, 0, 1, 4)
self.pushButton_14 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_14.sizePolicy().hasHeightForWidth())
self.pushButton_14.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_14.setFont(font)
self.pushButton_14.setObjectName("pushButton_14")
self.buttonGroup.addButton(self.pushButton_14)
self.gridLayout.addWidget(self.pushButton_14, 4, 1, 1, 1)
self.pushButton_18 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_18.sizePolicy().hasHeightForWidth())
self.pushButton_18.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_18.setFont(font)
self.pushButton_18.setObjectName("pushButton_18")
self.buttonGroup.addButton(self.pushButton_18)
self.gridLayout.addWidget(self.pushButton_18, 6, 2, 1, 1)
self.pushButton_11 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_11.sizePolicy().hasHeightForWidth())
self.pushButton_11.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_11.setFont(font)
self.pushButton_11.setObjectName("pushButton_11")
self.buttonGroup.addButton(self.pushButton_11)
self.gridLayout.addWidget(self.pushButton_11, 3, 2, 1, 1)
self.pushButton_9 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_9.sizePolicy().hasHeightForWidth())
self.pushButton_9.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_9.setFont(font)
self.pushButton_9.setObjectName("pushButton_9")
self.buttonGroup.addButton(self.pushButton_9)
self.gridLayout.addWidget(self.pushButton_9, 3, 0, 1, 1)
self.btnResult = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.btnResult.sizePolicy().hasHeightForWidth())
self.btnResult.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.btnResult.setFont(font)
self.btnResult.setObjectName("btnResult")
self.gridLayout.addWidget(self.btnResult, 6, 3, 1, 1)
self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_6.sizePolicy().hasHeightForWidth())
self.pushButton_6.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_6.setFont(font)
self.pushButton_6.setObjectName("pushButton_6")
self.buttonGroup.addButton(self.pushButton_6)
self.gridLayout.addWidget(self.pushButton_6, 2, 1, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
self.pushButton_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.pushButton_3.setFont(font)
self.pushButton_3.setObjectName("pushButton_3")
self.buttonGroup.addButton(self.pushButton_3)
self.gridLayout.addWidget(self.pushButton_3, 1, 3, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Calculator"))
self.pushButton_10.setText(_translate("MainWindow", "5"))
self.pushButton_10.setShortcut(_translate("MainWindow", "5"))
self.pushButton_13.setText(_translate("MainWindow", "1"))
self.pushButton_13.setShortcut(_translate("MainWindow", "1"))
self.pushButton_12.setText(_translate("MainWindow", "-"))
self.pushButton_12.setShortcut(_translate("MainWindow", "-"))
self.btnDelete.setText(_translate("MainWindow", "⌫"))
self.btnDelete.setShortcut(_translate("MainWindow", "Backspace"))
self.pushButton_8.setText(_translate("MainWindow", "+"))
self.pushButton_8.setShortcut(_translate("MainWindow", "+"))
self.pushButton_5.setText(_translate("MainWindow", "7"))
self.pushButton_5.setShortcut(_translate("MainWindow", "7"))
self.pushButton_7.setText(_translate("MainWindow", "9"))
self.pushButton_7.setShortcut(_translate("MainWindow", "9"))
self.pushButton_4.setText(_translate("MainWindow", "*"))
self.pushButton_4.setShortcut(_translate("MainWindow", "*"))
self.btnClear.setText(_translate("MainWindow", "C"))
self.btnClear.setShortcut(_translate("MainWindow", "C"))
self.pushButton_17.setText(_translate("MainWindow", "0"))
self.pushButton_17.setShortcut(_translate("MainWindow", "0"))
self.pushButton_15.setText(_translate("MainWindow", "3"))
self.pushButton_15.setShortcut(_translate("MainWindow", "3"))
self.Res.setPlaceholderText(_translate("MainWindow", "0"))
self.pushButton_14.setText(_translate("MainWindow", "2"))
self.pushButton_14.setShortcut(_translate("MainWindow", "2"))
self.pushButton_18.setText(_translate("MainWindow", "."))
self.pushButton_18.setShortcut(_translate("MainWindow", "."))
self.pushButton_11.setText(_translate("MainWindow", "6"))
self.pushButton_11.setShortcut(_translate("MainWindow", "6"))
self.pushButton_9.setText(_translate("MainWindow", "4"))
self.pushButton_9.setShortcut(_translate("MainWindow", "4"))
self.btnResult.setText(_translate("MainWindow", "="))
self.btnResult.setShortcut(_translate("MainWindow", "Return"))
self.pushButton_6.setText(_translate("MainWindow", "8"))
self.pushButton_6.setShortcut(_translate("MainWindow", "8"))
self.pushButton_3.setText(_translate("MainWindow", "÷"))
self.pushButton_3.setShortcut(_translate("MainWindow", "/"))
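# Minimal usage sketch for this generated Ui class (an illustration assuming the
# package layout from the file header, not part of the generated file itself):
#
#     import sys
#     from PyQt5 import QtWidgets
#     from interface.mainUi import MainWindow
#
#     app = QtWidgets.QApplication(sys.argv)
#     window = QtWidgets.QMainWindow()
#     ui = MainWindow()
#     ui.setupUi(window)   # builds the dark palette, the button grid and the Res display
#     window.show()
#     sys.exit(app.exec_())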
``` |
{
"source": "Joaovtrs/pygame_examples",
"score": 3
} |
#### File: src/sandpiles/play.py
```python
import numpy as np
import pygame
from pygame.locals import *
def draw(fps):
screen.fill(black)
for i, colum in enumerate(sandpiles):
for j, cell in enumerate(colum):
if cell == 1:
skin = pygame.Surface((gride_size, gride_size))
skin.fill(red)
screen.blit(skin, (i * gride_size, j * gride_size))
elif cell == 2:
skin = pygame.Surface((gride_size, gride_size))
skin.fill(green)
screen.blit(skin, (i * gride_size, j * gride_size))
elif cell == 3:
skin = pygame.Surface((gride_size, gride_size))
skin.fill(blue)
screen.blit(skin, (i * gride_size, j * gride_size))
elif cell > 3:
skin = pygame.Surface((gride_size, gride_size))
skin.fill(white)
screen.blit(skin, (i * gride_size, j * gride_size))
txt2 = font2.render(f"FPS: {fps:.2f}", True, white)
txt2_rect = txt2.get_rect()
txt2_rect.topleft = (10, 10)
screen.blit(txt2, txt2_rect)
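# next_generation() below implements the abelian sandpile topple rule: every cell
# holding 4 or more grains loses 4 and gives one grain to each of its four neighbours
# (grains toppled over the border are simply lost). For example, an interior cell with
# value 5 becomes 1 after one call, and each of its four neighbours gains 1.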
def next_generation():
global sandpiles
new_sandpiles = np.zeros(gride)
for i in range(gride[0]):
for j in range(gride[1]):
new_sandpiles[i][j] = sandpiles[i][j]
for i in range(gride[0]):
for j in range(gride[1]):
if sandpiles[i][j] >= 4:
new_sandpiles[i][j] -= 4
if i > 0:
new_sandpiles[i - 1][j] += 1
if i < gride[0] - 1:
new_sandpiles[i + 1][j] += 1
if j > 0:
new_sandpiles[i][j - 1] += 1
if j < gride[1] - 1:
new_sandpiles[i][j + 1] += 1
sandpiles = new_sandpiles
playing = True
clock = pygame.time.Clock()
# Colors
black = (0, 0, 0)
white = (255, 255, 255)
gray = (50, 50, 50)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Control variables
window_height = 500
window_length = 700
gride_size = 5
gride = (int(window_length / gride_size), int(window_height / gride_size))
pygame.init()
screen = pygame.display.set_mode((window_length, window_height))
pygame.display.set_caption('Name')
try:
font = pygame.font.Font('RobotoMono-Bold.ttf', 100)
font2 = pygame.font.Font('RobotoMono-Bold.ttf', 20)
except FileNotFoundError:
font = pygame.font.Font(None, 100)
font2 = pygame.font.Font(None, 20)
sandpiles = np.zeros(gride)
sandpiles[gride[0] // 2][gride[1] // 2] = 100000
paused = False
while playing:
clock.tick(20)
for event in pygame.event.get():
if event.type == QUIT:
playing = False
if event.type == KEYDOWN:
            if event.key == K_SPACE:  # space bar toggles pause
paused = not paused
if not paused:
for _ in range(3):
next_generation()
draw(clock.get_fps())
pygame.display.update()
pygame.quit()
exit()
``` |
{
"source": "joapaspe/tesismometro",
"score": 3
} |
#### File: joapaspe/tesismometro/main.py
```python
import os
import random
import json
from flask import Flask
import jinja2
import stats
from flask import render_template, redirect
import tesis_bd
from flask import request
from google.appengine.api import users
# jinja2 configuration.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
app = Flask(__name__)
@app.route('/')
def show_results():
"""
Main page.
:return: The request with the rendered template.
"""
doctors = tesis_bd.Doctor.query().fetch()
results = []
for doctor in doctors:
last_record = tesis_bd.LastRecord.query(tesis_bd.LastRecord.doctor == doctor.key).fetch()
if last_record:
last_record = last_record[0]
record = last_record.record.get()
res = {
'name': doctor.name
}
for field in tesis_bd.RECORD_FIELDS:
res[field] = getattr(record, field)
results.append(res)
results.sort(key=lambda x: -x["words"])
# Select the default drawing field.
draw_field = request.args.get('draw')
if not draw_field or draw_field not in tesis_bd.RECORD_FIELDS[:-1]:
draw_field = 'words'
draw_data = stats.get_draw_info(draw_field)
# Compute the draw standings.
week_standings = stats.get_week_standings()
return render_template('index.html',
results=results,
headers=tesis_bd.RECORD_NAMES,
fields=tesis_bd.RECORD_FIELDS,
draw_data=json.dumps(draw_data),
week_standings=week_standings,
draw_field=tesis_bd.record_field_to_name[draw_field]
)
@app.route('/hist/<username>/')
def show_hist(username):
"""Extracts the histograms and show them to the final user.
:param username: Doctor name to show stats about.
:return: Rendered template with the doctor data stats.
"""
draw_field = request.args.get('draw')
doctor = tesis_bd.Doctor.query(tesis_bd.Doctor.name == username).fetch()
if not doctor:
return render_template('hist.html')
doctor = doctor[0]
records = tesis_bd.Record.query(
tesis_bd.Record.doctor == doctor.key).order(-tesis_bd.Record.date).fetch()
# Compute the difference with the previous record.
diffs = []
for i, record in enumerate(records):
record_diff = {}
for field in tesis_bd.RECORD_FIELDS[:-1]:
if i == len(records) - 1:
record_diff[field] = 0
else:
act = getattr(record, field)
ant = getattr(records[i + 1], field)
record_diff[field] = act - ant
diffs.append(record_diff)
# Default graph field.
if not draw_field or draw_field not in tesis_bd.RECORD_FIELDS[:-1]:
draw_field = 'words'
draw_dates = [x.date.strftime('%Y-%m-%d') for x in records]
return render_template('hist.html', records=records, doctor=doctor.name, difs=diffs,
fields=tesis_bd.RECORD_FIELDS[:-1], headers=tesis_bd.RECORD_NAMES,
draw_dates=draw_dates, draw_field=draw_field)
@app.route('/post', methods=['GET', 'POST'])
def post_record():
"""
Request for adding the data to the database.
:return: An http response with the submitted information
"""
if request.method != 'POST':
return render_template(
"error.html", message="GET mode not allowed for adding a new record.")
params = request.form
doctor = tesis_bd.Doctor.query(tesis_bd.Doctor.name == params["name"]).fetch()
if not doctor:
return render_template("error.html", message="The doctor is not found.")
doctor = doctor[0]
# Check the token.
if "token" not in params or doctor.token != params["token"]:
return render_template("error.html", message="Unable to authenticate the doctor.")
    # Get the record to update.
record_list = tesis_bd.LastRecord.query(tesis_bd.LastRecord.doctor == doctor.key).fetch()
if not record_list:
# Create an empty record
empty_record = tesis_bd.Record(doctor=doctor.key)
empty_record.put()
last_record = tesis_bd.LastRecord(doctor=doctor.key, record=empty_record.key)
last_record.put()
else:
last_record = record_list[0]
# If the record is from the same day we update it.
import datetime
now = datetime.datetime.now()
day, month, year = now.day, now.month, now.year
equations = int(params["equations"])
words = int(params["words"])
equations_inline = int(params["equations_inline"])
figures = int(params["figures"])
cites = int(params["cites"])
pages = int(params["pages"])
record = last_record.record.get()
last_values = [getattr(record, field) for field in tesis_bd.RECORD_FIELDS[:-1]]
if record.date.day == day and record.date.month == month and record.date.year == year:
record.equations = equations
record.words = words
record.equations_inline = equations_inline
record.figures = figures
record.pages = pages
record.cites = cites
record.date = datetime.datetime.now() + datetime.timedelta(hours=1)
record.put()
else:
record = tesis_bd.Record(doctor=doctor.key, words=words,
equations=equations, equations_inline=equations_inline,
figures=figures, cites=cites, pages=pages,
date=datetime.datetime.now()+datetime.timedelta(hours=1))
record.put()
last_record.record = record.key
last_record.put()
diff_values = [getattr(record, field) - last_values[i]
for i, field in enumerate(tesis_bd.RECORD_FIELDS[:-1])]
diff_values.append(record.date.strftime('%Y-%m-%d %H:%M'))
stats.update_data()
return render_template('show_post.html', doctor=diff_values, fields=tesis_bd.RECORD_FIELDS)
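# A hypothetical client request for the /post route above (field names taken from the
# handler; the doctor name and token are placeholders, the token being whatever the
# /user page reports):
#
#   curl -X POST <app-url>/post \
#        -d "name=Alice" -d "token=<token>" \
#        -d "words=1200" -d "equations=10" -d "equations_inline=25" \
#        -d "figures=3" -d "cites=40" -d "pages=15"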
@app.route('/user', methods=['GET'])
def user_view():
"""
User interface (only shows the token).
:return: An http response with the submitted information.
"""
user = users.get_current_user()
if not user:
return redirect(users.create_login_url("/user"))
email = user.email()
doctors = tesis_bd.Doctor.query(tesis_bd.Doctor.email == email).fetch()
if len(doctors) == 0:
return render_template('error.html', message="User not found in the DB.")
doctor = doctors[0]
name = doctor.name
if not doctor.token:
doctor.token = "%<PASSWORD>" % random.getrandbits(64)
code = doctor.token
doctor.put()
logout_url = users.create_logout_url("/")
return render_template('user_view.html', login=doctor.name, name=name, email=email, code=code,
logout_url=logout_url)
if __name__ == '__main__':
app.run()
``` |
{
"source": "joapolarbear/byteps",
"score": 2
} |
#### File: example/mxnet-gluon/train_mnist_byteps.py
```python
import time
import argparse
import logging
import mxnet as mx
import byteps.mxnet as bps
from mxnet import autograd, gluon, nd
from mxnet.gluon.data.vision import MNIST
from byteps.mxnet.mx_wrapper import BPSDatasetLoader
# Higher download speed for Chinese users
# os.environ['MXNET_GLUON_REPO'] = 'https://apache-mxnet.s3.cn-north-1.amazonaws.com.cn/'
# Training settings
parser = argparse.ArgumentParser(description='MXNet MNIST Example')
parser.add_argument('--batch-size', type=int, default=64,
help='training batch size (default: 64)')
parser.add_argument('--dtype', type=str, default='float32',
help='training data type (default: float32)')
parser.add_argument('--epochs', type=int, default=5,
help='number of training epochs (default: 5)')
parser.add_argument('--j', type=int, default=2,
help='number of cpu processes for dataloader')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum (default: 0.9)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disable training on GPU (default: False)')
args = parser.parse_args()
if not args.no_cuda:
# Disable CUDA if there are no GPUs.
if mx.context.num_gpus() == 0:
args.no_cuda = True
logging.basicConfig(level=logging.INFO)
logging.info(args)
def dummy_transform(data, label):
im = data.astype(args.dtype, copy=False) / 255 - 0.5
im = nd.transpose(im, (2, 0, 1))
return im, label
# Function to get mnist iterator
def get_mnist_iterator():
train_set = MNIST(train=True, transform=dummy_transform)
train_iter = gluon.data.DataLoader(train_set, args.batch_size, True, num_workers=args.j, last_batch='discard')
val_set = MNIST(train=False, transform=dummy_transform)
val_iter = gluon.data.DataLoader(val_set, args.batch_size, False, num_workers=0)
return train_iter, val_iter, len(train_set)
# Function to define neural network
def conv_nets():
net = gluon.nn.HybridSequential()
with net.name_scope():
net.add(gluon.nn.Conv2D(channels=20, kernel_size=5, activation='relu'))
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Conv2D(channels=50, kernel_size=5, activation='relu'))
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Flatten())
net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dense(10))
return net
# Function to evaluate accuracy for a model
def evaluate(model, data_iter, context):
metric = mx.metric.Accuracy()
for _, batch in enumerate(data_iter):
data = batch[0].as_in_context(context)
label = batch[1].as_in_context(context)
output = model(data.astype(args.dtype, copy=False))
metric.update([label], [output])
return metric.get()
# Load training and validation data
train_data, val_data, train_size = get_mnist_iterator()
train_data = BPSDatasetLoader(train_data)
# Initialize BytePS
bps.init()
# BytePS: pin context to local rank
context = mx.cpu(bps.local_rank()) if args.no_cuda else mx.gpu(bps.local_rank())
num_workers = bps.size()
# Build model
model = conv_nets()
model.cast(args.dtype)
# Initialize parameters
model.initialize(mx.init.MSRAPrelu(), ctx=context)
# if bps.rank() == 0:
model.summary(nd.ones((1, 1, 28, 28), ctx=mx.gpu(bps.local_rank())))
model.hybridize()
# BytePS: fetch and broadcast parameters
params = model.collect_params()
# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
loss_fn.hybridize(static_alloc=True, static_shape=True)
# BytePS: create DistributedTrainer, a subclass of gluon.Trainer
optimizer_params = {'momentum': args.momentum, 'learning_rate': args.lr * num_workers}
trainer = bps.DistributedTrainer(params, "sgd", optimizer_params, block=model, loss=[loss_fn])
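# Note on the optimizer above: the learning rate is scaled linearly by the number of
# workers (bps.size()), the usual heuristic for synchronous data-parallel SGD, since the
# effective batch size grows with the worker count; DistributedTrainer then takes care
# of synchronizing the gradient updates across workers during trainer.step().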
# Train model
for epoch in range(args.epochs):
tic = time.time()
metric.reset()
for i, batch in enumerate(train_data):
data = batch[0].as_in_context(context)
label = batch[1].as_in_context(context)
with autograd.record():
output = model(data)
loss = loss_fn(output, label)
loss.backward()
trainer.step(args.batch_size)
metric.update([label], [output])
if i % 100 == 0:
name, acc = metric.get()
logging.info('[Epoch %d Batch %d] Training: %s=%f' %
(epoch, i, name, acc))
if bps.rank() == 0:
elapsed = time.time() - tic
speed = train_size * num_workers / elapsed
logging.info('Epoch[%d]\tSpeed=%.2f samples/s\tTime cost=%f',
epoch, speed, elapsed)
# Evaluate model accuracy
_, train_acc = metric.get()
name, val_acc = evaluate(model, val_data, context)
if bps.rank() == 0:
logging.info('Epoch[%d]\tTrain: %s=%f\tValidation: %s=%f', epoch, name,
train_acc, name, val_acc)
if bps.rank() == 0 and epoch == args.epochs - 1:
assert val_acc > 0.96, "Achieved accuracy (%f) is lower than expected\
(0.96)" % val_acc
```
#### File: byteps/launcher/launch.py
```python
from __future__ import print_function
import os
import subprocess
import threading
import sys
import time
import traceback
COMMON_REQUIRED_ENVS = ["DMLC_ROLE", "DMLC_NUM_WORKER", "DMLC_NUM_SERVER",
"DMLC_PS_ROOT_URI", "DMLC_PS_ROOT_PORT"]
WORKER_REQUIRED_ENVS = ["DMLC_WORKER_ID"]
def check_env():
assert "DMLC_ROLE" in os.environ and \
os.environ["DMLC_ROLE"].lower() in ["worker", "server", "scheduler"]
required_envs = COMMON_REQUIRED_ENVS
if os.environ["DMLC_ROLE"] == "worker":
assert "DMLC_NUM_WORKER" in os.environ
num_worker = int(os.environ["DMLC_NUM_WORKER"])
assert num_worker >= 1
if num_worker == 1:
required_envs = []
required_envs += WORKER_REQUIRED_ENVS
for env in required_envs:
if env not in os.environ:
print("The env " + env + " is missing")
os._exit(0)
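# An illustrative single-machine environment that satisfies check_env() for a worker
# (values are placeholders, not a recommended deployment):
#
#   export DMLC_ROLE=worker
#   export DMLC_NUM_WORKER=1
#   export DMLC_NUM_SERVER=1
#   export DMLC_PS_ROOT_URI=127.0.0.1
#   export DMLC_PS_ROOT_PORT=1234
#   export DMLC_WORKER_ID=0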
def worker(local_rank, local_size, command):
my_env = os.environ.copy()
my_env["BYTEPS_LOCAL_RANK"] = str(local_rank)
my_env["BYTEPS_LOCAL_SIZE"] = str(local_size)
if int(os.getenv("BYTEPS_ENABLE_GDB", 0)):
if command.find("python") != 0:
command = "python " + command
command = "gdb -ex 'run' -ex 'bt' -batch --args " + command
if os.environ.get("BYTEPS_TRACE_ON", "") == "1":
print("\n!!!Enable profiling for WORKER_ID: %s and local_rank: %d!!!" % (os.environ.get("DMLC_WORKER_ID"), local_rank))
print("BYTEPS_TRACE_START_STEP: %s\tBYTEPS_TRACE_END_STEP: %s\t BYTEPS_TRACE_DIR: %s" % (os.environ.get("BYTEPS_TRACE_START_STEP", ""), os.environ.get("BYTEPS_TRACE_END_STEP", ""), os.environ.get("BYTEPS_TRACE_DIR", "")))
print("Command: %s\n" % command)
sys.stdout.flush()
        ## To avoid integrating multiple operators into one single event
# \TODO: may influence the performance
my_env["MXNET_EXEC_BULK_EXEC_TRAIN"] = "0"
trace_path = os.path.join(os.environ.get("BYTEPS_TRACE_DIR", "."), str(local_rank))
if not os.path.exists(trace_path):
os.makedirs(trace_path)
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
if __name__ == "__main__":
DMLC_ROLE = os.environ.get("DMLC_ROLE")
print("BytePS launching " + (DMLC_ROLE if DMLC_ROLE else 'None'))
BYTEPS_SERVER_MXNET_PATH = os.getenv("BYTEPS_SERVER_MXNET_PATH")
print("BYTEPS_SERVER_MXNET_PATH: " + (BYTEPS_SERVER_MXNET_PATH if BYTEPS_SERVER_MXNET_PATH else 'None'))
sys.stdout.flush()
check_env()
if os.environ["DMLC_ROLE"] == "worker":
if "NVIDIA_VISIBLE_DEVICES" in os.environ:
local_size = len(os.environ["NVIDIA_VISIBLE_DEVICES"].split(","))
else:
local_size = 1
t = [None] * local_size
for i in range(local_size):
command = ' '.join(sys.argv[1:])
t[i] = threading.Thread(target=worker, args=[i, local_size, command])
t[i].daemon = True
t[i].start()
for i in range(local_size):
t[i].join()
else:
import byteps.server
``` |
{
"source": "joapolarbear/horovod",
"score": 2
} |
#### File: horovod/examples/tensorflow_resnet50.py
```python
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os, sys
import argparse
import horovod.tensorflow as hvd
parser = argparse.ArgumentParser(description='Tensorflow MNIST Example')
parser.add_argument('--tensorboard', action='store_true', default=False,
help='output summary for tensorboard visualization')
parser.add_argument('--amp', action='store_true', default=False,
help='use automatically mixed precision')
args = parser.parse_args()
from google.protobuf.json_format import MessageToJson
from tensorflow.python.client import timeline
import json
import networkx as nx
class TimelineSession:
def __init__(self, sess):
self.sess = sess
self.graph = sess.graph
self.step_cnt = 0
self.trace_dir = os.path.join(os.environ.get("BYTEPS_TRACE_DIR", "."), str(hvd.local_rank()))
if not os.path.exists(self.trace_dir):
os.makedirs(self.trace_dir)
if os.environ.get("BYTEPS_TRACE_ON", "") != '1':
self._end_trace = True
return
self._end_trace = False
self.end_step = int(os.environ.get("BYTEPS_TRACE_END_STEP", "30"))
self.start_step = int(os.environ.get("BYTEPS_TRACE_START_STEP", "20"))
if not self._end_trace and self.start_step < 1:
raise ValueError("BYTEPS_TRACE_START_STEP must be larger than 1")
if not self._end_trace and self.end_step <= self.start_step:
raise ValueError("BYTEPS_TRACE_END_STEP must be larger than BYTEPS_TRACE_START_STEP")
### Timeline configuratoin
self.run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
self.run_metadata = tf.RunMetadata()
self.traces = {"traceEvents":[]}
self.dag = None
def run(self, *args_, **kwargs_):
if self._end_trace:
ret = self.sess.run(*args_, **kwargs_)
elif not self._end_trace and self.step_cnt < self.start_step:
ret = self.sess.run(*args_, **kwargs_)
self.step_cnt += 1
elif not self._end_trace and self.step_cnt < self.end_step:
ret = self.sess.run(*args_, options=self.run_options, run_metadata=self.run_metadata, **kwargs_)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(self.run_metadata.step_stats)
ctf = json.loads(tl.generate_chrome_trace_format())
self.traces["traceEvents"] += ctf["traceEvents"]
print("Add the {}th step of traces".format(self.step_cnt))
self.step_cnt += 1
### Create the DAG
if self.dag is None:
self.dag = nx.DiGraph()
for trace in ctf["traceEvents"]:
if trace["ph"] == "M" or "args" not in trace:
continue
op = trace["args"]["op"]
name = trace["args"]["name"]
### Add nodes to the DAG
if name not in self.dag.nodes:
self.dag.add_node(name)
### Add dependency info
for k, v in trace["args"].items():
if "input" in k:
self.dag.add_edge(v, name)
            # sanity check: the op graph built above must be acyclic
            try:
                not_found = False
                nx.find_cycle(self.dag)  # raises NetworkXNoCycle when the graph has no cycle
            except:
                not_found = True
            assert not_found
### Output traces
if self.step_cnt == self.end_step:
self._end_trace = True
self.output_traces()
### Return all fetches
return ret
def output_traces(self):
with open(os.path.join(self.trace_dir, "temp.json"), "w") as f:
json.dump(self.traces, f, indent=4)
### collect graph info
graphdef = tf.get_default_graph().as_graph_def()
graph_str = json.loads(MessageToJson(graphdef))
with open(os.path.join(self.trace_dir, "graph.json"), "w") as f:
json.dump(graph_str, f, indent=4)
nx.write_gml(self.dag, os.path.join(self.trace_dir, "dag.gml"), lambda x: str(x))
print("Stop tracing, output trace: %s" % self.trace_dir)
def should_stop(self):
return self.sess.should_stop()
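# In short, TimelineSession wraps a tf.Session so that, between BYTEPS_TRACE_START_STEP
# and BYTEPS_TRACE_END_STEP, session.run() is re-issued with FULL_TRACE options; the
# Chrome trace events are accumulated, an op-dependency DAG is derived from them, and
# temp.json, graph.json and dag.gml are written under BYTEPS_TRACE_DIR/<local_rank>.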
hvd.init()
sess = TimelineSession(tf.Session())
from keras import backend as K
K.set_session(sess)
# Load Cifar-10 data-set
(train_im, train_lab), (test_im, test_lab) = tf.keras.datasets.cifar10.load_data()
#### Normalize the images to pixel values (0, 1)
train_im, test_im = train_im/255.0 , test_im/255.0
#### Check the format of the data
print ("train_im, train_lab types: ", type(train_im), type(train_lab))
#### check the shape of the data
print ("shape of images and labels array: ", train_im.shape, train_lab.shape)
print ("shape of images and labels array ; test: ", test_im.shape, test_lab.shape)
#### Check the distribution of unique elements
(unique, counts) = np.unique(train_lab, return_counts=True)
frequencies = np.asarray((unique, counts)).T
print (frequencies)
print (len(unique))
class_types = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck'] # from cifar-10 website
### One hot encoding for labels
train_lab_categorical = tf.keras.utils.to_categorical(
train_lab, num_classes=10, dtype='uint8')
test_lab_categorical = tf.keras.utils.to_categorical(
test_lab, num_classes=10, dtype='uint8')
### Train -test split
from sklearn.model_selection import train_test_split
train_im, valid_im, train_lab, valid_lab = train_test_split(train_im, train_lab_categorical, test_size=0.20,
stratify=train_lab_categorical,
random_state=40, shuffle = True)
print ("train data shape after the split: ", train_im.shape)
print ('new validation data shape: ', valid_im.shape)
print ("validation labels shape: ", valid_lab.shape)
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, ZeroPadding2D,\
Flatten, BatchNormalization, AveragePooling2D, Dense, Activation, Add
from tensorflow.keras.models import Model
from tensorflow.keras import activations
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.regularizers import l2
##### Include Little Data Augmentation
batch_size = 64 # try several values
train_DataGen = tf.keras.preprocessing.image.ImageDataGenerator(zoom_range=0.2,
width_shift_range=0.1,
height_shift_range = 0.1,
horizontal_flip=True)
valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator()
train_set_conv = train_DataGen.flow(train_im, train_lab, batch_size=batch_size) # train_lab is categorical
valid_set_conv = valid_datagen.flow(valid_im, valid_lab, batch_size=batch_size) # so as valid_lab
def half_precision(layer_f, input_, *args_, **kwargs_):
if args.amp:
input_fp16 = tf.keras.backend.cast(input_, dtype="float16")
output_fp16 = layer_f(input_fp16, *args_, **kwargs_)
output_fp32 = tf.keras.backend.cast(output_fp16, dtype="float32")
else:
output_fp32 = layer_f(input_, *args_, **kwargs_)
return output_fp32
def single_precision(layer_f, input_, *args_, **kwargs_):
output_fp32 = layer_f(input_, *args_, **kwargs_)
return output_fp32
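# The two wrappers above act as a per-block mixed-precision switch: with --amp,
# half_precision casts the block input to float16, evaluates the wrapped callable and
# casts the result back to float32, while single_precision simply forwards the call.
# The resnet50() below only ever uses single_precision, so swapping a call such as
# single_precision(res_conv, x, s=1, filters=(64, 256)) for its half_precision
# counterpart is what would actually exercise the float16 path (a sketch of the intent,
# not something done in this script).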
def res_identity(x, filters):
    # resnet block where the dimension does not change.
    # The skip connection is just a simple identity connection;
    # we will have 3 blocks and then the input will be added
x_skip = x # this will be used for addition with the residual block
f1, f2 = filters
#first block
x = Conv2D(f1, kernel_size=(1, 1), strides=(1, 1), padding='valid', kernel_regularizer=l2(0.001))(x)
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
#second block # bottleneck (but size kept same with padding)
x = Conv2D(f1, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_regularizer=l2(0.001))(x)
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
# third block activation used after adding the input
x = Conv2D(f2, kernel_size=(1, 1), strides=(1, 1), padding='valid', kernel_regularizer=l2(0.001))(x)
x = BatchNormalization()(x)
# x = Activation(activations.relu)(x)
# add the input
x = Add()([x, x_skip])
x = Activation(activations.relu)(x)
return x
def res_conv(x, s, filters):
'''
here the input size changes'''
x_skip = x
f1, f2 = filters
# first block
x = Conv2D(f1, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_regularizer=l2(0.001))(x)
# when s = 2 then it is like downsizing the feature map
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
# second block
x = Conv2D(f1, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_regularizer=l2(0.001))(x)
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
#third block
x = Conv2D(f2, kernel_size=(1, 1), strides=(1, 1), padding='valid', kernel_regularizer=l2(0.001))(x)
x = BatchNormalization()(x)
# shortcut
x_skip = Conv2D(f2, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_regularizer=l2(0.001))(x_skip)
x_skip = BatchNormalization()(x_skip)
# add
x = Add()([x, x_skip])
x = Activation(activations.relu)(x)
return x
def resnet50():
input_im = Input(shape=(train_im.shape[1], train_im.shape[2], train_im.shape[3])) # cifar 10 images size
x = ZeroPadding2D(padding=(3, 3))(input_im)
# 1st stage
# here we perform maxpooling, see the figure above
x = single_precision(Conv2D, 64, kernel_size=(7, 7), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
#2nd stage
# frm here on only conv block and identity block, no pooling
x = single_precision(res_conv, x, s=1, filters=(64, 256))
x = res_identity(x, filters=(64, 256))
x = res_identity(x, filters=(64, 256))
# 3rd stage
x = single_precision(res_conv, x, s=2, filters=(128, 512))
x = res_identity(x, filters=(128, 512))
x = res_identity(x, filters=(128, 512))
x = res_identity(x, filters=(128, 512))
# 4th stage
x = single_precision(res_conv, x, s=2, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
# 5th stage
x = single_precision(res_conv, x, s=2, filters=(512, 2048))
x = res_identity(x, filters=(512, 2048))
x = res_identity(x, filters=(512, 2048))
# ends with average pooling and dense connection
x = AveragePooling2D((2, 2), padding='same')(x)
x = Flatten()(x)
x = Dense(len(class_types), activation='softmax', kernel_initializer='he_normal')(x) #multi-class
# define the model
model = Model(inputs=input_im, outputs=x, name='Resnet50')
return model
### Define some Callbacks
def lrdecay(epoch):
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
#print('Learning rate: ', lr)
return lr
# if epoch < 40:
# return 0.01
# else:
# return 0.01 * np.math.exp(0.03 * (40 - epoch))
lrdecay = tf.keras.callbacks.LearningRateScheduler(lrdecay) # learning rate decay
def earlystop(mode):
if mode=='acc':
estop = tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=15, mode='max')
elif mode=='loss':
estop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=15, mode='min')
return estop
resnet50_model = resnet50()
# resnet50_model.summary()
resnet50_model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=1e-3),
metrics=['acc'])
trace_dir = os.path.join(os.environ.get("BYTEPS_TRACE_DIR", "."), str(0))
if not os.path.exists(trace_dir):
os.makedirs(trace_dir)
logdir = os.path.join(trace_dir, "board")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
batch_size=batch_size # test with 64, 128, 256
resnet_train = resnet50_model.fit(train_set_conv,
epochs=160,
steps_per_epoch=train_im.shape[0]/batch_size,
validation_steps=valid_im.shape[0]/batch_size,
validation_data=valid_set_conv,
callbacks=[lrdecay, tensorboard_callback])
``` |
{
"source": "joaquimcampos/deepsplines",
"score": 3
} |
#### File: deepsplines/ds_modules/deepspline_base.py
```python
import torch
import torch.nn as nn
from abc import ABC, abstractproperty, abstractmethod
from deepsplines.ds_utils import spline_grid_from_range
class DeepSplineBase(ABC, nn.Module):
"""
Abstract class for DeepSpline activations (deepReLUspline/deepBspline*)
Args:
mode (str):
'conv' (convolutional) or 'fc' (fully-connected).
num_activations :
number of convolutional filters (if mode='conv');
number of units (if mode='fc').
size (odd int):
number of coefficients of spline grid;
the number of knots K = size - 2.
---- Mutually exclusive arguments ---------------------------
range_ (float):
positive range of the B-spline expansion.
B-splines range = [-range_, range_].
If it is set, the "grid" argument needs to be None,
as it will be computed from size and range_ using
ds_utils.spline_grid_from_range().
grid (float):
spacing between the spline knots.
If given, the "grid" argument needs to be None.
-------------------------------------------------------------
init (str):
Function to initialize activations as (e.g. 'leaky_relu').
For deepBsplines: 'leaky_relu', 'relu' or 'even_odd';
For deepReLUspline: 'leaky_relu', 'relu'.
"""
def __init__(self,
mode,
num_activations,
size=51,
range_=4,
grid=None,
init='leaky_relu',
**kwargs):
if mode not in ['conv', 'fc']:
raise ValueError('Mode should be either "conv" or "fc".')
if int(num_activations) < 1:
raise TypeError('num_activations needs to be a '
'positive integer...')
if int(size) % 2 == 0:
raise TypeError('size should be an odd number.')
if range_ is None:
if grid is None:
raise ValueError('One of the two args (range_ or grid) '
'required.')
elif float(grid) <= 0:
raise TypeError('grid should be a positive float...')
elif grid is not None:
raise ValueError('range_ and grid should not be both set.')
super().__init__()
self.mode = mode
self.size = int(size)
self.num_activations = int(num_activations)
self.init = init
if range_ is None:
self.grid = torch.Tensor([float(grid)])
else:
grid = spline_grid_from_range(size, range_)
self.grid = torch.Tensor([grid])
@property
def device(self):
"""
Get the module's device (torch.device)
Returns the device of the first found parameter.
"""
return getattr(self, next(self.parameter_names())).device
@staticmethod
@abstractmethod
def parameter_names():
""" Yield names of the module parameters """
pass
@abstractproperty
def relu_slopes(self):
""" ReLU slopes of activations """
pass
def reshape_forward(self, input):
"""
Reshape inputs for deepspline activation forward pass, depending on
mode ('conv' or 'fc').
"""
input_size = input.size()
if self.mode == 'fc':
if len(input_size) == 2:
# one activation per conv channel
# transform to 4D size (N, num_units=num_activations, 1, 1)
x = input.view(*input_size, 1, 1)
elif len(input_size) == 4:
# one activation per conv output unit
x = input.view(input_size[0], -1).unsqueeze(-1).unsqueeze(-1)
else:
raise ValueError(f'input size is {len(input_size)}D '
'but should be 2D or 4D...')
else:
assert len(input_size) == 4, \
'input to activation should be 4D (N, C, H, W) if mode="conv".'
x = input
return x
def reshape_back(self, output, input_size):
"""
Reshape back outputs after deepspline activation forward pass,
depending on mode ('conv' or 'fc').
"""
if self.mode == 'fc':
# transform back to 2D size (N, num_units)
output = output.view(*input_size)
return output
def totalVariation(self, **kwargs):
"""
Computes the second-order total-variation regularization.
deepspline(x) = sum_k [a_k * ReLU(x-kT)] + (b1*x + b0)
The regularization term applied to this function is:
TV(2)(deepspline) = ||a||_1.
"""
return self.relu_slopes.norm(1, dim=1)
def get_epsilon_sparsity(self, eps=1e-4):
"""
Computes the number of relus for which |a_k| > eps.
This function acts as a sanity check on the sparsification.
After applying the threshold to the ReLU coefficients, we check that
epsilon_sparsity = threshold_sparsity (check apply_threshold()).
"""
sparsity_mask = ((self.relu_slopes.abs() - eps) > 0.)
sparsity = sparsity_mask.sum(dim=1)
return sparsity, sparsity_mask
def get_threshold_sparsity(self, threshold):
"""
Computes the number of activated relus (|a_k| > threshold)
"""
relu_slopes_abs = self.relu_slopes.abs()
threshold_sparsity_mask = (relu_slopes_abs > threshold)
threshold_sparsity = threshold_sparsity_mask.sum(dim=1)
return threshold_sparsity, threshold_sparsity_mask
def apply_threshold(self, threshold):
"""
Applies a threshold to the activations, eliminating the relu
slopes smaller than a threshold.
deepspline(x) = sum_k [a_k * ReLU(x-kT)] + (b1*x + b0)
This function sets a_k to zero if |a_k| < knot_threshold.
Operations performed:
. [a] = L[c], [a] -> sparsification -> [a_hat].
Args:
threshold (float)
"""
with torch.no_grad():
new_relu_slopes = self.relu_slopes
threshold_sparsity, threshold_sparsity_mask = \
self.get_threshold_sparsity(threshold)
new_relu_slopes[~threshold_sparsity_mask] = 0.
eps = 1e-4
if threshold >= eps:
# sanity test: check that the relu slopes below threshold were
# indeed eliminated, i.e., smaller than epsilon, where
# 0 < epsilon <= threshold.
epsilon_sparsity, epsilon_sparsity_mask = \
self.get_epsilon_sparsity(eps)
assert epsilon_sparsity.sum() == threshold_sparsity.sum()
assert torch.all(
~epsilon_sparsity_mask[~threshold_sparsity_mask])
return new_relu_slopes
def fZerofOneAbs(self, **kwargs):
"""
Computes |f(0)| + |f(1)| where f is a deepspline activation.
Required for the BV(2) regularization.
"""
zero_one_vec = torch.tensor([0, 1]).view(-1, 1).to(self.device)
zero_one_vec = zero_one_vec.expand((-1, self.num_activations))
if self.mode == 'conv':
zero_one_vec = zero_one_vec.unsqueeze(-1).unsqueeze(-1) # 4D
fzero_fone = self.forward(zero_one_vec)
if self.mode == 'conv':
# (2, num_activations)
fzero_fone = fzero_fone.squeeze(-1).squeeze(-1)
assert fzero_fone.size() == (2, self.num_activations)
return fzero_fone.abs().sum(0)
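# A minimal sketch (assuming a torch.nn.Module `net` whose activations are
# DeepSplineBase instances) of how the TV(2) term above is typically
# aggregated into a training loss; `net` and `lmbda` are placeholders.
def _tv2_regularization_sketch(net, lmbda=1e-4):
    """Return lmbda times the summed TV(2) terms of all deepspline modules."""
    reg = 0.
    for module in net.modules():
        if isinstance(module, DeepSplineBase):
            # totalVariation() returns one value per activation; sum them all.
            reg = reg + module.totalVariation().sum()
    return lmbda * reg
# Usage sketch: loss = criterion(net(x), y) + _tv2_regularization_sketch(net)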
```
#### File: deepsplines/networks/convnet_mnist.py
```python
import torch.nn as nn
from deepsplines.ds_modules import BaseModel
class ConvNetMnist(BaseModel):
"""
Simple convolutional network for MNIST digit classification.
MNIST input size: N x 1 x 28 x 28.
Network (layer type -> output size):
conv1 -> (N, c1, 24, 24)
pool2d -> (N, c1, 12, 12)
conv2 -> (N, c2, 8, 8)
pool2d -> (N, c2, 4, 4)
reshape -> (N, c2 * 4 * 4)
fc1 -> N x 10
The convolutions are valid convolutions with filter size = 5.
"""
def __init__(self, **params):
super().__init__(**params)
c1, self.c2 = 2, 2 # conv: number of channels
activation_specs = []
# (in_channels, out_channels, kernel_size)
self.conv1 = nn.Conv2d(1, c1, 5)
activation_specs.append(('conv', c1))
self.pool = nn.MaxPool2d(2, 2) # (kernel_size, stride)
self.conv2 = nn.Conv2d(c1, self.c2, 5)
activation_specs.append(('conv', self.c2))
self.fc1 = nn.Linear(self.c2 * 4 * 4, 10)
self.activations = self.init_activation_list(activation_specs)
self.num_params = self.get_num_params()
def forward(self, x):
""" """
x = self.pool(self.activations[0](self.conv1(x)))
x = self.pool(self.activations[1](self.conv2(x)))
x = x.view(-1, self.c2 * 4 * 4)
x = self.fc1(x)
return x
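# Quick shape check (assuming the 28 x 28 MNIST input above): valid 5x5
# convolutions and 2x2 max-pooling give 28 -> 24 -> 12 -> 8 -> 4, which is
# why fc1 expects c2 * 4 * 4 input features.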
```
#### File: deepsplines/deepsplines/project.py
```python
import os
import sys
import glob
import math
import collections
import itertools
import torch
from abc import ABC, abstractproperty
from deepsplines.datasets import init_dataset
from deepsplines.dataloader import DataLoader
from deepsplines.ds_utils import size_str
from deepsplines.ds_utils import dict_recursive_merge, flatten_structure
from deepsplines.ds_utils import json_load, json_dump
class Project(ABC):
train_results_json_filename = 'train_results.json'
test_results_json_filename = 'test_results.json'
train_sorting_key = 'latest_valid_acc'
test_sorting_key = 'test_acc'
def __init__(self, params, user_params):
self.params = params
self.user_params = user_params
self.training = (self.params["mode"] == 'train')
self.log_dir_model = os.path.join(self.params["log_dir"],
self.params["model_name"])
self.best_train_acc = 0.
self.best_valid_acc = 0.
if self.training:
self.start_epoch, self.global_step = 0, 0
self.dataset = init_dataset(**self.params['dataset'])
self.init_dataloader()
def init_dataloader(self):
"""
Initialize dataloader.
"""
# Load the data
print('\n==> Loading the data...')
self.dataloader = DataLoader(self.dataset, **self.params['dataloader'])
self.trainloader, self.validloader = \
self.dataloader.get_train_valid_loader()
self.testloader = self.dataloader.get_test_loader()
self.save_train_info()
def save_train_info(self):
""" """
assert (self.trainloader is not None)
if self.dataset.is_user_dataset is True:
self.num_train_samples = sum(
inputs.size(0) for inputs, _ in self.trainloader)
else:
self.num_train_samples = len(self.trainloader.sampler)
self.num_train_batches = \
math.ceil(self.num_train_samples / self.dataloader.batch_size)
# TODO: do this just with the model and optimizer states
@abstractproperty
def net(self):
pass
@abstractproperty
def main_optimizer(self):
pass
@abstractproperty
def main_scheduler(self):
pass
@abstractproperty
def aux_optimizer(self):
pass
@abstractproperty
def aux_scheduler(self):
pass
def init_log(self):
"""
Create Log directory for training the model as
self.params["log_dir"]/self.params["model_name"].
"""
if not os.path.isdir(self.log_dir_model):
os.makedirs(self.log_dir_model)
def init_device(self):
""" """
if self.params['device'].startswith('cuda'):
if torch.cuda.is_available():
self.device = 'cuda:0' # Using GPU0 by default
print('\nUsing GPU.')
else:
self.device = 'cpu'
print('\nCUDA not available. Using CPU.')
else:
self.device = 'cpu'
print('\nUsing CPU.')
@property
def results_json_filename(self):
"""
Name of json file with logged results.
"""
if self.training is True:
return self.train_results_json_filename
else:
return self.test_results_json_filename
@property
def sorting_key(self):
"""
Key for sorting models in json file.
"""
if self.training:
return self.train_sorting_key
else:
return self.test_sorting_key
def init_json(self):
"""
Init json file for train/test results.
"""
# initialize/verify json log file
self.results_json = os.path.join(self.params['log_dir'],
self.results_json_filename)
if not os.path.isfile(self.results_json):
results_dict = {}
else:
results_dict = json_load(self.results_json)
if self.params['model_name'] not in results_dict:
# initialize model log
results_dict[self.params['model_name']] = {}
# add minimal information for sorting models in results_json file
if self.sorting_key not in results_dict[self.params['model_name']]:
results_dict[self.params['model_name']][self.sorting_key] = 0.
json_dump(results_dict, self.results_json)
comb_list = list(
itertools.product(['latest', 'best'], ['train', 'valid'],
['acc', 'loss']))
self.info_list = ['_'.join(k)
for k in comb_list] + ['test_acc', 'test_loss']
def update_json(self, info, value):
"""
Update json file with latest/best validation/test accuracy/loss,
if training, and with test accuracy otherwise.
Args:
info (str):
e.g. 'latest_valid_loss', 'best_train_acc'.
value (float):
value for the given info.
"""
assert info in self.info_list, \
f'{info} should be in {self.info_list}...'
# save in json
results_dict = json_load(self.results_json)
if isinstance(value, dict):
if info not in results_dict[self.params["model_name"]]:
results_dict[self.params["model_name"]][info] = {}
for key, val in value.items():
results_dict[self.params["model_name"]][info][key] = \
float('{:.3f}'.format(val))
else:
results_dict[self.params["model_name"]][info] = \
float('{:.3f}'.format(value))
sorted_acc = sorted(results_dict.items(),
key=lambda kv: kv[1][self.sorting_key],
reverse=True)
sorted_results_dict = collections.OrderedDict(sorted_acc)
json_dump(sorted_results_dict, self.results_json)
@property
def load_ckpt(self):
"""
Returns True if loading a checkpoint and restoring its parameters,
for resuming training or testing a model. Otherwise, returns False.
"""
if (self.params["ckpt_filename"]
is not None) or (self.params["resume"] is True):
return True
else:
return False
def restore_ckpt_params(self):
"""
Attempts to restore a checkpoint if resuming training or testing
a model.
If successful, it gets the loaded checkpoint and merges the saved
parameters.
Returns True if a checkpoint was successfully loaded,
and False otherwise.
"""
if self.params["ckpt_filename"] is not None:
try:
self.load_merge_params(self.params["ckpt_filename"])
except FileNotFoundError:
print('\nCheckpoint file not found... Unable to '
'restore model.\n')
raise
except BaseException:
print('\nUnknown error in restoring model.')
raise
print('\nSuccessfully loaded ckpt ' + self.params["ckpt_filename"])
return True
elif self.params["resume"] is True:
log_dir_model = os.path.join(self.params["log_dir"],
self.params["model_name"])
if self.params["resume_from_best"] is True:
regexp_ckpt = os.path.join(log_dir_model,
'*_best_valid_acc.pth')
else:
regexp_ckpt = os.path.join(log_dir_model, '*_net_*.pth')
files = glob.glob(regexp_ckpt)
# sort by time from oldest to newest
files.sort(key=os.path.getmtime)
if len(files) > 0:
print('\nRestoring model from {}.'.format(files[-1]))
# restore from most recent file
self.load_merge_params(files[-1])
return True
else:
print('\nNo model saved to resume training. '
'Starting from scratch.')
return False
else:
print('\nStarting from scratch.')
return False
def load_merge_params(self, ckpt_filename):
"""
Load and merge the parameters from ckpt_filename into self.params
and save the loaded checkpoint (dictionary).
The parameters introduced by the user (via command-line arguments)
override the corresponding saved parameters. The ones not specified
by the user, are loaded from the checkpoint.
Args:
ckpt_filename (str): Name of checkpoint (.pth) file.
"""
torch.load(ckpt_filename, map_location=lambda storage, loc: storage)
ckpt = self.get_loaded_ckpt(ckpt_filename)
self.loaded_ckpt = ckpt # save loaded_ckpt for restore_model
saved_params = ckpt['params']
# merge w/ saved params
self.params = dict_recursive_merge(self.params, saved_params)
# merge w/ user params (precedence over saved)
self.params = dict_recursive_merge(self.params, self.user_params)
def restore_model(self):
""" """
self.load_model(self.loaded_ckpt)
if self.training and self.start_epoch == self.params["num_epochs"]:
print('\nTraining in this checkpoint is already completed. '
'Please increase the number of epochs.')
sys.exit()
def load_model(self, ckpt):
"""
Load model from a loaded checkpoint.
Args:
ckpt (dictionary): loaded checkpoint.
"""
print('\n==> Resuming from checkpoint...')
self.net.load_state_dict(ckpt['model_state'],
strict=(self.training is True))
self.best_train_acc = ckpt['best_train_acc']
self.best_valid_acc = ckpt['best_valid_acc']
if self.training:
self.start_epoch = ckpt['num_epochs_finished']
self.global_step = ckpt['global_step']
self.main_optimizer.load_state_dict(ckpt['main_optimizer_state'])
if ckpt['aux_optimizer_state'] is not None:
self.aux_optimizer.load_state_dict(ckpt['aux_optimizer_state'])
if 'main_scheduler_state' in ckpt:
self.main_scheduler.load_state_dict(
ckpt['main_scheduler_state'])
if ckpt['aux_scheduler_state'] is not None:
self.aux_scheduler.load_state_dict(
ckpt['aux_scheduler_state'])
return
@staticmethod
def get_loaded_ckpt(ckpt_filename):
"""
Returns a loaded checkpoint (ckpt dictionary)
from ckpt_filename, if it exists.
Args:
ckpt_filename (str): Name of checkpoint (.pth) file.
"""
try:
# TODO: Check if model is always loaded on cpu.
# Use net.to(device) after.
ckpt = torch.load(ckpt_filename,
map_location=lambda storage, loc: storage)
except FileNotFoundError:
print('\nCheckpoint file not found... Unable '
'to load checkpoint.\n')
raise
except BaseException:
print('\nUnknown error in loading checkpoint parameters.')
raise
return ckpt
@classmethod
def load_ckpt_params(cls, ckpt_filename, flatten=False):
"""
Returns the ckpt dictionary and the parameters saved
in a checkpoint file.
Args:
ckpt_filename (str):
Name of checkpoint (.pth) file.
flatten (bool):
whether to flatten the structure of the parameters dictionary
into a single level
(see structure in struct_default_values.py).
"""
ckpt = cls.get_loaded_ckpt(ckpt_filename)
params = ckpt['params']
if flatten is True:
params = flatten_structure(params)
return ckpt, params
@staticmethod
def get_ckpt_from_log_dir_model(log_dir_model):
"""
Get last ckpt from log_dir_model (log_dir/model_name).
"""
regexp_ckpt = os.path.join(log_dir_model, '*_net_*.pth')
files = glob.glob(regexp_ckpt)
files.sort(key=os.path.getmtime) # sort by time from oldest to newest
if len(files) > 0:
ckpt_filename = files[-1]
print(f'Restoring {ckpt_filename}')
return ckpt_filename
else:
print(f'No ckpt found in {log_dir_model}...')
return None
@classmethod
def load_results_dict(cls, log_dir, mode='train'):
"""
Load train or test results from the corresponding
json file in log_dir.
Args:
log_dir (str):
log directory where results json file is located.
mode (str):
'train' or 'test'.
Returns:
results_dict (dict): dictionary with train/test results.
"""
assert mode in ['train', 'test'], 'mode should be "train" or "test"...'
if mode == 'train':
results_json_filename = cls.train_results_json_filename
else:
results_json_filename = cls.test_results_json_filename
results_json = os.path.join(log_dir, results_json_filename)
results_dict = json_load(results_json)
return results_dict
@classmethod
def dump_results_dict(cls, results_dict, log_dir, mode='train'):
"""
Dump results dictionary in the train or test results json file
in log_dir.
Args:
results_dict (dict):
dictionary with train/test results.
log_dir (str):
log directory where results json file is located.
mode (str):
'train' or 'test'.
"""
assert mode in ['train', 'test'], 'mode should be "train" or "test"...'
if mode == 'train':
results_json_filename = cls.train_results_json_filename
else:
results_json_filename = cls.test_results_json_filename
results_json = os.path.join(log_dir, results_json_filename)
json_dump(results_dict, results_json)
@classmethod
def get_best_model(cls, log_dir, mode='train'):
"""
Get the name and checkpoint filename of the best model
(best validation/test) from the train/test results json file.
Args:
log_dir (str):
log directory where results json file is located.
mode (str):
'train' or 'test'.
"""
results_dict = cls.load_results_dict(log_dir, mode)
# models are ordered by validation accuracy; choose first one.
best_model_name = next(iter(results_dict))
log_dir_best_model = os.path.join(log_dir, best_model_name)
ckpt_filename = cls.get_ckpt_from_log_dir_model(log_dir_best_model)
return best_model_name, ckpt_filename
def train_log_step(self, epoch, batch_idx, train_acc, losses_dict):
"""
Log the training.
Args:
epoch (int):
current epoch.
batch_idx (int):
current batch.
train_acc (float):
computed train accuracy.
losses_dict (dict):
A dictionary of the form {loss name (str) : loss value (float)}
"""
print('[{:3d}, {:6d} / {:6d}] '.format(epoch + 1, batch_idx + 1,
self.num_train_batches),
end='')
for key, value in losses_dict.items():
print('{}: {:7.3f} | '.format(key, value), end='')
print('train acc: {:7.3f}%'.format(train_acc))
self.update_json('latest_train_loss', losses_dict)
self.update_json('latest_train_acc', train_acc)
if train_acc > self.best_train_acc:
self.best_train_acc = train_acc
self.update_json('best_train_acc', train_acc)
def valid_log_step(self, epoch, valid_acc, losses_dict):
"""
Log the validation.
Args:
epoch (int):
current epoch.
valid_acc (float):
computed validation accuracy.
losses_dict (dict):
A dictionary of the form {loss name (str) : loss value (float)}
"""
print('\nvalidation_step : ', end='')
for key, value in losses_dict.items():
print('{}: {:7.3f} | '.format(key, value), end='')
print('valid acc: {:7.3f}%'.format(valid_acc), '\n')
self.update_json('latest_valid_loss', losses_dict)
self.update_json('latest_valid_acc', valid_acc)
if valid_acc > self.best_valid_acc:
self.best_valid_acc = valid_acc
self.update_json('best_valid_acc', valid_acc)
def ckpt_log_step(self, epoch, valid_acc):
"""
Save the model in a checkpoint.
Only allow at most params['ckpt_nmax_files'] checkpoints.
Delete the oldest checkpoint, if necessary.
Also log the best results so far in a separate checkpoint.
Args:
epoch (int):
current epoch.
valid_acc (float):
computed validation accuracy.
"""
base_ckpt_filename = os.path.join(
self.log_dir_model,
self.params["model_name"] + '_net_{:04d}'.format(epoch + 1))
regexp_ckpt = os.path.join(self.log_dir_model, "*_net_*.pth")
regexp_best_valid_acc_ckpt = os.path.join(self.log_dir_model,
"*_best_valid_acc.pth")
# save checkpoint as *_net_{epoch+1}.pth
ckpt_filename = base_ckpt_filename + '.pth'
# remove best_valid_acc ckpt from files
files = list(
set(glob.glob(regexp_ckpt)) -
set(glob.glob(regexp_best_valid_acc_ckpt)))
# sort from newest to oldest
files.sort(key=os.path.getmtime, reverse=True)
if (not self.params["ckpt_nmax_files"] < 0) and \
(len(files) >= self.params["ckpt_nmax_files"]):
assert len(files) == (self.params["ckpt_nmax_files"]), \
'There are more than (ckpt_nmax_files+1) ' \
'*_net_*.pth checkpoints.'
filename = files[-1]
os.remove(filename)
self.save_network(ckpt_filename, epoch, valid_acc)
if valid_acc == self.best_valid_acc:
# if valid_acc = best_valid_acc, also save checkpoint as
# *_net_{global_step}_best_valid_acc.pth
# and delete previous best_valid_acc checkpoint
best_valid_acc_ckpt_filename = \
base_ckpt_filename + '_best_valid_acc.pth'
files = glob.glob(regexp_best_valid_acc_ckpt)
if len(files) > 0:
assert len(files) == 1, \
'More than one *_best_valid_acc.pth checkpoint.'
os.remove(files[0])
self.save_network(best_valid_acc_ckpt_filename, epoch, valid_acc)
return
def save_network(self, ckpt_filename, epoch, valid_acc):
"""
Save the network in a checkpoint.
Args:
ckpt_filename (str):
Name of checkpoint (.pth) file.
epoch (int):
current epoch.
valid_acc (float):
computed validation accuracy.
"""
state = {
'model_state': self.net.state_dict(),
'main_optimizer_state': self.main_optimizer.state_dict(),
'main_scheduler_state': self.main_scheduler.state_dict(),
'params': self.params,
'best_train_acc': self.best_train_acc,
'best_valid_acc': self.best_valid_acc,
'valid_acc': valid_acc,
'num_epochs_finished': epoch + 1,
'global_step': self.global_step
}
if self.aux_optimizer is not None:
state['aux_optimizer_state'] = self.aux_optimizer.state_dict()
state['aux_scheduler_state'] = self.aux_scheduler.state_dict()
else:
state['aux_optimizer_state'] = None
state['aux_scheduler_state'] = None
torch.save(state, ckpt_filename)
return
def print_train_info(self):
""" """
assert (self.validloader is not None)
assert hasattr(self, 'num_train_samples')
assert hasattr(self, 'num_train_batches')
if self.dataset.is_user_dataset is True:
num_valid_samples = sum(
inputs.size(0) for inputs, _ in self.validloader)
sample_data, sample_target = self.trainloader[0]
else:
num_valid_samples = len(self.validloader.sampler)
# dataloader iterator to get next sample
dataiter = iter(self.trainloader)
sample_data, sample_target = next(dataiter)
num_valid_batches = \
math.ceil(num_valid_samples / self.dataloader.batch_size)
print('\n==> Train info:')
print('batch (data, target) size : '
f'({size_str(sample_data)}, {size_str(sample_target)}).')
print('no. of (train, valid) samples : '
f'({self.num_train_samples}, {num_valid_samples}).')
print('no. of (train, valid) batches : '
f'({self.num_train_batches}, {num_valid_batches}).')
def print_test_info(self):
""" """
assert (self.testloader is not None)
if self.dataset.is_user_dataset is True:
num_test_samples = sum(
inputs.size(0) for inputs, _ in self.testloader)
sample_data, sample_target = self.testloader[0]
else:
num_test_samples = len(self.testloader.dataset)
# dataloader iterator to get next sample
dataiter = iter(self.testloader)
sample_data, sample_target = next(dataiter)
num_test_batches = math.ceil(num_test_samples /
self.dataloader.batch_size)
print('\n==> Test info:')
print('batch (data, target) size : '
f'({size_str(sample_data)}, {size_str(sample_target)}).')
print(f'no. of test samples : {num_test_samples}.')
print(f'no. of test batches : {num_test_batches}.')
def print_optimization_info(self):
""" """
print('\n==> Optimizer info:')
print('--Main Optimizer:')
print(self.main_optimizer)
if self.aux_optimizer is not None:
print('--Aux Optimizer :')
print(self.aux_optimizer)
# scheduler
scheduler_list = [self.main_scheduler, self.aux_scheduler]
scheduler_name_list = ['Main', 'Aux']
for scheduler, aux_str in zip(scheduler_list, scheduler_name_list):
if scheduler is not None:
print('--' + aux_str + ' Scheduler : ')
print(f'class - {type(scheduler).__name__}; '
f'milestones - {scheduler.milestones}; '
f'gamma - {scheduler.gamma}.')
def log_additional_info(self):
""" Log additional information to self.results_json
"""
# TODO: Review this
if not self.params['additional_info']: # empty list
return
results_dict = json_load(self.results_json)
if 'sparsity' in self.params['additional_info']:
results_dict[self.params['model_name']]['sparsity'] = \
'{:d}'.format(self.net.compute_sparsity())
if 'lipschitz_bound' in self.params['additional_info']:
results_dict[self.params['model_name']]['lipschitz_bound'] = \
'{:.3f}'.format(self.net.lipschitz_bound())
json_dump(results_dict, self.results_json)
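# A minimal read-back sketch using the classmethods above (the log
# directory name 'my_log_dir' is a placeholder):
#
#   results = Project.load_results_dict('my_log_dir', mode='train')
#   best_name, best_ckpt = Project.get_best_model('my_log_dir', mode='train')
#   ckpt, params = Project.load_ckpt_params(best_ckpt, flatten=True)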
```
#### File: deepsplines/scripts/deepsplines_tutorial.py
```python
import time
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Need to import dsnn (takes the role of torch.nn for DeepSplines)
from deepsplines.ds_modules import dsnn
########################################################################
# ReLU network
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.num_params = sum(p.numel() for p in self.parameters())
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
########################################################################
# Deepspline network
# We now show how to replace the ReLUs with DeepSpline activations in
# the previous network.
# we can use deepspline modules of three types:
# - DeepBspline
# - DeepBSplineExplicitLinear
# - DeepReLUSpline
# In this tutorial, we use the first as an example.
# The model needs to inherit from dsnn.DSModule. This is a wrap around
# nn.Module that contains all the DeepSpline functionality.
class DSNet(dsnn.DSModule):
def __init__(self):
super().__init__()
# we put the deepsplines (ds) of the convolutional and fully-connected
# layers in two separate nn.ModuleList() for simplicity.
self.conv_ds = nn.ModuleList()
self.fc_ds = nn.ModuleList()
# We define some optional parameters for the deepspline
# (see DeepBSpline.__init__())
opt_params = {
'size': 51,
'range_': 4,
'init': 'leaky_relu',
'save_memory': False
}
self.conv1 = nn.Conv2d(3, 6, 5)
# 1st parameter (mode): 'conv' (convolutional) / 'fc' (fully-connected)
# 2nd parameter: nb. channels (mode='conv') / nb. neurons (mode='fc')
self.conv_ds.append(dsnn.DeepBSpline('conv', 6, **opt_params))
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.conv_ds.append(dsnn.DeepBSpline('conv', 16, **opt_params))
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc_ds.append(dsnn.DeepBSpline('fc', 120, **opt_params))
self.fc2 = nn.Linear(120, 84)
self.fc_ds.append(dsnn.DeepBSpline('fc', 84, **opt_params))
self.fc3 = nn.Linear(84, 10)
self.initialization(opt_params['init'], init_type='He')
self.num_params = self.get_num_params()
def forward(self, x):
x = self.pool(self.conv_ds[0](self.conv1(x)))
x = self.pool(self.conv_ds[1](self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = self.fc_ds[0](self.fc1(x))
x = self.fc_ds[1](self.fc2(x))
x = self.fc3(x)
return x
if __name__ == "__main__":
########################################################################
# Load the data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
batch_size = 4
trainset = torchvision.datasets.CIFAR10(root='./data',
train=True,
download=True,
transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=batch_size,
shuffle=True,
num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform)
testloader = torch.utils.data.DataLoader(testset,
batch_size=batch_size,
shuffle=False,
num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f'\nDevice: {device}')
########################################################################
# Network, optimizer, loss
net = Net() # relu network
net.to(device)
print('ReLU: nb. parameters - {:d}'.format(net.num_params))
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
dsnet = DSNet() # deepsplines network
dsnet.to(device)
print('DeepSpline: nb. parameters - {:d}'.format(dsnet.num_params))
# For the parameters of the deepsplines, an optimizer different from "SGD"
# is usually required for stability during training (Adam is recommended).
# Therefore, when using an SGD optimizer for the network parameters, we
# require an auxiliary one for the deepspline parameters.
# Inheriting from DSModule allows us to use the parameters_deepspline()
# and parameters_no_deepspline() methods for this.
main_optimizer = optim.SGD(dsnet.parameters_no_deepspline(),
lr=0.001,
momentum=0.9)
aux_optimizer = optim.Adam(dsnet.parameters_deepspline())
criterion = nn.CrossEntropyLoss()
########################################################################
# Training the ReLU network
print('\nTraining ReLU network.')
start_time = time.time()
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
end_time = time.time()
print('Finished Training ReLU network. \n'
'Took {:d} seconds. '.format(int(end_time - start_time)))
########################################################################
# Training the DeepSpline network
# Note: Since the original network is small, the time it takes to train
# deepsplines is significantly larger.
# Regularization weight for the TV(2)/BV(2) regularization
# Needs to be tuned for performance
lmbda = 1e-4
# lipschitz control: if True, BV(2) regularization is used instead of TV(2)
lipschitz = False
print('\nTraining DeepSpline network.')
start_time = time.time()
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
main_optimizer.zero_grad()
aux_optimizer.zero_grad()
# forward + backward + optimize
outputs = dsnet(inputs)
loss = criterion(outputs, labels)
# add regularization loss
if lipschitz is True:
loss = loss + lmbda * dsnet.BV2()
else:
loss = loss + lmbda * dsnet.TV2()
loss.backward()
main_optimizer.step()
aux_optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
end_time = time.time()
print('Finished Training DeepSpline network. \n'
'Took {:d} seconds. '.format(int(end_time - start_time)))
########################################################################
# Testing the ReLU and DeepSpline networks
for model, name in zip([net, dsnet], ['ReLU', 'DeepSpline']):
print(f'\nTesting {name} network.')
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients
# for our outputs
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
# calculate outputs by running images through the network
outputs = model(images)
# the class with the highest energy is what we choose
# as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f'Accuracy of the {name} network '
'on the 10000 test images: %d %%' % (100 * correct / total))
```
#### File: deepsplines/scripts/plot_deepspline_basis.py
```python
import os
import argparse
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
def Bspline(x, center=0, grid=1, coeff=1, mode='both'):
"""
Evaluates a B-spline basis element at x.
Args:
x (np.array): input locations.
center (float): center of the basis function.
grid (float): grid spacing (determines width of B-spline).
coeff (float): coefficient of the B-spline (height).
Returns:
y (np.array): of the same size as x.
"""
assert mode in ['both', 'left', 'right']
y = np.zeros(x.shape)
if not (mode == 'right'):
left_idx = (x > (center - grid)) * (x <= center)
y[left_idx] = (x[left_idx] - (center - grid)) / grid
if not (mode == 'left'):
right_idx = (x < (center + grid)) * (x >= center)
y[right_idx] = ((center + grid) - x[right_idx]) / grid
return y * coeff # basis * coefficient
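# Worked example: with the defaults (center=0, grid=1) the function above is
# a unit hat supported on (-1, 1), so e.g.
#   Bspline(np.array([0.0, 0.5, 1.0]), coeff=2.0)
# evaluates to [2.0, 1.0, 0.0].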
def plot_deepspline_basis(args):
"""
Args:
args: verified arguments from arparser
"""
plt.figure()
ax = plt.gca()
# (B-spline expansion range, grid spacing,
# nb. plot points, extrapolation range)
range_, grid, nb_points, extrap = 3, 1, 10001, 2
# the total plot x axis range is then [-5, 5] = [-(range_+extrap),
# (range+extrap)]
# for B-spline expansion
x_middle = np.linspace(-range_, range_, nb_points)
# for linear extrapolations outside B-spline range
x_left = np.linspace(-(range_ + extrap), -range_, nb_points)
x_right = np.linspace(range_, (range_ + extrap), nb_points)
# grid for plotting B-spline elements in [-3, 3]
grid_points = np.arange(-range_ - 1, range_ + 2, grid)
# grid for plotting boundary elements in [-5, -3] and [3, 5]
left_grid_points = np.arange(-(range_ + extrap), -range_ + 1, grid)
right_grid_points = np.arange(range_, range_ + extrap + 1, grid)
# B-spline coefficients
coeff = np.array([4.5, 3.3, 5.3, 2.3, 3.3, 1.3, 4.5, 3.5, 3.1])
# left and right linear extrapolations at grid locations in [-5, -3] and
# [3, 5]
left_extrap = (coeff[0] - coeff[1]) * \
np.array(list(range(0, extrap + 1)))[::-1] + coeff[1]
right_extrap = (coeff[-1] - coeff[-2]) * \
np.array(list(range(0, extrap + 1))) + coeff[-2]
# values of boundary basis at grid locations in [-5, -3] and [3, 5]
right_straight = np.ones(extrap + 1) * coeff[-2]
left_straight = np.ones(extrap + 1) * coeff[1]
left_relu = (coeff[0] - coeff[1]) * \
np.array(list(range(0, extrap + 1)))[::-1]
right_relu = (coeff[-1] - coeff[-2]) * np.array(list(range(0, extrap + 1)))
# B-spline expansion function
f = interp1d(grid_points, coeff)
# extrapolation functions
f_left = interp1d(left_grid_points, left_extrap)
f_right = interp1d(right_grid_points, right_extrap)
# boundary functions
f_left_straight = interp1d(left_grid_points, left_straight)
f_right_straight = interp1d(right_grid_points, right_straight)
f_left_relu = interp1d(left_grid_points, left_relu)
f_right_relu = interp1d(right_grid_points, right_relu)
# Move left y-axis and bottom x-axis to centre, passing through (0,0)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.yaxis.set_ticks_position('left')
ax.set_yticks([1, 2, 4, 5])
ax.set_xticks([-4, -3, -2, -1, 1, 2, 3, 4])
ax.set_xticklabels(np.concatenate((np.arange(-4, 0), np.arange(1, 5))),
fontdict={
'horizontalalignment': 'center',
'fontsize': 10},
minor=False)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.tick_params(axis='both', which='minor', labelsize=12)
if args.no_ticks is True:
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.set_xticks([])
ax.set_yticks([])
# draw B-spline (triangular-shaped) basis elements
for i, center in enumerate(grid_points):
mode = 'both'
if i == 0 or i == (grid_points.shape[0] - 1):
# skip (boundaries)
continue
elif i == 1:
# first B-spline basis: only right part is drawn
mode = 'right'
elif i == (grid_points.shape[0] - 2):
# last B-spline basis: only left part is drawn
mode = 'left'
# evaluate B-spline basis element on a grid
bspline_x = np.linspace(-(range_ + 2) + i * grid,
-(range_ + 2) + (i + 2) * grid, nb_points)
bspline_y = Bspline(bspline_x, center, grid, coeff[i], mode=mode)
if mode == 'left':
center_idx = bspline_x.shape[0] // 2
# draws left part of the last B-spline basis element
plt.plot(bspline_x[:center_idx:],
bspline_y[:center_idx:],
color='lightsteelblue',
ls='--')
elif mode == 'right':
center_idx = bspline_x.shape[0] // 2
# draws right part of the first B-spline basis element
plt.plot(bspline_x[center_idx::],
bspline_y[center_idx::],
color='lightsteelblue',
ls='--')
else:
# draws full B-spline basis element
plt.plot(bspline_x, bspline_y, color='crimson', ls='--')
# plot B-spline expansion
plt.plot(x_middle, f(x_middle), color='black')
# plot linear extrapolations
plt.plot(x_left, f_left(x_left), color='black')
plt.plot(x_right, f_right(x_right), color='black')
# plot boundary elements
plt.plot(x_left, f_left_straight(x_left), color='lightsteelblue', ls='--')
plt.plot(x_right,
f_right_straight(x_right),
color='lightsteelblue',
ls='--')
plt.plot(x_left, f_left_relu(x_left), color='lightsteelblue', ls='--')
plt.plot(x_right, f_right_relu(x_right), color='lightsteelblue', ls='--')
plt.xlim(-(range_ + extrap - 0.2), (range_ + extrap - 0.2))
plt.ylim(-0.8, 5.5)
plt.gca().set_position([0, 0, 1, 1])
if args.save_dir is not None:
plt.savefig(os.path.join(args.save_dir, 'deepspline_basis.png'))
plt.show()
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description='Plot finite spline representation.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--save_dir',
metavar='[STR]',
type=str,
help='directory for saving plots. If not given, plots are not saved.')
parser.add_argument(
'--no_ticks',
action='store_true',
help='Do not add ticks to plot.')
args = parser.parse_args()
if args.save_dir is not None and not os.path.isdir(args.save_dir):
raise OSError(f'Save directory {args.save_dir} does not exist.')
plot_deepspline_basis(args)
```
#### File: deepsplines/scripts/sparsify_with_optimal_knot_threshold.py
```python
import os
import sys
import argparse
import copy
import shutil
import numpy as np
from deepsplines.ds_utils import ArgCheck
from deepsplines.project import Project
from deepsplines.main import main_prog
def delete_model(log_dir, model_name, results_dict):
# delete model directory
log_dir_model = os.path.join(log_dir, model_name)
assert os.path.isdir(log_dir_model)
shutil.rmtree(log_dir_model)
# delete model entry in train_result.json
del results_dict[model_name]
Project.dump_results_dict(results_dict, log_dir)
def sparsify_with_optimal_knot_threshold(args):
"""
Args:
args: verified arguments from arparser
"""
ckpt, params = Project.load_ckpt_params(args.ckpt_filename, flatten=True)
if 'deep' not in params['activation_type']:
raise ValueError('This ckpt contains activations of type '
'{params["activation_type"]} and not deepsplines.')
base_model_name = params['model_name']
print('\n=> Compute optimal threshold/sparsity '
f'for model: {base_model_name}.')
# take the trained model, do one more epoch with a slope threshold
# applied and the model frozen, and compute the train accuracy.
params['resume'] = True
params['num_epochs'] = ckpt['num_epochs_finished']
params['log_step'] = None # at every epoch
params['valid_log_step'] = -1 # at every epoch
params['log_dir'] = args.out_log_dir
params['ckpt_filename'] = args.ckpt_filename
# also log sparsity and lipschitz bound
params['additional_info'] = ['sparsity']
if params['lipschitz'] is True:
params['additional_info'].append('lipschitz_bound')
# variable initialization
base_train_acc = 0.
prev_model_name = None # previous model name
chosen_model, chosen_threshold = None, None
# training accuracy maximum allowed percentage drop
acc_drop_threshold = args.acc_drop_threshold
threshold_list = np.concatenate(
(np.zeros(1), np.arange(0.0002, 0.004,
0.0002), np.arange(0.004, 1, 0.05),
np.arange(1, 3, 0.2), np.arange(3, 10, 0.5), np.arange(10, 100, 2)))
for k in range(threshold_list.shape[0]):
threshold = threshold_list[k]
params['model_name'] = base_model_name + \
'_knot_threshold_{:.4f}'.format(threshold)
params['knot_threshold'] = threshold
sys.stdout = open(os.devnull, "w")
main_prog(copy.deepcopy(params), isloaded_params=True)
sys.stdout = sys.__stdout__
results_dict = Project.load_results_dict(args.out_log_dir)
model_dict = results_dict[params['model_name']]
if k == 0:
assert np.allclose(threshold, 0)
# TODO: Abstract from 'latest_train_acc'
base_train_acc = model_dict['latest_train_acc']
acc_drop = np.clip((model_dict['latest_train_acc'] - base_train_acc),
a_max=100,
a_min=-100)
print('\nThreshold: {:.4f}'.format(threshold))
print('Accuracy drop: {:.3f}%'.format(acc_drop))
print('Sparsity: {:d}'.format(int(model_dict['sparsity'])))
if acc_drop < acc_drop_threshold or model_dict['sparsity'] == 0:
# delete current model; chosen_model is the previous one
delete_model(args.out_log_dir, params['model_name'], results_dict)
break
else:
if k > 0:
# delete previous model
delete_model(args.out_log_dir, prev_model_name, results_dict)
chosen_model = {params['model_name']: model_dict}
chosen_threshold = params['knot_threshold']
prev_model_name = params['model_name']
assert chosen_model is not None
assert chosen_threshold is not None
print('\nChosen model: ', chosen_model, sep='\n')
# test chosen model
model_name = next(iter(chosen_model))
log_dir_model = os.path.join(args.out_log_dir, model_name)
ckpt_filename = Project.get_ckpt_from_log_dir_model(log_dir_model)
_, params = Project.load_ckpt_params(ckpt_filename, flatten=True)
# TODO: Check if activations are in fact already sparsified.
params['mode'] = 'test'
params['ckpt_filename'] = ckpt_filename
sys.stdout = open(os.devnull, "w")
main_prog(copy.deepcopy(params))
sys.stdout = sys.__stdout__
print(f'\n=> Finished testing chosen model {params["model_name"]}.\n\n')
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description='Sparsify ckpt model using an "optimal" slope '
'threshold (highest threshold for which the train '
'accuracy drop is within a specification). ',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('ckpt_filename',
metavar='ckpt_filename [STR]',
type=str,
help='Checkpoint (.pth) file of the trained model to sparsify.')
parser.add_argument('out_log_dir',
metavar='out_log_dir [STR]',
type=str,
help='Output log directory for sparsified model.')
parser.add_argument('acc_drop_threshold',
metavar='acc_drop_threshold [FLOAT(-1, 0)]',
type=ArgCheck.n_float,
default=-0.25,
help='Maximum train accuracy percentage drop '
'allowed for sparsification. (default: %(default)s)')
args = parser.parse_args()
if not os.path.isdir(args.out_log_dir):
os.makedirs(args.out_log_dir)
if args.acc_drop_threshold <= -1:
raise argparse.ArgumentTypeError(
f'{args.acc_drop_threshold} should be in (-1, 0).')
sparsify_with_optimal_knot_threshold(args)
``` |
{
"source": "joaquimcampos/HTV-Learn",
"score": 3
} |
#### File: HTV-Learn/htvlearn/data.py
```python
import os
import torch
import numpy as np
import math
import scipy
from htvlearn.lattice import Lattice
from htvlearn.delaunay import Delaunay
from htvlearn.grid import Grid
class Hex():
"""Hexagonal lattice vectors"""
v1 = Lattice.hexagonal_matrix[:, 0].numpy()
v2 = Lattice.hexagonal_matrix[:, 1].numpy()
class BoxSpline():
"""Three-directional hexagonal box spline"""
center_points = np.array([0., 0.])
border_points = np.array([Hex.v1, Hex.v2, -Hex.v1 + Hex.v2,
-Hex.v1, -Hex.v2, -Hex.v2 + Hex.v1])
points = np.vstack((center_points, border_points, 2 * border_points))
values = np.array([math.sqrt(3) / 2,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
htv = 12
class SimplicialSpline():
"""Simplicial spline with randomly positioned vertices"""
np.random.seed(3)
center_points = np.array([0., 0.]) + np.random.uniform(-0.2, 0.2, (2, ))
border_points = np.array([Hex.v1, Hex.v2, -Hex.v1 + Hex.v2,
-Hex.v1, -Hex.v2, -Hex.v2 + Hex.v1]) + \
np.random.uniform(-0.2, 0.2, (6, 2))
points = np.vstack((center_points, border_points, 2 * border_points))
values = np.array([math.sqrt(3) / 2,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
class CutPyramid():
"""Pyramid with flat top"""
points = np.vstack((BoxSpline.center_points,
BoxSpline.border_points,
2 * BoxSpline.border_points,
3 * BoxSpline.border_points))
values = np.array([1., 1., 1., 1., 1., 1., 1.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., ])
htv = 16 * math.sqrt(3)
class SimpleJunction():
"""A simple two-polytope junction"""
points = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.],
[0., 3. / 4], [1., 1. / 4]])
values = np.array([0., 2. / 3, 2. / 3, 0., 1., 1.])
# gradients of each polytope
a1_affine_coeff = np.array([2. / 3, 4. / 3., 0.])
a2_affine_coeff = np.array([-2. / 3, -4. / 3., 2.])
htv = 10. / 3
def init_distorted_grid(size_=(3, 3), range_=(-1, 1)):
"""
Initialize a distorted grid.
Args:
size (2-tuple): grid size.
range (2-tuple):
range of points in each dimension before distortion.
Returns:
points (np.array):
distorted grid points. size: (size_[0]*size_[1]) x 2.
"""
assert isinstance(size_, tuple)
assert len(size_) == 2
# initialize undistorted grid points (u, v)
vec1 = np.linspace(*range_, size_[0]) * 1.
vec2 = np.linspace(*range_, size_[1]) * 1.
u, v = np.meshgrid(vec1, vec2)
u = u.flatten()
v = v.flatten()
# add noise to the interior vertices of the grid
mask = np.ma.mask_or(np.abs(u) == u.max(), np.abs(v) == v.max())
points = np.hstack((u[:, np.newaxis], v[:, np.newaxis])).copy()
# the noise is scaled according to the grid spacing
noise = (np.random.rand(*points.shape) - 0.5) * (u[1] - u[0])
# don't add noise to boundary vertices
points[~mask] = points[~mask] + noise[~mask]
return points
class DistortedGrid:
"""Dataset with random values in a distorted random grid"""
points = init_distorted_grid(size_=(3, 3))
values = (np.random.rand(points.shape[0], ) - 0.5)
class Data():
"""Data class for algorithms"""
def __init__(self,
data_from_ckpt=None,
dataset_name=None,
num_train=None,
data_dir='./data',
valid_fraction=0.2,
test_as_valid=False,
non_uniform=False,
noise_ratio=0.,
seed=-1,
verbose=False,
**kwargs):
"""
Args:
data_from_ckpt (dict):
dictionary with 'train', 'valid' and 'test' data loaded
from a checkpoint.
dataset_name (str)
num_train (int):
number of training+valid samples. The effective number of
training samples is a multiple of 1000. Further, if the
dataset has gaps, the data inside the gaps will also be removed.
data_dir (str):
data directory (for face dataset)
valid_fraction (float [0,1]):
fraction of num_train samples that is used for validation
test_as_valid (bool):
if True, use test set in validation.
non_uniform (bool):
if True, perform non-uniform data sampling (face dataset only).
noise_ratio (float >= 0):
noise that should be applied to the samples as a fraction of
the data range.
seed (int):
seed for random generation. If negative, no seed is set.
verbose (bool):
print more info.
"""
self.data_from_ckpt = data_from_ckpt
self.dataset_name = dataset_name
self.num_train = num_train
if self.data_from_ckpt is None:
assert self.dataset_name is not None
if not self.dataset_name.startswith('pyramid'):
assert self.num_train is not None
self.data_dir = data_dir
self.valid_fraction = valid_fraction
self.test_as_valid = test_as_valid
self.non_uniform = non_uniform
self.noise_ratio = noise_ratio
self.seed = seed
self.verbose = verbose
# if not overwritten, computed in add_noise_to_values()
# from self.noise_ratio and dataset height range
self.noise_std = None
if self.seed >= 0:
# set seed
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
np.random.seed(self.seed)
self.train, self.valid, self.test = {}, {}, {}
self.delaunay = {} # points and values for delaunay triangulation
if self.data_from_ckpt is not None:
# load data from self.data_from_ckpt
assert 'train' in self.data_from_ckpt
assert 'valid' in self.data_from_ckpt
self.train = self.data_from_ckpt['train']
self.valid = self.data_from_ckpt['valid']
if 'delaunay' in self.data_from_ckpt:
assert 'points' in self.data_from_ckpt['delaunay']
assert 'values' in self.data_from_ckpt['delaunay']
self.delaunay['points'] = \
self.data_from_ckpt['delaunay']['points']
self.delaunay['values'] = \
self.data_from_ckpt['delaunay']['values']
self.init_data()
def init_data(self):
"""Initialize cpwl dataset, and train/test/valid sets"""
if not bool(self.delaunay):
if self.dataset_name.startswith('pyramid'):
self.delaunay['points'], self.delaunay['values'] = \
self.init_pyramid()
# training is made of all pyramid's points except apex
self.train['input'] = \
torch.from_numpy(self.delaunay['points'][:-1]).clone()
self.train['values'] = \
torch.from_numpy(self.delaunay['values'][:-1]).clone()
# force validation set to be equal to test set
self.test_as_valid = True
elif self.dataset_name.endswith('planes'):
self.delaunay['points'], self.delaunay['values'] = \
self.init_planes()
elif 'face' in self.dataset_name:
self.delaunay['points'], self.delaunay['values'] = \
self.init_face(self.data_dir,
cut=True
if 'cut' in self.dataset_name
else False)
self.cpwl = Delaunay(points=self.delaunay['points'],
values=self.delaunay['values'])
if not self.cpwl.has_rectangular_range:
if self.dataset_name.endswith('planes'):
h = (self.cpwl.tri.points[:, 0].max() -
self.cpwl.tri.points[:, 0].min()) / 400
self.test['input'] = \
Grid(x1_min=self.cpwl.tri.points[:, 0].min(),
x1_max=self.cpwl.tri.points[:, 0].max(),
x2_min=self.cpwl.tri.points[:, 1].min(),
x2_max=self.cpwl.tri.points[:, 1].max(),
h=h,
to_numpy=False,
to_float32=True).x
# discard samples outside convex set
idx = self.cpwl.tri.find_simplex(self.test['input'])
self.test['input'] = self.test['input'][idx >= 0]
else:
# generate uniformly distributed samples in cpwl convex set
# the final number of test samples will be smaller because
# samples outside lattice are discarded
nb_samples = 160000 # 400*400
self.test['input'] = \
self.generate_random_samples(nb_samples)
else:
# test set is sampled on a grid inside the convex hull of cpwl
# this gives a test grid 500 x 500 samples
self.test['input'] = self.cpwl.get_grid(h=0.0025,
to_numpy=False,
to_float32=True).x
self.test['values'] = self.cpwl.evaluate(self.test['input'])
print(f'\nnb. of test data points : {self.test["input"].size(0)}')
if (not bool(self.valid)) and (self.test_as_valid is True):
self.valid['input'] = self.test['input'].clone()
self.valid['values'] = self.test['values'].clone()
if not bool(self.train):
num_train_valid_samples = int(self.num_train)
if self.dataset_name.endswith('planes'):
# generate grid in lattice reference
x_lat = torch.empty((num_train_valid_samples, 2))
x_lat.uniform_(-0.5, 0.5)
# convert to standard coordinates
x = (Lattice.hexagonal_matrix @ x_lat.t()).t()
elif self.non_uniform is True:
hull_points = \
self.cpwl.tri.points[self.cpwl.convex_hull_points_idx]
# compute largest distance
max_dist = np.amax(np.sqrt(np.sum(hull_points ** 2, axis=1)))
# radius
r = (torch.empty((num_train_valid_samples, 1))
.uniform_(0., max_dist * 0.8))
# angle
theta = (torch.empty((num_train_valid_samples, 1))
.uniform_(0., 2 * np.pi))
# points
x = torch.cat((r * theta.cos(), r * theta.sin()), dim=1)
# Only keep points inside cpwl convex hull
x_simplices_idx = self.cpwl.tri.find_simplex(x)
x = x[x_simplices_idx >= 0]
else:
# generate num_train_valid_samples uniformly distributed
# in cpwl convex set
x = self.generate_random_samples(num_train_valid_samples)
# training / validation split indices
if not self.test_as_valid:
split_idx = int((1 - self.valid_fraction) *
x.size(0))
else:
# full training set, validation set = test set
split_idx = x.size(0)
self.train['input'] = x[0:split_idx]
self.train['values'] = self.cpwl.evaluate(self.train['input'])
if self.dataset_name.endswith('gaps'):
# [(gap_x_min, gap_x_max)...]
gap_x_range = [[0.108, 0.234], [-0.07, 0.226],
[-0.234, -0.108]]
# [(gap_y_min, gap_y_max)...]
gap_y_range = [[-0.21, 0.07], [0.19, 0.311], [-0.21, 0.063]]
# remove data inside gaps
for i in range(len(gap_x_range)):
gap_mask = (
(self.train['input'][:, 0] >= gap_x_range[i][0]) *
(self.train['input'][:, 0] <= gap_x_range[i][1]) *
(self.train['input'][:, 1] >= gap_y_range[i][0]) *
(self.train['input'][:, 1] <= gap_y_range[i][1]))
self.train['input'] = self.train['input'][~gap_mask]
self.train['values'] = self.train['values'][~gap_mask]
if not np.allclose(self.noise_ratio, 0.):
# add noise to training data
self.train['values'] = \
self.add_noise_to_values(self.train['values'])
if self.train['input'].size(0) >= 3000:
# effective number of samples (rounded to 1000)
num = int(np.floor(self.train['input'].size(0) / 1000.) * 1000)
idx = torch.randperm(self.train['input'].size(0))[:num]
self.train['input'] = self.train['input'][idx]
self.train['values'] = self.train['values'][idx]
print('nb. of training data points : '
f'{self.train["input"].size(0)}')
if not bool(self.valid):
self.valid['input'] = x[(split_idx + 1)::]
self.valid['values'] = \
self.cpwl.evaluate(self.valid['input'])
@staticmethod
def add_lattice_vertices(points, values, eps=0.):
"""Add lattice vertices (up to eps distance away)
Args:
points (torch.Tensor or np.ndarray): size (m, 2)
values (torch.Tensor or np.ndarray): size (m,)
eps (float): buffer distance from boundaries of lattice.
"""
nparray = False
if isinstance(points, np.ndarray):
nparray = True
# convert to torch
points = torch.from_numpy(points)
values = torch.from_numpy(values)
# add lattice corners
br = Lattice.bottom_right_std
ur = Lattice.upper_right_std
a, b = eps * np.sqrt(3) / 2., eps * .5
lat_points = \
torch.tensor([[-ur[0] + a, -ur[1] + b],
[br[0] - b, br[1] + a],
[-br[0] + b, -br[1] - a],
[ur[0] - a, ur[1] - b]])
points = torch.cat((points, lat_points), dim=0)
values = torch.cat((values, torch.zeros(4)))
if nparray is True:
# convert to numpy
points = points.numpy()
values = values.numpy()
return points, values
def generate_random_samples(self, num_samples):
"""
Generate uniformly distributed data inside convex set.
Works by generating num_samples points and then rejecting the
ones outside the convex set.
Args:
num_samples (int) (before possible rejection)
Returns:
x (torch.tensor)
"""
x = torch.empty((num_samples, 2))
x[:, 0].uniform_(self.cpwl.tri.points[:, 0].min(),
self.cpwl.tri.points[:, 0].max())
x[:, 1].uniform_(self.cpwl.tri.points[:, 1].min(),
self.cpwl.tri.points[:, 1].max())
# reject samples outside convex set
idx = self.cpwl.tri.find_simplex(x)
x = x[idx >= 0]
return x
def add_noise_to_values(self, values):
"""
Add gaussian noise to values.
if self.noise_std exists, it is used as the noise standard deviation,
otherwise noise_std is computed from self.noise_ratio and the data
height range.
Args:
values (torch.tensor):
values to add noise to.
Returns the noisy values.
"""
noise_std = self.noise_std
if noise_std is None:
noise_std = self.noise_ratio * (values.max() - values.min())
if self.verbose:
print('Adding noise of standard deviation '
'sigma = {:.2E}'.format(noise_std))
noise = torch.empty_like(values).normal_(std=noise_std)
return values + noise
@staticmethod
def init_pyramid():
"""
Initialize the pyramid dataset.
Returns:
points (np.array): size (M, 2).
values (np.array): size (M,)
"""
# points in lattice coordinates
h = 0.1
points = torch.tensor([[2 * h, 0.], [0., 2 * h],
[2 * h, -2 * h], [0., -2 * h],
[-2 * h, 0.], [-2 * h, 2 * h],
[h, 0.], [0., h],
[h, -h], [0., -h],
[-h, 0.], [-h, h],
[0., 0.]]) # last element -> apex
values = torch.tensor([.0, .0, .0, .0, .0, .0,
.1, .1, .1, .1, .1, .1,
.2])
# convert to standard coordinates
points = (Lattice.hexagonal_matrix @ points.t()).t()
return points.numpy(), values.numpy()
@classmethod
def init_zero_boundary_planes(cls):
"""
Initialize the planes dataset with zero boundaries.
Returns:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
"""
# fit planes function in the lattice
pad = 0.08
x_min, _, x_max, _ = cls.get_data_boundaries(hw_ratio=0.01, pad=pad)
_, y_min, _, y_max = cls.get_data_boundaries(hw_ratio=100, pad=pad)
dx = (x_max - x_min) / 100 # delta x step
dy = (y_max - y_min) / 100 # delta y step
# control points with values - (x1, x2, val)
vert = \
torch.tensor([[x_min + 30 * dx, y_min + 35 * dy, dx * 20], # 0
[x_max - 40 * dx, y_min + 30 * dy, dx * 20], # 1
[x_max - 35 * dx, y_max - 30 * dy, dx * 20], # 2
[x_min + 40 * dx, y_max - 30 * dy, dx * 20], # 3
[x_max - 25 * dx, y_min + 5 * dy, 0.], # 4
[x_min + 25 * dx, y_max - 5 * dy, 0.]]) # 5
# auxiliary triangulation of the function
# size (num_simplices, vertices)
simplices = torch.tensor([[0, 1, 3],
[1, 2, 3],
[4, 1, 0],
[0, 3, 5],
[4, 2, 1],
[3, 2, 5]])
# check values of vertices so that there is a seamless plane junction
x_v6 = cls.get_zero_loc(vert, simplices, 2, 3)
x_v7 = cls.get_zero_loc(vert, simplices, 4, 5)
br = Lattice.bottom_right_std
ur = Lattice.upper_right_std
# add x_v6, x_v7, and lattice corners
new_vert = torch.tensor([[x_v6[0], x_v6[1], 0.], # 6
[x_v7[0], x_v7[1], 0.], # 7
[-ur[0], -ur[1], 0.], # 8
[br[0], br[1], 0.], # 9
[-br[0], -br[1], 0.], # 10
[ur[0], ur[1], 0.]]) # 11
vert = torch.cat((vert, new_vert), dim=0)
points, values = vert[:, 0:2], vert[:, 2]
return points, values
@staticmethod
def add_linear_func(points, values):
"""
Add a linear term to the dataset.
Args:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
Returns:
values (torch.tensor): size (M,).
"""
# add linear term to vertices
a = torch.tensor([0.1, 0.05])
b = torch.tensor([-0.05])
values += (points * a.unsqueeze(0)).sum(1) + b
return values
def init_planes(self):
"""
Initialize the planes dataset. Set self.noise_std.
Returns:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
"""
# initialize planes dataset with zero boundaries
points, values = self.init_zero_boundary_planes()
# overwrite noise standard deviation
self.noise_std = (self.noise_ratio * values.max())
# add linear function to dataset
values = self.add_linear_func(points, values)
# convert to numpy
points, values = points.numpy(), values.numpy()
return points, values
@staticmethod
def get_zero_loc(vert, simplices, idx1, idx2):
"""
Get the location where the planes of simplices idx1 and idx2 both vanish,
so that they form a seamless zero junction.
Args:
vert (np.array):
size: (M, 3) (points in the first two columns,
values in the third)
simplices (np.array):
indexes of vertices for each simplex (row). size: (P, 3).
idx1, idx2 (int>=0):
indices of simplices to join.
Returns:
x (torch.tensor): size (2,)
"""
# size (2, 3, 3)
idx_vec = [idx1, idx2]
simplices_vert = \
torch.cat(tuple(vert[simplices[i]].unsqueeze(0)
for i in idx_vec), dim=0)
plane_coeff = Lattice.solve_method(simplices_vert)
affine_coeff = Lattice.get_affine_coeff_from_plane_coeff(plane_coeff)
assert affine_coeff.size() == (2, 3)
B = -affine_coeff[:, -1:]
A = affine_coeff[:, 0:2]
x = torch.linalg.solve(A, B)
return x.squeeze(-1)
@staticmethod
def read_face(data_dir, cut_eps=0.6):
"""
Read the 3D face dataset and construct a function from it by
cutting and eliminating duplicates.
Args:
cut_eps (float in [0,1]):
what height to cut face relative to its maximum height.
Returns:
cleaned_vert (np.array):
with vertices below the cut height and duplicates removed,
recentered to zero mean.
size: (M, 3) (points in the first two columns,
values in the third)
"""
obj_file = os.path.join(data_dir, 'obj_free_male_head.obj')
V = []
with open(obj_file, "r") as file1:
for line in file1.readlines():
f_list = [i for i in line.split(" ") if i.strip()]
if len(f_list) == 0:
continue
if f_list[0] != 'v':
continue
V += [float(i) for i in f_list[1::]]
# vertices
vert = np.array(V).reshape(-1, 3)
# sort vertices by z coordinates in descending direction
sort_vert = vert[vert[:, 2].argsort()][::-1]
# get unique_idx of first occurrences (largest height)
_, unique_dx = np.unique(sort_vert[:, 0:2], return_index=True, axis=0)
unique_sort_vert = sort_vert[unique_dx]
# eliminate vertices whose height is below cutoff
min_height = unique_sort_vert[:, 2].min()
max_height = unique_sort_vert[:, 2].max()
cutoff_val = min_height + (max_height - min_height) * cut_eps
cutoff_mask = np.where(unique_sort_vert[:, 2] > cutoff_val)[0]
cleaned_vert = unique_sort_vert[cutoff_mask]
cleaned_vert[:, 2] = cleaned_vert[:, 2] - \
cutoff_val # shift z.min() to z = 0
x_mean = cleaned_vert[:, 0].min() / 2. + cleaned_vert[:, 0].max() / 2.
y_mean = cleaned_vert[:, 1].min() / 2. + cleaned_vert[:, 1].max() / 2.
cleaned_vert[:, 0] = cleaned_vert[:, 0] - x_mean # shift x around 0
cleaned_vert[:, 1] = cleaned_vert[:, 1] - y_mean # shift y around 0
return cleaned_vert
@classmethod
def init_face(cls, data_dir, cut=False):
"""
Initialize the face dataset.
Args:
cut (bool):
if True, use only a smaller section of the face.
Otherwise, use full face with zero boundaries.
Returns:
points (torch.tensor): size (M, 2).
values (torch.tensor): size (M,)
"""
vert = cls.read_face(data_dir)
# normalize face to fit in [-0.8, 0.8]^2 square
max_ = max(np.abs(vert[:, 0]).max(), np.abs(vert[:, 1]).max())
vert = vert / max_ * 0.8
if cut is True:
# cut a smaller portion of the face
cpwl_aux = Delaunay(points=vert[:, 0:2].copy(),
values=vert[:, 2].copy())
x_min, x_max = -0.324, 0.324
y_min, y_max = -0.45, 0.419
mask = (vert[:, 0] > x_min) * (vert[:, 0] < x_max) * \
(vert[:, 1] > y_min) * (vert[:, 1] < y_max)
vert = vert[mask]
# add extreme points of the convex hull to vertices
hull_points = np.array([[x_min, y_min], [x_max, y_min],
[x_max, y_max], [x_min, y_max]])
hull_values = cpwl_aux.evaluate(hull_points)
new_vertices = np.concatenate(
(hull_points, hull_values[:, np.newaxis]), axis=1)
vert = np.concatenate((vert, new_vertices), axis=0)
else:
points = vert[:, 0:2]
hull = scipy.spatial.ConvexHull(points)
hull_points = points[hull.vertices]
# add points along the convex hull
for i in range(hull_points.shape[0]):
frac = np.linspace(0.01, 0.99, num=99)[:, np.newaxis]
next_vert = i + 1 if i != hull_points.shape[0] - 1 else 0
new_points = hull_points[next_vert][np.newaxis, :] * frac + \
hull_points[i][np.newaxis, :] * (1 - frac)
if cut is True:
# evaluate on convex hull of face
new_values = cpwl_aux.evaluate(new_points)
else:
# add zeros around face (to its convex hull contour)
new_values = np.zeros(new_points.shape[0])
new_vertices = np.concatenate(
(new_points, new_values[:, np.newaxis]), axis=1)
vert = np.concatenate((vert, new_vertices), axis=0)
if cut is False:
# create grid of points with zero value around face
h = 0.01
x_r = vert[:, 0].max() * 10. / 8.
y_r = vert[:, 1].max() * 9.5 / 8.
fine_grid = Grid(x1_min=-x_r,
x1_max=x_r + h,
x2_min=-y_r,
x2_max=y_r + h,
h=h,
to_float32=True).x
# only retain points outside face convex hull
aux_delaunay = scipy.spatial.Delaunay(points)
fine_grid = fine_grid[aux_delaunay.find_simplex(fine_grid) < 0]
# add zeros around face
new_vertices = np.concatenate(
(fine_grid, np.zeros((fine_grid.shape[0], 1))), axis=1)
vert = np.concatenate((vert, new_vertices), axis=0)
vert = cls.fit_in_lattice(vert)
points, values = vert[:, 0:2], vert[:, 2]
return points, values
@classmethod
def fit_in_lattice(cls, vert):
"""
Fit points in lattice.
Args:
vert (np.array):
size: (M, 3) (points in the first two columns,
values in the third)
Returns:
vert (np.array):
scaled vertices that fit in lattice.
"""
# normalize face to fit in lattice
hw_ratio = (vert[:, 1].max() - vert[:, 1].min()) / \
(vert[:, 0].max() - vert[:, 0].min())
_, _, x_max, y_max = cls.get_data_boundaries(hw_ratio=hw_ratio,
pad=0.03)
# recenter data
x_mean = (vert[:, 0].max() + vert[:, 0].min()) / 2
y_mean = (vert[:, 1].max() + vert[:, 1].min()) / 2
vert[:, 0] = vert[:, 0] - x_mean
vert[:, 1] = vert[:, 1] - y_mean
# x,y scaling factors
# vert[i,0] should be within (-x_max, x_max)
# vert[i,1] should be within (-y_max, y_max)
x_norm = x_max / vert[:, 0].max()
y_norm = y_max / vert[:, 1].max()
if x_norm < y_norm:
vert = vert * x_norm
else:
vert = vert * y_norm
return vert
@staticmethod
def get_data_boundaries(hw_ratio=math.sqrt(3), pad=0.1):
"""
Get the data boundaries that allow fitting the data in centered
rectangular region of the lattice with a specified height/width ratio,
so as to maximize occupied space within the interior lattice.
Pad a given distance from the limits if pad > 0.
Takes into account geometry of hexagonal lattice:
if hw_ratio > math.sqrt(3), the data touches the upper and bottom
interior border; otherwise, it touches the left and right borders.
Args:
hw_ratio (float>0):
height/width ratio of rectangular region.
pad (float>=0):
distance to pad from the limits of the region.
Returns:
4-tuple (x_min, x_max, y_min, y_max): data boundaries
"""
# requires that lattice is hexagonal and lsize*h = 1 (enforced)
bottom_right_std = Lattice.bottom_right_std
if hw_ratio > math.sqrt(3): # from geometry maximize space usage
y_min = bottom_right_std[1]
x_min = y_min * (1. / hw_ratio)
else:
a = (bottom_right_std[0] * 2) / (1 + hw_ratio * math.sqrt(3) / 3)
x_min = -a
y_min = x_min * hw_ratio
x_min, y_min = x_min + pad, y_min + pad
x_max, y_max = -x_min, -y_min
return x_min.item(), y_min.item(), x_max.item(), y_max.item()
```
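The noise model above takes an explicit `noise_std` when available and otherwise derives it from `noise_ratio` times the range of the data values. A minimal, self-contained sketch of the same logic (only `torch` is assumed; the function name `add_gaussian_noise` and the example values are illustrative, not part of the library):

```python
import torch

def add_gaussian_noise(values, noise_std=None, noise_ratio=0.05):
    """Add zero-mean Gaussian noise with an explicit or data-derived std."""
    if noise_std is None:
        # derive the std from the range of the data values
        noise_std = noise_ratio * (values.max() - values.min())
    return values + torch.empty_like(values).normal_(std=float(noise_std))

values = torch.linspace(0., 1., steps=5)
noisy = add_gaussian_noise(values)  # std = 0.05 * (1.0 - 0.0) = 0.05
```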
#### File: HTV-Learn/htvlearn/delaunay.py
```python
import torch
import numpy as np
import scipy.spatial
from htvlearn.lattice import Lattice
from htvlearn.grid import Grid
from htvlearn.hessian import (
get_finite_second_diff_Hessian,
get_exact_grad_Hessian
)
class Delaunay():
"""Wrapper around scipy.spatial.Delaunay"""
centers_barycentric_coordinates = np.array([0.333, 0.333, 0.334])
def __init__(self,
points=None,
values=None,
npoints=120,
points_range=2,
values_range=1,
add_extreme_points=False,
pad_factor=0.05,
**kwargs):
"""
Args:
points, values (torch.tensor/np.array):
Specific set of points/values to generate Delaunay
triangulation from. size: (n, 2), (n,), respectively.
if not given, npoints are randomly generated.
npoints (int):
number of delaunay triangulation points to generate, if
``points`` and ``values`` are not given.
points_range (float):
range for generated points.
values_range (float):
range for generated values.
add_extreme_points (bool):
Add extreme points of the rectangle to triangulation.
pad_factor (float):
relative pad factor for extreme points
(relative to data range).
"""
super().__init__()
if isinstance(points, torch.Tensor):
points = points.numpy()
values = values.numpy()
if isinstance(points, np.ndarray):
self._verify_input(points, values)
else:
interior_npoints = ((npoints -
4) if add_extreme_points is True else npoints)
# randomly generate points in [-points_range/2, points_range/2]^2
points = ((np.random.rand(int(interior_npoints), 2) - 0.5) *
points_range)
# randomly generate values in [-values_range/2, values_range/2]
values = ((np.random.rand(int(interior_npoints), ) - 0.5) *
values_range)
if add_extreme_points:
x_min, x_max = points[:, 0].min(), points[:, 0].max()
y_min, y_max = points[:, 1].min(), points[:, 1].max()
# pf -> absolute padding factor
pf = min(x_max - x_min, y_max - y_min) * pad_factor
# extreme points of domain
extreme_points = np.array([[x_min - pf, y_min - pf],
[x_max + pf, y_min - pf],
[x_min - pf, y_max + pf],
[x_max + pf, y_max + pf]])
points = np.vstack((points, extreme_points))
values = np.concatenate((values, np.zeros(4)))
# generate delaunay triangulation
self.tri = scipy.spatial.Delaunay(points)
for attstr in [
'values', 'simplices_points', 'simplices_values',
'simplices_affine_coeff'
]:
assert not hasattr(self.tri, attstr), \
f'{self.tri} already has attribute {attstr}...'
# idx of convex hull points
self.convex_hull_points_idx = np.unique(self.tri.convex_hull.flatten())
# triangulation vertex values
self.tri.values = values
# sanity check
# https://math.stackexchange.com/questions/
# 1097646/number-of-triangles-in-a-triangulation
nlinear_regions = self.tri.simplices.shape[0]
assert nlinear_regions <= 2 * self.tri.points.shape[0] \
- 2 - self.convex_hull_points_idx.shape[0]
# coordinates of points in each simplex. size: (nsimplex, 3, 2).
self.tri.simplices_points = self.tri.points[self.tri.simplices]
assert self.tri.simplices_points.shape == \
(self.tri.simplices.shape[0], 3, 2)
# vertex values of points in each simplex. size: (nsimplex, 3).
self.tri.simplices_values = self.tri.values[self.tri.simplices]
assert self.tri.simplices_values.shape == \
(self.tri.simplices.shape[0], 3)
simplices_centers = (
np.transpose(self.tri.simplices_points, (0, 2, 1)) @
self.centers_barycentric_coordinates[:, np.newaxis]
).squeeze(axis=-1)
# affine coefficients of each simplex
self.tri.simplices_affine_coeff = \
self.get_affine_coefficients(simplices_centers)
assert self.tri.simplices_affine_coeff.shape == \
(self.tri.simplices.shape[0], 3)
@property
def is_admissible(self):
"""
Check if the cpwl function is admissible, i.e., of the form
g(x) + a^T x + b, where g is a zero-boundary function.
"""
hull_points = self.tri.points[self.convex_hull_points_idx]
hull_values = self.tri.values[self.convex_hull_points_idx]
# fit a linear function through three non-collinear convex hull
# points, i.e., solve f(hull_points) = a^T hull_points + b for a and b.
# The first two points correspond to the min/max x coordinate
first_idx = np.argmin(hull_points[:, 0])
second_idx = np.argmax(hull_points[:, 0])
# to find the third point, we form the matrix
# x1 x2 x3
# y1 y2 y3
# 1 1 1
# and check for which (x3, y3) it has the largest determinant:
mat = np.ones((hull_points.shape[0], 3, 3))
mat[:, 0:2, 0] = hull_points[first_idx]
mat[:, 0:2, 1] = hull_points[second_idx]
mat[:, 0:2, 2] = hull_points
det = np.abs(np.linalg.det(mat))
third_idx = np.argmax(np.abs(det))
# 3 points
points = np.concatenate((hull_points[first_idx][np.newaxis, :],
hull_points[second_idx][np.newaxis, :],
hull_points[third_idx][np.newaxis, :]),
axis=0)
values = np.array([hull_values[first_idx],
hull_values[second_idx],
hull_values[third_idx]])
# vert size: (1, 3, 3)
vert = np.concatenate((points, values[:, np.newaxis]),
axis=1)
# plane_coeff size: (1, 4)
plane_coeff = Lattice.solve_method(torch.from_numpy(vert).unsqueeze(0))
# affine_coeff size: (1, 3)
affine_coeff = Lattice.get_affine_coeff_from_plane_coeff(plane_coeff)
affine_coeff = affine_coeff.numpy()
z_linear = ((affine_coeff[:, 0:2] * hull_points).sum(axis=1) +
affine_coeff[:, 2])
if np.allclose(z_linear, hull_values):
return True
return False
@property
def has_rectangular_range(self):
"""Check if convex hull of cpwl is rectangular."""
hull_points = self.tri.points[self.convex_hull_points_idx]
x_min = hull_points[:, 0].min()
x_max = hull_points[:, 0].max()
y_min = hull_points[:, 1].min()
y_max = hull_points[:, 1].max()
# test rectangle
rect = np.array([[x_min, y_min],
[x_max, y_min],
[x_min, y_max],
[x_max, y_max]])
# check that hull_points contains the 4 corners of the rectangle
for corner in rect:
# difference to a corner.
diff = hull_points - np.array([corner])
if not np.any(np.all(np.isclose(diff, 0), axis=1)):
# there isn't any hull point corresponding to this corner
return False
# check that all points have
# x = x_min, or x = x_max, or y = y_min, or y = y_max
if not np.all(np.isclose(hull_points[:, 0], x_min) +
np.isclose(hull_points[:, 0], x_max) +
np.isclose(hull_points[:, 1], y_min) +
np.isclose(hull_points[:, 1], y_max)):
return False
return True
@staticmethod
def _verify_input(points, values):
"""
Verify user input points and values.
Args:
points:
expected np.ndarray of size (n, 2)
values:
expected np.ndarray of size (n,)
"""
assert points.shape[1] == 2, f'{points.shape[1]}'
assert len(points.shape) == 2, f'{len(points.shape)}'
assert isinstance(values, np.ndarray), f'{type(values)}'
assert values.shape == (points.shape[0], ), \
f'{values.shape} != ({points.shape[0]},)'
def get_affine_coefficients(self, x):
"""
Get the affine coefficients (a1, a2, d) of the simplex where each
datapoint lives, s.t. f(x1, x2) = a1.x1 + a2.x2 + d, using the solve
method. We have 3 vertices and 3 unknowns (a1, a2, d) for each
datapoint, so we can solve the system of 3 equations to get (a1, a2, d).
Args:
x (np.ndarray):
locations where to get affine coefficients. size: (n, 2).
Returns:
affine_coeff (np.ndarray):
affine coefficients of simplices where each datapoint lives.
size: (n, 3).
"""
x_simplices, x_simplices_idx = self.get_x_simplices(x)
try:
affine_coeff = \
self.tri.simplices_affine_coeff[x_simplices_idx].copy()
return affine_coeff
except AttributeError:
pass
x_simplices_points = \
self.get_x_simplices_points(x_simplices=x_simplices) # (m, 3, 2)
x_simplices_values = \
self.get_x_simplices_values(x_simplices=x_simplices) # (m, 3)
# vertices for (x1, x2, z) -> {(x1_K, x2_K, z_K)}, K in {A, B, C}
# (x1 - x1_C).a1 + (x2 - x2_C).a2 + (z - z_C) = 0
# plug (x1_A, x2_A, z_A) and (x1_B, x2_B, z_B) above to find a1, a2
z_diff = (x_simplices_values[:, 0:2] -
x_simplices_values[:, 2:3])[:, :, np.newaxis]
assert z_diff.shape == (x.shape[0], 2, 1)
x_diff = x_simplices_points[:, 0:2, :] - x_simplices_points[:, 2:3, :]
assert x_diff.shape == (x.shape[0], 2, 2)
a1_a2 = np.linalg.solve(x_diff, z_diff)
assert a1_a2.shape == (x.shape[0], 2, 1)
a1_a2 = a1_a2.squeeze(-1)
d = x_simplices_values[:, 2] - \
(a1_a2 * x_simplices_points[:, 2]).sum(axis=1)
affine_coeff = np.hstack((a1_a2, d[:, np.newaxis]))
return affine_coeff
def get_x_simplices(self, x):
"""
Get simplices where x lives.
Args:
x (np.ndarray):
input locations. size (n, 2).
Returns:
x_simplices (np.ndarray):
indexes of vertices of simplices where x lives. size: (n, 3).
x_simplices_idx (np.ndarray):
indexes of simplices where x lives. size: (n,).
"""
assert isinstance(x, np.ndarray), f'x is of type: {type(x)}.'
x_simplices_idx = self.tri.find_simplex(x)
assert x_simplices_idx.shape == (x.shape[0], )
if np.any(x_simplices_idx < 0):
raise ValueError(
'At least one point is outside the triangulation...')
x_simplices = self.tri.simplices[x_simplices_idx].copy()
assert x_simplices.shape == (x.shape[0], 3)
return x_simplices, x_simplices_idx
def _check_x_simplices(self, x_simplices, x):
"""
Return x_simplices if it is given. Otherwise, require x to be given
and compute x_simplices from it.
Args:
x (None or np.ndarray):
input locations. size (n, 2).
x_simplices (np.ndarray):
indexes of vertices of simplices where x lives. size: (n, 3).
Returns:
x_simplices (np.ndarray):
indexes of vertices of simplices where x lives. size: (n, 3).
"""
if x_simplices is not None:
return x_simplices
elif x is None:
raise ValueError('Need to provide either "x" or "x_simplices".')
x_simplices, _ = self.get_x_simplices(x)
return x_simplices
def get_x_simplices_points(self, x_simplices=None, x=None):
"""
Get locations of vertices from x_simplices (indexes of vertices)
or x (input locations).
If x_simplices is not given, x has to be given.
Args:
x_simplices (None or np.ndarray):
indexes of vertices of simplices where x lives. size: (n, 3).
x (None or np.ndarray):
input locations. size (n, 2).
Returns:
x_simplices_points:
locations of vertices. size: (n, 3, 2).
"""
x_simplices = self._check_x_simplices(x_simplices, x)
x_simplices_points = self.tri.points[x_simplices]
assert x_simplices_points.shape == (*x_simplices.shape, 2)
return x_simplices_points
def get_x_simplices_values(self, x_simplices=None, x=None):
"""
Get values at the vertices from x_simplices (indexes of vertices)
or x (input locations).
If x_simplices is not given, x has to be given.
Args:
x_simplices (None or np.ndarray):
indexes of vertices of simplices where x lives. size: (n, 3).
x (None or np.ndarray):
input locations. size (n, 2).
Returns:
x_simplices_values:
values at the vertices. size (n, 3).
"""
x_simplices = self._check_x_simplices(x_simplices, x)
x_simplices_values = self.tri.values[x_simplices]
assert x_simplices_values.shape == x_simplices.shape
return x_simplices_values
def evaluate_bar(self, x):
"""
Evaluate cpwl function at x, using barycentric coordinates method.
Args:
x (np.ndarray or torch.Tensor):
input locations. size (n, 2).
Returns:
z (np.ndarray or torch.Tensor):
values at x. size (n,).
"""
torchtensor = False
if isinstance(x, torch.Tensor):
# convert to numpy
torchtensor = True
device, dtype = x.device, x.dtype
x = x.detach().cpu().numpy()
x_bar_coord, x_simplices = self.get_x_baryc_coord(x)
x_simplices_values = \
self.get_x_simplices_values(x_simplices=x_simplices)
z = (x_simplices_values * x_bar_coord).sum(axis=1)
if torchtensor is True:
# convert to torch
z = torch.from_numpy(z).to(device=device, dtype=dtype)
return z
def get_x_baryc_coord(self, x):
"""
Get barycentric coordinates of x.
We use affine coordinates to compute barycentric coordinates
(more numerically stable):
x^T = [p1^T p2^T p3^T] @ [bar1, bar2, bar3]^T (with bar3 = 1-bar1-bar2)
x^T = [(p1-p3)^T (p2-p3)^T] @ [bar1, bar2]^T + p3^T
<=> (x-p_3)^T = [(p1-p3)^T (p2-p3)^T] @ [bar1, bar2]^T
Args:
x (np.ndarray or torch.Tensor):
input locations. size (n, 2).
Returns:
bar_coord (np.ndarray):
barycentric coordinates of x. size (n, 3).
x_simplices (np.ndarray):
indexes of vertices of simplices where x lives. size: (n, 3).
"""
x_simplices, x_simplices_idx = self.get_x_simplices(x)
# tri.transform: size: (nsimplex, 3, 2)
x_affine_coord = (x -
self.tri.transform[x_simplices_idx, 2])[:, :,
np.newaxis]
assert x_affine_coord.shape == (x.shape[0], 2, 1)
p1_p2_affine_coord_inv = self.tri.transform[x_simplices_idx, :2]
assert p1_p2_affine_coord_inv.shape == (x.shape[0], 2, 2)
bar1_bar2 = (p1_p2_affine_coord_inv @ x_affine_coord).squeeze(axis=-1)
assert bar1_bar2.shape == (x.shape[0], 2)
bar_coord = np.c_[bar1_bar2, 1 - bar1_bar2.sum(axis=1, keepdims=True)]
assert bar_coord.shape == (x.shape[0], 3)
return bar_coord, x_simplices
def evaluate(self, x):
"""
Evaluate cpwl function at x, using affine coefficients.
Args:
x (np.ndarray or torch.Tensor):
input locations. size (n, 2).
Returns:
z (np.ndarray or torch.Tensor):
values at x. size (n,).
"""
torchtensor = False
if isinstance(x, torch.Tensor):
# convert to numpy
torchtensor = True
device, dtype = x.device, x.dtype
x = x.detach().cpu().numpy()
affine_coeff = self.get_affine_coefficients(x)
z = (affine_coeff[:, 0:2] * x).sum(axis=1) + affine_coeff[:, 2]
if torchtensor is True:
# convert to torch
z = torch.from_numpy(z).to(device=device, dtype=dtype)
return z
def evaluate_with_grad(self, x):
"""
Evaluate cpwl function at x, using affine coefficients, and compute
gradient at x.
Args:
x (np.ndarray or torch.Tensor):
input locations. size (n, 2).
Returns:
z (np.ndarray or torch.Tensor):
values at x. size (n,).
x_grad (np.ndarray or torch.Tensor):
gradients at x. size (n, 2)
"""
torchtensor = False
if isinstance(x, torch.Tensor):
# convert to numpy
torchtensor = True
device, dtype = x.device, x.dtype
x = x.detach().cpu().numpy()
affine_coeff = self.get_affine_coefficients(x)
z = (affine_coeff[:, 0:2] * x).sum(axis=1) + affine_coeff[:, 2]
x_grad = affine_coeff[:, 0:2]
if torchtensor is True:
# convert to torch
z = torch.from_numpy(z).to(device=device, dtype=dtype)
x_grad = torch.from_numpy(x_grad).to(device=device, dtype=dtype)
return z, x_grad
def compute_grad(self, x):
"""
Compute gradient of cpwl function at x.
Args:
x (np.ndarray or torch.Tensor):
input locations. size (n, 2).
Returns:
x_grad (np.ndarray or torch.Tensor):
gradients at x. size (n, 2)
"""
_, x_grad = self.evaluate_with_grad(x)
return x_grad
def get_exact_HTV(self):
"""
Get exact HTV of cpwl function.
Returns:
htv (float)
"""
grad = self.tri.simplices_affine_coeff[:, 0:2].copy()
assert grad.shape == (self.tri.simplices.shape[0], 2)
# size (nsimplex, 3); each simplex has three neighbors in 2D.
neighbors = self.tri.neighbors.copy() # -1 signals no neighbors
# tuple (rows, cols)
no_neighbor_idx = np.where(neighbors == -1)
has_neighbor_idx = np.where(neighbors > -1)
######################################
# compute norm of gradient differences
######################################
# size: (nsimplex, 3, 2)
grad_simplices_expand = grad[:, np.newaxis, :].repeat(3, axis=1)
# size: (nsimplex, 3, 2) --- (simplex idx, neighbor idx, neighbor grad)
grad_neighbors = grad[neighbors]
# do not count junctions with neighbors outside delaunay, so make
# both grads equal to zero, so as to not have a contribution from
# these.
grad_neighbors[no_neighbor_idx] = np.array([0., 0.])
grad_simplices_expand[no_neighbor_idx] = np.array([0., 0.])
# (nsimplex, 3, 2)
assert grad_neighbors.shape == (neighbors.shape[0], 3, 2)
grad_diff_norm = np.linalg.norm(grad_neighbors - grad_simplices_expand,
ord=2,
axis=-1)
assert grad_diff_norm.shape == neighbors.shape
##########################
# compute junction lengths
##########################
neighbors_simplices = self.tri.simplices[neighbors].copy()
# (nsimplex, 3, 3)
assert neighbors_simplices.shape == (neighbors.shape[0], 3, 3)
simplices_expand = \
self.tri.simplices[:, np.newaxis, :].repeat(3, axis=1).copy()
# (nsimplex, 3, 3)
assert simplices_expand.shape == (neighbors.shape[0], 3, 3)
# for boundary junctions (no neighbor), copy the simplex itself as its
# own "neighbor" and then invalidate one of the copied vertex indices
# (set it to -1), so that exactly two indices -- the shared edge --
# coincide in the duplicate search below.
neighbors_simplices[no_neighbor_idx] = \
simplices_expand[no_neighbor_idx]
new_idx = (*no_neighbor_idx, no_neighbor_idx[1])
neighbors_simplices[new_idx] = -1
aux_arr = np.concatenate((simplices_expand, neighbors_simplices),
axis=-1)
aux_arr = np.sort(aux_arr, axis=-1)
z = np.diff(aux_arr, axis=-1)
edges_idx = aux_arr[np.where(z == 0)].reshape((*aux_arr.shape[0:2], 2))
edges_points = self.tri.points[edges_idx]
edges_len = np.linalg.norm(np.subtract(edges_points[:, :, 1, :],
edges_points[:, :, 0, :]),
ord=2,
axis=-1)
assert edges_len.shape == neighbors.shape
# interior edges are counted twice (once per adjacent simplex), so halve them
edges_htv = grad_diff_norm * edges_len
edges_htv[has_neighbor_idx] = edges_htv[has_neighbor_idx] / 2
htv = edges_htv.sum()
assert np.allclose(htv, edges_htv[has_neighbor_idx].sum())
return htv
def get_grid(self, h=0.001, to_numpy=True, to_float32=False):
"""
Get a Grid over the rectangular range of the cpwl function.
If cpwl does not have rectangular range, throw an Error.
Args:
h (float):
step size.
to_numpy (bool):
if True, convert grid to numpy array.
to_float32 (bool):
if True, convert grid to float32
Returns:
grid (Grid):
Grid instance (see grid.py).
"""
# if not self.has_rectangular_range:
# raise ValueError(
# 'The triangulation does not have a rectangular range.')
# create image
convex_hull_points = self.tri.points[self.convex_hull_points_idx]
x1_min, x1_max = \
convex_hull_points[:, 0].min(), convex_hull_points[:, 0].max()
x2_min, x2_max = \
convex_hull_points[:, 1].min(), convex_hull_points[:, 1].max()
eps = h
return Grid(x1_min=x1_min + eps,
x1_max=x1_max,
x2_min=x2_min + eps,
x2_max=x2_max,
h=h,
to_numpy=to_numpy,
to_float32=to_float32)
def get_lefkimiattis_schatten_HTV(self, p=1, h=0.001):
"""
Get the HTV of the cpwl function via finite second differences for
computing the Hessian, and then taking its Schatten-p norm.
All values of p should give equivalent results for a sufficiently small step.
Args:
p (int >= 1):
p for Schatten norm.
h (float):
step size for finite second differences.
Returns:
htv (float)
"""
grid = self.get_grid(h=h)
Hess = get_finite_second_diff_Hessian(grid, self.evaluate)
S = np.linalg.svd(Hess, compute_uv=False, hermitian=True)
assert S.shape == (*Hess.shape[0:2], 2)
# schatten-p-norm
points_htv = np.linalg.norm(S, ord=p, axis=-1)
# sum over locations
htv = (points_htv * h * h).sum()
return htv
def get_exact_grad_schatten_HTV(self, p=1, h=0.001):
"""
Get the HTV of the cpwl function via finite first differences on
the exact gradient for computing the Hessian, and then taking its
Schatten-p-norm.
Args:
p (int >= 1):
p for Schatten norm.
h (float):
step size for finite second differences.
Returns:
htv (float)
"""
grid = self.get_grid(h=h)
Hess = get_exact_grad_Hessian(grid, self.compute_grad)
S = np.linalg.svd(Hess, compute_uv=False, hermitian=False)
assert S.shape == (*Hess.shape[0:2], 2)
# schatten-p-norm
points_htv = np.linalg.norm(S, ord=p, axis=-1)
# sum over locations
htv = (points_htv * h * h).sum()
return htv
def get_lefkimiattis_trace_HTV(self, h=0.001):
"""
Get the trace-HTV of the cpwl function via finite second-differences
for computing the Hessian, and then taking its trace.
Args:
h (float):
step size for finite second differences.
Returns:
htv (float)
"""
grid = self.get_grid(h=h)
Hess = get_finite_second_diff_Hessian(grid, self.evaluate)
# trace
points_htv = np.abs(Hess[:, :, 0, 0] + Hess[:, :, 1, 1])
# sum over locations
htv = (points_htv * h * h).sum()
return htv
def get_exact_grad_trace_HTV(self, h=0.001):
"""
Get the trace-HTV of cpwl function via finite first differences on
the exact gradient to compute the Hessian, and then taking its trace.
Args:
h (float):
step size for finite second differences.
Returns:
htv (float)
"""
grid = self.get_grid(h=h)
Hess = get_exact_grad_Hessian(grid, self.compute_grad)
# trace
points_htv = np.abs(Hess[:, :, 0, 0] + Hess[:, :, 1, 1])
# sum over locations
htv = (points_htv * h * h).sum()
return htv
```
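A brief usage sketch of the `Delaunay` wrapper above (hedged: it assumes the `htvlearn` package is importable, and the query points are arbitrary values expected to lie inside the triangulation, which `add_extreme_points=True` makes likely by padding the domain):

```python
import numpy as np
from htvlearn.delaunay import Delaunay

# random CPWL function on a padded rectangle (extreme points get value 0)
cpwl = Delaunay(npoints=50, add_extreme_points=True)

x = np.array([[0.1, -0.2], [0.0, 0.3]])
z = cpwl.evaluate(x)          # values via per-simplex affine coefficients
z_bar = cpwl.evaluate_bar(x)  # same values via barycentric coordinates
assert np.allclose(z, z_bar)

htv = cpwl.get_exact_HTV()    # exact HTV from gradient jumps across edges
```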
#### File: HTV-Learn/htvlearn/lattice.py
```python
import math
import torch
from torch import Tensor
import numpy as np
import warnings
from django.utils.functional import classproperty
from htvlearn.grid import Grid
class Lattice():
r"""
Class for cpwl functions ``f: \R^2 \to \R`` with a uniform hexagonal lattice
domain. This class of cpwl functions is in the span of linear hexagonal
box splines shifted on the hexagonal lattice. A function in this space is
uniquely specified by the values at the lattice vertices (box spline
coefficients).
The set of lattice vertices A for a centered hexagonal lattice of size
(lsize x lsize) with ``(lsize + 1)**2`` vertices is given by:
``
A = {a1*v1 + a2*v2: -lsize//2 <= ai <= lsize//2, ai \in \Z, v1,v2 in R^2}
= {X_mat @ a : -lsize//2 <= ai <= lsize//2, ai \in \Z, X_mat in R^{2x2}}
``
where ``v1 = h*(1, 0), v2 = h*(1/2, sqrt(3)/2)`` and ``h`` is the grid
step.
X_mat changes coordinates in the lattice basis v1,v2 \in R^2 to the
standard basis e1,e2; X_mat's columns are formed by the lattice vectors
v1,v2. Each lattice vertex is associated to a value corresponding to
the coefficient of the box spline at that location.
Tensor representation formats:
. batch format (m,n) (default) - vectors along rows
. mat (matrix) format (n,m) - vectors along columns
. batch_mat format - batch of matrices (b,n,m)
"""
ldim = 2 # lattice dimension
# barycentric coordinates for simplex centers (approximately)
centers_barycentric_coordinates = Tensor([0.333, 0.333, 0.334])
# hexagonal lattice matrix with unit vectors
hexagonal_matrix = Tensor([[1., 0.5],
[0., 0.5 * math.sqrt(3)]])
def __init__(self,
X_mat=None,
C_mat=None,
lsize=10,
C_init='zero',
**kwargs):
"""
Args:
Option 1 (direct) --- X_mat (lattice vectors) and the matrix of
lattice coefficients C_mat are given directly. These two matrices
need to satisfy several conditions (see self.verify_input).
X_mat (torch.Tensor):
lattice matrix; size (2, 2).
C_mat (torch.Tensor):
coefficients matrix; size (lsize+1, lsize+1);
C_mat(i1, i2) = f(a1*x1 + a2*x2) = f(X_mat @ a),
x direction -> rows, y -> columns.
Option 2 (indirect) --- define X_mat via lsize and initialize
C_mat with zeros or values from a normal distribution.
lsize (int):
Lattice size; number of vertices is ``(lsize+1)**2``.
C_init (str):
'zero' or 'normal' lattice coefficients initialization.
"""
# C_mat is indexed as (i, j) = (x, y)
if X_mat is not None and C_mat is not None:
self.verify_input(X_mat, C_mat)
else:
assert X_mat is None and C_mat is None, \
'X_mat is None and C_mat is not None, or vice-versa.'
X_mat, C_mat = self.init_hexagonal_lattice(lsize, C_init, **kwargs)
self.init_origin_simplices()
self.init_neighborhood()
self.update_lattice(X_mat, C_mat)
@property
def X_mat(self):
return self._X_mat
@X_mat.setter
def X_mat(self, new_X_mat):
raise AttributeError('X_mat can only be updated via'
'self.update_lattice().')
@property
def C_mat(self):
return self._C_mat
@C_mat.setter
def C_mat(self, new_C_mat):
raise AttributeError('C_mat can only be updated via self.update_'
'lattice() or self.update_coefficients().')
@property
def lsize(self):
return self._lsize
@lsize.setter
def lsize(self, new_lsize):
raise AttributeError('lsize can only be set in __init__().')
@property
def h(self):
return self._h
@h.setter
def h(self, new_h):
raise AttributeError('h cannot be set directly.')
@classmethod
def init_hexagonal_lattice(cls, lsize=10, C_init='zero', **kwargs):
"""
Initialize hexagonal lattice.
Args:
lsize (int):
Lattice size; number of vertices is ``(lsize+1)**2``.
C_init (str):
'zero' or 'normal' lattice coefficients initialization.
"""
assert C_init in ['zero', 'normal']
h = 1. / lsize
X_mat = cls.hexagonal_matrix.mul(h)
C_mat = torch.zeros((lsize + 1, lsize + 1))
if C_init == 'normal':
C_mat.normal_().mul_(0.05) # for visualization purposes.
assert cls.is_hexagonal(X_mat), 'Lattice is not hexagonal'
return X_mat, C_mat
# These two class properties require that lattice
# is hexagonal and lsize*h = 1 (both enforced)
@classproperty
def bottom_right_std(cls):
"""Get the bottom right corner of lattice in standard coordinates"""
return cls.hexagonal_matrix @ Tensor([0.5, -0.5])
@classproperty
def upper_right_std(cls):
"""Get the upper right corner of lattice in standard coordinates"""
return cls.hexagonal_matrix @ Tensor([0.5, 0.5])
@staticmethod
def is_hexagonal(X_mat):
"""
Check if lattice is hexagonal.
Args:
X_mat (torch.Tensor):
lattice matrix; size (2, 2).
"""
if not np.allclose(X_mat[1, 0], 0.):
return False
if not np.allclose(X_mat[0, 1], X_mat[0, 0] / 2):
return False
if not np.allclose(X_mat[1, 1], X_mat[0, 1] * math.sqrt(3)):
return False
return True
@classmethod
def verify_input(cls, X_mat, C_mat):
"""
Verify lattice matrix and coefficients.
Check if X_mat is hexagonal, that sizes are compatible, and that
the resulting lsize * h = 1. Throw an exception if not.
Args:
X_mat (torch.Tensor):
lattice matrix; size (2, 2).
C_mat (torch.Tensor):
coefficients matrix; size (lsize+1, lsize+1);
C_mat(i1, i2) = f(a1*x1 + a2*x2) = f(X_mat @ a),
x direction -> rows, y -> columns.
"""
# verify types
if X_mat.dtype != torch.float32 or C_mat.dtype != torch.float32:
raise ValueError('Expected float tensors.')
# verify if lattice is hexagonal
if not cls.is_hexagonal(X_mat):
raise ValueError('Invalid X_mat --- lattice not hexagonal.')
h, lsize = X_mat[0, 0], C_mat.size(0) - 1
if not np.allclose(lsize * h, 1.):
raise ValueError(f'lsize*h = {lsize * h}. Should be 1.')
# verify sizes
X_mat_size, C_mat_size = tuple(X_mat.size()), tuple(C_mat.size())
ldim = X_mat_size[0] # lattice dimension
if ldim != cls.ldim:
raise ValueError('Only 2D lattice allowed, for now')
if X_mat_size != (ldim, ldim):
raise ValueError(
f'Expected size ({ldim},{ldim}). Found size {X_mat_size}.')
if len(C_mat_size) != ldim or (not all(k == C_mat_size[0]
for k in C_mat_size)):
raise ValueError(
f'Expected size ([k] * {ldim}). Found size {C_mat_size}.')
if C_mat_size[0] % 2 != 1:
raise ValueError(
f'Expected odd dimensions. Found size {C_mat_size}.')
# verify consistency
if X_mat.det().allclose(Tensor([0.])):
raise ValueError(
'X_mat is not invertible or very ill-conditioned.')
if not X_mat.allclose(X_mat.clamp(min=0)):
# To facilitate computations.
raise ValueError('Both lattice vectors should be positive')
def init_origin_simplices(self):
"""
Initialize the first and second origin simplices, their barycentric
matrices and respective inverses. The rows of each simplex are the
lattice coordinates of the vertices.
Some computations, such as computation of barycentric coordinates
of x wrt its simplex, can be translated to the computation of
barycentric coordinates of translated x wrt one of the origin
simplices. (see get_barycentric_coordinates())
"""
# vertices of origin square
vA, vB, vC, vD = [0, 0], [1, 0], [1, 1], [0, 1]
self.batch_origin_simplices = torch.tensor([[vA, vB, vD],
[vB, vC, vD]]) # (2,3,2)
assert self.batch_origin_simplices.size() == (2, 3, 2)
# size (2,3,3); for each of the 2 simplices (dim=0),
# get barycentric matrices
self.batch_origin_simplices_barycentric_mat = \
self.append_ones(self.batch_origin_simplices,
dim=-1).transpose(1, 2)
# batch inverse - size (2,3,3)
self.batch_inv_origin_simplices_barycentric_mat = \
torch.inverse(self.batch_origin_simplices_barycentric_mat)
assert self.batch_inv_origin_simplices_barycentric_mat.size() == \
(2, 3, 3)
def init_neighborhood(self):
"""
Initialize neighborhood steps for given vertex, such that
``v + self.neighborhood_diff`` gives the lattice coordinates of its
neighbors for the HTV computation.
"""
v_Ma, v_Mb, v_Mc = [0, 0], [1, 0], [0, 1]
v_A, v_B, v_C = [1, 1], [-1, 1], [1, -1]
# size: (6,2)
self.neighborhood_diff = \
torch.tensor([v_Ma, v_Mb, v_Mc, v_A, v_B, v_C])
self.neighborhood_size = self.neighborhood_diff.size(0)
def update_coefficients(self, new_C_mat):
"""
Update the lattice values and the information concerning the function
simplices.
Args:
new_C_mat (torch.Tensor):
new coefficients matrix; size (lsize+1, lsize+1);
new_C_mat(i1, i2) = f(a1*x1 + a2*x2) = f(X_mat @ a),
x direction -> rows, y -> columns.
"""
assert new_C_mat.size() == self._C_mat.size()
self._C_mat = new_C_mat
self.init_unique_simplices()
self.init_affine_coefficients()
def update_lattice(self, new_X_mat, new_C_mat):
"""
Update_lattice matrix and coefficients.
Also perform updates which are always required when X_mat or C_mat
are changed; e.g., after dividing lattice.
Args:
new_X_mat (torch.Tensor):
new lattice matrix; size (2, 2).
new_C_mat (torch.Tensor):
new coefficients matrix; size (lsize+1, lsize+1);
C_mat(i1, i2) = f(a1*x1 + a2*x2) = f(X_mat @ a),
x direction -> rows, y -> columns.
"""
self._h = new_X_mat[0, 0]
self._X_mat, self._C_mat = new_X_mat, new_C_mat
# so that inversion does not have to be repeated
self._X_mat_inv = torch.inverse(new_X_mat)
self._lsize = new_C_mat.size(0) - 1
if self._lsize % 2 != 0:
raise ValueError(f'lsize {self._lsize} should be even...')
# centered lattice; bottom left lattice coordinates: (lmin, lmin)
self.lmin = -(self._lsize // 2)
self.lmax = self.lmin + self._lsize
# list of (x, y) locations of vertices in lattice coordinates
self.lattice_grid = Grid(self.lmin,
self.lmax + 1,
h=1,
square=True,
to_numpy=False).x
assert self.lattice_grid.size() == ((self._lsize + 1)**2, 2)
# Now we get three corresponding tensors:
# self.unique_simplices[i] <-> self.simplex_centers[i]
# <-> self.affine_coeff[i]
self.init_unique_simplices()
self.init_simplex_centers()
self.init_affine_coefficients()
def get_lattice_mask(self, x_lat, pad=0):
"""
Get mask of points inside the lattice, to be used as
x_lat[lattice_mask].
Args:
x_lat (torch.Tensor):
input in lattice coordinates; size (m, 2).
Returns:
lattice_mask (torch.bool):
signals which points are inside lattice; size: (m,).
"""
x_lat_min, _ = x_lat.min(1) # minimum along dim=1
x_lat_max, _ = x_lat.max(1) # maximum along dim=1
lattice_mask = ((x_lat_min >= self.lmin + pad) *
(x_lat_max <= self.lmax - pad))
return lattice_mask
def inside_lattice(self, x_lat):
"""
Check that all datapoints in x_lat lie inside lattice.
Args:
x_lat (torch.Tensor):
input in lattice coordinates; size (m, 2).
Returns:
True if all datapoints in x_lat are inside lattice.
"""
lattice_mask = self.get_lattice_mask(x_lat)
return lattice_mask.all()
def init_unique_simplices(self):
"""
Initialize tensor with lattice simplices, without repetition.
Saves self.unique_simplices, which is of size (k, 3, 2). Element i gives
the lattice coordinates of the three vertices of simplex i <= k.
"""
# x_lat does not include upper and right boundary vertices.
# x_lat goes row-wise (i.e. has x_axis stride = 1);
# e.g. lsize=1: x_lat = [(0, 0), (1, 0), (0, 1), (1, 1)]
x_lat = Grid(self.lmin,
self.lmax,
h=1,
square=True,
to_numpy=False).x
m = x_lat.size(0)
x_lat_expand = x_lat.view(m, 1, 1, 2).expand(m, 2, 3, 2)
# (m, 2, 3, 2)
unique_simplices_aux = x_lat_expand + self.batch_origin_simplices
self.unique_simplices = unique_simplices_aux.view(m * 2, 3, 2)
assert self.unique_simplices.size(0) == (self._lsize**2) * 2
# index relative to bottom left vertex
# size (m, 3, 2)
unique_simplices_rel = self.unique_simplices.sub(self.lmin)
# values of vertices of each unique simplex
self.unique_simplices_values = \
self._C_mat[unique_simplices_rel[:, :, 0],
unique_simplices_rel[:, :, 1]]
assert self.unique_simplices_values.size() == \
(self.unique_simplices.size(0), 3)
def init_simplex_centers(self):
"""
Initialize tensor with centers of the lattice simplices, without
repetition, in lattice coordinates.
Saves self.simplex_centers, which is of size (k, 2). Each row i gives
the center of simplex i <= k.
"""
try:
self.simplex_centers = \
self.get_simplex_centers(self.unique_simplices)
except AttributeError:
print(
'Need to initialize unique_simplices before simplex_centers.')
raise
@classmethod
def get_simplex_centers(cls, simplices):
"""
Get tensor with centers of the lattice simplices, without repetition,
in lattice coordinates.
Args:
simplices (torch.int64):
size: (k, 3, 2).
Returns:
xcenter (torch.Tensor):
Each row i gives the center of simplex i <= k; size (k, 2).
"""
# unique_simplices size (k,3,2)
k = simplices.size(0)
batch_xcenter_barycentric_coordinates = \
cls.centers_barycentric_coordinates.view(1, 3).expand(k, 3)
assert batch_xcenter_barycentric_coordinates.size() == (k, 3)
simplices_mat = simplices.transpose(1, 2).float()
assert simplices_mat.size() == (k, 2, 3)
xcenter = (
simplices_mat
@ batch_xcenter_barycentric_coordinates.unsqueeze(-1)).squeeze(-1)
assert xcenter.size() == (k, 2)
return xcenter
@classmethod
def append_ones(cls, x, dim=0):
"""
Append ones to ``x`` along dimension ``dim``.
Useful for constructing a barycentric matrix.
Args:
dim (int>=0):
dimension along which to concatenate with ones vector.
"""
if dim not in [0, -1]:
raise ValueError(
'Can only append ones to first or last dimension.')
x_size = x.size()
assert x_size[dim] == cls.ldim, ('concatenation dimension should be '
'the same as the lattice dimension.')
if dim == 0:
ones_vec = torch.ones((1, *x_size[1::]))
x_out = torch.cat((x.float(), ones_vec), dim=0)
elif dim == -1:
ones_vec = torch.ones((*x_size[0:-1], 1))
x_out = torch.cat((x.float(), ones_vec), dim=-1)
return x_out
# ==== Affine subsets coefficients ===================================== #
def init_affine_coefficients(self):
"""
Initialize affine coefficients of each simplex in the lattice.
The affine coefficients completely specify the function in each
simplex, i.e., f(x1,x2) = a1.x1 + a2.x2 + d, where (a1, a2, d) are the
affine coefficients for a given simplex.
"""
try:
x_lat = self.simplex_centers
except AttributeError:
print('Need to initialize simplex_centers before affine_coeff.')
raise
self.affine_coeff = \
self.get_affine_coefficients(x_lat, use_saved_affine=False)
def get_affine_coefficients(self, x_lat, use_saved_affine=True, **kwargs):
"""
Get the affine coefficients for the simplex to which x_lat belongs to.
Plane equation: a1*x1 + a2*x2 + z + d = 0
(plane coefficients: (a1, a2, 1, d));
Affine space equation: z = f(x1,x2) = -a1*x1 -a2*x2 -d
(affine coefficients: (a'1, a'2, d') = (-a1, -a2, -d));
Args:
x_lat (torch.Tensor):
input in lattice coordinates; size (m, 2).
use_saved_affine (bool):
use saved self.affine_coeff, resulting in a speedup.
Returns:
affine_coeff (torch.Tensor):
size: (m, 3).
"""
if use_saved_affine is True:
try:
lattice_mask = self.get_lattice_mask(x_lat)
x_lat_in = x_lat[lattice_mask]
idx_unique_simplices, _, _ = \
self.check_my_simplex(x_lat_in, **kwargs)
affine_coeff = torch.zeros(x_lat.size(0), 3)
affine_coeff[lattice_mask] = \
self.affine_coeff[idx_unique_simplices]
except AttributeError:
print('affine_coeff attribute still not created...')
raise
else:
plane_coeff = self.get_plane_coefficients(x_lat, **kwargs)
affine_coeff = self.get_affine_coeff_from_plane_coeff(plane_coeff)
assert affine_coeff.size() == (x_lat.size(0), 3)
return affine_coeff
@staticmethod
def get_affine_coeff_from_plane_coeff(plane_coeff):
"""
Get affine coefficients from plane coefficients.
Plane equation: a1*x1 + a2*x2 + z + d = 0
(plane coefficients (a1, a2, 1, d));
Affine space equation: z = f(x1,x2) = -a1*x1 -a2*x2 -d
(affine coefficients: (a'1, a'2, d') = (-a1, -a2, -d))
Args:
plane_coeff (torch.Tensor):
size: (m, 4).
Returns:
affine_coeff (torch.Tensor):
size: (m, 3).
"""
return plane_coeff.index_select(1, torch.tensor([0, 1, 3])).mul(-1)
# ==== Check My Simplex =============================================== #
def check_my_simplex(self, x_lat, **kwargs):
"""
Get the lattice idx/coordinates of the vertices which form
the simplex in which x_lat lives, using the halfplane method.
Args:
x_lat (torch.Tensor):
input in lattice coordinates; size (m, 2).
Returns:
idx_unique_simplices (torch.int64):
indices (in self.unique_simplices) of simplices in which
x_lat lives; size (m,).
vertices_lat (torch.int64):
each element is an array of 3 vertices which constitute
the simplex in which x_lat[i] lives; size (m, 3, 2).
second_mask (torch.int64):
mask which is one for each point belonging to the second
possible simplex; size (m,).
"""
assert x_lat.size(1) == self.ldim, f'found size {x_lat.size(1)}...'
if not self.inside_lattice(x_lat):
raise ValueError(
'check_my_simplex(): x_lat should lie inside lattice.')
# compute fractional part of x_lat
x_lat_floor = x_lat.floor()
x_lat_fractional = x_lat - x_lat_floor
second_mask = self.get_halfplane_mask(x_lat_fractional)
base_simplex_idx = \
x_lat_floor[:, 1].long().sub(self.lmin) * (self._lsize * 2) + \
x_lat_floor[:, 0].long().sub(self.lmin) * 2
idx_unique_simplices = base_simplex_idx + second_mask
vertices_lat = self.unique_simplices[idx_unique_simplices]
return idx_unique_simplices, vertices_lat, second_mask
@staticmethod
def get_halfplane_mask(x_lat_fractional):
"""
Use the halfplane method to get the mask which is 1 for the
tensors which live in the second simplex and 0 otherwise.
Args:
x_lat_fractional (torch.Tensor):
fractional part of x_lat in lattice basis; size (m, 2).
Returns:
second_mask (torch.int64):
mask which is one for each point belonging to the second
possible simplex; size (m,).
"""
# {x1} + {x2} - 1 >= 0 -> belongs to second simplex;
# {xi} - fractional of xi
half_plane = x_lat_fractional.sum(1).sub(1) # (m,)
second_mask = (half_plane > 0.).long() # (m,)
return second_mask
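# Worked example (illustrative): fractional coordinates (0.7, 0.6) give
# 0.7 + 0.6 - 1 = 0.3 > 0, so the point lies in the second (upper) triangle
# of its unit cell; (0.2, 0.3) gives -0.5 <= 0, i.e., the first triangle.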
# ==== Plane coefficients ============================================== #
def get_plane_coefficients(self, x_lat, **kwargs):
"""
Get the plane coefficients for the simplex to which x_lat belongs to.
Plane equation: a1*x1 + a2*x2 + z + d = 0
(plane coefficients: (a1, a2, 1, d)).
This function should only be called in get_affine_coefficients()
to create self.affine_coeff tensor. After that, it is much faster
to call get_affine_coefficients() with use_saved_affine=True to
retrieve self.affine_coeff directly.
Args:
x_lat (torch.Tensor):
input in lattice coordinates. size (m, 2)
Returns:
plane_coeff (torch.Tensor):
size: (m, 4).
"""
lattice_mask = self.get_lattice_mask(x_lat)
x_lat_in = x_lat[lattice_mask]
_, vertices_lat_in, _ = self.check_my_simplex(x_lat_in, **kwargs)
vertices_lat_in_rel = vertices_lat_in.sub(self.lmin)
vertices_values_in = \
self._C_mat[vertices_lat_in_rel[:, :, 0],
vertices_lat_in_rel[:, :, 1]]
vertices_size = vertices_lat_in.size() # (m, 3, 2)
# (m*3, 2) to convert all vertices to standard
vertices_lat_in_reshape = vertices_lat_in.view(-1, 2)
vertices_std_in = \
self.lattice_to_standard(
vertices_lat_in_reshape.float()).reshape(vertices_size)
vertices_std_in_with_values = \
torch.cat(
(vertices_std_in,
vertices_values_in.unsqueeze(-1)),
dim=-1)
plane_coeff_in = self.solve_method(vertices_std_in_with_values)
plane_coeff = torch.tensor([0., 0., 1., 0.])\
.unsqueeze(0).expand(x_lat.size(0), -1).clone()
plane_coeff[lattice_mask] = plane_coeff_in
# check that c = plane_coeff[:, 2] = 1
assert (plane_coeff[:, 2]).allclose(torch.ones_like(plane_coeff[:, 2]))
return plane_coeff
@classmethod
def solve_method(cls, vertices_std_with_values):
"""
Get plane coefficients for each datapoint from vertices_std_with
_values, i.e., (x1,x2,z), using the 'solve' method.
Plane equation: a1*x1 + a2*x2 + z + d = 0.
We have 3 vertices and 3 unknowns (a1, a2, d) for each datapoint,
so we can substitute these and solve the system of 3 equations
to get (a1, a2, d).
Args:
vertices_std_with_values (torch.Tensor):
concatenation of datapoint locations in standard coordinates
and values; size (m, 3, 3).
Returns:
plane_coeff (torch.Tensor):
size: (m, 4).
"""
m = vertices_std_with_values.size(0)
vertices_std = vertices_std_with_values[:, :, 0:2]
vertices_values = vertices_std_with_values[:, :, 2]
# vertices for (x1, x2, z) -> {(x1_K, x2_K, z_K)}, K in {A,B,C}
# (x1-x1_C).a1 + (x2-x2_C).a2 + (z-z_C) = 0
# plug (x1_A, x2_A, z_A) and (x1_B, x2_B, z_B) above to find a1, a2
z_diff = (vertices_values[:, 0:2] -
vertices_values[:, 2:3]).unsqueeze(-1)
assert z_diff.size() == (m, 2, 1)
x_diff = vertices_std[:, 0:2, :] - vertices_std[:, 2:3, :]
assert x_diff.size() == (m, 2, 2)
a1_a2 = torch.linalg.solve(x_diff, z_diff.mul(-1))
assert a1_a2.size() == (m, 2, 1)
a1_a2 = a1_a2.squeeze(-1)
assert a1_a2.size() == (m, 2)
# normals are normalized by last coordinate
normalized_normals = cls.append_ones(a1_a2, dim=-1)
assert normalized_normals.size() == (m, 3)
plane_coeff = cls.get_plane_coeffs_from_normals_vertices(
normalized_normals, vertices_std_with_values)
return plane_coeff
@staticmethod
def get_plane_coeffs_from_normals_vertices(normalized_normals,
vertices_std_with_values):
"""
Get plane coefficients from the normalized simplex normals
and the vertices.
Args:
normalized_normals (torch.Tensor):
normals for each datapoint normalized by last coordinate,
i.e., of the form (a1, a2, 1); size (m, 3).
vertices_std_with_values (torch.Tensor):
concatenation of datapoint locations in standard coordinates
and values; size (m, 3, 3).
Returns:
plane_coeff (torch.Tensor):
size: (m, 4).
"""
m = vertices_std_with_values.size(0)
# plug (x1A, x2A, zA) into plane equation to find d
d = (normalized_normals *
vertices_std_with_values[:, 0, :]).sum(1).mul(-1)
assert d.size() == (m, )
plane_coeff = torch.cat((normalized_normals, d.view(-1, 1)), dim=-1)
assert plane_coeff.size() == (m, 4)
return plane_coeff
# ==== Coordinate transformations ====================================== #
def lattice_to_standard(self, x_lat):
"""
Transform x_lat coordinates in lattice basis to coordinates in
standard basis.
Args:
x_lat (torch.Tensor):
input in lattice basis; size(m, 2).
Returns:
x_standard (torch.Tensor):
input in standard basis; size(m, 2).
The ith output vector corresponds to taking the linear combination
of the columns of lattice matrix with scalars forming vector xi:
xi[0]X_mat[:, 0] + xi[1]X_mat[:, 1] = X_mat @ xi
"""
assert len(x_lat.size()) == 2, 'x_lat should be 2D'
assert x_lat.size(1) == self.ldim, f'found size {x_lat.size(1)}...'
X_mat = self._X_mat.to(x_lat.dtype)
x_standard = (X_mat @ x_lat.t()).t()
return x_standard
def standard_to_lattice(self, x_standard):
"""
Transform x_standard coordinates in standard basis to coordinates in
lattice basis.
Args:
x_standard (torch.Tensor):
input in standard basis; size(m, 2).
Returns:
x_lat (torch.Tensor):
input in lattice basis; size(m, 2).
The ith output vector corresponds to taking the linear combination
of the columns of lattice matrix with scalars forming vector xi:
xi[0]X_mat^(-1)[:, 0] + xi[1]X_mat^(-1)[:, 1] = X_mat^(-1) @ xi
"""
assert len(x_standard.size()) == 2, 'x_standard should be 2D'
assert x_standard.size(1) == self.ldim, \
f'found size {x_standard.size(1)}...'
X_mat_inv = self._X_mat_inv.to(x_standard.dtype)
x_lat = (X_mat_inv @ x_standard.t()).t()
return x_lat
# ==== Compute z = f(x1,x2) ============================================ #
def get_values_from_interpolation(self, x_lat, **kwargs):
"""
Get values for each datapoint in x_lat from interpolation via
barycentric coordinates in its simplex.
Args:
x_lat (torch.Tensor):
input in lattice basis; size(m, 2).
Returns:
vertices_lat (torch.int64):
each element is an array of 3 vertices which constitute
the simplex in which x_lat[i] lives; size (m, 3, 2).
x_values (torch.Tensor):
size (m,).
"""
lattice_mask = self.get_lattice_mask(x_lat)
x_lat_in = x_lat[lattice_mask]
m = x_lat_in.size(0)
# sizes: ((m, 3), (m, 3, 3))
x_lat_in_barycentric_coordinates, vertices_lat_in = \
self.get_barycentric_coordinates(x_lat_in, **kwargs)
# rel = relative to bottom left corner ("origin")
vertices_lat_in_rel = vertices_lat_in.sub(self.lmin)
# TODO: Generalize next operation (e.g.)
# C_mat[vertices_lat_rel.split(dim=-1)]
vertices_values_in = \
self._C_mat[vertices_lat_in_rel[:, :, 0],
vertices_lat_in_rel[:, :, 1]]
assert vertices_values_in.size() == (m, 3)
# batch inner product:
# f(xi) = a1f(v1) + a2f(v2) + a3f(v3), i \in {1,...,m}
# where v1, v2, v3 are the vertices of the simplex where xi lives
x_values_in = (vertices_values_in *
x_lat_in_barycentric_coordinates).sum(1) # (m,)
assert x_values_in.size() == (m, )
vertices_lat = torch.ones((x_lat.size(0), 3, 2),
dtype=torch.int64).mul(-1)
vertices_lat[lattice_mask] = vertices_lat_in
x_values = torch.zeros(x_lat.size(0))
x_values[lattice_mask] = x_values_in
return x_values, vertices_lat
def get_barycentric_coordinates(self, x_lat, in_origin=True, **kwargs):
"""
Get the barycentric coordinates of each datapoint in x_lat.
Args:
x_lat (torch.Tensor):
input in lattice basis; size(m, 2).
in_origin (bool):
If True, compute barycentric coordinates in origin instead
of in original location (should give the same result,
but be faster, since we don't need to compute inverses).
Returns:
x_lat_barycentric_coordinates (torch.Tensor):
barycentric coordinates of each datapoint; size (m, 3).
vertices_lat (torch.int64):
each element is an array of 3 vertices which constitute
the simplex in which x_lat[i] lives; size (m, 3, 2).
"""
if not self.inside_lattice(x_lat):
raise ValueError('get_barycentric_coordinates(): '
'x_lat should lie inside lattice.')
m = x_lat.size(0)
# vertices size (m, 3, 2), second_mask size (m,)
_, vertices_lat, second_mask = self.check_my_simplex(x_lat, **kwargs)
if in_origin is True:
x_lat_fractional = x_lat - x_lat.floor()
batch_r = self.append_ones(x_lat_fractional, dim=-1) # (m, 3)
batch_R_mat_inv = \
self.batch_inv_origin_simplices_barycentric_mat[second_mask]
else:
warnings.warn('in_origin=False is poorly conditioned. '
'Prefer in_origin=True.')
batch_r = self.append_ones(x_lat, dim=-1) # (m, 3)
batch_R_mat = self.append_ones(vertices_lat,
dim=-1).transpose(1, 2) # (m, 3, 3)
batch_R_mat_inv = torch.inverse(batch_R_mat) # (m, 3, 3)
assert batch_r.size() == (m, 3)
assert batch_R_mat_inv.size() == (m, 3, 3)
# (m, 3, 3) x (m, 3, 1) = (m, 3, 1) -> squeeze -> (m, 3)
x_lat_barycentric_coordinates = \
(batch_R_mat_inv @ batch_r.unsqueeze(-1)).squeeze(-1)
assert x_lat_barycentric_coordinates.size() == (m, 3)
return x_lat_barycentric_coordinates, vertices_lat
def get_values_from_affine_coefficients(self, x_lat, **kwargs):
"""
Get values for each datapoint in x_lat from the affine coefficients of
its simplex.
affine coefficients: (a'1, a'2, d');
z = f(x1,x2) = a'1.x1 + a'2.x2 + d'.
Args:
x_lat (torch.Tensor):
input in lattice basis; size(m, 2).
Returns:
x_values (torch.Tensor):
size (m,).
"""
affine_coeff = self.get_affine_coefficients(x_lat, **kwargs) # (m, 3)
# get (a'1, a'2, d')
al1, al2, dl = \
affine_coeff[:, 0], affine_coeff[:, 1], affine_coeff[:, 2]
x_std = self.lattice_to_standard(x_lat)
x_values = al1 * x_std[:, 0] + al2 * x_std[:, 1] + dl
return x_values
# ====================================================================== #
@property
def flattened_C(self):
"""
Get flattened coefficients from matrix, moving first along rows
(x-direction) and then columns (y-direction).
"""
return self._C_mat.t().flatten()
def flattened_C_to_C_mat(self, flattened_C):
"""
Get coefficients matrix from flattened coefficients.
Args:
flattened_C (torch.Tensor):
flattened coefficients tensor; size (m, ).
Returns:
C_mat (torch.Tensor):
coefficients matrix; size (n, n), such that n * n = m.
"""
assert flattened_C.size() == (self._C_mat.size(0) *
self._C_mat.size(1),)
return flattened_C.reshape(self._C_mat.size()).t()
def save(self, save_str, lattice_dict=None, **kwargs):
"""
Save lattice in ``lattice_dict`` under ``save_str`` key.
Args:
save_str (str):
key for saving lattice in dictionary.
lattice_dict (dict or None):
If not None, save lattice in this dictionary. Otherwise,
initialize a new dictionary and save it there.
Returns:
lattice_dict (dict):
dictionary with saved lattice.
"""
if lattice_dict is None:
lattice_dict = {}
if save_str not in lattice_dict:
lattice_dict[save_str] = {}
lattice_dict[save_str]['X_mat'] = self._X_mat.clone()
lattice_dict[save_str]['C_mat'] = self._C_mat.clone()
return lattice_dict
# ====================================================================== #
def refine_lattice(self):
"""Refine the lattice into a finer scale with half the grid size."""
new_X_mat = self._X_mat.div(2) # divide lattice vectors by 2
new_lsize = self._lsize * 2
new_C_mat = self._C_mat.new_zeros((new_lsize + 1, new_lsize + 1))
# fill borders by piecewise linear interpolation
xp = np.arange(0, self._lsize + 1) * 2
x = np.arange(0, self._lsize * 2 + 1)
new_C_mat[0, :] = \
torch.from_numpy(np.interp(x, xp, self._C_mat[0, :].numpy()))
new_C_mat[:, 0] = \
torch.from_numpy(np.interp(x, xp, self._C_mat[:, 0].numpy()))
new_C_mat[-1, :] = \
torch.from_numpy(np.interp(x, xp, self._C_mat[-1, :].numpy()))
new_C_mat[:, -1] = \
torch.from_numpy(np.interp(x, xp, self._C_mat[:, -1].numpy()))
# fill interior by piecewise linear interpolation
grid = Grid(self.lmin + 0.5,
self.lmax - 0.1,
h=0.5,
square=True,
to_numpy=False,
to_float32=True)
int_x_values, _ = self.get_values_from_interpolation(grid.x)
int_new_C_mat = int_x_values.reshape(grid.meshgrid_size).t()
new_C_mat[1:-1, 1:-1] = int_new_C_mat
# Apply changes
self.update_lattice(new_X_mat, new_C_mat)
```
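A short usage sketch of the `Lattice` class above (hedged: a minimal example assuming `htvlearn` is importable; the query points are arbitrary and chosen to lie well inside the unit-sized lattice):

```python
import torch
from htvlearn.lattice import Lattice

lat = Lattice(lsize=10, C_init='normal')   # hexagonal lattice with lsize*h = 1

# evaluate f at two points given in standard coordinates
x_std = torch.tensor([[0.05, 0.02], [-0.10, 0.20]])
x_lat = lat.standard_to_lattice(x_std)
values, _ = lat.get_values_from_interpolation(x_lat)

# the same values from the per-simplex affine coefficients
values_affine = lat.get_values_from_affine_coefficients(x_lat)
assert torch.allclose(values, values_affine, atol=1e-5)

lat.refine_lattice()   # halve the grid step while keeping the same function
```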
#### File: HTV-Learn/htvlearn/nn_manager.py
```python
import warnings
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import numpy as np
from functools import partial
import torch.autograd.functional as AF
from htvlearn.htv_utils import compute_mse_snr
from htvlearn.networks import (
ReLUfcNet2D,
LeakyReLUfcNet2D,
GELUfcNet2D
)
from htvlearn.nn_project import NNProject
from htvlearn.hessian import (
get_exact_grad_Hessian,
get_exact_Hessian,
get_finite_second_diff_Hessian
)
from htvlearn.lattice import Lattice
#########################################################################
# MANAGER
class NNManager(NNProject):
""" """
def __init__(self, params, log=True):
"""
Args:
params (dict):
parameter dictionary.
log (bool):
if True, log results.
"""
super().__init__(params, log=log)
is_ckpt_loaded = False
if self.load_ckpt is True:
# is_ckpt_loaded=True if a checkpoint was successfully loaded.
is_ckpt_loaded = self.restore_ckpt_params()
self.net = self.build_model(self.params, self.device)
self.net.double()
self.net.dtype = next(self.net.parameters()).dtype
self.optimizer, self.scheduler = self.set_optimization()
if is_ckpt_loaded is True:
self.restore_model_data(self.net)
# During testing, average the loss only at the end to get accurate
# value of the loss per sample. If using reduction='mean', when
# nb_test_samples % batch_size != 0 we can only average the loss per
# batch (as in training for printing the losses) but not per sample.
self.criterion = nn.MSELoss(reduction='mean')
self.test_criterion = nn.MSELoss(reduction='sum')
self.criterion.to(self.device)
self.test_criterion.to(self.device)
# # uncomment for printing network architecture
# print(self.net)
@classmethod
def build_model(cls, params, device, *args, **kwargs):
"""
Build the network model.
Args:
params (dict):
contains the network name and the model parameters.
device (str):
'cpu' or 'cuda:0'.
Returns:
net (nn.Module)
"""
print('\n==> Building model..')
if params['net_model'] == 'relufcnet2d':
NetworkModule = ReLUfcNet2D
elif params['net_model'] == 'leakyrelufcnet2d':
NetworkModule = LeakyReLUfcNet2D
elif params['net_model'] == 'gelufcnet2d':
NetworkModule = GELUfcNet2D
else:
raise ValueError(f'Model {params["net_model"]} not available.')
net = NetworkModule(**params['model'], device=device)
net = net.to(device)
if device == 'cuda':
cudnn.benchmark = True
print(f'[Network] Total number of parameters : {net.num_params}.')
return net
def set_optimization(self):
"""Initialize optimizer and scheduler."""
optimizer = optim.Adam(self.net.parameters(),
lr=0.001,
weight_decay=self.params['weight_decay'])
print('\nOptimizer :', optimizer, sep='\n')
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
self.params['milestones'])
return optimizer, scheduler
@property
def model_state(self):
""" """
return self.net.state_dict()
@property
def optimizer_state(self):
""" """
return self.optimizer.state_dict()
@property
def htv_log(self):
""" """
return self.htv_dict
@property
def train_loss_log(self):
""" """
return self.train_loss_dict
@property
def valid_loss_log(self):
""" """
return self.valid_loss_dict
#########################################################################
# TRAIN
def train(self):
"""Training loop."""
self.net.train() # set the network in training mode
if self.params['verbose']:
self.print_train_info()
if self.params['log_step'] is None: # default
# log at every epoch
self.params['log_step'] = self.num_train_batches
if self.params['valid_log_step'] is None: # default
# validation done halfway and at the end of training
self.params['valid_log_step'] = int(
self.num_train_batches * self.params['num_epochs'] * 1. / 2.)
elif self.params['valid_log_step'] < 0:
# validation at every epoch
self.params['valid_log_step'] = self.num_train_batches
self.global_step = 0
###
# initialize log dictionaries
self.htv_dict = {}
self.train_loss_dict = {}
self.valid_loss_dict = {}
###
# get initial model performance
# -1 signals that training was not performed
self.latest_train_loss = -1
self.validation_step(-1)
self.net.train()
print('\n==> Starting training...')
for epoch in range(0, self.params['num_epochs']):
self.train_epoch(epoch)
# shuffle training data
self.trainloader = \
self.dataloader.get_shuffled_trainloader_in_memory()
print('\nFinished training.')
# test model
self.test()
def train_epoch(self, epoch):
"""
Training for one epoch.
Args:
epoch (int).
"""
print(f'\nEpoch: {epoch}\n')
running_loss = 0.
for batch_idx, (inputs, labels) in enumerate(self.trainloader):
inputs = inputs.to(device=self.device, dtype=self.net.dtype)
labels = labels.to(device=self.device, dtype=self.net.dtype)
outputs = self.net(inputs)
loss = self.criterion(outputs, labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
running_loss += loss.item()
if batch_idx % self.params['log_step'] == (
self.params['log_step'] - 1):
# training log step
mse = (running_loss / self.params['log_step'])
self.latest_train_loss = mse
losses_dict = {'avg. mse': mse}
self.train_log_step(epoch, batch_idx, losses_dict)
running_loss = 0. # reset running loss
if self.global_step % self.params['valid_log_step'] == (
self.params['valid_log_step'] - 1):
# validation log step
self.validation_step(epoch)
self.net.train()
self.global_step += 1
if self.scheduler is not None:
self.scheduler.step()
if self.params['verbose']:
lr = [group['lr'] for group in self.optimizer.param_groups]
print(f'scheduler: epoch - {self.scheduler.last_epoch}; '
f'learning rate - {lr}')
def validation_step(self, epoch):
"""
Perform one validation step. Saves results on checkpoint.
Args:
epoch (int).
"""
train_loss, _ = self.evaluate_results(mode='train')
valid_loss, _ = self.evaluate_results(mode='valid')
#####
if not self.params['no_htv']:
print('\n==> Computing Hessian...')
self.htv_dict[str(epoch + 1)] = self.compute_network_htv()
print('HTV dict :', self.htv_dict[str(epoch + 1)])
print('Exact HTV :', self.data.cpwl.get_exact_HTV())
print('Finished.')
self.update_json('htv', self.htv_dict[str(epoch + 1)])
#####
self.train_loss_dict[str(epoch + 1)] = train_loss
self.valid_loss_dict[str(epoch + 1)] = valid_loss
print(f'\nvalidation mse : {valid_loss}')
losses_dict = {'train_mse': train_loss,
'valid_mse': valid_loss}
self.valid_log_step(losses_dict)
self.ckpt_log_step(epoch) # save checkpoint
def compute_network_htv(self):
"""
Compute the network's HTV using the 'finite_diff_differential' or
'exact_differential' mode.
Returns:
htv (float).
"""
cpwl = True if 'relu' in self.params['net_model'] else False
htv = {}
if self.params['htv_mode'] == 'finite_diff_differential':
# use finite second differences to compute the hessian
grid = self.data.cpwl.get_grid(h=0.0002)
with torch.no_grad():
Hess = get_finite_second_diff_Hessian(
grid, self.evaluate_func)
htv = self.get_htv_from_Hess(Hess, grid.h, cpwl=cpwl)
elif self.params['htv_mode'] == 'exact_differential':
warnings.warn('"exact_differential" mode is computationally '
'expensive and does not lead to precise '
'computations, in general. Prefer setting '
'"htv_mode" to "finite_diff_differential".')
grid = self.data.cpwl.get_grid(h=0.01)
if self.params['net_model'].endswith('relufcnet2d'):
# cpwl function -> exact gradient + finite first differences
# to compute hessian
Hess = get_exact_grad_Hessian(
grid, partial(self.differentiate_func, 'jacobian'))
else:
# non-cpwl function -> exact hessian + sum over the grid locations
# to compute htv
Hess = get_exact_Hessian(
grid, partial(self.differentiate_func, 'hessian'))
htv = self.get_htv_from_Hess(Hess, grid.h, cpwl=cpwl)
return htv
def evaluate_func(self, x, batch_size=2000000):
"""
Evaluate model function for some input.
Args:
x (np.ndarray):
inputs. size: (n, 2).
batch_size (int):
batch size for evaluation.
Returns:
y (np.ndarray):
result of evaluating model at x.
"""
x = torch.from_numpy(x).to(self.device)
assert x.dtype == torch.float64
dataloader = x.split(batch_size)
if self.params['verbose']:
print('Length dataloader: ', len(dataloader))
y = torch.tensor([], device=x.device, dtype=x.dtype)
for batch_idx, input in enumerate(dataloader):
out = self.net(input)
y = torch.cat((y, out), dim=0)
return y.detach().cpu().numpy()
def differentiate_func(self, mode, x):
"""
Evaluate model Jacobian/Hessian at some input.
Args:
mode (str):
"jacobian" or "hessian"
x (np.ndarray):
inputs. size: (n, 2).
Returns:
x_diff (np.ndarray):
result of evaluating model ``mode`` at x.
"""
assert mode in ['jacobian', 'hessian']
inputs = tuple(torch.from_numpy(x).to(self.device))
autograd_func = AF.jacobian if mode == 'jacobian' else AF.hessian
g = partial(autograd_func, lambda x: self.net(x.unsqueeze(0)))
x_diff = torch.stack(tuple(map(g, inputs))).detach().cpu().numpy()
return x_diff
@staticmethod
def get_htv_from_Hess(Hess, h, cpwl=False):
"""
Get the HTV from the hessian at grid locations.
Args:
Hess (np.ndarray):
hessians at the grid locations; size: (N, N, 2, 2).
h (float):
grid size.
cpwl (bool):
True if network is CPWL.
Returns:
htv (float).
"""
if cpwl is False:
# schatten-p-norm -> sum
S = np.linalg.svd(Hess, compute_uv=False, hermitian=True)
assert S.shape == (*Hess.shape[0:2], 2)
htv = {}
for p in [1, 2, 5, 10]:
points_htv = np.linalg.norm(S, ord=p, axis=-1) # schatten norm
# value x area (Riemann integral)
htv[str(p)] = (points_htv * h * h).sum()
else:
points_htv = np.abs(Hess[:, :, 0, 0] + Hess[:, :, 1, 1])
htv = (points_htv * h * h).sum()
return htv
def test(self):
"""Test model."""
if self.params['verbose']:
self.print_test_info()
test_loss, _ = self.evaluate_results(mode='test')
print(f'\ntest mse : {test_loss}')
self.update_json('test_mse', test_loss)
# save test prediction to last checkpoint
self.ckpt_log_step(self.params['num_epochs'] - 1)
print('\nFinished testing.')
def evaluate_results(self, mode):
"""
Evaluate train, validation or test results.
Args:
mode (str):
'train', 'valid' or 'test'
Returns:
mse (float):
``mode`` mean-squared-error.
output (torch.Tensor):
result of evaluating model on ``mode`` set.
"""
assert mode in ['train', 'valid', 'test']
if mode == 'train':
dataloader = self.trainloader
data_dict = self.data.train
elif mode == 'valid':
dataloader = self.validloader
data_dict = self.data.valid
else:
dataloader = self.testloader
data_dict = self.data.test
self.net.eval()
running_loss = 0.
total = 0
output = torch.tensor([]).to(device=self.device)
values = torch.tensor([]).to(device=self.device)
with torch.no_grad():
# notation: _b = 'batch'
for batch_idx, (inputs_b, labels_b) in enumerate(dataloader):
inputs_b = inputs_b.to(device=self.device,
dtype=self.net.dtype)
labels_b = labels_b.to(device=self.device,
dtype=self.net.dtype)
outputs_b = self.net(inputs_b)
output = torch.cat((output, outputs_b), dim=0)
values = torch.cat((values, labels_b), dim=0)
loss = self.test_criterion(outputs_b, labels_b)
running_loss += loss.item()
total += labels_b.size(0)
data_dict['predictions'] = output
loss = running_loss / total
# sanity check
mse, _ = compute_mse_snr(values.cpu(), output.cpu())
assert np.allclose(mse, loss), \
'(mse: {:.7f}, loss: {:.7f})'.format(mse, loss)
return mse, output
def evaluate_lattice(self):
"""Create and evaluate network on lattice."""
lat = Lattice(**self.params['lattice'])
print(f'Sampled lattice lsize: {lat.lsize}.')
lattice_grid = lat.lattice_to_standard(lat.lattice_grid.float())
z = self.forward_data(lattice_grid)
new_C_mat = lat.flattened_C_to_C_mat(z)
lat.update_coefficients(new_C_mat.float())
return lat
def forward_data(self, inputs, batch_size=64):
"""
Compute model output for some input.
Args:
input (torch.Tensor):
size: (n, 2).
batch_size (int)
"""
self.net.eval()
predictions = torch.tensor([])
with torch.no_grad():
for batch_idx, inputs in enumerate(inputs.split(batch_size)):
inputs = inputs.to(device=self.device, dtype=self.net.dtype)
outputs = self.net(inputs)
predictions = torch.cat((predictions, outputs.cpu()), dim=0)
return predictions
@staticmethod
def read_htv_log(htv_log):
"""
Parse htv_log dictionary.
Args:
htv_log (dict).
Returns:
epochs (np.ndarray):
array with saved epochs. size: (E,).
htv: (dict/np.ndarray):
saved htv across training:
if non-cpwl: dict('p': array of size: (E,)),
if cpwl: array of size: (E,).
"""
assert isinstance(htv_log, dict)
# keys of htv_log are epochs
epochs = np.array([int(epoch) for epoch in htv_log.keys()])
idx = np.argsort(epochs)
epochs = epochs[idx]
# e.g. epoch 1
# non-cpwl network:
# htv_log['1'] = {'1': htv_1, '2': htv_2}
# relu network: epoch 1: htv_log['1'] = htv
# dictio is list of dictionaries containing 'p': htv_p for each epoch
# or a single value in the case of CPWL networks
dictio = list(htv_log.values())
if isinstance(dictio[0], dict):
# p in schatten-p norms
p_array = np.array([int(p) for p in dictio[0].keys()])
p_array.sort()
# e.g.:
# {'1': np.array of size (epochs,),
# '2': np.array of size (epochs,)}
htv = {}
for p in p_array:
htv[str(p)] = np.array(
[float(dictio[j][str(p)]) for j in range(0, len(dictio))])
htv[str(p)] = htv[str(p)][idx] # sort array according to epoch
else:
htv = np.array([float(dictio[j]) for j in range(0, len(dictio))])
htv = htv[idx]
return epochs, htv
@staticmethod
def read_loss_log(loss_log):
"""
Read loss_log (train or valid) dictionary.
Returns:
epochs (np.ndarray):
array with saved epochs. size: (E,).
loss (np.ndarray):
array with saved loss across training. size: (E,)
"""
assert isinstance(loss_log, dict)
# keys of loss_log are epochs
epochs = np.array([int(epoch) for epoch in loss_log.keys()])
idx = np.argsort(epochs)
epochs = epochs[idx]
# e.g. loss_log = {'1': loss_epoch_1, ..., '300': loss_epoch_300}
loss = np.array([float(val) for val in loss_log.values()])
loss = loss[idx] # sort
return epochs, loss
```
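In `get_htv_from_Hess`, the non-CPWL branch estimates the HTV by taking the Schatten-p norm of the Hessian's singular values at every grid point and summing them as a Riemann sum over cells of area h*h. A self-contained sketch of that computation on a synthetic Hessian field (the grid, spacing, and Hessian values are made up for illustration):
```python
import numpy as np

h = 0.01  # hypothetical grid spacing
# Synthetic field of symmetric 2x2 Hessians on a 2 x 2 grid; size (N, N, 2, 2)
Hess = np.zeros((2, 2, 2, 2))
Hess[..., 0, 0] = 1.0
Hess[..., 1, 1] = 0.5

# Singular values of each Hessian; size (N, N, 2)
S = np.linalg.svd(Hess, compute_uv=False, hermitian=True)

htv = {}
for p in [1, 2, 5, 10]:
    points_htv = np.linalg.norm(S, ord=p, axis=-1)  # Schatten-p norm per grid point
    htv[str(p)] = (points_htv * h * h).sum()        # value x area (Riemann sum)
print(htv)
```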
#### File: htvlearn/plots/base_plot.py
```python
import os
import copy
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
import matplotlib.cm as cm
from htvlearn.data import Data
class BasePlot():
colorscale = 'coolwarm' # default colorscale
def __init__(self,
data_obj=None,
log_dir=None,
**kwargs):
"""
Args:
data_obj (Data):
None (if not plotting data samples) or
object of class Data (see htvlearn.data).
log_dir (str):
Log directory for html images. If None, the images are only
shown but not saved in html format.
"""
self.data = data_obj
self.log_dir = log_dir
if self.log_dir is not None and not os.path.isdir(self.log_dir):
raise NotADirectoryError(
f'log_dir "{self.log_dir}" is not a valid directory')
def verify_data_obj(self):
"""Verify that a data object exists and is valid"""
if self.data is None:
raise ValueError('A data object does not exist.')
elif not isinstance(self.data, Data):
raise ValueError(f'data_obj is of type {type(self.data)}.')
@classmethod
def map_val2color(cls, val, vmin, vmax, colorscale=None):
"""
Map a value to a color in the colormap.
Args:
val (float):
value in [vmin, vmax] to map to color.
vmin, vmax (float):
min and max ranges for val.
colorscale (str):
matplotlib colorscale or None (use default).
Returns:
rgb color.
"""
colorsc = colorscale if colorscale is not None else cls.colorscale
cmap = cm.get_cmap(colorsc)
if vmin > vmax:
raise ValueError('incorrect relation between vmin and vmax')
t = 0.
if not np.allclose(vmin, vmax):
t = (val - vmin) / float((vmax - vmin)) # normalize val
R, G, B, alpha = cmap(t)
return 'rgb(' + '{:d}'.format(int(R * 255 + 0.5)) + ',' + '{:d}'\
.format(int(G * 255 + 0.5)) +\
',' + '{:d}'.format(int(B * 255 + 0.5)) + ')'
@classmethod
def map_val2color2d(cls, val, vmin, vmax):
"""
Map a 2D value to an rgb color. The R and G channels have
an independent and direct correspondence to each element in
the 2D value. The B channel is kept fixed.
Args:
val (2d array):
value s.t. val[i] in [vmin[i], vmax[i]], i=1,2,
to be mapped to an rgb color. The R, G channels are set
by val.
vmin, vmax (2d array):
min and max ranges for each element in val.
Returns:
rgb color.
"""
if vmin[0] > vmax[0] or vmin[1] > vmax[1]:
raise ValueError('incorrect relation between vmin and vmax')
t = np.zeros(2)
# normalize val
if not np.allclose(vmin[0], vmax[0]):
t[0] = (val[0] - vmin[0]) / float((vmax[0] - vmin[0]))
if not np.allclose(vmin[1], vmax[1]):
t[1] = (val[1] - vmin[1]) / float((vmax[1] - vmin[1]))
R, G = t[1], t[0]
B = 0.4
return 'rgb(' + '{:d}'.format(int(R * 255 + 0.5)) + ',' + '{:d}'\
.format(int(G * 255 + 0.5)) +\
',' + '{:d}'.format(int(B * 255 + 0.5)) + ')'
@classmethod
def map_array2color(cls, array, min=None, max=None):
"""
Map an array of values to colors.
Args:
array (1d array):
array of values to map to colors. size: (N,)
min, max (float):
If not None, set the ranges for the values in array.
Returns:
1d array of rgb colors. size: (N,)
"""
if min is None:
min = array.min()
if max is None:
max = array.max()
return np.array([cls.map_val2color(val, min, max) for val in array])
@classmethod
def map_array2color2d(cls, array, min=None, max=None):
"""
Map a 2D array of values to colors.
Args:
array (2d array):
array of values to map to colors. size: (N x 2).
min, max (2d array):
If not None, sets the ranges for the values in array.
Returns:
1d array of rgb colors. size: (N,)
"""
if array.shape[1] != 2:
raise ValueError(f"array has shape {array.shape}.")
if min is None:
min = array.amin(axis=0)
elif min.shape != (2, ):
raise ValueError(f'min has shape {min.shape}')
if max is None:
max = array.amax(axis=0)
elif max.shape != (2, ):
raise ValueError(f'max has shape {max.shape}')
return np.array([cls.map_val2color2d(val, min, max) for val in array])
@classmethod
def get_normal_facecolor(cls, affine_coeff, max=None):
"""
Get facecolor of simplices according to their normals.
Args:
affine_coeff (array):
affine coefficients of the simplices.
size: (number of simplices, 3).
max (2d array):
If not None, sets the max ranges for the values in
affine_coeff[:, 0:2].
Returns:
facecolor (1d array):
1d array of rgb colors whose size is the number of simplices.
"""
if not affine_coeff.shape[1] == 3:
raise ValueError(f"affine_coeff has shape {affine_coeff.shape}.")
if max is None:
max = np.array([1.75, 1.75])
facecolor = \
cls.map_array2color2d(-affine_coeff[:, 0:2], min=-max, max=max)
return facecolor
@staticmethod
def get_scatter3d(x, y, z, marker_size=2):
r"""
Get a scatter 3D plot (f: \R^2 \to \R)
Args:
x, y (1d array):
positions of the samples.
z (1d array):
values of the samples.
Returns:
A plotly.graph_objects.Scatter3D object.
"""
data = go.Scatter3d(x=x,
y=y,
z=z,
mode='markers',
marker=dict(size=marker_size,
color='black'),
opacity=0.8)
return data
def plot_fig(self,
fig,
filename=None,
num_subplots=1,
view=None,
**kwargs):
"""
Plot figure.
Args:
fig:
instance of plotly.graph_objects.Figure to plot.
filename (str):
Figure filename.
num_subplots (int):
Number of figure subplots.
view (str):
If None, a default view is used.
Otherwise can be set to "up" "side".
"""
assert isinstance(fig, go.Figure), f'fig is of type {type(fig)}.'
assert view in [None, 'up', 'side'], f'view "{view}" is invalid.'
ax_dict = dict(linecolor='#000000',
linewidth=4,
showgrid=False,
showticklabels=False,
tickfont=dict(size=15),
gridcolor='#000000',
gridwidth=0.3,
title='',
showbackground=True)
fig_dict = dict(
scene_aspectmode='data',
scene=dict(xaxis=copy.deepcopy(ax_dict),
yaxis=copy.deepcopy(ax_dict),
zaxis=copy.deepcopy(ax_dict),
camera=dict(up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0))),
font=dict(size=20),
)
fig_dict['scene']['xaxis']['title'] = 'x'
fig_dict['scene']['yaxis']['title'] = 'y'
fig_dict['scene']['zaxis']['title'] = 'z'
if view == 'up':
fig_dict['scene']['zaxis']['visible'] = False
fig_dict['scene']['camera']['eye'] = dict(x=0, y=0, z=3)
fig_dict['scene']['camera']['up'] = dict(x=0, y=1, z=0)
elif view == 'side':
fig_dict['scene']['camera']['eye'] = dict(x=1.2, y=0.3, z=0.4)
else:
# default
fig_dict['scene']['camera']['eye'] = dict(x=1.5, y=1.9, z=1.7)
for i in range(2, num_subplots + 1):
fig_dict['scene' + str(i)] = fig_dict['scene'].copy()
fig.update_layout(**fig_dict)
if self.log_dir is None:
fig.show()
else:
self.export_fig(fig, filename, self.log_dir)
@staticmethod
def export_fig(fig, filename, log_dir):
"""
Plot html figure and export to log_dir.
Args:
fig:
instance of plotly.graph_objects.Figure to plot.
filename (str):
Figure filename.
log_dir (str):
Log directory where figure is exported to.
"""
assert isinstance(fig, go.Figure), f'fig is of type {type(fig)}.'
if not os.path.isdir(log_dir):
raise NotADirectoryError(
f'log_dir "{log_dir}" is not a valid directory')
file_path = os.path.join(f'{log_dir}', f'{filename}')
pio.write_html(fig, file=f'{file_path}.html', auto_open=True)
```
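`map_val2color` normalizes a value into [0, 1] against the given range, looks it up in a matplotlib colormap, and formats the result as an `'rgb(r,g,b)'` string of the kind used as plotly colors elsewhere in the package. A short usage sketch, assuming the `htvlearn` package and its matplotlib dependency are installed:
```python
import numpy as np
from htvlearn.plots.base_plot import BasePlot

# Single value in [vmin, vmax] -> 'rgb(r,g,b)' string on the default 'coolwarm' scale
print(BasePlot.map_val2color(0.75, vmin=0.0, vmax=1.0))

# Whole array -> array of rgb strings (ranges taken from the array itself when min/max are None)
print(BasePlot.map_array2color(np.linspace(0.0, 1.0, 5)))
```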
#### File: htvlearn/plots/plot_cpwl.py
```python
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
from htvlearn.delaunay import Delaunay
from htvlearn.plots.base_plot import BasePlot
class Plot(BasePlot):
def __init__(self, data_obj=None, **plot_params):
"""
Args:
data_obj (Data):
None (if not plotting data samples) or
object of class Data (see htvlearn.data).
"""
super().__init__(data_obj=data_obj, **plot_params)
def plot_delaunay(self,
*delaunay_obj_list,
observations=False,
color='normal',
opaque=True,
constrast=False,
filename='trisurf_delaunay',
**kwargs):
"""
Plot several Delaunay objects from a list.
Args:
delaunay_obj_list (Delaunay):
list of Delaunay objects (see htvlearn.delaunay).
observations (bool):
True if plotting data observations.
color (str):
Plot colors according to simplex normals (color="normal")
or mean height of vertices (color="z").
opaque (bool):
If True, ground truth is made opaque
(if True, might make some observations non-visible).
constrast (bool):
Enhance figure contrast.
filename (str):
Figure filename.
"""
specs = [[{'type': 'scene'}] * len(delaunay_obj_list)]
num_subplots = len(delaunay_obj_list)
fig = make_subplots(cols=num_subplots,
specs=specs,
shared_xaxes=True,
shared_yaxes=True)
for i, delaunay in enumerate(delaunay_obj_list):
if not isinstance(delaunay, Delaunay):
continue
delaunay_fig_dict = dict(x=delaunay.tri.points[:, 0],
y=delaunay.tri.points[:, 1],
simplices=delaunay.tri.simplices,
edges_color='rgb(255, 255, 255)',
plot_edges=False)
if color == 'normal':
# affine coefficients of each simplex
color_list = self.get_normal_facecolor(
delaunay.tri.simplices_affine_coeff)
elif color == 'z':
z_mean = \
delaunay.tri.values[delaunay.tri.simplices].mean(axis=1)
color_list = self.map_array2color(z_mean)
else:
raise ValueError(f'color {color} should be "normal" or "z".')
trisurf_fig = ff.create_trisurf(z=delaunay.tri.values,
color_func=color_list,
**delaunay_fig_dict)
for trisurf_trace in trisurf_fig.data:
fig.add_trace(trisurf_trace, row=1, col=i + 1)
if opaque is False:
fig['data'][-1].update(opacity=0.95)
if constrast is True:
fig.update_traces(lighting=dict(ambient=0.1,
diffuse=1,
specular=0.1),
lightposition=dict(x=0, y=0, z=4),
selector=dict(type='mesh3d'))
if observations:
fig.add_trace(self.get_observations_plot(**kwargs), row=1, col=1)
self.plot_fig(fig,
filename=filename,
num_subplots=num_subplots,
**kwargs)
def get_observations_plot(self,
mode='train',
marker_size=2,
**kwargs):
"""
Get observations plot.
Args:
mode (str):
'train', 'valid', or 'test'
marker_size (float):
marker size for observation points.
Returns:
A plotly.graph_objects.Scatter3D object.
"""
assert mode in ['train', 'valid', 'test']
self.verify_data_obj()
data_dict = {
'train': self.data.train,
'valid': self.data.valid,
'test': self.data.test
}[mode]
input = data_dict['input'].cpu().numpy()
values = data_dict['values'].cpu().numpy()
observations = self.get_scatter3d(x=input[:, 0],
y=input[:, 1],
z=values,
marker_size=marker_size)
return observations
```
#### File: HTV-Learn/htvlearn/rbf_manager.py
```python
import torch
import numpy as np
import time
import datetime
import json
from htvlearn.rbf_project import RBFProject
from htvlearn.rbf import RBF
from htvlearn.hessian import get_finite_second_diff_Hessian
from htvlearn.htv_utils import compute_mse_snr
from htvlearn.lattice import Lattice
class RBFManager(RBFProject):
""" """
def __init__(self, params, log=True):
"""
Args:
params (dict):
parameter dictionary.
log (bool):
if True, log results.
"""
# initializes log, lattice, data, json files
super().__init__(params, log=log)
if self.load_ckpt is True:
# is_ckpt_loaded=True if a checkpoint was successfully loaded.
is_ckpt_loaded = self.restore_ckpt_params()
if is_ckpt_loaded is True:
self.restore_data()
self.rbf = RBF(self.data, **self.params['rbf'])
@property
def htv_log(self):
""" """
return self.htv_dict
def train(self):
"""Run algorithm and save results."""
self.htv_dict = {}
output = self.forward_data(self.data.train['input'])
mse, _ = compute_mse_snr(self.data.train['values'].cpu(),
output.cpu())
self.update_json('train_mse', mse)
print(f'Train mse : {mse}')
if not self.params['no_htv']:
print('\nComputing Hessian...')
self.htv_dict = self.compute_htv()
print('HTV dict :',
json.dumps(self.htv_dict,
indent=4,
sort_keys=False))
print('Exact HTV : {:.2f}'.format(self.data.cpwl.get_exact_HTV()))
print('Finished.')
self.update_json('htv', self.htv_dict)
for mode in ['valid', 'test']:
mse, _ = self.evaluate_results(mode)
self.update_json('_'.join([mode, 'mse']), mse)
print(f'{mode} mse : {mse}')
# save params, data and htv to checkpoint
self.save_to_ckpt()
def compute_htv(self):
"""
Compute the HTV of the model in the data range.
Return:
htv (dict):
dictionary of the form {'p': htv_p}
"""
grid = self.data.cpwl.get_grid(h=0.0005)
Hess = get_finite_second_diff_Hessian(grid, self.evaluate_func)
S = np.linalg.svd(Hess, compute_uv=False, hermitian=True)
assert S.shape == (*Hess.shape[0:2], 2)
htv = {}
for p in [1, 2, 5, 10]:
points_htv = np.linalg.norm(S, ord=p, axis=-1) # schatten norm
# value x area (Riemann integral)
htv[str(p)] = (points_htv * grid.h * grid.h).sum()
return htv
def evaluate_results(self, mode):
"""
Evaluate train, validation or test results.
Args:
mode (str):
'train', 'valid' or 'test'
Returns:
mse (float):
``mode`` mean-squared-error.
output (torch.Tensor):
result of evaluating model on ``mode`` set.
"""
assert mode in ['train', 'valid', 'test']
if mode == 'train':
data_dict = self.data.train
elif mode == 'valid':
data_dict = self.data.valid
else:
data_dict = self.data.test
output = self.forward_data(data_dict['input'])
# save predictions
data_dict['predictions'] = output
# compute mse
mse, _ = compute_mse_snr(data_dict['values'], output)
return mse, output
def evaluate_func(self, x, batch_size=100000):
"""
Evaluate model function for some input.
Args:
x (np.ndarray):
inputs. size: (n, 2).
batch_size (int):
batch size for evaluation.
Returns:
y (np.ndarray):
result of evaluating model at x.
"""
x = torch.from_numpy(x)
assert x.dtype == torch.float64
dataloader = x.split(batch_size)
y = torch.tensor([], device=x.device, dtype=x.dtype)
# every 5% progress
print_step = max(int(len(dataloader) * 1. / 20), 1)
print('\n==> Starting evaluation...')
if self.params['verbose']:
print('=> Length dataloader: ', len(dataloader))
start_time = time.time()
for batch_idx, input in enumerate(dataloader):
out = self.forward_data(input)
y = torch.cat((y, out), dim=0)
if batch_idx % print_step == 0:
progress = int((batch_idx + 1) * 100. / len(dataloader))
print(f'=> {progress}% complete')
end_time = time.time()
run_time = str(datetime.timedelta(seconds=int(end_time - start_time)))
print(f'==> Finished. Run time: {run_time} (h:min:sec).')
return y.numpy()
def evaluate_lattice(self):
"""Create and evaluate network on lattice."""
lat = Lattice(**self.params['lattice'])
print(f'Sampled lattice lsize: {lat.lsize}.')
lattice_grid = lat.lattice_to_standard(lat.lattice_grid.float())
z = self.forward_data(lattice_grid)
new_C_mat = lat.flattened_C_to_C_mat(z)
lat.update_coefficients(new_C_mat.float())
return lat
def forward_data(self, input, *args, **kwargs):
"""
Compute model output for some input.
Args:
input (torch.Tensor):
size: (n, 2).
"""
return self.rbf.evaluate(input)
@staticmethod
def read_htv_log(htv_log):
"""
Parse htv_log dictionary.
Args:
htv_log (dict).
Returns:
htv (dict):
dictionary of the form {'p': np.array([htv_p])}.
"""
# e.g. htv_log = {'1': htv_1, '2': htv_2}
assert isinstance(htv_log, dict)
# p in schatten-p norms
p_array = np.array([int(p) for p in htv_log.keys()])
p_array.sort()
htv = {}
for p in p_array:
htv[str(p)] = np.array([float(htv_log[str(p)])])
return htv
```
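`compute_htv` above relies on `get_finite_second_diff_Hessian` (imported from `htvlearn.hessian`, not reproduced here) to estimate the Hessian on a dense grid before taking Schatten norms. The sketch below only illustrates the standard central-difference estimates such a Hessian computation is typically based on, applied to a made-up polynomial for which they are exact:
```python
import numpy as np

# Toy scalar field f(x, y) = x**2 + 3*x*y sampled on a uniform grid of spacing h
h = 0.01
xs = np.arange(-1.0, 1.0, h)
X, Y = np.meshgrid(xs, xs, indexing='ij')
F = X ** 2 + 3 * X * Y

# Central second differences give the Hessian entries at interior grid points
f_xx = (F[2:, 1:-1] - 2 * F[1:-1, 1:-1] + F[:-2, 1:-1]) / h ** 2
f_yy = (F[1:-1, 2:] - 2 * F[1:-1, 1:-1] + F[1:-1, :-2]) / h ** 2
f_xy = (F[2:, 2:] - F[2:, :-2] - F[:-2, 2:] + F[:-2, :-2]) / (4 * h ** 2)

print(f_xx.mean(), f_xy.mean(), f_yy.mean())  # approximately 2, 3, 0 for this polynomial
```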
#### File: HTV-Learn/htvlearn/rbf.py
```python
import torch
import numpy as np
class RBF():
def __init__(self, data_obj, eps, lmbda, **params):
"""
Args:
data_obj (Data):
instance of Data class (data.py), containing:
data_obj['train']['input']: size: (M,2);
data_obj['train']['values']: size: (M,).
eps (float):
rbf kernel shape parameter (the higher, the more localized)
lmbda (float):
regularization weight
"""
self.params = params
self.data = data_obj
self.eps = eps
self.lmbda = lmbda
self.input = self.data.train['input']
assert self.input.size(1) == 2
self.values = self.data.train['values']
assert self.input.size(0) == self.values.size(0)
self.init_gram_mat()
self.init_coeffs()
@staticmethod
def get_sigma_from_eps(eps):
"""
Get sigma (standard deviation) from rbf shape parameter eps.
Args:
eps (float):
rbf kernel shape parameter (the higher, the more localized).
Returns:
sigma (float):
standard deviation of the kernel.
"""
return np.sqrt(1. / (2. * eps))
def init_gram_mat(self):
"""
Init Gram interpolation matrix with gaussian kernels.
``gram_mat = exp(-(eps * distance_pairs)**2)``, where
``distance_pairs`` is the matrix of pairwise distances of training
datapoints.
"""
# (1, M, 2) - (M, 1, 2) = (M, M, 2)
# location pairwise differences
loc_diff_pairs = self.input.unsqueeze(0) - self.input.unsqueeze(1)
assert loc_diff_pairs.size() == (self.input.size(0),
self.input.size(0), 2)
# distance pairs
distance_pairs = torch.norm(loc_diff_pairs, dim=-1)
assert distance_pairs.size() == (self.input.size(0),
self.input.size(0))
# RBF interpolation matrix with gaussian kernels
self.gram_mat = torch.exp(-(self.eps * distance_pairs.double())**2)
# check if it is symmetric
assert torch.equal(self.gram_mat.T, self.gram_mat)
assert self.gram_mat.size() == (self.input.size(0), self.input.size(0))
def init_coeffs(self):
"""
Solve ``(gram_mat + lambda*I)a = y`` for ``a`` (coefficients),
and save the result in self.coeffs.
"""
A = self.gram_mat + self.lmbda * \
torch.ones_like(self.gram_mat[:, 0]).diag()
B = self.values.unsqueeze(-1).to(dtype=self.gram_mat.dtype,
device=self.gram_mat.device)
# Solve AX = B
X = torch.linalg.lstsq(A, B).solution
self.coeffs = X.squeeze(-1).float()
assert self.coeffs.size() == self.values.size()
def construct_H_mat(self, x, **kwargs):
"""
Construct the forward matrix H from x (the locations where we wish
to evaluate the RBF), such that f(x) = H_mat @ self.coeffs.
Args:
x (torch.Tensor):
locations where we wish to evaluate the RBF.
Returns:
H_mat (torch.Tensor):
size: (x.size(0), self.coeffs.size(0))
"""
# (N, 1, 2) - (1, M, 2) = (N, M, 2)
# location pairwise differences \varphi(\norm{\V x - \V x_i})
loc_diff_pairs = x.unsqueeze(1) - self.input.unsqueeze(0)
assert loc_diff_pairs.size() == (x.size(0), self.input.size(0), 2)
# distance pairs
distance_pairs = torch.norm(loc_diff_pairs, dim=-1)
assert distance_pairs.size() == (x.size(0), self.input.size(0))
# x interpolation matrix with gaussian kernels
H_mat = torch.exp(-(self.eps * distance_pairs)**2)
assert H_mat.size() == (x.size(0), self.coeffs.size(0))
return H_mat
def evaluate(self, x, **kwargs):
r"""
Evaluate RBF output at locations x.
f(\V x) = self.H_mat @ self.coeffs, where self.H_mat is constructed
from x.
Args:
x (torch.Tensor):
locations where we wish to evaluate the RBF.
Returns:
output (torch.Tensor):
evaluation of the RBF at x.
"""
try:
H_mat = self.construct_H_mat(x)
except RuntimeError:
print('\nError: need to reduce size of input batches.\n')
raise
coeffs = self.coeffs.to(dtype=H_mat.dtype)
output = torch.mv(H_mat, coeffs)
return output
```
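The RBF fit above reduces to two linear-algebra steps: build the Gaussian Gram matrix G_ij = exp(-(eps * ||x_i - x_j||)^2), solve (G + lambda*I) a = y for the coefficients, and evaluate new points as f(x) = H a with the same kernel. A self-contained numerical sketch on toy data (the training set, eps, and lambda values are made up):
```python
import torch

torch.manual_seed(0)
eps, lmbda = 5.0, 1e-3                    # hypothetical shape and regularization parameters
x_train = torch.rand(20, 2).double()      # (M, 2) toy training locations
y_train = torch.sin(x_train.sum(dim=1))   # (M,)  toy training values

# Gram matrix of Gaussian kernels: exp(-(eps * ||x_i - x_j||)^2)
d = torch.norm(x_train.unsqueeze(0) - x_train.unsqueeze(1), dim=-1)
gram = torch.exp(-(eps * d) ** 2)

# Solve (gram + lambda * I) a = y for the coefficients a
A = gram + lmbda * torch.eye(gram.size(0), dtype=gram.dtype)
coeffs = torch.linalg.lstsq(A, y_train.unsqueeze(-1)).solution.squeeze(-1)

# Evaluate at new locations: f(x) = H @ a, with H built from the same kernel
x_new = torch.rand(5, 2).double()
H = torch.exp(-(eps * torch.norm(x_new.unsqueeze(1) - x_train.unsqueeze(0), dim=-1)) ** 2)
print(torch.mv(H, coeffs))
```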
#### File: HTV-Learn/scripts/plot_data.py
```python
import argparse
from htvlearn.data import Data
from htvlearn.plots.plot_cpwl import Plot
from htvlearn.htv_utils import ArgCheck
def plot_data(args):
"""
Args:
args: arguments from argparser
"""
data_params = {
'dataset_name': args.dataset_name,
'num_train': args.num_train,
'non_uniform': args.non_uniform,
'noise_ratio': args.noise_ratio,
'seed': args.seed
}
data_obj = Data(**data_params)
plot_params = {}
plot_params['log_dir'] = '/tmp'
plot = Plot(data_obj, **plot_params)
plot.plot_delaunay(data_obj.cpwl,
observations=False,
color='normal',
filename='GT_no_data')
plot.plot_delaunay(data_obj.cpwl,
observations=True,
opaque=False,
marker_size=(0.65
if 'face' in args.dataset_name
else 2),
color='normal',
filename='GT_data')
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description='Load parameters from checkpoint file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# data
dataset_choices = {
'face', 'cut_face_gaps', 'pyramid', 'quad_top_planes'
}
parser.add_argument(
'--dataset_name',
choices=dataset_choices,
type=str,
default='face',
help=' ')
parser.add_argument(
'--num_train',
metavar='[INT>0]',
type=ArgCheck.p_int,
default=10000,
help=' ')
parser.add_argument(
'--non_uniform',
action='store_true',
help=' ')
parser.add_argument(
'--noise_ratio',
metavar='[FLOAT,>0]',
type=ArgCheck.nn_float,
default=0.,
help=' ')
parser.add_argument(
'--seed',
metavar='[INT]',
type=int,
default=-1,
help=' ')
args = parser.parse_args()
plot_data(args)
```
#### File: HTV-Learn/scripts/run_face_rbf.py
```python
import os
import argparse
import copy
from htvlearn.main import main_prog
def run_face_rbf(args):
"""
Args:
args: verified arguments from argparser
"""
if not os.path.isdir(args.log_dir):
print(f'\nLog directory {args.log_dir} not found. Creating it.')
os.makedirs(args.log_dir)
params = {
'method': 'rbf',
'lmbda': 1e-2,
'eps': 50,
'log_dir': args.log_dir,
'dataset_name': 'face',
'num_train': 10000,
'data_dir': './data',
'noise_ratio': 0.,
'seed': 8,
'lsize': 194,
}
params['model_name'] = ('face_rbf_lmbda_{:.1E}_'.format(params["lmbda"]) +
'eps_{:d}'.format(params["eps"]))
main_prog(copy.deepcopy(params))
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description='Run RBF on the face dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_dir',
metavar='[STR]',
type=str,
default='./ckpt',
help='Model log directory.')
args = parser.parse_args()
run_face_rbf(args)
```
#### File: HTV-Learn/scripts/run_pyramid_htv.py
```python
import os
import argparse
import copy
from htvlearn.main import main_prog
def run_pyramid_htv(args):
"""
Args:
args: verified arguments from argparser
"""
if not os.path.isdir(args.log_dir):
print(f'\nLog directory {args.log_dir} not found. Creating it.')
os.makedirs(args.log_dir)
params = {
'method': 'pyramid',
'lmbda': 0.,
'log_dir': args.log_dir,
'dataset_name': 'pyramid',
'data_dir': './data',
'noise_ratio': 0.,
'lsize': 30,
'admm_iter': 40000,
'simplex': True
}
params['model_name'] = 'pyramid_htv_lmbda_{:.1E}'.format(params["lmbda"])
main_prog(copy.deepcopy(params))
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description='Run HTV on the pyramid dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_dir',
metavar='[STR]',
type=str,
default='./ckpt',
help='Model log directory.')
args = parser.parse_args()
run_pyramid_htv(args)
```
#### File: HTV-Learn/scripts/run_qtp_rbf.py
```python
import os
import argparse
import copy
from htvlearn.main import main_prog
def run_qtp_rbf(args):
"""
Args:
args: verified arguments from argparser
"""
if not os.path.isdir(args.log_dir):
print(f'\nLog directory {args.log_dir} not found. Creating it.')
os.makedirs(args.log_dir)
params = {
'method': 'rbf',
'lmbda': 8e-2,
'eps': 7,
'log_dir': args.log_dir,
'dataset_name': 'quad_top_planes',
'num_train': 250,
'data_dir': './data',
'noise_ratio': 0.05,
'seed': 14,
'lsize': 64,
}
params['model_name'] = ('qtp_rbf_lmbda_{:.1E}_'.format(params["lmbda"]) +
'eps_{:d}'.format(params["eps"]))
main_prog(copy.deepcopy(params))
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(
description='Run RBF on the data fitting task.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log_dir',
metavar='[STR]',
type=str,
default='./ckpt',
help='Model log directory.')
args = parser.parse_args()
run_qtp_rbf(args)
```
#### File: HTV-Learn/tests/conftest.py
```python
def pytest_addoption(parser):
"""Pytest parser"""
parser.addoption("--plot",
action="store",
help="list of pytest fixtures to plot")
parser.addoption('--seed',
action='store',
default=42,
help="Set seed")
```
#### File: HTV-Learn/tests/test_delaunay.py
```python
import pytest
import numpy as np
import torch
import time
from htvlearn.delaunay import Delaunay
from htvlearn.plots.plot_cpwl import Plot
from htvlearn.data import (
BoxSpline,
SimplicialSpline,
CutPyramid,
SimpleJunction,
DistortedGrid,
Data
)
@pytest.fixture(autouse=True)
def set_seed(request):
"""Set random seed."""
# Code that will run before
seed = request.config.getoption("--seed")
torch.manual_seed(int(seed))
torch.cuda.manual_seed_all(int(seed))
np.random.seed(int(seed))
# toy datasets that have an htv attribute
toy_dataset_list = [BoxSpline, CutPyramid, SimpleJunction]
dataset_dict = {
'toy': toy_dataset_list,
'all': toy_dataset_list + [SimplicialSpline, DistortedGrid],
'simple_junction': [SimpleJunction],
'distorted_grid': [DistortedGrid]
}
# receives dataset as parameter
@pytest.fixture(scope="module")
def dataset(request):
dt = request.param
ret_dict = {
'name': dt.__name__,
'points': dt.points.copy(),
'values': dt.values.copy()
}
if hasattr(dt, 'htv'):
ret_dict['htv'] = dt.htv
return ret_dict
@pytest.fixture(scope='module')
def skip_plot(request):
if 'plot' not in request.config.getoption("-m"):
raise pytest.skip('Skipping!')
@pytest.mark.filterwarnings("ignore::UserWarning")
class TestDelaunay:
@pytest.mark.plot
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_plot_delaunay(self, dataset, request):
""" """
plot_arg = request.config.getoption("--plot")
if plot_arg is None or plot_arg not in dataset['name']:
pytest.skip()
cpwl = Delaunay(**dataset)
plot = Plot(log_dir='/tmp')
plot.plot_delaunay(cpwl)
def test_is_admissible(self):
points, values = Data.init_zero_boundary_planes()
values = Data.add_linear_func(points, values)
cpwl = Delaunay(points=points, values=values)
assert cpwl.is_admissible is True
@pytest.mark.parametrize("dataset", dataset_dict["toy"], indirect=True)
def test_exact_htv(self, dataset):
""" """
cpwl = Delaunay(**dataset)
assert np.allclose(cpwl.get_exact_HTV(), dataset['htv'])
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_exact_grad_trace_htv(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
exact_grad_trace_htv = cpwl.get_exact_grad_trace_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(exact_grad_trace_htv, exact_htv))
assert np.allclose(exact_grad_trace_htv, exact_htv, rtol=1e-3)
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_lefkimiattis_HTV(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
lefkimiattis_htv = cpwl.get_lefkimiattis_schatten_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(lefkimiattis_htv, exact_htv))
assert not np.allclose(lefkimiattis_htv, exact_htv, rtol=1e-3)
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_lefkimiattis_trace_HTV(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
lefkimiattis_trace_htv = cpwl.get_lefkimiattis_trace_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(lefkimiattis_trace_htv, exact_htv))
assert np.allclose(lefkimiattis_trace_htv, exact_htv, rtol=2e-3)
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_exact_grad_schatten_HTV(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
exact_grad_schatten_htv = cpwl.get_exact_grad_schatten_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(exact_grad_schatten_htv, exact_htv))
assert not np.allclose(exact_grad_schatten_htv, exact_htv, rtol=1e-3)
@pytest.mark.parametrize("dataset",
dataset_dict["simple_junction"],
indirect=True)
def test_simple_junction(self, dataset):
""" """
cpwl = Delaunay(**dataset)
assert np.array_equal(cpwl.tri.points, dataset['points'])
assert np.array_equal(cpwl.tri.values, dataset['values'])
pos_mask = (cpwl.tri.simplices_affine_coeff[:, 0] > 0)
assert np.allclose(
(cpwl.tri.simplices_affine_coeff[np.where(pos_mask)[0], :] -
SimpleJunction.a1_affine_coeff[np.newaxis, :]),
np.zeros((np.sum(pos_mask), 3)))
assert np.allclose(
(cpwl.tri.simplices_affine_coeff[np.where(~pos_mask)[0], :] -
SimpleJunction.a2_affine_coeff[np.newaxis, :]),
np.zeros((np.sum(pos_mask), 3)))
grid = cpwl.get_grid(h=0.01)
z, x_grad = cpwl.evaluate_with_grad(grid.x)
assert np.allclose(
(np.abs(x_grad) -
SimpleJunction.a1_affine_coeff[np.newaxis, 0:2]),
np.zeros_like(x_grad))
@pytest.mark.parametrize("dataset",
dataset_dict["distorted_grid"],
indirect=True)
def test_evaluate(self, dataset):
""" """
cpwl = Delaunay(**dataset, add_extreme_points=True)
grid = cpwl.get_grid(h=0.01)
t1 = time.time()
z, x_grad = cpwl.evaluate_with_grad(grid.x)
t2 = time.time()
z_bar = cpwl.evaluate_bar(grid.x)
t3 = time.time()
print('affine_coeff/bar time: {:.3f}'
.format((t2 - t1) / (t3 - t2)))
assert np.all(np.allclose(z, z_bar))
@pytest.mark.parametrize("dataset",
dataset_dict["distorted_grid"],
indirect=True)
def test_convex_hull_extreme_points(self, dataset):
""" """
cpwl = Delaunay(**dataset, add_extreme_points=True)
npoints = cpwl.tri.points.shape[0]
expected_convex_hull_points_idx = \
np.array([npoints - 4, npoints - 3, npoints - 2, npoints - 1])
assert np.array_equal(cpwl.convex_hull_points_idx,
expected_convex_hull_points_idx)
``` |
{
"source": "JoaquimEsteves/git-format-staged",
"score": 3
} |
#### File: JoaquimEsteves/git-format-staged/git-format-staged.py
```python
from __future__ import print_function
import argparse
from fnmatch import fnmatch
import re
import os
import subprocess
import sys
# The string $VERSION is replaced during the publish process.
VERSION = "$VERSION"
PROG = sys.argv[0]
def info(msg):
print(msg, file=sys.stderr)
def warn(msg):
print("{}: warning: {}".format(PROG, msg), file=sys.stderr)
def fatal(msg):
print("{}: error: {}".format(PROG, msg), file=sys.stderr)
exit(1)
def format_staged_files(
file_patterns, formatter, git_root, update_working_tree=True, write=True
):
try:
output = subprocess.check_output(
[
"git",
"diff-index",
"--cached",
# select only file additions and modifications
"--diff-filter=AM",
"--no-renames",
"HEAD",
]
)
for line in output.splitlines():
entry = parse_diff(line.decode("utf-8"))
entry_path = normalize_path(
entry["src_path"], relative_to=git_root
)
if entry["dst_mode"] == "120000":
# Do not process symlinks
continue
if not (matches_some_path(file_patterns, entry_path)):
continue
if format_file_in_index(
formatter,
entry,
update_working_tree=update_working_tree,
write=write,
):
info(
"Reformatted {} with {}".format(
entry["src_path"], formatter
)
)
except Exception as err:
fatal(str(err))
def format_file_in_index(
formatter, diff_entry, update_working_tree=True, write=True
):
"""
Run formatter on file in the git index. Creates a new git object with the
result, and replaces the content of the file in the index with that object.
Returns hash of the new object if formatting produced any changes.
"""
orig_hash = diff_entry["dst_hash"]
new_hash = format_object(formatter, orig_hash, diff_entry["src_path"])
# If the new hash is the same then the formatter did not make any changes.
if not write or new_hash == orig_hash:
return None
# If the content of the new object is empty then the formatter did not
# produce any output. We want to abort instead of replacing the file with
# an empty one.
if object_is_empty(new_hash):
return None
replace_file_in_index(diff_entry, new_hash)
if update_working_tree:
try:
patch_working_file(diff_entry["src_path"], orig_hash, new_hash)
except Exception as err:
# Errors patching working tree files are not fatal
warn(str(err))
return new_hash
file_path_placeholder = re.compile(r"\{\}")
def format_object(formatter, object_hash, file_path):
"""
Run formatter on a git blob identified by its hash. Writes output to a new
git blob, and returns the hash of the new blob.
"""
get_content = subprocess.Popen(
["git", "cat-file", "-p", object_hash], stdout=subprocess.PIPE
)
format_content = subprocess.Popen(
re.sub(file_path_placeholder, file_path, formatter),
shell=True,
stdin=get_content.stdout,
stdout=subprocess.PIPE,
)
write_object = subprocess.Popen(
["git", "hash-object", "-w", "--stdin"],
stdin=format_content.stdout,
stdout=subprocess.PIPE,
)
get_content.stdout.close()
format_content.stdout.close()
if get_content.wait() != 0:
raise ValueError(
"unable to read file content from object database: " + object_hash
)
if format_content.wait() != 0:
raise Exception(
"formatter exited with non-zero status"
) # TODO: capture stderr from format command
new_hash, _err = write_object.communicate()
if write_object.returncode != 0:
raise Exception("unable to write formatted content to object database")
return new_hash.decode("utf-8").rstrip()
def object_is_empty(object_hash):
get_content = subprocess.Popen(
["git", "cat-file", "-p", object_hash], stdout=subprocess.PIPE
)
content, err = get_content.communicate()
if get_content.returncode != 0:
raise Exception("unable to verify content of formatted object")
return not content
def replace_file_in_index(diff_entry, new_object_hash):
subprocess.check_call(
[
"git",
"update-index",
"--cacheinfo",
"{},{},{}".format(
diff_entry["dst_mode"], new_object_hash, diff_entry["src_path"]
),
]
)
def patch_working_file(path, orig_object_hash, new_object_hash):
patch = subprocess.check_output(
["git", "diff", orig_object_hash, new_object_hash]
)
# Substitute object hashes in patch header with path to working tree file
patch_b = patch.replace(orig_object_hash.encode(), path.encode()).replace(
new_object_hash.encode(), path.encode()
)
apply_patch = subprocess.Popen(
["git", "apply", "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
apply_patch.communicate(input=patch_b)
if apply_patch.returncode != 0:
raise Exception(
f"could not apply formatting changes to working tree file {path}"
)
# Format: src_mode dst_mode src_hash dst_hash status/score? src_path dst_path?
diff_pat = re.compile(
"^:(\d+) (\d+) ([a-f0-9]+) ([a-f0-9]+) ([A-Z])(\d+)?\t([^\t]+)(?:\t([^\t]+))?$"
)
def parse_diff(diff):
"""
Parse output from `git diff-index`
"""
m = diff_pat.match(diff)
if not m:
raise ValueError("Failed to parse diff-index line: " + diff)
return {
"src_mode": unless_zeroed(m.group(1)),
"dst_mode": unless_zeroed(m.group(2)),
"src_hash": unless_zeroed(m.group(3)),
"dst_hash": unless_zeroed(m.group(4)),
"status": m.group(5),
"score": int(m.group(6)) if m.group(6) else None,
"src_path": m.group(7),
"dst_path": m.group(8),
}
zeroed_pat = re.compile("^0+$")
def unless_zeroed(s):
"""
Returns the argument unless the argument is a string of zeroes, in which case
returns `None`
"""
return s if not zeroed_pat.match(s) else None
def get_git_root():
return (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.decode("utf-8")
.rstrip()
)
def normalize_path(p, relative_to=None):
return os.path.abspath(os.path.join(relative_to, p) if relative_to else p)
def matches_some_path(patterns, target):
is_match = False
for signed_pattern in patterns:
(is_pattern_positive, pattern) = from_signed_pattern(signed_pattern)
if fnmatch(target, normalize_path(pattern)):
is_match = is_pattern_positive
return is_match
# Checks for a '!' as the first character of a pattern, returns the rest of the
# pattern in a tuple. The tuple takes the form (is_pattern_positive, pattern).
# For example:
# from_signed_pattern('!pat') == (False, 'pat')
# from_signed_pattern('pat') == (True, 'pat')
def from_signed_pattern(pattern):
if pattern[0] == "!":
return (False, pattern[1:])
else:
return (True, pattern)
class CustomArgumentParser(argparse.ArgumentParser):
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = argparse._(
"unrecognized arguments: %s. Do you need to quote your formatter command?"
)
self.error(msg % " ".join(argv))
return args
if __name__ == "__main__":
parser = CustomArgumentParser(
description="Transform staged files using a formatting command that accepts content via stdin and produces a result via stdout.",
epilog='Example: %(prog)s --formatter "prettier --stdin" "src/*.js" "test/*.js"',
)
parser.add_argument(
"--formatter",
"-f",
required=True,
help="Shell command to format files, will run once per file. Occurrences of the placeholder `{}` will be replaced with a path to the file being formatted. (Example: \"prettier --stdin --stdin-filepath '{}'\")",
)
parser.add_argument(
"--no-update-working-tree",
action="store_true",
help="By default formatting changes made to staged file content will also be applied to working tree files via a patch. This option disables that behavior, leaving working tree files untouched.",
)
parser.add_argument(
"--no-write",
action="store_true",
help='Prevents %(prog)s from modifying staged or working tree files. You can use this option to check staged changes with a linter instead of formatting. With this option stdout from the formatter command is ignored. Example: %(prog)s --no-write -f "eslint --stdin --stdin-filename \'{}\' >&2" "*.js"',
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s version {}".format(VERSION),
help="Display version of %(prog)s",
)
parser.add_argument(
"files",
nargs="+",
help='Patterns that specify files to format. The formatter will only transform staged files that are given here. Patterns may be literal file paths, or globs which will be tested against staged file paths using Python\'s fnmatch function. For example "src/*.js" will match all files with a .js extension in src/ and its subdirectories. Patterns may be negated to exclude files using a "!" character. Patterns are evaluated left-to-right. (Example: "main.js" "src/*.js" "test/*.js" "!test/todo/*")',
)
args = parser.parse_args()
files = vars(args)["files"]
format_staged_files(
file_patterns=files,
formatter=vars(args)["formatter"],
git_root=get_git_root(),
update_working_tree=not vars(args)["no_update_working_tree"],
write=not vars(args)["no_write"],
)
``` |
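File patterns in this script are evaluated left to right and the last pattern that matches a path wins, with a leading `!` negating a pattern. A standalone sketch of that rule using only `fnmatch` (the paths and patterns are invented; the real script additionally normalizes paths against the git root):
```python
from fnmatch import fnmatch

def matches(patterns, target):
    """Last matching pattern wins; a leading '!' negates the pattern."""
    is_match = False
    for pattern in patterns:
        positive, pat = (False, pattern[1:]) if pattern.startswith('!') else (True, pattern)
        if fnmatch(target, pat):
            is_match = positive
    return is_match

patterns = ['src/*.js', 'test/*.js', '!test/todo/*']
print(matches(patterns, 'src/app.js'))         # True
print(matches(patterns, 'test/todo/skip.js'))  # False: excluded by the negated pattern
```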
{
"source": "joaquimgomez/BachelorThesis-TextSimilarityMeasures",
"score": 3
} |
#### File: src/data_management/corpus_for_context2vec.py
```python
import argparse
from os import listdir, mkdir
from os.path import isfile, join
from nltk import sent_tokenize
def main(org, dest):
with open(org, 'r') as o:
corpus = o.read()
sent_corpus = sent_tokenize(corpus)
with open(dest, 'w') as d:
for sent in sent_corpus:
d.write(sent + '\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Creates the dataset from files in the input directory.")
parser.add_argument("--origen", "-o",
help = "Directory of folders containing preprocessed files.",
default = "./")
parser.add_argument("--destination", "-d",
help = "Directory where the final document for GloVe goes.",
default = "./")
args = parser.parse_args()
main(args.origen, args.destination)
```
#### File: src/data_management/preprocess_baseline.py
```python
import argparse
import nltk.data
import string
import pandas as pd
import re
from os import listdir, mkdir
from os.path import isfile, join
from gensim.parsing.porter import PorterStemmer
from gensim.parsing.preprocessing import remove_stopwords
def saveFiles(documents, dest):
mkdir(dest)
for doc in documents:
with open(dest + str(doc) + '.txt', 'w') as f:
f.write(documents[doc])
#for sentence in documents[doc]:
# f.write(sentence + '\n')
def documentPreprocessing(document):
# Filter for non-printable characters
filter_printable = lambda x: x in string.printable
# Stemmer
porter = PorterStemmer()
#for i in range(0, len(document)):
doc = document
# Lowercasing
doc = doc.lower()
# Remove emails and web addresses
doc = re.sub(r'\S*@\S*\s?', '', doc, flags = re.MULTILINE)
doc = re.sub(r'http\S+', '', doc, flags = re.MULTILINE)
# Erase non-printable characters
doc = ''.join(filter(filter_printable, doc))
# Remove Stopwords (using gensim stopwords set)
doc = remove_stopwords(doc)
# Stemming
doc = porter.stem_sentence(doc)
return doc
def obtainFileContents(index):
#tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
documents = {}
print("Obtaining content from indexed files.")
for ind, row in index.iterrows():
print("Obtaining content from file " + row['file_name'] + ".")
f = open(row['file_path'])
fContent = f.read()
documents[row['id']] = fContent #tokenizer.tokenize(fContent)
f.close()
print("\n\n")
return documents
def generateIndex(folders):
print("Constructing index from files:")
index = pd.DataFrame(columns=['id', 'category', 'file_name', 'file_path'])
currentId = 1
for (folderName, path) in folders:
print("Indexing files from folder " + folderName + ".")
files = [(file, join(path, file)) for file in listdir(path) if isfile(join(path, file)) and not file.startswith('.')]
for (file, filePath) in files:
#group = file.split("-")[0]
index = index.append({'id': str(currentId) + "-" + folderName, 'category': folderName, 'file_name': file, 'file_path': filePath}, ignore_index=True)
currentId = currentId + 1
print("\nTotal number of indexed files: " + str(len(index.index)))
print("Indexed files:")
print(index)
print("\n\n")
return index
def main(org, dest):
# Obtain all the folders
folders = [(folder, join(org, folder)) for folder in listdir(org) if not isfile(join(org, folder)) and not folder.startswith('.')]
# Generate an index for all files
index = generateIndex(folders)
# Save index to csv
mkdir('./meta/')
index.to_csv('./meta/pdfs_index.csv', index=False)
# Obtain content of all documents in index
documents = obtainFileContents(index)
# Preprocess documents
print("Preprocessing loaded documents:")
for doc in documents:
print("Preprocessing document with id " + str(doc) + ".")
documents[doc] = documentPreprocessing(documents[doc])
print("\n\n")
# Save preprocessed files
print("Saving preprocessed files.")
saveFiles(documents, dest)
print("\n\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Creates the dataset from files in the input directory.")
parser.add_argument("--origen", "-o",
help = "Directory of folders containing files.",
default = "./")
parser.add_argument("--destination", "-d",
help = "Directory where dataset goes. The destination folder must not exist.",
default = "./")
args = parser.parse_args()
main(args.origen, args.destination)
```
#### File: tasks/task2_evaluation_clustering/clustering.py
```python
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram as dendro
import src.similarities.similarities as sim
import numpy as np
class Clustering():
def computeDistanceMatrix(self, measure, relaxed):
print("Creating condensed distance matrix...")
n = len(self.documents)
m = n
mat = np.zeros((n, m))
for i in range(0, n):
for j in range(0, i + 1):
print("Computing (" + str(i) + ", " + str(j) + ") of " + "(" + str(n) + ", " + str(m) + ")...")
if (relaxed == -1):
mat[i,j] = self.embedding.distance(self.documents[i], self.documents[j], measure)
else:
mat[i,j] = self.embedding.distance(self.documents[i], self.documents[j], measure, relaxed)
self.distMat = mat
print("Condensed distance matrix created.")
return mat
def loadDistanceMatrix(self, path):
self.distMat = np.loadtxt(path)
def saveDistanceMateix(self, path):
mat = np.matrix(self.distMat)
with open(path, 'wb') as f:
for line in mat:
np.savetxt(f, line, fmt='%.2f')
def hierarchicalClustering(self, type):
self.hierClust = linkage(self.distMat, type)
def dendrogram(self):
return dendro(self.hierClust)
def createEmbeddingObject(self, embedding, embeddingModelPath, embeddingSpacyModelPath):
if (embedding == "w2v"):
return sim.Word2VecSimilarity(embeddingModelPath, embeddingSpacyModelPath)
elif (embedding == "fT"):
return sim.FastTextSimilarity(embeddingModelPath, embeddingSpacyModelPath)
elif (embedding == "GV"):
return sim.GloVeSimilarity(embeddingModelPath, embeddingSpacyModelPath)
elif (embedding == "d2v"):
return sim.Doc2VecSimilarity(embeddingModelPath)
def __init__(self, documents, embedding, embeddingModelPath, embeddingSpacyModelPath):
print("Creating Clustering object...")
self.documents = documents
print("Clustering object created.")
print("Creating internal objects for the distance computation...")
self.embedding = self.createEmbeddingObject(embedding, embeddingModelPath, embeddingSpacyModelPath)
print("Internal objects for the distance computation created.")
```
#### File: tasks/task2_evaluation_clustering/distance_matrix_calculation.py
```python
import os
import sys
import time
import argparse
import numpy as np
import pickle
from multiprocessing import Pool, cpu_count
from functools import partial
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import src.similarities.similarities as sim
"""def parDist(documents, embedding, mesure, relaxed, elem):
print("Computing (" + str(elem[0]) + ", " + str(elem[1]) + ") ...")
if (measure == 2):
return embedding.distance(documents[elem[0]], documents[elem[1]])
else:
return embedding.distance(documents[elem[0]], documents[elem[1]], measure, relaxed)
def parComputeDistanceMatrix(documents, embedding, measure, relaxed):
print("Creating condensed distance matrix...")
n = len(documents)
m = n
#mat = np.zeros((n, m))
lower_triangle = [(i, j) for i in range(0, n) for j in range(0, i + 1)]
with Pool(cpu_count() - 1) as pool:
funcAndarg = partial(parDist, documents=documents, embedding=embedding, measure=measure, relaxed=relaxed)
results = pool.map(funcAndarg, lower_triangle)
print(results)
with open("./result.pkl", 'wb') as f:
pickle.dump(results, f, pickle.HIGHEST_PROTOCOL)"""
def computeDistanceMatrix(documents, embedding, measure, relaxed):
print("Creating condensed distance matrix...")
n = len(documents)
m = n
mat = np.zeros((n, m))
for i in range(0, n):
for j in range(0, i + 1):
print("Computing (" + str(i) + ", " + str(j) + ") of " + "(" + str(n) + ", " + str(m) + ")...")
if (measure == 2):
mat[i,j] = embedding.distance(documents[i], documents[j])
else:
mat[i,j] = embedding.distance(documents[i], documents[j], measure, relaxed)
print("Condensed distance matrix created.")
return mat
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Performs the distance matrix computation.")
parser.add_argument("--documents", "-d",
help = "Documents path",
default = "NONE")
parser.add_argument("--embedding", "-e",
help = "Embedding type",
default = "NONE")
parser.add_argument("--modelembedding", "-me",
help = "Embedding model path",
default = "NONE")
parser.add_argument("--scipy", "-s",
help = "Embedding Spacy model path",
default = "NONE")
parser.add_argument("--measure", "-m",
help = "Measure to be used",
default = "NONE")
parser.add_argument("--relaxed", "-r",
help = "Relaxed measure",
default = "NONE")
parser.add_argument("--path", "-p",
help = "Distance matrix destination path",
default = "NONE")
args = parser.parse_args()
documents = args.documents
embedding = args.embedding
model = args.modelembedding
scipy = args.scipy
measure = int(args.measure)
relaxed = int(args.relaxed)
destination = args.path
print("Retrieving documents...")
docs = []
for doc in os.listdir(documents):
if doc.endswith(".txt"):
docs.append(documents + doc)
docs.sort(key = lambda x: int(x.split("/")[-1].split("-")[0]))
print(docs)
print("Documents retrieved.")
print("Creating internal objects for the distance computation...")
if (embedding == "w2v"):
m = sim.Word2VecSimilarity(model, scipy)
elif (embedding == "fT"):
m = sim.FastTextSimilarity(model, scipy)
elif (embedding == "GV"):
m = sim.GloVeSimilarity(model, scipy)
elif (embedding == "d2v"):
m = sim.Doc2VecSimilarity(model)
elif (embedding == "ELMo"):
m = sim.ELMoSimilarity(model)
elif (embedding == "NRC"):
m = sim.NRCSimilarity(model)
print("Internal objects for the distance computation created.")
start = time.time()
    #matrix = parComputeDistanceMatrix(docs, m, measure, relaxed) #-- PARALLEL VERSION
    matrix = computeDistanceMatrix(docs, m, measure, relaxed) #-- NON-PARALLEL VERSION
end = time.time()
print("The computation taken " + str(end - start) + ".")
print("Saving the computed distance matrix...")
npMat = np.matrix(matrix)
with open(destination, 'wb') as f:
for line in npMat:
np.savetxt(f, line, fmt='%.2f')
print("Distance patrix saved.")
print("\n\n")
print("The resulted matrix is the following one:")
print(npMat)
``` |
{
"source": "joaquimg/StochOptFormat",
"score": 3
} |
#### File: StochOptFormat/examples/news_vendor.py
```python
import json
import jsonschema
import math
from pulp import *
def mathoptformat_to_pulp(node, name):
sp = node['subproblem']
# Create the problem
sense = LpMaximize if sp['objective']['sense'] == 'max' else LpMinimize
prob = LpProblem(name, sense)
# Initialize the variables
vars = {}
for x in sp['variables']:
vars[x['name']] = LpVariable(x['name'])
# Add the objective function
obj = sp['objective']['function']
if obj['head'] == 'SingleVariable':
prob += vars[obj['variable']]
elif obj['head'] == 'ScalarAffineFunction':
prob += lpSum(
term['coefficient'] * vars[term['variable']] for term in obj['terms']
) + obj['constant']
else:
raise(Exception('Unsupported objective: ' + str(obj)))
# Add the constraints
for c in sp['constraints']:
f, s = c['function'], c['set']
if f['head'] == 'SingleVariable':
x = f['variable']
if s['head'] == 'GreaterThan':
vars[x].lowBound = s['lower']
elif s['head'] == 'LessThan':
vars[x].upBound = s['upper']
elif s['head'] == 'EqualTo':
vars[x].lowBound = s['value']
vars[x].upBound = s['value']
elif s['head'] == 'Interval':
vars[x].lowBound = s['lower']
vars[x].upBound = s['upper']
else:
raise(Exception('Unsupported set: ' + str(s)))
elif f['head'] == 'ScalarAffineFunction':
lhs = lpSum(
term['coefficient'] * vars[term['variable']] for term in f['terms']
) + f['constant']
if s['head'] == 'GreaterThan':
prob += lhs >= s['lower']
elif s['head'] == 'LessThan':
prob += lhs <= s['upper']
elif s['head'] == 'EqualTo':
prob += lhs == s['value']
elif s['head'] == 'Interval':
prob += lhs <= s['upper']
prob += lhs >= s['lower']
else:
raise(Exception('Unsupported set: ' + str(s)))
else:
raise(Exception('Unsupported function: ' + str(f)))
return {
'prob': prob,
'vars': vars,
'state_variables': node['state_variables'],
'realizations': node['realizations'],
}
def solve_second_stage(node, state, noise):
for (name, s) in node['state_variables'].items():
v = node['vars'][s['in']]
v.lowBound = state[name]
v.upBound = state[name]
for (name, w) in noise.items():
p = node['vars'][name]
p.lowBound = w
p.upBound = w
node['prob'].solve()
return {
'objective': value(node['prob'].objective),
'pi': {
name: node['vars'][s['in']].dj
for (name, s) in node['state_variables'].items()
}
}
def solve_first_stage(node):
node['prob'].solve()
return {
name: node['vars'][s['out']].varValue
for (name, s) in node['state_variables'].items()
}
def add_cut(first_stage, x, ret):
cut_term = lpSum(
p * r['objective'] +
p * lpSum(
r['pi'][name] * (first_stage['vars'][s['out']] - x[name])
for (name, s) in first_stage['state_variables'].items()
)
for (p, r) in ret
)
if first_stage['prob'].sense == -1:
first_stage['prob'] += first_stage['theta'] <= cut_term
else:
first_stage['prob'] += first_stage['theta'] >= cut_term
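# Note (added comment): add_cut assembles the classic single-cut Benders / L-shaped
# approximation of the expected recourse value. With x the first-stage state variables,
# x_hat the current trial point, and (p_k, Q_k, pi_k) the probability, optimal objective
# and duals of scenario k, the constraint added is
#     theta <= sum_k p_k * ( Q_k + pi_k' * (x - x_hat) )
# for a maximization problem (and >= for minimization), matching the lpSum built above.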
def load_two_stage_problem(filename):
with open(filename, 'r') as io:
data = json.load(io)
assert(data['version']['major'] == 0)
assert(data['version']['minor'] == 1)
assert(len(data['nodes']) == 2)
assert(len(data['edges']) == 2)
nodes = {
name: mathoptformat_to_pulp(node, name)
for (name, node) in data['nodes'].items()
}
first_stage, second_stage = None, None
for edge in data['edges']:
if edge['from'] == data['root']['name']:
first_stage = nodes[edge['to']]
else:
second_stage = nodes[edge['to']]
for (name, init) in data['root']['state_variables'].items():
x = first_stage['vars'][first_stage['state_variables'][name]['in']]
x.lowBound = init['initial_value']
x.upBound = init['initial_value']
first_stage['theta'] = LpVariable("theta", -10**6, 10**6)
first_stage['prob'].objective += first_stage['theta']
return first_stage, second_stage
def benders(first_stage, second_stage, iteration_limit = 20):
bounds = []
for iter in range(iteration_limit):
x = solve_first_stage(first_stage)
ret = [(
noise['probability'],
solve_second_stage(second_stage, x, noise['support'])
) for noise in second_stage['realizations']]
add_cut(first_stage, x, ret)
det_bound = value(first_stage['prob'].objective)
stat_bound = det_bound - first_stage['theta'].varValue + sum(
p * value(r['objective']) for (p, r) in ret
)
bounds.append((det_bound, stat_bound))
if abs(det_bound - stat_bound) < 1e-6:
break
return bounds
def validate(filename):
with open(filename, 'r') as io:
instance = json.load(io)
with open('../sof.schema.json', 'r') as io:
schema = json.load(io)
jsonschema.validate(instance = instance, schema = schema)
validate('news_vendor.sof.json')
first_stage, second_stage = load_two_stage_problem('news_vendor.sof.json')
ret = benders(first_stage, second_stage)
# Check solution!
x = solve_first_stage(first_stage)
assert(x['x'] == 10)
print(ret)
``` |
{
"source": "joaquimjfernandes/Curso-de-Python",
"score": 3
} |
#### File: Aulas Python/m03/aula20c.py
```python
def contador(*num):
tam = len(num)
print(f'Recebi os valores {num} e ao todo são {tam} números')
# Programa Principal
contador(2, 1, 7)
contador(8, 0)
contador(4, 4, 7, 6, 2)
```
#### File: Desafios Python/m02/ex047.py
```python
print('\033[32;1mDESAFIO 047 - Contagem dos Pares\033[m')
print('\033[32;1mALUNO:\033[m \033[36;1m<NAME>\033[m')
print('-' * 50)
# -----------------------------------------------------------------------
def Contador():
for n in range(1, 51):
if n % 2 == 0:
print(n, end=' ')
print('Acabou!')
# -----------------------------------------------------------------------
# Programa Principal
Contador()
```
#### File: Desafios Python/m02/ex050.py
```python
print('\033[32;1mDESAFIO 050 - Soma dos Pares\033[m')
print('\033[32;1mALUNO:\033[m \033[36;1m<NAME>\033[m')
print('-' * 50)
# -----------------------------------------------------------------------
def SomaPar():
soma= cont = 0
for n in range(1, 7):
n = int(input(f'Digite o {n}º número: '))
if n % 2 == 0:
soma += n
cont += 1
print(f'Recebemos {cont} Pares e a sua soma foi {soma}')
# -----------------------------------------------------------------------
# Programa Principal
SomaPar()
``` |
{
"source": "JoaquimXG/csv-merge",
"score": 3
} |
#### File: src/merge_csv/merge_files.py
```python
import pandas as pd
import logging
from .validate_options import validate_options
from .merge_dataframes import merge_dataframes_multiple_columns, merge_dataframes_single_column
def merge_files(left_file: str, right_file: str, columns: list, keep: str = 'none', keep_missing: str = 'none') -> pd.DataFrame:
"""
Merges two csv files
Parameters:
left_file (str): Path to first file
right_file (str): Path to second file
    columns (list): Names of the columns to merge files on
    keep (str): Table to keep values from when no match is found. One of ['left', 'right', 'both', 'none']. Default is 'none'
    keep_missing (str): Table to keep values from when a row contains no value in a given column. One of ['left', 'right', 'both', 'none']. Default is 'none'
Returns:
(pd.DataFrame): Merged DataFrame
"""
log = logging.getLogger(__name__)
dfLeft = pd.read_csv(left_file)
dfRight = pd.read_csv(right_file)
validate_options(dfLeft, dfRight, columns, keep, keep_missing)
log.info("Starting Merge")
if len(columns) == 1:
return merge_dataframes_single_column(dfLeft, dfRight, columns[0], keep, keep_missing)
else:
return merge_dataframes_multiple_columns(dfLeft, dfRight, columns, keep)
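# Usage sketch (added note; the file paths and column name below are hypothetical):
#   merged = merge_files("left.csv", "right.csv", columns=["id"], keep="left", keep_missing="none")
#   merged.to_csv("merged.csv", index=False)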
``` |
{
"source": "Joaquin6/StockAPI",
"score": 2
} |
#### File: Joaquin6/StockAPI/algorithm.py
```python
from colorama import Fore, Style, init as ColoramaInit
import alpaca_trade_api as alpaca
from datetime import *
import numpy as np
import logging
from scrape import *
import time
import threading
import sys
from config import *
ColoramaInit(autoreset=True)
#Logging info
#logging.basicConfig(filename='debug.log', level=logging.DEBUG)
logging.basicConfig(filename='Misc/trades.log', level=logging.INFO)
class algo:
def __init__(self):
#Initialization of accounts api, account, twelvedataclients
self.api = alpaca.REST(TESTAPI_KEY, TESTAPI_SECRET, APCA_API_BASE_URL, 'v2')
self.account = self.api.get_account()
#Potential Stocks to check
self.tickers = []
#Stocks the made it past the initial check
self.approved = ['F', 'SPCE', 'RBLX', 'TAL', 'NVDA', 'MFC', 'IOVA', 'VFC', 'BEKE', 'YALA']
self.blacklist = []
self.timeToClose = None
#self.importT()
#self.test()
self.run()
def test(self):
#RSI indicator
#Dead Code
"""
@:param:
:return:
"""
#under30 is undervalued/oversold and over 70 is overvalued/undersold
for stonk in self.approved:
print(stonk)
alp = self.api.get_barset(stonk,'1D',limit=14).df
alp.columns = alp.columns.get_level_values(1)
if(len(alp)>0):
test5=sum(alp['close'])/len(alp)
print("Average closing price {}".format(test5))
#determine if higher or lower
if(alp['close'][13]>alp['close'][12]):
print("current day is higher")
else:
print("Previous day is higher")
#rsi = 100 - (100/(1+RS))
for x in range(len(alp['close'])-1):
print(alp['close'][x])
print(alp['close'][x]-alp['open'][x+1])
else:
print("Empty Dataframe FUCKING ALPACA")
return
def run(self):
self.importT()
while True:
#Calculate time and time to closing as well as sets the positions
self.clock = self.api.get_clock()
closingTime = self.clock.next_close.replace(tzinfo=timezone.utc).timestamp()
currTime = self.clock.timestamp.replace(tzinfo=timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
positions = self.api.list_positions()
#Checks if markets r open
if self.clock.is_open:
print("MARKET IS OPEN")
if (self.timeToClose < (60 * 15)):
# Close all positions when 15 minutes til market close.
print("Market closing soon. importing tickers.")
self.importT()
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)
#major bug here need to just clean up this elif
elif(self.timeToClose == (60*60)):
print("An Hour is Left Importing for end day trading!")
self.importT()
time.sleep(60)
else:
#Checking tickers for indicator
#When live, on 1 minute interval we check
#If stocks are ready to sell
self.sma()
for position in positions:
print("check position")
profloss = float(position.unrealized_plpc) * 100
if (profloss > 8 or (profloss < -4)):
orderSide = 'sell'
qty = abs(int(float(position.qty)))
                            # not entirely sure what respSO is
#Logging, printing, submitting orders
logging.info("AT {} SOLD {}".format(time.ctime(),position.symbol))
print("AT {} SOLD {}".format(time.ctime(),position.symbol))
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide))
tSubmitOrder.start()
tSubmitOrder.join()
#Checkings the SMA indicator of our tickers for purchases
#This needs to be clean up and proper
time.sleep(60)
else:
#When off
print("OFF LINE")
time.sleep(60*15)
logging.info("PROGRAM OFF")
return
def submitOrder(self,qty,symbol,orderSide):
"""
:param qty: Quantity of the shares
:param symbol: Which specific symbol
:param orderSide: Do we purchase or sell it
:return:
"""
x = symbol
t = qty
self.api.submit_order(symbol=x, qty=t, side=orderSide, type='market',time_in_force='day')
return
def sma(self):
"""
Indicator runs through all our tickers for the algorithm
:return: void
"""
barTimeframe = "1D" # 1Min, 5Min, 15Min, 1H, 1D
for x in self.tickers:
returned_data = self.api.get_barset(x, barTimeframe, limit=100)
closeList = []
volumeList = []
#Converting returned data to dataframe and flattening the columns
bar = returned_data.df
bar.columns = bar.columns.get_level_values(1)
# Iterates through the barset thats in a dataframe and puts them into individual lists
for index,row in bar.iterrows():
#print(row['open'],row['volume'])
closeList.append(row['close'])
volumeList.append(row['volume'])
#From lists to numpy arrays to use
closeList = np.array(closeList, dtype=np.float64)
volumeList = np.array(volumeList, dtype=np.float64)
# Calculated SMA trading indicator
SMA20 = self.moving_average(closeList,20)
SMA50 = self.moving_average(closeList,50)
final20 = sum(SMA20)/len(SMA20)
final50 = sum(SMA50)/len(SMA50)
#Now that we have the stocks for the trading indicator
#MASSIVE BUG HERE EDIT THE TRY/EXCEPT WHEN GET TO IT
if(final20>final50):
self.approved.append(x)
try:
#Throws an error if there is not a position which means we buy it
openPosition = self.api.get_position(x)
except:
#Calculates the price and size of our position
price,targetPositionSize = self.calculateQ(x)
#logs and prints all the transaction (Put in function for later)
print("On {} We are going to buy: {} at {} for a total amount of {}".format(time.ctime(),x,price,round(targetPositionSize)-1))
logging.info("On {} We are going to buy: {} at {} for a total amount of {}".format(time.ctime(), x,price,round(targetPositionSize)-1))
#order examples
#Send order
#NEEDS TO BE WHOLE NUMBER (Buggy with Fractional Shares
self.submitOrder(round(targetPositionSize) - 1, x, 'buy')
try:
print("Success Buy")
except:
print("ERROR WITH ORDER PROBABLY ON PRICE")
return
def moving_average(self,x, w):
return np.convolve(x, np.ones(w), 'valid') / w
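    # Worked example (added comment): np.convolve with a length-w window of ones in
    # 'valid' mode sums each run of w consecutive values; dividing by w gives the SMA.
    # e.g. moving_average([1, 2, 3, 4, 5], 3) -> array([2., 3., 4.])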
def importT(self):
"""
Sets the tickers list of all volatile and valid stocks from scrape
:return:
"""
#imprt tickers
scrape = YahooScrape()
scrape.findVolatile(100)
self.tickers.extend(scrape.sortValid(1))
print(self.tickers)
logging.info(self.tickers)
return
def calculateQ(self,stock):
""" This calculates the amount we are going to buy with current cash balance
:param stock: Ticker of the stock
:return:
"""
# Opens new position if one does not exist
# If we havent already bought this stock
# Gets our cash balance and the last quote for the stock
cashBalance = float(self.api.get_account().cash)
quoteL = self.api.get_last_quote(stock)._raw
# Then calculates the target position based on our maxpos(.25) and current price
price = quoteL['askprice']
if (price == 0):
price = quoteL['bidprice']
targetPositionSize = round(cashBalance / (price / maxPos), 2)
return price,targetPositionSize
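    # Worked example (added comment): with cashBalance = 1000, price = 50 and maxPos = 0.25,
    # targetPositionSize = round(1000 / (50 / 0.25), 2) = 5.0 shares, i.e. roughly 25% of
    # the cash balance allocated to this position.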
```
#### File: Joaquin6/StockAPI/indicators.py
```python
import config
import numpy as np
import pandas as pd
import alpaca_trade_api as alpaca
from config import *
class indicators():
def __init__(self):
        #ind determines which indicator it is
"""
ind determines which indicator to use
0 is Simple Moving Average
1 is Rsi indicator
"""
self.tickers = 'BA'
self.api = alpaca.REST(TESTAPI_KEY, TESTAPI_SECRET, APCA_API_BASE_URL, 'v2')
    # actual indicators
def sma(self):
return
#calculations
def moving_average(self,x, w):
"""
Closing Prices
:param x:Sequence of closing prices
:param w: w is a length for a sequence of ones
:return: The moving average of the sequence(x) in a list so we can see
"""
return np.convolve(x, np.ones(w), 'valid') / w
def rsiIndicator(self):
return
``` |
{
"source": "JoaquinAmatRodrigo/skforecaster",
"score": 3
} |
#### File: ForecasterAutoregCustom/tests/test_init.py
```python
import pytest
import pandas as pd
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression
def create_predictors(y):
'''
Create first 5 lags of a time series.
'''
lags = y[-1:-6:-1]
return lags
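# Illustration (added comment): the slice y[-1:-6:-1] returns the last five values in
# reverse order, e.g. create_predictors(np.arange(10)) -> array([9, 8, 7, 6, 5]).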
def test_init_exception_when_window_size_argument_is_string():
'''
'''
with pytest.raises(Exception):
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = '5'
)
def test_init_exception_when_fun_predictors_argument_is_string():
'''
'''
with pytest.raises(Exception):
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = 'create_predictors',
window_size = 5
)
```
#### File: ForecasterAutoregCustom/tests/test_predict_interval.py
```python
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression
def create_predictors(y):
'''
Create first 5 lags of a time series.
'''
lags = y[-1:-6:-1]
return lags
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True():
'''
Test output when regressor is LinearRegression and one step ahead is predicted
using in sample residuals.
'''
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=11, step=1)
)
results = forecaster.predict_interval(steps=1, in_sample_residuals=True, n_boot=2)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_True():
'''
Test output when regressor is LinearRegression and two step ahead is predicted
using in sample residuals.
'''
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.],
[11., 23., 23.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=12, step=1)
)
results = forecaster.predict_interval(steps=2, in_sample_residuals=True, n_boot=2)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_False():
'''
Test output when regressor is LinearRegression and one step ahead is predicted
using out sample residuals.
'''
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=11, step=1)
)
results = forecaster.predict_interval(steps=1, in_sample_residuals=False, n_boot=2)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_False():
'''
Test output when regressor is LinearRegression and two step ahead is predicted
using out sample residuals.
'''
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.],
[11., 23., 23.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=12, step=1)
)
results = forecaster.predict_interval(steps=2, in_sample_residuals=False)
pd.testing.assert_frame_equal(results, expected)
```
#### File: ForecasterAutoregMultiOutput/tests/test_predict.py
```python
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregMultiOutput import ForecasterAutoregMultiOutput
from sklearn.linear_model import LinearRegression
def test_predict_output_when_regressor_is_LinearRegression():
'''
Test predict output when using LinearRegression as regressor.
'''
forecaster = ForecasterAutoregMultiOutput(LinearRegression(), lags=3, steps=3)
forecaster.fit(y=pd.Series(np.arange(50)))
results = forecaster.predict()
expected = pd.Series(
data = np.array([50., 51., 52.]),
index = pd.RangeIndex(start=50, stop=53, step=1),
name = 'pred'
)
pd.testing.assert_series_equal(results, expected)
```
#### File: ForecasterAutoreg/tests/test_create_lags.py
```python
import pytest
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import LinearRegression
def test_create_lags_output():
'''
Test matrix of lags is created properly when langs=3 and y=np.arange(10).
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
results = forecaster._create_lags(y=np.arange(10))
expected = (np.array([[2., 1., 0.],
[3., 2., 1.],
[4., 3., 2.],
[5., 4., 3.],
[6., 5., 4.],
[7., 6., 5.],
[8., 7., 6.]]),
np.array([3., 4., 5., 6., 7., 8., 9.]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_create_lags_exception_when_len_of_y_is_lower_than_maximum_lag():
'''
Test exception is raised when length of y is lower than maximum lag included
in the forecaster.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=10)
with pytest.raises(Exception):
forecaster._create_lags(y=np.arange(5))
```
#### File: ForecasterAutoreg/tests/test_get_coef.py
```python
from pytest import approx
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
def test_output_get_coef_when_regressor_is_LinearRegression():
'''
Test output of get_coef when regressor is LinearRegression with lags=3
and it is trained with y=pd.Series(np.arange(5)).
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(5)))
expected = pd.DataFrame({
'feature': ['lag_1', 'lag_2', 'lag_3'],
'coef': np.array([0.33333333, 0.33333333, 0.33333333])
})
results = forecaster.get_coef()
assert (results['feature'] == expected['feature']).all()
assert results['coef'].values == approx(expected['coef'].values)
def test_output_get_coef_when_regressor_is_RandomForest():
'''
Test output of get_coef when regressor is RandomForestRegressor with lags=3
and it is trained with y=pd.Series(np.arange(5)).
'''
forecaster = ForecasterAutoreg(RandomForestRegressor(n_estimators=1, max_depth=2), lags=3)
forecaster.fit(y=pd.Series(np.arange(5)))
expected = None
results = forecaster.get_coef()
assert results is expected
```
#### File: ForecasterAutoreg/tests/test_get_feature_importances.py
```python
from pytest import approx
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
def test_output_get_feature_importance_when_regressor_is_RandomForest():
'''
'''
forecaster = ForecasterAutoreg(RandomForestRegressor(n_estimators=1, max_depth=2, random_state=123), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
expected = pd.DataFrame({
'feature': ['lag_1', 'lag_2', 'lag_3'],
'importance': np.array([0.94766355, 0., 0.05233645])
})
results = forecaster.get_feature_importance()
assert (results['feature'] == expected['feature']).all()
assert results['importance'].values == approx(expected['importance'].values)
def test_output_get_feature_importance_when_regressor_is_linear_model():
'''
'''
forecaster = ForecasterAutoreg(Lasso(), lags=3)
forecaster.fit(y=pd.Series(np.arange(5)))
expected = None
results = forecaster.get_feature_importance()
assert results is expected
```
#### File: skforecast/model_selection_statsmodels/model_selection_statsmodels.py
```python
from typing import Union, Dict, List, Tuple
import numpy as np
import pandas as pd
import logging
from tqdm import tqdm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.model_selection import ParameterGrid
from statsmodels.tsa.statespace.sarimax import SARIMAX
from ..model_selection import time_series_splitter
logging.basicConfig(
format = '%(asctime)-5s %(name)-10s %(levelname)-5s %(message)s',
level = logging.INFO,
)
def backtesting_sarimax(
y: pd.Series,
initial_train_size: int,
steps: int,
metric: str,
refit: bool=False,
order: tuple=(1, 0, 0),
seasonal_order: tuple=(0, 0, 0, 0),
trend: str=None,
alpha: float= 0.05,
exog: Union[pd.Series, pd.DataFrame]=None,
sarimax_kwargs: dict={},
fit_kwargs: dict={'disp':0},
verbose: bool=False
) -> Tuple[np.array, pd.DataFrame]:
'''
Backtesting (validation) of `SARIMAX` model from statsmodels v0.12. The model
is trained using the `initial_train_size` first observations, then, in each
iteration, a number of `steps` predictions are evaluated. If refit is `True`,
the model is re-fitted in each iteration before making predictions.
https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_forecasting.html
Parameters
----------
y : pandas Series
Time series values.
initial_train_size: int
Number of samples used in the initial train.
steps : int
Number of steps to predict.
metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}
Metric used to quantify the goodness of fit of the model.
refit: bool, default False
Whether to re-fit the model in each iteration.
order: tuple
The (p,d,q) order of the model for the number of AR parameters, differences,
and MA parameters. d must be an integer indicating the integration order
of the process, while p and q may either be an integers indicating the AR
and MA orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is an
AR(1) model: (1,0,0).
seasonal_order: tuple
The (P,D,Q,s) order of the seasonal component of the model for the AR parameters,
differences, MA parameters, and periodicity. D must be an integer
indicating the integration order of the process, while P and Q may either
be an integers indicating the AR and MA orders (so that all lags up to
those orders are included) or else iterables giving specific AR and / or
MA lags to include. s is an integer giving the periodicity (number of
periods in season), often it is 4 for quarterly data or 12 for monthly data.
Default is no seasonal effect.
trend: str {‘n’,’c’,’t’,’ct’}
Parameter controlling the deterministic trend polynomial A(t). Can be
specified as a string where ‘c’ indicates a constant (i.e. a degree zero
component of the trend polynomial), ‘t’ indicates a linear trend with time,
and ‘ct’ is both. Can also be specified as an iterable defining the non-zero
polynomial exponents to include, in increasing order. For example, [1,1,0,1]
denotes a+bt+ct3. Default is to not include a trend component.
alpha: float, default 0.05
The significance level for the confidence interval. The default
alpha = .05 returns a 95% confidence interval.
exog : pd.Series, pd.DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
sarimax_kwargs: dict, default `{}`
Additional keyword arguments passed to SARIMAX constructor. See more in
https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX
fit_kwargs: dict, default `{'disp':0}`
Additional keyword arguments passed to SARIMAX fit. See more in
https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit
verbose : bool, default `False`
Print number of folds used for backtesting.
Returns
-------
metric_value: np.ndarray shape (1,)
Value of the metric.
backtest_predictions: pandas DataFrame
Values predicted and their estimated interval:
column pred = predictions.
column lower = lower bound of the interval.
column upper = upper bound interval of the interval.
'''
if metric not in ['mean_squared_error', 'mean_absolute_error',
'mean_absolute_percentage_error']:
raise Exception(
f"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error' and "
f"'mean_absolute_percentage_error'. Got {metric}."
)
metrics = {
'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'mean_absolute_percentage_error': mean_absolute_percentage_error
}
metric = metrics[metric]
folds = int(np.ceil((len(y) - initial_train_size) / steps))
remainder = (len(y) - initial_train_size) % steps
backtest_predictions = []
if verbose:
print(f"Number of observations used for training: {initial_train_size}")
print(f"Number of observations used for backtesting: {len(y) - initial_train_size}")
print(f" Number of folds: {folds}")
print(f" Number of steps per fold: {steps}")
if remainder != 0:
print(f" Last fold only includes {remainder} observations.")
if folds > 50 and refit:
print(
f"Model will be fit {folds} times. This can take substantial amounts of time. "
f"If not feasible, try with `refit = False`."
)
if refit:
# In each iteration (except the last one) the model is fitted before
# making predictions. The train size increases by `steps` in each iteration.
for i in range(folds):
train_size = initial_train_size + i * steps
if exog is not None:
next_window_exog = exog.iloc[train_size:train_size + steps, ]
if i < folds - 1: # from the first step to one before the last one.
if exog is None:
model = SARIMAX(
endog = y.iloc[:train_size],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
model = SARIMAX(
endog = y.iloc[:train_size],
exog = exog.iloc[:train_size, ],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
if remainder == 0:
if exog is None:
model = SARIMAX(
endog = y.iloc[:train_size],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
model = SARIMAX(
endog = y.iloc[:train_size],
exog = exog.iloc[:train_size, ],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
# Only the remaining steps need to be predicted
steps = remainder
if exog is None:
model = SARIMAX(
endog = y.iloc[:train_size],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
model = SARIMAX(
endog = y.iloc[:train_size],
exog = exog.iloc[:train_size, ],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
backtest_predictions.append(pred)
else:
# Since the model is only fitted with the initial_train_size, the model
# must be extended in each iteration to include the data needed to make
# predictions.
if exog is None:
model = SARIMAX(
endog = y.iloc[:initial_train_size],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
else:
model = SARIMAX(
endog = y.iloc[:initial_train_size],
exog = exog.iloc[:initial_train_size],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
for i in range(folds):
last_window_end = initial_train_size + i * steps
last_window_start = (initial_train_size + i * steps) - steps
last_window_y = y.iloc[last_window_start:last_window_end]
if exog is not None:
last_window_exog = exog.iloc[last_window_start:last_window_end]
next_window_exog = exog.iloc[last_window_end:last_window_end + steps]
if i == 0:
# No extend is needed for the first fold
if exog is None:
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
elif i < folds - 1:
if exog is None:
model = model.extend(endog=last_window_y)
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
model = model.extend(endog=last_window_y, exog=last_window_exog)
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
if remainder == 0:
if exog is None:
model = model.extend(endog=last_window_y)
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
model = model.extend(endog=last_window_y, exog=last_window_exog)
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
# Only the remaining steps need to be predicted
steps = remainder
if exog is None:
model = model.extend(endog=last_window_y)
pred = model.get_forecast(steps=steps)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
else:
model = model.extend(endog=last_window_y, exog=last_window_exog)
pred = model.get_forecast(steps=steps, exog=next_window_exog)
pred = pd.concat((
pred.predicted_mean.rename("predicted_mean"),
pred.conf_int(alpha=alpha)),
axis = 1
)
backtest_predictions.append(pred)
backtest_predictions = pd.concat(backtest_predictions)
metric_value = metric(
y_true = y.iloc[initial_train_size: initial_train_size + len(backtest_predictions)],
y_pred = backtest_predictions['predicted_mean']
)
return np.array([metric_value]), backtest_predictions
def cv_sarimax(
y: pd.Series,
initial_train_size: int,
steps: int,
metric: str,
order: tuple=(1, 0, 0),
seasonal_order: tuple=(0, 0, 0, 0),
trend: str=None,
alpha: float= 0.05,
exog: Union[pd.Series, pd.DataFrame]=None,
allow_incomplete_fold: bool=True,
sarimax_kwargs: dict={},
fit_kwargs: dict={'disp':0},
verbose: bool=False
) -> Tuple[np.array, np.array]:
'''
Cross-validation of `SARIMAX` model from statsmodels v0.12. The order of data
is maintained and the training set increases in each iteration.
Parameters
----------
y : pandas Series
Time series values.
order: tuple
The (p,d,q) order of the model for the number of AR parameters, differences,
and MA parameters. d must be an integer indicating the integration order
of the process, while p and q may either be an integers indicating the AR
and MA orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is an
AR(1) model: (1,0,0).
seasonal_order: tuple
The (P,D,Q,s) order of the seasonal component of the model for the AR parameters,
differences, MA parameters, and periodicity. D must be an integer
indicating the integration order of the process, while P and Q may either
be an integers indicating the AR and MA orders (so that all lags up to
those orders are included) or else iterables giving specific AR and / or
MA lags to include. s is an integer giving the periodicity (number of
periods in season), often it is 4 for quarterly data or 12 for monthly data.
Default is no seasonal effect.
trend: str {‘n’,’c’,’t’,’ct’}
Parameter controlling the deterministic trend polynomial A(t). Can be
specified as a string where ‘c’ indicates a constant (i.e. a degree zero
component of the trend polynomial), ‘t’ indicates a linear trend with time,
and ‘ct’ is both. Can also be specified as an iterable defining the non-zero
polynomial exponents to include, in increasing order. For example, [1,1,0,1]
denotes a+bt+ct3. Default is to not include a trend component.
alpha: float, default 0.05
The significance level for the confidence interval. The default
alpha = .05 returns a 95% confidence interval.
initial_train_size: int
Number of samples in the initial train split.
steps : int
Number of steps to predict.
metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}
Metric used to quantify the goodness of fit of the model.
exog : pandas Series, pandas DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
sarimax_kwargs: dict, default {}
Additional keyword arguments passed to SARIMAX initialization. See more in
https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX
fit_kwargs: dict, default `{'disp':0}`
Additional keyword arguments passed to SARIMAX fit. See more in
https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit
verbose : bool, default `False`
Print number of folds used for cross-validation.
Returns
-------
cv_metrics: 1D np.ndarray
Value of the metric for each partition.
cv_predictions: pandas DataFrame
Values predicted and their estimated interval:
column pred = predictions.
column lower = lower bound of the interval.
column upper = upper bound interval of the interval.
'''
if metric not in ['mean_squared_error', 'mean_absolute_error',
'mean_absolute_percentage_error']:
raise Exception(
f"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error' and "
f"'mean_absolute_percentage_error'. Got {metric}."
)
metrics = {
'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'mean_absolute_percentage_error': mean_absolute_percentage_error
}
metric = metrics[metric]
if isinstance(y, pd.Series):
y = y.to_numpy(copy=True)
if isinstance(exog, (pd.Series, pd.DataFrame)):
exog = exog.to_numpy(copy=True)
cv_predictions = []
cv_metrics = []
splits = time_series_splitter(
y = y,
initial_train_size = initial_train_size,
steps = steps,
allow_incomplete_fold = allow_incomplete_fold,
verbose = verbose
)
for train_index, test_index in splits:
        if exog is None:
            # y (and exog) were converted to numpy arrays above, so use plain indexing
            model = SARIMAX(
                endog = y[train_index],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
pred = model.get_forecast(steps=len(test_index))
pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))
else:
model = SARIMAX(
                endog = y[train_index],
                exog = exog[train_index],
order = order,
seasonal_order = seasonal_order,
trend = trend,
**sarimax_kwargs
).fit(**fit_kwargs)
            pred = model.get_forecast(steps=len(test_index), exog=exog[test_index])
pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))
metric_value = metric(
                y_true = y[test_index],
y_pred = pred[:, 0]
)
cv_metrics.append(metric_value)
cv_predictions.append(pred)
return np.array(cv_metrics), np.concatenate(cv_predictions)
def grid_search_sarimax(
y: pd.Series,
param_grid: dict,
initial_train_size: int,
steps: int,
metric: str,
exog: Union[pd.Series, pd.DataFrame]=None,
refit: bool=False,
sarimax_kwargs: dict={},
fit_kwargs: dict={'disp':0},
verbose: bool=False
) -> pd.DataFrame:
'''
Exhaustive search over specified parameter values for a `SARIMAX` model from
statsmodels v0.12. Validation is done using time series cross-validation or
backtesting.
Parameters
----------
y : pandas Series
Time series values.
param_grid : dict
Dictionary with parameters names (`str`) as keys and lists of parameter
settings to try as values. Allowed parameters in the grid are: order,
seasonal_order and trend.
initial_train_size: int
Number of samples used in the initial train.
steps : int
Number of steps to predict.
metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}
Metric used to quantify the goodness of fit of the model.
exog : np.ndarray, pd.Series, pd.DataFrame, default `None`
Exogenous variable/s included as predictor/s. Must have the same
number of observations as `y` and should be aligned so that y[i] is
regressed on exog[i].
refit: bool, default False
Whether to re-fit the model in each iteration.
sarimax_kwargs: dict, default `{}`
Additional keyword arguments passed to SARIMAX initialization. See more in
https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX
fit_kwargs: dict, default `{'disp':0}`
Additional keyword arguments passed to SARIMAX fit. See more in
https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit
verbose : bool, default `True`
Print number of folds used for cv or backtesting.
Returns
-------
results: pandas DataFrame
Metric value estimated for each combination of parameters.
'''
params_list = []
metric_list = []
# bic_list = []
# aic_list = []
if 'order' not in param_grid:
param_grid['order'] = [(1, 0, 0)]
if 'seasonal_order' not in param_grid:
param_grid['seasonal_order'] = [(0, 0, 0, 0)]
if 'trend' not in param_grid:
param_grid['trend'] = [None]
keys_to_ignore = set(param_grid.keys()) - {'order', 'seasonal_order', 'trend'}
if keys_to_ignore:
print(
f'Only arguments: order, seasonal_order and trend are allowed for grid search.'
f' Ignoring {keys_to_ignore}.'
)
for key in keys_to_ignore:
del param_grid[key]
param_grid = list(ParameterGrid(param_grid))
logging.info(
f"Number of models compared: {len(param_grid)}"
)
for params in tqdm(param_grid, ncols=90):
metric_value = backtesting_sarimax(
y = y,
exog = exog,
order = params['order'],
seasonal_order = params['seasonal_order'],
trend = params['trend'],
initial_train_size = initial_train_size,
steps = steps,
refit = refit,
metric = metric,
sarimax_kwargs = sarimax_kwargs,
fit_kwargs = fit_kwargs,
verbose = verbose
)[0]
params_list.append(params)
metric_list.append(metric_value)
# model = SARIMAX(
# endog = y,
# exog = exog,
# order = params['order'],
# seasonal_order = params['seasonal_order'],
# trend = params['trend'],
# **sarimax_kwargs
# ).fit(**fit_kwargs)
# bic_list.append(model.bic)
# aic_list.append(model.aic)
results = pd.DataFrame({
'params': params_list,
'metric': metric_list,
#'bic' : bic_list,
#'aic' : aic_list
})
results = results.sort_values(by='metric', ascending=True)
results = pd.concat([results, results['params'].apply(pd.Series)], axis=1)
return results
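# Usage sketch (added note, not part of the original module). `y` is any pandas Series
# of observations; the parameter values below are illustrative only:
#   metric_value, preds = backtesting_sarimax(
#       y=y, initial_train_size=100, steps=10,
#       metric='mean_absolute_error', order=(1, 1, 1), refit=False
#   )
#   results = grid_search_sarimax(
#       y=y, param_grid={'order': [(1, 0, 0), (1, 1, 1)]},
#       initial_train_size=100, steps=10, metric='mean_squared_error'
#   )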
```
#### File: utils/tests/test_preproces_last_window.py
```python
import pytest
import numpy as np
import pandas as pd
from skforecast.utils import preprocess_last_window
def test_output_preprocess_last_window_when_last_window_index_is_DatetimeIndex_and_has_frequency():
    '''
    Test values returned when last_window is a pandas Series with DatetimeIndex
and freq is not None.
'''
last_window = pd.Series(
data = np.arange(3),
index = pd.date_range("1990-01-01", periods=3, freq='D')
)
results = preprocess_last_window(last_window)
expected = (np.arange(3),
pd.DatetimeIndex(['1990-01-01', '1990-01-02', '1990-01-03'],
dtype='datetime64[ns]', freq='D')
)
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_output_preprocess_last_window_when_last_window_index_is_DatetimeIndex_but_has_no_frequency():
    '''
    Test values returned when last_window is a pandas Series with DatetimeIndex
and freq is None.
'''
last_window = pd.Series(
data = np.arange(3),
index = pd.to_datetime(["1990-01-01", "1990-01-02", "1990-01-03"])
)
results = preprocess_last_window(last_window)
expected = (np.arange(3),
pd.RangeIndex(start=0, stop=3, step=1)
)
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_output_preprocess_last_window_when_last_window_index_is_not_DatetimeIndex():
'''
    Test values returned when last_window is a pandas Series without DatetimeIndex.
'''
last_window = pd.Series(data=np.arange(3))
results = preprocess_last_window(last_window)
expected = (np.arange(3),
pd.RangeIndex(start=0, stop=3, step=1)
)
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
``` |
{
"source": "joaquinbosano/Coberturas-dinamicas",
"score": 3
} |
#### File: joaquinbosano/Coberturas-dinamicas/dataclass_git.py
```python
import quandl
import pandas as pd
class Dataholder:
def __init__(self, subyacente, tiempo_inicio, tiempo_terminal, api_key, cantidad_contratos = 1):
self.__api_key = str(api_key)
self.__subyacente = subyacente
self.__cantidad_contratos = cantidad_contratos
self.__tiempo_inicio = tiempo_inicio
self.__tiempo_terminal = tiempo_terminal
self.__dict_aceptados = {"crudo": "CHRIS/ICE_T", "cobre": "CHRIS/CME_HG", "oro": "CHRIS/CME_GC", "soja":"CHRIS/ICE_IS"}
def Crear(self):
quandl.ApiConfig.api_key = self.__api_key
lista_convencional = []
lista_settle = []
lista_last = []
iterador = 1
if self.__subyacente in list(self.__dict_aceptados.keys()):
if self.__subyacente == list(self.__dict_aceptados.keys())[0]:
lista_convencional = ["EIA/PET_RWTC_D"]
while iterador < self.__cantidad_contratos + 1:
nombre_convencional = str(self.__dict_aceptados[self.__subyacente]) + str(iterador)
lista_convencional.append(nombre_convencional)
lista_settle.append(nombre_convencional + " - Settle")
iterador += 1
placeholder = quandl.get(lista_convencional , start_date = self.__tiempo_inicio, end_date = self.__tiempo_terminal)
return placeholder.loc[:,lista_settle].dropna(axis = 0, how = "any")
else:
while iterador < self.__cantidad_contratos + 1:
nombre_convencional = str(self.__dict_aceptados[self.__subyacente]) + str(iterador)
lista_convencional.append(nombre_convencional)
lista_settle.append(nombre_convencional +" - Settle" )
lista_last.append(nombre_convencional + " - Last")
iterador += 1
placeholder = quandl.get(lista_convencional, start_date = self.__tiempo_inicio, end_date = self.__tiempo_terminal)
try:
return placeholder.loc[:,lista_settle].dropna(axis = 0, how = "any")
except:
return placeholder.loc[:,lista_last].dropna(axis = 0, how = "any")
def Cambiar_Diccionario (self, claves, codigos):
if type(claves) is type([1,2]):
if type (codigos) is type([1,2]):
                newdict = dict(zip(claves, codigos))
self.__dict_aceptados.update(newdict)
else:
self.__dict_aceptados.update({str(claves):str(codigos)})
def ver(self):
return self.__dict_aceptados
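# Usage sketch (added note; the API key and dates are placeholders):
#   holder = Dataholder("oro", "2020-01-01", "2020-12-31", api_key="YOUR_QUANDL_KEY", cantidad_contratos=2)
#   settle_prices = holder.Crear()   # DataFrame of settle prices for the first two gold contracts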
``` |
{
"source": "joaquincabezas/pytorch_geometric_temporal",
"score": 2
} |
#### File: pytorch_geometric_temporal/test/dataset_test.py
```python
import numpy as np
import networkx as nx
from torch_geometric_temporal.signal import temporal_signal_split
from torch_geometric_temporal.signal import StaticGraphTemporalSignal
from torch_geometric_temporal.signal import DynamicGraphTemporalSignal
from torch_geometric_temporal.signal import DynamicGraphStaticSignal
from torch_geometric_temporal.dataset import METRLADatasetLoader, PemsBayDatasetLoader, WindmillOutputDatasetLoader
from torch_geometric_temporal.dataset import ChickenpoxDatasetLoader, PedalMeDatasetLoader, WikiMathsDatasetLoader, EnglandCovidDatasetLoader
def get_edge_array(n_count):
return np.array([edge for edge in nx.gnp_random_graph(n_count, 0.1).edges()]).T
def generate_signal(snapshot_count, n_count, feature_count):
edge_indices = [get_edge_array(n_count) for _ in range(snapshot_count)]
edge_weights = [np.ones(edge_indices[t].shape[1]) for t in range(snapshot_count)]
features = [np.random.uniform(0,1,(n_count, feature_count)) for _ in range(snapshot_count)]
return edge_indices, edge_weights, features
def test_dynamic_graph_temporal_signal_real():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_indices, edge_weights, features = generate_signal(250, 100, 32)
targets = [np.random.uniform(0,10,(n_count,)) for _ in range(snapshot_count)]
dataset = DynamicGraphTemporalSignal(edge_indices, edge_weights, features, targets)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
targets = [np.floor(np.random.uniform(0,10,(n_count,))).astype(int) for _ in range(snapshot_count)]
dataset = DynamicGraphTemporalSignal(edge_indices, edge_weights, features, targets)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
def test_static_graph_temporal_signal():
dataset = StaticGraphTemporalSignal(None, None, [None, None],[None, None])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x is None
assert snapshot.y is None
def test_dynamic_graph_temporal_signal():
dataset = DynamicGraphTemporalSignal([None, None], [None, None], [None, None],[None, None])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x is None
assert snapshot.y is None
def test_static_graph_temporal_signal_typing():
dataset = StaticGraphTemporalSignal(None, None, [np.array([1])],[np.array([2])])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x.shape == (1,)
assert snapshot.y.shape == (1,)
def test_dynamic_graph_static_signal_typing():
dataset = DynamicGraphStaticSignal([None], [None], None, [None])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x is None
assert snapshot.y is None
def test_chickenpox():
loader = ChickenpoxDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(3):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 102)
assert snapshot.edge_attr.shape == (102, )
assert snapshot.x.shape == (20, 4)
assert snapshot.y.shape == (20, )
def test_pedalme():
loader = PedalMeDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(3):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 225)
assert snapshot.edge_attr.shape == (225, )
assert snapshot.x.shape == (15, 4)
assert snapshot.y.shape == (15, )
def test_wiki():
loader = WikiMathsDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(1):
for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 27079)
            assert snapshot.edge_attr.shape == (27079, )
            assert snapshot.x.shape == (1068, 8)
            assert snapshot.y.shape == (1068, )
def test_windmill():
loader = WindmillOutputDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
            assert snapshot.edge_index.shape == (2, 97032)
            assert snapshot.edge_attr.shape == (97032, )
            assert snapshot.x.shape == (312, 8)
            assert snapshot.y.shape == (312, )
def test_covid():
loader = EnglandCovidDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
            assert snapshot.edge_index.shape[0] == 2
            assert snapshot.edge_attr.shape[0] == snapshot.edge_index.shape[1]
            assert snapshot.x.shape == (129, 8)
            assert snapshot.y.shape == (129, )
def test_metrla():
loader = METRLADatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 1722)
assert snapshot.edge_attr.shape == (1722, )
assert snapshot.x.shape == (207, 2, 12)
assert snapshot.y.shape == (207, 12)
def test_metrla_task_generator():
loader = METRLADatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset(num_timesteps_in=6, num_timesteps_out=5)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 1722)
assert snapshot.edge_attr.shape == (1722, )
assert snapshot.x.shape == (207, 2, 6)
assert snapshot.y.shape == (207, 5)
def test_pemsbay():
loader = PemsBayDatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 2694)
assert snapshot.edge_attr.shape == (2694, )
assert snapshot.x.shape == (325, 2, 12)
assert snapshot.y.shape == (325, 2, 12)
def test_pemsbay_task_generator():
loader = PemsBayDatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset(num_timesteps_in=6, num_timesteps_out=5)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 2694)
assert snapshot.edge_attr.shape == (2694, )
assert snapshot.x.shape == (325, 2, 6)
assert snapshot.y.shape == (325, 2, 5)
def test_discrete_train_test_split_static():
loader = ChickenpoxDatasetLoader()
dataset = loader.get_dataset()
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape == (2, 102)
assert snapshot.edge_attr.shape == (102, )
assert snapshot.x.shape == (20, 4)
assert snapshot.y.shape == (20, )
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape == (2, 102)
assert snapshot.edge_attr.shape == (102, )
assert snapshot.x.shape == (20, 4)
assert snapshot.y.shape == (20, )
def test_discrete_train_test_split_dynamic():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_indices, edge_weights, features = generate_signal(250, 100, 32)
targets = [np.random.uniform(0,10,(n_count,)) for _ in range(snapshot_count)]
dataset = DynamicGraphTemporalSignal(edge_indices, edge_weights, features, targets)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
def test_train_test_split_dynamic_graph_static_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_indices, edge_weights, features = generate_signal(snapshot_count, n_count, feature_count)
targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
dataset = StaticGraphTemporalSignal(edge_indices[0], edge_weights[0], features, targets)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
def test_discrete_train_test_split_dynamic_graph_static_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_indices, edge_weights, features = generate_signal(snapshot_count, n_count, feature_count)
feature = features[0]
targets = [np.random.uniform(0,10,(n_count,)) for _ in range(snapshot_count)]
dataset = DynamicGraphStaticSignal(edge_indices, edge_weights, feature, targets)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100, )
``` |
{
"source": "joaquingb1993/cv-joaquin-galvez-blanco",
"score": 2
} |
#### File: CV/core/views.py
```python
from django.shortcuts import render, HttpResponse
from .models import Project
# Create your views here.
def home(request):
projects = Project.objects.all()
return render(request,"core/home.html",{'projects':projects})
def about(request):
return render(request,"core/about.html")
def portfolio(request):
return render(request, "core/portfolio.html")
def contacto(request):
return render(request, "core/contacto.html")
``` |
{
"source": "JOAQUINGR/DNAshapeValues",
"score": 3
} |
#### File: DNAshapeValues/scripts/bedcreation.py
```python
import pandas
import re
import sys
fasta = sys.argv[1]
finalname = str(sys.argv[2])
def correction(line):
regular = re.match(r"(.+):([0-9]+)-([0-9]+)", line)
return regular.group(1), regular.group(2), regular.group(3)
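# Each FASTA header is expected to look like 'chr:start-end'; the regex groups above become the
# first three BED columns (chrom, start, end), and the full header is kept as the name column.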
def create_files(filetype, dataframe):
with open(filetype+'.bed', 'w') as finalbed:
for index, row in dataframe.iterrows():
finalbed.write('{}\t{}\t{}\t{}\n'.format(row.chr, row.start, row.end, row.id))
with open(fasta, 'r') as totalfastafile:
df = pandas.read_csv(totalfastafile, sep='>', names=['sequence', 'id'])
dfid = df['id'].dropna().reset_index(drop=True)
dfsequence = df['sequence'].dropna().reset_index(drop=True)
dfasta = pandas.merge(dfid, dfsequence, left_index=True, right_index=True)
df = dfasta
df['chr'], df['start'], df['end'] = zip(*df['id'].map(correction))
create_files(finalname, df)
``` |
{
"source": "joaquingv12/Solving-Image-Processing-Problems-with-Python-Part1",
"score": 4
} |
#### File: Chapter_02/codes/Chapter_02.py
```python
from scipy.ndimage import rotate
from skimage.io import imread
import matplotlib.pylab as plt
im = imread('images/Img_02_04.jpg')
im = rotate(im, -45)
plt.figure(figsize=(5,5))
plt.imshow(im)
plt.axis('off') # stop showing the axes
plt.show()
# ### 1.2 Flipping and Flopping an image with *numpy*
import matplotlib.pyplot as plt
import numpy as np
im = plt.imread('images/Img_02_42.jpg')
im_flipped = np.flipud(im)
plt.figure(figsize=(10, 12))
plt.subplot(211), plt.imshow(im), plt.axis('off'), plt.title('original', size=20)
plt.subplot(212), plt.imshow(im_flipped), plt.axis('off'), plt.title('flipped', size=20) #np.fliplr(im)
plt.show()
im = plt.imread('images/Img_02_43.jpeg')
im_flipped = np.fliplr(im)
plt.figure(figsize=(15, 12))
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original', size=20)
plt.subplot(122), plt.imshow(im_flipped), plt.axis('off'), plt.title('flopped', size=20) #np.fliplr(im)
plt.show()
# ### 1.3 Applying Affine Transformation with *scipy.ndimage*
from skimage.io import imread
from scipy.ndimage import affine_transform
import numpy as np
import matplotlib.pylab as plt
im = imread("images/Img_02_01.jpg")
rot_mat = np.array([[np.cos(np.pi/4),np.sin(np.pi/4), 0],[-np.sin(np.pi/4),np.cos(np.pi/4), 0], [0,0,1]])
shr_mat = np.array([[1, 0.45, 0], [0, 0.75, 0], [0, 0, 1]])
transformed = affine_transform(im, rot_mat@shr_mat, offset=[-im.shape[0]/4+25, im.shape[1]/2-50, 0], output_shape=im.shape)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('Input image', size=20)
plt.subplot(122), plt.imshow(transformed), plt.axis('off'), plt.title('Output image', size=20)
plt.show()
# ## 2. Implement Image Transformation with Warping / Inverse Warping using scikit-image and scipy.ndimage
# ### 2.1 Applying translation on an image using scikit-image warp
from skimage.io import imread
from skimage.transform import warp
import matplotlib.pylab as plt
def translate(xy, t_x, t_y):
xy[:, 0] -= t_y
xy[:, 1] -= t_x
return xy
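# Note: skimage.transform.warp performs inverse warping, so the map function receives output
# (destination) coordinates and must return the input coordinates to sample from; subtracting
# (t_x, t_y) therefore shifts the image content by +(t_x, t_y).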
im = imread('images/Img_02_01.jpg')
im = warp(im, translate, map_args={'t_x':-250, 't_y':200}) # create a dictionary for translation parameters
plt.imshow(im)
plt.title('Translated image', size=20)
plt.show()
# ### 2.2 Implementing the Swirl transformation using scikit-image warp
def swirl(xy, x0, y0, R):
r = np.sqrt((xy[:,1]-x0)**2 + (xy[:,0]-y0)**2)
a = np.pi*r / R
xy[:, 1] = (xy[:, 1]-x0)*np.cos(a) + (xy[:, 0]-y0)*np.sin(a) + x0
xy[:, 0] = -(xy[:, 1]-x0)*np.sin(a) + (xy[:, 0]-y0)*np.cos(a) + y0
return xy
im = imread('images/Img_02_02.jpg')
print(im.shape)
im1 = warp(im, swirl, map_args={'x0':220, 'y0':360, 'R':650})
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('Input image', size=20)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('Output image', size=20)
plt.show()
# ### 2.3 Implementing Swirl Transform using *scipy.ndimage*
from scipy import ndimage as ndi
from skimage.io import imread
from skimage.color import rgb2gray
import matplotlib.pylab as plt, numpy as np
def apply_swirl(xy, x0, y0, R):
r = np.sqrt((xy[1]-x0)**2 + (xy[0]-y0)**2)
a = np.pi*r / R
return ((xy[1]-x0)*np.cos(a) + (xy[0]-y0)*np.sin(a) + x0, -(xy[1]-x0)*np.sin(a) + (xy[0]-y0)*np.cos(a) + y0)
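# scipy.ndimage.geometric_transform works the same way: apply_swirl maps each output pixel back
# to the input pixel it should sample from (inverse mapping), one coordinate tuple at a time.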
im = rgb2gray(imread('images/Img_02_06.jpg'))
print(im.shape)
im1 = ndi.geometric_transform(im, apply_swirl, extra_arguments=(100, 100, 250))
plt.figure(figsize=(20,10))
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('Input image', size=20)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('Output image', size=20)
plt.show()
# ### 2.4 Implementing Elastic Deformation
import numpy as np
import matplotlib.pylab as plt
from skimage.color import rgb2gray
from scipy.ndimage import gaussian_filter, map_coordinates
def elastic_transform(image, alpha, sigma):
random_state = np.random.RandomState(None)
h, w = image.shape
dx = gaussian_filter((random_state.rand(*image.shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*image.shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(w), np.arange(h))
indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
distorted_image = map_coordinates(image, indices, order=1, mode='reflect')
return distorted_image.reshape(image.shape)
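# The deformation draws random displacement fields dx, dy, smooths them with a Gaussian of width
# sigma, scales them by alpha, and resamples the image at the displaced coordinates with
# map_coordinates (order=1, i.e. bilinear interpolation).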
img = rgb2gray(plt.imread('images/Img_02_22.png'))
img1 = elastic_transform(img, 100, 4)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(img), plt.axis('off'), plt.title('Original', size=20)
plt.subplot(122), plt.imshow(img1), plt.axis('off'), plt.title('Deformed', size=20)
plt.tight_layout()
plt.show()
# ## 3. Image Projection with Homography using scikit-image
from skimage.transform import ProjectiveTransform
from skimage.io import imread
import numpy as np
import matplotlib.pylab as plt
from matplotlib.path import Path
im_src = imread('images/Img_02_04.jpg')
im_dst = imread('images/Img_02_03.jpg')
print(im_src.shape, im_dst.shape)
pt = ProjectiveTransform()
width, height = im_src.shape[0], im_src.shape[1]
src = np.array([[ 0., 0.],
[height-1, 0.],
[height-1, width-1],
[ 0., width-1]])
dst = np.array([[ 74., 41.],
[ 272., 96.],
[ 272., 192.],
[ 72., 228.]])
pt.estimate(src, dst)
width, height = im_dst.shape[0], im_dst.shape[1]
polygon = dst
poly_path = Path(polygon)
x, y = np.mgrid[:height, :width]
coors = np.hstack((x.reshape(-1, 1), y.reshape(-1,1)))
mask = poly_path.contains_points(coors)
mask = mask.reshape(height, width)
dst_indices = np.array([list(x) for x in list(zip(*np.where(mask > 0)))])
#print(dst_indices)
src_indices = np.round(pt.inverse(dst_indices), 0).astype(int)
src_indices[:,0], src_indices[:,1] = src_indices[:,1], src_indices[:,0].copy()
im_out = np.copy(im_dst)
im_out[dst_indices[:,1], dst_indices[:,0]] = im_src[src_indices[:,0], src_indices[:,1]]
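# Inverse warping: every destination pixel inside the quadrilateral is mapped back through the
# estimated projective transform to the source pixel it should copy, which avoids holes in the output.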
plt.figure(figsize=(30,10))
plt.subplot(131), plt.imshow(im_src, cmap='gray'), plt.axis('off'), plt.title('Source image', size=30)
plt.subplot(132), plt.imshow(im_dst, cmap='gray'), plt.axis('off'), plt.title('Destination image', size=30)
plt.subplot(133), plt.imshow(im_out, cmap='gray'), plt.axis('off'), plt.title('Output image', size=30)
plt.tight_layout()
plt.show()
# ## 4. Detecting Colors and Changing Colors of Objects with opencv-python
import cv2
import numpy as np
import matplotlib.pylab as plt
img = cv2.imread("images/Img_02_05.png")
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 70, 25), (15, 255, 255))
imask = mask>0
brown = np.zeros_like(img)
brown[imask] = img[imask]
black = img.copy()
hsv[...,0:3] = hsv[...,0:3] / 3
black[imask] = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[imask]
black = np.clip(black, 0, 255)
plt.figure(figsize=(20,10))
plt.subplots_adjust(0,0,1,0.9,0.01,0.075)
plt.subplot(131), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)), plt.axis('off'), plt.title('original', size=20)
plt.subplot(132), plt.imshow(cv2.cvtColor(brown, cv2.COLOR_BGR2RGB)), plt.axis('off'), plt.title('only horse', size=20)
plt.subplot(133), plt.imshow(cv2.cvtColor(black, cv2.COLOR_BGR2RGB)), plt.axis('off'), plt.title('horse color changed', size=20)
plt.suptitle('Detecting and changing object colors with opencv-python', size=25)
plt.show()
# ### Detecting Covid-19 Virus Objects with Colors in HSV colorspace
img = cv2.cvtColor(cv2.imread('covid_19_blood.jpg'), cv2.COLOR_BGR2RGB)
img_hsv=cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
low_green = np.array([30, 25, 10])
high_green = np.array([80, 255, 255])
green_mask = cv2.inRange(img_hsv, low_green, high_green)
green = cv2.bitwise_and(img, img, mask=green_mask)
output_img = img.copy()
output_img[np.where(green_mask==0)] = (0,0,0)
plt.figure(figsize=(20, 8))
plt.gray()
plt.subplots_adjust(0,0,1,0.975,0.05,0.05)
plt.subplot(131), plt.imshow(img), plt.axis('off'), plt.title('original', size=20)
plt.subplot(132), plt.imshow(green_mask), plt.axis('off'), plt.title('mask', size=20)
plt.subplot(133), plt.imshow(output_img), plt.axis('off'), plt.title('covid-19 virus cells', size=20)
plt.suptitle('Filtering out the covid-19 virus cells', size=30)
plt.show()
# ## 5. Finding Duplicate and Similar Images with Hashing
# ### 5.1 Using Cryptographic (MD5) Hash functions to find duplicate images with hashlib
import hashlib, os
from glob import glob
import matplotlib.pylab as plt
from skimage.io import imread
from time import time
hex_digest = hashlib.md5(open('images/Img_02_01.jpg', 'rb').read()).hexdigest()
bin_digest = format(int(str(hex_digest), 16), "040b")
print('MD5 digest = {} ({})'.format(hex_digest, bin_digest) )
print('length of hex digest = {} bytes'.format(len(hex_digest)))
print('length of bin digest = {} bits'.format(len(bin_digest)))
def find_duplicates(dir_name):
def is_image(file_name):
f = file_name.lower()
return f.endswith(".png") or f.endswith(".jpg")
hash_keys = dict()
for file_name in glob(dir_name):
if os.path.isfile(file_name) and is_image(file_name):
with open(file_name, 'rb') as f:
file_hash = hashlib.md5(f.read()).hexdigest()
if file_hash not in hash_keys:
hash_keys[file_hash] = [file_name]
else:
hash_keys[file_hash].append(file_name)
return [hash_keys[file_hash] for file_hash in hash_keys if len(hash_keys[file_hash]) > 1]
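# An MD5 digest only matches byte-identical files, so this finds exact duplicates; the
# perceptual-hash approach below also finds visually similar (resized or re-encoded) images.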
def show_duplicates(duplicates):
for duplicated in duplicates:
try:
plt.figure(figsize=(20,10))
plt.subplots_adjust(0,0,1,0.9,0.05,0.05)
for (i, file_name) in enumerate(duplicated):
plt.subplot(1, len(duplicated), i+1)
plt.imshow(imread(file_name))
plt.title(file_name, size=20)
plt.axis('off')
plt.suptitle('{} duplicate images found with MD5 hash'.format(len(duplicated)), size=30)
plt.show()
except OSError as e:
continue
duplicates = find_duplicates('images/*.*')
print(duplicates)
show_duplicates(duplicates)
# ### 5.2 Using Perceptual Hash function (phash) to find similar images using imagehash
#!pip install imagehash
from PIL import Image
import imagehash
from time import time
import os
from glob import glob
import matplotlib.pylab as plt
def plot_images_to_compare(imfile1, imfile2, hashfunc = imagehash.phash):
img1, img2 = Image.open(imfile1), Image.open(imfile2)
print('sizes of images = {}, {}'.format(img1.size, img2.size))
hash1 = hashfunc(img1)
hash2 = hashfunc(img2)
plt.figure(figsize=(20,10))
plt.subplots_adjust(0,0,1,0.95,0.01,0.01)
plt.subplot(121), plt.imshow(img1), plt.title(str(hash1), size=20), plt.axis('off')
plt.subplot(122), plt.imshow(img2), plt.title(str(hash2), size=20), plt.axis('off')
plt.show()
print('hash1 = {} ({}), length = {} bits'.format(format(int(str(hash1), 16), "040b"), str(hash1), len(format(int(str(hash1), 16), "040b"))))
print('hash2 = {} ({}), length = {} bits'.format(format(int(str(hash2), 16), "040b"), str(hash2), len(format(int(str(hash2), 16), "040b"))))
print('hamming distance =', hash1 - hash2)
plot_images_to_compare('images/Img_02_31.jpg', 'images/Img_02_32.jpg')
plot_images_to_compare('images/Img_02_31.jpg', 'images/Img_02_43.png')
plot_images_to_compare('images/similar/Img_02_41.jpg', 'images/similar/Img_02_41.png')
plot_images_to_compare('images/Img_02_31.jpg', 'images/Img_02_35.jpg')
def preprocess_images(dir_name, hashfunc = imagehash.phash):
image_filenames = sorted(glob(dir_name))
print('number of images to process = {}'.format(len(image_filenames)))
images = {}
for img_file in sorted(image_filenames):
hash = hashfunc(Image.open(img_file))
images[hash] = images.get(hash, []) + [img_file]
for hash in images:
images[hash] = np.array(images[hash])
return images
def query_k_similar_images(image_file, images, k=3, hashfunc = imagehash.phash):
hash = hashfunc(Image.open(image_file))
hamming_dists = np.zeros(len(images))
image_files = np.array(list(images.values()))
hash_values = list(images.keys())
for i in range(len(image_files)):
hamming_dists[i] = hash - hash_values[i]
indices = np.argsort(hamming_dists)
return np.hstack(image_files[indices][:k]), hamming_dists[indices][:k]
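# imagehash overloads '-' to return the Hamming distance between the binary hashes; smaller
# distances mean more similar images, so sorting by distance ranks the k nearest matches.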
start = time()
images = preprocess_images('images/*.*')
end = time()
print('processing time = {} seconds'.format(end-start))
def plot_query_returned_images(query, returned):
n = 1 + len(returned)
plt.figure(figsize=(20,8))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
plt.subplot(1,n,1), plt.imshow(Image.open(query)), plt.title('query image', size=20), plt.axis('off')
for i in range(len(returned)):
plt.subplot(1,n,i+2), plt.imshow(Image.open(returned[i])), plt.title('returned image {}'.format(i+1), size=20)
plt.axis('off')
plt.show()
query = 'images/Img_02_39.jpg'
found, dists = query_k_similar_images(query, images, k=4)
dists
plot_query_returned_images(query, found)
```
#### File: Chapter_05/code/Chapter_05.py
```python
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import matplotlib.pylab as plt
from PIL import Image, ImageFilter
from copy import deepcopy
def plot_image(image, title=None, sz=20):
plt.imshow(image)
plt.title(title, size=sz)
plt.axis('off')
def add_noise(im, prop_noise, salt=True, pepper=True):
im = deepcopy(im)
n = int(im.width * im.height * prop_noise)
x, y = np.random.randint(0, im.width, n), np.random.randint(0, im.height, n)
for (x,y) in zip(x,y):
im.putpixel((x, y), # generate salt-and-pepper noise
((0,0,0) if np.random.rand() < 0.5 else (255,255,255)) if salt and pepper \
else (255,255,255) if salt \
else (0, 0, 0)) # if pepper
return im
orig = Image.open('images/Img_05_01.jpg')
i = 1
plt.figure(figsize=(12,35))
for prop_noise in np.linspace(0.05,0.3,6):
# choose random locations inside image
im = add_noise(orig, prop_noise)
plt.subplot(6,2,i), plot_image(im, 'Original Image with ' + str(int(100*prop_noise)) + '% added noise')
im1 = im.filter(ImageFilter.BLUR)
plt.subplot(6,2,i+1), plot_image(im1, 'Blurred Image')
i += 2
plt.show()
# ### 1.2 Gaussian BLUR Filter to remove Salt & Pepper Noise
im = Image.open('images/Img_05_01.jpg')
im = add_noise(im, prop_noise = 0.2)
plt.figure(figsize=(20,15))
i = 1
for radius in np.linspace(1, 3, 12):
im1 = im.filter(ImageFilter.GaussianBlur(radius))
plt.subplot(3,4,i)
plot_image(im1, 'radius = ' + str(round(radius,2)))
i += 1
plt.suptitle('PIL Gaussian Blur with different Radius', size=30)
plt.show()
# ### 1.3 Median Filter to remove Salt & Pepper Noise
im = Image.open('images/Img_05_02.jpg')
im = add_noise(im, prop_noise = 0.1)
plt.figure(figsize=(20,10))
plt.subplot(1,4,1)
plot_image(im, 'Input noisy image')
i = 2
for sz in [3,7,11]:
im1 = im.filter(ImageFilter.MedianFilter(size=sz))
plt.subplot(1,4,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')', 20)
i += 1
plt.tight_layout()
plt.show()
# ### 1.4 Max, Min and Mode filters to remove outliers from image
# #### Min filter
orig = Image.open('images/Img_05_11.jpg')
im = add_noise(orig, prop_noise = 0.2, pepper=False)
plt.figure(figsize=(20,10))
plt.subplot(1,4,1)
plot_image(im, 'Input noisy image')
i = 2
for sz in [3,7,11]:
im1 = im.filter(ImageFilter.MinFilter(size=sz))
plt.subplot(1,4,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')')
i += 1
plt.tight_layout()
plt.show()
# #### Max filter
im = add_noise(orig, prop_noise = 0.3, salt=False)
plt.figure(figsize=(20,10))
plt.subplot(1,4,1)
plot_image(im, 'Input noisy image')
i = 2
for sz in [3,7,11]:
im1 = im.filter(ImageFilter.MaxFilter(size=sz))
plt.subplot(1,4,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')')
i += 1
plt.show()
# #### Mode filter
orig = Image.open('images/Img_05_20.jpg')
im = add_noise(orig, prop_noise = 0.1)
plt.figure(figsize=(20,20))
plt.subplot(1,3,1)
plot_image(im, 'Input noisy image', 25)
i = 2
for sz in [3,5]:
im1 = im.filter(ImageFilter.ModeFilter(size=sz))
plt.subplot(1,3,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')', 25)
i += 1
plt.tight_layout()
plt.show()
# ### 1.5 Progressive Application of Gaussian Blur, Median, Mode and Max Filters on an image
im = Image.open('images/Img_05_02.jpg')
plt.figure(figsize=(10,15))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
im1 = im.copy()
sz = 5
for i in range(8):
im1 = im1.filter(ImageFilter.GaussianBlur(radius=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+1), plot_image(im1, 'Gaussian Blur' if i == 0 else None, 25)
im1 = im.copy()
for i in range(8):
im1 = im1.filter(ImageFilter.MedianFilter(size=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+2), plot_image(im1, 'Median' if i == 0 else None, 25)
im1 = im.copy()
for i in range(8):
im1 = im1.filter(ImageFilter.ModeFilter(size=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+3), plot_image(im1, 'Mode' if i == 0 else None, 25)
im1 = im.copy()
for i in range(8):
im1 = im1.filter(ImageFilter.MaxFilter(size=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+4), plot_image(im1, 'Max' if i == 0 else None, 25)
plt.show()
# ## 2. Unsharp masking to Sharpen an Image
# ### 2.1 With scikit-image filters module
#! pip install --upgrade scikit-image
#import skimage
#skimage.filters.__all__
import numpy as np
import matplotlib.pylab as plt
from skimage.io import imread
from skimage.filters import unsharp_mask
im = imread('images/Img_05_04.jpg')
im1 = unsharp_mask(im, radius=1, amount=1)
im2 = unsharp_mask(im, radius=5, amount=2)
im3 = unsharp_mask(im, radius=20, amount=3)
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(20, 12))
axes = axes.ravel()
axes[0].set_title('Original image', size=20), axes[0].imshow(im)
axes[1].set_title('Enhanced image, radius=1, amount=1.0', size=20), axes[1].imshow(im1)
axes[2].set_title('Enhanced image, radius=5, amount=2.0', size=20), axes[2].imshow(im2)
axes[3].set_title('Enhanced image, radius=20, amount=3.0', size=20), axes[3].imshow(im3)
for ax in axes:
ax.axis('off')
fig.tight_layout()
plt.show()
# ### 2.2 With PIL ImageFilter module
from PIL import Image, ImageFilter
im = Image.open('images/Img_05_05.jpg')
plt.figure(figsize=(15,16))
plt.subplot(221), plot_image(im, 'original')
im1 = im.filter(ImageFilter.UnsharpMask(radius=2, percent=150))
plt.subplot(222), plot_image(im1, 'unsharp masking, radius=2, percent=150')
im1 = im.filter(ImageFilter.UnsharpMask(radius=5, percent=200))
plt.subplot(223), plot_image(im1, 'unsharp masking, radius=5, percent=200')
im1 = im.filter(ImageFilter.UnsharpMask(radius=10, percent=250))
plt.subplot(224), plot_image(im1, 'unsharp masking, radius=10, percent=250')
plt.tight_layout()
plt.show()
# ### 2.3 Laplacian Sharpening with SimpleITK
import SimpleITK as sitk
import numpy as np
import matplotlib.pylab as plt
image = sitk.ReadImage('images/Img_05_20.jpg', sitk.sitkFloat32)
filt = sitk.UnsharpMaskImageFilter()
filt.SetAmount(1.5) # typically set between 1 and 2
filt.SetSigmas(0.15)
sharpened = filt.Execute(image)
np_image = sitk.GetArrayFromImage(image)
np_image = np_image / np_image.max()
np_sharpened = sitk.GetArrayFromImage(sharpened)
np_sharpened = np_sharpened / np_sharpened.max()
plt.figure(figsize=(20,10))
plt.gray()
plt.subplots_adjust(0,0,1,1,0.05,0.05)
plt.subplot(121), plot_image(np_image, 'Original Image')
plt.subplot(122), plot_image(np_sharpened, 'Sharpened Image (with UnsharpMask)')
plt.show()
# ### 2.4 Implementing Unsharp Mask with opencv-python
import cv2
im = cv2.imread("images/Img_05_13.png")
im_smoothed = cv2.GaussianBlur(im, (11,11), 10, 10)
im1 = cv2.addWeighted(im, 1.0 + 3.0, im_smoothed, -3.0, 0) # im1 = im + 3.0*(im - im_smoothed)
plt.figure(figsize=(20,25))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
plt.subplot(211), plot_image(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), 'Original Image')
plt.subplot(212), plot_image(cv2.cvtColor(im1, cv2.COLOR_BGR2RGB), 'Sharpened Image')
plt.show()
# ## 3. Averaging of Images to remove Random Noise
from skimage import img_as_float
from skimage.util import random_noise
from skimage.metrics import peak_signal_noise_ratio
from skimage.io import imread
import matplotlib.pylab as plt
import numpy as np
im = img_as_float(imread('images/Img_05_06.jpg')) # original image
n = 100
images = np.zeros((n, im.shape[0], im.shape[1], im.shape[2]))
sigma = 0.2
for i in range(n):
images[i,...] = random_noise(im, var=sigma**2)
im_mean = images.mean(axis=0)
im_median = np.median(images, axis=0)
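# Averaging n independent noisy realizations reduces the noise standard deviation by roughly a
# factor of sqrt(n), which is why the mean/median images reach a much higher PSNR below.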
plt.figure(figsize=(10,10))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05, hspace=.01)
plt.subplot(221), plot_image(im, 'Original image')
plt.subplot(222), plot_image(images[0], 'Noisy PSNR: ' + str(round(peak_signal_noise_ratio(im, images[0]),3)))
plt.subplot(223), plot_image(im_mean, 'Mean PSNR: ' + str(round(peak_signal_noise_ratio(im, im_mean),3)))
plt.subplot(224), plot_image(im_median, 'Median PSNR: ' + str(round(peak_signal_noise_ratio(im, im_median),3)))
plt.show()
plt.figure(figsize=(10,5))
plt.hist(images[:,100,100,0], color='red', alpha=0.2, label='red')
plt.hist(images[:,100,100,1], color='green', alpha=0.2, label='green')
plt.hist(images[:,100,100,2], color='blue', alpha=0.2, label='blue')
plt.vlines(im[100,100,0], 0, 20, color='red', label='original')
plt.vlines(im[100,100,1], 0, 20, color='green', label='original')
plt.vlines(im[100,100,2], 0, 20, color='blue', label='original')
plt.vlines(im_mean[100,100,0], 0, 20, color='red', linestyles='dashed', label='estimated')
plt.vlines(im_mean[100,100,1], 0, 20, color='green', linestyles='dashed', label='estimated')
plt.vlines(im_mean[100,100,2], 0, 20, color='blue', linestyles='dashed', label='estimated')
plt.legend()
plt.grid()
plt.show()
# ## 4. Image Denoising with Curvature-Driven Algorithms
import SimpleITK as sitk
import matplotlib.pylab as plt
img = sitk.ReadImage('images/Img_05_11.png', sitk.sitkFloat64)
normfilter = sitk.NormalizeImageFilter()
caster = sitk.CastImageFilter()
caster.SetOutputPixelType(sitk.sitkFloat64)
tkfilter = sitk.ShotNoiseImageFilter()
tkfilter.SetScale(0.2)
img_noisy = tkfilter.Execute (img)
img_noisy = sitk.RescaleIntensity(img_noisy)
tkfilter = sitk.CurvatureFlowImageFilter()
tkfilter.SetNumberOfIterations(50)
tkfilter.SetTimeStep(0.1)
img_res_TK = tkfilter.Execute(img_noisy)
tkfilter = sitk.MinMaxCurvatureFlowImageFilter()
tkfilter.SetNumberOfIterations(50)
tkfilter.SetTimeStep(0.1)
tkfilter.SetStencilRadius(4)
img_res_TK1 = tkfilter.Execute(img_noisy)
img_res_TK1 = sitk.RescaleIntensity(img_res_TK1)
# #### Anisotropic Diffusion
tkfilter = sitk.CurvatureAnisotropicDiffusionImageFilter()
tkfilter.SetNumberOfIterations(100)
tkfilter.SetTimeStep(0.05)
tkfilter.SetConductanceParameter(3)
img_res_TK2 = tkfilter.Execute(img_noisy)
#img_res_TK1 = sitk.RescaleIntensity(img_res_TK1)
tkfilter = sitk.GradientAnisotropicDiffusionImageFilter()
tkfilter.SetNumberOfIterations(100)
tkfilter.SetTimeStep(0.05)
tkfilter.SetConductanceParameter(3)
img_res_TK3 = tkfilter.Execute(img_noisy)
plt.figure(figsize=(16,20))
plt.gray()
plt.subplots_adjust(0,0,1,1,0.01,0.05)
plt.subplot(321), plt.imshow(sitk.GetArrayFromImage(img)), plt.axis('off'), plt.title('Original', size=20)
plt.subplot(322), plt.imshow(sitk.GetArrayFromImage(img_noisy)), plt.axis('off'), plt.title('Noisy (with added Shot Noise)', size=20)
plt.subplot(323), plt.imshow(sitk.GetArrayFromImage(img_res_TK)), plt.axis('off'), plt.title('Denoised (with CurvatureFlowImageFilter)', size=20)
plt.subplot(324), plt.imshow(sitk.GetArrayFromImage(img_res_TK1)), plt.axis('off'), plt.title('Denoised (with MinMaxCurvatureFlowImageFilter)', size=20)
plt.subplot(325), plt.imshow(sitk.GetArrayFromImage(img_res_TK2)), plt.axis('off'), plt.title('Denoised (with CurvatureAnisotropicDiffusionImageFilter)', size=20)
plt.subplot(326), plt.imshow(sitk.GetArrayFromImage(img_res_TK3)), plt.axis('off'), plt.title('Denoised (with GradientAnisotropicDiffusionImageFilter)', size=20)
plt.show()
# ## 5. Contrast Strectching / Histogram Equalization with opencv-python
import numpy as np
import matplotlib.pylab as plt
import cv2
def plot_hist(img, col='r'):
hist,bins = np.histogram(img.flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()
plt.plot(cdf_normalized, color = col)
plt.hist(img.flatten(),256,[0,256], color = col, alpha = 0.1)
plt.xlim([0,256])
plt.title('CDF and histogram of the color channels', size=20)
#plt.legend(('cdf','histogram'), loc = 'upper left')
return bins, cdf
def plot_img_hist(img, title):
plt.figure(figsize=(20,10))
plt.subplot(121), plot_image(img, title)
plt.subplot(122), plot_hist(img[...,0], 'r'), plot_hist(img[...,1], 'g'), plot_hist(img[...,2], 'b')
plt.show()
img = cv2.imread('images/Img_05_07.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img2 = img.copy()
for i in range(3):
hist,bins = np.histogram(img[...,i].flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_m = np.ma.masked_equal(cdf,0)
cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
#cdf_m = 255 * cdf / cdf[-1] # normalize
cdf = np.ma.filled(cdf_m,0).astype('uint8')
img2[...,i] = cdf[img[...,i]]
# use linear interpolation of cdf to find new pixel values
#img2[...,i] = np.reshape(np.interp(img[...,i].flatten(),bins[:-1],cdf), img[...,i].shape)
img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
equ = img_lab.copy()
equ[...,0] = cv2.equalizeHist(equ[...,0])
equ = np.clip(cv2.cvtColor(equ, cv2.COLOR_LAB2RGB), 0, 255)
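# Equalizing only the L (lightness) channel in LAB space improves contrast while largely
# preserving the original colors; equalizing R, G and B separately (img2 above) can shift hues.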
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl = img_lab.copy()
cl[...,0] = clahe.apply(cl[...,0])
cl = np.clip(cv2.cvtColor(cl, cv2.COLOR_LAB2RGB), 0, 255)
plot_img_hist(img, 'Original Image')
plot_img_hist(img2, 'Hist. Equalized')
plot_img_hist(equ, 'Hist. Equalized (LAB space)')
plot_img_hist(cl, 'Adaptive Hist. Equalized (LAB space)')
# ## 6. Fingerprint Cleaning and Minutiaes extraction
# ### 6.1 Fingerprint Cleaning with Morphological operations
from skimage.io import imread
from skimage.color import rgb2gray
import numpy as np
import matplotlib.pylab as plt
from skimage.morphology import binary_opening, binary_closing, skeletonize, square
from scipy.ndimage import morphological_gradient
from skimage.filters import threshold_otsu
im = rgb2gray(imread('images/Img_05_09.jpg'))
im[im <= 0.5] = 0 # binarize
im[im > 0.5] = 1
im_o = binary_opening(im, square(2))
im_c = binary_closing(im, square(2))
im_oc = binary_closing(binary_opening(im, square(2)), square(3))
im_s = skeletonize(im_oc)
im_g = morphological_gradient(im_oc.astype(np.uint8), size=(2,2))
plt.figure(figsize=(20,12))
plt.gray()
plt.subplot(231), plot_image(im, 'original')
plt.subplot(232), plot_image(im_o, 'opening')
plt.subplot(233), plot_image(im_c, 'closing')
plt.subplot(234), plot_image(im_oc, 'opening + closing')
plt.subplot(235), plot_image(im_s, 'skeletonizing')
plt.subplot(236), plot_image(im_g, 'morphological gradient')
plt.show()
# ### 6.2 Feature (Minutiaes) extraction from an enhanced fingerprint
from PIL import Image, ImageDraw
cells = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
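# Crossing-number method: walk the 8-neighbourhood of a ridge pixel and count the 0->1 transitions;
# a crossing number of 1 marks a ridge ending and 3 marks a bifurcation.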
def minutiae_at(pixels, i, j):
values = [pixels[i + k][j + l] for k, l in cells]
crossings = 0
for k in range(0, 8):
crossings += abs(values[k] - values[k + 1])
crossings /= 2
if pixels[i][j] == 1:
if crossings == 1:
return "ending"
if crossings == 3:
return "bifurcation"
return "none"
def calculate_minutiaes(im):
pixels = 255 - np.array(im).T
pixels = 1.0*(pixels > 10)
(x, y) = im.size
result = im.convert("RGB")
draw = ImageDraw.Draw(result)
colors = {"ending" : (150, 0, 0), "bifurcation" : (0, 150, 0)}
ellipse_size = 2
for i in range(1, x - 1):
for j in range(1, y - 1):
minutiae = minutiae_at(pixels, i, j)
if minutiae != "none":
draw.ellipse([(i - ellipse_size, j - ellipse_size), (i + ellipse_size, j + ellipse_size)], outline = colors[minutiae])
del draw
return result
im = Image.open('images/Img_05_10.jpg').convert("L") # convert to grayscale
out = calculate_minutiaes(im)
plt.figure(figsize=(15,12))
plt.gray()
plt.subplot(121), plot_image(im, 'input thinned')
plt.subplot(122), plot_image(out, 'with minutiaes extracted')
plt.show()
# ## 7. Edge Detection with LOG / Zero-Crossing, Canny vs. Holistically-Nested
# ### 7.0 Computing the Image Derivatives
from scipy.signal import convolve
from skimage.io import imread
from skimage.color import rgb2gray
img = rgb2gray(imread('images/Img_05_38.png'))
h, w = img.shape
kd1 = [[1, -1]]
kd2 = [[1, -2, 1]]
imgd1 = convolve(img, kd1, mode='same')
imgd2 = convolve(img, kd2, mode='same')
plt.figure(figsize=(20,10))
plt.gray()
plt.subplot(231), plt.imshow(img), plt.title('image', size=15)
plt.subplot(232), plt.imshow(imgd1), plt.title('1st derivative', size=15)
plt.subplot(233), plt.imshow(imgd2), plt.title('2nd derivative', size=15)
plt.subplot(234), plt.plot(range(w), img[0,:]), plt.title('image function', size=15)
plt.subplot(235), plt.plot(range(w), imgd1[0,:]), plt.title('1st derivative function', size=15)
plt.subplot(236), plt.plot(range(w), imgd2[0,:]), plt.title('2nd derivative function', size=15)
plt.show()
# ### 7.1 With LoG / Zero-Crossing
import numpy as np
from scipy import ndimage
from skimage.io import imread
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
def any_neighbor_neg(img, i, j):
for k in range(-1,2):
for l in range(-1,2):
if img[i+k, j+l] < 0:
return True, img[i, j] - img[i+k, j+l]
return False, None
def zero_crossing(img, th):
out_img = np.zeros(img.shape)
for i in range(1,img.shape[0]-1):
for j in range(1,img.shape[1]-1):
found, slope = any_neighbor_neg(img, i, j)
if img[i,j] > 0 and found and slope > th:
out_img[i,j] = 255
return out_img
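# Zero crossings of the Laplacian-of-Gaussian response mark edges; the slope threshold th
# discards weak crossings so that only sufficiently sharp intensity changes are kept.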
img = rgb2gray(imread('images/Img_05_18.jpg'))
#img = misc.imread('../new images/tagore.png')[...,3]
print(np.max(img))
fig = plt.figure(figsize=(10,16))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
plt.gray() # show the filtered result in grayscale
for sigma, thres in zip(range(3,10,2), [1e-3, 1e-4, 1e-5, 1e-6]):
plt.subplot(3,2,sigma//2)
result = ndimage.gaussian_laplace(img, sigma=sigma)
result = zero_crossing(result, thres)
plt.imshow(result)
plt.axis('off')
plt.title('LoG with zero-crossing, sigma=' + str(sigma), size=20)
plt.tight_layout()
plt.show()
# ### 7.2 With Canny and Holistically-nested (deep learning model based)
import cv2
import numpy as np
import matplotlib.pylab as plt
image = cv2.imread('images/Img_05_18.jpg')
(h, w) = image.shape[:2]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blurred, 80, 150)
class CropLayer(object):
def __init__(self, params, blobs):
self.xstart = 0
self.xend = 0
self.ystart = 0
self.yend = 0
def getMemoryShapes(self, inputs):
inputShape, targetShape = inputs[0], inputs[1]
batchSize, numChannels = inputShape[0], inputShape[1]
height, width = targetShape[2], targetShape[3]
self.ystart = (inputShape[2] - targetShape[2]) // 2
self.xstart = (inputShape[3] - targetShape[3]) // 2
self.yend = self.ystart + height
self.xend = self.xstart + width
return [[batchSize, numChannels, height, width]]
def forward(self, inputs):
return [inputs[0][:,:,self.ystart:self.yend,self.xstart:self.xend]]
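# The HED Caffe model relies on this custom 'Crop' layer: it centre-crops each feature map to the
# target spatial size before the side outputs are fused, mirroring the layer used at training time.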
prototxt_path = "models/deploy.prototxt"
model_path = "models/hed_pretrained_bsds.caffemodel"
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
cv2.dnn_registerLayer('Crop', CropLayer)
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(w, h), mean=(104.00698793, 116.66876762, 122.67891434), swapRB=False, crop=False)
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0, :, :], (w, h))
hed = (255 * hed).astype("uint8")
plt.figure(figsize=(20, 8))
plt.gray()
plt.subplots_adjust(0,0,1,0.975,0.05,0.05)
plt.subplot(131), plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)), plt.axis('off'), plt.title('input', size=20)
plt.subplot(132), plt.imshow(canny), plt.axis('off'), plt.title('canny', size=20)
plt.subplot(133), plt.imshow(hed), plt.axis('off'), plt.title('holistically-nested', size=20)
plt.show()
``` |
{
"source": "joaquingx/extruct",
"score": 3
} |
#### File: extruct/extruct/w3cmicrodata.py
```python
import collections
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import lxml.etree
from w3lib.html import strip_html5_whitespace
from extruct.utils import parse_html
class LxmlMicrodataExtractor(object):
_xp_item = lxml.etree.XPath('descendant-or-self::*[@itemscope]')
_xp_prop = lxml.etree.XPath("""set:difference(.//*[@itemprop],
.//*[@itemscope]//*[@itemprop])""",
namespaces = {"set": "http://exslt.org/sets"})
_xp_clean_text = lxml.etree.XPath('descendant-or-self::*[not(self::script or self::style)]/text()')
# ancestor and preceding axes contain all elements before the context node
# so counting them gives the "document order" of the context node
_xp_item_docid = lxml.etree.XPath("""count(preceding::*[@itemscope])
+ count(ancestor::*[@itemscope])
+ 1""")
def __init__(self, nested=True, strict=False, add_text_content=False, add_html_node=False):
self.nested = nested
self.strict = strict
self.add_text_content = add_text_content
self.add_html_node = add_html_node
def get_docid(self, node):
return int(self._xp_item_docid(node))
def extract(self, htmlstring, base_url=None, encoding="UTF-8"):
tree = parse_html(htmlstring, encoding=encoding)
return self.extract_items(tree, base_url)
def extract_items(self, document, base_url):
items_seen = set()
return [
item for item in (
self._extract_item(it, items_seen=items_seen, base_url=base_url)
for it in self._xp_item(document))
if item]
def _extract_item(self, node, items_seen, base_url):
itemid = self.get_docid(node)
if self.nested:
if itemid in items_seen:
return
items_seen.add(itemid)
item = {}
if not self.nested:
item["iid"] = itemid
types = node.get('itemtype', '').split()
if types:
if not self.strict and len(types) == 1:
item["type"] = types[0]
else:
item["type"] = types
itemid = node.get('itemid')
if itemid:
item["id"] = itemid.strip()
properties = collections.defaultdict(list)
# start with item references
refs = node.get('itemref', '').split()
if refs:
for refid in refs:
for name, value in self._extract_property_refs(
node, refid, items_seen=items_seen, base_url=base_url):
properties[name].append(value)
for name, value in self._extract_properties(
node, items_seen=items_seen, base_url=base_url):
properties[name].append(value)
props = []
for (name, values) in properties.items():
if not self.strict and len(values) == 1:
props.append((name, values[0]))
else:
props.append((name, values))
if props:
item["properties"] = dict(props)
else:
# item without properties; let's use the node itself
item["value"] = self._extract_property_value(
node, force=True, items_seen=items_seen, base_url=base_url)
# below are not in the specs, but can be handy
if self.add_text_content:
textContent = self._extract_textContent(node)
if textContent:
item["textContent"] = textContent
if self.add_html_node:
item["htmlNode"] = node
return item
def _extract_properties(self, node, items_seen, base_url):
for prop in self._xp_prop(node):
for p, v in self._extract_property(
prop, items_seen=items_seen, base_url=base_url):
yield p, v
def _extract_property_refs(self, node, refid, items_seen, base_url):
for prop in node.xpath("id($refid)/descendant-or-self::*[@itemprop]", refid=refid):
for p, v in self._extract_property(
prop, items_seen=items_seen, base_url=base_url):
yield p, v
def _extract_property(self, node, items_seen, base_url):
props = node.get("itemprop").split()
value = self._extract_property_value(
node, items_seen=items_seen, base_url=base_url)
return [(p, value) for p in props]
def _extract_property_value(self, node, items_seen, base_url, force=False):
#http://www.w3.org/TR/microdata/#values
if not force and node.get("itemscope") is not None:
if self.nested:
return self._extract_item(
node, items_seen=items_seen, base_url=base_url)
else:
return {"iid_ref": self.get_docid(node)}
elif node.tag == "meta":
return node.get("content", "")
elif node.tag in ("audio", "embed", "iframe", "img", "source", "track", "video"):
return urljoin(base_url, strip_html5_whitespace(node.get("src", "")))
elif node.tag in ("a", "area", "link"):
return urljoin(base_url, strip_html5_whitespace(node.get("href", "")))
elif node.tag in ("object",):
return urljoin(base_url, strip_html5_whitespace(node.get("data", "")))
elif node.tag in ("data", "meter"):
return node.get("value", "")
elif node.tag in ("time",):
return node.get("datetime", "")
# not in W3C specs but used in schema.org examples
elif node.get("content"):
return node.get("content")
else:
return self._extract_textContent(node)
def _extract_textContent(self, node):
return u"".join(self._xp_clean_text(node)).strip()
MicrodataExtractor = LxmlMicrodataExtractor
```
#### File: extruct/tests/test_microdata.py
```python
import json
import unittest
from extruct.w3cmicrodata import MicrodataExtractor
from tests import get_testdata
class TestMicrodata(unittest.TestCase):
maxDiff = None
def test_schemaorg_CreativeWork(self):
for i in [1]:
body = get_testdata('schema.org', 'CreativeWork.{:03d}.html'.format(i))
expected = json.loads(get_testdata('schema.org', 'CreativeWork.{:03d}.json'.format(i)).decode('UTF-8'))
mde = MicrodataExtractor()
data = mde.extract(body)
self.assertEqual(data, expected)
def test_schemaorg_LocalBusiness(self):
for i in [2, 3]:
body = get_testdata('schema.org', 'LocalBusiness.{:03d}.html'.format(i))
expected = json.loads(get_testdata('schema.org', 'LocalBusiness.{:03d}.json'.format(i)).decode('UTF-8'))
mde = MicrodataExtractor()
data = mde.extract(body)
self.assertEqual(data, expected)
def test_schemaorg_MusicRecording(self):
for i in [1]:
body = get_testdata('schema.org', 'MusicRecording.{:03d}.html'.format(i))
expected = json.loads(get_testdata('schema.org', 'MusicRecording.{:03d}.json'.format(i)).decode('UTF-8'))
mde = MicrodataExtractor()
data = mde.extract(body)
self.assertEqual(data, expected)
def test_schemaorg_Event(self):
for i in [1, 2, 3, 4, 8]:
body = get_testdata('schema.org', 'Event.{:03d}.html'.format(i))
expected = json.loads(get_testdata('schema.org', 'Event.{:03d}.json'.format(i)).decode('UTF-8'))
mde = MicrodataExtractor()
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_textContent_values(self):
body = get_testdata('w3c', 'microdata.4.2.strings.html')
expected = json.loads(get_testdata('w3c', 'microdata.4.2.strings.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_textContent_values_unclean(self):
body = get_testdata('w3c', 'microdata.4.2.strings.unclean.html')
expected = json.loads(get_testdata('w3c', 'microdata.4.2.strings.unclean.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_5_2(self):
body = get_testdata('w3c', 'microdata.5.2.html')
expected = json.loads(get_testdata('w3c', 'microdata.5.2.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_5_3(self):
body = get_testdata('w3c', 'microdata.5.3.html')
expected = json.loads(get_testdata('w3c', 'microdata.5.3.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_5_5(self):
body = get_testdata('w3c', 'microdata.5.5.html')
expected = json.loads(get_testdata('w3c', 'microdata.5.5.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_7_1(self):
body = get_testdata('w3c', 'microdata.7.1.html')
expected = json.loads(get_testdata('w3c', 'microdata.7.1.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body, 'http://blog.example.com/progress-report')
self.assertEqual(data, expected)
def test_w3c_meter_element(self):
body = get_testdata('w3c', 'microdata.4.2.meter.html')
expected = json.loads(get_testdata('w3c', 'microdata.4.2.meter.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_data_element(self):
body = get_testdata('w3c', 'microdata.4.2.data.html')
expected = json.loads(get_testdata('w3c', 'microdata.4.2.data.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_object_element(self):
body = get_testdata('w3c', 'microdata.object.html')
expected = json.loads(get_testdata('w3c', 'microdata.object.json').decode('UTF-8'))
mde = MicrodataExtractor(strict=True)
data = mde.extract(body, 'http://www.example.com/microdata/test')
self.assertEqual(data, expected)
class TestMicrodataFlat(unittest.TestCase):
maxDiff = None
def test_w3c_5_2(self):
body = get_testdata('w3c', 'microdata.5.2.html')
expected = json.loads(get_testdata('w3c', 'microdata.5.2.flat.json').decode('UTF-8'))
mde = MicrodataExtractor(nested=False, strict=True)
data = mde.extract(body)
self.assertEqual(data, expected)
def test_w3c_7_1(self):
body = get_testdata('w3c', 'microdata.7.1.html')
expected = json.loads(get_testdata('w3c', 'microdata.7.1.flat.json').decode('UTF-8'))
mde = MicrodataExtractor(nested=False, strict=True)
data = mde.extract(body, 'http://blog.example.com/progress-report')
self.assertEqual(data, expected)
class TestMicrodataWithText(unittest.TestCase):
maxDiff = None
def test_w3c_5_2(self):
body = get_testdata('w3c', 'microdata.5.2.html')
expected = json.loads(get_testdata('w3c', 'microdata.5.2.withtext.json').decode('UTF-8'))
mde = MicrodataExtractor(add_text_content=True)
data = mde.extract(body)
self.assertEqual(data, expected)
class TestUrlJoin(unittest.TestCase):
maxDiff = None
def test_join_none(self):
body = get_testdata('schema.org', 'product.html')
expected = json.loads(get_testdata('schema.org', 'product.json').decode('UTF-8'))
mde = MicrodataExtractor()
data = mde.extract(body)
self.assertEqual(data, expected)
def test_join_custom_url(self):
body = get_testdata('schema.org', 'product.html')
expected = json.loads(get_testdata('schema.org', 'product_custom_url.json').decode('UTF-8'))
mde = MicrodataExtractor()
data = mde.extract(body, base_url='http://some-example.com')
self.assertEqual(data, expected)
``` |
{
"source": "joaquingx/questionary",
"score": 4
} |
#### File: questionary/examples/text.py
```python
import re
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Validator, ValidationError, prompt
class PhoneNumberValidator(Validator):
def validate(self, document):
ok = re.match(
r'^([01])?[-.\s]?\(?(\d{3})\)?'
r'[-.\s]?(\d{3})[-.\s]?(\d{4})\s?'
r'((?:#|ext\.?\s?|x\.?\s?)(?:\d+)?)?$',
document.text)
if not ok:
raise ValidationError(
message='Please enter a valid phone number',
cursor_position=len(document.text)) # Move cursor to end
def ask_pystyle(**kwargs):
# create the question object
question = questionary.text(
"What's your phone number",
validate=PhoneNumberValidator,
style=custom_style_dope,
**kwargs)
# prompt the user for an answer
return question.ask()
def ask_dictstyle(**kwargs):
questions = [
{
'type': 'text',
'name': 'phone',
'message': "What's your phone number",
'validate': PhoneNumberValidator
}
]
return prompt(questions, style=custom_style_dope, **kwargs)
if __name__ == '__main__':
pprint(ask_pystyle())
```
#### File: tests/prompts/test_password.py
```python
from tests.utils import feed_cli_with_input
def test_password_entry():
message = 'What is your password'
text = "<PASSWORD>\r"
result, cli = feed_cli_with_input('password', message, text)
assert result == 'my password'
```
#### File: questionary/tests/test_examples.py
```python
from prompt_toolkit.input.defaults import create_pipe_input
from prompt_toolkit.output import DummyOutput
from tests.utils import KeyInputs
def ask_with_patched_input(q, text):
inp = create_pipe_input()
try:
inp.send_text(text)
return q(input=inp, output=DummyOutput())
finally:
inp.close()
def test_confirm_example():
from examples.confirm import ask_dictstyle, ask_pystyle
text = "n" + KeyInputs.ENTER + "\r"
result_dict = ask_with_patched_input(ask_dictstyle, text)
result_py = ask_with_patched_input(ask_pystyle, text)
assert result_dict == {'continue': False}
assert result_dict['continue'] == result_py
def test_text_example():
from examples.text import ask_dictstyle, ask_pystyle
text = "1234567890" + KeyInputs.ENTER + "\r"
result_dict = ask_with_patched_input(ask_dictstyle, text)
result_py = ask_with_patched_input(ask_pystyle, text)
assert result_dict == {'phone': '1234567890'}
assert result_dict['phone'] == result_py
def test_select_example():
from examples.select import ask_dictstyle, ask_pystyle
text = KeyInputs.DOWN + KeyInputs.ENTER + KeyInputs.ENTER + "\r"
result_dict = ask_with_patched_input(ask_dictstyle, text)
result_py = ask_with_patched_input(ask_pystyle, text)
assert result_dict == {'theme': 'Make a reservation'}
assert result_dict['theme'] == result_py
def test_rawselect_example():
from examples.rawselect import (
ask_dictstyle,
ask_pystyle)
text = "3" + KeyInputs.ENTER + KeyInputs.ENTER + "\r"
result_dict = ask_with_patched_input(ask_dictstyle, text)
result_py = ask_with_patched_input(ask_pystyle, text)
assert result_dict == {'theme': 'Ask opening hours'}
assert result_dict['theme'] == result_py
def test_checkbox_example():
from examples.checkbox import ask_dictstyle, ask_pystyle
text = "n" + KeyInputs.ENTER + KeyInputs.ENTER + KeyInputs.ENTER + "\r"
result_dict = ask_with_patched_input(ask_dictstyle, text)
result_py = ask_with_patched_input(ask_pystyle, text)
assert result_dict == {'toppings': ['foo']}
assert result_dict['toppings'] == result_py
def test_password_example():
from examples.password import ask_dictstyle, ask_pystyle
text = "asdf" + KeyInputs.ENTER + "\r"
result_dict = ask_with_patched_input(ask_dictstyle, text)
result_py = ask_with_patched_input(ask_pystyle, text)
assert result_dict == {'password': '<PASSWORD>'}
assert result_dict['password'] == result_py
```
#### File: questionary/tests/utils.py
```python
from prompt_toolkit.input.defaults import create_pipe_input
from prompt_toolkit.output import DummyOutput
from questionary import prompt
from questionary.prompts import prompt_by_name
class KeyInputs(object):
DOWN = '\x1b[B'
UP = '\x1b[A'
LEFT = '\x1b[D'
RIGHT = '\x1b[C'
ENTER = '\x0a'
ESCAPE = '\x1b'
CONTROLC = '\x03'
BACK = '\x7f'
SPACE = ' '
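# ANSI escape sequences fed through a pipe input to simulate arrow keys, Enter, Escape, Ctrl-C, etc.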
def feed_cli_with_input(_type, message, text, **kwargs):
"""
Create a Prompt, feed it with the given user input and return the CLI
object.
This returns a (result, Application) tuple.
"""
inp = create_pipe_input()
try:
inp.send_text(text)
prompter = prompt_by_name(_type)
application = prompter(message,
input=inp,
output=DummyOutput(),
**kwargs)
result = application.unsafe_ask()
return result, application
finally:
inp.close()
def patched_prompt(questions, text, **kwargs):
"""Create a prompt where the input and output are predefined."""
inp = create_pipe_input()
try:
inp.send_text(text)
result = prompt(questions,
input=inp,
output=DummyOutput(),
**kwargs)
return result
finally:
inp.close()
``` |
{
"source": "joaquinhernandezg/PyMUSE",
"score": 2
} |
#### File: PyMUSE/PyMUSE/musecube_old.py
```python
import copy
import gc
import glob
import os
import warnings
import aplpy
import linetools.utils as ltu
import numpy as np
import numpy.ma as ma
import pyregion
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.io.ascii.sextractor import SExtractor
from astropy.modeling import models, fitting
from astropy.table import Table
from astropy.utils import isiterable
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.utils import name_from_coord
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy import ndimage
import PyMUSE.utils as mcu
class MuseCube:
"""
Class to handle VLT/MUSE data
"""
def __init__(self, filename_cube, filename_white=None, pixelsize=0.2 * u.arcsec, n_fig=1,
flux_units=1E-20 * u.erg / u.s / u.cm ** 2 / u.angstrom, vmin=None, vmax=None, wave_cal='air'):
"""
Parameters
----------
filename_cube: string
Name of the MUSE datacube .fits file
filename_white: string
Name of the MUSE white image .fits file
pixel_size : float or Quantity, optional
Pixel size of the datacube, if float it assumes arcsecs.
Default is 0.2 arcsec
n_fig : int, optional
XXXXXXXX
flux_units : Quantity
XXXXXXXXXX
"""
# init
self.color = False
self.cmap = ""
self.flux_units = flux_units
self.n = n_fig
plt.close(self.n)
self.wave_cal = wave_cal
self.filename = filename_cube
self.filename_white = filename_white
self.load_data()
self.white_data = fits.open(self.filename_white)[1].data
self.hdulist_white = fits.open(self.filename_white)
self.white_data = np.where(self.white_data < 0, 0, self.white_data)
if not vmin:
self.vmin=np.nanpercentile(self.white_data,0.25)
else:
self.vmin = vmin
if not vmax:
self.vmax=np.nanpercentile(self.white_data,98.)
else:
self.vmax = vmax
self.gc2 = aplpy.FITSFigure(self.filename_white, figure=plt.figure(self.n))
self.gc2.show_grayscale(vmin=self.vmin, vmax=self.vmax)
# self.gc = aplpy.FITSFigure(self.filename, slices=[1], figure=plt.figure(20))
self.pixelsize = pixelsize
gc.enable()
# plt.close(20)
print("MuseCube: Ready!")
def load_data(self):
hdulist = fits.open(self.filename)
print("MuseCube: Loading the cube fluxes and variances...")
# import pdb; pdb.set_trace()
self.cube = ma.MaskedArray(hdulist[1].data)
self.stat = ma.MaskedArray(hdulist[2].data)
print("MuseCube: Defining master masks (this may take a while but it is for the greater good).")
# masking
self.mask_init = np.isnan(self.cube) | np.isnan(self.stat)
self.cube.mask = self.mask_init
self.stat.mask = self.mask_init
# for ivar weighting ; consider creating it in init ; takes long
# self.flux_over_ivar = self.cube / self.stat
self.header_1 = hdulist[1].header  # The header is needed to create a proper copy of the white image.
self.header_0 = hdulist[0].header
if self.filename_white is None:
print("MuseCube: No white image given, creating one.")
w_data = copy.deepcopy(self.create_white(save=False).data)
w_header_0 = copy.deepcopy(self.header_0)
w_header_1 = copy.deepcopy(self.header_1)
# These loops remove the third dimension from the header's keywords. This is neccesary in order to
# create the white image and preserve the cube astrometry
for i in list(w_header_0.keys()):
if '3' in i:
del w_header_0[i]
for i in list(w_header_1.keys()):
if '3' in i:
del w_header_1[i]
# prepare the header
hdu = fits.HDUList()
hdu_0 = fits.PrimaryHDU(header=w_header_0)
hdu_1 = fits.ImageHDU(data=w_data, header=w_header_1)
hdu.append(hdu_0)
hdu.append(hdu_1)
hdu.writeto('new_white.fits', clobber=True)
self.filename_white = 'new_white.fits'
print("MuseCube: `new_white.fits` image saved to disk.")
def color_gui(self, cmap):
"""
Function to change the cmap of the canvas
:param cmap: string. matplotlib's color map. cmap = 'none' to gray scale again
:return:
"""
if cmap == 'none':
self.color = False
self.cmap = ""
else:
self.color = True
self.cmap = cmap
self.reload_canvas()
def get_smoothed_white(self, npix=2, save=True, show=False, **kwargs):
"""Gets an smoothed version (Gaussian of sig=npix)
of the white image. If save is True, it writes a file
to disk called `smoothed_white.fits`.
**kwargs are passed down to scipy.ndimage.gaussian_filter()
"""
hdulist = self.hdulist_white
im = self.white_data
if npix > 0:
smooth_im = ndimage.gaussian_filter(im, sigma=npix, **kwargs)
else:
smooth_im = im
if save:
hdulist[1].data = smooth_im
prihdr = hdulist[0].header
comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
# print(comment)
prihdr['history'] = comment
hdulist.writeto('smoothed_white.fits', clobber=True)
if show:
fig = aplpy.FITSFigure('smoothed_white.fits', figure=plt.figure())
fig.show_grayscale(vmin=self.vmin,vmax=self.vmax)
return smooth_im
def spec_to_vacuum(self, spectrum):
spectrum_vac = spectrum
if self.wave_cal == 'air':
spectrum_vac.meta['airvac'] = 'air'
spectrum_vac.airtovac()
return spectrum_vac
else:
return spectrum_vac
def spatial_smooth(self, npix, output="smoothed.fits", test=False, **kwargs):
"""Applies Gaussian filter of std=npix in both spatial directions
and writes it to disk as a new MUSE Cube.
Notes: the STAT cube is not touched.
Parameters
----------
npix : int
Std of Gaussian kernel in spaxel units.
output : str, optional
Name of the output file
test : bool, optional
Whether to check for flux being conserved
**kwargs are passed down to scipy.ndimage.gaussian_filter()
Return
------
Writes a new file to disk.
"""
if not isinstance(npix, int):
raise ValueError("npix must be integer.")
cube_new = copy.deepcopy(self.cube)
ntot = len(self.cube)
for wv_ii in range(ntot):
print('{}/{}'.format(wv_ii + 1, ntot))
image_aux = self.cube[wv_ii, :, :]
smooth_ii = ma.MaskedArray(ndimage.gaussian_filter(image_aux, sigma=npix, **kwargs))
smooth_ii.mask = image_aux.mask | np.isnan(smooth_ii)
# test the fluxes are conserved
if test:
gd_pix = ~smooth_ii.mask
try:
med_1 = np.nansum(smooth_ii[gd_pix])
med_2 = np.nansum(image_aux[gd_pix])
print(med_1, med_2, (med_1 - med_2) / med_1)
np.testing.assert_allclose(med_1, med_2, decimal=4)
except AssertionError:
import pdb
pdb.set_trace()
cube_new[wv_ii, :, :] = smooth_ii
# import pdb; pdb.set_trace()
hdulist = fits.open(self.filename)
hdulist[1].data = cube_new.data
prihdr = hdulist[0].header
comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
print(comment)
prihdr['history'] = comment
hdulist.writeto(output, clobber=True)
print("MuseCube: new smoothed cube written to {}".format(output))
def get_mini_image(self, center, halfsize=15):
"""
:param center: tuple of coordinates, in pixels
:param halfsize: half of the side length of the square box around the center, in pixels
:return: 2D array (list of lists) which contains the image cutout
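Example (illustrative sketch; `cube` is a placeholder for an existing MuseCube instance):
>>> im = cube.get_mini_image(center=(120, 80), halfsize=10)  # 21x21 pixel cutout around (x, y) = (120, 80)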
"""
side = 2 * halfsize + 1
image = [[0 for x in range(side)] for y in range(side)]
data_white = fits.open(self.filename_white)[1].data
center_x = center[0]
center_y = center[1]
for i in xrange(center_x - halfsize - 1, center_x + halfsize):
for j in xrange(center_y - halfsize - 1, center_y + halfsize):
i2 = i - (center_x - halfsize)
j2 = j - (center_y - halfsize)
image[j2][i2] = data_white[j - 1][i - 1]
return image
def get_gaussian_seeing_weighted_spec(self, x_c, y_c, radius, seeing=4):
"""
Function to extract the spectrum of a circular aperture defined by x_c, y_c and radius in spaxel space.
The spectrum is weighted by a 2d gaussian centered at the center of the aperture, with a std = seeing in spaxels
:param x_c: x coordinate of the center of the aperture (spaxel)
:param y_c: y coordinate of the center of the aperture (spaxel)
:param radius: radius of the circular aperture
:param seeing: standard deviation of the gaussian in spaxels
:return: XSpectrum1D object
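Example (illustrative sketch; `cube` and the output file name are placeholders):
>>> spec = cube.get_gaussian_seeing_weighted_spec(100, 100, radius=6, seeing=3)
>>> spec.write_to_fits('weighted_spec.fits')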
"""
import scipy.ndimage.filters as fi
new_3dmask = self.get_mini_cube_mask_from_ellipse_params(x_c, y_c, radius)
w = self.wavelength
n = len(w)
fl = np.zeros(n)
sig = np.zeros(n)
self.cube.mask = new_3dmask
for wv_ii in range(n):
mask = new_3dmask[wv_ii]
center = np.zeros(mask.shape)  # For some reason this does not work if the assignment is changed to np.zeros_like(mask)
center[y_c][x_c] = 1
weights = ma.MaskedArray(fi.gaussian_filter(center, seeing))
weights.mask = mask
weights = weights / np.sum(weights)
fl[wv_ii] = np.sum(self.cube[wv_ii] * weights)
sig[wv_ii] = np.sqrt(np.sum(self.stat[wv_ii] * (weights ** 2)))
self.cube.mask = self.mask_init
return XSpectrum1D.from_tuple((w, fl, sig))
def get_spec_spaxel(self, x, y, coord_system='pix', n_figure=2, empirical_std=False, save=False):
"""
Gets the spectrum of a single spaxel (xy) of the MuseCube
:param x: x coordinate of the spaxel
:param y: y coordinate of the spaxel
:param coord_system: 'pix' or 'wcs'
:return: spec: XSpectrum1D object
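Example (illustrative sketch; `cube` is a placeholder for an existing MuseCube instance):
>>> spec = cube.get_spec_spaxel(150, 210, coord_system='pix')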
"""
if coord_system == 'wcs':
x_c, y_c = self.w2p(x, y)
x_world, y_world = x, y
else:
x_c, y_c = x, y
x_world, y_world = self.p2w(x, y)
region_string = self.ellipse_param_to_ds9reg_string(x_c, y_c, 1, 1, 0, coord_system='pix')
self.draw_pyregion(region_string)
w = self.wavelength
n = len(w)
spec = np.zeros(n)
sigma = np.zeros(n)
for wv_ii in range(n):
spec[wv_ii] = self.cube.data[wv_ii][int(y_c)][int(x_c)]
sigma[wv_ii] = np.sqrt(self.stat.data[wv_ii][int(y_c)][int(x_c)])
spec = XSpectrum1D.from_tuple((self.wavelength, spec, sigma))
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
coords = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
name = name_from_coord(coords)
plt.title(name)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
if save:
spec.write_to_fits(name + '.fits')
return spec
def get_spec_from_ellipse_params(self, x_c, y_c, params, coord_system='pix', mode='wwm', npix=0, frac=0.1,
n_figure=2, empirical_std=False, save=False, color='green'):
"""
Obtains a combined spectrum of spaxels within a geometrical region defined by
x_c, y_c, param
:param x_c: x coordinate of the center of the ellipse
:param y_c: y coordinate of the center of the ellipse
:param params: Either a float that will be interpreted as a radius, or an iterable [a,b,theta] with the ellipse parameters
:param coord_system: str. Default = 'pix'.
If coord_system = 'wcs' the coordinates will be considered as degrees
:param mode: str
Mode for combining spaxels:
* `ivar` - Inverse variance weighting, variance is taken only spatially, from a "white variance image"
* `sum` - Sum of total flux
* `gaussian` - Weighted mean. Weights are obtained from a 2D gaussian fit of the bright profile
* `wwm` - 'White Weighted Mean'. Weighted mean, weights are obtained from the white image, smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is applied
* `ivarwv` - Weighted mean, the weight of every pixel is given by the inverse of its variance
* `mean` - Mean of the total flux
* `median` - Median of the total flux
* `wwm_ivarwv` - Weights given by both, `ivarwv` and `wwm`
* `wwm_ivar` - Weights given by both, `wwm` and `ivar`
* `wfrac` - It only takes the fraction `frac` of brightest spaxels (white) in the region
(e.g. frac=0.1 means 10% brightest) with equal weight.
:param frac: Float, default = 0.1
Parameter needed for wfrac mode
:param npix: int. Default = 0
Standard deviation of the gaussian filter to smooth (Only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved to the hard disk
:return: spec: XSpectrum1D object
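Example (illustrative sketch; `cube` and the parameter values are placeholders):
>>> spec = cube.get_spec_from_ellipse_params(150, 210, params=[10, 5, 45], mode='wwm', npix=2)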
"""
if mode == 'gaussian':
spec = self.get_gaussian_profile_weighted_spec(x_c=x_c, y_c=y_c, params=params)
else:
new_mask = self.get_mini_cube_mask_from_ellipse_params(x_c, y_c, params, coord_system=coord_system,color=color)
spec = self.spec_from_minicube_mask(new_mask, mode=mode, npix=npix, frac=frac)
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
if coord_system == 'wcs':
x_world, y_world = x_c, y_c
else:
x_world, y_world = self.p2w(x_c, y_c)
coords = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
name = name_from_coord(coords)
plt.title(name)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
if save:
spec.write_to_fits(name + '.fits')
return spec
def get_spec_from_interactive_polygon_region(self, mode='wwm', npix=0, frac=0.1,
n_figure=2,
empirical_std=False, save=False):
"""
Function used to interactively define a region and extract the spectrum of that region
To use this function, the class must have been initialized in an `ipython --pylab qt` environment
The package roipoly is also required. Installation instructions and LICENSE at:
https://github.com/jdoepfert/roipoly.py/
:param mode: str, default = wwm
Mode for combining spaxels:
* `ivar` - Inverse variance weighting, variance is taken only spatially, from a "white variance image"
* `sum` - Sum of total flux
* `wwm` - 'White Weighted Mean'. Weighted mean, weights are obtained from the white image, smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is applied
* `ivarwv` - Weighted mean, the weight of every pixel is given by the inverse of its variance
* `mean` - Mean of the total flux
* `median` - Median of the total flux
* `wwm_ivarwv` - Weights given by both, `ivarwv` and `wwm`
* `wwm_ivar` - Weights given by both, `wwm` and `ivar`
* `wfrac` - It only takes the fraction `frac` of brightest spaxels (white) in the region
(e.g. frac=0.1 means 10% brightest) with equal weight.
:param frac: Float, default = 0.1
Parameter needed for wfrac mode
:param npix: int. Default = 0
Standard deviation of the gaussian filter to smooth (Only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved to the hard disk
:return: spec: XSpectrum1D object
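Example (illustrative sketch; `cube` is a placeholder for an existing MuseCube instance):
>>> spec = cube.get_spec_from_interactive_polygon_region(mode='wwm', npix=2)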
"""
from roipoly import roipoly
current_fig = plt.figure(self.n)
MyROI = roipoly(roicolor='r', fig=current_fig)
raw_input("MuseCube: Please select points with left click. Right click and Enter to continue...")
print("MuseCube: Calculating the spectrum...")
mask = MyROI.getMask(self.white_data)
mask_inv = np.where(mask == 1, 0, 1)
complete_mask = self.mask_init + mask_inv
new_3dmask = np.where(complete_mask == 0, False, True)
spec = self.spec_from_minicube_mask(new_3dmask, mode=mode, npix=npix, frac=frac)
self.reload_canvas()
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
plt.ylabel('Flux (' + str(self.flux_units) + ')')
plt.xlabel('Wavelength (Angstroms)')
plt.title('Polygonal region spectrum ')
plt.figure(self.n)
MyROI.displayROI()
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
if save:
spec.write_to_fits('Polygonal_region_spec.fits')
return spec
def params_from_ellipse_region_string(self, region_string, deg=False):
"""
Function to get the elliptical parameters of a region_string.
If deg is True, only the center is returned, in degrees.
Otherwise, all parameters will be returned in pixels
:param region_string: Region defined as string using ds9 format
:param deg: If True, only the center of the ellipse will be returned, in degrees.
:return: x_center, y_center, params: the parameters of the ellipse defined in region_string
"""
r = pyregion.parse(region_string)
if deg:
x_c, y_c = r[0].coord_list[0], r[0].coord_list[1]
if r[0].coord_format == 'physical' or r[0].coord_format == 'image':
x_world, y_world = self.p2w(x_c - 1, y_c - 1)
else:
x_world, y_world = x_c, y_c
return x_world, y_world
else:
if r[0].coord_format == 'physical' or r[0].coord_format == 'image':
x_c, y_c, params = r[0].coord_list[0], r[0].coord_list[1], r[0].coord_list[2:5]
else:
x_world = r[0].coord_list[0]
y_world = r[0].coord_list[1]
par = r[0].coord_list[2:5]
x_c, y_c, params = self.ellipse_params_to_pixel(x_world, y_world, params=par)
return x_c - 1, y_c - 1, params
def get_spec_from_region_string(self, region_string, mode='wwm', npix=0., frac=0.1, empirical_std=False, n_figure=2,
save=False):
"""
Obtains a combined spectrum of spaxels within a geometrical region defined by region_string, interpreted in ds9 format
:param region_string: str
Region defined by a string, using ds9 format (ellipse only in gaussian method)
example: region_string = 'physical;ellipse(100,120,10,5,35) # color = green'
:param mode: str
Mode for combining spaxels:
* `ivar` - Inverse variance weighting, variance is taken only spatially, from a "white variance image"
* `sum` - Sum of total flux
* `gaussian` - Weighted mean. Weights are obtained from a 2D gaussian fit of the bright profile (for elliptical regions only)
* `wwm` - 'White Weighted Mean'. Weighted mean, weights are obtained from the white image, smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is applied
* `ivarwv` - Weighted mean, the weight of every pixel is given by the inverse of its variance
* `mean` - Mean of the total flux
* `median` - Median of the total flux
* `wwm_ivarwv` - Weights given by both, `ivarwv` and `wwm`
* `wwm_ivar` - Weights given by both, `wwm` and `ivar`
* `wfrac` - It only takes the fraction `frac` of brightest spaxels (white) in the region
(e.g. frac=0.1 means 10% brightest) with equal weight.
:param frac: Float, default = 0.1
Parameter needed for wfrac mode
:param npix: int. Default = 0
Standard deviation of the gaussian filter to smooth (Only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved to the hard disk
:return: spec: XSpectrum1D object
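Example (illustrative sketch; `cube` is a placeholder for an existing MuseCube instance):
>>> region = 'physical;ellipse(100,120,10,5,35) # color = green'
>>> spec = cube.get_spec_from_region_string(region, mode='wwm', npix=2)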
"""
if mode == 'gaussian':
spec = self.get_gaussian_profile_weighted_spec(region_string_=region_string)
else:
new_mask = self.get_mini_cube_mask_from_region_string(region_string)
spec = self.spec_from_minicube_mask(new_mask, mode=mode, npix=npix, frac=frac)
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
self.draw_pyregion(region_string)
spec = self.spec_to_vacuum(spec)
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
x_world, y_world = self.params_from_ellipse_region_string(region_string, deg=True)
coords = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
name = name_from_coord(coords)
plt.title(name)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
if save:
spec.write_to_fits(name + '.fits')
return spec
def draw_ellipse_params(self, xc, yc, params, color='green'):
"""
Function to draw in the interface the contour of the elliptical region defined by (xc,yc,params)
:param xc: x coordinate of the center of the ellipse
:param yc: y coordinate of the center of the ellipse
:param params: either a single radius or [a,b,theta] iterable
:param color: color to draw
:return:
"""
if isinstance(params, (float, int)):
params = [params, params, 0]
region_string = self.ellipse_param_to_ds9reg_string(xc, yc, params[0], params[1], params[2], color=color)
self.draw_pyregion(region_string)
def draw_pyregion(self, region_string):
"""
Function used to draw in the interface the contour of the region defined by region_string
:param region_string: str. Region defined by a string using ds9 format
:return: None
"""
hdulist = self.hdulist_white
r = pyregion.parse(region_string).as_imagecoord(hdulist[1].header)
fig = plt.figure(self.n)
ax = fig.axes[0]
patch_list, artist_list = r.get_mpl_patches_texts(origin=0)
patch = patch_list[0]
ax.add_patch(patch)
def spec_from_minicube_mask(self, new_3dmask, mode='wwm', npix=0, frac=0.1):
"""Given a 3D mask, this function provides a combined spectrum
of all non-masked voxels.
Parameters
----------
new_3dmask : np.array of same shape as self.cube
The 3D mask
mode : str
Mode for combining spaxels:
* `ivar` - Inverse variance weighting, variance is taken only spatially, from a "white variance image"
* `sum` - Sum of total flux
* `wwm` - 'White Weighted Mean'. Weighted mean, weights are obtained from the white image, smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is applied
* `ivarwv` - Weighted mean, the weight of every pixel is given by the inverse of its variance
* `mean` - Mean of the total flux
* `median` - Median of the total flux
* `wwm_ivarwv` - Weights given by both, `ivarwv` and `wwm`
* `wwm_ivar` - Weights given by both, `wwm` and `ivar`
* `wfrac` - It only takes the fraction `frac` of brightest spaxels (white) in the region
(e.g. frac=0.1 means 10% brightest) with equal weight.
Returns
-------
An XSpectrum1D object (from linetools) with the combined spectrum.
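Example (illustrative sketch; `cube` and the region string are placeholders):
>>> mask3d = cube.get_new_3dmask('physical;ellipse(100,120,10,5,35) # color = green')
>>> spec = cube.spec_from_minicube_mask(mask3d, mode='wwm', npix=2)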
"""
if mode not in ['ivarwv', 'ivar', 'mean', 'median', 'wwm', 'sum', 'wwm_ivarwv', 'wwm_ivar', 'wfrac']:
raise ValueError("Not ready for this type of `mode`.")
if np.shape(new_3dmask) != np.shape(self.cube.mask):
raise ValueError("new_3dmask must be of same shape as the original MUSE cube.")
n = len(self.wavelength)
fl = np.zeros(n)
er = np.zeros(n)
if mode == 'ivar':
var_white = self.create_white(stat=True, save=False)
elif mode in ['wwm', 'wwm_ivarwv', 'wwm_ivar', 'wfrac']:
smoothed_white = self.get_smoothed_white(npix=npix, save=False)
if mode == 'wwm_ivar':
var_white = self.create_white(stat=True, save=False)
elif mode == 'wfrac':
mask2d = new_3dmask[1]
self.wfrac_show_spaxels(frac=frac, mask2d=mask2d, smoothed_white=smoothed_white)
warn = False
for wv_ii in xrange(n):
mask = new_3dmask[wv_ii] # 2-D mask
im_fl = self.cube[wv_ii][~mask] # this is a 1-d np.array()
im_var = self.stat[wv_ii][~mask] # this is a 1-d np.array()
if len(im_fl) == 0:
fl[wv_ii] = 0
er[wv_ii] = 99
elif mode == 'wwm':
im_weights = smoothed_white[~mask]
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'ivar':
im_var_white = var_white[~mask]
im_weights = 1. / im_var_white
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'ivarwv':
im_weights = 1. / im_var
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'wwm_ivarwv':
im_white = smoothed_white[~mask]
im_weights = im_white / im_var
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'wwm_ivar':
im_white = smoothed_white[~mask]
im_var_white = var_white[~mask]
im_weights = im_white / im_var_white
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'sum':
im_weights = 1.
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'mean':
im_weights = 1. / len(im_fl)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'median':
fl[wv_ii] = np.median(im_fl)
er[wv_ii] = 1.2533 * np.sqrt(np.sum(im_var)) / len(im_fl)  # 1.2533 ~ sqrt(pi/2): the standard error of the median is sqrt(pi/2) times that of the mean for Gaussian noise
elif mode == 'wfrac':
if (frac > 1) or (frac < 0):
raise ValueError('`frac` must be a value within (0,1)')
im_white = smoothed_white[~mask]
fl_limit = np.percentile(im_white, (1. - frac) * 100.)
im_weights = np.where(im_white >= fl_limit, 1., 0.)
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0., im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
if warn:
warnings.warn(
'Some wavelengths could not be combined using the selected mode (a simple mean was used in those cases)')
if mode not in ['sum', 'median', 'mean', 'wfrac']: # normalize to match total integrated flux
spec_sum = self.spec_from_minicube_mask(new_3dmask, mode='sum')
fl_sum = spec_sum.flux.value
norm = np.sum(fl_sum) / np.sum(fl)
if norm < 0:
warnings.warn(
"Normalization factor is Negative!! (This probably means that you are extracting the spectrum where flux<0)")
fl = fl * norm
er = er * abs(norm)
print('normalization factor relative to total flux = ' + str(norm))
return XSpectrum1D.from_tuple((self.wavelength, fl, er))
def get_spec_and_image(self, center, halfsize=15, n_figure=3, mode='wwm', coord_system='pix', npix=0, frac=0.1,
save=False, empirical_std=False):
"""
Function to get a spectrum and an image of the selected source.
:param center: Tuple. Contains the coordinates of the source.
:param halfsize: float or list. If a number, it is the halfsize of the image box and the radius of a circular aperture to get the spectrum
If a list, it contains the [a,b,theta] parameters for an elliptical aperture. The box will be a square with half-side equal to the major semiaxis
:param n_figure: Figure number to display the spectrum and the image
:param mode: str
Mode for combining spaxels:
* `ivar` - Inverse variance weighting, variance is taken only spatially, from a "white variance image"
* `sum` - Sum of total flux
* `gaussian` - Weighted mean. Weights are obtained from a 2D gaussian fit of the bright profile
* `wwm` - 'White Weighted Mean'. Weighted mean, weights are obtained from the white image, smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is applied
* `ivarwv` - Weighted mean, the weight of every pixel is given by the inverse of its variance
* `mean` - Mean of the total flux
* `median` - Median of the total flux
* `wwm_ivarwv` - Weights given by both, `ivarwv` and `wwm`
* `wwm_ivar` - Weights given by both, `wwm` and `ivar`
* `wfrac` - It only takes the fraction `frac` of brightest spaxels (white) in the region
(e.g. frac=0.1 means 10% brightest) with equal weight.
:param frac: Float, default = 0.1
Parameter needed for wfrac mode
:param npix: int. Default = 0
Standard deviation of the gaussian filter to smooth (Only in wwm methods)
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved to the hard disk
:param coord_system: str. Default = 'pix'.
If coord_system = 'wcs' the coordinates will be considered as degrees
:return: spec: XSpectrum1D object
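Example (illustrative sketch; `cube` and the coordinates are placeholders):
>>> spec = cube.get_spec_and_image((150, 210), halfsize=12, mode='wwm')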
"""
spec = self.get_spec_from_ellipse_params(x_c=center[0], y_c=center[1], params=halfsize,
coord_system=coord_system, mode=mode, frac=frac, npix=npix,
empirical_std=empirical_std)
spec = self.spec_to_vacuum(spec)
if isinstance(halfsize, (int, float)):
halfsize = [halfsize, halfsize, 0]
if coord_system == 'wcs':
x_c, y_c, halfsize = self.ellipse_params_to_pixel(center[0], center[1], params=halfsize)
center_ = (x_c, y_c)
else:
center_ = center
aux = [halfsize[0], halfsize[1]]
halfsize = max(aux)
mini_image = self.get_mini_image(center=center_, halfsize=halfsize)
plt.figure(n_figure, figsize=(17, 5))
ax1 = plt.subplot2grid((1, 4), (0, 0), colspan=3)
if coord_system == 'pix':
x_world, y_world = self.p2w(center[0], center[1])
else:
x_world, y_world = center[0], center[1]
coord = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
spec_name = name_from_coord(coord)
if save:
spec.write_to_fits(spec_name + '.fits')
plt.title(spec_name)
w = spec.wavelength.value
f = spec.flux.value
ax1.plot(w, f)
plt.ylabel('Flux (' + str(self.flux_units) + ')')
plt.xlabel('Wavelength (Angstroms)')
n = len(w)
ave = np.nanmean(f)
std = np.nanstd(f)
ymin = ave - 3 * std
ymax = ave + 4 * std
plt.ylim([ymin, ymax])
plt.xlim([w[0], w[n - 1]])
ax2 = plt.subplot2grid((1, 4), (0, 3), colspan=1)
ax2.imshow(mini_image, cmap='gray', vmin=self.vmin, vmax=self.vmax)
plt.ylim([0, 2 * halfsize])
plt.xlim([0, 2 * halfsize])
return spec
def draw_region(self, r):
fig = plt.figure(self.n)
ax = fig.axes[0]
patch_list, artist_list = r.get_mpl_patches_texts(origin=0)
patch = patch_list[0]
ax.add_patch(patch)
def region_2dmask(self, r):
from pyregion.region_to_filter import as_region_filter
im_aux = np.ones_like(self.white_data)
hdu_aux = fits.open(self.filename_white)[1]
hdu_aux.data = im_aux
shape = hdu_aux.data.shape
region_filter = as_region_filter(r, origin=0)
mask_new = region_filter.mask(shape)
mask_new_inverse = np.where(~mask_new, True, False)
mask2d = mask_new_inverse
return mask2d
def region_3dmask(self, r):
mask2d = self.region_2dmask(r)
complete_mask_new = mask2d + self.mask_init
complete_mask_new = np.where(complete_mask_new != 0, True, False)
mask3d = complete_mask_new
return mask3d
def compute_kinematics(self, x_c, y_c, params, wv_line_vac, wv_range_size=35, type='abs', debug=False, z=0,
cmap='seismic'):
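"""
Produces a 2D velocity map of an emission or absorption line within the elliptical
aperture defined by (x_c, y_c, params). A Gaussian plus linear-continuum model is fitted to the
line (rest-frame vacuum wavelength wv_line_vac, redshifted by z) in every non-masked spaxel of
the aperture; fits that pass simple amplitude and wavelength-shift checks are converted to
velocities relative to the aperture-integrated fit and written to `kinematics.fits`.
:param x_c: x coordinate of the center of the aperture (spaxels)
:param y_c: y coordinate of the center of the aperture (spaxels)
:param params: either a single radius or an [a, b, theta] iterable defining the elliptical aperture
:param wv_line_vac: vacuum rest-frame wavelength of the line, in Angstroms
:param wv_range_size: half-width of the wavelength window used for the fits, in Angstroms
:param type: 'abs' for absorption lines or 'emi' for emission lines
:param debug: if True, shows diagnostic plots and information for every spaxel fit
:param z: redshift of the source
:param cmap: colormap used to display the velocity map
:return: kine_im: 2D array with the fitted velocity (km/s) of each accepted spaxel
"""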
##Get the integrated spec fit, and estimate the 0 velocity wv from there
wv_line = wv_line_vac * (1 + z)
dwmax = 10
spec_total = self.get_spec_from_ellipse_params(x_c, y_c, params, mode='wwm')
wv_t = spec_total.wavelength.value
fl_t = spec_total.flux.value
sig_t = spec_total.sig.value
sig_eff = sig_t[np.where(np.logical_and(wv_t >= wv_line - wv_range_size, wv_t <= wv_line + wv_range_size))]
wv_eff = wv_t[np.where(np.logical_and(wv_t >= wv_line - wv_range_size, wv_t <= wv_line + wv_range_size))]
fl_eff = fl_t[np.where(np.logical_and(wv_t >= wv_line - wv_range_size, wv_t <= wv_line + wv_range_size))]
fl_left = fl_eff[:3]
fl_right = fl_eff[-3:]
intercept_init = (np.sum(fl_right) + np.sum(fl_left)) / (len(fl_left) + len(fl_right))
if type == 'abs':
a_init = np.min(fl_eff) - intercept_init
if type == 'emi':
a_init = np.max(fl_eff) - intercept_init
slope_init = 0
sigma_init = wv_range_size / 3.
mean_init = wv_line
gaussian = models.Gaussian1D(amplitude=a_init, mean=mean_init, stddev=sigma_init)
line = models.Linear1D(slope=slope_init, intercept=intercept_init)
model_init = gaussian + line
fitter = fitting.LevMarLSQFitter()
model_fit = fitter(model_init, wv_eff, fl_eff, weights=sig_eff / np.sum(sig_eff))
mean_total = model_fit[0].mean.value
sigma_total = model_fit[0].stddev.value
z_line = (mean_total / wv_line_vac) - 1.
if isinstance(params, (int, float)):
params = [params, params, 0]
region_string = self.ellipse_param_to_ds9reg_string(x_c, y_c, params[0], params[1], params[2])
mask2d = self.get_new_2dmask(region_string)
##Find center guessing parameters
spec_c = self.get_spec_spaxel(x_c, y_c)
fl_c = spec_c.flux.value
wv_c = spec_c.wavelength.value
sig_c = spec_total.sig.value
sig_eff = sig_c[np.where(np.logical_and(wv_c >= wv_line - wv_range_size, wv_c <= wv_line + wv_range_size))]
wv_eff = wv_c[np.where(np.logical_and(wv_c >= wv_line - wv_range_size, wv_c <= wv_line + wv_range_size))]
fl_eff = fl_c[np.where(np.logical_and(wv_c >= wv_line - wv_range_size, wv_c <= wv_line + wv_range_size))]
#### Define central gaussian_mean
wv_c_eff = wv_eff
fl_c_eff = fl_eff
fl_left = fl_eff[:3]
fl_right = fl_eff[-3:]
intercept_init = (np.sum(fl_right) + np.sum(fl_left)) / (len(fl_left) + len(fl_right))
if type == 'abs':
a_init = np.min(fl_eff) - intercept_init
if type == 'emi':
a_init = np.max(fl_eff) - intercept_init
slope_init = 0
sigma_init = sigma_total
mean_init = wv_line
gaussian = models.Gaussian1D(amplitude=a_init, mean=mean_init, stddev=sigma_init)
line = models.Linear1D(slope=slope_init, intercept=intercept_init)
model_init = gaussian + line
fitter = fitting.LevMarLSQFitter()
model_fit = fitter(model_init, wv_eff, fl_eff, weights=sig_eff / np.sum(sig_eff))
mean_center = model_fit[0].mean.value
a_center = model_fit[0].amplitude.value
sigma_center = model_fit[0].stddev.value
##get spaxel in mask2d
y, x = np.where(~mask2d)
n = len(x)
kine_im = np.where(self.white_data == 0, np.nan, np.nan)
sigma_im = np.where(self.white_data == 0, np.nan, np.nan)
for i in xrange(n):
print(str(i + 1) + '/' + str(n))
spec = self.get_spec_spaxel(x[i], y[i])
wv = spec.wavelength.value
fl = spec.flux.value
sig = spec_total.sig.value
sig_eff = sig[np.where(np.logical_and(wv >= wv_line - wv_range_size, wv <= wv_line + wv_range_size))]
wv_eff = wv[np.where(np.logical_and(wv >= wv_line - wv_range_size, wv <= wv_line + wv_range_size))]
fl_eff = fl[np.where(np.logical_and(wv >= wv_line - wv_range_size, wv <= wv_line + wv_range_size))]
fl_left = fl_eff[:3]
fl_right = fl_eff[-3:]
intercept_init = (np.sum(fl_right) + np.sum(fl_left)) / (len(fl_left) + len(fl_right))
if type == 'abs':
a_init = np.min(fl_eff) - intercept_init
if type == 'emi':
a_init = np.max(fl_eff) - intercept_init
slope_init = 0
sigma_init = sigma_center
mean_init = mean_center
gaussian = models.Gaussian1D(amplitude=a_init, mean=mean_init, stddev=sigma_init)
line = models.Linear1D(slope=slope_init, intercept=intercept_init)
model_init = gaussian + line
fitter = fitting.LevMarLSQFitter()
model_fit = fitter(model_init, wv_eff, fl_eff, weights=sig_eff / np.sum(sig_eff))
m = fitter.fit_info['param_cov']
residual = model_fit(wv_eff) - fl_eff
noise = np.std(residual)
if debug:
plt.figure()
plt.plot(wv_c_eff, fl_c_eff, drawstyle='steps-mid', color='grey')
plt.plot(wv_eff, fl_eff, drawstyle='steps-mid')
plt.plot(wv_eff, model_fit(wv_eff))
plt.plot(wv_eff, residual, color='red')
plt.plot(wv_eff, sig_eff, color='yellow', drawstyle='steps-mid')
m = fitter.fit_info['param_cov']
if m is not None:
print('Display Cov Matrix')
plt.figure()
plt.imshow(m, interpolation='none', vmin=0, vmax=15)
plt.colorbar()
else:
print('Cov Matrix undefined')
mean = model_fit[0].mean.value
amp = model_fit[0].amplitude.value
if abs(amp) >= 2. * noise and (a_center * amp > 0) and abs(mean_center - mean) <= dwmax:
if debug:
print('Fit accepted')
print(str(x[i]) + ',' + str(y[i]))
units = u.km / u.s
vel = ltu.dv_from_z((mean / wv_line_vac) - 1, z_line).to(units).value
kine_im[y[i]][x[i]] = vel
else:
if debug:
print('Fit rejected')
print(str(x[i]) + ',' + str(y[i]))
if debug:
print('value of wv_dif = ' + str(mean_center - mean))
print('amplitude = ' + str(amp))
print('noise = ' + str(noise))
raw_input('Enter to continue...')
hdulist = self.hdulist_white
hdulist[1].data = kine_im
hdulist.writeto('kinematics.fits', clobber=True)
fig = aplpy.FITSFigure('kinematics.fits', figure=plt.figure())
fig.show_colorscale(cmap=cmap)
fig.add_colorbar()
fig.colorbar.set_axis_label_text('V (km s$^{-1}$)')
xw, yw = self.p2w(x_c, y_c)
if isinstance(params, (int, float)):
r = params * self.pixelsize
else:
r = params[0] * self.pixelsize
r = r.to(u.deg)
fig.recenter(xw, yw, r.value)
return kine_im
def save_muselet_specs(self, filename, mode='sum', params=4, frac=0.1, npix=0, empirical_std=False,
redmonster_format=True, ids='all'):
"""
:param filename: string, Name of the MUSELET output fits table
:param mode: string, mode of extractor for the spectra
:param params: int or iterable. Default = 4. Elliptical parameters for the extraction of the spectra in spaxel units
:param frac: float. Default = 0.1. Extraction parameter used in 'wfrac' mode.
:param npix: int. Default = 0. Extraction parameter used in several modes. stddev of the Gaussian kernel to smooth
the white image. If npix = 0, no smoothing is applied.
:param empirical_std: boolean, Default = False. If True, the stddev of the spectra will be empirically estimated.
:param redmonster_format: boolean. Default = True. If True, the spectra will be saved in a readable format for the Redmonster software.
:param ids: string or iterable. Default = 'all'. If ids = 'all', all the spectra in the MUSELET table will be extracted.
if ids is iterable, it must contain the ids in the MUSELET table of the sources to extract (e.g. ids = [1,15,23] will
extract only the sources with the ids 1, 15 and 23)
:return:
"""
fits_table = Table.read(fits.open(filename)[1])
ID = fits_table['ID'].data.data
RA = fits_table['RA'].data.data
DEC = fits_table['DEC'].data.data
if ids == 'all':
ids = fits_table['ID'].data.data
n = len(ids)
for i in xrange(n):
j = np.where(ids[i] == ID)[0][0]
x_world = RA[j]
y_world = DEC[j]
coord = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
str_id = str(ids[i]).zfill(3)
spec_fits_name = str_id + '_' + name_from_coord(coord)
x, y = self.w2p(x_world, y_world)
spec = self.get_spec_from_ellipse_params(x, y, params, mode=mode, npix=npix, frac=frac,
empirical_std=empirical_std, save=False)
if redmonster_format:
mcu.spec_to_redmonster_format(spec=spec, fitsname=spec_fits_name + '_RMF.fits', n_id=ids[i])
else:
spec.write_to_fits(spec_fits_name + '.fits')
print('ID = ' + str_id + ' Ready!!')
def save_ds9regfile_specs(self, regfile, mode='wwm', frac=0.1, npix=0, empirical_std=False, redmonster_format=True,
id_start=1, coord_name=False, debug=False):
"""
Function used to save a set of spectra given by a DS9 regionfile "regfile"
:param regfile: str. Name of the DS9 region file
:param mode: str. Default = 'wwm'. see more modes and details in self.spec_from_minicube_mask()
:param frac: Float, default = 0.1
Parameter needed for wfrac mode
:param npix: int. Default = 0
Standard deviation of the gaussian filter to smooth (Only in wwm methods)
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:param redmonster_format: If True, the spectra will be saved in a readable format for the Redmonster software
:param coord_name: Boolean. Default = False.
If True, the name of each spectrum will be computed from the coordinates of the first (X,Y) pair in the region
string. Otherwise, the spectra will be named with an ID and the name of the region file.
:param id_start: int. Default = 1
Initial id assigned to different spectra
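Example (illustrative sketch; `cube` and the region file name are placeholders):
>>> cube.save_ds9regfile_specs('targets.reg', mode='wwm', npix=2, coord_name=True)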
"""
r = pyregion.open(regfile)
n = len(r)
self.reload_canvas()
for i in xrange(n):
id_ = id_start + i
r_i = pyregion.ShapeList([r[i]])
self.draw_region(r_i)
mask3d = self.region_3dmask(r_i)
##Get spec
spec = self.spec_from_minicube_mask(mask3d, mode=mode, npix=npix, frac=frac)
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
str_id = str(id_).zfill(3)
spec_fits_name = str_id + '_' + regfile[:-4]
if coord_name:
r_aux = r[i]
x = r_aux.coord_list[0]
y = r_aux.coord_list[1]
x_world, y_world = self.p2w(x, y)
coord = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
spec_fits_name = str_id + '_' + name_from_coord(coord)
if redmonster_format:
if debug:
mag_tuple = ['mag_r', '-']
else:
mag_tuple = None
mcu.spec_to_redmonster_format(spec=spec, fitsname=spec_fits_name + '_RMF.fits', n_id=id_, mag=mag_tuple)
else:
spec.write_to_fits(spec_fits_name + '.fits')
print('ID = ' + str_id + ' Ready!!')
def get_spec_from_ds9regfile(self, regfile, mode='wwm', i=0, frac=0.1, npix=0, empirical_std=False, n_figure=2,
save=False):
"""
Function to get the spec of a region defined in a ds9 .reg file
The .reg file MUST be in physical coordinates
:param regfile: str. Name of the DS9 region file
:param mode: str
Mode for combining spaxels:
* `ivar` - Inverse variance weighting, variance is taken only spatially, from a "white variance image"
* `sum` - Sum of total flux
* `wwm` - 'White Weighted Mean'. Weighted mean, weights are obtained from the white image, smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is applied
* `ivarwv` - Weighted mean, the weight of every pixel is given by the inverse of its variance
* `mean` - Mean of the total flux
* `median` - Median of the total flux
* `wwm_ivarwv` - Weights given by both, `ivarwv` and `wwm`
* `wwm_ivar` - Weights given by both, `wwm` and `ivar`
* `wfrac` - It only takes the fraction `frac` of brightest spaxels (white) in the region
(e.g. frac=0.1 means 10% brightest) with equal weight.
:param i: int, default = 0
Index of the region in the region file. i = 0 corresponds to the first region listed.
:param frac: Float, default = 0.1
Parameter needed for wfrac mode
:param npix: int. Default = 0
Standard deviation of the gaussian filter to smooth (Only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:return: spec: XSpectrum1D object
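Example (illustrative sketch; `cube` and the region file name are placeholders):
>>> spec = cube.get_spec_from_ds9regfile('targets.reg', i=0, mode='wwm')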
"""
r = pyregion.open(regfile)
r = pyregion.ShapeList([r[i]])
self.draw_region(r)
mask3d = self.region_3dmask(r)
spec = self.spec_from_minicube_mask(mask3d, mode=mode, npix=npix, frac=frac)
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
if save:
spec.write_to_fits(regfile[:-4] + '.fits')
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
plt.title('Spectrum from ' + regfile)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
return spec
@property
def wavelength(self):
"""
Creates the wavelength array for the spectrum. The values of dw and the limits depend
on the data and should be revised.
:return: w: array[]
array which contains an evenly sampled wavelength range
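Example (illustrative sketch; `cube` is a placeholder for an existing MuseCube instance):
>>> w = cube.wavelength  # one entry per wavelength slice of the cube, in Angstroms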
"""
dw = self.header_1['CD3_3']
w_ini = self.header_1['CRVAL3']
N = self.header_1['NAXIS3']
w_fin = w_ini + (N - 1) * dw
# w_aux = w_ini + dw*np.arange(0, N) #todo: check whether w_aux and w are the same
w = np.linspace(w_ini, w_fin, N)
# print('wavelength in range ' + str(w[0]) + ' to ' + str(w[len(w) - 1]) + ' and dw = ' + str(dw))
return w
def __edit_header(self, hdulist, values_list,
keywords_list=['CRPIX1', 'CRPIX2', 'CD1_1', 'CD2_2', 'CRVAL1', 'CRVAL2'], hdu=1):
hdu_element = hdulist[hdu]
if len(keywords_list) != len(values_list):
raise ValueError('Dimensions of keywords_list and values_list do not match')
n = len(values_list)
for i in xrange(n):
keyword = keywords_list[i]
value = values_list[i]
hdu_element.header[keyword] = value
# CSYER1=hdu_element.header['CSYER1']
# hdu_element.header['CSYER1']=1000.0860135214331
hdulist_edited = hdulist
hdulist_edited[hdu] = hdu_element
return hdulist_edited
def __save2fits(self, fitsname, data_to_save, stat=False, type='cube', n_figure=2, edit_header=[]):
if type == 'white':
hdulist = fits.HDUList.fromfile(self.filename_white)
hdulist[1].data = data_to_save
if len(edit_header) == 0:
hdulist.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, figure=plt.figure(n_figure))
im.show_grayscale()
elif len(edit_header) == 1:
values_list = edit_header[0]
hdulist_edited = self.__edit_header(hdulist, values_list=values_list)
hdulist_edited.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, figure=plt.figure(n_figure))
im.show_grayscale()
elif len(edit_header) == 2:
values_list = edit_header[0]
keywords_list = edit_header[1]
hdulist_edited = self.__edit_header(hdulist, values_list=values_list, keywords_list=keywords_list)
hdulist_edited.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, figure=plt.figure(n_figure))
im.show_grayscale()
elif len(edit_header) == 3:
values_list = edit_header[0]
keywords_list = edit_header[1]
hdu = edit_header[2]
hdulist_edited = self.__edit_header(hdulist, values_list=values_list, keywords_list=keywords_list,
hdu=hdu)
hdulist_edited.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, figure=plt.figure(n_figure))
im.show_grayscale()
if type == 'cube':
hdulist = fits.HDUList.fromfile(self.filename)
if stat == False:
hdulist[1].data = data_to_save
if stat == True:
hdulist[2].data = data_to_save
if len(edit_header) == 0:
hdulist.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, slices=[1], figure=plt.figure(n_figure))
im.show_grayscale()
elif len(edit_header) == 1:
values_list = edit_header[0]
hdulist_edited = self.__edit_header(hdulist, values_list=values_list)
hdulist_edited.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, slices=[1], figure=plt.figure(n_figure))
im.show_grayscale()
elif len(edit_header) == 2:
values_list = edit_header[0]
keywords_list = edit_header[1]
hdulist_edited = self.__edit_header(hdulist, values_list=values_list, keywords_list=keywords_list)
hdulist_edited.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, slices=[1], figure=plt.figure(n_figure))
im.show_grayscale()
elif len(edit_header) == 3:
values_list = edit_header[0]
keywords_list = edit_header[1]
hdu = edit_header[2]
hdulist_edited = self.__edit_header(hdulist, values_list=values_list, keywords_list=keywords_list,
hdu=hdu)
hdulist_edited.writeto(fitsname, clobber=True)
im = aplpy.FITSFigure(fitsname, slices=[1], figure=plt.figure(n_figure))
im.show_grayscale()
def ellipse_params_to_pixel(self, xc, yc, params):
"""
Function to transform the parameters of an ellipse from degrees to pixels
:param xc: x coordinate of the center of the ellipse, in degrees
:param yc: y coordinate of the center of the ellipse, in degrees
:param params: [a, b, theta] parameters of the ellipse, with a and b in degrees
:return: xc, yc and params converted to pixel units
"""
a = params[0]
b = params[1]
xaux, yaux, a2 = self.xyr_to_pixel(xc, yc, a)
xc2, yc2, b2 = self.xyr_to_pixel(xc, yc, b)
params2 = [a2, b2, params[2]]
return xc2, yc2, params2
def get_mini_cube_mask_from_region_string(self, region_string):
"""
Creates a 3D mask where all original masked voxels are masked out,
plus all voxels associated to spaxels outside the region
defined by the given region string.
:param region_string: Region defined by ds9 format
:return: complete_mask_new: a new mask for the cube
"""
complete_mask_new = self.get_new_3dmask(region_string)
return complete_mask_new
def get_mini_cube_mask_from_ellipse_params(self, x_c, y_c, params, coord_system='pix',color='green'):
"""
Creates a 3D mask where all original masked voxels are masked out,
plus all voxels associated to spaxels outside the elliptical region
defined by the given parameters.
:param x_c: center of the elliptical aperture
:param y_c: center of the elliptical aperture
:param params: can be a single radius (float) of a circular aperture, or an (a,b,theta) tuple
:param coord_system: default: pix, possible values: pix, wcs
:return: complete_mask_new: a new mask for the cube
"""
if not isinstance(params, (int, float, tuple, list, np.ndarray)):
raise ValueError('Not ready for this `params` type.')
if isinstance(params, (int, float)):
a = params
b = params
theta = 0
elif isiterable(params) and (len(params) == 3):
a = max(params[:2])
b = min(params[:2])
theta = params[2]
else:
raise ValueError('If iterable, the length of params must be 3; otherwise use a float.')
region_string = self.ellipse_param_to_ds9reg_string(x_c, y_c, a, b, theta, coord_system=coord_system,color=color)
complete_mask_new = self.get_new_3dmask(region_string)
return complete_mask_new
def ellipse_param_to_ds9reg_string(self, xc, yc, a, b, theta, color='green', coord_system='pix'):
"""Creates a string that defines an elliptical region given by the
parameters using the DS9 convention.
"""
if coord_system == 'wcs':
x_center, y_center, radius = self.ellipse_params_to_pixel(xc, yc, params=[a, b, theta])
else: # already in pixels
x_center, y_center, radius = xc, yc, [a, b, theta]
region_string = 'physical;ellipse({},{},{},{},{}) # color = {}'.format(x_center, y_center, radius[0],
radius[1],
radius[2], color)
return region_string
def wfrac_show_spaxels(self, frac, mask2d, smoothed_white):
y, x = np.where(~mask2d)
n = len(x)
im_white = smoothed_white[~mask2d]
fl_limit = np.percentile(im_white, (1. - frac) * 100.)
for i in xrange(n):
if smoothed_white[y[i]][x[i]] >= fl_limit:
plt.figure(self.n)
plt.plot(x[i] + 1, y[i] + 1, 'o', color='Blue')
def _test_3dmask(self, region_string, alpha=0.8, slice=0):
complete_mask = self.get_new_3dmask(region_string)
mask_slice = complete_mask[int(slice)]
plt.figure(self.n)
plt.imshow(mask_slice, alpha=alpha)
self.draw_pyregion(region_string)
def get_new_2dmask(self, region_string):
"""Creates a 2D mask for the white image that masks out spaxels that are outside
the region defined by region_string"""
from pyregion.region_to_filter import as_region_filter
im_aux = np.ones_like(self.white_data)
hdu_aux = fits.open(self.filename_white)[1]
hdu_aux.data = im_aux
hdulist = self.hdulist_white
r = pyregion.parse(region_string).as_imagecoord(hdulist[1].header)
shape = hdu_aux.data.shape
region_filter = as_region_filter(r, origin=0)
mask_new = region_filter.mask(shape)
mask_new_inverse = np.where(~mask_new, True, False)
return mask_new_inverse
def get_new_3dmask(self, region_string):
"""Creates a 3D mask for the cube that also masks out
spaxels that are outside the geometrical region defined by
region_string.
Parameters
----------
region_string : str
A string that defines a geometrical region using the
DS9 format (e.g. see http://ds9.si.edu/doc/ref/region.html)
Returns
-------
A 3D mask that includes already masked voxels from the original cube,
plus all spaxels outside the region defined by region_string.
Notes: It uses pyregion package.
"""
mask2d = self.get_new_2dmask(region_string)
complete_mask_new = mask2d + self.mask_init
complete_mask_new = np.where(complete_mask_new != 0, True, False)
self.draw_pyregion(region_string)
return complete_mask_new
def plot_sextractor_regions(self, sextractor_filename, a_min=3.5, flag_threshold=32, wcs_coords=False, n_id=None, border_thresh=1):
self.reload_canvas()
x_pix = np.array(self.get_from_table(sextractor_filename, 'X_IMAGE'))
y_pix = np.array(self.get_from_table(sextractor_filename, 'Y_IMAGE'))
a = np.array(self.get_from_table(sextractor_filename, 'A_IMAGE'))
a_new = np.where(a < a_min, a_min, a)
b = np.array(self.get_from_table(sextractor_filename, 'B_IMAGE'))
ratios = a / b
b_new = a_new / ratios
b_new = np.where(b_new < 1, 1, b_new)
a = a_new
b = b_new
theta = np.array(self.get_from_table(sextractor_filename, 'THETA_IMAGE'))
flags = self.get_from_table(sextractor_filename, 'FLAGS').data
id = self.get_from_table(sextractor_filename, 'NUMBER').data
mag = self.get_from_table(sextractor_filename, 'MAG_AUTO').data
n = len(x_pix)
if wcs_coords:
x_world = np.array(self.get_from_table(sextractor_filename, 'X_WORLD'))
y_world = np.array(self.get_from_table(sextractor_filename, 'Y_WORLD'))
a_world = np.array(self.get_from_table(sextractor_filename, 'A_WORLD'))
b_world = np.array(self.get_from_table(sextractor_filename, 'B_WORLD'))
a_min_wcs = a_min * self.pixelsize
a_min_wcs = a_min_wcs.to(u.deg).value
a_world_new = np.where(a_world < a_min_wcs, a_min_wcs, a_world)
ratios_wcs = a_world / b_world
b_world_new = a_world_new / ratios_wcs
b_world_new = np.where(b_world_new < self.pixelsize.to(u.deg).value, self.pixelsize.to(u.deg).value,
b_world_new)
a_world = a_world_new
b_world = b_world_new
for i in xrange(n):
params_wcs = [a_world[i], b_world[i], theta[i]]
x_pix[i], y_pix[i], params = self.ellipse_params_to_pixel(x_world[i], y_world[i], params=params_wcs)
a[i] = params[0]
b[i] = params[1]
x2=[]
y2=[]
a2=[]
b2=[]
theta2=[]
flags2=[]
id2=[]
mag2=[]
ly,lx=self.white_data.shape
for i in xrange(n):
if x_pix[i]>=border_thresh and y_pix[i]>=border_thresh and x_pix[i]<=lx-border_thresh and y_pix[i]<=ly-border_thresh:
x2.append(x_pix[i])
y2.append(y_pix[i])
a2.append(a[i])
b2.append(b[i])
theta2.append(theta[i])
flags2.append(flags[i])
id2.append(id[i])
mag2.append(mag[i])
x_pix=np.array(x2)
y_pix=np.array(y2)
a=np.array(a2)
b=np.array(b2)
theta=np.array(theta2)
flags=np.array(flags2)
id=np.array(id2)
mag=np.array(mag2)
n=len(x_pix)
if n_id is not None:
j = np.where(id == n_id)[0][0]
region_string = self.ellipse_param_to_ds9reg_string(x_pix[j], y_pix[j], a[j], b[j], theta[j], color='Green')
self.draw_pyregion(region_string)
plt.text(x_pix[j], y_pix[j], id[j], color='Red')
return
for i in xrange(n):
color = 'Green'
if flags[i] > flag_threshold:
color = 'Red'
region_string = self.ellipse_param_to_ds9reg_string(x_pix[i], y_pix[i], a[i], b[i], theta[i], color=color)
self.draw_pyregion(region_string)
plt.text(x_pix[i], y_pix[i], id[i], color='Red')
return x_pix, y_pix, a, b, theta, flags, id, mag
def save_sextractor_specs(self, sextractor_filename, flag_threshold=32, redmonster_format=True, a_min=3.5,
n_figure=2, wcs_coords=False,
mode='wwm', mag_kwrd='mag_r', npix=0, frac=0.1, border_thresh=1):
x_pix, y_pix, a, b, theta, flags, id, mag = self.plot_sextractor_regions(
sextractor_filename=sextractor_filename, a_min=a_min,
flag_threshold=flag_threshold, wcs_coords=wcs_coords, border_thresh=border_thresh)
self.reload_canvas()
n = len(x_pix)
for i in xrange(n):
if flags[i] <= flag_threshold:
x_world, y_world = self.p2w(x_pix[i], y_pix[i])
coord = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
spec_fits_name = name_from_coord(coord)
spec = self.get_spec_from_ellipse_params(x_c=x_pix[i], y_c=y_pix[i], params=[a[i], b[i], theta[i]],
mode=mode, npix=npix, frac=frac, save=False, n_figure=n_figure)
str_id = str(id[i]).zfill(3)
spec_fits_name = str_id + '_' + spec_fits_name
if redmonster_format:
mcu.spec_to_redmonster_format(spec=spec, fitsname=spec_fits_name + '_RMF.fits', n_id=id[i],
mag=[mag_kwrd, mag[i]])
else:
spec.write_to_fits(spec_fits_name + '.fits')
hdulist = fits.open(spec_fits_name + '.fits')
hdulist[0].header[mag_kwrd] = mag[i]
hdulist.writeto(spec_fits_name + '.fits', clobber=True)
print('ID = ' + str_id + ' Ready!!')
def __read_files(self, input):
path = input
files = glob.glob(path)
return files
def create_movie_wavelength_range(self, initial_wavelength, final_wavelength, width=5., outvid='wave_video.avi',
erase=True):
"""
Function to create a film over a wavelength range of the cube
:param initial_wavelength: initial wavelength of the film
:param final_wavelength: final wavelength of the film
:param width: width of the wavelength range in each frame
:param outvid: name of the final video
:param erase: if True, the individual frames will be erased after producing the video
:return:
"""
wave = self.wavelength
n = len(wave)
w_max = wave[n - 1] - width - 1
if initial_wavelength < wave[0]:
print(str(initial_wavelength) + ' is below the minimum allowed wavelength, using ' + str(wave[0]) + ' instead')
initial_wavelength = wave[0]
if final_wavelength > wave[n - 1]:
print(str(final_wavelength) + ' is above the maximum allowed wavelength, using ' + str(w_max) + ' instead')
final_wavelength = w_max
if final_wavelength <= wave[0] or initial_wavelength >= wave[n - 1]:
raise ValueError('Input wavelength is not in valid range')
images_names = []
fitsnames = []
for i in xrange(int(initial_wavelength), int(final_wavelength)):
wavelength_range = (i, i + width)
filename = 'colapsed_image_' + str(i) + '_'
im = self.get_image(wv_input=[wavelength_range], fitsname=filename + '.fits', type='sum', save=True)
plt.close(15)
image = aplpy.FITSFigure(filename + '.fits', figure=plt.figure(15))
image.show_grayscale()
image.save(filename=filename + '.png')
fitsnames.append(filename + '.fits')
images_names.append(filename + '.png')
plt.close(15)
video = self.make_video(images=images_names, outvid=outvid)
n_im = len(fitsnames)
if erase:
for i in xrange(n_im):
fits_im = fitsnames[i]
png_im = images_names[i]
command_fits = 'rm ' + fits_im
command_png = 'rm ' + png_im
os.system(command_fits)
os.system(command_png)
return video
def find_wv_inds(self, wv_array):
"""
:param wv_array
:return: Returns the indices in the cube, that are closest to wv_array
"""
inds = [np.argmin(np.fabs(wv_ii - self.wavelength)) for wv_ii in wv_array]
inds = np.unique(inds)
return inds
def sub_cube(self, wv_input, stat=False):
"""
Returns a cube-like object with fewer wavelength elements
:param wv_input: tuple or np.array
:return: sub_cube: a sliced version of the cube (or of the stat extension if stat=True) containing only the requested wavelength elements
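Example (illustrative sketch; `cube` and the wavelength values are placeholders):
>>> sub = cube.sub_cube([[6500., 6600.]])  # every slice between 6500 and 6600 Angstroms
>>> sub = cube.sub_cube([4861.3, 6562.8])  # only the slices closest to these two wavelengths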
"""
if isinstance(wv_input[0], (tuple, list, np.ndarray)):
if len(wv_input[0]) != 2:
raise ValueError(
"If wv_input is given as tuple, it must be of length = 2, interpreted as (wv_min, wv_max)")
wv_inds = self.find_wv_inds(wv_input[0])
ind_min = np.min(wv_inds)
ind_max = np.max(wv_inds)
if stat:
sub_cube = self.stat[ind_min:ind_max + 1, :, :]
else:
sub_cube = self.cube[ind_min:ind_max + 1, :, :]
else: # assuming array-like for wv_input
wv_inds = self.find_wv_inds(wv_input)
if stat:
sub_cube = self.stat[wv_inds, :, :]
else:
sub_cube = self.cube[wv_inds, :, :]
return sub_cube
def get_filtered_image(self, _filter='r', save=True, n_figure=5, custom_filter=None):
"""
Function used to produce a filtered image from the cube
:param _filter: string, default = r
possible values: u, g, r, i, z (SDSS filters) or Johnson V, R
:param save: Boolean, default = True
If True, the image will be saved
:param custom_filter: Default = None.
If given, it can be a custom filter created by the user, formatted as [wc, fc],
where the first element is the wavelength array of the filter and the second is the
corresponding transmission curve.
:return:
"""
w = self.wavelength
if not custom_filter:
filter_curve = self.get_filter(wavelength_spec=w, _filter=_filter)
else:
wave_filter = custom_filter[0]
flux_filter = custom_filter[1]
filter_curve = self.filter_to_MUSE_wavelength(wave_filter, flux_filter, wavelength_spec=w)
condition = np.where(filter_curve > 0)[0]
fitsname = 'new_image_' + _filter + '_filter.fits'
sub_cube = self.cube[condition]
filter_curve_final = filter_curve[condition]
extra_dims = sub_cube.ndim - filter_curve_final.ndim
new_shape = filter_curve_final.shape + (1,) * extra_dims
new_filter_curve = filter_curve_final.reshape(new_shape)
new_filtered_cube = sub_cube * new_filter_curve
new_filtered_image = np.sum(new_filtered_cube, axis=0)
if save:
self.__save2fits(fitsname, new_filtered_image.data, type='white', n_figure=n_figure)
return new_filtered_image
def get_image(self, wv_input, fitsname='new_collapsed_cube.fits', type='sum', n_figure=2, save=False, stat=False,
maskfile=None, inverse_mask=True):
"""
Function used to collapse a given wavelength range using a sum or a median
:param wv_input: tuple or list
can be a list of wavelengths, or a list/tuple containing a single (wv_min, wv_max) pair that represents a range
:param fitsname: str
The name of the fits that will contain the new image
:param type: str, possible values: 'sum' or 'median'
The type of combination that will be done.
:param n_figure: int
Figure to display the new image if it is saved
:return:
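Example (illustrative sketch; `cube` and the file name are placeholders):
>>> im = cube.get_image([[6550., 6600.]], type='sum', fitsname='line_image.fits', save=True)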
"""
if maskfile:
r = pyregion.open(maskfile)
n = len(r)
masks = []
for i in xrange(n):
masks.append(self.region_2dmask(pyregion.ShapeList([r[i]])))
mask_final = masks[0]
for i in xrange(n):
mask_final = np.logical_and(mask_final, masks[i])
if inverse_mask:
mask_final = np.where(~mask_final, True, False)
sub_cube = self.sub_cube(wv_input, stat=stat)
if type == 'sum':
matrix_flat = np.sum(sub_cube, axis=0)
elif type == 'median':
matrix_flat = np.median(sub_cube, axis=0)
else:
raise ValueError('Unknown type, please choose sum or median')
if maskfile:
matrix_flat = np.where(mask_final == 1, matrix_flat, np.nan)
if save:
self.__save2fits(fitsname, matrix_flat, type='white', n_figure=n_figure)
else:
if save:
self.__save2fits(fitsname, matrix_flat.data, type='white', n_figure=n_figure)
return matrix_flat
def get_continuum_range(self, range):
"""
:param range: tuple
contains the range of an emission line. The continuum will be computed around this range
:return: cont_range_inf: The continuum range at the left of the Emission line, same length than input range
cont_range_sup: The continuum range at the right of the Emission line, same length than input range
n : The number of elements in the wavelength space inside the ranges
"""
wv_inds = self.find_wv_inds(range)
n = wv_inds[1] - wv_inds[0]
wv_inds_sup = wv_inds + n
wv_inds_inf = wv_inds - n
cont_range_inf = self.wavelength[wv_inds_inf]
cont_range_sup = self.wavelength[wv_inds_sup]
return cont_range_inf, cont_range_sup, n
def get_image_wv_ranges(self, wv_ranges, substract_cont=True, fitsname='new_collapsed_cube.fits', save=False,
n_figure=3):
image_stacker = np.zeros_like(self.white_data)
for r in wv_ranges:
image = self.get_image([r])
cont_range_inf, cont_range_sup, n = self.get_continuum_range(r)
cont_inf_image = self.get_image([cont_range_inf], type='median')
cont_sup_image = self.get_image([cont_range_sup], type='median')
cont_image = (n + 1) * (cont_inf_image + cont_sup_image) / 2.
if substract_cont:
image = image - cont_image
image_stacker = image_stacker + image.data
image_stacker = np.where(image_stacker < 0, 0, image_stacker)
if save:
self.__save2fits(fitsname, image_stacker, type='white', n_figure=n_figure)
return image_stacker
def create_white(self, new_white_fitsname='white_from_colapse.fits', stat=False, save=True):
"""
Function that collapses all wavelengths available to produce a new white image
:param new_white_fitsname: Name of the new image
:return:
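Example (illustrative sketch; `cube` and the file name are placeholders):
>>> white = cube.create_white(new_white_fitsname='new_white_image.fits', save=True)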
"""
wave = self.wavelength
n = len(wave)
wv_input = [[wave[0], wave[n - 1]]]
white_image = self.get_image(wv_input, fitsname=new_white_fitsname, stat=stat, save=save)
return white_image
def calculate_mag(self, wavelength, flux, _filter, zeropoint_flux=9.275222661263278e-07):
dw = np.diff(wavelength)
new_flux = flux * _filter
f_mean = (new_flux[:-1] + new_flux[1:]) * 0.5
total_flux = np.sum(f_mean * dw) * self.flux_units.value
mag = -2.5 * np.log10(total_flux / zeropoint_flux)
return mag
def get_filter(self, wavelength_spec, _filter='r'):
wave_u = np.arange(2980, 4155, 25)
wave_g = np.arange(3630, 5855, 25)
wave_r = np.arange(5380, 7255, 25)
wave_i = np.arange(6430, 8655, 25)
wave_z = np.arange(7730, 11255, 25)
wave_R = np.array([5445., 5450., 5455., 5460., 5465., 5470., 5475., 5480.,
5485., 5490., 5495., 5500., 5505., 5510., 5515., 5520.,
5525., 5530., 5535., 5540., 5545., 5550., 5555., 5560.,
5565., 5570., 5575., 5580., 5585., 5590., 5595., 5600.,
5605., 5610., 5615., 5620., 5625., 5630., 5635., 5640.,
5645., 5650., 5655., 5660., 5665., 5670., 5675., 5680.,
5685., 5690., 5695., 5700., 5705., 5710., 5715., 5720.,
5725., 5730., 5735., 5740., 5745., 5750., 5755., 5760.,
5765., 5770., 5775., 5780., 5785., 5790., 5795., 5800.,
5805., 5810., 5815., 5820., 5825., 5830., 5835., 5840.,
5845., 5850., 5855., 5860., 5865., 5870., 5875., 5880.,
5885., 5890., 5895., 5900., 5905., 5910., 5915., 5920.,
5925., 5930., 5935., 5940., 5945., 5950., 5955., 5960.,
5965., 5970., 5975., 5980., 5985., 5990., 5995., 6000.,
6005., 6010., 6015., 6020., 6025., 6030., 6035., 6040.,
6045., 6050., 6055., 6060., 6065., 6070., 6075., 6080.,
6085., 6090., 6095., 6100., 6105., 6110., 6115., 6120.,
6125., 6130., 6135., 6140., 6145., 6150., 6155., 6160.,
6165., 6170., 6175., 6180., 6185., 6190., 6195., 6200.,
6205., 6210., 6215., 6220., 6225., 6230., 6235., 6240.,
6245., 6250., 6255., 6260., 6265., 6270., 6275., 6280.,
6285., 6290., 6295., 6300., 6305., 6310., 6315., 6320.,
6325., 6330., 6335., 6340., 6345., 6350., 6355., 6360.,
6365., 6370., 6375., 6380., 6385., 6390., 6395., 6400.,
6405., 6410., 6415., 6420., 6425., 6430., 6435., 6440.,
6445., 6450., 6455., 6460., 6465., 6470., 6475., 6480.,
6485., 6490., 6495., 6500., 6505., 6510., 6515., 6520.,
6525., 6530., 6535., 6540., 6545., 6550., 6555., 6560.,
6565., 6570., 6575., 6580., 6585., 6590., 6595., 6600.,
6605., 6610., 6615., 6620., 6625., 6630., 6635., 6640.,
6645., 6650., 6655., 6660., 6665., 6670., 6675., 6680.,
6685., 6690., 6695., 6700., 6705., 6710., 6715., 6720.,
6725., 6730., 6735., 6740., 6745., 6750., 6755., 6760.,
6765., 6770., 6775., 6780., 6785., 6790., 6795., 6800.,
6805., 6810., 6815., 6820., 6825., 6830., 6835., 6840.,
6845., 6850., 6855., 6860., 6865., 6870., 6875., 6880.,
6885., 6890., 6895., 6900., 6905., 6910., 6915., 6920.,
6925., 6930., 6935., 6940., 6945., 6950., 6955., 6960.,
6965., 6970., 6975., 6980., 6985., 6990., 6995., 7000.,
7005., 7010., 7015., 7020., 7025., 7030., 7035., 7040.,
7045., 7050., 7055., 7060., 7065., 7070., 7075., 7080.,
7085., 7090., 7095., 7100., 7105., 7110., 7115., 7120.,
7125., 7130., 7135., 7140., 7145., 7150., 7155., 7160.,
7165., 7170., 7175., 7180., 7185., 7190., 7195., 7200.,
7205., 7210., 7215., 7220., 7225., 7230., 7235., 7240.,
7245., 7250., 7255., 7260., 7265., 7270., 7275., 7280.,
7285., 7290., 7295., 7300., 7305., 7310., 7315., 7320.,
7325., 7330., 7335., 7340., 7345., 7350., 7355., 7360.,
7365., 7370., 7375., 7380., 7385., 7390., 7395., 7400.,
7405., 7410., 7415., 7420., 7425., 7430., 7435., 7440.,
7445., 7450., 7455., 7460., 7465., 7470., 7475., 7480.,
7485., 7490., 7495., 7500., 7505., 7510., 7515., 7520.,
7525., 7530., 7535., 7540., 7545., 7550., 7555., 7560.,
7565., 7570., 7575., 7580., 7585., 7590., 7595., 7600.,
7605., 7610., 7615., 7620., 7625., 7630., 7635., 7640.,
7645., 7650., 7655., 7660., 7665., 7670., 7675., 7680.,
7685., 7690., 7695., 7700., 7705., 7710., 7715., 7720.,
7725., 7730., 7735., 7740., 7745., 7750., 7755., 7760.,
7765., 7770., 7775., 7780., 7785., 7790., 7795., 7800.,
7805., 7810., 7815., 7820., 7825., 7830., 7835., 7840.,
7845., 7850., 7855., 7860., 7865., 7870., 7875., 7880.,
7885., 7890., 7895., 7900., 7905., 7910., 7915., 7920.,
7925., 7930., 7935., 7940., 7945., 7950., 7955., 7960.,
7965., 7970., 7975., 7980., 7985., 7990., 7995., 8000.,
8005., 8010., 8015., 8020., 8025., 8030., 8035., 8040.,
8045., 8050., 8055., 8060., 8065., 8070., 8075., 8080.,
8085., 8090., 8095., 8100., 8105., 8110., 8115., 8120.,
8125., 8130., 8135., 8140., 8145., 8150., 8155., 8160.,
8165., 8170., 8175., 8180., 8185., 8190., 8195., 8200.,
8205., 8210., 8215., 8220., 8225., 8230., 8235., 8240.,
8245., 8250., 8255., 8260., 8265., 8270., 8275., 8280.,
8285., 8290., 8295., 8300., 8305., 8310., 8315., 8320.,
8325., 8330., 8335., 8340., 8345., 8350., 8355., 8360.,
8365., 8370., 8375., 8380., 8385., 8390., 8395., 8400.,
8405., 8410., 8415., 8420., 8425., 8430., 8435., 8440.,
8445., 8450., 8455., 8460., 8465., 8470., 8475., 8480.,
8485., 8490., 8495., 8500., 8505., 8510., 8515., 8520.,
8525., 8530., 8535., 8540., 8545., 8550., 8555., 8560.,
8565., 8570., 8575., 8580., 8585., 8590., 8595., 8600.,
8605., 8610., 8615., 8620., 8625., 8630., 8635., 8640.,
8645., 8650., 8655., 8660., 8665., 8670., 8675., 8680.,
8685., 8690., 8695., 8700., 8705., 8710., 8715., 8720.,
8725., 8730., 8735., 8740., 8745., 8750., 8755., 8760.,
8765., 8770., 8775., 8780., 8785., 8790., 8795., 8800.,
8805., 8810., 8815., 8820., 8825., 8830., 8835., 8840.,
8845., 8850., 8855., 8860., 8865., 8870., 8875., 8880.,
8885., 8890., 8895., 8900., 8905., 8910., 8915., 8920.,
8925., 8930., 8935., 8940., 8945., 8950., 8955., 8960.,
8965., 8970., 8975., 8980., 8985., 8990., 8995., 9000.,
9005., 9010., 9015., 9020., 9025., 9030., 9035., 9040.,
9045., 9050., 9055., 9060., 9065., 9070., 9075., 9080.,
9085., 9090., 9095., 9100., 9105., 9110., 9115., 9120.,
9125., 9130., 9135., 9140., 9145., 9150., 9155., 9160.,
9165., 9170., 9175., 9180., 9185., 9190., 9195., 9200.,
9205., 9210., 9215., 9220., 9225., 9230., 9235., 9240.,
9245., 9250., 9255., 9260., 9265., 9270., 9275., 9280.,
9285., 9290., 9295., 9300., 9305., 9310., 9315., 9320.,
9325., 9330., 9335., 9340.])
wave_V = np.array([4760., 4765., 4770., 4775., 4780., 4785., 4790., 4795.,
4800., 4805., 4810., 4815., 4820., 4825., 4830., 4835.,
4840., 4845., 4850., 4855., 4860., 4865., 4870., 4875.,
4880., 4885., 4890., 4895., 4900., 4905., 4910., 4915.,
4920., 4925., 4930., 4935., 4940., 4945., 4950., 4955.,
4960., 4965., 4970., 4975., 4980., 4985., 4990., 4995.,
5000., 5005., 5010., 5015., 5020., 5025., 5030., 5035.,
5040., 5045., 5050., 5055., 5060., 5065., 5070., 5075.,
5080., 5085., 5090., 5095., 5100., 5105., 5110., 5115.,
5120., 5125., 5130., 5135., 5140., 5145., 5150., 5155.,
5160., 5165., 5170., 5175., 5180., 5185., 5190., 5195.,
5200., 5205., 5210., 5215., 5220., 5225., 5230., 5235.,
5240., 5245., 5250., 5255., 5260., 5265., 5270., 5275.,
5280., 5285., 5290., 5295., 5300., 5305., 5310., 5315.,
5320., 5325., 5330., 5335., 5340., 5345., 5350., 5355.,
5360., 5365., 5370., 5375., 5380., 5385., 5390., 5395.,
5400., 5405., 5410., 5415., 5420., 5425., 5430., 5435.,
5440., 5445., 5450., 5455., 5460., 5465., 5470., 5475.,
5480., 5485., 5490., 5495., 5500., 5505., 5510., 5515.,
5520., 5525., 5530., 5535., 5540., 5545., 5550., 5555.,
5560., 5565., 5570., 5575., 5580., 5585., 5590., 5595.,
5600., 5605., 5610., 5615., 5620., 5625., 5630., 5635.,
5640., 5645., 5650., 5655., 5660., 5665., 5670., 5675.,
5680., 5685., 5690., 5695., 5700., 5705., 5710., 5715.,
5720., 5725., 5730., 5735., 5740., 5745., 5750., 5755.,
5760., 5765., 5770., 5775., 5780., 5785., 5790., 5795.,
5800., 5805., 5810., 5815., 5820., 5825., 5830., 5835.,
5840., 5845., 5850., 5855., 5860., 5865., 5870., 5875.,
5880., 5885., 5890., 5895., 5900., 5905., 5910., 5915.,
5920., 5925., 5930., 5935., 5940., 5945., 5950., 5955.,
5960., 5965., 5970., 5975., 5980., 5985., 5990., 5995.,
6000., 6005., 6010., 6015., 6020., 6025., 6030., 6035.,
6040., 6045., 6050., 6055., 6060., 6065., 6070., 6075.,
6080., 6085., 6090., 6095., 6100., 6105., 6110., 6115.,
6120., 6125., 6130., 6135., 6140., 6145., 6150., 6155.,
6160., 6165., 6170., 6175., 6180., 6185., 6190., 6195.,
6200., 6205., 6210., 6215., 6220., 6225., 6230., 6235.,
6240., 6245., 6250., 6255., 6260., 6265., 6270., 6275.,
6280., 6285., 6290., 6295., 6300., 6305., 6310., 6315.,
6320., 6325., 6330., 6335., 6340., 6345., 6350., 6355.,
6360., 6365., 6370., 6375., 6380., 6385., 6390., 6395.,
6400., 6405., 6410., 6415., 6420., 6425., 6430., 6435.,
6440., 6445., 6450., 6455., 6460., 6465., 6470., 6475.,
6480., 6485., 6490., 6495., 6500., 6505., 6510., 6515.,
6520., 6525., 6530., 6535., 6540., 6545., 6550., 6555.,
6560., 6565., 6570., 6575., 6580., 6585., 6590., 6595.,
6600., 6605., 6610., 6615., 6620., 6625., 6630., 6635.,
6640., 6645., 6650., 6655., 6660., 6665., 6670., 6675.,
6680., 6685., 6690., 6695., 6700., 6705., 6710., 6715.,
6720., 6725., 6730., 6735., 6740., 6745., 6750., 6755.,
6760., 6765., 6770., 6775., 6780., 6785., 6790., 6795.,
6800., 6805., 6810., 6815., 6820., 6825., 6830., 6835.,
6840., 6845., 6850., 6855., 6860., 6865., 6870., 6875.,
6880., 6885., 6890., 6895., 6900., 6905., 6910., 6915.,
6920., 6925., 6930., 6935., 6940., 6945., 6950., 6955.,
6960., 6965., 6970., 6975., 6980., 6985., 6990., 6995.,
7000., 7005., 7010., 7015., 7020., 7025., 7030., 7035.,
7040., 7045., 7050., 7055., 7060., 7065., 7070., 7075.,
7080., 7085., 7090., 7095., 7100., 7105., 7110., 7115.,
7120., 7125., 7130., 7135., 7140., 7145., 7150., 7155.,
7160., 7165., 7170., 7175., 7180., 7185., 7190., 7195.,
7200., 7205., 7210., 7215., 7220., 7225., 7230., 7235.,
7240., 7245., 7250., 7255., 7260., 7265., 7270., 7280.])
flux_V = np.array([9.64320839e-03, 1.17108273e-02, 1.43528032e-02,
1.75631618e-02, 2.11335897e-02, 2.55253673e-02,
3.07395792e-02, 3.66303658e-02, 4.38177156e-02,
5.15626001e-02, 6.09055328e-02, 7.15601015e-02,
8.32171154e-02, 9.64917278e-02, 1.11321487e-01,
1.27047434e-01, 1.45095301e-01, 1.63879433e-01,
1.84025288e-01, 2.05674400e-01, 2.27541790e-01,
2.51783009e-01, 2.76728153e-01, 3.02018051e-01,
3.28636360e-01, 3.54072228e-01, 3.81254387e-01,
4.08208084e-01, 4.34315758e-01, 4.61384430e-01,
4.87483635e-01, 5.12711716e-01, 5.38157120e-01,
5.61274338e-01, 5.85662842e-01, 6.07098885e-01,
6.29042625e-01, 6.51120758e-01, 6.71111679e-01,
6.87856445e-01, 7.05869598e-01, 7.21706085e-01,
7.38656692e-01, 7.51982346e-01, 7.66451569e-01,
7.79320374e-01, 7.91537857e-01, 8.01387253e-01,
8.12644043e-01, 8.21886444e-01, 8.30849152e-01,
8.39123459e-01, 8.45743408e-01, 8.53470001e-01,
8.60292893e-01, 8.66531220e-01, 8.72752762e-01,
8.77110748e-01, 8.82006912e-01, 8.87016678e-01,
8.91045380e-01, 8.94107590e-01, 8.97235336e-01,
9.00786133e-01, 9.03548050e-01, 9.06549301e-01,
9.08831177e-01, 9.11690445e-01, 9.12861023e-01,
9.15185928e-01, 9.17089386e-01, 9.17668686e-01,
9.20558548e-01, 9.21113205e-01, 9.22701874e-01,
9.23237000e-01, 9.24772034e-01, 9.25894012e-01,
9.26325073e-01, 9.27905960e-01, 9.27411652e-01,
9.28828430e-01, 9.28686295e-01, 9.30086288e-01,
9.29822846e-01, 9.30881195e-01, 9.30577240e-01,
9.31094971e-01, 9.30789261e-01, 9.30882034e-01,
9.31607895e-01, 9.31012649e-01, 9.30543594e-01,
9.30507584e-01, 9.30894165e-01, 9.30728226e-01,
9.30551834e-01, 9.30233002e-01, 9.30283814e-01,
9.30285187e-01, 9.29119644e-01, 9.28713150e-01,
9.28867035e-01, 9.28172684e-01, 9.28012314e-01,
9.27614441e-01, 9.26771698e-01, 9.26360092e-01,
9.25508957e-01, 9.24991302e-01, 9.24198074e-01,
9.22970123e-01, 9.22512283e-01, 9.21908951e-01,
9.20856094e-01, 9.20415039e-01, 9.19665604e-01,
9.18579636e-01, 9.17498093e-01, 9.16515350e-01,
9.15503616e-01, 9.14212112e-01, 9.13366013e-01,
9.12551498e-01, 9.11715393e-01, 9.10380325e-01,
9.09479599e-01, 9.07609863e-01, 9.06777115e-01,
9.05421143e-01, 9.04353409e-01, 9.02455139e-01,
9.00539398e-01, 9.00131378e-01, 8.98344574e-01,
8.96168747e-01, 8.94843826e-01, 8.92673111e-01,
8.91329804e-01, 8.90147629e-01, 8.88428879e-01,
8.87021027e-01, 8.85309372e-01, 8.83131332e-01,
8.81392059e-01, 8.78589477e-01, 8.76842956e-01,
8.75344315e-01, 8.73290176e-01, 8.71898727e-01,
8.69045715e-01, 8.67195282e-01, 8.64461823e-01,
8.62905884e-01, 8.60242310e-01, 8.57690887e-01,
8.55785751e-01, 8.53161774e-01, 8.51369553e-01,
8.48543091e-01, 8.46092071e-01, 8.43811874e-01,
8.40855102e-01, 8.38205032e-01, 8.35638428e-01,
8.33058090e-01, 8.29829483e-01, 8.26507950e-01,
8.24152756e-01, 8.21133499e-01, 8.17982101e-01,
8.14945984e-01, 8.11371536e-01, 8.08797302e-01,
8.05465164e-01, 8.02152329e-01, 7.99375458e-01,
7.95579987e-01, 7.91873245e-01, 7.88838119e-01,
7.84947052e-01, 7.82865982e-01, 7.77375183e-01,
7.74711151e-01, 7.71566467e-01, 7.67292709e-01,
7.63668289e-01, 7.60665512e-01, 7.55569534e-01,
7.52378006e-01, 7.48392868e-01, 7.44523621e-01,
7.40757904e-01, 7.36248322e-01, 7.32364731e-01,
7.28448029e-01, 7.23732147e-01, 7.19756775e-01,
7.15782394e-01, 7.11536713e-01, 7.07296219e-01,
7.02669830e-01, 6.98336868e-01, 6.93820877e-01,
6.89229584e-01, 6.85463638e-01, 6.80321579e-01,
6.75755997e-01, 6.71247406e-01, 6.66305160e-01,
6.61537552e-01, 6.56552429e-01, 6.51618576e-01,
6.46831970e-01, 6.42130890e-01, 6.37422791e-01,
6.32663307e-01, 6.26985092e-01, 6.22300797e-01,
6.17429542e-01, 6.11961975e-01, 6.07117996e-01,
6.01615372e-01, 5.96683311e-01, 5.91556473e-01,
5.85764580e-01, 5.81412506e-01, 5.75745583e-01,
5.70708580e-01, 5.65521469e-01, 5.60354004e-01,
5.55104981e-01, 5.49598465e-01, 5.44442787e-01,
5.39409828e-01, 5.34089699e-01, 5.28689613e-01,
5.23753700e-01, 5.18192368e-01, 5.12720947e-01,
5.07284508e-01, 5.01651344e-01, 4.96233330e-01,
4.90987473e-01, 4.85806465e-01, 4.80457954e-01,
4.74516029e-01, 4.69459343e-01, 4.63997955e-01,
4.58108025e-01, 4.52913590e-01, 4.47898445e-01,
4.41578674e-01, 4.36835709e-01, 4.31392746e-01,
4.25792809e-01, 4.20569115e-01, 4.14983521e-01,
4.09441910e-01, 4.04065590e-01, 3.98449898e-01,
3.93368378e-01, 3.88108597e-01, 3.82731361e-01,
3.77610168e-01, 3.72011795e-01, 3.66899109e-01,
3.61709938e-01, 3.56277771e-01, 3.51459427e-01,
3.46341896e-01, 3.41169662e-01, 3.36199074e-01,
3.31208305e-01, 3.26275864e-01, 3.21232452e-01,
3.15962257e-01, 3.11138630e-01, 3.06086445e-01,
3.01351910e-01, 2.96466599e-01, 2.91627788e-01,
2.86797676e-01, 2.81993294e-01, 2.77036629e-01,
2.72600326e-01, 2.67752075e-01, 2.63035870e-01,
2.58718491e-01, 2.53945446e-01, 2.49440594e-01,
2.44970150e-01, 2.40328617e-01, 2.36014404e-01,
2.31458073e-01, 2.27129078e-01, 2.22980728e-01,
2.18599091e-01, 2.14399776e-01, 2.10105076e-01,
2.05955944e-01, 2.01979485e-01, 1.97873592e-01,
1.93701324e-01, 1.89863262e-01, 1.85919723e-01,
1.82102280e-01, 1.78372879e-01, 1.74555264e-01,
1.70942688e-01, 1.67413940e-01, 1.63823414e-01,
1.60374756e-01, 1.56812820e-01, 1.53197708e-01,
1.49876614e-01, 1.46493282e-01, 1.43237667e-01,
1.40090466e-01, 1.36744709e-01, 1.33655767e-01,
1.30583868e-01, 1.27497015e-01, 1.24574251e-01,
1.21548195e-01, 1.18785553e-01, 1.15858727e-01,
1.12972259e-01, 1.10239296e-01, 1.07432098e-01,
1.04911184e-01, 1.02240067e-01, 9.96163654e-02,
9.71846867e-02, 9.46867275e-02, 9.21891499e-02,
8.98626804e-02, 8.74147129e-02, 8.50797844e-02,
8.28987694e-02, 8.06197929e-02, 7.84934664e-02,
7.63682270e-02, 7.41679907e-02, 7.21602154e-02,
7.01406241e-02, 6.82159948e-02, 6.62652016e-02,
6.43459272e-02, 6.24867964e-02, 6.07102966e-02,
5.90227270e-02, 5.73293352e-02, 5.56865645e-02,
5.40774345e-02, 5.24679184e-02, 5.08922577e-02,
4.93965530e-02, 4.79321527e-02, 4.64570713e-02,
4.50907946e-02, 4.36638164e-02, 4.23424053e-02,
4.10112333e-02, 3.97419786e-02, 3.85188985e-02,
3.72569108e-02, 3.61442852e-02, 3.49567914e-02,
3.37763834e-02, 3.27081037e-02, 3.15532732e-02,
3.05547738e-02, 2.96382666e-02, 2.86316228e-02,
2.76253200e-02, 2.67284703e-02, 2.57629275e-02,
2.48762655e-02, 2.40548301e-02, 2.32087660e-02,
2.23887801e-02, 2.16649318e-02, 2.08810973e-02,
2.01191974e-02, 1.93965495e-02, 1.86923802e-02,
1.80622673e-02, 1.73420966e-02, 1.67779624e-02,
1.61432099e-02, 1.55458522e-02, 1.49808991e-02,
1.44260824e-02, 1.38898337e-02, 1.33757555e-02,
1.28895402e-02, 1.24336338e-02, 1.19317114e-02,
1.14778078e-02, 1.10224903e-02, 1.05936778e-02,
1.01979625e-02, 9.80331957e-03, 9.42119420e-03,
9.06843662e-03, 8.70236576e-03, 8.36401224e-03,
8.02174568e-03, 7.69513190e-03, 7.42049038e-03,
7.12957501e-03, 6.81147277e-03, 6.56225324e-03,
6.28752470e-03, 6.03279233e-03, 5.78228355e-03,
5.52640975e-03, 5.31245232e-03, 5.07642031e-03,
4.86187398e-03, 4.66857612e-03, 4.48455602e-03,
4.28951621e-03, 4.10438061e-03, 3.94181907e-03,
3.77903283e-03, 3.61310929e-03, 3.43858838e-03,
3.30562413e-03, 3.16893756e-03, 3.00862283e-03,
2.88184345e-03, 2.75286794e-03, 2.63536334e-03,
2.52844244e-03, 2.39721924e-03, 2.31343344e-03,
2.19719976e-03, 2.09656358e-03, 2.02219427e-03,
1.91874027e-03, 1.81754440e-03, 1.74118712e-03,
1.66113898e-03, 1.58724680e-03, 1.51313767e-03,
1.44662365e-03, 1.39100656e-03, 1.33283704e-03,
1.26319885e-03, 1.18512645e-03, 1.14880271e-03,
1.08921751e-03, 1.04411282e-03, 1.01634525e-03,
9.41211507e-04, 9.03511718e-04, 8.70077759e-04,
8.34191218e-04, 7.73599520e-04, 7.44963065e-04,
7.18376786e-04, 6.85756877e-04, 6.50605410e-04,
6.14275858e-04, 5.89862131e-04, 5.59216291e-04,
5.29026911e-04, 4.99960780e-04, 4.72659841e-04,
4.56626341e-04, 4.29005548e-04, 4.13897783e-04,
3.97251360e-04, 3.70411240e-04, 3.54581289e-04,
3.36891152e-04, 3.18884142e-04, 3.09158638e-04,
2.87089385e-04, 2.75648981e-04, 2.56309062e-04,
2.48264093e-04, 2.32592076e-04, 2.18097549e-04,
2.10234672e-04, 2.01618839e-04, 1.92721710e-04,
1.84358787e-04, 1.78293809e-04, 1.73047427e-04,
1.48465503e-04, 1.50579475e-04, 1.37227150e-04,
1.30995326e-04, 1.18210996e-04, 1.10485023e-04,
1.12393992e-04, 1.07742772e-04, 1.06566232e-04,
8.77865311e-05, 9.66540072e-05, 8.63869675e-05])
flux_R = np.array([1.12660611e-04, 1.33478958e-04, 1.80384908e-04,
2.26182416e-04, 2.96486858e-04, 3.83854918e-04,
4.94274013e-04, 6.20536394e-04, 8.19598287e-04,
1.01240180e-03, 1.29484743e-03, 1.64972723e-03,
2.04623789e-03, 2.60429144e-03, 3.19142252e-03,
3.95557463e-03, 4.87352252e-03, 5.92993259e-03,
7.22202599e-03, 8.75534654e-03, 1.05062985e-02,
1.26144767e-02, 1.49658072e-02, 1.76800156e-02,
2.09657979e-02, 2.44697619e-02, 2.87300396e-02,
3.34529758e-02, 3.85330200e-02, 4.46062708e-02,
5.08374691e-02, 5.79355812e-02, 6.60423279e-02,
7.43976021e-02, 8.39634419e-02, 9.44021988e-02,
1.04971266e-01, 1.16864176e-01, 1.29295054e-01,
1.42394171e-01, 1.56620798e-01, 1.70939655e-01,
1.86083679e-01, 2.02246418e-01, 2.18151264e-01,
2.35699348e-01, 2.52898312e-01, 2.70299339e-01,
2.88551636e-01, 3.06377716e-01, 3.25947761e-01,
3.45086975e-01, 3.63418694e-01, 3.82655678e-01,
4.01391029e-01, 4.19963226e-01, 4.39177132e-01,
4.56956482e-01, 4.75537567e-01, 4.93223953e-01,
5.10155792e-01, 5.27090416e-01, 5.43785629e-01,
5.59207916e-01, 5.75155678e-01, 5.89269867e-01,
6.03433266e-01, 6.18236656e-01, 6.30981636e-01,
6.43544693e-01, 6.55758591e-01, 6.67161560e-01,
6.78610764e-01, 6.89398499e-01, 6.99007721e-01,
7.09150238e-01, 7.17486267e-01, 7.26359787e-01,
7.34181595e-01, 7.41922607e-01, 7.49040909e-01,
7.55139770e-01, 7.61801071e-01, 7.67739029e-01,
7.72209625e-01, 7.77520752e-01, 7.82076034e-01,
7.86005707e-01, 7.90121536e-01, 7.94920044e-01,
7.97914963e-01, 8.01576385e-01, 8.04085770e-01,
8.06881256e-01, 8.09733276e-01, 8.12508926e-01,
8.14496231e-01, 8.16916046e-01, 8.18313217e-01,
8.20173111e-01, 8.21818848e-01, 8.23354797e-01,
8.24062653e-01, 8.25225525e-01, 8.26539078e-01,
8.27467270e-01, 8.28310471e-01, 8.29260254e-01,
8.29644699e-01, 8.29694901e-01, 8.30798569e-01,
8.31418304e-01, 8.31113281e-01, 8.31175461e-01,
8.31436615e-01, 8.31268921e-01, 8.31743851e-01,
8.31236649e-01, 8.31876831e-01, 8.31575623e-01,
8.31600800e-01, 8.31209564e-01, 8.30701218e-01,
8.30457306e-01, 8.29995575e-01, 8.29173889e-01,
8.28681335e-01, 8.28388367e-01, 8.27705078e-01,
8.26961517e-01, 8.26470642e-01, 8.25616913e-01,
8.25088272e-01, 8.24414825e-01, 8.23818588e-01,
8.22574463e-01, 8.21790543e-01, 8.20854645e-01,
8.20430603e-01, 8.19333649e-01, 8.18388138e-01,
8.17239914e-01, 8.16441727e-01, 8.15142059e-01,
8.14114456e-01, 8.13138275e-01, 8.12385178e-01,
8.11399994e-01, 8.10151062e-01, 8.09062042e-01,
8.07826004e-01, 8.06391449e-01, 8.05179291e-01,
8.04337387e-01, 8.02874298e-01, 8.01418991e-01,
8.00320816e-01, 7.99105682e-01, 7.97680512e-01,
7.96293411e-01, 7.94735107e-01, 7.93599701e-01,
7.92142716e-01, 7.90940323e-01, 7.89540253e-01,
7.87977982e-01, 7.86476135e-01, 7.85149383e-01,
7.83683319e-01, 7.82463837e-01, 7.80975647e-01,
7.79384079e-01, 7.77804413e-01, 7.76397171e-01,
7.74585876e-01, 7.73283157e-01, 7.71683350e-01,
7.70116653e-01, 7.68394089e-01, 7.66989212e-01,
7.65374298e-01, 7.63670044e-01, 7.61980438e-01,
7.60181885e-01, 7.58677445e-01, 7.57341537e-01,
7.55792389e-01, 7.54106216e-01, 7.52319260e-01,
7.50747833e-01, 7.48828659e-01, 7.47205200e-01,
7.45405502e-01, 7.43702850e-01, 7.42157440e-01,
7.40391464e-01, 7.38478088e-01, 7.36322479e-01,
7.34597397e-01, 7.32816925e-01, 7.31027298e-01,
7.29303818e-01, 7.27694702e-01, 7.25626068e-01,
7.24098816e-01, 7.22092285e-01, 7.20166626e-01,
7.18592148e-01, 7.16398239e-01, 7.14680633e-01,
7.12456436e-01, 7.10820770e-01, 7.09065247e-01,
7.06785812e-01, 7.05026474e-01, 7.03354034e-01,
7.01381912e-01, 6.99503784e-01, 6.97199249e-01,
6.95120850e-01, 6.93079453e-01, 6.91699600e-01,
6.89639130e-01, 6.88427200e-01, 6.85872650e-01,
6.84145126e-01, 6.81911545e-01, 6.80322800e-01,
6.78288803e-01, 6.76393280e-01, 6.74223022e-01,
6.72408447e-01, 6.70496292e-01, 6.68415146e-01,
6.66331940e-01, 6.64745712e-01, 6.62663345e-01,
6.60627213e-01, 6.58656998e-01, 6.56490936e-01,
6.54593048e-01, 6.52417145e-01, 6.50451279e-01,
6.48244934e-01, 6.46139450e-01, 6.44154511e-01,
6.41925736e-01, 6.39975548e-01, 6.37752533e-01,
6.35898399e-01, 6.33897591e-01, 6.31938820e-01,
6.29536552e-01, 6.27312431e-01, 6.25279121e-01,
6.23031921e-01, 6.20859680e-01, 6.18729477e-01,
6.16721458e-01, 6.14748001e-01, 6.12250404e-01,
6.09872932e-01, 6.07715263e-01, 6.05285225e-01,
6.03101807e-01, 6.01018982e-01, 5.99403038e-01,
5.96835365e-01, 5.94723625e-01, 5.92363167e-01,
5.89933815e-01, 5.86952133e-01, 5.84768906e-01,
5.82397041e-01, 5.80457268e-01, 5.77794266e-01,
5.75973740e-01, 5.73014793e-01, 5.70719414e-01,
5.68651657e-01, 5.66127243e-01, 5.63723564e-01,
5.61353035e-01, 5.58687668e-01, 5.56360054e-01,
5.53829727e-01, 5.51511993e-01, 5.49103394e-01,
5.46937523e-01, 5.44495354e-01, 5.42087212e-01,
5.39432335e-01, 5.37001495e-01, 5.34510727e-01,
5.31703186e-01, 5.29667206e-01, 5.27464333e-01,
5.24670296e-01, 5.22587357e-01, 5.19773483e-01,
5.17762489e-01, 5.14889717e-01, 5.12675095e-01,
5.10391426e-01, 5.07693596e-01, 5.05560875e-01,
5.02788238e-01, 5.00663567e-01, 4.98405113e-01,
4.95754623e-01, 4.93308716e-01, 4.90971375e-01,
4.88512230e-01, 4.85908470e-01, 4.84007683e-01,
4.81591797e-01, 4.79094429e-01, 4.76312561e-01,
4.73944168e-01, 4.71328812e-01, 4.69270897e-01,
4.66906967e-01, 4.64348908e-01, 4.61959457e-01,
4.59419556e-01, 4.57119751e-01, 4.54282990e-01,
4.52030411e-01, 4.49744415e-01, 4.47503815e-01,
4.44987106e-01, 4.42915993e-01, 4.40122299e-01,
4.38269691e-01, 4.35202255e-01, 4.33002968e-01,
4.30703163e-01, 4.28281441e-01, 4.25861244e-01,
4.23408241e-01, 4.21262741e-01, 4.19147110e-01,
4.16939697e-01, 4.14542465e-01, 4.11997719e-01,
4.09688759e-01, 4.07355232e-01, 4.04657173e-01,
4.02887306e-01, 4.00700073e-01, 3.98309898e-01,
3.95669937e-01, 3.93478394e-01, 3.91111298e-01,
3.88895645e-01, 3.86983261e-01, 3.84384155e-01,
3.81797638e-01, 3.79871559e-01, 3.77870216e-01,
3.75476189e-01, 3.73131638e-01, 3.70839462e-01,
3.69031487e-01, 3.66161499e-01, 3.63859253e-01,
3.61430778e-01, 3.59496612e-01, 3.57683106e-01,
3.55424080e-01, 3.52959938e-01, 3.50599556e-01,
3.48366928e-01, 3.46199951e-01, 3.43800392e-01,
3.41833038e-01, 3.39689293e-01, 3.37388229e-01,
3.35983315e-01, 3.33557548e-01, 3.31361923e-01,
3.29263535e-01, 3.27118683e-01, 3.24498863e-01,
3.22609215e-01, 3.20428238e-01, 3.18339233e-01,
3.16222420e-01, 3.14079876e-01, 3.12005463e-01,
3.09681053e-01, 3.07576656e-01, 3.05554867e-01,
3.03675804e-01, 3.01599236e-01, 2.99350357e-01,
2.97287026e-01, 2.95042343e-01, 2.93254433e-01,
2.91312427e-01, 2.89098625e-01, 2.86699619e-01,
2.84973373e-01, 2.82804375e-01, 2.81043167e-01,
2.79479942e-01, 2.76905003e-01, 2.74912872e-01,
2.72875061e-01, 2.71315537e-01, 2.68872356e-01,
2.67071037e-01, 2.64945831e-01, 2.62771225e-01,
2.60814991e-01, 2.59156818e-01, 2.56677303e-01,
2.54789314e-01, 2.53038921e-01, 2.51051693e-01,
2.49118004e-01, 2.46885796e-01, 2.45392628e-01,
2.43349152e-01, 2.41043224e-01, 2.39375744e-01,
2.37449379e-01, 2.35649910e-01, 2.33648262e-01,
2.32286263e-01, 2.30330391e-01, 2.28001060e-01,
2.26452904e-01, 2.24508724e-01, 2.22819996e-01,
2.20511837e-01, 2.19196682e-01, 2.17359448e-01,
2.15409527e-01, 2.13571644e-01, 2.11919060e-01,
2.10245914e-01, 2.08496246e-01, 2.06775856e-01,
2.05235577e-01, 2.03262482e-01, 2.01522713e-01,
1.99663773e-01, 1.97996788e-01, 1.96391239e-01,
1.94632092e-01, 1.92989120e-01, 1.91479111e-01,
1.89962959e-01, 1.87962627e-01, 1.86370125e-01,
1.84920654e-01, 1.83073902e-01, 1.81668034e-01,
1.80077705e-01, 1.78313961e-01, 1.76784782e-01,
1.75110645e-01, 1.73803921e-01, 1.72050915e-01,
1.70811748e-01, 1.68707829e-01, 1.67500534e-01,
1.65955715e-01, 1.64152584e-01, 1.62616043e-01,
1.61383820e-01, 1.59913750e-01, 1.58476162e-01,
1.57111960e-01, 1.55604382e-01, 1.54195471e-01,
1.52868767e-01, 1.51168289e-01, 1.50135088e-01,
1.48432417e-01, 1.46854248e-01, 1.45500660e-01,
1.44040155e-01, 1.43029194e-01, 1.41359615e-01,
1.40144958e-01, 1.38888855e-01, 1.37300205e-01,
1.36141462e-01, 1.34810266e-01, 1.33652449e-01,
1.32385340e-01, 1.30962801e-01, 1.29514580e-01,
1.28492441e-01, 1.26976881e-01, 1.26109915e-01,
1.24681196e-01, 1.23733912e-01, 1.22387972e-01,
1.21014032e-01, 1.19707127e-01, 1.18950415e-01,
1.17601652e-01, 1.16029644e-01, 1.15246582e-01,
1.13969402e-01, 1.12859097e-01, 1.11570110e-01,
1.10585833e-01, 1.09544601e-01, 1.08406753e-01,
1.07325516e-01, 1.05842676e-01, 1.04812813e-01,
1.03711939e-01, 1.02703686e-01, 1.01885681e-01,
1.00853710e-01, 9.96105671e-02, 9.87637615e-02,
9.77460957e-02, 9.68516922e-02, 9.56964302e-02,
9.48740578e-02, 9.36437607e-02, 9.26385784e-02,
9.13605881e-02, 9.08198070e-02, 8.97638321e-02,
8.86697960e-02, 8.77115726e-02, 8.71175385e-02,
8.63109493e-02, 8.48536015e-02, 8.42036724e-02,
8.32233620e-02, 8.23537445e-02, 8.15705395e-02,
8.05418396e-02, 7.98623276e-02, 7.91370583e-02,
7.78403139e-02, 7.73310661e-02, 7.62543249e-02,
7.54598522e-02, 7.44599009e-02, 7.38250256e-02,
7.31048202e-02, 7.23627281e-02, 7.15131903e-02,
7.05549860e-02, 6.98634911e-02, 6.91224623e-02,
6.86638069e-02, 6.76796818e-02, 6.68600273e-02,
6.60720110e-02, 6.53426409e-02, 6.48589230e-02,
6.40281153e-02, 6.31698275e-02, 6.24832773e-02,
6.17807865e-02, 6.11954021e-02, 6.05794573e-02,
5.96689224e-02, 5.90339708e-02, 5.84838772e-02,
5.78847265e-02, 5.68160105e-02, 5.64464664e-02,
5.57960987e-02, 5.50762606e-02, 5.47479629e-02,
5.40395975e-02, 5.31866121e-02, 5.24796009e-02,
5.18524837e-02, 5.13265848e-02, 5.05894184e-02,
5.04498529e-02, 4.95917797e-02, 4.92178106e-02,
4.86410618e-02, 4.78479099e-02, 4.73841429e-02,
4.68996859e-02, 4.65036964e-02, 4.57519102e-02,
4.53436470e-02, 4.48195744e-02, 4.40284443e-02,
4.36079264e-02, 4.33500671e-02, 4.26576328e-02,
4.20515776e-02, 4.15753365e-02, 4.11065292e-02,
4.07284117e-02, 4.01105547e-02, 3.95491576e-02,
3.92478895e-02, 3.86123323e-02, 3.83627343e-02,
3.81744385e-02, 3.72538948e-02, 3.67257714e-02,
3.64651537e-02, 3.61046267e-02, 3.56324434e-02,
3.50495958e-02, 3.47760701e-02, 3.45552087e-02,
3.38934398e-02, 3.36678410e-02, 3.31091881e-02,
3.26658273e-02, 3.23304272e-02, 3.17972445e-02,
3.14868403e-02, 3.11922049e-02, 3.07040787e-02,
3.03110600e-02, 2.99594235e-02, 2.98183370e-02,
2.92352104e-02, 2.89947557e-02, 2.86772442e-02,
2.83287978e-02, 2.79210877e-02, 2.72823572e-02,
2.73149657e-02, 2.69718742e-02, 2.67807961e-02,
2.61144757e-02, 2.57569838e-02, 2.57412481e-02,
2.51048923e-02, 2.50279760e-02, 2.49131537e-02,
2.45391846e-02, 2.42700195e-02, 2.38901758e-02,
2.35897589e-02, 2.28670168e-02, 2.28611231e-02,
2.27534866e-02, 2.24620295e-02, 2.19526005e-02,
2.16079593e-02, 2.14886975e-02, 2.11848760e-02,
2.12790751e-02, 2.06619120e-02, 2.07371426e-02,
2.00993228e-02, 1.95814931e-02, 1.95096111e-02,
1.88129783e-02, 1.91138482e-02, 1.89894068e-02,
1.82900357e-02, 1.82558620e-02, 1.84180438e-02,
1.78343022e-02, 1.79508388e-02, 1.98078752e-02,
2.35607266e-02, 1.64428818e-02, 1.63446629e-02,
1.61414671e-02, 1.59015155e-02, 1.57553589e-02,
1.55644822e-02, 1.53442860e-02, 1.52152765e-02,
1.49248958e-02, 1.47469020e-02, 1.46128261e-02,
1.45537209e-02, 1.43860090e-02, 1.40903854e-02,
1.39411104e-02, 1.37448251e-02, 1.35096633e-02,
1.34330940e-02, 1.32138276e-02, 1.30654049e-02,
1.28928685e-02, 1.27844548e-02, 1.25968790e-02,
1.24387026e-02, 1.23236620e-02, 1.21577203e-02,
1.19817626e-02, 1.18997812e-02, 1.17299104e-02,
1.16228032e-02, 1.13986945e-02, 1.13025677e-02,
1.11602139e-02, 1.10250735e-02, 1.09074187e-02,
1.07202637e-02, 1.06087947e-02, 1.05153501e-02,
1.03730762e-02, 1.02454245e-02, 1.00866878e-02,
9.99053955e-03, 9.78911459e-03, 9.76708233e-03,
9.62086201e-03, 9.47241306e-03, 9.33747649e-03,
9.41326499e-03, 9.13064659e-03, 9.12852585e-03,
9.06752527e-03, 8.93405914e-03, 8.67768466e-03,
8.64216387e-03, 8.60476136e-03, 8.40433478e-03,
8.29408765e-03, 8.28387678e-03, 8.08252513e-03,
8.08622956e-03, 7.89401472e-03, 7.83714354e-03,
7.71972716e-03, 7.65594542e-03, 7.46691644e-03,
7.51844585e-03, 7.36561239e-03, 7.31347740e-03,
7.21074879e-03, 7.17079341e-03, 7.00386226e-03,
7.00467884e-03, 6.87995970e-03, 6.80604935e-03,
6.66877091e-03, 6.58461690e-03, 6.56225383e-03,
6.54657483e-03, 6.29706144e-03, 6.29498184e-03,
6.20202959e-03, 6.14432633e-03, 6.14413202e-03,
6.01232946e-03, 5.90509057e-03, 5.87786853e-03,
5.79836965e-03, 5.70700347e-03, 5.57661533e-03,
5.59826493e-03, 5.52282333e-03, 5.46855211e-03,
5.39687157e-03, 5.30140877e-03, 5.28882802e-03,
5.22834003e-03, 5.12682915e-03, 5.03452301e-03,
4.97473180e-03, 5.00698507e-03, 4.91672516e-03,
4.86153126e-03, 4.76140350e-03, 4.73320752e-03,
4.78468746e-03, 4.58373725e-03, 4.58816707e-03,
4.48710144e-03, 4.41632897e-03, 4.37773258e-03])
flux_u = np.array(
[0.00000000e+00, 1.00000000e-04, 5.00000000e-04, 1.30000000e-03, 2.60000000e-03, 5.20000000e-03,
9.30000000e-03, 1.61000000e-02, 2.40000000e-02, 3.23000000e-02, 4.05000000e-02, 4.85000000e-02,
5.61000000e-02, 6.34000000e-02, 7.00000000e-02, 7.56000000e-02, 8.03000000e-02, 8.48000000e-02,
8.83000000e-02, 9.17000000e-02, 9.59000000e-02, 1.00100000e-01, 1.02900000e-01, 1.04400000e-01,
1.05300000e-01, 1.06300000e-01, 1.07500000e-01, 1.08500000e-01, 1.08400000e-01, 1.06400000e-01,
1.02400000e-01, 9.66000000e-02, 8.87000000e-02, 7.87000000e-02, 6.72000000e-02, 5.49000000e-02,
4.13000000e-02, 2.68000000e-02, 1.45000000e-02, 7.50000000e-03, 4.20000000e-03, 2.20000000e-03,
1.00000000e-03, 6.00000000e-04, 4.00000000e-04, 2.00000000e-04, 0.00000000e+00])
flux_g = np.array(
[0.00000000e+00, 3.00000000e-04, 8.00000000e-04,
1.30000000e-03, 1.90000000e-03, 2.40000000e-03,
3.40000000e-03, 5.50000000e-03, 1.03000000e-02,
1.94000000e-02, 3.26000000e-02, 4.92000000e-02,
6.86000000e-02, 9.00000000e-02, 1.12300000e-01,
1.34200000e-01, 1.54500000e-01, 1.72200000e-01,
1.87300000e-01, 2.00300000e-01, 2.11600000e-01,
2.21400000e-01, 2.30100000e-01, 2.37800000e-01,
2.44800000e-01, 2.51300000e-01, 2.57400000e-01,
2.63300000e-01, 2.69100000e-01, 2.74700000e-01,
2.80100000e-01, 2.85200000e-01, 2.89900000e-01,
2.94000000e-01, 2.97900000e-01, 3.01600000e-01,
3.05500000e-01, 3.09700000e-01, 3.14100000e-01,
3.18400000e-01, 3.22400000e-01, 3.25700000e-01,
3.28400000e-01, 3.30700000e-01, 3.32700000e-01,
3.34600000e-01, 3.36400000e-01, 3.38300000e-01,
3.40300000e-01, 3.42500000e-01, 3.44800000e-01,
3.47200000e-01, 3.49500000e-01, 3.51900000e-01,
3.54100000e-01, 3.56200000e-01, 3.58100000e-01,
3.59700000e-01, 3.60900000e-01, 3.61300000e-01,
3.60900000e-01, 3.59500000e-01, 3.58100000e-01,
3.55800000e-01, 3.45200000e-01, 3.19400000e-01,
2.80700000e-01, 2.33900000e-01, 1.83900000e-01,
1.35200000e-01, 9.11000000e-02, 5.48000000e-02,
2.95000000e-02, 1.66000000e-02, 1.12000000e-02,
7.70000000e-03, 5.00000000e-03, 3.20000000e-03,
2.10000000e-03, 1.50000000e-03, 1.20000000e-03,
1.00000000e-03, 9.00000000e-04, 8.00000000e-04,
6.00000000e-04, 5.00000000e-04, 3.00000000e-04,
1.00000000e-04, 0.00000000e+00])
flux_r = np.array(
[0.00000000e+00, 1.40000000e-03, 9.90000000e-03,
2.60000000e-02, 4.98000000e-02, 8.09000000e-02,
1.19000000e-01, 1.63000000e-01, 2.10000000e-01,
2.56400000e-01, 2.98600000e-01, 3.33900000e-01,
3.62300000e-01, 3.84900000e-01, 4.02700000e-01,
4.16500000e-01, 4.27100000e-01, 4.35300000e-01,
4.41600000e-01, 4.46700000e-01, 4.51100000e-01,
4.55000000e-01, 4.58700000e-01, 4.62400000e-01,
4.66000000e-01, 4.69200000e-01, 4.71600000e-01,
4.73100000e-01, 4.74000000e-01, 4.74700000e-01,
4.75800000e-01, 4.77600000e-01, 4.80000000e-01,
4.82700000e-01, 4.85400000e-01, 4.88100000e-01,
4.90500000e-01, 4.92600000e-01, 4.94200000e-01,
4.95100000e-01, 4.95500000e-01, 4.95600000e-01,
4.95800000e-01, 4.96100000e-01, 4.96400000e-01,
4.96200000e-01, 4.95300000e-01, 4.93100000e-01,
4.90600000e-01, 4.87300000e-01, 4.75200000e-01,
4.47400000e-01, 4.05900000e-01, 3.54400000e-01,
2.96300000e-01, 2.35000000e-01, 1.73900000e-01,
1.16800000e-01, 6.97000000e-02, 3.86000000e-02,
2.15000000e-02, 1.36000000e-02, 1.01000000e-02,
7.70000000e-03, 5.60000000e-03, 3.90000000e-03,
2.80000000e-03, 2.00000000e-03, 1.60000000e-03,
1.30000000e-03, 1.00000000e-03, 7.00000000e-04,
4.00000000e-04, 2.00000000e-04, 0.00000000e+00])
flux_i = np.array(
[0.00000000e+00, 1.00000000e-04, 3.00000000e-04,
4.00000000e-04, 4.00000000e-04, 4.00000000e-04,
3.00000000e-04, 4.00000000e-04, 9.00000000e-04,
1.90000000e-03, 3.40000000e-03, 5.60000000e-03,
1.04000000e-02, 1.97000000e-02, 3.49000000e-02,
5.69000000e-02, 8.51000000e-02, 1.18100000e-01,
1.55200000e-01, 1.98000000e-01, 2.44800000e-01,
2.90600000e-01, 3.29000000e-01, 3.56600000e-01,
3.82900000e-01, 4.06700000e-01, 4.24500000e-01,
4.32000000e-01, 4.25200000e-01, 4.02800000e-01,
3.84400000e-01, 3.91100000e-01, 4.01100000e-01,
3.98800000e-01, 3.92400000e-01, 3.91900000e-01,
3.98800000e-01, 3.97900000e-01, 3.93000000e-01,
3.89800000e-01, 3.87200000e-01, 3.84200000e-01,
3.79900000e-01, 3.73700000e-01, 3.68500000e-01,
3.67800000e-01, 3.60300000e-01, 1.52700000e-01,
2.17600000e-01, 2.75200000e-01, 3.43400000e-01,
3.39200000e-01, 3.36100000e-01, 3.31900000e-01,
3.27200000e-01, 3.22100000e-01, 3.17300000e-01,
3.12900000e-01, 3.09500000e-01, 3.07700000e-01,
3.07500000e-01, 3.08600000e-01, 3.09800000e-01,
3.09800000e-01, 3.07600000e-01, 3.02100000e-01,
2.93900000e-01, 2.82100000e-01, 2.59700000e-01,
2.24200000e-01, 1.81500000e-01, 1.37400000e-01,
9.73000000e-02, 6.52000000e-02, 4.10000000e-02,
2.37000000e-02, 1.28000000e-02, 7.40000000e-03,
5.30000000e-03, 3.60000000e-03, 2.20000000e-03,
1.40000000e-03, 1.10000000e-03, 1.00000000e-03,
1.00000000e-03, 9.00000000e-04, 6.00000000e-04,
3.00000000e-04, 0.00000000e+00])
flux_z = np.array(
[0., 0., 0.0001, 0.0001, 0.0001, 0.0002, 0.0002,
0.0003, 0.0005, 0.0007, 0.0011, 0.0017, 0.0027, 0.004,
0.0057, 0.0079, 0.0106, 0.0139, 0.0178, 0.0222, 0.0271,
0.0324, 0.0382, 0.0446, 0.0511, 0.0564, 0.0603, 0.0637,
0.0667, 0.0694, 0.0717, 0.0736, 0.0752, 0.0765, 0.0775,
0.0782, 0.0786, 0.0787, 0.0785, 0.078, 0.0772, 0.0763,
0.0751, 0.0738, 0.0723, 0.0708, 0.0693, 0.0674, 0.0632,
0.0581, 0.0543, 0.0526, 0.0523, 0.0522, 0.0512, 0.0496,
0.0481, 0.0473, 0.0476, 0.0482, 0.0476, 0.0447, 0.0391,
0.0329, 0.0283, 0.0264, 0.0271, 0.0283, 0.0275, 0.0254,
0.0252, 0.0256, 0.0246, 0.0244, 0.0252, 0.0258, 0.0265,
0.0274, 0.0279, 0.0271, 0.0252, 0.0236, 0.0227, 0.0222,
0.0216, 0.0208, 0.0196, 0.0183, 0.0171, 0.016, 0.0149,
0.0138, 0.0128, 0.0118, 0.0108, 0.0099, 0.0091, 0.0083,
0.0075, 0.0068, 0.0061, 0.0055, 0.005, 0.0045, 0.0041,
0.0037, 0.0033, 0.003, 0.0027, 0.0025, 0.0023, 0.0021,
0.0019, 0.0018, 0.0017, 0.0016, 0.0015, 0.0014, 0.0013,
0.0012, 0.0011, 0.001, 0.0009, 0.0008, 0.0008, 0.0007,
0.0006, 0.0006, 0.0006, 0.0005, 0.0005, 0.0004, 0.0004,
0.0003, 0.0003, 0.0002, 0.0002, 0.0001, 0.0001, 0., 0.])
if _filter == 'R':
wave_filter = wave_R
flux_filter = flux_R
elif _filter == 'V':
wave_filter = wave_V
flux_filter = flux_V
elif _filter == 'u':
wave_filter = wave_u
flux_filter = flux_u
elif _filter == 'g':
wave_filter = wave_g
flux_filter = flux_g
elif _filter == 'r':
wave_filter = wave_r
flux_filter = flux_r
elif _filter == 'i':
wave_filter = wave_i
flux_filter = flux_i
elif _filter == 'z':
wave_filter = wave_z
flux_filter = flux_z
else:
raise ValueError('not implemented transmission curve')
        # Note: 'filter' is a Python built-in; renaming this argument would avoid confusion.
final_flux_filter = self.filter_to_MUSE_wavelength(wave_filter, flux_filter, wavelength_spec)
return final_flux_filter
def filter_to_MUSE_wavelength(self, wave_filter, flux_filter, wavelength_spec):
new_filter_wavelength = self.overlap_filter(wave_filter, wavelength_spec)
interpolator = interpolate.interp1d(wave_filter, flux_filter)
new_filter_flux = interpolator(new_filter_wavelength)
final_flux_filter = []
for j, w in enumerate(wavelength_spec):
k = mcu.indexOf(new_filter_wavelength, w)
if k >= 0:
final_flux_filter.append(new_filter_flux[k])
else:
final_flux_filter.append(0.)
return np.array(final_flux_filter)
def overlap_filter(self, wave_filter, wavelength_spec):
n = len(wave_filter)
w_min = wave_filter[0]
w_max = wave_filter[n - 1]
w_spec_overlap = []
if wave_filter[1] < wavelength_spec[0] or wave_filter[n - 2] > wavelength_spec[len(wavelength_spec) - 1]:
            raise ValueError('Filter wavelength range is wider than the spectrum range, so the convolution is not valid')
for w in wavelength_spec:
if w >= w_min and w <= w_max:
w_spec_overlap.append(w)
return np.array(w_spec_overlap)
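    # A hedged sketch of how get_filter, filter_to_MUSE_wavelength and overlap_filter
    # fit together (the spectrum arrays `spec_wave` / `spec_flux` are hypothetical,
    # not attributes defined in this class):
    #
    #   curve = self.get_filter(spec_wave, _filter='V')        # transmission on the spectrum grid
    #   mag_V = self.calculate_mag(spec_wave, spec_flux, curve)
    #
    # get_filter delegates to filter_to_MUSE_wavelength, which uses overlap_filter to
    # keep only the spectrum wavelengths inside the filter support and pads the rest
    # of the transmission curve with zeros.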
def reload_canvas(self, vmin=None, vmax=None):
"""
Clean everything from the canvas with the white image
:param self:
:return:
"""
plt.figure(self.n)
plt.clf()
if vmin is not None:
self.vmin = vmin
if vmax is not None:
self.vmax = vmax
self.gc2 = aplpy.FITSFigure(self.filename_white, figure=plt.figure(self.n))
if self.color:
self.gc2.show_colorscale(cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
else:
self.gc2.show_grayscale(vmin=self.vmin, vmax=self.vmax)
plt.show()
def get_from_table(self, input_file, keyword):
"""
        Get the column that corresponds to a given keyword from a SExtractor output file
:param input_file: string
name of the SExtractor output file
:param keyword: string
keyword in the SExtractor output file
:return: data
the column associated to the keyword
"""
sex = SExtractor()
table = sex.read(input_file)
data = table[keyword]
return data
def get_gaussian_profile_weighted_spec(self, x_c=None, y_c=None, params=None, region_string_=None,
coord_system='pix'):
"""
        Function that extracts the spectrum from an aperture defined either by elliptical parameters or by an elliptical region given as a region_string in ds9 format
:param x_c: x_coordinate of the center of the aperture
:param y_c: y_coordinate of the center of the aperture
:param params: Either a single radius or a set of [a,b,theta] params
:param region_string: region defined by ds9 format (ellipse)
        :param coord_system: when the aperture is defined using x_c, y_c and params, indicates the type of these coordinates. Possible values: 'pix' and 'wcs'
:return: XSpectrum1D object
"""
        if x_c is None and y_c is None and params is None and region_string_ is None:
raise ValueError('Not valid input')
        if region_string_ is not None:
x_c, y_c, params = self.params_from_ellipse_region_string(region_string_)
        if not isinstance(params, (int, float, tuple, list, np.ndarray)):
raise ValueError('Not ready for this `radius` type.')
if isinstance(params, (int, float)):
a = params
b = params
theta = 0
elif isiterable(params) and (len(params) == 3):
a = max(params[:2])
b = min(params[:2])
theta = params[2]
else:
raise ValueError('If iterable, the length of radius must be == 3; otherwise try float.')
if coord_system == 'wcs':
x_center, y_center, params = self.ellipse_params_to_pixel(x_c, y_c, params=[a, b, theta])
else: # already in pixels
x_center, y_center, params = x_c, y_c, [a, b, theta]
xc = x_center
yc = y_center
new_mask = self.get_mini_cube_mask_from_ellipse_params(x_center, y_center, params)
spec_sum = self.spec_from_minicube_mask(new_mask, mode='sum')
halfsize = [a, b]
        if region_string_ is None:
region_string = self.ellipse_param_to_ds9reg_string(xc, yc, a, b, theta)
else:
region_string = region_string_
new_2dmask = self.get_new_2dmask(region_string)
masked_white = ma.MaskedArray(self.white_data)
masked_white.mask = new_2dmask
###### Define domain matrix:
matrix_x = np.zeros_like(self.white_data)
matrix_y = np.zeros_like(self.white_data)
n = self.white_data.shape[0]
m = self.white_data.shape[1]
        for i in range(m):
matrix_x[:, i] = i
        for j in range(n):
matrix_y[j, :] = j
###########
amp_init = masked_white.max()
stdev_init_x = 0.33 * halfsize[0]
stdev_init_y = 0.33 * halfsize[1]
g_init = models.Gaussian2D(x_mean=xc, y_mean=yc, x_stddev=stdev_init_x,
y_stddev=stdev_init_y, amplitude=amp_init, theta=theta)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, matrix_x, matrix_y, masked_white)
weights = ma.MaskedArray(g(matrix_x, matrix_y))
if (g.y_stddev < 0) or (g.x_stddev < 0):
raise ValueError('Cannot trust the model, please try other input parameters.')
w = self.wavelength
n = len(w)
fl = np.zeros(n)
sig = np.zeros(n)
new_3dmask = self.get_new_3dmask(region_string)
self.cube.mask = new_3dmask
for wv_ii in range(n):
mask = new_3dmask[wv_ii]
weights.mask = mask
# n_spaxels = np.sum(mask)
weights = weights / np.sum(weights)
fl[wv_ii] = np.sum(self.cube[wv_ii] * weights) # * n_spaxels
sig[wv_ii] = np.sqrt(np.sum(self.stat[wv_ii] * (weights ** 2))) # * n_spaxels
# reset mask
self.cube.mask = self.mask_init
# renormalize
fl_sum = spec_sum.flux.value
norm = np.sum(fl_sum) / np.sum(fl)
fl = fl * norm
sig = sig * norm
return XSpectrum1D.from_tuple((w, fl, sig))
def determinate_seeing_from_white(self, xc, yc, halfsize):
"""
        Function used to estimate the observational seeing of an exposure by fitting a Gaussian to a bright source in the image
:param xc: x coordinate in pixels of a bright source
:param yc: y coordinate in pixels of a bright source
:param halfsize: the radius of the area to fit the gaussian
:return: seeing: float
the observational seeing of the image defined as the FWHM of the gaussian
"""
hdulist = self.hdulist_white
data = hdulist[1].data
matrix_data = np.array(self.get_mini_image([xc, yc], halfsize=halfsize))
x = np.arange(0, matrix_data.shape[0], 1)
y = np.arange(0, matrix_data.shape[1], 1)
matrix_x, matrix_y = np.meshgrid(x, y)
amp_init = np.matrix(matrix_data).max()
stdev_init = 0.33 * halfsize
def tie_stddev(model): # we need this for tying x_std and y_std
xstddev = model.x_stddev
return xstddev
g_init = models.Gaussian2D(x_mean=halfsize + 0.5, y_mean=halfsize + 0.5, x_stddev=stdev_init,
y_stddev=stdev_init, amplitude=amp_init, tied={'y_stddev': tie_stddev})
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, matrix_x, matrix_y, matrix_data)
if (g.y_stddev < 0) or (g.y_stddev > halfsize):
            raise ValueError('Cannot trust the model, please try other input parameters.')
seeing = 2.355 * g.y_stddev * self.pixelsize.to('arcsec') # in arcsecs
print('FWHM={:.2f}'.format(seeing))
print('stddev from the 2D gaussian = {:.3f}'.format(g.y_stddev * self.pixelsize.to('arcsec')))
return seeing
def w2p(self, xw, yw):
"""
        Transform from the wcs coordinate system to pixel coordinates
:param self:
:param xw: float
x coordinate in wcs
:param yw: float
y coordinate in wcs
:return: xpix: float
x coordinate in pixels
ypix: float
y coordinate in pixels
"""
xpix, ypix = self.gc2.world2pixel(xw, yw)
if xpix < 0:
xpix = 0
if ypix < 0:
ypix = 0
return int(round(xpix)), int(round(ypix))
def p2w(self, xp, yp):
"""
Transform from pixel coordinate system to wcs coordinates
:param self:
:param xp: float
x coordinate in pixels
:param yp: float
y coordinate in pixels
:return: xw: float
x coordinate in wcs
yw: float
y coordinate in wcs
"""
xw, yw = self.gc2.pixel2world(xp, yp)
return xw, yw
def xyr_to_pixel(self, x_center, y_center, radius):
"""
        Transform the (x, y) center and radius that define a circular region from the wcs coordinate system to pixels
:param self:
:param x_center: float
x coordinate in wcs
:param y_center: float
y coordinate in wcs
:param radius: float
radius of the circular region
:return: x_center_pix: float
x coordinate in pixels
y_center_pix: float
y coordinate in pixels
radius_pix: float
radius of the circular region in pixels
"""
x_r = x_center + radius
x_r_pix, y_center_pix = self.w2p(x_r, y_center)
x_center, y_center = self.w2p(x_center, y_center)
radius = abs(x_r_pix - x_center)
x_center = int(round(x_center))
y_center = int(round(y_center))
radius = int(round(radius + 1))
x_center_pix = x_center
y_center_pix = y_center
radius_pix = radius
return x_center_pix, y_center_pix, radius_pix
@property
def shape(self):
"""
:param self:
:return:
"""
return self.cube.data.shape
def create_movie_redshift_range(self, z_ini=0., z_fin=1., dz=0.001, width=30, outvid='emission_lines_video.avi',
erase=True):
"""
        Function to create a movie, collapsing the different wavelength ranges in which some strong emission lines would fall at given redshifts
:param z_ini: initial redshift
:param z_fin: final redshift
:param dz: delta redshift
:param outvid: name of the final video
:param width: width of the lines that will be collapsed, in Angstroms
:param erase: If true, the individual frames to make the video will be erased after the video is produced
:return:
"""
OII = 3728.483
wave = self.wavelength
n = len(wave)
w_max = wave[n - 1 - 20]
max_z_allowed = (w_max / OII) - 1.
if z_fin > max_z_allowed:
print('maximum redshift allowed is ' + str(max_z_allowed) + ', this value will be used instead of ' + str(
z_fin))
z_fin = max_z_allowed
z_array = np.arange(z_ini, z_fin, dz)
images_names = []
fitsnames = []
for z in z_array:
print('z = ' + str(z))
ranges = self.create_ranges(z, width=width)
            filename = 'emission_line_image_redshift_' + str(z) + '_'
image = self.get_image_wv_ranges(wv_ranges=ranges, fitsname=filename + '.fits', save=True)
plt.close(15)
image = aplpy.FITSFigure(filename + '.fits', figure=plt.figure(15))
image.show_grayscale()
plt.title('Emission lines image at z = ' + str(z))
image.save(filename=filename + '.png')
images_names.append(filename + '.png')
fitsnames.append(filename + '.fits')
plt.close(15)
video = self.make_video(images=images_names, outvid=outvid)
n_im = len(fitsnames)
if erase:
            for i in range(n_im):
fits_im = fitsnames[i]
png_im = images_names[i]
command_fits = 'rm ' + fits_im
command_png = 'rm ' + png_im
os.system(command_fits)
os.system(command_png)
return video
def collapse_highSN(self, sn_min=5, fitsname='collapsed_emission_image.fits', save=True):
"""
        Function used to sum only voxels in which the signal-to-noise ratio is greater than the sn_min value. This will create a new image
        :param sn_min: float
            threshold on the signal-to-noise ratio
:param fitsname: string
name of the new image
:param save: Boolean
If True, the new image is saved to the hard disk.
:return:
"""
count_voxel_cube = np.where(self.cube > (self.stat ** 0.5) * sn_min, 1., 0.)
count_voxel_im = np.sum(count_voxel_cube, axis=0) + 1
del count_voxel_cube
valid_voxel_cube = np.where(self.cube > (self.stat ** 0.5) * sn_min, self.cube, 0.)
valid_voxel_im = np.sum(valid_voxel_cube, axis=0)
del valid_voxel_cube
normalized_im = valid_voxel_im / count_voxel_im
normalized_im = np.where(np.isnan(normalized_im), 0, normalized_im)
if save:
hdulist = self.hdulist_white
hdulist[1].data = normalized_im
hdulist.writeto(fitsname, clobber=True)
return normalized_im
def create_ranges(self, z, width=30.):
"""
Function used to create the wavelength ranges around strong emission lines at a given redshift
:param z: redshift
:param width: width in Angstroms of the emission lines
:return:
"""
wave = self.wavelength
n = len(wave)
w_max = wave[n - 1]
w_min = wave[0]
half = width / 2.
OII = 3728.483
Hb = 4862.683
Ha = 6564.613
OIII_4959 = 4960.295
OIII_5007 = 5008.239
lines_wvs = {'OII': OII * (1. + z), 'Hb': Hb * (1. + z), 'OIII_4959': OIII_4959 * (1. + z),
'OIII_5007': OIII_5007 * (1. + z), 'Ha': Ha * (1. + z)}
range_OII = np.array([lines_wvs['OII'] - half, lines_wvs['OII'] + half])
range_Hb = np.array([lines_wvs['Hb'] - half, lines_wvs['Hb'] + half])
range_Ha = np.array([lines_wvs['Ha'] - half, lines_wvs['Ha'] + half])
range_OIII_4959 = np.array([lines_wvs['OIII_4959'] - half, lines_wvs['OIII_4959'] + half])
range_OIII_5007 = np.array([lines_wvs['OIII_5007'] - half, lines_wvs['OIII_5007'] + half])
ranges = [range_Ha, range_Hb, range_OII, range_OIII_4959, range_OIII_5007]
output_ranges = []
        for rng in ranges:
            if rng[0] - width >= w_min and rng[1] + width <= w_max:
                output_ranges.append(rng)
return output_ranges
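    # Illustrative sketch with made-up numbers: at z = 0.5 and width = 30 Angstroms,
    # the OII doublet at 3728.483 A lands at ~5592.7 A, so the returned list would
    # contain [5577.7, 5607.7] provided it lies comfortably inside the cube coverage
    # (the check above requires an extra `width` margin on both sides); any line
    # shifted outside that range is silently dropped.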
def make_video(self, images, outimg=None, fps=2, size=None, is_color=True, format="XVID", outvid='image_video.avi'):
from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize
fourcc = VideoWriter_fourcc(*format)
vid = None
for image in images:
if not os.path.exists(image):
raise FileNotFoundError(image)
img = imread(image)
if vid is None:
if size is None:
size = img.shape[1], img.shape[0]
vid = VideoWriter(outvid, fourcc, float(fps), size, is_color)
            if size[0] != img.shape[1] or size[1] != img.shape[0]:
img = resize(img, size)
vid.write(img)
vid.release()
return vid
    # A radius of 4 pixels is equivalent to a radius of 0.0002 in wcs
``` |
{
"source": "Joaquinhuberto/newspaper",
"score": 3
} |
#### File: newspaper/news/forms.py
```python
from django import forms
from newspaper.news.models import News
class NewsForm(forms.ModelForm):
def clean_title(self):
if 'b' in self.cleaned_data['title']:
            raise forms.ValidationError("The title field cannot contain the letter 'b' <this is just a dummy validation>")
return self.cleaned_data['title']
class Meta:
model = News
fields = '__all__'
```
#### File: newspaper/news/managers.py
```python
from django.db import models
from django.db.models.query import QuerySet
from datetime import datetime
class NewsQuerySet(QuerySet):
def news_published(self):
return self.filter(publish_date__lte=datetime.now())\
.order_by('publish_date')
def news_next_published(self):
return self.filter(publish_date__gte=datetime.now())\
.order_by('publish_date')
class NewsManager(models.Manager):
def get_queryset(self):
return NewsQuerySet(self.model, using=self._db)
def news_published(self):
return self.get_queryset().news_published()
def news_next_published(self):
return self.get_queryset().news_next_published()
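# A minimal usage sketch, assuming the News model (defined in the app's models.py,
# not shown in this file) attaches this manager as its default one:
#
#   class News(models.Model):
#       ...
#       objects = NewsManager()
#
#   News.objects.news_published()        # items whose publish_date has passed
#   News.objects.news_next_published()   # items scheduled for the future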
```
#### File: newspaper/news/views.py
```python
import json
from datetime import datetime
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import (HttpResponseRedirect,
HttpResponseBadRequest,
HttpResponse)
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from newspaper.news.forms import NewsForm
from newspaper.news.models import News
def news_list(request):
news_published = News.objects.news_published()
paginator = Paginator(news_published, settings.NUM_ITEMS_PAG)
page_default = 1
page_published = request.GET.get('page', page_default)
try:
news_published = paginator.page(page_published)
except PageNotAnInteger:
news_published = paginator.page(page_default)
except EmptyPage:
news_published = paginator.page(paginator.num_pages)
news_next_published = News.objects.news_next_published()
return render_to_response("news/news_list.html",
{'news_published': news_published,
"news_next_published": news_next_published},
context_instance=RequestContext(request))
@login_required(login_url='/admin/')
def news_add(request):
data = None
if request.method == 'POST':
data = request.POST
initial = {'publish_date': datetime.now()}
# if 'title' in request.GET:
# initial['title'] = request.GET['title']
news_form = NewsForm(data=data,
initial=initial)
if news_form.is_valid():
news_form.save()
return HttpResponseRedirect(reverse('news_list'))
return render_to_response('news/news_add.html',
{'news_form': news_form},
context_instance=RequestContext(request))
@login_required(login_url='/admin/')
def news_edit(request, newsitem_pk):
data = None
if request.method == 'POST':
data = request.POST
news_item = get_object_or_404(News, pk=newsitem_pk)
news_form = NewsForm(data=data,
instance=news_item)
if news_form.is_valid():
news_form.save()
return HttpResponseRedirect(reverse('news_list'))
return render_to_response('news/news_edit.html',
{'news_form': news_form},
context_instance=RequestContext(request))
@login_required(login_url='/admin/')
def news_delete(request, newsitem_pk):
if request.method != 'POST':
return HttpResponseBadRequest()
news_item = get_object_or_404(News, pk=newsitem_pk)
news_item.delete()
if request.is_ajax():
return HttpResponse(json.dumps({'status': 'ok'}))
return HttpResponseRedirect(reverse('news_list'))
``` |
{
"source": "JoaquinIglesiasTurina/narxpy",
"score": 2
} |
#### File: JoaquinIglesiasTurina/narxpy/dataPreprocess.py
```python
import pymc3 as pm
import numpy as np
import theano.tensor as tt
import scipy.stats as stats
import functools as ft
def lagData(data, lags, includeCurrent=False):
if includeCurrent:
ranger = (0, lags)
else:
ranger = (1, lags + 1)
lagsList = [np.roll(data, lag, 0)
for lag in range(*ranger)]
return ft.reduce(
lambda x, y: np.concatenate([x, y], 1),
lagsList
)[lags:, :]
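# Illustrative sketch of lagData with a tiny series: for data = [[1], [2], [3], [4]]
# and lags = 2 (includeCurrent=False), np.roll builds the 1-step and 2-step shifted
# copies, the columns are concatenated, and the first `lags` rows (which wrap around)
# are discarded, leaving [[2, 1], [3, 2]] -- each row holds the values at t-1 and t-2.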
class RadialBasis(object):
def __init__(self, numBasis):
self.numBasis = numBasis
def fit(self, data):
self.nObs, self.nVars = data.shape
self.mvNormal = stats.multivariate_normal(
np.zeros(self.nVars),
np.eye(self.nVars))
self.kernelMatrix = np.quantile(
data,
np.arange(0, 1.1, 1/(self.numBasis - 1)),
axis = 0)
def transform(self, data):
def helper(array):
return np.array([
self.mvNormal.logpdf(array - self.kernelMatrix[x, :])
for x in range(self.numBasis)
])
return np.array([
helper(data[x, :]) for x in range(data.shape[0])
])
    def fitTransform(self, data):
        self.fit(data)
        return self.transform(data)
def changeBasis(data, numBasis):
def helper(array):
return np.array([
mvNormal.logpdf(array - kernelMatrix[x, :])
for x in range(numBasis)
])
nObs, nVars = data.shape
mvNormal = stats.multivariate_normal(
np.zeros(nVars),
np.eye(nVars))
kernelMatrix = np.quantile(
data,
np.arange(0, 1.1, 1/(numBasis - 1)),
axis = 0)
return np.array([
helper(data[x, :]) for x in range(data.shape[0])
])
``` |
{
"source": "joaquinkai/smart-thermostat",
"score": 3
} |
#### File: smart-thermostat/src/outside_weather.py
```python
import os
from time import time
from typing import Optional, NamedTuple, Dict, List, Any
import requests
from requests.models import Response
key = os.environ['OPEN_WEATHER_MAP_KEY']
class WeatherObservation(NamedTuple):
local_time: int
temperature: float
wind_speed: float
gust: float
wind_dir: int
pressure: float
humidity: int
main_weather: List[Dict[str, str]]
def outside_weather(weather_query='q=Oakland') -> Optional[WeatherObservation]:
url = f'http://api.openweathermap.org/data/2.5/weather?{weather_query}&units=metric&APPID={key}'
try:
response: Response = requests.get(url)
if response.status_code == 200:
weather: Dict[str, Any] = response.json()
main: Dict[str, Any] = weather['main']
wind: Dict[str, Any] = weather['wind']
return WeatherObservation(
int(time()),
float(main['temp']),
_mps_to_kph(float(wind['speed'])),
_mps_to_kph(float(wind.get('gust', 0))),
int(wind['deg']),
float(main['pressure']),
int(main['humidity']),
weather['weather'],
)
print(response.status_code, response.text)
except requests.RequestException as ex:
print(ex)
return None
def _mps_to_kph(meters_per_second: float) -> float:
km_per_second = meters_per_second / 1000
seconds_per_hour = 3600
return km_per_second * seconds_per_hour
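# Sanity check for the conversion above (illustrative numbers): 10 m/s ->
# (10 / 1000) km covered per second * 3600 s/h = 36 km/h, i.e. the usual
# m/s -> km/h factor of 3.6.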
if __name__ == '__main__':
print(outside_weather('zip=94549'))
``` |
{
"source": "JoaquinKeller/polyadicQML",
"score": 2
} |
#### File: polyadicQML/examples/example-binary.py
```python
from os.path import isdir
if not isdir("figures-ex"):
from os import mkdir
mkdir("figures-ex")
#############################################
import numpy as np
import matplotlib.pyplot as plt
from polyadicqml import Classifier
from polyadicqml.manyq import mqCircuitML
FIGURES = True
SEED = 294
np.random.seed(SEED)
##############################
# We create a dataset of 100 points from two Gaussian clouds.
# We take a random polar point
a = 2 * np.pi * np.random.rand()
n_pc = 50 # Number of points per cluster
scale = 1.25
px, py = scale * np.cos(a), scale * np.sin(a)
# Build the data matrix by stacking copies of the polar point and of its symmetric counterpart
X = np.asarray(n_pc * [[px, py]] + # Polar point
n_pc * [[-px, -py]] # Symmetric point
)
# Add gaussian noise
X += 0.7 * np.random.randn(*X.shape)
# Create target vector
y = np.concatenate((np.zeros(n_pc), np.ones(n_pc)))
COLORS = ["tab:blue", "tab:red"]
if FIGURES:
import seaborn as sns
sns.set()
fig, ax = plt.subplots(figsize=(5,5))
idx = y == 1
ax.plot(X[~ idx,0], X[~ idx,1], ls="", marker="o", color=COLORS[0], label="Class 0")
ax.plot(X[idx,0], X[idx,1], ls="", marker="o", color=COLORS[1], label="Class 1",)
graph_args = dict(ls="", marker = "D", ms=10, mec="black", mew=2)
ax.plot([px], [py], color=COLORS[0], **graph_args)
ax.plot([-px], [-py], color=COLORS[1], **graph_args)
ax.set(xlim=[-np.pi,np.pi], ylim=[-np.pi,np.pi])
ax.legend(loc="upper left")
plt.savefig("figures-ex/binary-points.svg", bbox_inches="tight")
plt.close()
##############################
# Now we define the make_circuit function using the builder interface
def simple_circuit(bdr, x, params):
bdr.allin(x).cz(0,1).allin(params[:2])
bdr.cz(0,1).allin(params[2:4])
return bdr
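# Reading of the circuit above (inferred from the builder calls, not from separate
# documentation): each `allin` applies an input-dependent rotation on every qubit,
# first encoding the two features of x and then twice using trainable angles, while
# the cz(0,1) calls entangle the qubits between layers -- hence the 4 parameters
# declared below.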
##############################
# Now we instantiate the circuit
nbqbits = 2
nbparams = 4
qc = mqCircuitML(make_circuit=simple_circuit,
nbqbits=nbqbits, nbparams=nbparams)
bitstr = ['00', '11']
# We can use exact probabilities
model = Classifier(qc, bitstr)
model.fit(X, y, save_loss_progress=True)
# Or pass through shots-based estimates
model2 = Classifier(qc, bitstr, nbshots=300)
model2.fit(X, y, method='COBYLA', save_loss_progress=True)
if FIGURES:
fig, ax = plt.subplots(figsize=(5,3))
fig.set_tight_layout(True)
p0 = model2.__loss_progress__[0]
l2, = ax.plot(model2.__loss_progress__, c='tab:green')
l1, = ax.plot([p0] + model.__loss_progress__, c="tab:blue")
ax.set(ylabel="loss value", xlabel="iteration",)
ax.legend((l1, l2), ('BFGS - simulated QPU', 'COBYLA - shots'),
loc='upper right')
plt.savefig("figures-ex/binary-loss-progress.svg", bbox_inches='tight')
plt.close()
##############################
# Then we test the model
t = np.linspace(-np.pi,np.pi, num = 50)
X_test = np.array([[t1, t2] for t1 in t for t2 in t])
y_pred = model.predict(X_test)
if FIGURES:
fig, ax = plt.subplots(figsize=(5,5))
idx = y_pred == 1
ax.plot(X_test[idx,0], X_test[idx,1], ls="", marker="s", color="coral", alpha=.3)
ax.plot(X_test[~ idx,0], X_test[~ idx,1], ls="", marker="s", color="deepskyblue", alpha=.3)
idx = y == 1
ax.plot(X[~ idx,0], X[~ idx,1], ls="", marker="o", color=COLORS[0], label="Class 0")
ax.plot(X[idx,0], X[idx,1], ls="", marker="o", color=COLORS[1], label="Class 1")
graph_args = dict(ls="", marker = "D", ms=10, mec="black", mew=2)
ax.plot([px], [py], color=COLORS[0], **graph_args)
ax.plot([-px], [-py], color=COLORS[1], **graph_args)
ax.plot([5*py, -5*py], [-5*px, 5*px], color="tab:grey")
ax.set(xlim=[-np.pi,np.pi], ylim=[-np.pi,np.pi],)
ax.legend(loc="upper left")
plt.savefig("figures-ex/binary-predictions.svg", bbox_inches="tight")
plt.close()
#############################################
# We compute the full circuit output on the train set.
model.nbshots = 300
full_output = model.run_circuit(X)
if FIGURES:
fig, ax = plt.subplots(1, 2, figsize=(8,4))
fig.set_tight_layout(True)
bitstr = ['00', '10', '01', '11']
x = [0, 1, 2, 3]
names= ["Class 0", "Class 1"]
colors = ["tab:orange", "tab:green", "tab:blue", "tab:red"]
for label in range(2):
count = full_output[y==label]
for c in count:
ax[label].bar(x, height=c, alpha=.07, color=colors)
ax[label].boxplot(
count, positions=x, sym="_",
medianprops=dict(ls="-", lw=3, color="black"),
)
ax[label].set_title(f"{names[label]}", fontdict={'color': colors[-label]})
ax[label].set(xticks=x, xticklabels=bitstr, ylim=(0,300), xlim=(-.5,3.5))
ax[1].tick_params(labelleft=False, labelright=True)
plt.savefig("figures-ex/binary-counts.svg", bbox_inches="tight")
plt.close()
```
#### File: polyadicQML/polyadicqml/circuitML.py
```python
from numpy import pi, random, zeros_like, zeros, log2
class circuitML():
"""Abstract Quantum ML circuit interface.
Provides a unified interface to run multiple parametric circuits with
different input and model parameters, agnostic of the backend, implemented
in the subclasses.
Parameters
----------
make_circuit : callable of signature self.make_circuit
Function to generate the circuit corresponding to input `x` and
`params`.
nbqbits : int
Number of qubits.
nbparams : int
Number of parameters.
cbuilder : circuitBuilder
Circuit builder class to be used. It must correspond to the subclass
implementation.
Attributes
----------
nbqbits : int
Number of qubits.
nbparams : int
Number of parameters.
"""
def __init__(self, make_circuit, nbqbits, nbparams, cbuilder):
self.nbqbits = nbqbits
self.nbparams = nbparams
self.__set_builder__(cbuilder)
self.make_circuit = make_circuit
def __set_builder__(self, cbuilder):
self.__verify_builder__(cbuilder)
self._circuitBuilder = cbuilder
def __verify_builder__(self, cbuilder):
raise NotImplementedError
def run(self, X, params, nbshots=None, job_size=None):
"""Run the circuit with input `X` and parameters `params`.
Parameters
----------
X : array-like
Input matrix of shape *(nb_samples, nb_features)*.
params : vector-like
Parameter vector.
nbshots : int, optional
Number of shots for the circuit run, by default ``None``. If
``None``, uses the backend default.
job_size : int, optional
Maximum job size, to split the circuit runs, by default ``None``.
If ``None``, put all *nb_samples* in the same job.
Returns
-------
array
Bitstring counts as an array of shape *(nb_samples, 2**nbqbits)*
"""
raise NotImplementedError
def random_params(self, seed=None):
"""Generate a valid vector of random parameters.
Parameters
----------
seed : int, optional
random seed, by default ``None``
Returns
-------
vector
Vector of random parameters.
"""
if seed: random.seed(seed)
return random.randn(self.nbparams)
def make_circuit(self, bdr, x, params):
"""Generate the circuit corresponding to input `x` and `params`.
NOTE: This function is to be provided by the user, with the present
signature.
Parameters
----------
bdr : circuitBuilder
A circuit builder.
x : vector-like
Input sample
params : vector-like
Parameter vector.
Returns
-------
circuitBuilder
Instructed builder
"""
raise NotImplementedError
def __eq__(self, other):
return self.make_circuit is other.make_circuit
def __repr__(self):
return "<circuitML>"
def __str__(self):
return self.__repr__()
def grad(self, X, params, v=None, eps=None, nbshots=None, job_size=None):
"""Compute the gradient of the circuit w.r.t. parameters *params* on
input *X*.
Uses finite differences of the circuit runs.
Parameters
----------
X : array-like
Input matrix of shape *(nb_samples, nb_features)*.
params : vector-like
Parameter vector of length *nb_params*.
v : array-like
Vector or matrix to right multiply the Jacobian with.
eps : float, optional
Epsilon for finite differences. By default uses ``1e-8`` if
`nbshots` is not provided, else uses :math:`\\pi /
\\sqrt{\\text{nbshots}}`
nbshots : int, optional
Number of shots for the circuit run, by default ``None``. If
``None``, uses the backend default.
job_size : int, optional
Maximum job size, to split the circuit runs, by default ``None``.
If ``None``, put all *nb_samples* in the same job.
Returns
-------
array
            Jacobian matrix as an array of shape *(nb_params, 2**nbqbits)* if
`v` is None, else Jacobian-vector product: ``J(circuit) @ v``
"""
dim_out = 2**self.nbqbits
if v is not None:
if len(v.shape) > 1:
dim_out = v.shape[0]
else:
dim_out = 1
if eps is None:
if nbshots is None:
eps = 1e-8
else:
                eps = max(log2(self.nbqbits)*2*pi/3 * min(.5, 1/nbshots**.25), 1e-8)
num = eps if nbshots is None else eps * nbshots
out = zeros((self.nbparams, dim_out))
run_out = self.run(X, params, nbshots, job_size) / num
for i in range(len(params)):
d = zeros_like(params)
d[i] = eps
pd = self.run(X, params + d, nbshots, job_size) / num - run_out
out[i] = pd if v is None else pd @ v
return out
```
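The interface above leaves `run` and `__verify_builder__` abstract. The following is a minimal, illustrative sketch of a concrete subclass, assuming the module path `polyadicqml.circuitML`; `ToyBuilder` and the uniform "probabilities" are placeholders made up for this example, not part of the library.
```python
import numpy as np
from polyadicqml.circuitML import circuitML  # assumed module path


class ToyBuilder:
    """Stand-in for a circuitBuilder subclass (illustrative only)."""
    def __init__(self, nbqbits):
        self.nbqbits = nbqbits


class ToyCircuitML(circuitML):
    def __verify_builder__(self, cbuilder):
        if not issubclass(cbuilder, ToyBuilder):
            raise TypeError("Expected a ToyBuilder subclass")

    def run(self, X, params, nbshots=None, job_size=None):
        # Return a uniform distribution over bitstrings for every sample,
        # just to illustrate the expected output shape (nb_samples, 2**nbqbits).
        X = np.atleast_2d(X)
        probs = np.full((len(X), 2 ** self.nbqbits), 1 / 2 ** self.nbqbits)
        return probs if not nbshots else nbshots * probs


qc = ToyCircuitML(make_circuit=lambda bdr, x, params: bdr,
                  nbqbits=2, nbparams=4, cbuilder=ToyBuilder)
print(qc.run(np.zeros((3, 2)), qc.random_params()).shape)  # (3, 4)
```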
#### File: polyadicqml/qiskit/qkCircuitML.py
```python
import qiskit as qk
from qiskit.providers.aer.noise import NoiseModel
from qiskit.exceptions import QiskitError
from qiskit.providers import JobStatus
from sys import exc_info
from os.path import isfile
import numpy as np
from time import asctime, sleep
from itertools import cycle
import json
from .utility.backends import Backends
from ..circuitML import circuitML
from .qkBuilder import __qiskitGeneralBuilder__, qkBuilder
class qkCircuitML(circuitML):
"""Quantum ML circuit interface for qiskit and IBMQ.
Provides a unified interface to run multiple parametric circuits with
different input and model parameters.
Parameters
----------
make_circuit : callable of signature self.make_circuit
Function to generate the circuit corresponding to input `x` and
`params`.
nbqbits : int
Number of qubits.
nbparams : int
Number of parameters.
backend : Union[Backends, list, qiskit.providers.BaseBackend]
Backend(s) on which to run the circuits
cbuilder : circuitBuilder, optional
Circuit builder, by default :class:`qkBuilder`
noise_model : Union[list, qiskit.providers.aer.noise.NoiseModel], optional
Noise model to be provided to the backend, by default ``None``. Cannot
be used with `noise_backend`.
coupling_map : list, optional
Coupling map to be provided to the backend, by default ``None``.
Cannot be used with `noise_backend`.
noise_backend : Union[list, qiskit.providers.ibmq.IBMQBackend], optional
IBMQ backend from which the noise model should be generated, by
default ``None``.
save_path : str, optional
Where to save the jobs outputs, by default ``None``. Jobs are saved
only if a path is specified
Attributes
----------
nbqbits : int
Number of qubits.
nbparams : int
Number of parameters.
Raises
------
ValueError
If both `noise_model` and `noise_backend` are provided.
"""
def __init__(self, make_circuit, nbqbits, nbparams, backend,
cbuilder=qkBuilder,
noise_model=None, coupling_map=None,
noise_backend=None,
save_path=None):
super().__init__(make_circuit, nbqbits, nbparams, cbuilder)
self.save_path = save_path
if isinstance(backend, Backends):
self.__backend__ = backend
self.backend = self.__backend__.backends
self.noise_model = self.__backend__.noise_models
self.coupling_map = self.__backend__.coupling_maps
self.job_limit = backend.job_limit
else:
backend = backend if isinstance(backend, list) else [backend]
try:
self.job_limit = min(map(lambda x: x.job_limit(), backend))
except AttributeError:
self.job_limit = None
self.backend = cycle(backend)
if noise_model is not None and noise_backend is not None:
raise ValueError(
"Only one between 'noise_model' and 'noise_backend' can \
be passed to the constructor"
)
if isinstance(noise_model, list):
self.noise_model = cycle(noise_model)
else:
self.noise_model = cycle([noise_model])
if isinstance(coupling_map, list):
self.coupling_map = cycle(coupling_map)
else:
self.coupling_map = cycle([coupling_map])
if noise_backend is not None:
_noise_back = noise_backend
if not isinstance(noise_backend, list):
_noise_back = [noise_backend]
self.noise_model = cycle(
[NoiseModel.from_backend(_backend)
for _backend in _noise_back]
)
self.coupling_map = cycle(
[_backend.configuration().coupling_map
for _backend in _noise_back]
)
def __verify_builder__(self, cbuilder):
bdr = cbuilder(1)
if isinstance(bdr, __qiskitGeneralBuilder__): return
raise TypeError(
f"The circuit builder class is not comaptible: provided \
{cbuilder} expected {__qiskitGeneralBuilder__}"
)
def run(self, X, params, nbshots=None, job_size=None):
if not job_size:
if self.job_limit is not None and len(X) > self.job_limit:
job_size = self.job_limit
elif self.job_limit is not None and job_size > self.job_limit:
raise ValueError(
f"Job size {job_size} greater that job limit {self.job_limit}"
)
try:
if not job_size:
job, qc_list = self.request(X, params, nbshots)
try:
return self.result(job, qc_list, nbshots)
except:
status = job.status()
if job.done() or status == JobStatus.DONE:
print(f"Completed job {job.job_id()} on {job.backend().name()}")
elif status in (JobStatus.CANCELLED, JobStatus.ERROR):
print(f"{status} ({job.job_id()}) on {job.backend().name()}")
else:
print(f"Cancelling job {job.job_id()} on {job.backend().name()}")
job.cancel()
raise
else:
if not isinstance(job_size, int):
raise TypeError("'job_size' has to be int")
n_jobs = len(X) // job_size
requests = [
self.request(X[job_size * n : job_size * (n+1)], params, nbshots)
for n in range(n_jobs)
]
if job_size * n_jobs < len(X):
requests.append(
self.request(X[job_size * n_jobs :], params, nbshots)
)
try:
return np.vstack([
self.result(job, qc_list, nbshots)
for job, qc_list in requests
])
except:
for job, qc_list in requests:
status = job.status()
if job.done() or status == JobStatus.DONE:
print(f"Completed job {job.job_id()} on {job.backend().name()}")
elif status in (JobStatus.CANCELLED, JobStatus.ERROR):
print(f"{status} ({job.job_id()}) on {job.backend().name()}")
else:
print(f"Cancelling job {job.job_id()} on {job.backend().name()}")
job.cancel()
raise
except KeyboardInterrupt:
cin = input("[r] to reload backends, [ctrl-c] to confirm interrupt :\n")
if cin == 'r':
self.__backend__.load_beckends()
self.backend = self.__backend__.backends
self.noise_model = self.__backend__.noise_models
self.coupling_map = self.__backend__.coupling_maps
return self.run(X, params, nbshots, job_size)
except QiskitError as descr:
error_str = f"{asctime()} - Error in qkCircuitML.run :{exc_info()[0]}\n\t{descr}\n"
print(error_str)
with open("error.log", "w") as f:
f.write(error_str)
sleep(5)
return self.run(X, params, nbshots, job_size)
def make_circuit_list(self, X, params, nbshots=None):
"""Generate a circuit for each sample in `X` rows, with parameters
`params`.
Parameters
----------
X : array-like
Input matrix, of shape *(nb_samples, nb_features)* or
*(nb_features,)*. In the latter case, *nb_samples* is 1.
params : vector-like
Parameter vector.
nbshots : int, optional
Number of nbshots, by default ``None``
Returns
-------
list[qiskit.QuantumCircuit]
List of *nb_samples* circuits.
"""
def post(bdr):
if nbshots:
return bdr.measure_all().circuit()
return bdr.circuit()
if len(X.shape) < 2:
return [post(
self.make_circuit(
self._circuitBuilder(self.nbqbits), X, params
)
)]
else:
return [
post(
self.make_circuit(
self._circuitBuilder(self.nbqbits), x, params
)
)
for x in X]
def request(self, X, params, nbshots=None):
"""Create circuits corresponding to samples in `X` and parameters
`params` and send jobs to the backend for execution.
Parameters
----------
X : array-like
Input matrix, of shape *(nb_samples, nb_features)* or
*(nb_features,)*. In the latter case, *nb_samples* is 1.
params : vector-like
Parameter vector.
nbshots : int, optional
Number of nbshots, by default ``None``
Returns
-------
(qiskit.providers.BaseJob, list[qiskit.QuantumCircuit])
Job instance derived from BaseJob and list of corresponding
circuits.
"""
qc_list = self.make_circuit_list(X, params, nbshots)
# Optional arguments for execute are defined here, if they have been
# given at construction.
execute_kwargs = {}
if nbshots:
execute_kwargs['shots'] = nbshots
_noise_model = next(self.noise_model)
if _noise_model is not None:
execute_kwargs['basis_gates'] = _noise_model.basis_gates
execute_kwargs['noise_model'] = _noise_model
_coupling_map = next(self.coupling_map)
if _coupling_map is not None:
execute_kwargs['coupling_map'] = _coupling_map
return qk.execute(
qc_list, next(self.backend),
**execute_kwargs,
), qc_list
def result(self, job, qc_list, nbshots=None):
"""Retrieve job results and returns bitstring counts.
Parameters
----------
job : qiskit.providers.BaseJob
Job instance.
qc_list : list[qiskit.circuit.QuantumCircuit]
List of quantum circuits executed in `job`, of length *nb_samples*.
nbshots : int, optional
Number of shots, by default ``None``. If ``None``, raw counts are
returned.
Returns
-------
array
Bitstring counts as an array of shape *(nb_samples, 2**nbqbits)*,
in the same order as `qc_list`.
Raises
------
QiskitError
If job status is cancelled or had an error.
"""
wait = 1
while not job.done():
if job.status() in (JobStatus.CANCELLED, JobStatus.ERROR):
raise QiskitError
sleep(wait)
results = job.result()
if not nbshots:
out = [results.get_statevector(qc) for qc in qc_list]
out = np.abs(out)**2
order = [
int(f"{key:0>{self.nbqbits}b}"[::-1], 2)
for key in range(out.shape[1])
]
return out[:, order]
else:
out = np.zeros((len(qc_list), 2**self.nbqbits))
for n, qc in enumerate(qc_list):
for key, count in results.get_counts(qc).items():
# print(f"{key} : {count}")
out[n, int(key[::-1], 2)] = count
if self.save_path:
self.save_job(job)
return out
def save_job(self, job, save_path=None):
"""Save job output to json file.
Parameters
----------
job : qiskit.providers.BaseJob
Job instance.
save_path : path, optional
Where to save the output, by default ``None``. If None, uses
:attr:`qkCircuitML.save_path`.
"""
save_path = self.save_path if save_path is None else save_path
if isfile(save_path):
try:
with open(save_path) as f:
out = json.load(f)
except (FileNotFoundError, json.decoder.JSONDecodeError):
print(f"ATTENTION: file {save_path} is broken, confirm overwriting!")
input("Keybord interrupt ([ctrl-c]) to abort")
out = {}
else:
out = {}
with open(save_path, 'w') as f:
job_id = job.job_id()
try:
times = job.time_per_step()
info = {key: str(times[key]) for key in times}
except AttributeError:
info = {}
info['results'] = job.result().to_dict()
out[job_id] = info
json.dump(out, f)
```
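A hedged usage sketch for the class above, not a verified run: it assumes qiskit with the Aer provider is installed, that `polyadicqml.qiskit` exports `qkCircuitML`, and that the default builder supports the `allin`/`cz` calls used in the binary example earlier in this collection.
```python
import numpy as np
import qiskit as qk
from polyadicqml.qiskit import qkCircuitML  # assumed export


def make_circuit(bdr, x, params):
    bdr.allin(x).cz(0, 1).allin(params[:2])
    bdr.cz(0, 1).allin(params[2:4])
    return bdr


backend = qk.Aer.get_backend("qasm_simulator")
qc = qkCircuitML(make_circuit=make_circuit, nbqbits=2, nbparams=4,
                 backend=backend)

X = np.random.rand(8, 2)
counts = qc.run(X, qc.random_params(), nbshots=100, job_size=4)
print(counts.shape)  # (8, 2**nbqbits) == (8, 4)
```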
#### File: polyadicQML/polyadicqml/quantumClassifier.py
```python
import numpy as np
import pickle
import json
from tqdm.auto import tqdm
from scipy.optimize import minimize
from .circuitML import circuitML
from .utility import CE_loss
SCIPY_METHODS = {
'bfgs', 'nelder-mead', 'powell', 'cg',
'newton-cg', 'l-bfgs-b', 'tnc', 'cobyla',
'slsqp', 'trust-constr', 'dogleg',
}
class Classifier():
"""Class for quantum classifiers. Defines the API using the scikit-learn
format.
Parameters
----------
circuit : circuitML
Quantum circuit to simulate, how to use and store is defined in child
classes.
bitstr : list of int or list of str
Which bitstrings should correspond to each class. The number of
classes for the classification is defined by the number of elements.
params : vector, optional
        Initial model parameters. If ``None`` (default) uses
:meth:`circuitML.random_params`.
nbshots : int, optional
Number of shots for the quantum circuit. If 0, negative or None, then
        exact probabilities are computed, by default ``None``.
nbshots_increment : float, int or callable, optional
        How to increase the number of shots as optimization progresses. If float
        or int, the increment occurs every `nbshots_incr_delay` iterations: if
float, then the increment is multiplicative; if int, then it is added.
If callable, the new nbshots is computed by calling
`nbshots_increment(nbshots, n_iter, loss_value)`.
nbshots_incr_delay : int, optional
        After how many iterations nbshots has to increase. By default 20, if
        nbshots_increment is given.
loss : callable, optional
Loss function, by default Negative LogLoss (Cross entropy).
job_size : int, optional
Number of runs for each circuit job, by default the number of
observations.
budget : int, optional
Maximum number of optimization steps, by default 100
    name : str, optional
Name to identify this classifier.
save_path : str, optional
        Where to save intermediate training results, by default None. If
``None``, intermediate results are not saved.
Attributes
----------
bitstr : list[int]
Bitstrings (as int) on which to read the classes
nbshots : int
Number of shots to run circuit
job_size : int
Number of circuits to run in each backend job
nfev : int
        Number of times the circuit has been run
"""
def __init__(self, circuit, bitstr, **kwargs):
super().__init__()
# Retrieve keyword arguments
params = kwargs.get('params')
nbshots = kwargs.get('nbshots')
nbshots_increment = kwargs.get('nbshots_increment')
nbshots_incr_delay = kwargs.get('nbshots_incr_delay')
loss = kwargs.get('loss', CE_loss)
job_size = kwargs.get('job_size')
budget = kwargs.get('budget', 100)
name = kwargs.get('name')
save_path = kwargs.get('save_path')
# Testing circuit and setting it
self.set_circuit(circuit)
# Setting bitstrings
self.set_bitstr(bitstr)
# Setting parameters
if params is None:
self.set_params(circuit.random_params())
else:
self.set_params(params)
# Testing for nbshots type
if not (isinstance(nbshots, int) or (nbshots is None)):
raise TypeError("Invalid `nbshots` type")
if nbshots is not None and nbshots < 1:
nbshots = None
self.nbshots = nbshots
# Testing for nbshots_incr_delay
if not (
isinstance(nbshots_incr_delay, int) or (nbshots_incr_delay is None)
):
raise TypeError("Invalid `nbshots_incr_delay` type")
self.nbshots_incr_delay = 20
if nbshots_incr_delay is not None:
self.nbshots_incr_delay = nbshots_incr_delay
self.__set_nbshots_increment__(nbshots_increment)
if not isinstance(budget, int):
raise TypeError("Invalid `budget` type")
self.__budget__ = budget
self.job_size = job_size
self.__loss__ = loss
self.__min_loss__ = np.inf
self.__fit_conv__ = False
self.__last_loss_value__ = None
self.__last_output__ = None
self.__last_params__ = None
self.__loss_progress__ = []
self.__output_progress__ = []
self.__params_progress__ = []
self.__name__ = name
self.__save_path__ = save_path
self.nfev = 0
def __verify_circuit__(self, circuit):
"""Test wheter a circuit is valid and raise TypeError if it is not.
Parameters
----------
circuit : circuitML
QML circuit
Raises
------
TypeError
If the circuit is not a circuitML
ValueError
            If self has a circuit and the new circuit does not use the same
            make_circuit function
"""
if not isinstance(circuit, circuitML):
raise TypeError(
f"Circuit was type {type(circuit)} while circuitML was \
expected."
)
if hasattr(self, 'circuit'):
if self.circuit != circuit:
raise ValueError(
"Given circuit is different from previous circuit"
)
def set_circuit(self, circuit):
"""Set the circuit after testing for validity.
For a circuit to be valid, it has to be an instance of circuitML and,
in case self already has a circuit, to use the same make_circuit
function.
Parameters
----------
circuit : circuitML
QML circuit
Raises
------
Union[TypeError, ValueError]
If the circuit is invalid.
"""
self.__verify_circuit__(circuit)
self.circuit = circuit
def set_params(self, params):
"""Parameters setter
Parameters
----------
params : vector
Parameters vector
"""
self.params = params
def set_bitstr(self, bitstr):
"""Bitstring setter
Parameters
----------
bitstr : list[str] or list[int]
Bitstrings on which to read the class predictions.
Raises
------
TypeError
            If bitstrings are of wrong type or have heterogeneous types
"""
if isinstance(bitstr[0], int):
for i in bitstr:
if not isinstance(i, int):
raise TypeError("All bitstrings must have the same type")
self.bitstr = bitstr
elif isinstance(bitstr[0], str):
for i in bitstr:
if not isinstance(i, str):
raise TypeError("All bitstrings must have the same type")
self.bitstr = [int(bit, 2) for bit in bitstr]
else:
raise TypeError("Bitstrings must be either int or binary strings")
def __set_nbshots_increment__(self, nbshots_increment):
__incr__ = nbshots_increment
if nbshots_increment is None:
def __incr__(nbshots, n_iter, loss_value):
return nbshots
elif isinstance(nbshots_increment, float):
def __incr__(nbshots, n_iter, loss_value):
if n_iter % self.nbshots_incr_delay == 0:
return int(nbshots_increment * nbshots)
else:
return nbshots
elif isinstance(nbshots_increment, int):
def __incr__(nbshots, n_iter, loss_value):
if n_iter % self.nbshots_incr_delay == 0:
return nbshots + nbshots_increment
else:
return nbshots
self.nbshots_increment = __incr__
def run_circuit(self, X, params=None):
"""Run the circuit with input `X` and parameters `params`.
Parameters
----------
X : array-like
Input matrix of shape (nb_samples, nb_features).
params : vector-like, optional
Parameter vector, by default uses the model
:attr:`~polyadicqml.Classifier.params`
Returns
-------
array
Bitstring counts as an array of shape (nb_samples, 2**nbqbits)
"""
if params is None:
params = self.params
self.nfev += 1
return self.circuit.run(
X, params, self.nbshots, job_size=self.job_size
)
def predict_proba(self, X, params=None):
"""Compute the bitstring probabilities associated to each input point
of the design matrix.
Parameters
----------
X : array
Design matrix of n samples
params : vector, optional
Circuit parameters, by default None. If not given, model
parameters are used.
Returns
-------
array
Predicted bitstring probabilities. Rows correspond to samples and
columns to bitstrings, whose order is defined in
:attr:`~polyadicqml.quantumClassifier.bitstr`.
"""
out = self.run_circuit(X, params)
if self.nbshots:
out = out / float(self.nbshots)
return out[:, self.bitstr]
def proba_to_label(self, proba) -> np.ndarray:
"""Transforms a matrix of real values in integer labels.
Parameters
----------
proba : array
Real valued array
Returns
-------
vector
Labels vector
"""
return np.argmax(proba, axis=1)
def predict(self, X):
"""Compute the predicted class for each input point of the design
matrix.
Parameters
----------
X : array
Design matrix of n samples
Returns
-------
vector
Labels vector
"""
return self.proba_to_label(self.predict_proba(X))
def __call__(self, X):
"""Compute the predicted class for each input point of the design
matrix.
Equivalent to :meth:`~polyadicqml.quantumClassifier.predict`
Parameters
----------
X : array
Design matrix of n samples
params : vector, optional
Circuit parameters, by default None. If not given, model
parameters are used.
Returns
-------
vector
Labels vector
"""
return self.predict(X)
def set_loss(self, loss=None):
"""Loss function setter.
Parameters
----------
loss : callable, optional
Loss function of the form loss(y_true, y_pred, labels), by default
None. If None is given, nothing happens.
"""
if loss is not None:
self.__loss__ = loss
def __callback__(self, params, loss=False, output=False, ):
"""Callback function for optimization. It is called after each step.
Parameters
----------
params : vector
Current parameter vector
loss : bool, optional
            Whether to store the loss value, by default False
output : bool, optional
            Whether to store the current output and parameters, by default
            False
"""
self.__n_iter__ += 1
self.pbar.update()
if loss or output:
self.__loss_progress__.append(self.__last_loss_value__)
if output:
self.__output_progress__.append(self.__last_output__.tolist())
self.__params_progress__.append(params.tolist())
if self.__save_path__ and self.__n_iter__ % 10 == 0:
self.save()
# We randomize the indices only after the callback
# this is necessary to estimate the gradient by FD
        self.__rnd_indices = np.random.choice(
self.__indices, size=self.__batch_size, replace=False)
def __scipy_minimize__(
self, input_train, target_train, labels, method,
save_loss_progress, save_output_progress,
**kwargs
):
def to_optimize(params):
self.nbshots = self.nbshots_increment(
self.nbshots, self.__n_iter__, self.__min_loss__)
probas = self.predict_proba(
input_train[self.__rnd_indices], params
)
loss_value = self.__loss__(
target_train[self.__rnd_indices], probas, labels=labels
)
self.__last_loss_value__ = loss_value
self.__last_output__ = probas[np.argsort(self.__rnd_indices)]
if loss_value < self.__min_loss__:
self.__min_loss__ = loss_value
self.set_params(params.copy())
if method.lower() == "cobyla":
self.__callback__(
params, save_loss_progress, save_output_progress
)
return loss_value
# SCIPY.MINIMIZE IMPLEMENTATION
options = kwargs.get('options', {'maxiter': self.__budget__})
bounds = kwargs.get('bounds')
if method == 'L-BFGS-B' and bounds is None:
bounds = [(-np.pi, np.pi) for _ in self.params]
mini_kwargs = dict(
method=method, bounds=bounds,
options=options,
)
        if method.lower() not in ('cobyla',):
mini_kwargs["callback"] = lambda xk: self.__callback__(
xk, save_loss_progress, save_output_progress,
)
mini_out = minimize(to_optimize, self.params, **mini_kwargs)
self.set_params(mini_out.x.copy())
self.__fit_conv__ = mini_out.success
def __inner_opt__(self):
pass
def fit(self, input_train, target_train, batch_size=None,
**kwargs):
"""Fit the model according to the given training data.
Parameters
----------
input_train : array
Training design matrix.
target_train : vector
Labels corresponding to `input_train`.
batch_size : int, optional
            Minibatch size, by default None. If None, uses the full dataset
            with random shuffle at each iteration.
method : str, optional
Optimization method, by default BFGS
bounds : sequence, optional
Bounds on variables for L-BFGS-B, TNC, SLSQP, Powell, and
trust-constr methods as a sequence of ``(min, max)`` pairs for
each element in x. None is used to specify no bound.
options : dict, optional
Optimizer options, by default {'maxiter': budget}
save_loss_progress : bool, optional
Whether to store the loss progress, by default False
save_output_progress : file path, optional
            Path where to save the output evolution, by default None. If None
is given, the output is not saved.
seed : int, optional
Random seed, by default None
Returns
-------
Classifier
self
"""
method = kwargs.pop('method', 'BFGS')
save_loss_progress = kwargs.pop('save_loss_progress', None)
save_output_progress = kwargs.pop('save_output_progress', None)
seed = kwargs.pop('seed', None)
if seed is not None:
np.random.seed(seed)
_nbshots = self.nbshots
self.pbar = tqdm(total=self.__budget__, desc="Training", leave=False)
self.__n_iter__ = 0
if batch_size:
self.__batch_size = batch_size
else:
self.__batch_size = len(target_train)
_labels = np.unique(target_train)
if len(_labels) > len(self.bitstr):
raise ValueError(
f"Too many labels: expected {len(self.bitstr)}, found \
{len(_labels)} in target_train"
)
self.__indices = np.arange(len(target_train))
self.__rnd_indices = np.random.choice(
self.__indices, size=self.__batch_size, replace=False
)
if method.lower() in SCIPY_METHODS:
self.__scipy_minimize__(
input_train, target_train, _labels,
method, save_loss_progress, save_output_progress, **kwargs
)
else:
raise NotImplementedError
self.pbar.close()
del self.pbar
if self.__n_iter__ < self.__budget__:
if self.__fit_conv__:
print(f"Early convergence at step {self.__n_iter__}")
else:
print(f"Optimization failed at step {self.__n_iter__}")
if save_output_progress:
with open(save_output_progress, "w") as f:
_d = dict(output = self.__output_progress__,
labels = target_train.tolist(),
loss_value = self.__loss_progress__,
params = self.__params_progress__)
json.dump(_d, f)
self.__output_progress__ = []
self.__params_progress__ = []
        # we reset nbshots, as we changed it during training.
self.nbshots = _nbshots
return self
def info_dict(self):
"""Returns a dictionary containing models information.
Returns
-------
dict
Information dictionary
"""
out = {}
model_info = {
"parameters": self.params.tolist(),
'circuit': str(self.circuit),
'nbshots': self.nbshots,
'nbshots_increment': str(self.nbshots_increment),
'nbshots_incr_delay': str(self.nbshots_incr_delay),
'bitstr': [bin(bit) for bit in self.bitstr],
'job_size': self.job_size if self.job_size else "FULL",
'nfev': self.nfev,
}
if self.__loss_progress__:
model_info["loss_progress"] = self.__loss_progress__
model_info["n_iter"] = self.__n_iter__
name = "quantumClassifier"
if self.__name__ is not None:
name = self.__name__
out[str(name)] = model_info
return out
def save(self, path=None):
if path is None:
path = self.__save_path__
with open(path, 'wb') as f:
            pickle.dump(self.info_dict(), f)
```
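The `nbshots_increment` parameter documented above also accepts a callable; below is a hedged sketch of such a shot schedule. `qc`, `bitstr`, `X` and `y` are assumed to come from an example such as the binary one earlier in this collection, and `shot_schedule` is a made-up name, not library code.
```python
from polyadicqml import Classifier


def shot_schedule(nbshots, n_iter, loss_value):
    # Double the shot budget every 10 optimizer iterations, capped at 4096.
    if n_iter and n_iter % 10 == 0:
        return min(2 * nbshots, 4096)
    return nbshots


model = Classifier(qc, bitstr, nbshots=64,
                   nbshots_increment=shot_schedule, budget=50)
model.fit(X, y, method="COBYLA", save_loss_progress=True)
```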
#### File: tests/test_issues/test_issue#1.py
```python
if False:
import numpy as np
from polyadicqml.manyq import mqCircuitML
from polyadicqml.qiskit import qkBuilder
from polyadicqml import Classifier
nbqbits = 2
nbparams = 22
bitstr = ['00', '01', '10']
def block(bdr, x, p):
bdr.allin(x[:2])
bdr.cz(0,1).allin(p[:2])
bdr.cz(0,1).allin(x[[2,3]])
bdr.cz(0,1).allin(p[[2,3]])
bdr.cz(0,1).allin(x[[4,5]])
bdr.cz(0,1).allin(p[[4,5]])
bdr.cz(0,1).allin(x[[6,7]])
bdr.cz(0,1).allin(p[[6,7]])
bdr.cz(0,1).allin(x[[8,9]])
bdr.cz(0,1).allin(p[[8,9]])
bdr.cz(0,1).input(0, x[10])
bdr.cz(0,1).input(0, p[10])
def wineCircuit(bdr, x, params):
block(bdr, x, params[:11])
bdr.cz(0,1)
block(bdr, x, params[11:])
return bdr
input_train = np.random.rand(2940,11)
target_train = (3*np.random.rand((2940))).astype(int)
qc = mqCircuitML(make_circuit=wineCircuit,
nbqbits=nbqbits, nbparams=nbparams)
model = Classifier(qc, bitstr)
model.fit(input_train, target_train)
``` |
{
"source": "joaquinllopez00/kenzie-twitter",
"score": 3
} |
#### File: kenzie-twitter/twitteruser/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class TwitterUser(AbstractUser):
name = models.CharField(max_length=50)
followees = models.ManyToManyField("self", symmetrical=False, related_name="followers")
def __str__(self):
return self.name
```
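An illustrative Django shell session (hypothetical users, not part of the app) showing how the asymmetric `followees` field reads from both sides through the `followers` related name:
```python
from twitteruser.models import TwitterUser

alice = TwitterUser.objects.create_user(username="alice", name="Alice")
bob = TwitterUser.objects.create_user(username="bob", name="Bob")

alice.followees.add(bob)             # Alice follows Bob
print(list(alice.followees.all()))   # [<TwitterUser: Bob>]
print(list(bob.followers.all()))     # [<TwitterUser: Alice>]
print(list(bob.followees.all()))     # [] -- the relation is not symmetrical
```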
#### File: kenzie-twitter/twitteruser/views.py
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, HttpResponseRedirect, reverse
from twitteruser.models import TwitterUser
from tweet.models import Tweet
@login_required
def user_view(request, user_name: str=""):
following = 0
for user in TwitterUser.objects.all():
if request.user in user.followees.all():
following += 1
print(following)
if user_name == "" or user_name == request.user.username:
if request.user:
user = request.user
tweets = Tweet.objects.filter(tweeter=user)
count = tweets.count()
print(count)
else:
return HttpResponseRedirect("/login")
return render(request, 'user_detail.html', {'user': user, 'tweets': tweets, 'count': count, "userpage": True, "following": following})
user = TwitterUser.objects.get(username=user_name)
tweets = Tweet.objects.filter(tweeter=user)
count = tweets.count()
followees = user.followees.all()
# follow = TwitterUser.objects.filter(followees =user)
if request.user in followees:
print("included")
follow = True
else:
follow = False
print(followees, follow, request.user, count)
return render(request, "user_detail.html", {'user': user, 'tweets': tweets, 'count': count,'follow': follow, "userpage": False, "following": following}, )
def follow_user(request, followed_user:int):
print(followed_user, "int")
follow_user = TwitterUser.objects.get(id=followed_user)
print(follow_user, "user we are trying to folllow")
following_user = request.user
follow_user.followees.add(following_user)
follow_user.save()
print(follow_user.followees.count(), "user_request", following_user,"request.user")
return HttpResponseRedirect("/user/%s" % follow_user.username)
def unfollow_user(request, unfollowed_user:int):
unfollow_user = TwitterUser.objects.get(id=unfollowed_user)
unfollowing_user = request.user
unfollow_user.followees.remove(unfollowing_user)
unfollow_user.save()
return HttpResponseRedirect("/user/%s" % unfollow_user.username
)
``` |
{
"source": "joaquinlpereyra/uno",
"score": 4
} |
#### File: joaquinlpereyra/uno/cards.py
```python
import random
import safe_input
import players
from utils import Colors
from exceptions import CantUseThisCard
"""A module to hold all card-related classes.
The most interesting are base classes: Card, _ColoredCard, _SpecialColoredCard,
_SpecialCard.
Polymorphism is used extensively on SpecialCards (both colored and non-colored)
to affect the game.
"""
class Card:
"""A base class to represent all cards."""
def __init__(self, game):
"""A Card is part of a particular game and has a Place: it is either
on a Player's Hand or on one of the decks.
"""
self._game = game
self._place = None
@property
def owner(self):
"""Return the owner of the card, if it has one. If not, return None."""
return self._place.owner if isinstance(self._place, players.Hand) else None
@property
def deck(self):
"""Return the deck of the card, if it has one. If not, return None."""
return self._place if isinstance(self._place, Deck) else None
@property
def game(self):
return self._game
def is_compatible_with(self, another_card):
"""Should return wether the card is _compatible_ (that is, can be put
above on the discarded stack) with another card."""
raise NotImplementedError("Every Card should decide what is compatible with!")
def use(self):
"""Remove the card from the hand of the owner and add it to the
discarded_deck of the game. If it is a special card, apply its effects
to the game as well.
Raises CantUseThisCard error if the card is not compatible with the
topmost card of the game's discarded deck.
"""
if not self.is_compatible_with(self.game.discarded_deck.check()):
raise CantUseThisCard
self.do_effect()
self.owner.hand.remove_card(self)
self.game.discarded_deck.add(self)
def move(self, somewhere):
"""Moves the card to somewhere. Somewhere should either be a
player's hand or a deck.
"""
self._place = somewhere
def do_effect(self):
pass
class _ColoredCard(Card):
"""A baseclass to represent all cards which have a color."""
def __init__(self, game, color):
Card.__init__(self, game)
self._color = color
    # the property decorator makes the color immutable after creation :)
# it is used extensively to create read-only properties
@property
def color(self):
return self._color
@property
def number(self):
# NOTE: this is useful so as to make compatibilities easily symmetric,
# without the need to try/except all the methods.
# all the cards with no numbers actually have a magic, impossible -1 value
return -1
def is_compatible_with(self, another_card):
"""Should return wether the card is _compatible_ (that is, can be put
above on the discarded stack) with another card."""
if self.game.special_active_color is not None:
return self.color == self.game.special_active_color
return self.color == another_card.color or \
isinstance(another_card, _SpecialCard)
def use(self):
super().use()
self.game.special_active_color = None
class _SpecialColoredCard(_ColoredCard):
"""A class to represent all cards which have special effects and have colors."""
def __init__(self, game, color):
_ColoredCard.__init__(self, game, color)
class _SpecialCard(Card):
"""A class to represent all cards which have special effects and dont have
colors."""
def __init__(self, game):
Card.__init__(self, game)
@property
def color(self):
return None
@property
def number(self):
return -1
def is_compatible_with(self, another_card):
# a special card is compatible with everything, really
return True
class NormalCard(_ColoredCard):
def __init__(self, game, number, color):
_ColoredCard.__init__(self, game, color)
self._number = number
@property
def number(self):
return self._number
def is_compatible_with(self, another_card):
if self.game.special_active_color is not None:
return self.color == self.game.special_active_color
return self.number == another_card.number or \
self.color == another_card.color or \
isinstance(another_card, _SpecialCard)
def __str__(self):
return "{0} {1}".format(self.color, self.number)
class SkipPlayerCard(_SpecialColoredCard):
def __init__(self, game, color):
_SpecialColoredCard.__init__(self, game, color)
def do_effect(self):
print("A Skip Player card has been played! Next player will be skipped.")
self.game.forcibly_skip_next_turn = True
def __str__(self):
return "{0} Skip Player".format(self.color)
class ChangeDirectionCard(_SpecialColoredCard):
def __init__(self, game, color):
_SpecialColoredCard.__init__(self, game, color)
def do_effect(self):
print("A Change Direction card has been played! Game will go in reverse now.")
self.game.game_going_right = not self.game.game_going_right
def __str__(self):
return "{0} Change Direction".format(self.color)
class Take2Card(_SpecialColoredCard):
def __init__(self, game, color):
_SpecialColoredCard.__init__(self, game, color)
def is_compatible_with(self, another_card):
if self.game.special_active_color is not None:
return self.color == self.game.special_active_color
return self.color == another_card.color or \
isinstance(another_card, Take4Card) or \
isinstance(another_card, Take2Card)
def do_effect(self):
print("A Take 2 card has been played! Next player will have to take {0} cards "
"unless she can defend herself!".format(self.game.cards_to_forcibly_take+2))
self.game.cards_to_forcibly_take += 2
def __str__(self):
return "{0} Take 2".format(self.color)
class Take4Card(_SpecialCard):
def __init__(self, game):
_SpecialCard.__init__(self, game)
def do_effect(self):
print("A Take 4 card has been played! Next player will have to take {0} cards "
"unless she can defend herself!".format(self.game.cards_to_forcibly_take+4))
self.game.cards_to_forcibly_take += 4
def __str__(self):
return "Take 4"
class Take8Card(_SpecialCard):
def __init__(self, game):
_SpecialCard.__init__(self, game)
def do_effect(self):
print("A Take 8 card has been played! Next player will have to take {0} cards "
"unless she can defend herself!".format(self.game.cards_to_forcibly_take+8))
self.game.cards_to_forcibly_take += 8
def __str__(self):
return "Take 8"
class ChangeColor(_SpecialCard):
def __init__(self, game):
_SpecialCard.__init__(self, game)
self.colors = {'red': Colors.RED,
'yellow': Colors.YELLOW,
'green': Colors.GREEN,
'blue': Colors.BLUE
}
def choose_color(self):
color_chosen = safe_input.choose_color()
return color_chosen
def _do_effect(self, color_chosen):
self.game.special_active_color = self.colors[color_chosen]
print("A Change Color card has been played! The active color is now: {0}".format(color_chosen))
def do_effect(self):
if isinstance(self.owner, players.AIPlayer):
self.do_ai_effect()
else:
color_chosen = self.choose_color()
self._do_effect(color_chosen)
def do_ai_effect(self):
color_chosen = random.choice(list(self.colors.keys()))
self._do_effect(color_chosen)
def __str__(self):
return "Change Color"
```
#### File: joaquinlpereyra/uno/decks.py
```python
import random
from exceptions import CantTakeFromEmptyDeck, CantCheckEmptyDeck
"""
Holds all the deck related classes.
Decks are thought of mostly as stacks.
The DiscardedDeck is special in the sense that we (almost) never take from it,
we only keep adding stuff.
Class and methods don't have docstrings because of their simplistic nature.
"""
class Deck:
def __init__(self):
self.elems = []
@property
def is_empty(self):
return len(self.elems) == 0
def add(self, elem):
self.elems.append(elem)
elem.move(self)
def take(self):
if self.is_empty:
raise CantTakeFromEmptyDeck
card = self.elems.pop()
card.move(None)
return card
def take_multiple(self, n):
cards = []
for _ in range(n):
cards.append(self.take())
return cards
def shuffle(self):
random.shuffle(self.elems)
class DiscardedDeck(Deck):
def __init__(self):
Deck.__init__(self)
def check(self):
if self.is_empty:
raise CantCheckEmptyDeck
return self.elems[-1]
class MainDeck(Deck):
def __init__(self):
Deck.__init__(self)
```
#### File: joaquinlpereyra/uno/game.py
```python
import cards
import decks
import players
import safe_input
from utils import Colors
from utils import pretty_print_as_supermarket_list
""" THE MODULE.
This modules defines an init the Game class, the king of all this project.
The game object is a singleton which coordinates all the actions between decks,
cards and players, which hold references to it.
"""
# thank you tuples for being immutable, we should all learn from you
COLORS = (Colors.RED, Colors.BLUE, Colors.GREEN, Colors.YELLOW)
class Game:
def __init__(self):
self.main_deck = decks.MainDeck()
self.discarded_deck = decks.DiscardedDeck()
self.cards_to_forcibly_take = 0
self.game_going_right = True
self.forcibly_skip_next_turn = False
self.special_active_color = None
self.players = []
self._create_players()
self._fill_main_deck()
self._inital_deal_cards()
self._activate_last_card_on_discarded_deck()
self._play_game()
def _fill_main_deck(self):
"""Fills the main deck with the corresponding cards."""
        # fill the deck up with normal cards. The odd range index avoids
        # unnecessary ifs: range(1, 20) with n % 10 yields 1..9 twice and 0
        # once per color, so non-zero cards appear twice and zero cards once :)
for n in range(1, 20):
n = n % 10
for color in COLORS:
self.main_deck.add(cards.NormalCard(self, n, color))
# the special colored cards, two per color
for _ in range(2):
for color in COLORS:
self.main_deck.add(cards.SkipPlayerCard(self, color))
self.main_deck.add(cards.ChangeDirectionCard(self, color))
self.main_deck.add(cards.Take2Card(self, color))
# and the special not-colored ones
for _ in range(4):
self.main_deck.add(cards.Take4Card(self))
self.main_deck.add(cards.Take8Card(self))
self.main_deck.add(cards.ChangeColor(self))
def _create_players(self):
"""Add players to the game. At least one human and one AI. Human
can choose up to two extra AI players."""
self.players.append(players.HumanPlayer(self, "You"))
self.players.append(players.AIPlayer(self, "AI 1"))
print("An AI has been already created and added to the game.")
for n in range(2,4):
want_to_add_ai_player = safe_input.want_to_add_ai_player()
if want_to_add_ai_player:
self.players.append(players.AIPlayer(self, "AI {0}".format(n)))
else:
break
def _inital_deal_cards(self):
"""Shuffle and deal seven cards to each player found on the game.
Takes one card from the main deck and adds it to the discarded deck.
"""
self.main_deck.shuffle()
for player in self.players:
cards = self.main_deck.take_multiple(7)
player.hand.add_multiple_cards(cards)
self.discarded_deck.add(self.main_deck.take())
def _activate_last_card_on_discarded_deck(self):
"""Causes the last card on the discarded deck to do its effect."""
last_card = self.discarded_deck.check()
last_card.do_effect()
def _add_or_remove(self, n, how_much=1):
"""Adds or remove 1 from n depending on wether the game is going
left or right.
"""
return n+how_much if self.game_going_right else n-how_much
def _grab_players_response_cards(self, player):
take_2 = list(filter(lambda card: isinstance(card, cards.Take2Card), player.hand.cards))
take_4 = list(filter(lambda card: isinstance(card, cards.Take4Card), player.hand.cards))
take_8 = list(filter(lambda card: isinstance(card, cards.Take8Card), player.hand.cards))
if take_8:
return take_8
elif take_4:
return take_4
elif take_2:
return list(filter(lambda card: card.is_compatible_with(self.discarded_deck.check()), take_2))
else:
return []
def react_empty_deck(self):
"""When the main deck is empty, fill it up with cards from the discarded
deck. When this method is finished, the main deck will have all
the cards from the discarded deck except the topmost, which will still
be in the discarded deck, and the main deck will be shuffled.
"""
last_card = self.discarded_deck.take()
self.main_deck = self.discarded_deck
self.main_deck.shuffle()
self.discarded_deck = decks.DiscardedDeck()
self.discarded_deck.add(last_card)
def make_player_grab_cards(self, player):
"""Responds to the special Take2 or Take4 cards. If player can respond
to this card, the sensible option to automatically respond to it will
be chosen automatically. Else, he will have to grab as many cards
as necessary."""
player_special_cards = self._grab_players_response_cards(player)
if player_special_cards:
player.play_specific_card(player_special_cards[0])
return True
else:
player.grab_n_from_deck(self.cards_to_forcibly_take)
self.cards_to_forcibly_take = 0
return False
def victory(self, winning_player):
"""Prints the victory messages."""
print("** VICTORY ** \t" * 3)
print("The winning player was: {0}".format(winning_player))
print("This are the hands of the rest of the players: ")
for player in self.players:
pretty_print_as_supermarket_list("Hand of {0}".format(player), *player.hand.cards)
print("Thank you for playing!")
def _print_player_HUD(self, active_player):
"""Prints information for the user on each turn."""
print()
print("TURN OF: {0}".format(active_player))
print("TOP CARD: {0}".format(str(self.discarded_deck.check()).upper()))
print("============================================")
def _play_game(self):
"""Starts the game itself. A continous loop that will only break
when a player wins.
"""
n = 0
while True:
if self.main_deck.is_empty:
self.react_empty_deck()
if self.forcibly_skip_next_turn:
n = self._add_or_remove(n)
self.forcibly_skip_next_turn = False
continue
active_player = self.players[n % len(self.players)]
self._print_player_HUD(active_player)
if self.cards_to_forcibly_take:
self.make_player_grab_cards(active_player)
n = self._add_or_remove(n)
continue
active_player.play_card()
if active_player.hand.is_empty():
self.victory(active_player)
break
n = self._add_or_remove(n)
a = input("Press return to advance to next turn. \n")
if __name__ == '__main__':
Game()
```
#### File: joaquinlpereyra/uno/safe_input.py
```python
from exceptions import WrongUserInput
from utils import format_input_string
_CONVERT_TO_INT = (lambda i: int(i), "Your input doesn't seem to be a number")
_LOWER_CASE = (lambda s: s.lower(), "Why wouldn't this work on a string? You beat me.")
_STRIP_STRING = (lambda s: s.strip(), "I can't think of a reason why this would fail."
"Have you tried turning it off and on again?")
# CONDITIONS:
def _BETWEEN(min, max):
return (lambda n: min <= n <= max, "Your input doesn't seem to be between {0} and {1}".format(min, max))
_NO_SPACES_IN_STRING = (lambda s: " " not in s, "No spaces allowed in this field.")
_YES_OR_NO = (lambda s: s.lower() in 'yn', "You must answer Yes (y) or No (n).")
_IS_COLOR = (lambda s: s.lower() in ('red', 'yellow', 'blue', 'green'), "Your input doesn't seem to be a color")
def _safe_input(input_string, convertions_and_reasons_for_fail, conditions_and_reasons_for_fail):
"""Keep asking the user for input until he gives an accepted answer.
@args:
str: input_string = the text displayed to the user when expecting input
[(f: a -> b, string)] convertions_and_reasons_for_fail = modifies the user input if it can
            as first element, possible explanation of why the conversion could fail as second element
        [(f: a -> bool, string)] conditions_and_reasons_for_fail = conditions that the modified user
input should pass as first element, show second element string if it didn't
@return:
z, if the last function on convertions_and_reasons_for_fail was of type f: w -> z:
the user input, validated: it is assured to be of the the correct
type after all the convertions and can pass all tests given as conditions.
@sideffects:
        input() is used to read from the user.
"""
while True:
user_input = input(format_input_string(input_string))
try:
correct_type_and_format_user_input = _converter(user_input, *convertions_and_reasons_for_fail)
_condition_checker(correct_type_and_format_user_input, *conditions_and_reasons_for_fail)
break
except WrongUserInput as e:
print("Woho, not so fast! Your input doesn't seem to be valid. \n"
"Maybe the problem has something to with this: \n{0}".format(e.reason))
return correct_type_and_format_user_input
# YOU SHALL NOT PASS!
def _condition_checker(user_input, *test_condition_and_if_it_didnt_work_why_tuples):
"""Raise a WrongUserInput error if user_input doesn't pass one of the
    tests with its appropriate reason.
@args:
str: user_input = the input the user gave
*(f: a -> bool, str): test_condition_and_if_it_didnt_work_why_tuples =
arbitrary amount of tuples containing a tester function and an
            explanation of why it might fail
@return:
None
@raise:
WrongUserInput
"""
for condition, reason in test_condition_and_if_it_didnt_work_why_tuples:
if not condition(user_input):
raise WrongUserInput(reason)
# Wololo.
def _converter(user_input, *try_converting_and_if_it_did_not_work_why_tuples):
"""Tries to convert user_input according as instructed be the functions
@args:
str: user_input = the input the user gave
*(f: a -> bool, str): test_condition_and_if_it_didnt_work_why_tuples =
arbitrary amount of tuples containing a tester function and an
            explanation of why it might fail
@return:
z, if the last function on try_converting_and_if_it_did_not_work_why_tuples
was of type f: w -> z.
@raise:
WrongUserInput
"""
for try_function, reason in try_converting_and_if_it_did_not_work_why_tuples:
try:
user_input = try_function(user_input)
except:
raise WrongUserInput(reason)
return user_input
def choose_card(list_of_cards):
convertions_and_reasons_for_fail = [_CONVERT_TO_INT]
conditions_and_reasons_for_fail = [_BETWEEN(1, len(list_of_cards))]
user_input = _safe_input("Choose the card you want to play from the available cards.",
convertions_and_reasons_for_fail, conditions_and_reasons_for_fail)
return user_input - 1
def want_to_add_ai_player():
convertions_and_reasons_for_fail = []
conditions_and_reasons_for_fail = [_YES_OR_NO]
res = _safe_input("Do you want to add a new AI player? [Y/n]", convertions_and_reasons_for_fail,
conditions_and_reasons_for_fail)
return res.lower() == 'y'
def choose_color():
convertions_and_reasons_for_fail = [_STRIP_STRING]
conditions_and_reasons_for_fail = [_IS_COLOR]
return _safe_input("Choose a color. Valid colors are RED, YELLOW, GREEN, BLUE",
convertions_and_reasons_for_fail, conditions_and_reasons_for_fail)
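# Illustrative sketch: composing a new validated prompt from the helpers above.
# choose_number_of_ai_players is hypothetical and not called by the game.
def choose_number_of_ai_players():
    convertions_and_reasons_for_fail = [_CONVERT_TO_INT]
    conditions_and_reasons_for_fail = [_BETWEEN(1, 3)]
    return _safe_input("How many AI players do you want? (1-3)",
                       convertions_and_reasons_for_fail, conditions_and_reasons_for_fail)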
```
#### File: joaquinlpereyra/uno/utils.py
```python
class Colors:
RED = 'red'
GREEN = 'green'
BLUE = 'blue'
YELLOW = 'yellow'
def format_strings(*strings):
"""Take an arbitrary number of strings and format them nicely.
Returns the nicely formatted string.
"""
accum_string = ""
    for s in strings:
        accum_string = "{0} {1}\n".format(accum_string, s)
return accum_string
def format_input_string(*strings):
"""Takes an arbitrary number of strings and format them nicely with a
'>>>' added on a new line to show the user the program is waiting for
its input.
Return the nicely formatted string.
"""
nice_string = format_strings(*strings)
final_string = "{0}\n>>> ".format(nice_string) if nice_string else '>>> '
return final_string
def pretty_print(*strings):
"""Print an arbtrary number of strings.
Return None.
"""
print(format_strings(*strings))
def pretty_print_as_supermarket_list(title, *strings):
"""Print a title (for no title, give a falsey value on first param)
and an arbitrary number of strings like it was a nice supermarket list.
"""
if title and strings:
print('[{0}]'.format(title))
for index, string in enumerate(strings, start=1):
print('{0}.\t{1}'.format(index, string))
def pretty_print_as_supermarket_list_a_dictionary(titles_strings_dictionary):
if not titles_strings_dictionary:
print("No results for this query")
for title, strings in titles_strings_dictionary.items():
title = title if title else 'No tag'
pretty_print_as_supermarket_list(title, *strings)
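# Illustrative call (hypothetical items):
#   pretty_print_as_supermarket_list("Hand of You", "red 7", "blue Take 2")
# prints:
#   [Hand of You]
#   1.	red 7
#   2.	blue Take 2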
``` |
{
"source": "joaquinOnSoft/gsites2md",
"score": 2
} |
#### File: src/gsites2md/GoogleDriveWrapper.py
```python
import io
import logging
import os.path
import re
import shutil
# socket.timeout will cause the api client to become unusable
# https://github.com/googleapis/google-api-python-client/issues/563#issuecomment-738363829
import socket
socket.setdefaulttimeout(4000)
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
# If modifying these scopes, delete the file token.json.
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
# Authorizing requests with OAuth 2.0 (in Google Drive API v3)
# https://developers.google.com/drive/api/v3/about-auth
SCOPES = ['https://www.googleapis.com/auth/drive.readonly']
class GoogleDriveWrapper:
GOOGLE_DRIVE_URL_START = "https://drive.google.com"
GOOGLE_DRIVE_FILE_URL_START = "https://drive.google.com/file"
GOOGLE_DRIVE_OPEN_CONTENT_URL_START = "https://drive.google.com/open"
INDEX_ID = 0
INDEX_NAME = 1
INDEX_MIME_TYPE = 2
METADATA_FIELD_ID = "id"
METADATA_FIELD_NAME = "name"
METADATA_FIELD_MIMETYPE = "mimeType"
METADATA_FIELD_PARENTS = "parents"
MIME_TYPE_FOLDER = "application/vnd.google-apps.folder"
CONTENT_TYPE_FILE = "file"
CONTENT_TYPE_FOLDER = "folder"
HTTP_ERROR_404 = 404
MAX_NUMBER_RETRIES = 5
def __init__(self):
"""
        Authenticates against the Drive v3 API using OAuth 2.0 and builds the
        service client used by the rest of the wrapper.
"""
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
dir_path = os.path.dirname(os.path.realpath(__file__))
credentials_json = os.path.join(dir_path, 'credentials.json')
flow = InstalledAppFlow.from_client_secrets_file(credentials_json, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
self.service = build('drive', 'v3', credentials=creds)
def get_content_id_from_url(self, content_url: str) -> str:
"""
Get content (file/folder) identifier from a Google Drive URL
:param content_url: Google Drive URL
:return: Content (file/folder) identifier in Google Drive
"""
file_id = None
if self.__is_url_type(GoogleDriveWrapper.GOOGLE_DRIVE_FILE_URL_START, content_url):
result = re.search(r'(\/file\/d\/)((.)+)(\/)', content_url)
if result and len(result.regs) >= 2:
file_id = content_url[result.regs[2][0]: result.regs[2][1]]
elif self.__is_url_type(GoogleDriveWrapper.GOOGLE_DRIVE_OPEN_CONTENT_URL_START, content_url):
result = re.search(r'[?&]id=([^&]+).*$', content_url)
if result and len(result.regs) >= 2:
file_id = content_url[result.regs[1][0]: result.regs[1][1]]
return file_id
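    # Illustrative examples (fake IDs), covering both supported URL shapes:
    #   https://drive.google.com/file/d/FAKE_FILE_ID/view?usp=sharing -> "FAKE_FILE_ID"
    #   https://drive.google.com/open?id=FAKE_FILE_ID&authuser=0      -> "FAKE_FILE_ID"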
    def __get_content_metadata(self, content_id) -> dict:
"""
        Recover the original file/folder metadata (id, name, parents, mimeType) from a Google Drive Identifier
:param content_id: Google Drive identifier
:return: File/folder metadata map containing 'id', 'name', 'parents' and 'mimeType' or 'None' if not found
"""
results = None
try:
results = self.service.files().get(fileId=content_id, fields="id, name, parents, mimeType").execute()
except HttpError as e:
logging.debug(f"{e.resp.status} - {e.resp.reason} - Recovering content metadata from URL: {e.uri}")
        except ConnectionResetError as e:
            logging.debug(f"Connection reset while recovering content metadata for id '{content_id}': {e}")
return results
def get_content_metadata_by_name(self, content_id: str, field_name: str):
"""
Recover the original file/folder metadata (id, name, parents, mimeType) from a Google Drive Identifier
:param content_id: Google Drive identifier
:param field_name: id, name, parents or mimeType
:return: File/folder name or None if not found
"""
field_value = None
results = self.__get_content_metadata(content_id)
if results and results.get(field_name):
field_value = results.get(field_name)
return field_value
def get_content_name(self, content_id) -> str:
"""
Recover the original file/folder name from a Google Drive Identifier
:param content_id: Google Drive identifier
:return: File/folder name or None if not found
"""
return self.get_content_metadata_by_name(content_id, GoogleDriveWrapper.METADATA_FIELD_NAME)
def get_content_type_from_url(self, url: str) -> str:
"""
Check if a given URL correspond with Google Drive URL linking a file or a folder
:param url: string containing an URL
:return: 'folder' if is a Google Drive URL that links a folder, 'file' if links a file,
or 'None' in in other case
"""
content_type = None
if GoogleDriveWrapper.__is_url_type(self.GOOGLE_DRIVE_FILE_URL_START, url):
content_type = GoogleDriveWrapper.CONTENT_TYPE_FILE
elif GoogleDriveWrapper.__is_url_type(self.GOOGLE_DRIVE_OPEN_CONTENT_URL_START, url):
content_id = self.get_content_id_from_url(url)
if content_id:
mimetype = self.get_content_metadata_by_name(content_id, GoogleDriveWrapper.METADATA_FIELD_MIMETYPE)
if mimetype == GoogleDriveWrapper.MIME_TYPE_FOLDER:
content_type = GoogleDriveWrapper.CONTENT_TYPE_FOLDER
else:
content_type = GoogleDriveWrapper.CONTENT_TYPE_FILE
return content_type
def get_content_path(self, content_id: str) -> str:
path = None
results = self.__get_content_metadata(content_id)
if results:
parents = results.get(GoogleDriveWrapper.METADATA_FIELD_PARENTS)
if parents and len(parents) > 0:
path = ""
while True:
results = self.__get_content_metadata(parents[0])
parents = results.get(GoogleDriveWrapper.METADATA_FIELD_PARENTS)
if parents is None:
break
path = os.path.join(results.get(GoogleDriveWrapper.METADATA_FIELD_NAME), path)
return path
def download_content_from_url(self, url: str, path: str) -> str:
download_url = None
if self.is_google_drive_url(url):
content_id = self.get_content_id_from_url(url)
if content_id:
if self.is_file_url(url):
content_name = self.get_content_name(content_id)
download_url = self.download_file_from_id(content_id, path, content_name)
elif self.is_folder_url(url):
download_url = self.download_folder_from_id(content_id, path)
else:
logging.warning(f"File name not found for URL: {url}")
return download_url
def download_file_from_url(self, file_url: str, path: str) -> str:
"""
Download a shared file from Google Drive, saving a copy to the given local path
SEE: https://developers.google.com/drive/api/v3/manage-downloads
:param file_url: A Google Drive URL to a shared file, which looks like this for files
https://drive.google.com/file/d/1moXo98Pp6X1hpSUbeql9TMlRO8GIyDBY/view?usp=sharing
and like this for folders https://drive.google.com/open?id=0B-t5SY0w2S8icVFyLURtUVNQQVU&authuser=0
:param path: Local path to store the downloaded file
:return: Local path of the file downloaded
"""
downloaded_file_full_path = None
file_id = self.get_content_id_from_url(file_url)
if file_id:
file_name = self.get_content_name(file_id)
if file_name:
downloaded_file_full_path = self.download_file_from_id(file_id, path, file_name)
return downloaded_file_full_path
def __replicate_google_drive_folder_structure(self, content_id, path):
"""
Replicate Google Drive folder structure under the local path
:param content_id: Google Drive content identifier (file or folder)
:param path: local base path
:return: Local path that replicates the Google Drive folder structure under the local base path
"""
google_drive_path = self.get_content_path(content_id)
if google_drive_path is not None:
path = os.path.join(path, google_drive_path)
# Create folder if not exists
if not os.path.exists(path):
os.makedirs(path)
if not self.is_file(content_id):
folder_name = self.get_content_name(content_id)
path = os.path.join(path, folder_name)
if not os.path.exists(path):
os.makedirs(path)
return path
def download_file_from_id(self, file_id: str, path: str, file_name: str,
replicate_google_drive_folder_structure: bool = True) -> str:
"""
Download a shared file from Google Drive, saving a copy to the given local path
:param file_id: file identifier
:param path: local path where the file will be downloaded
:param file_name: File name to be used to save the file in the local environment
:param replicate_google_drive_folder_structure: Flag to indicate whether the Google Drive folder structure must be
replicated under the local path or not.
:return: Local path of the downloaded file, None if the file doesn't exist
(usually a 404 happens when you try to download the file)
"""
error_on_download = False
number_retries = 0
if replicate_google_drive_folder_structure:
path = self.__replicate_google_drive_folder_structure(file_id, path)
request = self.service.files().get_media(fileId=file_id, fields="files(id, name)")
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
while number_retries < GoogleDriveWrapper.MAX_NUMBER_RETRIES:
try:
done = False
while done is False:
status, done = downloader.next_chunk()
logging.debug("Download %s: %d%%." % (file_name, int(status.progress() * 100)))
# File successfully downloaded. Exit loop
break
except HttpError as e:
logging.error(f"HTTP Error downloading file: {e.uri} - {e.resp.status} - {e.resp.reason}")
error_on_download = True
if e.status_code == GoogleDriveWrapper.HTTP_ERROR_404:
# Retry not needed
break
except ConnectionResetError as e:
logging.error(f"Connection Reset Error downloading file {file_id}: {str(e)}")
error_on_download = True
except Exception as e:
logging.error(f"Error downloading file: {str(e)}")
error_on_download = True
number_retries += 1
logging.info(f"Retrying download: {file_id} - {file_name}")
downloaded_file_path = None
if not error_on_download:
# The file has been downloaded into RAM, now save it in a file
# https://stackoverflow.com/questions/60111361/how-to-download-a-file-from-google-drive-using-python-and-the-drive-api-v3
downloaded_file_path = os.path.join(path, file_name)
fh.seek(0)
with open(downloaded_file_path, 'wb') as f:
shutil.copyfileobj(fh, f)
return downloaded_file_path
def download_folder_from_id(self, folder_id: str, path: str) -> str:
download_path = self.__replicate_google_drive_folder_structure(folder_id, path)
# Call the Drive v3 API
results = self.service.files().list(
q=f"'{folder_id}' in parents",
pageSize=10, fields="nextPageToken, files(id, name, mimeType)").execute()
items = results.get('files', [])
if not items:
logging.info('No files found.')
else:
logging.debug('Files:')
for item in items:
logging.debug(u'{0} ({1}) - {2}'.format(item['name'], item['id'], item['mimeType']))
if item['mimeType'] == self.MIME_TYPE_FOLDER:
# Base path DOESN'T CHANGE for FOLDERS (the remote path is replicated under the base path)
self.download_folder_from_id(item['id'], path)
else:
# Base path CHANGES for FILES
self.download_file_from_id(item['id'], download_path, item['name'], False)
return download_path
@staticmethod
def __is_url_type(url_type_pattern: str, url: str):
is_url_type = False
if url is not None:
is_url_type = url.startswith(url_type_pattern)
return is_url_type
def is_google_drive_url(self, url: str) -> bool:
"""
Check if a given URL corresponds with a Google Drive URL
:param url: string containing an URL
:return: True if it is a Google Drive URL, False in any other case
"""
return GoogleDriveWrapper.__is_url_type(self.GOOGLE_DRIVE_URL_START, url)
def is_file_url(self, url: str) -> bool:
"""
Check if a given URL corresponds with a Google Drive URL linking a file
:param url: string containing an URL
:return: True if it is a Google Drive URL that links a file, False in any other case
"""
return self.get_content_type_from_url(url) == GoogleDriveWrapper.CONTENT_TYPE_FILE
def is_folder_url(self, url: str) -> bool:
"""
Check if a given URL corresponds with a Google Drive URL linking a folder
:param url: string containing an URL
:return: True if it is a Google Drive URL that links a folder, False in any other case
"""
return self.get_content_type_from_url(url) == GoogleDriveWrapper.CONTENT_TYPE_FOLDER
def is_file(self, content_id):
mimetype = self.get_content_metadata_by_name(content_id, GoogleDriveWrapper.METADATA_FIELD_MIMETYPE)
return mimetype != GoogleDriveWrapper.MIME_TYPE_FOLDER
def replicate_content_path_from_url(self, url: str, path: str) -> str:
download_url = None
if self.is_google_drive_url(url):
content_id = self.get_content_id_from_url(url)
if content_id:
if self.is_file_url(url):
content_name = self.get_content_name(content_id)
download_url = self.replicate_file_path_from_id(content_id, path, content_name)
elif self.is_folder_url(url):
download_url = self.download_folder_from_id(content_id, path)
else:
logging.warning(f"File name not found for URL: {url}")
return download_url
def replicate_file_path_from_id(self, file_id: str, path: str, file_name: str,
replicate_google_drive_folder_structure: bool = True) -> str:
"""
Replicate the Google Drive folder structure (path) on the local drive for a shared file
from Google Drive
:param file_id: file identifier
:param path: local path where the file will be downloaded
:param file_name: File name to be used to save the file in the local environment
:param replicate_google_drive_folder_structure: Flag to indicate whether the Google Drive folder structure must be
replicated under the local path or not.
:return: Local path of the downloaded file, None if the file doesn't exist
(usually a 404 happens when you try to download the file)
"""
downloaded_file_path = None
if replicate_google_drive_folder_structure:
path = self.__replicate_google_drive_folder_structure(file_id, path)
request = self.service.files().get_media(fileId=file_id, fields="files(id, name)")
if file_name is not None:
downloaded_file_path = os.path.join(path, file_name)
return downloaded_file_path
def replicate_folder_path_from_id(self, folder_id: str, path: str) -> str:
download_path = self.__replicate_google_drive_folder_structure(folder_id, path)
# Call the Drive v3 API
results = self.service.files().list(
q=f"'{folder_id}' in parents",
pageSize=10, fields="nextPageToken, files(id, name, mimeType)").execute()
items = results.get('files', [])
if not items:
logging.info('No files found.')
else:
logging.debug('Files:')
for item in items:
logging.debug(u'{0} ({1}) - {2}'.format(item['name'], item['id'], item['mimeType']))
if item['mimeType'] == self.MIME_TYPE_FOLDER:
# Base path DOESN'T CHANGE for FOLDERS (the remote path is replicated under the base path)
self.replicate_folder_path_from_id(item['id'], path)
else:
# Base path CHANGES for FILES
self.replicate_file_path_from_id(item['id'], download_path, item['name'], False)
return download_path
```
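The two regular expressions in `get_content_id_from_url` handle the two Google Drive URL shapes (`/file/d/<id>/...` and `open?id=<id>&...`). The snippet below is a minimal stand-alone sketch of that extraction, reusing sample URLs that appear in the test file further down; it is illustrative only and not part of the library.
```python
import re

# Sample URLs taken from the tests below; the IDs are only used for illustration.
file_url = "https://drive.google.com/file/d/1moXo98Pp6X1hpSUbeql9TMlRO8GIyDBY/view?usp=sharing"
open_url = "https://drive.google.com/open?id=0B-t5SY0w2S8iRXNJU1RPTmZpaEE&authuser=0"

# Pattern used for ".../file/d/<id>/..." URLs
match = re.search(r'(\/file\/d\/)((.)+)(\/)', file_url)
print(match.group(2))  # 1moXo98Pp6X1hpSUbeql9TMlRO8GIyDBY

# Pattern used for ".../open?id=<id>&..." URLs
match = re.search(r'[?&]id=([^&]+).*$', open_url)
print(match.group(1))  # 0B-t5SY0w2S8iRXNJU1RPTmZpaEE
```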
#### File: src/gsites2md/HTML2md.py
```python
import copy
import logging
import os
import shutil
from gsites2md.HTML2mdConfig import HTML2mdConfig
from gsites2md.URLUtils import URLUtils
from gsites2md.HTMLParser2md import HTMLParser2md
class HTML2md:
@staticmethod
def process(config: HTML2mdConfig):
"""
Convert an HTML file or folder (with all its nested files) into a Markdown file.
:param config: Object that contains the following configuration properties:
"source": source file or folder
"destination": destination file or folder
"replace_google_drive_links": (flag) Replace Google Drive links with local links
"google_drive_content_download": (flag) Download Google Drive content to the local drive.
"downloads": Path to download Google Drive content. Default value, "."
"timeout": Timeout, in seconds, to use in link validation connections. Default value "-1" (unlimited)
"""
if os.path.isfile(config.source):
HTML2md.__process_file(config)
else:
HTML2md.__process_folder(config)
@staticmethod
def __process_folder(config):
for dir_path, dirs, files in os.walk(config.source):
for d in dirs:
d_in_name = os.path.join(config.source, os.path.join(dir_path, d))
d_out_name = d_in_name.replace(config.source, config.destination)
if not os.path.exists(d_out_name):
logging.debug("Creating folder: " + d_out_name)
os.mkdir(d_out_name)
for filename in files:
f_in_name = os.path.join(dir_path, filename)
f_out_name = f_in_name.replace(config.source, config.destination)
if URLUtils.is_friendly_url(f_in_name):
f_out_name = f_out_name + ".md"
logging.debug("HTML2MD: " + f_in_name)
config4files = HTML2md.update_cloned_config(f_in_name, f_out_name, config)
HTML2md.__process_file(config4files)
elif URLUtils.is_html(f_in_name):
f_out_name = f_out_name.replace(".html", ".md").replace(".htm", ".md")
logging.debug("HTML2MD: " + f_in_name)
config4files = HTML2md.update_cloned_config(f_in_name, f_out_name, config)
HTML2md.__process_file(config4files)
else:
logging.debug("Copying: " + f_in_name)
shutil.copy2(f_in_name, f_out_name)
@staticmethod
def update_cloned_config(f_in_name, f_out_name, config):
config4files = copy.copy(config)
config4files.source = f_in_name
config4files.destination = f_out_name
return config4files
@staticmethod
def __process_file(config):
"""
Convert an HTML file into a Markdown file.
:param config: object that contains the following properties:
"source": source file or folder
"destination": destination file or folder
"replace_google_drive_links": (flag) Replace Google Drive links with local links
"google_drive_content_download": (flag) Download Google Drive content to the local drive.
"downloads": Path to download Google Drive content. Default value, "."
"timeout": Timeout, in seconds, to use in link validation connections. Default value "-1" (unlimited)
"""
f = open(config.source, "r")
html_txt = f.read()
f.close()
# Parse html file
parser = HTMLParser2md(config)
parser.feed(html_txt)
md = parser.md
md = HTML2md.__remove_useless_md(md)
if config.destination is None:
config.destination = config.source.replace('.html', '.md').replace('.htm', '.md')
f = open(config.destination, "w")
f.write(md)
f.close()
@staticmethod
def __remove_useless_md(md: str) -> str:
if md is not None:
md = md.replace("\n| | \n", "")
return md
```
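The `process()` entry point above is driven entirely by an `HTML2mdConfig` object. A plausible invocation might look like the sketch below; the attribute names follow the docstring, but direct attribute assignment is an assumption (the config class body is not shown here) and the paths are placeholders.
```python
from gsites2md.HTML2mdConfig import HTML2mdConfig
from gsites2md.HTML2md import HTML2md

# Hypothetical usage; attribute names mirror the docstring above and
# direct assignment is assumed, since HTML2mdConfig's definition is not shown.
config = HTML2mdConfig()
config.source = "./site"                      # HTML file or folder to convert
config.destination = "./site-md"              # where the Markdown output goes
config.replace_google_drive_links = False
config.google_drive_content_download = False
config.downloads = "."
config.timeout = -1

HTML2md.process(config)
```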
#### File: gsites2md/test/TestGoogleDriveWrapper.py
```python
import os
import shutil
import unittest
from ..GoogleDriveWrapper import GoogleDriveWrapper
class TestGoogleDriveWrapper(unittest.TestCase):
URL = "https://drive.google.com/file/d/1moXo98Pp6X1hpSUbeql9TMlRO8GIyDBY/view?usp=sharing"
FILE_ID = '1moXo98Pp6X1hpSUbeql9TMlRO8GIyDBY'
FILE_NAME = 'openimaj-tutorial-pdf.pdf'
URL_WITH_SPECIAL_CHARACTERS = "https://drive.google.com/file/d/1PIoLKylUslWs1X9ZhSI-jx7i3POmrDii/view?usp=sharing"
FILE_ID_WITH_SPECIAL_CHARACTERS = '1PIoLKylUslWs1X9ZhSI-jx7i3POmrDii'
GOOGLE_DRIVE_FILE_UNDER_FOLDER_HIERARCHY_URL = \
"https://drive.google.com/file/d/1Vgfp5pWzI1YBBF819HZq5LVyO68z9yeq/view"
FILE_UNDER_FOLDER_HIERARCHY_ID = "1Vgfp5pWzI1YBBF819HZq5LVyO68z9yeq"
GOOGLE_DRIVE_FILE_URL = "https://drive.google.com/file/d/1moXo98Pp6X1hpSUbeql9TMlRO8GIyDBY/view?usp=sharing"
FILE_ID_NOT_EXISTS = "0B-t5SY0w2A9aa9AaAAa9Aa99Aa9"
GOOGLE_DRIVE_FOLDER_URL = "https://drive.google.com/open?id=0B-t5SY0w2S8iRXNJU1RPTmZpaEE&authuser=0"
FOLDER_ID = "0B-t5SY0w2S8iRXNJU1RPTmZpaEE"
FOLDER_NAME = "media"
GOOGLE_DRIVE_FOLDER_URL_WITHOUT_EXTRA_PARAMS = "https://drive.google.com/open?id=0B-t5SY0w2S8iXzI1VHE1TUxSRUk"
FOLDER_WITHOUT_EXTRA_PARAMS_ID = "0B-t5SY0w2S8iXzI1VHE1TUxSRUk"
GOOGLE_DRIVE_FOLDER_UNED_URL = \
"https://drive.google.com/drive/folders/0B" \
<KEY>"
FOLDER_UNED_ID = "0B-t5SY0w2S8ifktpVUNVNWJ3NzVoVkZlSXBfWW1pTF9MR2ljVWxYNWNrLVBOZGo3eVFMVms"
FOLDER_UNED_NAME = "uned"
GOOGLE_DRIVE_FOLDER_WITH_SUBFOLDERS_URL = "https://drive.google.com/drive/folders/0B-t5SY0w2S8iXzI1VHE1TUxSRUk"
FOLDER_WITH_SUBFOLDERS_ID = "0B-t5SY0w2S8iXzI1VHE1TUxSRUk"
FOLDER_WITH_SUBFOLDERS_NAME = "GradoMedioxComunidades"
def setUp(self) -> None:
super().setUp()
self.wrapper = GoogleDriveWrapper()
@staticmethod
def remove_folder(path: str):
if os.path.isdir(path):
shutil.rmtree(path)
def test_get_content_id_from_url(self):
file_id = self.wrapper.get_content_id_from_url(self.URL)
self.assertIsNotNone(file_id)
self.assertEqual(self.FILE_ID, file_id)
folder_id = self.wrapper.get_content_id_from_url(self.GOOGLE_DRIVE_FOLDER_URL)
self.assertIsNotNone(folder_id)
self.assertEqual(self.FOLDER_ID, folder_id)
def test_get_content_id_from_url_without_extra_params(self):
folder_id = self.wrapper.get_content_id_from_url(self.GOOGLE_DRIVE_FOLDER_URL_WITHOUT_EXTRA_PARAMS)
self.assertIsNotNone(folder_id)
self.assertEqual(self.FOLDER_WITHOUT_EXTRA_PARAMS_ID, folder_id)
def test_get_content_id_with_special_characters_from_url(self):
file_id = self.wrapper.get_content_id_from_url(self.URL_WITH_SPECIAL_CHARACTERS)
self.assertIsNotNone(file_id)
self.assertEqual(self.FILE_ID_WITH_SPECIAL_CHARACTERS, file_id)
def test_get_content_name(self):
file_name = self.wrapper.get_content_name(self.FILE_ID)
self.assertIsNotNone(file_name)
self.assertEqual(self.FILE_NAME, file_name)
# def test_get_content_metadata_by_name(self):
# file_name = self.wrapper.get_content_metadata_by_name(self.FILE_ID, GoogleDriveWrapper.METADATA_FIELD_NAME)
# self.assertIsNotNone(file_name)
# self.assertEqual(self.FILE_NAME, file_name)
#
# mimetype = self.wrapper.get_content_metadata_by_name(self.FILE_ID, GoogleDriveWrapper.METADATA_FIELD_MIMETYPE)
# self.assertIsNotNone(mimetype)
# self.assertEqual('application/pdf', mimetype)
#
# parents = self.wrapper.get_content_metadata_by_name(self.FILE_UNDER_FOLDER_HIERARCHY_ID,
# GoogleDriveWrapper.METADATA_FIELD_PARENTS)
# self.assertIsNotNone(parents)
# self.assertEqual(['1qK_9zEcFePjcGDkX2VayPkQ2XNBTLBoD'], parents)
# def test_get_content_path(self):
# path = self.wrapper.get_content_path(self.FILE_UNDER_FOLDER_HIERARCHY_ID)
# self.assertEqual("OposicionesFQ/Canarias/", path)
def test_get_folder_name(self):
folder_name = self.wrapper.get_content_name(self.FOLDER_ID)
self.assertIsNotNone(folder_name)
self.assertEqual(self.FOLDER_NAME, folder_name)
def test_is_file_url(self):
self.assertTrue(self.wrapper.is_file_url(TestGoogleDriveWrapper.GOOGLE_DRIVE_FILE_URL))
self.assertFalse(self.wrapper.is_file_url("https://www.fiquipedia.es"))
self.assertFalse(self.wrapper.is_file_url(TestGoogleDriveWrapper.GOOGLE_DRIVE_FOLDER_URL))
self.assertFalse(self.wrapper.is_file_url(None))
def test_is_folder_url(self):
self.assertTrue(self.wrapper.is_folder_url(TestGoogleDriveWrapper.GOOGLE_DRIVE_FOLDER_URL))
self.assertFalse(self.wrapper.is_folder_url("https://www.fiquipedia.es"))
self.assertFalse(self.wrapper.is_folder_url(TestGoogleDriveWrapper.GOOGLE_DRIVE_FILE_URL))
self.assertFalse(self.wrapper.is_folder_url(None))
def test_is_google_drive_url(self):
self.assertTrue(self.wrapper.is_google_drive_url(TestGoogleDriveWrapper.GOOGLE_DRIVE_FOLDER_URL))
self.assertTrue(self.wrapper.is_google_drive_url(TestGoogleDriveWrapper.GOOGLE_DRIVE_FILE_URL))
self.assertFalse(self.wrapper.is_google_drive_url("https://www.fiquipedia.es"))
self.assertFalse(self.wrapper.is_google_drive_url(None))
def __check_file_download(self, file_path: str):
self.assertIsNotNone(file_path)
self.assertTrue(os.path.isfile(file_path))
folder = os.path.dirname(file_path)
if folder != "." and folder != "./":
shutil.rmtree(folder)
else:
os.remove(file_path)
def test_download_file_from_id_not_exists(self):
file_path = self.wrapper.download_file_from_id(self.FILE_ID_NOT_EXISTS, "./", "file_not_found.pdf")
self.assertIsNone(file_path)
def test_download_file_from_id(self):
file_path = self.wrapper.download_file_from_id(self.FILE_ID, "./", self.FILE_NAME)
self.__check_file_download(file_path)
def test_download_file_from_url(self):
file_path = self.wrapper.download_file_from_url(self.URL, "./")
self.__check_file_download(file_path)
def test_download_content_from_url(self):
file_path = self.wrapper.download_content_from_url(self.URL, "./")
self.__check_file_download(file_path)
# def test_download_folder_from_id(self):
# path = self.wrapper.download_folder_from_id(TestGoogleDriveWrapper.FOLDER_UNED_ID, ".")
#
# self.assertEqual("./PAUxComunidades/electrotecnia/uned", path)
# self.assertTrue(os.path.exists("./PAUxComunidades/electrotecnia/uned"))
# self.assertTrue(os.path.isfile("./PAUxComunidades/electrotecnia/uned/2008-06-uned-electrotecnia-exam.pdf"))
# self.assertTrue(os.path.isfile("./PAUxComunidades/electrotecnia/uned/2012-mo-uned-electrotecnia-exam.pdf"))
# self.assertTrue(os.path.isfile("./PAUxComunidades/electrotecnia/uned/2014-06-09-uned-electrotecnia-exam.pdf"))
# self.assertTrue(os.path.isfile("./PAUxComunidades/electrotecnia/uned/2014-mo-uned-electrotecnia-guia.pdf"))
# self.assertTrue(os.path.isfile("./PAUxComunidades/electrotecnia/uned/2015-06-uned-electrotecnia-exam.pdf"))
#
# TestGoogleDriveWrapper.remove_folder("./PAUxComunidades")
# def test_download_folder_with_subfolders_from_id(self):
# path = self.wrapper.download_folder_from_id(TestGoogleDriveWrapper.FOLDER_WITH_SUBFOLDERS_ID, ".")
#
# self.assertIsNotNone(path)
# self.assertEqual("./GradoMedioxComunidades", path)
# self.assertTrue(os.path.exists("./GradoMedioxComunidades"))
#
# self.assertTrue(os.path.exists("./GradoMedioxComunidades/CastillaLaMancha"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/CastillaLaMancha/2010-CastillaLaMancha-modelo-GM-CT.pdf"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/CastillaLaMancha/2012-CastillaLaMancha-06-GM-CT.pdf"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/CastillaLaMancha/2012-CastillaLaMancha-09-GM-CT.pdf"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/CastillaLaMancha/2013-CastillaLaMancha-06-GM-CT.pdf"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/CastillaLaMancha/2013-CastillaLaMancha-09-GM-CT.pdf"))
#
# self.assertTrue(os.path.exists("./GradoMedioxComunidades/Madrid"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/Madrid/2004-madrid-GM-CT-exam.doc"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/Madrid/2004-madrid-GM-CT-soluc.doc"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/Madrid/2005-madrid-GM-CT-exam.doc"))
# self.assertTrue(
# os.path.isfile("./GradoMedioxComunidades/Madrid/2005-madrid-GM-CT-soluc.doc"))
#
# TestGoogleDriveWrapper.remove_folder(TestGoogleDriveWrapper.FOLDER_WITH_SUBFOLDERS_NAME)
def test_replicate_file_path_from_id(self):
file_path = self.wrapper.replicate_file_path_from_id(self.FILE_ID, "./", self.FILE_NAME)
self.assertIsNotNone(file_path)
self.assertEqual("./TV detect ads/openimaj-tutorial-pdf.pdf", file_path)
# Cleanup. Remove folder structure created during the test
TestGoogleDriveWrapper.remove_folder("./TV detect ads")
# def test_replicate_folder__from_id(self):
# path = self.wrapper.replicate_folder_path_from_id(TestGoogleDriveWrapper.FOLDER_UNED_ID, ".")
#
# self.assertEqual("./PAUxComunidades/electrotecnia/uned", path)
# self.assertTrue(os.path.exists("./PAUxComunidades/electrotecnia/uned"))
#
# TestGoogleDriveWrapper.remove_folder("./PAUxComunidades")
# def test_replicate_folder_with_subfolders_path_from_id(self):
# path = self.wrapper.replicate_folder_path_from_id(TestGoogleDriveWrapper.FOLDER_WITH_SUBFOLDERS_ID, ".")
#
# self.assertIsNotNone(path)
# self.assertEqual("./GradoMedioxComunidades", path)
# self.assertTrue(os.path.exists("./GradoMedioxComunidades"))
# self.assertTrue(os.path.exists("./GradoMedioxComunidades/CastillaLaMancha"))
# self.assertTrue(os.path.exists("./GradoMedioxComunidades/Madrid"))
#
# TestGoogleDriveWrapper.remove_folder("./GradoMedioxComunidades")
```
#### File: gsites2md/test/TestHTMLParser2md.py
```python
from unittest import TestCase
from ..HTML2md import HTMLParser2md
from ..HTML2mdConfig import HTML2mdConfig
class TestHTMLParser2md(TestCase):
def setUp(self) -> None:
super().setUp()
config = HTML2mdConfig()
self.parser = HTMLParser2md(config)
def test_li(self):
self.parser.nested_list.append("ul")
self.assertEqual("\n * ", self.parser.li())
self.parser.nested_list.append("ol")
self.assertEqual("\n 1. ", self.parser.li())
def test_img(self):
attrs = [("src", "img/fiquipedia.png"), ("alt", "Fiquipedia logo")]
self.assertEqual("\n", self.parser.img(attrs))
# The image is inside a link
self.parser.href = "htt://www.fiquipedia.es"
self.assertEqual("", self.parser.img(attrs))
def test_img_with_title(self):
attrs = [("src", "img/fiquipedia.png"), ("alt", "Fiquipedia logo"), ("title", "Fiquipedia")]
self.assertEqual("\n", self.parser.img(attrs))
def test_md(self):
md = "---\n\n| | \n### Recursos\n | | \n"
self.parser.md = md
md_cleaned = "---\n\n### Recursos\n\n"
self.assertEqual(md_cleaned, self.parser.md)
``` |
{
"source": "joaquinOnSoft/oxint",
"score": 3
} |
#### File: oxint/scraping/URLReader.py
```python
import urllib.request
class URLReader:
def __init__(self, url):
self.url = url
# print(url)
def read(self):
resource = urllib.request.urlopen(self.url)
response = resource.read()
html = response.decode(resource.headers.get_content_charset())
resource.close()
return html
```
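A minimal usage sketch of the class above; the URL is illustrative and the call performs a real HTTP request when run.
```python
from oxint.scraping.URLReader import URLReader

# Illustrative usage; fetches and decodes the remote document.
reader = URLReader("https://www.example.com")
html = reader.read()
print(html[:200])  # first characters of the decoded document
```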
#### File: oxint/utils/NameUtils.py
```python
class NameUtils:
@staticmethod
def get_first_name_from_full_name(full_name: str) -> str:
first_name = None
if full_name is not None:
tokens = full_name.split(" ")
num_tokens = len(tokens)
if num_tokens == 3:
first_name = tokens[0]
elif num_tokens >= 4:
first_name = tokens[0] + " " + tokens[1]
return first_name
@staticmethod
def get_last_name_from_full_name(full_name: str) -> str:
last_name = None
if full_name is not None:
tokens = full_name.split(" ")
num_tokens = len(tokens)
if num_tokens == 3:
last_name = tokens[1] + " " + tokens[2]
elif num_tokens >= 4:
last_name = ""
for count in range(2, num_tokens):
last_name += tokens[count]
if count < (num_tokens -1):
last_name += " "
return last_name
@staticmethod
def get_party_name_from_title(title: str) -> str:
name = None
if title is not None:
index_parenthesis = title.find("(")
if index_parenthesis > 0:
name = title[0: index_parenthesis].strip()
return name
@staticmethod
def get_party_abbrev_from_title(title: str) -> str:
name = None
if title is not None:
index_parenthesis = title.find("(") + 1
if index_parenthesis > 0:
name = title[index_parenthesis:].replace(")", "")
return name
```
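A short sketch of how the token-count heuristic above splits a full name, plus the party-title helpers. The personal name is invented for illustration; the party title is the one used in the tests below.
```python
from oxint.utils.NameUtils import NameUtils

# Invented Spanish-style full name: two given names + two surnames (4 tokens).
full_name = "Juan Carlos Garcia Lopez"
print(NameUtils.get_first_name_from_full_name(full_name))  # -> "Juan Carlos"
print(NameUtils.get_last_name_from_full_name(full_name))   # -> "Garcia Lopez"

title = "TERCERA EDAD EN ACCIÓN (3e en acción)"
print(NameUtils.get_party_name_from_title(title))    # -> "TERCERA EDAD EN ACCIÓN"
print(NameUtils.get_party_abbrev_from_title(title))  # -> "3e en acción"
```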
#### File: utils/test/TestNameUtils.py
```python
import unittest
from oxint.utils.NameUtils import NameUtils
class TestNameUtils(unittest.TestCase):
def test_get_first_name_from_full_name(self):
name = NameUtils.get_first_name_from_full_name("<NAME>")
self.assertEqual("<NAME>", name)
name = NameUtils.get_first_name_from_full_name("<NAME>")
self.assertEqual("Enrique", name)
name = NameUtils.get_first_name_from_full_name("<NAME>")
self.assertEqual("<NAME>", name)
def test_get_last_name_from_full_name(self):
name = NameUtils.get_last_name_from_full_name("<NAME>")
self.assertEqual("<NAME>", name)
name = NameUtils.get_last_name_from_full_name("<NAME>")
self.assertEqual("<NAME>", name)
name = NameUtils.get_last_name_from_full_name("<NAME>")
self.assertEqual("<NAME>", name)
def test_get_party_abbrev_from_title(self):
party_abbrev = NameUtils.get_party_abbrev_from_title("TERCERA EDAD EN ACCIÓN (3e en acción)")
self.assertEqual("3e en acción", party_abbrev)
def test_get_party_name_from_title(self):
party_name = NameUtils.get_party_name_from_title("TERCERA EDAD EN ACCIÓN (3e en acción)")
self.assertEqual("TERCERA EDAD EN ACCIÓN", party_name)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joaquinpco/Votaciones-Pinf-19",
"score": 2
} |
#### File: Votaciones-Pinf-19/UsuarioUca/forms.py
```python
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AuthenticationForm
from django.forms import ModelForm
from .models import UsuarioUca, uvalidonifworld, uvalidonifspain, validonifspain, validonifworld, Estudiante, PASS, \
Profesor
from django.core.validators import RegexValidator
istextvalidator = RegexValidator("^(?=.{3,15}$)[A-ZÁÉÍÓÚ][a-zñáéíóú]+(?: [A-ZÁÉÍÓÚ][a-zñáéíóú]+)?$",
message='El Nombre no debe contener números',
code='Nombre/Apellidos incorrectos')
isemailvalidator = RegexValidator("^\w+([\.-]?\w+)*@<EMAIL>",
message='El email debe pertenecer al dominio de la UCA',
code='Email invalido')
class CustomUserCreationForm(UserCreationForm):
nif = forms.CharField(label='NIF', max_length=9, widget=forms.TextInput(attrs={"placeholder": "Ej:32085090"}))
email = forms.EmailField(label='Email', max_length=64, help_text="El correo debe pertener al dominio de la UCA",
required=True, validators=[isemailvalidator])
first_name = forms.CharField(label="Nombre", max_length=20, min_length=2, required=True,
validators=[istextvalidator])
last_name = forms.CharField(label="Apellidos", max_length=64, min_length=2, required=True,
validators=[istextvalidator])
class Meta:
model = UsuarioUca
fields = '__all__'
def clean_nif(self):
nif = self.cleaned_data['nif']
nif = "u" + nif
if UsuarioUca.objects.filter(nif=nif).exists():
raise forms.ValidationError("Ya existe un usuario con este NIF")
return nif
class CustomUserChangeForm(UserChangeForm):
email = forms.EmailField(label='Email', max_length=64,
required=True)
nif = forms.CharField(label="Nif", required=True, max_length=10)
first_name = forms.CharField(label="Nombre", max_length=20, required=True, validators=[istextvalidator])
last_name = forms.CharField(label="Apellidos", max_length=64, required=True, validators=[istextvalidator])
class Meta:
model = UsuarioUca
fields = '__all__'
class UserLoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super(UserLoginForm, self).__init__(*args, **kwargs)
username = forms.CharField(widget=forms.TextInput(
attrs={'class': 'form-control', 'id': 'username', 'autocomplete': 'off'}))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control',
'id': 'password',
}
))
class createUserForm(ModelForm):
class Meta:
model = UsuarioUca
fields = ['nif', 'first_name', 'last_name', 'email', 'password', 'rol']
labels = {'first_name': ('Nombre'), 'last_name': ('Apellidos'), 'nif': ('NIF'),
'email': ('Correo electrónico'), 'password': ('<PASSWORD>'), 'rol': ('Rol'), }
help_texts = {'first_name': ('Introduce un nombre valido.'), 'last_name': ('Introduce los apellidos válidos.'),
'nif': ('Introduce un nif válido'),
'email': ('Introduce un correo válido y del dominio de la UCA'),
'groups': ('Grupo al que pertenece el usuario'), }
widgets = {'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'nif': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.EmailInput(attrs={'class': 'form-control'}),
'password': forms.PasswordInput(attrs={'class': 'form-control'}), }
def clean_nif(self):
nif = self.cleaned_data['nif']
nif = "u" + nif
if UsuarioUca.objects.filter(nif=nif).exists():
raise forms.ValidationError("Ya existe un usuario con este NIF")
return nif
def save(self, commit=True):
user = super(createUserForm, self).save()
user.set_password(<PASSWORD>)
user.save()
return user
class editUserForm(ModelForm):
class Meta:
model = UsuarioUca
fields = ['nif', 'first_name', 'last_name', 'email',
]
labels = {'first_name': ('Nombre'), 'last_name': ('Apellido'), 'nif': ('NIF'),
'email': ('Correo electrónico'),
}
widgets = {'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'nif': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.EmailInput(attrs={'class': 'form-control'}),
}
```
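The custom `RegexValidator` instances at the top of this module do the heavy lifting for name validation. The snippet below exercises the name pattern on its own, outside Django, with invented sample strings.
```python
import re

# Same pattern as istextvalidator above: 3-15 characters, capitalized word(s), no digits.
pattern = r"^(?=.{3,15}$)[A-ZÁÉÍÓÚ][a-zñáéíóú]+(?: [A-ZÁÉÍÓÚ][a-zñáéíóú]+)?$"
print(bool(re.match(pattern, "Maria Luisa")))  # True  (invented sample)
print(bool(re.match(pattern, "maria123")))     # False (digits, lowercase start)
```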
#### File: Votaciones-Pinf-19/UsuarioUca/views.py
```python
from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Avg
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.contrib.auth import authenticate, login, logout
from django.views.generic import DetailView
from django.views.generic.base import TemplateView
from django.utils import timezone
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView, CreateView
from import_export.formats import base_formats
from UsuarioUca.admin import UsuarioUcaResource
from UsuarioUca.forms import createUserForm, editUserForm
from UsuarioUca.import_export_views import ImportView
from UsuarioUca.models import UsuarioUca, Estudiante, Profesor, PASS
from import_export import resources, fields
from django.contrib import messages
# Pregunta and Opciones are used in VotacionView below; they are assumed to live in VotacionesUca.models
from VotacionesUca.models import Votacion, Eleccion, Censo, Pregunta, Opciones
def my_view(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
# Redirect to a success page.
...
else:
# Return an 'invalid login' error message.
...
class UsuarioUcaListView(LoginRequiredMixin, ListView):
model = UsuarioUca
paginate_by = 100 # if pagination is desired
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
return context
class UsuarioUcaUpdate(LoginRequiredMixin, UpdateView):
model = UsuarioUca
form_class = editUserForm
template_name_suffix = '_update_form'
def get_success_url(self):
return reverse('usuariouca_list', )
class UsuarioUcaCreate(LoginRequiredMixin, CreateView):
model = UsuarioUca
form_class = createUserForm
def get_success_url(self):
if self.object.rol == "Estudiante":
return reverse('estudiante_create')
if self.object.rol == "Profesor":
return reverse('profesor_create')
if self.object.rol == "PASS":
return reverse('pass_create')
class EstudianteCreate(LoginRequiredMixin, CreateView):
model = Estudiante
fields = '__all__'
def get_success_url(self):
return reverse('usuariouca_edit', kwargs={'pk': self.object.pk})
class ProfesorCreate(LoginRequiredMixin,CreateView):
model = Profesor
fields = '__all__'
def get_success_url(self):
return reverse('usuariouca_edit', kwargs={'pk': self.object.pk})
class PASSCreate(LoginRequiredMixin, CreateView):
model = PASS
fields = '__all__'
def get_success_url(self):
return reverse('usuariouca_edit', kwargs={'pk': self.object.pk})
class UsuarioUcaExportView(LoginRequiredMixin, ImportView, resources.ModelResource):
class Meta:
model = UsuarioUca
def get(self, queryset, *args, **kwargs):
queryset = UsuarioUca.objects.all()
dataset = UsuarioUcaResource().export(queryset)
response = HttpResponse(dataset.csv, content_type="csv")
response['Content-Disposition'] = 'attachment; filename=UsuariosUCA' + datetime.now().__str__() + '.csv'
return response
class MyModelImportView(LoginRequiredMixin, ImportView):
model = UsuarioUca
template_name = 'usuariouca_upload.html'
formats = (base_formats.CSV,)
resource_class = UsuarioUcaResource
def get_success_url(self):
return reverse('usuariouca_list')
def create_dataset(self, *args, **kwargs):
""" Insert an extra 'source_user' field into the data.
"""
dataset = super().create_dataset(*args, **kwargs)
length = len(dataset._data)
dataset.append_col([self.request.user.id] * length,
header="source_user")
return dataset
class VotacionView(LoginRequiredMixin, DetailView):
model = Votacion
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
pregunta = Pregunta.objects.get(votacion=self.object.id)
context['now'] = datetime.now()
context['opciones'] = Opciones.objects.filter(pregunta=pregunta.pk)
return context
class HomeView(LoginRequiredMixin, ListView):
template_name = "home.html"
context_object_name = 'votacion_list'
model = Votacion
def get_context_data(self, **kwargs):
censos_id = Censo.objects.filter(usuario=self.request.user).values_list('eleccion_id', flat=True)
context = super(HomeView, self).get_context_data(**kwargs)
context.update({
'eleccion_list': Eleccion.objects.filter(id__in=censos_id),
'more_context': Eleccion.objects.filter(id__in=censos_id),
})
return context
def get_queryset(self):
censos_id = Censo.objects.filter(usuario=self.request.user).values_list('votacion_id', flat=True)
return Votacion.objects.filter(id__in=censos_id)
class CrearVotacionView(LoginRequiredMixin, TemplateView):
template_name = "CrearVotacion.html"
class FAQView(LoginRequiredMixin, TemplateView):
template_name = "faq2.0.html"
class EstadisticasVotacionSimpleView(LoginRequiredMixin, TemplateView):
template_name = "votacionSimpleResultados.html"
class EstadisticasEleccionView(LoginRequiredMixin, TemplateView):
template_name = "votacionEleccionesResultado.html"
def logout_request(request):
logout(request)
return redirect('home')
def erase_request(request, pk):
UsuarioUca.objects.filter(id=pk).delete()
return redirect('usuariouca_list')
``` |
{
"source": "joaquinquintas/shipwell_backend_ricardo",
"score": 3
} |
#### File: core/exceptions/conversion_unit_not_implemented.py
```python
class ConversionUnitNotImplemented(Exception):
'''
raised when a TemperatureUnit cannot be converted
'''
def __init__(self, unit_name: str):
super().__init__('Conversion unit %s not implemented' % unit_name)
```
#### File: core/primitives/average_temperature.py
```python
from statistics import mean
from typing import List
from core.primitives import Temperature, TemperatureUnit
from core.exceptions import (
NoneWeatherServiceDataRetrived,
ConversionUnitNotImplemented
)
class AverageTemperature:
'''
Final message with the average temperature data
'''
__service_data: List[Temperature]
__conversion_unit: TemperatureUnit
def __init__(self, service_data: List[Temperature], conversion_unit: TemperatureUnit) -> None:
if not service_data:
raise NoneWeatherServiceDataRetrived()
self.__service_data = service_data
self.__conversion_unit = conversion_unit
def __converted_data(self):
'''
the original data retrieved from the services does not need to change; it is only
converted to a common unit to compute the average
'''
for temperature in self.__service_data:
converted = False
if self.__conversion_unit.is_celsius:
yield temperature.to_celsius()
converted = True
if self.__conversion_unit.is_fahrenheit:
yield temperature.to_fahrenheit()
converted = True
if not converted:
raise ConversionUnitNotImplemented(self.__conversion_unit.unit_name)
@property
def average(self) -> Temperature:
'''
final average temperature value
:return: Temperature
'''
average = mean([temperature.value for temperature in self.__converted_data()])
return Temperature(
'weather_mi',
average,
self.__conversion_unit
)
@property
def service_data(self) -> List[Temperature]:
'''
service result data of Temperature's responded from the diferent
weather service providers
:return: List[Temperature]
'''
return self.__service_data
```
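`AverageTemperature` first converts every reading to the requested unit and only then averages. The sketch below reproduces that flow with plain numbers, independently of the project's `Temperature` and `TemperatureUnit` classes (their definitions are not shown in this file); the readings are invented.
```python
from statistics import mean

# Three invented provider readings in Fahrenheit, averaged in Celsius.
readings_f = [48.0, 51.0, 50.0]
readings_c = [(f - 32) * 5 / 9 for f in readings_f]  # convert first, as __converted_data() does
print(round(mean(readings_c), 2))                    # -> 9.81
```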
#### File: infrastructure/exceptions/weather_service_not_implemented.py
```python
class WeatherServiceNotImplemented(Exception):
'''
exception type for the weather service not implemented
'''
def __init__(self, service_implementation: str) -> None:
super().__init__(f'Service: {service_implementation} NOT implemented')
```
#### File: infrastructure/serializers/average_temperature_serializer.py
```python
from typing import Dict
from core.primitives import AverageTemperature
from .dict_serializer import DictSerializer
from .temperature_serializer import TemperatureSerializer
class AverageTemperatureSerializer(DictSerializer):
__temperature_serializer: TemperatureSerializer
def __init__(self):
self.__temperature_serializer = TemperatureSerializer()
def to_dict(self, obj: AverageTemperature) -> Dict:
return {
'average': self.__temperature_serializer.to_dict(obj.average),
'service_data': [
self.__temperature_serializer.to_dict(temperature)
for temperature in obj.service_data
]
}
```
#### File: infrastructure/serializers/validation_error_serializer.py
```python
from typing import Dict
from django.core.exceptions import ValidationError
from .dict_serializer import DictSerializer
class ValidationErrorSerializer(DictSerializer):
'''
serialize a validation error
'''
def to_dict(self, obj: ValidationError) -> Dict:
return {
'messages': obj.messages
}
```
#### File: shipwell_backend_ricardo/weather_mi/tests.py
```python
from django.test import TestCase, Client
class AverageTemperatureRequestTest(TestCase):
def setUp(self):
self.client = Client()
def test_full_case(self):
response = self.client.get('/average_temperature/', {
'latitude': 12.23,
'longitude': -12.23,
'services[]': ['noaa', 'weatherdotcom', 'accuweather']
})
self.assertEqual(200, response.status_code)
data = response.json()
self.assertEqual('celsius', data['data']['average']['unit'])
self.assertEqual(8.9, data['data']['average']['value'])
def test_filtering_some_service(self):
response = self.client.get('/average_temperature/', {
'latitude': 12.23,
'longitude': -12.23,
'services[]': ['noaa', 'accuweather']
})
self.assertEqual(200, response.status_code)
data = response.json()
self.assertEqual('celsius', data['data']['average']['unit'])
self.assertEqual(12, data['data']['average']['value'])
def test_missing_latitude_validation(self):
response = self.client.get('/average_temperature/', {
'longitude': -12.23,
'services[]': ['noaa', 'weatherdotcom', 'accuweather']
})
self.assertEqual(403, response.status_code)
error = response.json()
self.assertIn('must fill the field: latitude', error['error']['messages'])
def test_missing_longitude_validation(self):
response = self.client.get('/average_temperature/', {
'latitude': 12.23,
'services[]': ['noaa', 'weatherdotcom', 'accuweather']
})
self.assertEqual(403, response.status_code)
error = response.json()
self.assertIn('must fill the field: longitude', error['error']['messages'])
def test_missing_services_validation(self):
response = self.client.get('/average_temperature/', {
'latitude': 12.23,
'longitude': -12.23,
})
self.assertEqual(403, response.status_code)
error = response.json()
self.assertIn('must fill the field: services[]', error['error']['messages'])
```
#### File: shipwell_backend_ricardo/weather_mi/validators.py
```python
from django.http.request import QueryDict, HttpRequest
from django.core.exceptions import ValidationError
def validate_required(fieldname, querydict: QueryDict):
'''
validates that a field is present in the request
'''
if fieldname not in querydict:
raise ValidationError(f'must fill the field: {fieldname}')
def validate_list_not_empty(fieldname, querydict: QueryDict):
if not querydict.getlist(fieldname):
raise ValidationError(f'must pass some option on filter: {fieldname}')
def validate_coordinate(fieldname, value):
'''
validates that the field is a float
'''
try:
float(value)
except ValueError:
raise ValidationError(f'field {fieldname} -> {value} is not a decimal number')
def validate_latitude(request: HttpRequest):
'''
validates the latitude field
'''
validate_required('latitude', request.GET)
validate_coordinate('latitude', request.GET['latitude'])
def validate_longitude(request: HttpRequest):
'''
validates a longitude field
'''
validate_required('longitude', request.GET)
validate_coordinate('longitude', request.GET['longitude'])
def validate_services(request: HttpRequest):
'''
validates a services filter field
'''
validate_required('services[]', request.GET)
validate_list_not_empty('services[]', request.GET)
``` |
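A stand-alone sketch of how the required-field validator above behaves when a parameter is missing. It builds a `QueryDict` directly (with minimal settings configured) instead of a full `HttpRequest`, and assumes the module is importable as `weather_mi.validators`.
```python
from django.conf import settings
settings.configure()  # minimal settings so QueryDict can be used outside a full project

from django.core.exceptions import ValidationError
from django.http.request import QueryDict
from weather_mi.validators import validate_required  # assumed module layout

params = QueryDict("latitude=12.23&services[]=noaa", encoding="utf-8")  # 'longitude' deliberately missing
try:
    validate_required("longitude", params)
except ValidationError as e:
    print(e.messages)  # ['must fill the field: longitude']
```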
{
"source": "JoaquinRodriguez2006/RoboCup_Junior_Material",
"score": 3
} |
#### File: Codigo/ejemplos_erebus/victim_detection_test.py
```python
import cv2
import numpy as np
def detectVisualSimple(image_data):
coords_list = []
# img = np.array(np.frombuffer(image_data, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4)))
# img[:,:,2] = np.zeros([img.shape[0], img.shape[1]])
img = image_data
cv2.imshow('Primera Imagen', img)
# convert from BGR to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('Imagen Grises', gray)
#apply threshold
thresh = cv2.threshold(gray, 140, 255, cv2.THRESH_BINARY)[1]
cv2.imshow('Trhes', thresh)
# keep the first point of every sufficiently large contour
contours, h = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
if cv2.contourArea(c) > 1000:
coords = list(c[0][0])
coords_list.append(coords)
return ((int(coords[0])),int(coords[1]))
# robot = Robot()
timeStep = 32
# camera_centro = robot.getDevice('camera1')
# camera_centro.enable(timeStep)
img = cv2.imread('Imagenes/imagenes rombo/imagenes_rombo_gasrombo_2.png')  # load the image from disk before processing
print(detectVisualSimple(img))
cv2.waitKey(0)
""""
while robot.step(timeStep) != -1:
img = camera_centro.getImage()
print(detectVisualSimple(img, camera_centro))
img = np.array(np.frombuffer(img, np.uint8).reshape((camera_centro.getHeight(), camera_centro.getWidth(), 4)))
cv2.imshow("Imagen", img)
img =
cv2.waitKey(1)
"""
```
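When the detector is fed from a Webots camera instead of a file, the raw BGRA byte buffer has to be reshaped into an image first, as the commented-out loop above hints. A self-contained sketch of that conversion, using a dummy buffer and invented dimensions:
```python
import numpy as np

# Dummy stand-in for camera.getImage(); a real buffer has the same BGRA byte layout.
width, height = 64, 64
raw = bytes(width * height * 4)
bgra = np.frombuffer(raw, np.uint8).reshape((height, width, 4))
bgr = bgra[:, :, :3].copy()  # drop the alpha channel before OpenCV processing
print(bgr.shape)  # (64, 64, 3)
```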
#### File: Movimiento/avance_recto/avanzar_baldosa_encoder.py
```python
from controller import Robot
from controller import Motor
from controller import PositionSensor
from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter
from controller import PositionSensor
import math
import time
robot = Robot()
timeStep = 32
tile_size = 0.12
speed = 6.28
media_baldoza = 0.06
estado = 0
start = 0
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
rIzq_encoder = ruedaIzquierda.getPositionSensor()
rDer_encoder = ruedaDerecha.getPositionSensor()
rIzq_encoder.enable(timeStep)
rDer_encoder.enable(timeStep)
# Functions
def avanzar(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(vel)
def girar(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(vel)
def girar_izq(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(-vel)
while robot.step(timeStep) != -1:
if estado == 0:
start = rDer_encoder.getValue()
estado = 1
if estado == 1:
avanzar(6.28)
if rDer_encoder.getValue() >= start + 2.9:
avanzar(0)
break
```
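The 2.9 figure in the loop above is the wheel rotation, in radians, needed to advance roughly half a tile. A quick check of that arithmetic, assuming a wheel radius of about 0.0205 m (the radius is an assumption, not taken from the robot definition):
```python
# distance = wheel_radius * wheel_rotation  =>  rotation = distance / wheel_radius
wheel_radius = 0.0205   # assumed wheel radius in metres
half_tile = 0.06        # media_baldoza above
print(half_tile / wheel_radius)  # ~2.93 rad, close to the 2.9 used in the loop
```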
#### File: Codigo/Movimiento/ejemplo_navegacion.py
```python
from controller import Robot, Motor, DistanceSensor, Camera, Emitter, GPS
import struct
import numpy as np
import cv2 as cv
timeStep = 32 # Set the time step for the simulation
max_velocity = 6.28 # Set a maximum velocity time constant
robot = Robot()
# Create an object to control the left wheel
wheel_left = robot.getDevice("wheel1 motor")
# Create an object to control the right wheel
wheel_right = robot.getDevice("wheel2 motor")
#[left wheel speed, right wheel speed]
speeds = [max_velocity, max_velocity]
#Create objects for all robot sensors
leftDist = robot.getDevice("leftDist") # Get robot's left distance sensor
leftDist.enable(timeStep) # Enable left distance sensor
frontDist = robot.getDevice("frontDist")
frontDist.enable(timeStep)
rightDist = robot.getDevice("rightDist")
rightDist.enable(timeStep)
cam = robot.getDevice("camera")
cam.enable(timeStep)
colorSensor = robot.getDevice("color")
colorSensor.enable(timeStep)
emitter = robot.getDevice("emitter") # Emitter doesn't need enable
gps = robot.getDevice("gps")
gps.enable(timeStep)
wheel_left.setPosition(float("inf"))
wheel_right.setPosition(float("inf"))
def turn_right():
#set left wheel speed
speeds[0] = 0.6 * max_velocity
#set right wheel speed
speeds[1] = -0.2 * max_velocity
def turn_left():
#set left wheel speed
speeds[0] = -0.2 * max_velocity
#set right wheel speed
speeds[1] = 0.6 * max_velocity
def spin():
#set left wheel speed
speeds[0] = 0.6 * max_velocity
#set right wheel speed
speeds[1] = -0.6 * max_velocity
def delay(ms):
initTime = robot.getTime() # Store starting time (in seconds)
while robot.step(timeStep) != -1:
# If time elapsed (converted into ms) is greater than value passed in
if (robot.getTime() - initTime) * 1000.0 > ms:
break
def getColor():
img = colorSensor.getImage() # Grab color sensor camera's image view
# Return grayness of the only pixel (0-255)
print("Color: " + str(img[0][0][0]))
return colorSensor.imageGetGray(img, colorSensor.getWidth(), 0, 0)
def checkVic(img):
# Convert img to RGBA format (for OpenCV)
img = np.frombuffer(img, np.uint8).reshape(
(cam.getHeight(), cam.getWidth(), 4))
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Grayscale image
# Inverse threshold image (0-80 -> white; 80-255 -> black)
img, thresh = cv.threshold(img, 80, 255, cv.THRESH_BINARY_INV)
# Find all shapes within thresholded image
contours, hierarchy = cv.findContours(
thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
x, y, w, h = cv.boundingRect(cnt) # Find width and height of contour
contArea = cv.contourArea(cnt) # Area covered by the shape
ratio = w / h # Calculate width to height ratio of contour
# if the contour area and width to height ratio are within certain ranges
if contArea > 300 and contArea < 1000 and ratio > 0.65 and ratio < 0.95:
return True
return False
def report(victimType):
# Struct package to be sent to supervisor to report victim/hazard
# First four bytes store robot's x coordinate
# Second four bytes store robot's z coordinate
# Last byte stores type of victim
# Victims: H, S, U, T
# Hazards: F, P, C, O
wheel_left.setVelocity(0) # Stop for 1 second
wheel_right.setVelocity(0)
delay(1300)
# Convert victimType to character for struct.pack
victimType = bytes(victimType, "utf-8")
posX = int(gps.getValues()[0] * 100) # Convert from m to cm
posZ = int(gps.getValues()[2] * 100)
message = struct.pack("i i c", posX, posZ, victimType)
emitter.send(message)
robot.step(timeStep)
while robot.step(timeStep) != -1:
speeds[0] = max_velocity
speeds[1] = max_velocity
# Check left and right sensors to avoid walls
# If the left distance sensor detects a nearby wall
if leftDist.getValue() < 0.05:
turn_right() # We see a wall on the left, so turn right away from the wall
if rightDist.getValue() < 0.05: # for sensor on the right too
turn_left()
# for front sensor
if frontDist.getValue() < 0.05:
spin()
# if on black, turn away
if getColor() < 80:
spin()
wheel_left.setVelocity(speeds[0])
wheel_right.setVelocity(speeds[1])
delay(600)
# if sees victim, report it
if checkVic(cam.getImage()):
report('T') # Cannot determine type of victim, so always try 'T' for now
# Send the speed values we have choosen to the robot
wheel_left.setVelocity(speeds[0])
wheel_right.setVelocity(speeds[1])
```
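The `report()` function above packs the robot's position and the victim type into a fixed `"i i c"` struct before emitting it to the supervisor. The sketch below shows that layout in isolation, with invented coordinates:
```python
import struct

# Two 4-byte ints (x and z in cm) followed by one victim-type character.
posX, posZ, victim_type = 25, -113, b'T'   # invented values
message = struct.pack("i i c", posX, posZ, victim_type)
print(len(message))                      # 9 bytes with native alignment
print(struct.unpack("i i c", message))   # (25, -113, b'T')
```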
#### File: pruebas/Joaquin_Rodriguez/prueba_color_sensor.py
```python
from controller import Robot
from controller import Motor
from controller import Robot, GPS
import time
import math
robot = Robot() # Create robot object
timeStep = 32 # timeStep = number of milliseconds between world updates
tile_size = 0.12 # Tile size
angulo_actual = 0
tiempo_anterior = 0
media_baldoza = 0.06
speed = 6.28
global start
global finalLetter
# Distance sensor initialization
distancia_sensor1 = robot.getDevice("distance sensor1")
distancia_sensor1.enable(timeStep)
distancia_sensorDer = robot.getDevice("distance sensor2")
distancia_sensorDer.enable(timeStep)
distancia_sensorIzq = robot.getDevice("distance sensor2")
distancia_sensorIzq.enable(timeStep)
maxima_distancia = 0.4
valor_sensor_distancia_Izq = distancia_sensorIzq.getValue() * 1000
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
# Gyroscope initialization
gyro = robot.getDevice("gyro")
gyro.enable(timeStep)
#Gps initialization
gps = robot.getDevice("gps")
gps.enable(timeStep)
robot.step(timeStep) # Refresh the sensor values
startX = gps.getValues()[0] # Load the initial position
startY = gps.getValues()[2]
offset_xy = [0, 2]
x = 0
y = 0
global lista_victim
lista_victim = []
# Color sensor initialization
colorSensor = robot.getDevice("colour_sensor")
colorSensor.enable(timeStep)
# Util Functions
def avanzar(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(vel)
def girar(vel):
ruedaIzquierda.setVelocity(vel//3)
ruedaDerecha.setVelocity(vel)
def rotar(angulo):
global angulo_actual
global tiempo_anterior
# start rotating
girar(0.8)
# Keep turning while the requested angle has not been reached
if (abs(angulo - angulo_actual) > 1):
tiempo_actual = robot.getTime()
# print("Inicio rotacion angulo", angulo, "Angulo actual:",angulo_actual)
tiempo_transcurrido = tiempo_actual - tiempo_anterior # time elapsed in this timestep
radsIntimestep = abs(gyro.getValues()[1]) * tiempo_transcurrido # rad/s * elapsed seconds
degsIntimestep = radsIntimestep * 180 / math.pi
# print("rads: " + str(radsIntimestep) + " | degs: " + str(degsIntimestep))
angulo_actual += degsIntimestep
# If it goes past 360 degrees, wrap the rotation back to start from 0 degrees
angulo_actual = angulo_actual % 360
# If it is below 0 degrees, add 360 to it
if angulo_actual < 0:
angulo_actual += 360
tiempo_anterior = tiempo_actual
return False
print("Rotacion finalizada.")
angulo_actual = 0
return True
def type_floor():
image = colorSensor.getImage()
r = colorSensor.imageGetRed(image, 1, 0, 0)
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if r == 212 :
return 'arena'
if r >= 242:
return 'checkpoint'
if r == 233 :
return 'common'
if r <= 110:
return 'pozo'
# Main Code
start = robot.getTime()
while robot.step(timeStep) != -1:
# TYPES OF FLOOR DETERMINE EACH STATE
image = colorSensor.getImage()
r = colorSensor.imageGetRed(image, 1, 0, 0)
floor = type_floor()
if floor == 'common':
print("Comun")
avanzar(3)
if floor == 'arena':
print("Arena")
avanzar(2)
rotar(90)
if floor == 'checkpoint':
print("Checkpoint")
avanzar(2)
tiempo_anterior = robot.getTime()
if robot.getTime() >= start + 14:
if rotar(90):
break
if floor == 'pozo':
print('Pozo')
avanzar(0)
"""if r >= 212 :
return 'arena'
if r >= 242:
return 'checkpoint'
if r < 240 and r >= 233 :
return 'common'"""
```
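The `rotar()` routine integrates the gyro's angular rate over each timestep to track how far the robot has turned. The arithmetic for a single step, with invented values, looks like this:
```python
import math

rate_rad_s = 1.2    # invented abs(gyro.getValues()[1]) reading, in rad/s
elapsed_s = 0.032   # one 32 ms timestep
degs = rate_rad_s * elapsed_s * 180 / math.pi
print(round(degs, 2))  # ~2.2 degrees accumulated in this step
```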
#### File: pruebas/Jose_Gonzalez/Solucion_PruebaTipoPiso.py
```python
from controller import Robot
from controller import Motor
from controller import PositionSensor
from controller import Robot, DistanceSensor, GPS, Camera, Receiver, Emitter
import cv2
import numpy as np
import math
import time
robot = Robot()
timeStep = 32
tile_size = 0.12
speed = 6.28
media_baldoza = 0.06
estado = 1
start = 0
global r
global g
global b
r = 0
g = 0
b = 0
# start = robot.getTime()
# Camera initialization
camera = robot.getDevice("camera3")
camera.enable(timeStep)
# Colour sensor initialization
colour_sensor = robot.getDevice("colour_sensor")
colour_sensor.enable(timeStep)
# Distance sensor initialization
distancia_sensor1 = robot.getDevice("distance sensor1")
distancia_sensor1.enable(timeStep)
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
rIzq_encoder = ruedaIzquierda.getPositionSensor()
rDer_encoder = ruedaDerecha.getPositionSensor()
rIzq_encoder.enable(timeStep)
rDer_encoder.enable(timeStep)
# Functions
def leer_sensores():
global r
global g
global b
# Color sensor
image = colour_sensor.getImage()
r = colour_sensor.imageGetRed(image, 1, 0, 0)
g = colour_sensor.imageGetGreen(image, 1, 0, 0)
b = colour_sensor.imageGetBlue(image, 1, 0, 0)
# azul: r=65 g=65 b=252
# rojo: r=252 g=65 b=65
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
"""
# Camara
image = camera.getImage()
imagen = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))
frame = cv2.cvtColor(imagen, cv2.COLOR_BGRA2BGR)
cv2.imshow("frame", frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale
cv2.imshow("grayScale", frame)
cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold
cv2.imshow("thresh", frame)
cv2.waitKey(1)
# Sensor de Distancia
print("Distancia: " + str(distancia_sensor1.getValue()))
"""
def avanzar(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(vel)
def retroceder(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(-vel)
def girar_der(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(vel)
def girar_izq(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(-vel)
gyro = robot.getDevice("gyro")
gyro.enable(timeStep)
def rotar(angulo):
global angulo_actual
global tiempo_anterior
# start rotating
if angulo > 0:
girar_der(0.5)
else:
girar_izq(0.5)
# Keep turning while the requested angle has not been reached
if (abs(abs(angulo) - angulo_actual) > 1):
tiempo_actual = robot.getTime()
# print("Inicio rotacion angulo", angulo, "Angulo actual:",angulo_actual)
tiempo_transcurrido = tiempo_actual - \
tiempo_anterior # time elapsed in this timestep
# rad/s * elapsed seconds
radsIntimestep = abs(gyro.getValues()[1]) * tiempo_transcurrido
degsIntimestep = radsIntimestep * 180 / math.pi
# print("rads: " + str(radsIntimestep) +
# " | degs: " + str(degsIntimestep))
angulo_actual += degsIntimestep
# If it goes past 360 degrees, wrap the rotation back to start from 0 degrees
angulo_actual = angulo_actual % 360
# If it is below 0 degrees, add 360 to it
if angulo_actual < 0:
angulo_actual += 360
tiempo_anterior = tiempo_actual
# print("Angulo actual:", angulo_actual)
return False
#print("Rotacion finalizada.")
angulo_actual = 0
return True
def delay(ms):
initTime = robot.getTime() # Store starting time (in seconds)
while robot.step(timeStep) != -1:
print("delay")
if (robot.getTime() - initTime) * 1000.0 > ms: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def rotar_enclavado(angulo):
while robot.step(timeStep) != -1:
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rotar(angulo) == True: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def avance(tipo_avance):
start = rDer_encoder.getValue()
velocidad = 0
avance = 0
if tipo_avance == "medio":
velocidad = 3
avance = 2.9
elif tipo_avance == "largo":
avance = 5.9
velocidad = 5.96
elif tipo_avance == "esquina":
avance = 4.1
velocidad = 6.28
while robot.step(timeStep) != -1:
avanzar(velocidad)
leer_sensores()
tipo_pizza()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rDer_encoder.getValue() >= start + avance:
avanzar(0)
break
def retroceso(tipo_retroceso):
start = rDer_encoder.getValue()
velocidad = 0
retroceso = 0
if tipo_retroceso == "medio":
velocidad = 6.28
retroceso = 2.9
elif tipo_retroceso == "largo":
retroceso = 5.9
velocidad = 5.96
elif tipo_retroceso == "esquina":
retroceso = 4.1
velocidad = 6.28
elif tipo_retroceso == "poquito":
retroceso = 1.9
velocidad = 6.28
while robot.step(timeStep) != -1:
retroceder(velocidad)
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if start - retroceso >= rDer_encoder.getValue():
avanzar(0)
break
def tipo_pizza():
#print("valores(1): r:" + str(r) + " , g:" + str(g) + " , b:" + str(b))
if 255 >= r >= 240 and 60 <= b <= 75 and 60 <= g <= 75:
print("(Red)pasaje zona 3 a 1")
elif 150 >= r >= 100 and 210 <= b <= 230 and 60 <= g <= 75:
print("(Vaiolet)pasaje zona 2 a 3")
elif 60 <= r <= 75 and 255 >= b >= 245 and 60 <= g <= 75:
print("(Blue)pasaje zona 1 a 2")
elif 200 <= r <= 220 and 110 >= b >= 100 and 175 <= g <= 180:
print("Entered swamp")
return "swamp"
elif 250 >= r >= 230 and 250 >= b >= 235 and 250 >= g >= 235:
print("Found Checkpoint")
elif r == 233 and b == 233 and g == 233:
print("Azulejo normal")
elif 30 <= r <= 50 :
print("un agujero negro we")
retroceso("medio")
rotar_enclavado(90)
else:
return "prueba"
angulo_actual = 0
tiempo_anterior = robot.getTime()
contador = 0
while robot.step(timeStep) != -1:
avance("medio")
``` |
{
"source": "joaramirezra/Can-You-Solve-a-Problem",
"score": 4
} |
#### File: Can-You-Solve-a-Problem/In-house/Factorial of Number.py
```python
def factorial(n):
if n < 0:
return 0
elif n == 0 or n == 1:
return 1
else:
fact = 1
while(n > 1):
fact *= n
n -= 1
return fact
num = 5
print("Factorial of", num, "is", factorial(num))
```
#### File: Can-You-Solve-a-Problem/In-house/palindrome.py
```python
def palindrome_checker():
'''This function converts any string entered to lowercase and checks it against a reversed string'''
string = input('Enter a string: ').lower()
if (string == string[::-1]):
print('This is a palindrome')
else:
print('Not a palindrome')
palindrome_checker()
```
#### File: Can-You-Solve-a-Problem/In-house/romanNumeralConverter.py
```python
def convertRomanNumeral(romNumeral):
"""
converts Roman numerals into ordinary numbers.
Params:
romNumeral (String) - the Roman numeral string to be converted
Returns:
num (int): ordinary number equivalent of the Roman numeral.
Examples:
>>> num = convertRomanNumeral("M")
>>> print(num)
1000
>>> num = convertRomanNumeral("DX")
>>> print(num)
510
>>> num = convertRomanNumeral("XL")
>>> print(num)
40
"""
romNumeral = romNumeral.upper()
rom_dict = {"M":1000,"D":500,"C":100,"L":50,"X":10,"V":5,"I":1,'IV':4,\
'IX':9,'XL':40,'XC':90,'CD':400,'CM':900}
i = 0
num = 0
while i < len(romNumeral):
if i+1<len(romNumeral) and romNumeral[i:i+2] in rom_dict:
num+=rom_dict[romNumeral[i:i+2]]
i+=2
else:
num+=rom_dict[romNumeral[i]]
i+=1
return num
rom_num = input("Enter the Roman Numeral you would like to convert: ")
num = convertRomanNumeral(rom_num)
print("The integer representation of %s is %d"%(rom_num,num))
```
#### File: Can-You-Solve-a-Problem/In-house/X-O.py
```python
def displayboard(board):
print (f'{board[6]:^7}|{board[7]:^7}|{board[8]:^7}')
print ('-----------------------')
print (f'{board[3]:^7}|{board[4]:^7}|{board[5]:^7}')
print ('-----------------------')
print (f'{board[0]:^7}|{board[1]:^7}|{board[2]:^7}')
def playermarker():
marker = ''
while marker != 'X' and marker != 'O':
marker = (input("Choose X or O: ")).upper()
if marker == 'X':
return ('X','O')
else:
return ('O','X')
def placemaker (board,marker,position):
board[position-1] = marker
def wincheck(board,mark):
return board[0]==board[1]==board[2]==mark or board[3]==board[4]==board[5]==mark or board[6]==board[7]==board[8]==mark or board[0]==board[3]==board[6]==mark or board[4]==board[1]==board[7]==mark or board[5]==board[8]==board[2]==mark or board[0]==board[4]==board[8]==mark or board[6]==board[4]==board[2]==mark
def checkfirst():
from random import randint
flip = randint(1,2)
if flip == 1:
return 'player 1'
else:
return 'player 2'
def spacecheck(board,position):
# positions are 1-9 while the board list is 0-indexed
return board[position-1] == ' '
def fullboardcheck(board):
return board[0] != ' ' and board[1] != ' ' and board[2] != ' ' and board[3] != ' ' and board[4] != ' ' and board[5] != ' ' and board[6] != ' ' and board[7] != ' ' and board[8] != ' '
def fullboardcheck1(board):
for i in range(1,10):
if spacecheck(board,i):
return False
return True
def playerchoice(board):
position = 0
while position not in range (1,10) or not spacecheck(board,position):
position = int(input('Choose a number from 1-9: (1-9)'))
return position
def replay():
choice = ''
while choice.capitalize() != 'Yes' and choice.capitalize() != 'No':
choice = (input('Play again? Yes or No: '))
return choice.capitalize() == 'Yes'
print ('Welcome to X and O')
#Play the game
while replay():
## Set everything up (board, choose markers and who is first)
gameboard = [' ']*9
player1_marker, player2_marker = playermarker()
turn = checkfirst()
print (turn + ' is the first player')
ready = int(input('Ready to play? To answer yes enter 1 to answer no enter 2: '))
if ready == 1:
gameon = True
else:
gameon = False
## Game play
while gameon:
### Player one turn
if turn == 'player 1':
#### Show the Board
print ('\n'*100)
displayboard(gameboard)
##### Choose position
position = playerchoice(gameboard)
##### Place the marker in the position
placemaker(gameboard,player1_marker,position)
#####Check if they won
if wincheck(gameboard,player1_marker):
print ('\n'*100)
displayboard(gameboard)
print ('CONGRATULATIONS, PLAYER ONE YOU HAVE WON THE GAME.')
gameon = False
#####Or if there is a tie
else:
if fullboardcheck(gameboard):
print ('\n'*100)
displayboard(gameboard)
print('TIE GAME.')
gameon = False
#####No win, no tie next players turn
else:
print ('\n'*100)
turn = 'player 2'
### Player two turn
else:
#### Show the Board
displayboard(gameboard)
##### Choose position
position = playerchoice(gameboard)
##### Place the marker in the position
placemaker(gameboard,player2_marker,position)
#####Check if they won
if wincheck(gameboard,player2_marker):
print ('\n'*100)
displayboard(gameboard)
print ('CONGRATULATIONS, PLAYER TWO YOU HAVE WON THE GAME.')
gameon = False
#####Or if there is a tie
else:
if fullboardcheck(gameboard):
print ('\n'*100)
displayboard(gameboard)
print('TIE GAME.')
gameon = False
#####No win, no tie next players turn
else:
print ('\n'*100)
turn = 'player 1'
if not replay():
break
``` |
{
"source": "joarch/dmsarchiv",
"score": 2
} |
#### File: dmsarchiv/src/dmsarchiv.py
```python
import configparser
import json
import os
import shutil
import sys
from datetime import datetime, timedelta
from decimal import Decimal
from getopt import getopt, GetoptError
from typing import List, Dict
import requests
from requests.auth import HTTPBasicAuth
from common import _json_load
from export_excel import export_nach_excel
DEFAULT_PARAMETER_SECTION = "config.ini:PARAMETER"
DEFAULT_EXPORT_PARAMETER_SECTION = "config.ini:EXPORT"
PARAM_URL = "dms_api_url"
PARAM_USER = "dms_api_benutzer"
PARAM_PASSWD = "<PASSWORD>"
DEFAULT_EXPORT_VON_DATUM = "01.01.2010"
CLASSIFY_ATTRIBUTES_FILENAME = "classify_attributes.json"
FOLDERS_FILENAME = "folders.json"
TYPES_FILENAME = "types.json"
def export(profil=DEFAULT_PARAMETER_SECTION, export_profil=DEFAULT_EXPORT_PARAMETER_SECTION, export_von_datum=None,
export_bis_datum=None, max_documents=None, tage_offset=None, debug=None):
# TODO LOG File schreiben
# TODO timeit Zeit loggen bzw. als info_dauer in ini speichern
# DMS API Connect
api_url, cookies = _connect(profil)
# DMS API Connect Info
api_statistics = _get_statistics(api_url, cookies)
export_info = dict()
export_info["info_api_download_count"] = api_statistics["uploadCount"]
export_info["info_api_upload_count"] = api_statistics["downloadCount"]
export_info["info_api_max_count"] = api_statistics["maxCount"]
# DMS API Klassifizierungsattribute auslesen, wenn noch nicht vorhanden
if not os.path.exists(CLASSIFY_ATTRIBUTES_FILENAME):
classify_attributes = _get_classify_attributes(api_url, cookies)
with open(CLASSIFY_ATTRIBUTES_FILENAME, 'w', encoding='utf-8') as outfile:
json.dump(classify_attributes, outfile, ensure_ascii=False, indent=2, sort_keys=True, default=json_serial)
# DMS API Order auslesen, wenn noch nicht vorhanden
if not os.path.exists(FOLDERS_FILENAME):
folders = _get_folders(api_url, cookies)
with open(FOLDERS_FILENAME, 'w', encoding='utf-8') as outfile:
json.dump(folders, outfile, ensure_ascii=False, indent=2, sort_keys=True, default=json_serial)
# DMS API Dokumentenart auslesen, wenn noch nicht vorhanden
if not os.path.exists(TYPES_FILENAME):
types = _get_types(api_url, cookies)
with open(TYPES_FILENAME, 'w', encoding='utf-8') as outfile:
json.dump(types, outfile, ensure_ascii=False, indent=2, sort_keys=True, default=json_serial)
# Konfiguration lesen
parameter_export = _get_config(export_profil)
export_von_datum = parameter_export["export_von_datum"] if export_von_datum is None else export_von_datum
export_bis_datum = parameter_export["export_bis_datum"] if export_bis_datum is None else export_bis_datum
max_documents = int(parameter_export["max_documents"]) if max_documents is None else max_documents
tage_offset = int(parameter_export["tage_offset"]) if tage_offset is None else tage_offset
export_parameter = _json_load(parameter_export["export_parameter_datei"])
if debug is None:
debug = parameter_export.get("debug") == "true"
if not export_von_datum:
export_von_datum = DEFAULT_EXPORT_VON_DATUM
# DMS API Search
export_info["info_letzter_export"] = datetime.now().strftime("%d.%m.%Y %H:%M:%S")
export_info["info_letzter_export_von_datum"] = export_von_datum
documents = _search_documents(api_url, cookies, export_von_datum, export_parameter.get("suchparameter_list"),
bis_datum=export_bis_datum, max_documents=max_documents, debug=debug)
# Dokumenten Export Informationen auswerten
ctimestamps = list(map(lambda d: datetime.strptime(d["classifyAttributes"]["ctimestamp"], "%Y-%m-%d %H:%M:%S"),
documents))
ctimestamps.sort()
if len(ctimestamps) > 0:
min_ctimestamp = ctimestamps[0]
max_ctimestamp = ctimestamps[-1]
else:
min_ctimestamp = None
max_ctimestamp = None
if export_bis_datum and len(documents) == 0:
raise RuntimeError("Achtung es wurden keine Dokumente exportiert. Bitte das Such 'bis_datum' erweitern.")
if len(documents) >= max_documents:
raise RuntimeError(f"Achtung es wurden evtl. nicht alle Dokumente exportiert, Anzahl >= {max_documents}. "
f"Das Such-Datum muss weiter eingeschränkt werden. "
f"Es wurde gesucht mit {export_von_datum} - {export_bis_datum}.")
if export_bis_datum:
# es gab eine Einschränkung bis Datum
export_von_datum = max_ctimestamp.strftime("%d.%m.%Y")
# - nächste Zeitscheibe in Export-Info schreiben
export_bis_datum = datetime.strptime(export_bis_datum, "%d.%m.%Y") + timedelta(days=tage_offset)
if export_bis_datum < datetime.now():
export_bis_datum = export_bis_datum.strftime("%d.%m.%Y")
else:
# Ende erreicht der nächste Export läuft ohne bis Datum
export_bis_datum = ""
else:
export_von_datum = datetime.now().strftime("%d.%m.%Y")
export_info["info_letzter_export_anzahl_dokumente"] = len(documents)
export_info["info_min_ctimestamp"] = min_ctimestamp.strftime("%d.%m.%Y") if min_ctimestamp else export_von_datum
export_info["info_max_ctimestamp"] = max_ctimestamp.strftime("%d.%m.%Y") if max_ctimestamp else ""
# - Export Parameter für den nächsten Export
export_info["export_von_datum"] = export_von_datum
export_info["export_bis_datum"] = export_bis_datum
export_info["max_documents"] = max_documents
export_info["tage_offset"] = tage_offset
# DMS API Disconnect
_disconnect(api_url, cookies)
# Dokumente als JSON Datei speichern
result = {
"export_time": datetime.now().strftime("%d.%m.%Y %H:%M:%S"),
"documents": documents}
json_export_datei = export_parameter["json_export_datei"]
json_export_datei_tmp = json_export_datei + "_tmp"
with open(json_export_datei_tmp, 'w', encoding='utf-8') as outfile:
json.dump(result, outfile, ensure_ascii=False, indent=2, sort_keys=True, default=json_serial)
result["anzahl_exportiert"] = len(documents)
anzahl_neu = len(documents)
# neue und vorhandene Export Ergebnisse zusammenführen, falls vorhanden
if os.path.exists(json_export_datei):
with open(json_export_datei, encoding="utf-8") as file:
result_vorher = json.load(file)
doc_ids_new = [document["docId"] for document in result["documents"]]
for document in result_vorher["documents"]:
if document["docId"] not in doc_ids_new:
result["documents"].append(document)
else:
anzahl_neu -= 1
result["anzahl"] = len(result["documents"])
result["anzahl_neu"] = anzahl_neu
# Sortierung nach DocId
result["documents"].sort(key=lambda document: document["docId"])
# Speichern in JSON Datei und löschen temp. Export Datei
with open(json_export_datei, 'w', encoding='utf-8') as outfile:
json.dump(result, outfile, ensure_ascii=False, indent=2, sort_keys=True, default=json_serial)
os.remove(json_export_datei_tmp)
print(
f"Dokumente geladen im Zeitraum: {export_von_datum} - {export_bis_datum}, "
f"Anzahl geladen: {result['anzahl_exportiert']}, "
f"Anzahl neu: {result['anzahl_neu']}, "
f"Anzahl gesamt: {result['anzahl']}.")
# wenn alle Dokumente bis zum aktuell Tag exportiert wurden,
# wird die Excel Datei geschrieben und die JSON Datei als Temp.-Datei umbenannt
if export_von_datum == datetime.now().strftime("%d.%m.%Y"):
# Excel Export
if export_parameter["export"]["export_format"] == "xlsx":
export_nach_excel(result, export_parameter["export"])
else:
raise RuntimeError(f"nicht unterstütztes Export Format {export_parameter['export']['export_format']}")
# vorhandene JSON Datei als Temp.-Datei sichern
splitext = os.path.splitext(json_export_datei)
shutil.move(json_export_datei, os.path.join(
os.path.dirname(json_export_datei),
os.path.basename(splitext[0]) + "_tmp" + splitext[1]
))
else:
# noch nicht alle Dokumente geladen
print("Es wurden noch nicht alle Dokumente bis zum heutigen Tag geladen, der Export wird nicht durchgeführt.")
print("Bitte das Programm erneut ausführen.")
# Export Info (letzter Export Zeitstempel und DMS API Info) in die Config-Datei zurückschreiben
_write_config(export_profil, export_info)
def _search_documents(api_url, cookies, von_datum, suchparameter_list=None,
bis_datum=None, max_documents=1000, debug=False) -> List[Dict]:
suchparameter_list = suchparameter_list or []
von_datum = datetime.strptime(von_datum, "%d.%m.%Y")
# Search date minus 1 day, relative to the last run,
# because the DMS API search cannot handle a timestamp;
# additionally (to be safe) the comparison uses >=
# von_datum = von_datum.date() - timedelta(days=1)
von_datum = von_datum.date()
von_datum = von_datum.strftime("%Y-%m-%d")
search_parameter = [{"classifyAttribut": "ctimestamp", "searchOperator": ">=",
"searchValue": von_datum}]
if bis_datum:
bis_datum = datetime.strptime(bis_datum, "%d.%m.%Y").strftime("%Y-%m-%d")
search_parameter.append({"classifyAttribut": "ctimestamp", "searchOperator": "<=",
"searchValue": bis_datum})
for suchparameter in suchparameter_list:
search_parameter.append(suchparameter)
such_data = json.dumps(search_parameter)
if debug:
print(f"Suche mit: {json.dumps(search_parameter)}")
r = requests.post("{}/searchDocumentsExt?maxDocumentCount={}".format(api_url, max_documents),
data=such_data,
cookies=cookies, headers=_headers())
_assert_request(r)
documents = json.loads(r.text)
if debug:
print(f"Suche Fertig. Anzahl Dokumente : {len(documents)}")
return documents
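# Illustrative sketch (not part of the original source): entries of ``suchparameter_list``
# follow the same shape as the filters built above. The attribute name and value below are
# invented examples, not a documented API contract.
#
#   suchparameter_list = [
#       {"classifyAttribut": "docart", "searchOperator": "=", "searchValue": "1"}
#   ]
#   documents = _search_documents(api_url, cookies, "01.01.2021",
#                                  suchparameter_list, bis_datum="31.12.2021")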
def _get_statistics(api_url, cookies):
r = requests.get("{}/apiStatistics".format(api_url), cookies=cookies, headers=_headers())
_assert_request(r)
return json.loads(r.text)
def _get_classify_attributes(api_url, cookies):
r = requests.get("{}/classifyAttributes".format(api_url), cookies=cookies, headers=_headers())
_assert_request(r)
return json.loads(r.text)
def _get_folders(api_url, cookies):
r = requests.get("{}/folders".format(api_url), cookies=cookies, headers=_headers())
_assert_request(r)
return json.loads(r.text)
def _get_types(api_url, cookies):
r = requests.get("{}/types".format(api_url), cookies=cookies, headers=_headers())
_assert_request(r)
return json.loads(r.text)
def _headers():
return {'Content-Type': 'application/json; charset=utf8'}
def _connect(profil):
params = _get_config(profil)
r = requests.get("{}/connect/1".format(params[PARAM_URL]),
auth=HTTPBasicAuth(params[PARAM_USER], params[PARAM_PASSWD]))
_assert_request(r)
cookies = r.cookies.get_dict()
return params[PARAM_URL], cookies
def _disconnect(api_url, cookies):
r = requests.get("{}/disconnect".format(api_url), cookies=cookies)
_assert_request(r)
def _assert_request(request):
if request.status_code != 200:
raise RuntimeError(f"Fehler beim Request: {request.status_code}, Message: {request.text}")
def _get_config(profil):
split = profil.split(":")
config_file = split[0]
config_section = split[1]
config = configparser.ConfigParser()
config.read(config_file)
return config[config_section]
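# Illustrative sketch (not part of the original source): a minimal ``config.ini`` that would
# satisfy _get_config() for the default profiles used above. Only keys this module actually
# reads are shown; all values are placeholders, and the password key is omitted because its
# name is redacted in this listing.
#
#   [PARAMETER]
#   dms_api_url = http://localhost:8180/api
#   dms_api_benutzer = api-user
#
#   [EXPORT]
#   export_von_datum = 01.01.2010
#   export_bis_datum =
#   max_documents = 1000
#   tage_offset = 30
#   export_parameter_datei = export_parameter.json
#   debug = false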
def _write_config(profil, new_params):
"""
Updates the config file with new values.
"""
split = profil.split(":")
config_file = split[0]
config_section = split[1]
config = configparser.ConfigParser()
config.read(config_file)
# merge alte und neue Parameter
for section in config.sections():
if section == config_section:
for key, value in new_params.items():
config[section][key] = str(value)
with open(config_file, 'w') as configfile:
config.write(configfile)
def json_serial(obj):
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
if isinstance(obj, Decimal):
serial = str(obj)
return serial
raise TypeError("Type not serializable")
def main(argv):
"""
Export DMS Dokumenten Infos. Das Zielformat wird über das Export Profil übergeben.
Programmargumente:
- parameter (INI-Datei und Section): z.B.: 'config.ini:PARAMETER'
- export_parameter (INI-Datei und Section): z.B.: 'config.ini:EXPORT'
"""
hilfe = f"{os.path.basename(__file__)} -p <parameter> -e <export_parameter>"
parameter = ""
export_parameter = ""
try:
opts, args = getopt(argv, "hp:e:", ["parameter=", "export_parameter="])
except GetoptError:
print(hilfe)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print(hilfe)
sys.exit()
elif opt in ("-p", "--parameter"):
parameter = arg
elif opt in ("-e", "--export_parameter"):
export_parameter = arg
if not parameter:
parameter = DEFAULT_PARAMETER_SECTION
if not export_parameter:
export_parameter = DEFAULT_EXPORT_PARAMETER_SECTION
export(parameter, export_parameter)
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: dmsarchiv/src/export_excel.py
```python
import json
import os
import re
import sys
from datetime import datetime, date
from decimal import Decimal
from getopt import getopt, GetoptError
from openpyxl import Workbook, load_workbook
from openpyxl.styles import PatternFill, Font
from openpyxl.utils import get_column_letter
from common import _json_load
def export_nach_excel(documents, export_profil):
# Zieldateiname ermitteln
ziel_dateiname = export_profil["dateiname"]
postfix = export_profil.get("dateiname_postfix")
if postfix:
if "%" in postfix:
postfix = datetime.now().strftime(postfix)
splitext = os.path.splitext(os.path.basename(export_profil["dateiname"]))
ziel_dateiname = os.path.join(
os.path.dirname(export_profil["dateiname"]),
splitext[0] + postfix + splitext[1]
)
# letzte fortlaufende Nummer ermitteln
fortlaufendes_feld = export_profil.get("fortlaufendes_feld")
letzte_fortlaufende_nummer = -1
filename_fortlaufendes_feld = None
if fortlaufendes_feld:
filename_fortlaufendes_feld = os.path.join(
os.path.dirname(export_profil["dateiname"]),
os.path.splitext(os.path.basename(export_profil["dateiname"]))[0] + "_" +
"fortlaufendes_feld.txt"
)
if os.path.exists(filename_fortlaufendes_feld):
with open(filename_fortlaufendes_feld, 'r', encoding='utf-8') as outfile:
value = outfile.read()
if value:
letzte_fortlaufende_nummer = int(value)
# Datei Mapping Caches
datei_mappings = dict()
# Zeilen und Spalten aus den Dokumenten anhand Export Profil ermitteln
rows = list()
for document in documents["documents"]:
columns = list()
rows.append(columns)
for spalte in export_profil["spalten"]:
column = dict()
columns.append(column)
feld_name = spalte["feld"]
if spalte.get("alias"):
column["feld_name"] = spalte["alias"]
else:
column["feld_name"] = spalte["feld"]
mapped_value = ""
if feld_name:
# Spalten Wert auslesen und mappen
if feld_name in document:
value = document[feld_name]
elif feld_name in document["classifyAttributes"]:
value = document["classifyAttributes"][feld_name]
else:
raise RuntimeError(
f"Die Spalte '{feld_name}' existiert nicht im Dokument. Bitte Export-Profil überprüfen.")
# Mapping
mapping_def = spalte.get("mapping")
if mapping_def is not None:
# konfiguriertes Mapping anwenden
if mapping_def["typ"] == "re":
# Mapping mit RegEx Methode
# - zuerst immer in String umwandeln, wegen RegEx Methode auf String
mapped_value = map_value(value, "string")
re_operation = getattr(re, mapping_def["methode"])
argumente = mapping_def["argumente"]
if len(argumente) == 2:
mapped_value = re_operation(argumente[0], argumente[1], mapped_value)
else:
raise RuntimeError(
f"Fehler beim Mapping zum Feld '{feld_name}'. "
f"Es werden nur 2 Argument unterstützt.")
mapped_value = map_value(mapped_value, spalte.get("type"))
elif mapping_def["typ"] == "datei":
# Mapping aus Datei auslesen
# - zuerst immer in String umwandeln, Id wird immer als String normalisiert
mapped_value = map_value(value, "string")
# - Datei Mapping Cache initialisieren
if datei_mappings.get(mapping_def["dateiname"]) is None:
datei_mappings[mapping_def["dateiname"]] = _init_mapping_data(mapping_def)
# mapping von id zu name
mapping_data = datei_mappings[mapping_def["dateiname"]]
mapped_value = mapping_data[mapped_value]
else:
raise RuntimeError(f"Unbekannter Mapping Typ: {mapping_def['type']}")
else:
mapped_value = map_value(value, spalte.get("type"))
else:
# keine Feld Name, damit bleibt die Spalte leer
pass
column["value"] = mapped_value
if spalte.get("number_format"):
column["number_format"] = spalte["number_format"]
else:
if isinstance(mapped_value, date):
column["number_format"] = 'DD.MM.YYYY'
if isinstance(mapped_value, datetime):
column["number_format"] = 'DD.MM.YYYY HH:MM:SS'
if spalte.get("computed"):
column["computed"] = spalte["computed"]
# sortieren
if export_profil.get("sortierung"):
for sort_def in reversed(export_profil["sortierung"]["felder"]):
if sort_def["wie"] == "absteigend":
reverse = True
elif sort_def["wie"] == "aufsteigend":
reverse = False
else:
raise RuntimeError(
f"Unbekannte Sortierung zum 'feld'='{sort_def['feld']}' mit 'wie'='{sort_def['wie']}' "
f", erlaubt sind nur 'aufsteigend' oder 'absteigend'.")
rows.sort(
key=lambda r: list(filter(lambda c: c["feld_name"] == sort_def["feld"], r))[0]["value"],
reverse=reverse
)
# Computed und Format ermitteln
for row in rows:
for column in row:
# computed Wert ermitteln
if column.get("computed"):
computed = column.get("computed")
# bekannte Methoden ersetzen
computed = computed \
.replace("nicht_fortlaufend()",
"pruefe_is_nicht_fortlaufend(row, fortlaufendes_feld, letzte_fortlaufende_nummer)")
column["value"] = eval(computed)
# Format ermitteln
if export_profil.get("formate"):
for format_candidate in export_profil["formate"]:
if re.match(format_candidate["match"], str(column["value"])):
if "PatternFill" == format_candidate["format"]["format"]:
column["fill"] = PatternFill(start_color=format_candidate["format"]["start_color"],
end_color=format_candidate["format"]["end_color"],
fill_type=format_candidate["format"]["fill_type"])
for column in row:
# max. fortlaufendes Feld merken
if fortlaufendes_feld and column["feld_name"] == fortlaufendes_feld:
letzte_fortlaufende_nummer = column["value"]
if not letzte_fortlaufende_nummer:
raise RuntimeError("Die fortlaufende Nummer konnte nicht ermittelt werden")
# als Excel speichern
if not os.path.exists(ziel_dateiname):
# neue Excel Datei
if not export_profil.get("vorlage_dateiname"):
# neu
wb = Workbook()
ws = wb.active
else:
# aus Vorlage
wb = load_workbook(filename=export_profil["vorlage_dateiname"])
if not export_profil.get("vorlage_sheet_name"):
ws = wb.active
else:
ws = wb[export_profil["vorlage_sheet_name"]]
row_idx = 1
# mit Spaltenüberschrifen
if export_profil["spaltenueberschrift"].lower() == "ja":
column_header_format = export_profil.get("spaltenueberschrift_format")
if column_header_format is not None:
if "PatternFill" == column_header_format["format"]:
column_header = PatternFill(start_color=column_header_format["start_color"],
end_color=column_header_format["end_color"],
fill_type=column_header_format["fill_type"])
else:
raise RuntimeError(
f"Unbekanntes Format {column_header_format['format']} in 'spaltenueberschrift_format/format'. "
f"Möglich ist nur 'PatternFill'")
else:
# Standard Format
column_header = PatternFill(start_color='AAAAAA',
end_color='AAAAAA',
fill_type='solid')
column_idx = 1
for spalte in export_profil["spalten"]:
ws.cell(column=column_idx, row=row_idx, value=spalte["ueberschrift"])
col = ws["{}{}".format(get_column_letter(column_idx), row_idx)]
col.font = Font(bold=True)
col.fill = column_header
column_idx += 1
row_idx += 1
# Zeilen und Spalten ins Excel Dokument schreiben
append_rows(row_idx, rows, ws)
else:
# vorhandene Excel Datei fortschreiben
wb = load_workbook(filename=ziel_dateiname)
if not export_profil.get("vorlage_sheet_name"):
ws = wb.active
else:
ws = wb[export_profil["vorlage_sheet_name"]]
id_feld = export_profil["id_feld"]
id_feld_idx = -1
for idx, spalte in enumerate(export_profil["spalten"]):
if spalte["feld"] == id_feld:
id_feld_idx = idx
if id_feld_idx == -1:
raise RuntimeError(
f"Fehler das id_feld '{id_feld}' existiert nicht als Spalte in der Export Konfiguration.")
# update Rows
empties = 0
last_row = 0
for row_idx, row in enumerate(ws.iter_rows()):
cell = row[id_feld_idx]
if cell.value:
empties = 0
update_row(cell.value, id_feld_idx, rows, row)
rows = remove_row(cell.value, id_feld_idx, rows)
else:
empties += 1
for cell in row:
# evtl. leere Zeile, nur wenn alle Spalten ebenfalls leer sind
if cell.value:
empties = 0
break
if empties == 0:
last_row = row_idx + 1
if empties > 100:
# fertig, nur noch leere Id Spalten
break
# neue Rows anhängen
row_idx = last_row + 1
append_rows(row_idx, rows, ws)
wb.save(filename=ziel_dateiname)
print(f"Die Excel-Datei wurde geschrieben: '{ziel_dateiname}'")
# letzte fortlaufende Nummer in Datei merken
if fortlaufendes_feld:
with open(filename_fortlaufendes_feld, 'w', encoding='utf-8') as outfile:
outfile.write(str(letzte_fortlaufende_nummer))
def append_rows(row_idx, rows, ws):
"""
Appends the rows to the sheet, starting at row row_idx.
"""
for row in rows:
column_idx = 1
for column in row:
new_cell = ws.cell(column=column_idx, row=row_idx, value=column["value"])
if column.get("number_format"):
new_cell.number_format = column["number_format"]
if column.get("fill"):
new_cell.fill = column["fill"]
column_idx += 1
row_idx += 1
def update_row(id_value, id_feld_idx, rows, row):
"""
Updates the row in the sheet if it exists within the new rows.
If the row does not exist in the new rows, it remains unchanged.
"""
existing = list(filter(lambda r: r[id_feld_idx]["value"] == id_value, rows))
if len(existing) == 1:
# aktualisieren vorhandene Row
for column_idx, column in enumerate(existing[0]):
row[column_idx].value = column["value"]
if column.get("number_format"):
row[column_idx].number_format = column["number_format"]
if column.get("fill"):
row[column_idx].fill = column["fill"]
elif len(existing) > 1:
raise RuntimeError(f"Zeile mit Id '{id_value}' ist mehrfach vorhanden. Anzahl: {len(existing)}")
# ignorieren, Row nur im Excel Dokument
def remove_row(id_value, id_feld_idx, rows):
"""
Removes the row with the given id_value from rows.
"""
return [row for row in rows if row[id_feld_idx]["value"] != id_value]
def map_value(value, mapping_type=None):
if mapping_type == "string":
return str(value)
if mapping_type == "int":
try:
return int(value)
except ValueError:
return -1
return map_str_value(value)
def map_str_value(value):
if type(value) != str:
return value
if value == "undefined":
# clean up
value = ""
if value == "true":
value = "ja"
if value == "false":
value = "nein"
if "€" in value \
and (value[0].isnumeric() or len(value) >= 2 and value[0] == "-" and value[1].isnumeric()):
return map_eur(value)
eur_pattern = re.compile(r"^-?[0-9]+,?[0-9]* (€|EUR)$")
if eur_pattern.match(value):
return map_eur(value)
datum_pattern = re.compile(r"^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$")
if datum_pattern.match(value):
return map_datum(value)
datum_pattern = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")
if datum_pattern.match(value):
return map_datum(value)
datum_pattern = re.compile(r"^[0-9]{2}\.[0-9]{2}\.[0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}$")
if datum_pattern.match(value):
return map_datum_zeit(value)
datum_pattern = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$")
if datum_pattern.match(value):
return map_datum_zeit(value)
decimal_pattern = re.compile(r"^-?[0-9]+,?[0-9]*$")
if decimal_pattern.match(value):
return map_number(value)
return value
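# Illustrative sketch (not part of the original source): the kind of values
# map_str_value() produces for typical inputs, based on the branches above.
#
#   map_str_value("undefined")            -> ""
#   map_str_value("true")                 -> "ja"
#   map_str_value("12,50 €")              -> Decimal("12.50")
#   map_str_value("01.02.2021")           -> date(2021, 2, 1)
#   map_str_value("2021-02-01 13:45:00")  -> datetime(2021, 2, 1, 13, 45)
#   map_str_value("1,5")                  -> Decimal("1.5")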
def map_number(value):
if value is None:
return None
return Decimal(value.replace('.', '').replace(' ', '').replace(',', '.'))
def map_eur(value):
return map_number(value.replace("€", "").replace("EUR", ""))
def map_datum(value):
if "-" in value:
return datetime.strptime(value, "%Y-%m-%d").date()
return datetime.strptime(value, "%d.%m.%Y").date()
def map_datum_zeit(value):
if "-" in value:
return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
return datetime.strptime(value, "%d.%m.%Y %H:%M:%S")
def pruefe_is_nicht_fortlaufend(columns, fortlaufendes_feld, previous_fortlaufendes_feld):
return not list(filter(lambda c: c["feld_name"] == fortlaufendes_feld, columns))[0][
"value"] == previous_fortlaufendes_feld + 1
def _init_mapping_data(mapping_def):
"""
Reads all entries and builds a new dict based on the 'id' and 'name' definition.
"""
result = dict()
mapping_data = _json_load(mapping_def["dateiname"])
for entry in mapping_data:
result[str(entry[mapping_def["id"]])] = entry[mapping_def["name"]]
return result
def main(argv):
"""
Exports the given JSON file (documents_datei) with the exported DMS document fields to Excel.
The export format is configured via the given export parameter file (export_parameter_datei).
"""
hilfe = f"{os.path.basename(__file__)} -d <documents_datei> -e <export_parameter_datei>"
documents_datei = ""
export_parameter_datei = ""
try:
opts, args = getopt(argv, "hd:e:", ["documents_datei=", "export_parameter_datei="])
except GetoptError:
print(hilfe)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print(hilfe)
sys.exit()
elif opt in ("-d", "--documents_datei"):
documents_datei = arg
elif opt in ("-e", "--export_parameter_datei"):
export_parameter_datei = arg
if not documents_datei or not export_parameter_datei:
print("Usage: " + hilfe)
sys.exit(2)
if not os.path.exists(documents_datei):
raise RuntimeError(f"Die Datei '{documents_datei}' existiert nicht.")
if not os.path.exists(export_parameter_datei):
raise RuntimeError(f"Die Datei '{export_parameter_datei}' existiert nicht.")
with open(documents_datei, encoding="utf-8") as file:
documents = json.load(file)
with open(export_parameter_datei, encoding="utf-8") as file:
export_parameter = json.load(file)
export_nach_excel(documents, export_parameter["export"])
if __name__ == '__main__':
# main(sys.argv[1:])
main(["-d", "../export_documents.json", "-e", "../config/dmsarchiv_vorlage.json"])
``` |
{
"source": "joar/disk-usage-exporter",
"score": 3
} |
#### File: disk-usage-exporter/disk_usage_exporter/logging.py
```python
import logging
import logging.config
import sys
from typing import Dict, Any, Optional, List
import attr
import structlog
class Loggable(Dict[str, Any]):
def __structlog__(self):
if attr.has(type(self)):
return attr.asdict(self)
return self
def add_message(logger, method_name, event_dict):
"""
Creates a ``message`` value based on the ``hint`` and ``key_hint`` keys.
``key_hint`` : ``Optional[str]``
a '.'-separated path of dictionary keys.
``hint`` : ``Optional[str]``
will be formatted using ``.format(**event_dict)``.
"""
def from_hint(ed):
hint = event_dict.pop('hint', None)
if hint is None:
return
try:
return hint.format(**event_dict)
except Exception as exc:
return f'! error formatting message: {exc!r}'
def path_value(start: Loggable, key_path: str) -> Optional[Any]:
value = start
for key in key_path.split('.'):
if value is None:
return None
if hasattr(value, '__structlog__'):
value = value.__structlog__()
value = value.get(key)
return value
def from_key_hint(ed) -> Optional[str]:
key_hint = ed.pop('key_hint', None)
if key_hint is None:
return None
value = path_value(ed, key_hint)
return f'{key_hint}={value!r}'
def from_key_hints(ed) -> List[str]:
key_hints = ed.pop('key_hints', None)
if key_hints is None:
return []
return [
f'{key_hint}={path_value(ed, key_hint)!r}'
for key_hint in key_hints
]
hints = [
from_hint(event_dict),
from_key_hint(event_dict)
]
hints += from_key_hints(event_dict)
existing_message = event_dict.get('message')
# The new message
message = None
if any(hint is not None for hint in hints):
prefix = event_dict['event']
hint = ', '.join(hint for hint in hints if hint is not None)
if existing_message is not None:
# Use existing message in the new hint-based message
message = f'{prefix}: {existing_message}, {hint}'
else:
message = f'{prefix}: {hint}'
elif existing_message is None:
# Use "event" as default message
message = event_dict.get('event')
if message is not None:
event_dict['message'] = message
return event_dict
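# Illustrative sketch (not part of the original source): how add_message() builds the
# "message" key. The event name and keys below are invented for the example.
#
#   event_dict = {"event": "http_request", "key_hint": "response.status",
#                 "response": {"status": 200}}
#   add_message(logger, "info", event_dict)["message"]
#   # -> "http_request: response.status=200"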
def add_severity(logger, method_name, event_dict):
event_dict = structlog.stdlib.add_log_level(logger, method_name, event_dict)
level = event_dict.pop('level')
if level is not None:
event_dict['severity'] = level.upper()
return event_dict
def configure_logging(for_humans=False, level=logging.INFO):
if not for_humans:
renderer = structlog.processors.JSONRenderer()
else:
renderer = structlog.dev.ConsoleRenderer(
colors=structlog.dev._has_colorama
)
timestamper = structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S')
foreign_pre_chain = [
# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.add_log_level,
timestamper,
]
if for_humans:
foreign_pre_chain += [
structlog.processors.format_exc_info,
add_message,
]
processors = [
structlog.stdlib.filter_by_level,
structlog.stdlib.add_logger_name,
add_severity,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
timestamper,
add_message,
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'structlog': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': renderer,
'foreign_pre_chain': foreign_pre_chain,
},
},
'handlers': {
'default': {
'level': level,
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'structlog',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True,
},
}
})
structlog.configure(
processors=processors,
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
```
#### File: disk-usage-exporter/tests/conftest.py
```python
import logging
import pytest
from disk_usage_exporter import logging as _logging
@pytest.fixture(scope='session', autouse=True)
def configure_logging():
_logging.configure_logging(for_humans=True, level=logging.DEBUG)
``` |
{
"source": "JoarGruneau/yatal",
"score": 3
} |
#### File: yatal/yatal/price.py
```python
import numpy as np
from core import rolling_func, offset, divide, replace_nan
def rolling_change(price, window=1):
return offset(price[window:] / price[:-window], window)
def rolling_diff(series, window=1):
return offset(series[window:] - series[:-window], window)
def rolling_max(series, window=20):
return rolling_func(series, window, [np.max])[0]
def rolling_min(series, window=20):
return rolling_func(series, window, [np.min])[0]
def true_price(high, low, close):
return (high + low + close) / 3
def aroon(high, low, window=20):
aroon_up = rolling_func(high, window + 1, [np.argmax])[0] / window
aroon_down = rolling_func(low, window + 1, [np.argmin])[0] / window
return aroon_down * 100, aroon_up * 100
def aroon_oscillator(high, low, window=20):
aroon_down, aroon_up = aroon(high, low, window=window)
return aroon_up - aroon_down
def b_bands(price, window=20, multiplier=2):
mean, std = rolling_func(price, window, [np.mean, np.std])
lower = mean - multiplier * std
upper = mean + multiplier * std
return upper, mean, lower
def b_bands_percent(price, window=20, multiplier=2):
upper, _, lower = b_bands(price, window=window, multiplier=multiplier)
out = divide(price - lower, upper - lower)
return replace_nan(out)
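# Illustrative sketch (not part of the original source): calling the Bollinger helpers on a
# plain numpy array of closing prices (the input type assumed here; values are made up).
#
#   import numpy as np
#   close = np.array([10.0, 10.5, 10.2, 10.8, 11.0, 10.9])
#   upper, middle, lower = b_bands(close, window=3, multiplier=2)
#   pct_b = b_bands_percent(close, window=3)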
def vortex_index(high, low, close, window=20):
true_range = np.maximum.reduce(
[high[1:] - low[1:], high[1:] - close[:-1], low[1:] - close[:-1]])
true_range = offset(rolling_func(true_range, window, [np.sum])[0], 1)
uptrend = offset(np.abs(high[1:] - low[:-1]), 1)
downtrend = offset(np.abs(low[1:] - high[:-1]), 1)
vi_plus = divide(rolling_func(uptrend, window, [np.sum])[0], true_range)
vi_minus = divide(rolling_func(downtrend, window, [np.sum])[0], true_range)
vi_plus[:window] = np.nan
vi_minus[:window] = np.nan
return replace_nan(vi_plus), replace_nan(vi_minus)
def roc(series, window=20):
return offset(
rolling_diff(series, window)[window:] / series[:-window], window)
```
#### File: yatal/yatal/volume.py
```python
import numpy as np
from core import rolling_func, divide, replace_nan
def obv(price, volume):
obv = volume.copy()
obv[1:][price[1:] == price[:-1]] = 0
negative_change = price[1:] < price[:-1]
obv[1:][negative_change] = -obv[1:][negative_change]
return obv.cumsum()
def vwma(price, volume, window=20):
volume_sum = rolling_func(volume, window, [np.sum])[0]
price_volume = rolling_func(price * volume, window, [np.sum])[0]
vwma = divide(price_volume, volume_sum)
return replace_nan(vwma)
def adl(high, low, close, volume):
flow = divide(2 * close - high - low, high - low) * volume
flow[np.isnan(flow)] = 0
return np.cumsum(flow)
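# Illustrative sketch (not part of the original source): typical usage with aligned numpy
# arrays of prices and volumes (the input type assumed here; values are made up).
#
#   import numpy as np
#   close = np.array([10.0, 10.5, 10.2, 10.8])
#   volume = np.array([1000.0, 1200.0, 800.0, 1500.0])
#   on_balance = obv(close, volume)
#   vol_weighted_ma = vwma(close, volume, window=2)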
``` |
{
"source": "JoaRiski/django-simple-graphql",
"score": 2
} |
#### File: example/tests/test_mutations.py
```python
from textwrap import dedent
from example.test_utils.client import GraphQLClient
def test_mutation_addition(gclient: GraphQLClient):
query = dedent(
"""
mutation {
addition(input: {a: 1, b: 2}) {
result
}
}
"""
)
response = gclient.query(query)
gclient.assert_response_has_no_errors(response)
gclient.assert_first_result_matches_expected(
response,
{
"result": 3,
},
)
```
#### File: simple_graphql/auth/middleware.py
```python
from typing import Any
from django.contrib.auth.models import AnonymousUser
from simple_graphql.auth.auth import AUTH_HEADER, TokenAuthentication
AUTH = TokenAuthentication()
class AuthMiddleware:
# TODO: Replace Any types with better types
def resolve(self, next: Any, root: Any, info: Any, **kwargs) -> Any:
self.authenticate(info)
return next(root, info, **kwargs)
def authenticate(self, info: Any) -> None:
graphql_authenticated = getattr(info.context, "graphql_authenticated", False)
if not hasattr(info.context, "user") or (
info.context.user and not graphql_authenticated
):
info.context.user = AnonymousUser()
if info.context.META.get(AUTH_HEADER) and not graphql_authenticated:
auth_result = AUTH.authenticate(info.context)
if auth_result:
info.context.user = auth_result[0]
info.context.graphql_authenticated = True
```
#### File: simple_graphql/auth/models.py
```python
import binascii
import os
from typing import Any, Optional, Tuple
from django.conf import settings
from django.db import models
from django.db.models import Manager
from django.utils.translation import gettext_lazy as _
from simple_graphql.auth.exceptions import AuthenticationException
class AuthenticationSession(models.Model):
objects: "Manager[AuthenticationSession]"
key = models.CharField(_("Key"), max_length=40, primary_key=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="authentication_sessions",
on_delete=models.CASCADE,
verbose_name=_("User"),
)
datetime_created = models.DateTimeField(_("Datetime created"), auto_now_add=True)
def __str__(self) -> str:
return f"{self.key[:5]}{'*' * len(self.key[5:])}"
@classmethod
def generate_key(cls) -> str:
return binascii.hexlify(os.urandom(20)).decode()
@classmethod
def create_for_user(cls, user: Any) -> "AuthenticationSession":
return cls.objects.create(user=user, key=cls.generate_key())
@classmethod
def authenticate(cls, key: str) -> Optional[Tuple[Any, Any]]:
try:
token = cls.objects.select_related("user").get(key=key)
except cls.DoesNotExist:
raise AuthenticationException(_("Invalid token."))
if not token.user.is_active:
raise AuthenticationException(_("Invalid token."))
return (token.user, token)
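# Illustrative sketch (not part of the original source): issuing and checking a token;
# ``user`` is assumed to be a saved, active instance of AUTH_USER_MODEL.
#
#   session = AuthenticationSession.create_for_user(user)
#   AuthenticationSession.authenticate(session.key)  # -> (user, session)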
```
#### File: django/fields/authorize.py
```python
from typing import TYPE_CHECKING, Any
from simple_graphql.django.fields.exceptions import AuthorizationError
if TYPE_CHECKING:
from simple_graphql.django import ModelSchemaConfig
def authorize_query(config: "ModelSchemaConfig", info: Any):
if config.require_login is True:
user = getattr(info.context, "user", None)
if user is None or user.is_anonymous:
raise AuthorizationError("Unauthorized")
```
#### File: django/schema/exceptions.py
```python
from simple_graphql.django.types import ModelClass
class ModelAlreadyRegistered(Exception):
def __init__(self, model_cls: ModelClass):
super().__init__(
f"Model {model_cls.__name__} "
"has already been registered to the GraphQL schema"
)
class MutationAlreadyRegistered(Exception):
def __init__(self, name: str):
super().__init__(
f"Mutation {name} has already been registered to the GraphQL schema"
)
class QueryAlreadyRegistered(Exception):
def __init__(self, name: str):
super().__init__(
f"Query {name} has already been registered to the GraphQL schema"
)
class SchemaAlreadyBuilt(Exception):
pass
``` |
{
"source": "joarkm/olmonopolet-api",
"score": 2
} |
#### File: management/commands/update_beer_details.py
```python
from django.core.management.base import BaseCommand
from beers.models import Product, Beer
from olmonopolet.vmp_api import products as vmp_api_products
from olmonopolet.vmp_api import utilities as vmp_utils
from datetime import datetime
import httpx
class Command(BaseCommand):
help = '''
Update Beer details from Vinmonopolet
'''
def handle(self, *args, **options):
# Log when the job is executed
self.stdout.write(f"Updating Beer details @ {datetime.now()}")
start_time = datetime.now()
# Instantiate httpx Client
with httpx.Client() as client:
# Verify that VMP is online before using the VMP api
if not vmp_utils.isVMPonline(client):
self.stdout.write(f"Vinmonopolet is not available...")
return
beers = Beer.objects.all()
for beer in beers:
# Update Beer details from Vinmonopolet
beer_details = vmp_api_products.get_product_details(client, beer.pk)
# TODO: Add functionality to notify user if beer with stock becomes "buyable:True"
try:
updated_beer_obj, created = Beer.objects.update_or_create(
beer_id = beer,
defaults = {
'name' : beer_details["name"],
'alc_volume' : beer_details['alcohol']['value'],
'buyable' : beer_details["buyable"],
'status' : beer_details["status"],
'launch_date' : datetime.strptime(beer_details["expiredDate"],"%Y-%m-%d")
}
)
self.stdout.write(f"Updating beer details for: {updated_beer_obj.name}")
except Exception as err:
self.stdout.write(f"Could not update beer details for: {beer.name}")
self.stdout.write(str(err))
# Product stock is not to be updated if status is "Utgått" or "Utsolgt"
# This is to reduce load on the system as these beers are not available for purchase
# if updated_beer_obj.status not in ['utgatt', 'utsolgt']:
self.stdout.write(f"Update of beer details took {datetime.now() - start_time} seconds")
return
```
#### File: olmonopolet-api/notifications/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from stores.models import Store
# Create your models here.
class EmailNotification(models.Model):
'''
Model containing user preferences with regards to how they receive email notifications.
'''
username = models.ForeignKey(User, to_field='username', verbose_name='Users', related_name='notification', on_delete=models.CASCADE)
store_id = models.ForeignKey(Store, verbose_name='Stores', to_field='store_id',related_name='notification', on_delete=models.CASCADE, help_text='Store which the user want to receive notifications for')
store_updates = models.BooleanField(help_text='Should the user receive email notification when product stock is updated for chosen store?', default=False)
class Meta:
verbose_name_plural = 'Email Notifications'
ordering = ['-username']
def __str__(self):
return self.username.username
```
#### File: olmonopolet/untappd_scraper/details.py
```python
from bs4 import BeautifulSoup
import httpx
def get_beer_details(url):
'''
Retrieve Untappd beer details
Parameters:
url: Untappd URL for the beer
Returns:
dict: Dictionary with rating data
'''
untappd_details = {}
# Get the web page where the Untappd beer rating data is and parse it using BeautifulSoup
raw_untappd_response = httpx.get(url)
untappd_html = BeautifulSoup(raw_untappd_response,'html.parser')
# Retrieve Data
try:
untappd_details["brewery"] = untappd_html.find("div",class_="content").find('div', class_='name').find('a').string
untappd_details["brewery_url"] = untappd_html.find("div",class_="content").find('div', class_='name').find('a')['href']
untappd_details["style"] = untappd_html.find("div",class_="content").find('div', class_='name').find('p', class_='style').string
# TODO: Få inn description korrekt. Per nå er det problematisk å få kun tekst siden det også er en <a> tag i div.
untappd_details["description"] = untappd_html.find("div", class_="content").find("div",class_="bottom").find("div",class_="desc").find("div",class_="beer-descrption-read-less").contents
untappd_details["img_url"] = untappd_html.find("div",class_="content").find('a',class_ = 'label').find('img')['src']
untappd_details["rating"] = untappd_html.find("div", class_="content").find("div",class_="details").find("div",class_="caps")['data-rating']
# Text is typically 'xxx Ratings', hence I need to split the string and get the first element
untappd_details["num_regs"] = untappd_html.find("div", class_="content").find("div",class_="details").find("p",class_="raters").string.split()[0]
untappd_details["check_in_total"] = untappd_html.find("div",class_="content").find("div",class_="stats").find("span",class_="count").string
untappd_details["check_in_unique"] = untappd_html.find("div",class_="content").find("div",class_="stats").find_all("span",class_="count")[1].string
# TODO: Add further validations and try/excepts
for arg in ['num_regs', 'check_in_total', 'check_in_unique']:
try:
# Stripping thousand separator ',' from numbers
untappd_details[arg] = int(untappd_details[arg].replace(',',''))
except ValueError:
untappd_details[arg] = 0
print(f"Error, {arg} is not an integer.")
except Exception as err:
print(f"Exception cought while updating untappd data: {err}")
return untappd_details
```
#### File: olmonopolet/untappd_scraper/mapping.py
```python
import httpx
from bs4 import BeautifulSoup
import json
import itertools
import time
def _get_search_result_count(search_object):
'''
get number of results when searching for a beer at Untappd
Parameters:
BeautifulSoup object
Returns:
int: Number of search results
'''
try:
for el in search_object.find_all('p', class_ ='total'):
count = el.strong.string
return int(count.replace(' beer',''))
except UnboundLocalError:
return 0
def _get_untappd_mapping_data(search_item):
'''
Pass in a Beautifulsoup html object representing a search on Untappd to return details about the search result.
Returns data for the first search result on Untappd.com
Parameters:
Beautifulsoup object
Returns:
dict: beer id, name and URL
'''
# Untappd beer object to return
mapping_data = {
'id':'',
'url': '',
'name': '',
}
_id = search_item.find("a",class_="label")["href"].replace('/beer/','')
_url = search_item.find("div",class_="beer-details").find('p',class_ = 'name').find('a')['href']
_name = search_item.find("div",class_="beer-item").find('p',class_ = 'name').find('a').string
mapping_data["id"]= int(_id)
mapping_data["url"]='https://untappd.com' + _url
mapping_data["name"]=_name
return mapping_data
def clean_beer_name(beer_name):
'''
Pre-process beer name before searching for the given name on Untappd.com.
Parameters:
str: Beer name
Returns:
list: Processed beer name as list
'''
# Words to clean from beer name
word_replacements = [
{
'replace': ' x ',
'replace_with': ' '
},
{
'replace': ',',
'replace_with': ''
},
{
'replace': ' & ',
'replace_with': ' '
},
{
'replace': '!',
'replace_with': ''
},
]
for word in word_replacements:
beer_name = beer_name.lower().replace(word['replace'],word['replace_with'])
# Limiting number of search words to [6] for long beer names
beer_name = beer_name.split()[:6] if len(beer_name.split()) > 7 else beer_name.split()
return beer_name
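# Illustrative sketch (not part of the original source): what clean_beer_name() does to a
# made-up Vinmonopolet-style beer name.
#
#   clean_beer_name("Lervig x Hoppy Friends, Barrel Aged!")
#   # -> ['lervig', 'hoppy', 'friends', 'barrel', 'aged']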
def find_untappd_mapping(beer_name, current_search_count, search_limit):
'''
Obtain mapping between beer from Vinmonopolet and Untappd. Number of searches are limited according to set parameters.
Parameters:
string: Vinmonopolet beer name
int: Number of searches performed at the time when the method is called
int: The maximum number of searches allowed
Returns:
dict: beer id, name, URL and mapping status for beer at Untappd.com. Returns dictionary with id=0 and empty name and URL if not found.
'''
####################################
URL = "https://untappd.com/search"
####################################
# Dictionary to return with mapping details on Untappd.com
mapping_details = {
'name':'',
'id': 0,
'url':'',
'match': False,
'searches': current_search_count
}
best_search = {
'results' : 10000,
'words' : []
}
beer_name = clean_beer_name(beer_name)
word_count = len(beer_name)
# Indicates if a mapping is successfully obtained with Untappd.com
mapping_success = False
for i in range(word_count):
# List with all word combinations
word_combinations = [list(word) for word in itertools.combinations(beer_name, word_count - i)]
for words in word_combinations:
query_words = ' '.join(words)
# Search param for untappd
PARAMS = {"q": query_words }
# Check number of searches - exit if search count > limit
current_search_count += 1
mapping_details['searches'] = current_search_count
if current_search_count > search_limit:
return mapping_details
print(f"Searching for: {query_words}, count ({current_search_count}/{search_limit})")
# Perform query on Untappd using Word combinations from name on VMP
response = httpx.get(URL,params=PARAMS)
untappd_html = BeautifulSoup(response,'html.parser')
if _get_search_result_count(untappd_html) == 1:
mapping_details = _get_untappd_mapping_data(untappd_html)
mapping_details['searches'] = current_search_count
mapping_details['match'] = True
mapping_success = True
break
else:
if _get_search_result_count(untappd_html) < best_search['results']:
best_search['results'] = _get_search_result_count(untappd_html)
best_search['html'] = untappd_html
# TODO: add more logic to find beer if it is not possible to find a single search result
# Reduce request intensity towards Untappd.com
time.sleep(1)
if mapping_success:
# Break if result is found
break
# if not mapping_success:
# mapping_details = _get_untappd_mapping_data(best_search['html'])
# print(f"Using best result with {best_search['results']} search results")
return mapping_details
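# Illustrative sketch (not part of the original source): how a caller such as the
# management command drives this function; the beer name is invented.
#
#   result = find_untappd_mapping("Imaginary Brewing Hazy IPA",
#                                 current_search_count=0, search_limit=150)
#   if result["match"]:
#       print(result["id"], result["url"])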
```
#### File: olmonopolet/vmp_api/products.py
```python
import os, httpx
def get_all_products():
'''
!!!!DEPRECATED!!!!
Retrieve all products that are currently in stock by Vinmonopolet. Uses "products/GET accumulated-stock" API endpoint from VMP.
Parameters:
none
Returns:
list: List of all products currently in stock at Vinmonopolet in JSON format
'''
URL = "https://apis.vinmonopolet.no/products/v0/accumulated-stock"
HEADERS = {"Ocp-Apim-Subscription-Key": os.environ.get('VMP_PRIMARY_KEY') }
PARAMS = {"maxResults": 100000}
# Retrieve all products currently in stock at VMP
try:
all_products = httpx.get(URL,headers=HEADERS,params=PARAMS).json()
except Exception as err:
all_products = []
# For testing purposes change this return value to all_products[:X] to slice list
return all_products
def get_products(client, selection, page):
'''Retrieve products, for a selection, that are currently in stock by Vinmonopolet.
Each page retrieved returns 100 products
Parameters:
arg1 obj: httpx Client instance
arg2 str: product selection, i.e. 'øl', 'mjød', 'sider'
arg3 int: page number to retrieve as results from VMP are paginated
Returns:
int: HTTP status code from VMP request
list: Products on current page
int: Total number of pages with products for current selection
'''
URL = 'https://www.vinmonopolet.no/api/search'
PARAMS = {'q':f':relevance:visibleInSearch:true:mainCategory:{selection}',
'searchType': 'product',
'fields': 'FULL',
'pageSize': 100,
'currentPage': page}
HEADERS = {
# 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_16_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_16_0)'
}
try:
response = client.get(URL, params=PARAMS, headers=HEADERS)
response_code = response.status_code
content = response.json()
products = content['productSearchResult']['products']
total_pages = content['productSearchResult']['pagination']['totalPages']
total_products = content['productSearchResult']['pagination']['totalResults']
except Exception as err:
response_code = 500
products = []
total_pages = 0
return response_code, products, total_pages
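# Illustrative sketch (not part of the original source): paging through a selection with
# get_products(); 'øl' is one of the selections named in the docstring above.
#
#   with httpx.Client() as client:
#       page, pages = 0, 1
#       while page < pages:
#           status, products, pages = get_products(client, 'øl', page)
#           if status != 200:
#               break
#           page += 1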
def get_product_details(client, product_id):
'''Retrieve product details.
Parameters:
arg1 obj: httpx Client instance
arg2 int: product_id
Returns:
dict: JSON with product details if success, otherwise returns False (bool).
'''
product_url = f"https://www.vinmonopolet.no/api/products/{product_id}" #+ product_id
PARAMS = {"fields": 'FULL'}
HEADERS = {
# 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_16_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_16_0)'
}
# Get details from VMP about product
try:
product_details = client.get(product_url,params=PARAMS, headers=HEADERS).json()
except Exception as err:
product_details = False
return product_details
```
#### File: olmonopolet/vmp_scraper/product_details.py
```python
from bs4 import BeautifulSoup
import httpx
def test():
return 'test'
def get_details_web(url):
'''
Retrieve Vinmonopolet beer details from the web
Parameters:
url: Vinmonopolet URL path for the beer (excl. www.vinmonopolet.no)
Returns:
dict: Dictionary with beer data - keys: style, alcohol, brewery
'''
VMP_BASE_URL = 'https://www.vinmonopolet.no'
beer_details = {}
# Get the web page of the beer (Vinmonopolet) and parse it using BeautifulSoup
raw_vmp_response = httpx.get(f"{VMP_BASE_URL}{url}")
vmp_html = BeautifulSoup(raw_vmp_response,'html.parser')
# Scrape Data from Product page on Vinmonopolet
beer_details['alcohol'] = vmp_html.find("ul", class_="product__contents-list").find("span", class_='product__contents-list__content-percentage').string
for tab in vmp_html.find_all("ul", class_="product__tab-list"):
for detail in tab.find_all('li'):
product_info_category = detail.find('span', class_='product__tab-list__label').string
if product_info_category == 'Varetype':
beer_details['style'] = detail.find('span', class_='product__tab-list__value').get_text()
elif product_info_category == 'Produsent':
beer_details["brewery"] = detail.find('span', class_='product__tab-list__value').get_text()
# Modify and clean product details
beer_details['style'] = beer_details['style'].replace('\n','').replace('\t','').replace('Øl, ','')
beer_details['brewery'] = beer_details['brewery'].replace('\n','').replace('\t','')
beer_details['alcohol'] = beer_details['alcohol'].replace('%','')
return beer_details
```
#### File: olmonopolet-api/profiles/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from stores.models import Store
# Create your models here.
class Profile(models.Model):
'''
Profile associated with User model.
Automatically added from signal when User is created.
'''
user = models.OneToOneField(User, verbose_name='User',related_name='profiles',on_delete=models.CASCADE)
store = models.ForeignKey(Store, verbose_name='Store', related_name='profile_stores', on_delete=models.CASCADE, blank=True, null=True)
premium = models.BooleanField(help_text='Is user premium?', default=False)
# Untappd Fields
untappd_username = models.CharField(help_text='Untappd username', max_length=250, blank=True)
untappd_avatar_url = models.URLField(help_text='URL to Avatar used on Untappd', max_length=256, blank=True, default='')
untappd_sync_date = models.DateTimeField(help_text='Time when Profile was last synced with Untappd',blank=True, null=True)
def __str__(self):
return self.user.username
```
#### File: management/commands/map_untappd.py
```python
from django.core.management.base import BaseCommand
from beers.models import Beer
from untappd.models import UntappdMapping
from olmonopolet.untappd_scraper import mapping
from datetime import datetime
class Command(BaseCommand):
help = '''
Find mapping between Beers at Vinmonopolet and Untappd.
Beers will be automatically matched if a single search result is found on Untappd.
A limited number of searches per run (SEARCH_LIMIT) is used to avoid blocking, in line with the rate limiting employed by the Untappd API.
Hence, this management command should only be executed once per hour.
'''
def handle(self, *args, **options):
# Log when the job is executed
self.stdout.write(f"Updating Beer mappings @ {datetime.now()}")
SEARCH_COUNT = 0
SEARCH_LIMIT = 150
# Retrieve all Beers in DB that do not have an UntappdMapping, using related_name
unmapped_beers = Beer.objects.filter(mappings = None)
self.stdout.write(f"Number of unmapped beers: {unmapped_beers.count()}")
for beer in unmapped_beers:
untappd_mapping = mapping.find_untappd_mapping(beer.name, SEARCH_COUNT, SEARCH_LIMIT)
SEARCH_COUNT = untappd_mapping['searches']
# Log mapping process and monitor that number of searches does not exceed limit
self.stdout.write(f"Mapping '{beer.name}' with matching status: {untappd_mapping['match']}. Number of searches performed: {untappd_mapping['searches']}")
mapping_obj = UntappdMapping.objects.create(
beer_id = beer,
untappd_id = untappd_mapping['id'],
name = untappd_mapping['name'],
url = untappd_mapping['url'],
auto_match = untappd_mapping['match']
)
if SEARCH_COUNT > SEARCH_LIMIT:
break
return
``` |
{
"source": "joarndt/ts_telegram_bot",
"score": 3
} |
#### File: ts_telegram_bot/src/birthday.py
```python
from datetime import datetime
from sticker import Sticker
class Birthday(object):
def __init__(self, name, date):
self.name = name
self.date = date
def __str__(self):
new = datetime(2000, self.date.month, self.date.day)
return new.strftime("%d %B ") + str(self.date.year) + " - *" + self.name + "*"
def isToday(self):
return self.date.month == datetime.today().month and self.date.day == datetime.today().day
def wishHappyBirthday(self, bot, chat_id):
if self.isToday():
bot.sendMessage(chat_id, "Happy Birthday *" + self.name + "*", parse_mode="Markdown")
bot.sendSticker(chat_id, Sticker.getInstance().getSticker("celebration"))
def setName(self, name):
self.name = name
def setDate(self, date):
self.date = date
def getName(self):
return self.name
def getDate(self):
return self.date
```
#### File: src/tsclient/connection.py
```python
from collections import deque
import logging
from telnetlib import Telnet
from time import time, sleep
from message import Command, MessageFactory
class TeamspeakConnection(Telnet):
def __init__(self, hostname, port, timeout, pipe_in, pipe_out, keep_alive=30, poll_interval=0.125):
self.pipe_in = pipe_in
self.pipe_out = pipe_out
self.logger = logging.getLogger('teamspeak3.TeamspeakConnection')
self.commands_unresponded = deque()
self.keep_alive = keep_alive
self.poll_interval = poll_interval
Telnet.__init__(self, hostname, port, timeout)
def write_command(self, command):
self.logger.info("Sending command %s" % command.__repr__())
self.commands_unresponded.append(command)
self.write("%s\n" % command.output)
def write_keep_alive(self):
self.logger.debug("Sending keepalive message.")
self.write("\n")
def main_loop(self):
while True:
incoming = self.receive_message()
if incoming:
self.pipe_out.put(incoming)
else:
# Only write messages if we have nothing incoming
if not self.pipe_in.empty():
comm = self.pipe_in.get_nowait()
if isinstance(comm, Command):
self.write_command(comm)
elif int(time()) % self.keep_alive == 0:
self.write_keep_alive()
sleep(self.poll_interval)
def receive_message(self):
try:
incoming_message = self.read_until('\n', self.timeout).strip()
if incoming_message.strip():
self.logger.debug("Incoming string \"%s\"" % incoming_message)
message = MessageFactory.get_message(incoming_message)
if message:
if message.is_response():
message.set_origination(
self.commands_unresponded.popleft()
)
elif message.is_reset_message():
# Command didn't ask for a response
if self.commands_unresponded:
self.commands_unresponded.popleft()
self.logger.info("Received message %s" % message.__repr__())
return message
except ValueError as e:
pass
except IndexError as e:
self.logger.warning(
"Unable to create message for \"%s\"; %s" % (
incoming_message,
e
)
)
except Exception as e:
return e
``` |
{
"source": "joaromera/generating-melodies-with-rnn-lstm",
"score": 3
} |
#### File: joaromera/generating-melodies-with-rnn-lstm/train.py
```python
import tensorflow.keras as keras
from preprocess import generate_training_sequences, SEQUENCE_LENGTH
OUTPUT_UNITS = 38
NUM_UNITS = [256]
LOSS = "sparse_categorical_crossentropy"
LEARNING_RATE = 0.001
EPOCHS = 90
BATCH_SIZE = 64
SAVE_MODEL_PATH = "model.h5"
def build_model(output_units, num_units, loss, learning_rate):
"""Builds and compiles model
:param output_units (int): Num output units
:param num_units (list of int): Num of units in hidden layers
:param loss (str): Type of loss function to use
:param learning_rate (float): Learning rate to apply
:return model (tf model): Where the magic happens :D
"""
# create the model architecture
model = keras.Sequential()
activation_f = 'relu'
n_of_timesteps = 32
kernel_size = 3
# activation_f = 'sigmoid'
# activation_f = 'tanh'
# Parameters explanation: https://keras.io/api/layers/core_layers/embedding/
# model.add(keras.layers.Embedding(num_units[0], output_units, input_length=n_of_timesteps * 2,trainable=True))
model.add(keras.layers.Input(shape=(None, output_units)))
# Parameters explanation: https://keras.io/api/layers/convolution_layers/convolution1d/
model.add(keras.layers.Conv1D(n_of_timesteps*2*2,kernel_size, padding='causal',activation=activation_f))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.MaxPool1D(2))
model.add(keras.layers.Conv1D(n_of_timesteps*4*2,kernel_size, activation=activation_f,dilation_rate=2,padding='causal'))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.MaxPool1D(2))
model.add(keras.layers.Conv1D(n_of_timesteps*8*2,kernel_size, activation=activation_f,dilation_rate=4,padding='causal'))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.MaxPool1D(2))
#model.add(Conv1D(256,5,activation=activation_f))
model.add(keras.layers.GlobalMaxPool1D())
# Parameters explanation: https://keras.io/api/layers/core_layers/dense/
# 256 -> 512
model.add(keras.layers.Dense(512, activation=activation_f))
model.add(keras.layers.Dense(output_units, activation='softmax'))
# compile model
model.compile(loss=loss,
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
metrics=["accuracy"])
model.summary()
return model
def train(output_units=OUTPUT_UNITS, num_units=NUM_UNITS, loss=LOSS, learning_rate=LEARNING_RATE):
"""Train and save TF model.
:param output_units (int): Num output units
:param num_units (list of int): Num of units in hidden layers
:param loss (str): Type of loss function to use
:param learning_rate (float): Learning rate to apply
"""
# generate the training sequences
inputs, targets = generate_training_sequences(SEQUENCE_LENGTH)
# build the network
model = build_model(output_units, num_units, loss, learning_rate)
# train the model
model.fit(inputs, targets, epochs=EPOCHS, batch_size=BATCH_SIZE)
# save the model
model.save(SAVE_MODEL_PATH)
if __name__ == "__main__":
train()
``` |
{
"source": "joaromi/Spiking-RetinaNet",
"score": 3
} |
#### File: backends/inisim/temporal_mean_rate_tensorflow.py
```python
import os
import json
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, AveragePooling2D, \
MaxPooling2D, Conv2D, DepthwiseConv2D, UpSampling2D, ZeroPadding2D, Reshape, Layer, \
Concatenate
from snntoolbox.parsing.utils import get_inbound_layers
from snntoolbox.simulation.backends.custom_layers import NormReshape, NormAdd, NormConv2D
# Experimental
clamp_var = False
v_clip = False
class SpikeLayer(Layer):
"""Base class for layer with spiking neurons."""
def __init__(self, **kwargs):
self.config = kwargs.pop(str('config'), None)
# if self.config is None:
# from snntoolbox.bin.utils import load_config
# self.config = load_config('<log_dir_of_current_run>/.config')
self.layer_type = self.class_name
self.dt = self.config.getfloat('simulation', 'dt')
self.duration = self.config.getint('simulation', 'duration')
self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
self._v_thresh = self.config.getfloat('cell', 'v_thresh')
self.v_thresh = None
self.time = None
self.out_shape = None
self.mem = self.spiketrain = self.impulse = self.spikecounts = None
self.refrac_until = self.max_spikerate = None
if clamp_var:
self.spikerate = self.var = None
from snntoolbox.utils.utils import get_abs_path
path, filename = \
get_abs_path(self.config.get('paths', 'filename_clamp_indices'),
self.config)
if filename != '':
filepath = os.path.join(path, filename)
assert os.path.isfile(filepath), \
"File with clamp indices not found at {}.".format(filepath)
self.filename_clamp_indices = filepath
self.clamp_idx = None
self.payloads = None
self.payloads_sum = None
self.online_normalization = self.config.getboolean(
'normalization', 'online_normalization')
allowed_kwargs = {'input_shape',
'batch_input_shape',
'batch_size',
'dtype',
'name',
'trainable',
'weights',
'input_dtype', # legacy
}
for kwarg in kwargs.copy():
if kwarg not in allowed_kwargs:
kwargs.pop(kwarg)
Layer.__init__(self, **kwargs)
self.stateful = True
self._floatx = tf.keras.backend.floatx()
def reset(self, sample_idx):
"""Reset layer variables."""
self.reset_spikevars(tf.constant(sample_idx))
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
def update_neurons(self):
"""Update neurons according to activation function."""
new_mem = self.get_new_mem()
if hasattr(self, 'activation_str'):
if self.activation_str == 'softmax':
output_spikes = self.softmax_activation(new_mem)
elif self.activation_str == 'binary_sigmoid':
output_spikes = self.binary_sigmoid_activation(new_mem)
elif self.activation_str == 'binary_tanh':
output_spikes = self.binary_tanh_activation(new_mem)
elif '_Q' in self.activation_str:
m, f = map(int, self.activation_str[
self.activation_str.index('_Q') + 2:].split('.'))
output_spikes = self.quantized_activation(new_mem, m, f)
else:
output_spikes = self.linear_activation(new_mem)
else:
output_spikes = self.linear_activation(new_mem)
# Store spiking
self.set_reset_mem(new_mem, output_spikes)
# Store refractory
if self.tau_refrac > 0:
new_refractory = tf.where(tf.not_equal(output_spikes, 0),
self.time + self.tau_refrac,
self.refrac_until)
self.refrac_until.assign(new_refractory)
if self.payloads:
residuals = tf.where(tf.not_equal(output_spikes, 0),
new_mem - self._v_thresh, new_mem)
self.update_payload(residuals, output_spikes)
if self.online_normalization:
self.spikecounts.assign_add(tf.cast(tf.not_equal(output_spikes, 0),
self._floatx))
self.max_spikerate.assign(tf.reduce_max(self.spikecounts)
* self.dt / self.time)
if self.spiketrain is not None:
self.spiketrain.assign(tf.cast(tf.not_equal(output_spikes, 0),
self._floatx) * self.time)
return tf.cast(output_spikes, self._floatx)
def update_payload(self, residuals, spikes):
"""Update payloads.
Uses the residual of the membrane potential after spike.
"""
idxs = tf.not_equal(spikes, 0)
payloads = tf.where(idxs, residuals[idxs] - self.payloads_sum[idxs],
self.payloads)
payloads_sum = tf.where(idxs, self.payloads_sum + self.payloads,
self.payloads_sum)
self.payloads.assign(payloads)
self.payloads_sum.assign(payloads_sum)
def linear_activation(self, mem):
"""Linear activation."""
return tf.cast(tf.greater_equal(mem, self.v_thresh), self._floatx) * \
self.v_thresh
def binary_sigmoid_activation(self, mem):
"""Binary sigmoid activation."""
return tf.cast(tf.greater(mem, 0), self._floatx) * self.v_thresh
def binary_tanh_activation(self, mem):
"""Binary tanh activation."""
output_spikes = tf.cast(tf.greater(mem, 0), self._floatx) \
* self.v_thresh
output_spikes += tf.cast(tf.less(mem, 0), self._floatx) \
* -self.v_thresh
return output_spikes
def softmax_activation(self, mem):
"""Softmax activation."""
# spiking_samples = k.less_equal(k.random_uniform([self.config.getint(
# 'simulation', 'batch_size'), ]), 300 * self.dt / 1000.)
# spiking_neurons = k.repeat(spiking_samples, 10)
# activ = k.softmax(mem)
# max_activ = k.max(activ, axis=1, keepdims=True)
# output_spikes = k.equal(activ, max_activ).astype(self._floatx)
# output_spikes = tf.where(k.equal(spiking_neurons, 0),
# k.zeros_like(output_spikes), output_spikes)
# new_and_reset_mem = tf.where(spiking_neurons, k.zeros_like(mem),
# mem)
# self.add_update([(self.mem, new_and_reset_mem)])
# return output_spikes
output_spikes = tf.less_equal(tf.random.uniform(tf.shape(mem)),
tf.nn.softmax(mem))
return tf.cast(output_spikes, self._floatx) * self.v_thresh
def quantized_activation(self, mem, m, f):
"""Activation with precision reduced to fixed point format Qm.f."""
# Todo: Needs to be implemented somehow...
return tf.cast(tf.greater_equal(mem, self.v_thresh), self._floatx) * \
self.v_thresh
def get_new_mem(self):
"""Add input to membrane potential."""
# Destroy impulse if in refractory period
masked_impulse = self.impulse if self.tau_refrac == 0 else \
tf.where(tf.greater(self.refrac_until, self.time),
tf.zeros_like(self.impulse), self.impulse)
# Add impulse
if clamp_var:
# Experimental: Clamp the membrane potential to zero until the
# presynaptic neurons fire at their steady-state rates. This helps
# avoid a transient response.
new_mem = tf.cond(tf.less(tf.reduce_mean(self.var), 1e-4) +
tf.greater(self.time, self.duration / 2),
lambda: self.mem + masked_impulse,
lambda: self.mem)
elif hasattr(self, 'clamp_idx'):
# Set clamp-duration by a specific delay from layer to layer.
new_mem = tf.cond(tf.less(self.time, self.clamp_idx),
lambda: self.mem,
lambda: self.mem + masked_impulse)
elif v_clip:
# Clip membrane potential to prevent too strong accumulation.
new_mem = tf.clip_by_value(self.mem + masked_impulse, -3, 3)
else:
new_mem = self.mem + masked_impulse
if self.config.getboolean('cell', 'leak'):
# Todo: Implement more flexible version of leak!
new_mem = tf.where(tf.greater(new_mem, 0), new_mem - 0.1 * self.dt,
new_mem)
return new_mem
def set_reset_mem(self, mem, spikes):
"""
Reset membrane potential ``mem`` array where ``spikes`` array is
nonzero.
"""
if (hasattr(self, 'activation_str') and
self.activation_str == 'softmax'):
# Turn off reset (uncomment second line) to get a faster and better
# top-1 error. The top-5 error is better when resetting:
# new = tf.where(tf.not_equal(spikes, 0), tf.zeros_like(mem), mem)
new = tf.identity(mem)
elif self.config.get('cell', 'reset') == 'Reset by subtraction':
if self.payloads: # Experimental.
new = tf.where(tf.not_equal(spikes, 0),
tf.zeros_like(mem), mem)
else:
new = tf.where(tf.greater(spikes, 0), mem - self.v_thresh, mem)
new = tf.where(tf.less(spikes, 0), new + self.v_thresh, new)
elif self.config.get('cell', 'reset') == 'Reset by modulo':
new = tf.where(tf.not_equal(spikes, 0), mem % self.v_thresh, mem)
else: # self.config.get('cell', 'reset') == 'Reset to zero':
new = tf.where(tf.not_equal(spikes, 0), tf.zeros_like(mem), mem)
self.mem.assign(new)
def get_new_thresh(self):
"""Get new threshhold."""
thr_min = self._v_thresh / 100
thr_max = self._v_thresh
r_lim = 1 / self.dt
return thr_min + (thr_max - thr_min) * self.max_spikerate / r_lim
# return tf.cond(
# k.equal(self.time / self.dt % settings['timestep_fraction'], 0) *
# k.greater(self.max_spikerate, settings['diff_to_min_rate']/1000)*
# k.greater(1 / self.dt - self.max_spikerate,
# settings['diff_to_max_rate'] / 1000),
# lambda: self.max_spikerate, lambda: self.v_thresh)
def get_time(self):
"""Get simulation time variable.
Returns
-------
time: float
Current simulation time.
"""
return self.time.eval
def set_time(self, time):
"""Set simulation time variable.
Parameters
----------
time: float
Current simulation time.
"""
self.time.assign(time)
def init_membrane_potential(self, output_shape=None, mode='zero'):
"""Initialize membrane potential.
Helpful to avoid transient response in the beginning of the simulation.
Not needed when reset between frames is turned off, e.g. with a video
data set.
Parameters
----------
output_shape: Optional[tuple]
Output shape
mode: str
Initialization mode.
- ``'uniform'``: Random numbers from uniform distribution in
``[-thr, thr]``.
- ``'bias'``: Negative bias.
- ``'zero'``: Zero (default).
Returns
-------
init_mem: ndarray
A tensor of ``self.output_shape`` (same as layer).
"""
if output_shape is None:
output_shape = self.output_shape
if mode == 'uniform':
init_mem = tf.random.uniform(output_shape,
-self._v_thresh, self._v_thresh)
elif mode == 'bias':
init_mem = tf.zeros(output_shape, self._floatx)
if hasattr(self, 'b'):
b = self.get_weights()[1]
for i in range(len(b)):
init_mem[:, i, Ellipsis] = -b[i]
else: # mode == 'zero':
init_mem = tf.zeros(output_shape, self._floatx)
return init_mem
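# Worked example (illustrative): with mode='uniform' and a threshold of 1.0,
# every neuron starts at a random potential in [-1.0, 1.0]; with the default
# 'zero' mode all potentials start at 0, which can produce a short transient
# response at the beginning of the simulation.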
@tf.function
def reset_spikevars(self, sample_idx):
"""
Reset variables present in spiking layers. Can be turned off for
instance when a video sequence is tested.
"""
mod = self.config.getint('simulation', 'reset_between_nth_sample')
mod = mod if mod else sample_idx + 1
do_reset = sample_idx % mod == 0
if do_reset:
self.mem.assign(self.init_membrane_potential())
self.time.assign(self.dt)
if self.tau_refrac > 0:
self.refrac_until.assign(tf.zeros(self.output_shape, self._floatx))
if self.spiketrain is not None:
self.spiketrain.assign(tf.zeros(self.output_shape, self._floatx))
if self.payloads:
self.payloads.assign(tf.zeros(self.output_shape, self._floatx))
self.payloads_sum.assign(tf.zeros(self.output_shape, self._floatx))
if self.online_normalization and do_reset:
self.spikecounts.assign(tf.zeros(self.output_shape, self._floatx))
self.max_spikerate.assign(0)
self.v_thresh.assign(self._v_thresh)
if clamp_var and do_reset:
self.spikerate.assign(tf.zeros(self.input_shape, self._floatx))
self.var.assign(tf.zeros(self.input_shape, self._floatx))
@tf.function
def init_neurons(self, input_shape):
"""Init layer neurons."""
from snntoolbox.bin.utils import get_log_keys, get_plot_keys
output_shape = self.compute_output_shape(input_shape)
if self.v_thresh is None: # Need this check because of @tf.function.
self.v_thresh = tf.Variable(self._v_thresh, name='v_thresh',
trainable=False)
if self.mem is None:
self.mem = tf.Variable(self.init_membrane_potential(output_shape),
name='v_mem', trainable=False)
if self.time is None:
self.time = tf.Variable(self.dt, name='dt', trainable=False)
# To save memory and computations, allocate only where needed:
if self.tau_refrac > 0 and self.refrac_until is None:
self.refrac_until = tf.Variable(
tf.zeros(output_shape), name='refrac_until', trainable=False)
if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
'hist_spikerates_activations', 'operations',
'synaptic_operations_b_t', 'neuron_operations_b_t',
'spiketrains_n_b_l_t'} & (get_plot_keys(self.config) |
get_log_keys(self.config))) and self.spiketrain is None:
self.spiketrain = tf.Variable(tf.zeros(output_shape),
trainable=False, name='spiketrains')
if self.online_normalization and self.spikecounts is None:
self.spikecounts = tf.Variable(tf.zeros(output_shape),
trainable=False, name='spikecounts')
self.max_spikerate = tf.Variable(tf.zeros([1]), trainable=False,
name='max_spikerate')
if self.config.getboolean('cell', 'payloads') \
and self.payloads is None:
self.payloads = tf.Variable(tf.zeros(output_shape),
trainable=False, name='payloads')
self.payloads_sum = tf.Variable(
tf.zeros(output_shape), trainable=False, name='payloads_sum')
if clamp_var and self.spikerate is None:
self.spikerate = tf.Variable(tf.zeros(input_shape),
trainable=False, name='spikerates')
self.var = tf.Variable(tf.zeros(input_shape),
trainable=False, name='var')
if hasattr(self, 'clamp_idx'):
self.clamp_idx = self.get_clamp_idx()
def get_layer_idx(self):
"""Get index of layer."""
label = self.name.split('_')[0]
layer_idx = None
for i in range(len(label)):
if label[:i].isdigit():
layer_idx = int(label[:i])
return layer_idx
def get_clamp_idx(self):
"""Get time step when to stop clamping membrane potential.
Returns
-------
: int
Time step when to stop clamping.
"""
with open(self.filename_clamp_indices) as f:
clamp_indices = json.load(f)
clamp_idx = clamp_indices.get(str(self.get_layer_idx()))
print("Clamping membrane potential until time step {}.".format(
clamp_idx))
return clamp_idx
def update_avg_variance(self, spikes):
"""Keep a running average of the spike-rates and the their variance.
Parameters
----------
spikes:
Output spikes.
"""
delta = spikes - self.spikerate
spikerate_new = self.spikerate + delta / self.time
var_new = self.var + delta * (spikes - spikerate_new)
self.var.assign(var_new / self.time)
self.spikerate.assign(spikerate_new)
@tf.function
def update_b(self):
"""
Get a new value for the bias, relaxing it over time to the true value.
"""
i = self.get_layer_idx()
m = tf.clip_by_value(1 - (1 - 2 * self.time / self.duration) * i / 50,
0, 1)
self.bias.assign(self.bias * m)
def add_payloads(prev_layer, input_spikes):
"""Get payloads from previous layer."""
# Get only payloads of those pre-synaptic neurons that spiked
payloads = tf.where(tf.equal(input_spikes, 0.),
tf.zeros_like(input_spikes), prev_layer.payloads)
print("Using spikes with payloads from layer {}".format(prev_layer.name))
return input_spikes + payloads
def spike_call(call):
@tf.function
def decorator(self, x):
if clamp_var:
# Clamp membrane potential if spike rate variance too high
self.update_avg_variance(x)
if self.online_normalization:
# Modify threshold if firing rate of layer too low
self.v_thresh.assign(self.get_new_thresh())
if self.payloads:
# Add payload from previous layer
x = add_payloads(get_inbound_layers(self)[0], x)
self.impulse = call(self, x)
return self.update_neurons()
return decorator
def get_isi_from_impulse(impulse, epsilon):
return tf.where(tf.less(impulse, epsilon), tf.zeros_like(impulse),
tf.divide(1., impulse))
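# Worked example (illustrative): for impulse values [0.0, 0.5, 2.0] and
# epsilon=0.1, the result is [0.0, 2.0, 0.5] -- sub-threshold impulses map to
# zero, the rest to their reciprocal (an inter-spike-interval estimate).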
class SpikeConcatenate(Concatenate):
"""Spike merge layer"""
def __init__(self, axis, **kwargs):
kwargs.pop(str('config'))
Concatenate.__init__(self, axis, **kwargs)
@staticmethod
def get_time():
pass
@staticmethod
def reset(sample_idx):
"""Reset layer variables."""
pass
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
class SpikeFlatten(Flatten):
"""Spike flatten layer."""
def __init__(self, **kwargs):
kwargs.pop(str('config'))
Flatten.__init__(self, **kwargs)
def call(self, x, mask=None):
return super(SpikeFlatten, self).call(x)
@staticmethod
def get_time():
pass
@staticmethod
def reset(sample_idx):
"""Reset layer variables."""
pass
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
class SpikeZeroPadding2D(ZeroPadding2D):
"""Spike ZeroPadding2D layer."""
def __init__(self, **kwargs):
kwargs.pop(str('config'))
ZeroPadding2D.__init__(self, **kwargs)
def call(self, x, mask=None):
return ZeroPadding2D.call(self, x)
@staticmethod
def get_time():
pass
@staticmethod
def reset(sample_idx):
"""Reset layer variables."""
pass
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
class SpikeUpSampling2D(UpSampling2D):
"""Spike UpSampling2D layer."""
def __init__(self, **kwargs):
kwargs.pop(str('config'))
UpSampling2D.__init__(self, **kwargs)
def call(self, x, mask=None):
return UpSampling2D.call(self, x)
@staticmethod
def get_time():
pass
@staticmethod
def reset(sample_idx):
"""Reset layer variables."""
pass
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
class SpikeReshape(Reshape):
"""Spike Reshape layer."""
def __init__(self, **kwargs):
kwargs.pop(str('config'))
Reshape.__init__(self, **kwargs)
def call(self, x, mask=None):
return Reshape.call(self, x)
@staticmethod
def get_time():
pass
@staticmethod
def reset(sample_idx):
"""Reset layer variables."""
pass
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
class SpikeDense(Dense, SpikeLayer):
"""Spike Dense layer."""
def build(self, input_shape):
"""Creates the layer neurons and connections.
Parameters
----------
input_shape: Union[list, tuple, Any]
Keras tensor (future input to layer) or list/tuple of Keras tensors
to reference for weight shape computations.
"""
Dense.build(self, input_shape)
self.init_neurons(input_shape.as_list())
if self.config.getboolean('cell', 'bias_relaxation'):
self.update_b()
@spike_call
def call(self, x, **kwargs):
return Dense.call(self, x)
class SpikeConv2D(Conv2D, SpikeLayer):
"""Spike 2D Convolution."""
def build(self, input_shape):
"""Creates the layer weights.
Must be implemented on all layers that have weights.
Parameters
----------
input_shape: Union[list, tuple, Any]
Keras tensor (future input to layer) or list/tuple of Keras tensors
to reference for weight shape computations.
"""
Conv2D.build(self, input_shape)
self.init_neurons(input_shape.as_list())
if self.config.getboolean('cell', 'bias_relaxation'):
self.update_b()
@spike_call
def call(self, x, mask=None):
return Conv2D.call(self, x)
class SpikeDepthwiseConv2D(DepthwiseConv2D, SpikeLayer):
"""Spike 2D DepthwiseConvolution."""
def build(self, input_shape):
"""Creates the layer weights.
Must be implemented on all layers that have weights.
Parameters
----------
input_shape: Union[list, tuple, Any]
Keras tensor (future input to layer) or list/tuple of Keras tensors
to reference for weight shape computations.
"""
DepthwiseConv2D.build(self, input_shape)
self.init_neurons(input_shape.as_list())
if self.config.getboolean('cell', 'bias_relaxation'):
self.update_b()
@spike_call
def call(self, x, mask=None):
return DepthwiseConv2D.call(self, x)
class SpikeAveragePooling2D(AveragePooling2D, SpikeLayer):
"""Spike Average Pooling."""
def build(self, input_shape):
"""Creates the layer weights.
Must be implemented on all layers that have weights.
Parameters
----------
input_shape: Union[list, tuple, Any]
Keras tensor (future input to layer) or list/tuple of Keras tensors
to reference for weight shape computations.
"""
AveragePooling2D.build(self, input_shape)
self.init_neurons(input_shape.as_list())
@spike_call
def call(self, x, mask=None):
return AveragePooling2D.call(self, x)
class SpikeMaxPooling2D(MaxPooling2D, SpikeLayer):
"""Spike Max Pooling."""
def build(self, input_shape):
"""Creates the layer neurons and connections..
Parameters
----------
input_shape: Union[list, tuple, Any]
Keras tensor (future input to layer) or list/tuple of Keras tensors
to reference for weight shape computations.
"""
MaxPooling2D.build(self, input_shape)
self.init_neurons(input_shape.as_list())
@spike_call
def call(self, x, mask=None):
"""Layer functionality."""
print("WARNING: Rate-based spiking MaxPooling layer is not "
"implemented in TensorFlow backend. Falling back on "
"AveragePooling. Switch to Theano backend to use MaxPooling.")
return tf.nn.avg_pool2d(x, self.pool_size, self.strides, self.padding)
class SpikeNormAdd(Layer):
def __init__(self, activation=None, **kwargs):
self._initial_weights = kwargs.pop('weights', None)
self.kwargs = kwargs.copy()
kwargs.pop(str('config'))
super(SpikeNormAdd, self).__init__(**kwargs)
def build(self, input_shape):
super(SpikeNormAdd, self).build(input_shape)
n = len(input_shape)
self.filters = input_shape[0][-1]
# Weights
if self._initial_weights is not None:
weights_conv = self._initial_weights[:2]
self.b = self._initial_weights[2:]
self._initial_weights = None
else:
self.b = [None]*n
for i in range(len(self.b)):
self.b[i] = self.add_weight(
name="unshift"+str(i),
shape = (self.filters,),
initializer = "zeros",
trainable = True
)
weights_conv = (
np.zeros([1, 1, n*self.filters, self.filters]),
np.zeros(self.filters)
)
for k in range(self.filters):
weights_conv[0][:, :, k::self.filters, k] = 1
# Convolution layer
self.conv = SpikeConv2D(
filters=self.filters,
kernel_size=1,
weights=weights_conv,
**self.kwargs
)
del self.kwargs
conv_in_shape = tf.TensorShape(np.array(input_shape[0])*[1,1,1,n])
self.conv.build(conv_in_shape)
self.conv.data_format = 'channels_last'
def call(self, input_data):
tensor = [None]*len(self.b)
for i,image in enumerate(input_data):
tensor[i] = image+self.b[i]
out = tf.concat(tensor, axis=-1)
out = self.conv.call(out)
return out
def compute_output_shape(self, input_shape):
return input_shape[0] + (self.filters,)
def get_config(self):
config = super().get_config().copy()
config['weights'] = self.get_weights()
config.update({
'activation': None,
'filters': self.filters
})
return config
def set_weights(self, weights):
conv_weights = self.conv.get_weights()
if len(weights) == 2:
conv_weights[:2] = weights
#print('--',self.name,' - Basic Conv2D weights set.')
elif len(weights) == 2 + len(self.b):
conv_weights[:2] = weights[:2]
self.b = weights[2:]
#print('--',self.name,' - Basic Conv2D weights and input biases set.')
elif len(weights) == len(conv_weights) + len(self.b):
conv_weights = weights[:len(conv_weights)]
self.b = weights[len(conv_weights):]
#print('--',self.name,' - SpikeConv2D weights and input biases set.')
elif len(weights) == len(conv_weights):
conv_weights = weights
#print('--',self.name,' - SpikeConv2D weights set.')
else:
print('<!!! - ',self.name,'> The weights provided do not match the layer shape. \n \
- SpikeConv2D accepts list of either length 2 or 5. \n \
- Input biases accept list of length',len(self.b),'.\n \
(Always write [SpikeConv2D weights , input biases])')
self.conv.set_weights(conv_weights)
def get_weights(self):
return self.conv.get_weights()[:2]+self.b
def get_time(self):
return self.conv.time.eval
def set_time(self, time):
self.conv.time.assign(time)
def reset(self, sample_idx):
self.conv.reset_spikevars(tf.constant(sample_idx))
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
class SpikeNormReshape(NormReshape):
"""Spike Reshape layer."""
def __init__(self, **kwargs):
kwargs.pop(str('config'))
NormReshape.__init__(self, **kwargs)
self.dt = 1
def build(self, input_shape):
super(SpikeNormReshape, self).build(input_shape)
self.accum = self.add_weight(
name="accumulator",
shape = input_shape,
initializer = "zeros", trainable = False
)
def call(self, x, mask=None):
x = x + self.accum
out = x*(self.lmbda-self.shift)+(self.shift)
out = self.resh(out)
self.accum.assign(x)
return out
@staticmethod
def get_time():
pass
def reset(self, sample_idx):
"""Reset layer variables."""
self.accum.assign(tf.zeros_like(self.accum))
@property
def class_name(self):
"""Get class name."""
return self.__class__.__name__
def set_dt(self, t):
self.dt = t
class SpikeNormConv2D(NormConv2D, SpikeLayer):
"""Spike Normalized 2D Convolution."""
def build(self, input_shape):
NormConv2D.build(self, input_shape)
self.init_neurons(input_shape.as_list())
if self.config.getboolean('cell', 'bias_relaxation'):
self.update_b()
@spike_call
def call(self, x, mask=None):
return NormConv2D.call(self, x)
custom_layers = {
'SpikeFlatten': SpikeFlatten,
'SpikeReshape': SpikeReshape,
'SpikeZeroPadding2D': SpikeZeroPadding2D,
'SpikeDense': SpikeDense,
'SpikeConv2D': SpikeConv2D,
'SpikeDepthwiseConv2D': SpikeDepthwiseConv2D,
'SpikeAveragePooling2D': SpikeAveragePooling2D,
'SpikeMaxPooling2D': SpikeMaxPooling2D,
'SpikeConcatenate': SpikeConcatenate,
'SpikeUpsampling2D': SpikeUpSampling2D,
'SpikeNormReshape': SpikeNormReshape,
'SpikeNormAdd': SpikeNormAdd
}
``` |
{
"source": "joaroque/sidown",
"score": 2
} |
#### File: joaroque/sidown/sidown.py
```python
try:
import os
import io
import sys
import time
import urllib
import requests
except ImportError:
exit("Verifica os módulos e tente denovo ...")
#-------------COLOR/STYLE-------------#
class Color:
END = '\033[0m'
BOLD = '\33[1m'
RED = '\033[91m'
PISCA = '\33[5m'
BGRED = '\33[41m'
BLUE = '\033[94m'
GREEN = '\033[92m'
BGBLUE = '\33[44m'
WARNING = '\033[93m'
UNDERLINE = '\033[4m'
IMPORTANT = '\33[35m'
#atalhos-cores
r = '\033[91m'
g = '\033[92m'
e = '\033[0m'
b = '\33[1m'
abrir = r+"["+e
fechar = r+"]"+e
banner = """
.::::::. ::::::::::-. ... .:: . .::::::. :::.
;;;` ` ;;; ;;, `';, .;;;;;;;. ';;, ;; ;;;' `;;;;, `;;;
'[==/[[[[,[[[ `[[ [[,[[ \[[,'[[, [[, [[' [[[[[. '[[
''' $$$$ $$, $$$$$, $$$ Y$c$$$c$P $$$ "Y$c$$
88b dP888 888_,o8P'"888,_ _,88P "88"888 888 Y88
"YMmMY" MMM MMMMP"` "YMMMMMP" "M "M" MMM YM
╔═════════════════════════════════════════════════════════════╝
╚[04-2020]═════════════════[HaguacomH]═════════════════[V.1.0]$
"""
#-------------FUNÇÕES-------------#
#-------------LIMPATELA-------------#
def clearScr():
os.system('clear')
#-------------SAI-DO-PROGRAMA-------------#
def exit():
sys.exit()
#-------------BANNER&MENU-------------#
def menu():
ops=[g+"1", g+"2", g+"3", g+"4", g+"5", g+"6", g+"7", g+"8", g+"9", g+"0"+e]
print("\t"+abrir+ops[0]+fechar+" start full scan "+abrir+ops[4]+fechar+" find passwds.txt")
print("\t"+abrir+ops[1]+fechar+" find css files "+abrir+ops[5]+fechar+" find htaccess")
print("\t"+abrir+ops[2]+fechar+" find js files "+abrir+ops[6]+fechar+" find admin")
print("\t"+abrir+ops[3]+fechar+" find fonts "+abrir+ops[7]+fechar+" about & exit")
#-------------BAIXA-ARQUIVO-------------#
def down(m, nome=None):
if nome is None:
nome = os.path.basename(m.split("?")[0])
file_res = requests.get(m, stream=True)
if file_res.status_code == requests.codes.OK:
with open(nome, 'wb') as novo_arquivo:
for parte in file_res.iter_content(chunk_size=256):
novo_arquivo.write(parte)
print("\r Download {}".format(nome))
else:
file_res.raise_for_status()
#-------------ACHA OS DIRECTÓRIOS E ARQUIVOS DENTRO-------------#
def find(url,dirs_txt,files_txt):
#---LISTA DIR
dirs = io.open(dirs_txt , "r", encoding="utf8")
dirs_list = dirs.readlines()
dirs.close()
#print("Number of dirs: "+str(len(dirs_list)))
#---ACHA DIR
for i in range(len(dirs_list)):
search = dirs_list[i].strip()
target = (url + "/" + search + "/")
response = requests.get(target)
if response.status_code == 403:
print("\nDIR FOUND: {}".format(target))
#---LISTA ARQUIVO
files = io.open(files_txt , "r", encoding="utf8")
files_list = files.readlines()
files.close()
#---ACHA ARQUIVO
for x in range(len(files_list)):
sub_search = files_list[x].strip()
sub_target = (target + sub_search)
sub_response = requests.get(sub_target)
if sub_response.status_code == 200:
print(" File Found ══> {}".format(files_list[x]))
down(sub_target)
#---BAIXA ARQUVIO
else:
#print("Bad file")
pass
pass
pass
else:
#print("Bad dirs")
pass
pass
back2menu = str(input("\nBack to menu(Y/N): "))
if back2menu.upper() == "Y":
main()
else:
exit()
pass
pass
#--------------ACHA ARQUIVOS ÚNICOS-----------#
def find_single(url,wordlists):
#---LISTA WORDLIST
word = io.open(wordlists, "r", encoding="utf-8")
word_list = word.readlines()
word.close()
#---ACHA ARQUIVO
for i in range(len(word_list)):
search = word_list[i].strip()
target = (url + "/" + search)
response = requests.get(target)
if response.status_code == 200:
print("\nFILE FOUND: {}".format(target))
else:
pass
pass
back2menu = str(input("\nBack to menu(Y/N): "))
if back2menu.upper() == "Y":
main()
else:
exit()
pass
pass
#-------------ACHA O PAINEL ADMIN-------------#
def find_admin(url, wordlists):
#---LISTA WORDLIST
word = io.open(wordlists, "r", encoding="utf-8")
word_list = word.readlines()
word.close()
for i in range(len(word_list)):
search = word_list[i].strip()
target = (url + "/" + search + "/")
response = requests.get(target)
if response.status_code == 200:
print("ADMIN FOUND: {}".format(target))
else:
pass
pass
back2menu = str(input("\nBack to menu(Y/N): "))
if back2menu.upper() == "Y":
main()
else:
exit()
pass
pass
#-------------FUNÇÃO-PRINCIPAL-------------#
def main():
clearScr()
#---banner
b = (Color.BOLD+banner+Color.END)
print(b)
#---inserir url
url = str(input("\n[Ex: http://viado.com]\nInsere uma URL: http://"))
url = "http://"+url
#---função menu
clearScr()
print(b)
menu()
#---PROMPT
abrir = g+"["+e
fechar = g+"]"+e
op = int(input("\n"+r+"╔═══"+abrir+"SiDown"+fechar+r+"══"+abrir+url[7:]+fechar+r+"═"+abrir+"menu"+fechar+r+":\n╚═════> "+e))
if op == 1:
#full_scan()
print("Full scan")
elif op == 2:
p1 = "wordlists/css_dirs.txt"
p2 = "wordlists/css_files.txt"
find(url,p1,p2)
elif op == 3:
p1 = "wordlists/js_dirs.txt"
p2 = "wordlists/js_files.txt"
find(url,p1,p2)
elif op == 4:
p1 = "wordlists/fonts_dirs.txt"
p2 = "wordlists/fonts_files.txt"
find(url,p1,p2)
elif op == 5:
p1 = "wordlists/psswrd_dirs.txt"
find_single(url,p1)
elif op == 6:
p1 = "wordlists/htaccess.txt"
find_single(url,p1)
elif op == 7:
p1 = "wordlists/admin.txt"
find_admin(url,p1)
elif op == 8:
clearScr()
print(b)
print("""
SiDown is a tool for downloading public files from websites.
Simple, minimalist and noobie kkk
By: Joa_Roque
<NAME>
""")
time.sleep(10)
main()
else:
main()
pass
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("\n\nFIM DA EXECUÇÃO...\n")
pass
pass
``` |
{
"source": "joar/py-openapilib",
"score": 2
} |
#### File: source/examples/route-decorator.py
```python
import json
from typing import Union, GenericMeta, List, get_type_hints
from openapilib import serialize_spec, spec
def api_route(
summary: str=None,
description: str=None,
request_body_type: Union[type, GenericMeta]=None,
response_type: Union[type, GenericMeta]=None,
# ...
):
def inner(func):
# defaults
summary_: spec.Skippable[str] = spec.SKIP
description_: spec.Skippable[str] = spec.SKIP
response_type_: spec.SchemaSourceType = None
request_body_type_: spec.SchemaSourceType = None
tags_: List[str] = []
# ...
# Argument parsing
if description is None:
if func.__doc__ is not None:
description_ = func.__doc__
else:
description_ = spec.SKIP
if summary is not None:
summary_ = summary
else:
if description_ is not spec.SKIP:
summary_ = description_.strip().splitlines()[0]
if response_type is not None:
response_type_ = response_type
else:
response_type_ = get_type_hints(func).get('return')
if request_body_type is not None:
request_body_type_ = request_body_type
# Output
responses = {}
if response_type_ is not None:
responses = {
'200': spec.Response(
description=description_,
content={
'application/json': spec.MediaType(
schema=spec.Schema.from_type(
response_type_,
)
)
}
)
}
request_body = spec.SKIP
if request_body_type_ is not None:
request_body = spec.RequestBody(
content={
'application/json': spec.MediaType(
schema=spec.Schema.from_type(
request_body_type_,
)
)
}
)
operation_spec = spec.Operation(
summary=summary_,
description=description_,
request_body=request_body,
responses=responses,
tags=tags_
)
# Do something with the Operation spec: We'll attach it to the route
# handler for now
func.operation_spec = operation_spec
return func
return inner
# Our example route handler code
# ------------------------------------------------------------------------------
@api_route(request_body_type=List[int])
def example_handler(request) -> int:
pass
from openapilib.helpers import LazyPretty
print(
json.dumps(serialize_spec(example_handler.operation_spec))
)
```
#### File: py-openapilib/openapilib/helpers.py
```python
import json
from typing import TYPE_CHECKING, TypeVar, Callable, Any
if TYPE_CHECKING:
from .spec import Skippable
T = TypeVar('T')
def convert_skippable(
convert: Callable[
[
'Skippable[Any]'
],
T
],
) -> Callable[
[
'Skippable[Any]'
],
'Skippable[T]'
]:
def convert_if_not_skip(value: 'Skippable[Any]') -> 'Skippable[T]':
from .spec import SKIP
if value is SKIP:
return SKIP
else:
return convert(value)
return convert_if_not_skip
class LazyString:
NOTHING = object()
def __init__(self, callback):
self.callback = callback
self._result = self.NOTHING
@property
def result(self):
if self._result is self.NOTHING:
self._result = self.callback()
return self._result
def __str__(self):
return self.result
class LazyPretty(LazyString):
def __str__(self):
return '\n' + pretty_json(self.result)
class Pretty:
def __init__(self, obj):
self.obj = obj
def __str__(self):
return '\n' + pretty_json(self.obj)
def pretty_json(obj):
return json.dumps(
obj,
indent=2,
default=lambda o: repr(o),
)
``` |
{
"source": "joarreg/Sherpa.ai-Federated-Learning-Framework",
"score": 3
} |
#### File: shfl/data_base/data_base.py
```python
import abc
import numpy as np
def split_train_test(data, labels, dim):
"""
Method that randomly choose the train and test sets from data and labels.
# Arguments:
data: Numpy matrix with data for extract the validation data
labels: Numpy array with labels
dim: Size for validation data
# Returns:
new_data: Data, labels, validation data and validation labels
"""
randomize = np.arange(len(labels))
np.random.shuffle(randomize)
data = data[randomize, ]
labels = labels[randomize]
test_data = data[0:dim, ]
test_labels = labels[0:dim]
rest_data = data[dim:, ]
rest_labels = labels[dim:]
return rest_data, rest_labels, test_data, test_labels
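# Usage sketch (illustrative only): with 10 samples and dim=3, three randomly
# chosen samples form the test split and the remaining seven the train split, e.g.
#   rest_x, rest_y, test_x, test_y = split_train_test(data, labels, dim=3)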
class DataBase(abc.ABC):
"""
Abstract class for data base.
Load method must be implemented in order to create a database able to \
interact with the system, in particular with data distribution methods \
(see: [Data Distribution](../data_distribution)).
Load method should save data in the protected Attributes:
# Attributes:
* **train_data, train_labels, test_data, test_labels**
# Properties:
train: Returns train data and labels
test: Returns test data and labels
data: Returns train data, train labels, validation data, validation labels, test data and test labels
"""
def __init__(self):
self._train_data = []
self._test_data = []
self._train_labels = []
self._test_labels = []
@property
def train(self):
return self._train_data, self._train_labels
@property
def test(self):
return self._test_data, self._test_labels
@property
def data(self):
return self._train_data, self._train_labels, self._test_data, self._test_labels
@abc.abstractmethod
def load_data(self):
"""
Abstract method that loads the data
"""
def shuffle(self):
"""
Shuffles all data
"""
randomize = np.arange(len(self._train_labels))
np.random.shuffle(randomize)
self._train_data = self._train_data[randomize, ]
self._train_labels = self._train_labels[randomize]
randomize = np.arange(len(self._test_labels))
np.random.shuffle(randomize)
self._test_data = self._test_data[randomize, ]
self._test_labels = self._test_labels[randomize]
class LabeledDatabase(DataBase):
"""
Class to create generic labeled database from data and labels vectors
# Arguments
data: Data features to load
labels: Labels for this features
train_percentage: float between 0 and 1 to indicate how much data is dedicated to train
"""
def __init__(self, data, labels, train_percentage=0.8):
super(LabeledDatabase, self).__init__()
self._data = data
self._labels = labels
self._train_percentage = train_percentage
def load_data(self):
"""
Load data
# Returns
all_data : train data, train labels, test data and test labels
"""
test_size = round(len(self._data) * (1 - self._train_percentage))
self._train_data, self._train_labels, \
self._test_data, self._test_labels = split_train_test(self._data, self._labels, test_size)
self.shuffle()
return self.data
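# Illustrative sketch (not part of the library): wrap random data in a
# LabeledDatabase and load the (roughly) 80/20 train/test split. All shapes
# below are arbitrary demo values.
if __name__ == "__main__":
    _features = np.random.rand(100, 5)
    _labels = np.random.randint(0, 2, size=100)
    _database = LabeledDatabase(_features, _labels, train_percentage=0.8)
    _train_x, _train_y, _test_x, _test_y = _database.load_data()
    print(_train_x.shape, _test_x.shape)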
```
#### File: shfl/federated_aggregator/iowa_federated_aggregator.py
```python
from shfl.federated_aggregator.weighted_fedavg_aggregator import WeightedFedAvgAggregator
import numpy as np
class IowaFederatedAggregator(WeightedFedAvgAggregator):
"""
Class of the IOWA version of [WeightedFedAvgAggregator](../federated_aggregator/#weightedfedavgaggregator-class)
"""
def __init__(self):
super().__init__()
self._a = 0
self._b = 0
self._c = 0
self._y_b = 0
self._k = 0
self._performance = None
self._dynamic = None
def set_ponderation(self, performance, dynamic=True, a=0, b=0.2, c=0.8, y_b=0.4, k=3/4):
"""
Method which calculates the ponderation weights of each client based on the performance vector.
# Arguments:
performance: vector with the performance of each local client in a validation set
dynamic: boolean indicating if we use the dynamic or static version (default True)
a: first argument of linguistic quantifier (default 0)
b: second argument of linguistic quantifier (default 0.2)
c: third argument of linguistic quantifier (default 0.8)
y_b: fourth argument of linguistic quantifier (default 0.4)
k: distance param of the dynamic version (default 3/4)
"""
self._a = a
self._b = b
self._c = c
self._y_b = y_b
self._k = k
self._performance = performance
self._dynamic = dynamic
self._percentage = self.get_ponderation_weights()
def q_function(self, x):
"""
Method that returns the value of the linguistic quantifier Q for a given x
# Arguments:
x: value of the ordering function u (ordered performance of each local model)
# Returns:
q_value: value of the linguistic quantifier evaluated at x.
"""
if x <= self._a:
return 0
elif x <= self._b:
return (x - self._a) / (self._b - self._a) * self._y_b
elif x <= self._c:
return (x - self._b) / (self._c - self._b) * (1 - self._y_b) + self._y_b
else:
return 1
def get_ponderation_weights(self):
"""
Method that computes the OWA ponderation weights for each client from the ordered performances
# Returns:
ponderation_weights: ponderation of each client.
"""
ordered_idx = np.argsort(-self._performance)
self._performance = self._performance[ordered_idx]
num_clients = len(self._performance)
ponderation_weights = np.zeros(num_clients)
if self._dynamic:
max_distance = self._performance[0] - self._performance[-1]
vector_distances = np.array([self._performance[0] - self._performance[i] for i in range(num_clients)])
is_outlier = np.array([vector_distances[i] > self._k * max_distance for i in range(num_clients)])
num_outliers = int(np.sum(is_outlier))
self._c = 1 - num_outliers / num_clients
self._b = self._b * self._c
for i in range(num_clients):
ponderation_weights[i] = self.q_function((i + 1) / num_clients) - self.q_function(i / num_clients)
return ponderation_weights
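# Illustrative sketch (not part of the library): ponderation weights for three
# clients whose (made-up) validation performances are 0.9, 0.7 and 0.2, using
# the static version of the quantifier with its default parameters.
if __name__ == "__main__":
    _aggregator = IowaFederatedAggregator()
    _aggregator.set_ponderation(np.array([0.9, 0.7, 0.2]), dynamic=False)
    print(_aggregator._percentage)  # ~[0.53, 0.33, 0.13], summing to 1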
```
#### File: shfl/federated_aggregator/weighted_fedavg_aggregator.py
```python
import numpy as np
from shfl.federated_aggregator.federated_aggregator import FederatedAggregator
class WeightedFedAvgAggregator(FederatedAggregator):
"""
Implementation of Weighted Federated Averaging Aggregator. The aggregation of the parameters is based on the number of data \
in every node.
It implements [Federated Aggregator](../federated_aggregator/#federatedaggregator-class)
"""
def aggregate_weights(self, clients_params):
"""
Implementation of abstract method of class [AggregateWeightsFunction](../federated_aggregator/#federatedaggregator-class)
# Arguments:
clients_params: list of multi-dimensional (numeric) arrays. Each entry in the list contains the model's parameters of one client.
# Returns:
aggregated_weights: aggregator weights representing the global learning model
"""
clients_params_array = np.array(clients_params)
num_clients = clients_params_array.shape[0]
num_layers = clients_params_array.shape[1]
ponderated_weights = np.array([self._percentage[client] * clients_params_array[client, :] for client in range(num_clients)])
aggregated_weights = np.array([np.sum(ponderated_weights[:, layer], axis=0) for layer in range(num_layers)])
return aggregated_weights
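# Illustrative sketch (not part of the library): aggregate the parameters of
# two clients where client 0 holds 75% of the data and client 1 holds 25%.
# Setting the protected _percentage attribute directly is only for this demo.
if __name__ == "__main__":
    _aggregator = WeightedFedAvgAggregator()
    _aggregator._percentage = np.array([0.75, 0.25])
    _client_params = [np.ones((2, 3)), 3 * np.ones((2, 3))]  # one entry per client
    print(_aggregator.aggregate_weights(_client_params))  # 1.5 everywhere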
```
#### File: shfl/model/logistic_regression_model.py
```python
from shfl.model.model import TrainableModel
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
class LogisticRegressionModel(TrainableModel):
"""
This class offers support for scikit-learn logistic regression model. It implements [TrainableModel](../model/#trainablemodel-class)
# Arguments:
n_features: integer number of features (independent variables).
classes: array of classes to predict. At least 2 classes must be provided.
model_inputs: optional dictionary containing the [model input parameters](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
"""
def __init__(self, n_features, classes, model_inputs=None):
if model_inputs is None:
model_inputs = {}
self._check_initialization(n_features, classes)
self._model = LogisticRegression(**model_inputs)
self._n_features = n_features
classes = np.sort(np.asarray(classes))
self._model.classes_ = classes
n_classes = len(classes)
if n_classes == 2:
n_classes = 1
self.set_model_params(np.zeros((n_classes, n_features + 1)))
def train(self, data, labels):
"""
Implementation of abstract method of class [TrainableModel](../model/#trainablemodel-class)
# Arguments
data: Data, array-like of shape (n_samples, n_features)
labels: Target classes, array-like of shape (n_samples,)
"""
self._check_data(data)
self._check_labels(labels)
self._model.fit(data, labels)
def predict(self, data):
"""
Implementation of abstract method of class [TrainableModel](../model/#trainablemodel-class)
Arguments:
data: Data, array-like of shape (n_samples, n_features)
"""
self._check_data(data)
prediction = self._model.predict(data)
return prediction
def evaluate(self, data, labels):
"""
Implementation of abstract method of class [TrainableModel](../model/#trainablemodel-class)
Metrics for evaluating model's performance.
Arguments:
data: Data, array-like of shape (n_samples, n_features)
labels: Target classes, array-like of shape (n_samples,)
"""
self._check_data(data)
self._check_labels(labels)
prediction = self.predict(data)
bas = metrics.balanced_accuracy_score(labels, prediction)
cks = metrics.cohen_kappa_score(labels, prediction)
return bas, cks
def performance(self, data, labels):
"""
Implementation of abstract method of class [TrainableModel](../model/#trainablemodel-class)
Arguments:
data: Data, array-like of shape (n_samples, n_features)
labels: Target classes, array-like of shape (n_samples,)
"""
self._check_data(data)
self._check_labels(labels)
prediction = self.predict(data)
bas = metrics.balanced_accuracy_score(labels, prediction)
return bas
def get_model_params(self):
"""
Implementation of abstract method of class [TrainableModel](../model/#trainablemodel-class)
"""
return np.column_stack((self._model.intercept_, self._model.coef_))
def set_model_params(self, params):
"""
Implementation of abstract method of class [TrainableModel](../model/#trainablemodel-class)
"""
self._model.intercept_ = params[:,0]
self._model.coef_ = params[:, 1:]
def _check_data(self, data):
"""
Method that checks whether the data dimension is correct.
"""
if data.ndim == 1:
if self._n_features != 1:
raise AssertionError("Data need to have the same number of features described by the model, " + str(self._n_features)
+ ". Current data have only 1 feature.")
elif data.shape[1] != self._n_features:
raise AssertionError("Data need to have the same number of features described by the model, " + str(self._n_features) +
". Current data has " + str(data.shape[1]) + " features.")
def _check_labels(self, labels):
"""
Method that checks whether the classes are correct.
The classes in client's data must be the same as the input ones.
# Arguments:
labels: array with classes
"""
classes = np.unique(np.asarray(labels))
if not np.array_equal(self._model.classes_, classes):
raise AssertionError("Labels need to have the same classes described by the model, " + str(self._model.classes_)
+ ". Labels of this node are " + str(classes) + " .")
@staticmethod
def _check_initialization(n_features, classes):
"""
Method that checks if model's initialization is correct.
The number of features must be an integer equal or greater to one, and there must be at least two classes.
# Arguments:
n_features: number of features
classes: array of classes to predict
"""
if not isinstance(n_features, int):
raise AssertionError("n_features must be a positive integer number. Provided " + str(n_features) + " features.")
if n_features < 0:
raise AssertionError("It must verify that n_features > 0. Provided value " + str(n_features) + ".")
if len(classes) < 2:
raise AssertionError("It must verify that the number of classes > 1. Provided " + str(len(classes)) + " classes.")
if len(np.unique(classes)) != len(classes):
classes = list(classes)
duplicated_classes = [i_class for i_class in classes if classes.count(i_class) > 1]
raise AssertionError("No duplicated classes allowed. Class(es) duplicated: " + str(duplicated_classes) )
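# Illustrative sketch (not part of the library): fit the wrapper on a toy
# binary problem. The data, labels and class values below are made up.
if __name__ == "__main__":
    _model = LogisticRegressionModel(n_features=2, classes=[0, 1])
    _data = np.random.rand(50, 2)
    _labels = np.array([0, 1] * 25)
    _model.train(_data, _labels)
    print(_model.performance(_data, _labels))  # balanced accuracy on the same data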
```
#### File: shfl/private/federated_attack.py
```python
import abc
from shfl.private.federated_operation import FederatedTransformation
import random
import numpy as np
class FederatedDataAttack(abc.ABC):
"""
Interface defining method to apply an FederatedAttack over [FederatedData](../federated_operation/#federateddata-class)
"""
@abc.abstractmethod
def apply_attack(self, data):
"""
This method receives federated data to be modified and performs the required modifications \
(federated_attack) over it simulating the adversarial attack.
# Arguments:
federated_data: The data of nodes that we attack
"""
class ShuffleNode(FederatedTransformation):
"""
Implementation of Federated Transformation for shuffling labels of labeled data in order to implement \
data poisoning attack.
This class implements interface [FederatedTransformation](../federated_operation/#federatedtransformation-class).
"""
def apply(self, labeled_data):
"""
Method that implements apply abstract method of [FederatedTransformation](../federated_operation/#federatedtransformation-class) \
shuffling labels of labeled_data
"""
random.shuffle(labeled_data.label)
class FederatedPoisoningDataAttack(FederatedDataAttack):
"""
Class representing a poisoning data attack simulation. This simulation consists of shuffling \
the labels of some nodes. For that purpose, it uses class [ShuffleNode](./#shufflenode-class).
This class implements interface [FederatedDataAttack](./#federateddataattack-class).
# Arguments:
percentage: percentage of nodes that are adversarial ones
# Properties:
adversaries: Returns adversaries value
"""
def __init__(self, percentage):
super().__init__()
self._percentage = percentage
self._adversaries = []
@property
def adversaries(self):
return self._adversaries
def apply_attack(self, federated_data):
"""
Method that implements federated attack of data poisoning shuffling training labels of some nodes.
# Arguments:
federated_data: Instance of Federated Data [Federated Data](../federated_operation#federateddata-class)
"""
num_nodes = federated_data.num_nodes()
list_nodes = np.arange(num_nodes)
self._adversaries = random.sample(list(list_nodes), k=int(self._percentage / 100 * num_nodes))
boolean_adversaries = [1 if x in self._adversaries else 0 for x in list_nodes]
for node, boolean in zip(federated_data, boolean_adversaries):
if boolean:
node.apply_data_transformation(ShuffleNode())
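# Illustrative sketch (not part of the library): with percentage=20, roughly one
# in five nodes of an existing FederatedData instance gets its training labels
# shuffled, e.g.
#   attack = FederatedPoisoningDataAttack(percentage=20)
#   attack.apply_attack(federated_data)  # 'federated_data' assumed to already exist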
```
#### File: test/differential_privacy/test_composition_dp.py
```python
import pytest
import numpy as np
from shfl.private import DataNode
from shfl.private.data import UnprotectedAccess
from shfl.differential_privacy.composition_dp import ExceededPrivacyBudgetError
from shfl.differential_privacy.composition_dp import AdaptiveDifferentialPrivacy
from shfl.differential_privacy.dp_mechanism import GaussianMechanism
def test_exception__budget():
exception = ExceededPrivacyBudgetError(epsilon_delta=1)
assert str(exception) is not None
def test_exception_exceeded_privacy_budget_error():
scalar = 175
dp_mechanism = GaussianMechanism(1, epsilon_delta=(0.1, 1))
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=(1, 0),
differentially_private_mechanism=dp_mechanism)
node = DataNode()
node.set_private_data("scalar", scalar)
node.configure_data_access("scalar", data_access_definition)
with pytest.raises(ExceededPrivacyBudgetError):
node.query("scalar")
def test_constructor_bad_params():
with pytest.raises(ValueError):
AdaptiveDifferentialPrivacy(epsilon_delta=(1, 2, 3))
with pytest.raises(ValueError):
AdaptiveDifferentialPrivacy(epsilon_delta=(-1, 2))
with pytest.raises(ValueError):
AdaptiveDifferentialPrivacy(epsilon_delta=(1, -2))
with pytest.raises(ValueError):
AdaptiveDifferentialPrivacy(epsilon_delta=(1, 1), differentially_private_mechanism=UnprotectedAccess())
def test_configure_data_access():
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=(1, 1))
data_node = DataNode()
data_node.set_private_data("test", np.array(range(10)))
with pytest.raises(ValueError):
data_node.configure_data_access("test", data_access_definition)
data_node.query("test")
def test_data_access():
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=(1, 1))
data_node = DataNode()
array = np.array(range(10))
data_node.set_private_data("test", array)
data_node.configure_data_access("test", data_access_definition)
query_result = data_node.query("test", differentially_private_mechanism=GaussianMechanism(1,
epsilon_delta=(0.1, 1)))
assert query_result is not None
def test_exception_no_access_definition():
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=(1, 1))
data_node = DataNode()
array = np.array(range(10))
data_node.set_private_data("test", array)
data_node.configure_data_access("test", data_access_definition)
with pytest.raises(ValueError):
data_node.query("test")
def test_exception_budget():
dp_mechanism = GaussianMechanism(1, epsilon_delta=(0.1, 1))
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=(1, 1),
differentially_private_mechanism=dp_mechanism)
data_node = DataNode()
array = np.array(range(10))
data_node.set_private_data("test", array)
data_node.configure_data_access("test", data_access_definition)
with pytest.raises(ExceededPrivacyBudgetError):
for i in range(1, 1000):
data_node.query("test")
def test_exception_budget_2():
data_access_definition = AdaptiveDifferentialPrivacy(epsilon_delta=(1, 0.001))
data_node = DataNode()
array = np.array(range(10))
data_node.set_private_data("test", array)
data_node.configure_data_access("test", data_access_definition)
with pytest.raises(ExceededPrivacyBudgetError):
for i in range(1, 1000):
data_node.query("test", differentially_private_mechanism=GaussianMechanism(1, epsilon_delta=(0.1, 1)))
```
#### File: test/differential_privacy/test_sensitivity_sampler.py
```python
import numpy as np
from shfl.private.query import Mean
from shfl.differential_privacy.probability_distribution import NormalDistribution
from shfl.differential_privacy import SensitivitySampler
from shfl.differential_privacy import L1SensitivityNorm
from shfl.differential_privacy import L2SensitivityNorm
def test_sample_sensitivity_gamma():
distribution = NormalDistribution(0, 1)
sampler = SensitivitySampler()
_, mean = sampler.sample_sensitivity(Mean(), L1SensitivityNorm(), distribution, n=100, gamma=0.33)
assert np.abs(mean - 0) < 0.5
def test_sample_sensitivity_m():
distribution = NormalDistribution(0, 1)
sampler = SensitivitySampler()
_, mean = sampler.sample_sensitivity(Mean(), L1SensitivityNorm(), distribution, n=100, m=285)
assert np.abs(mean - 0) < 0.5
def test_sample_sensitivity_gamma_m():
distribution = NormalDistribution(0, 1)
sampler = SensitivitySampler()
_, mean = sampler.sample_sensitivity(Mean(), L1SensitivityNorm(), distribution, n=100, m=285, gamma=0.33)
assert np.abs(mean - 0) < 0.5
def test_l2_sensitivity_norm():
distribution = NormalDistribution(0, 1)
sampler = SensitivitySampler()
_, mean = sampler.sample_sensitivity(Mean(), L2SensitivityNorm(), distribution, n=100, m=285, gamma=0.33)
assert np.abs(mean - 0) < 0.5
```
#### File: test/federated_aggregator/test_cluster_fedavg_aggregator.py
```python
import numpy as np
from unittest.mock import Mock, patch
from shfl.federated_aggregator.cluster_fedavg_aggregator import ClusterFedAvgAggregator
@patch('shfl.federated_aggregator.cluster_fedavg_aggregator.KMeans')
def test_aggregate_weights(mock_kmeans):
cfa = ClusterFedAvgAggregator()
model_aggregator = Mock()
centers = np.random.rand(10)
model_aggregator.cluster_centers_ = centers
mock_kmeans.return_value = model_aggregator
clients_params = np.random.rand(90).reshape((10, 3, 3))
clients_params_array = np.concatenate((clients_params))
n_clusters = clients_params[0].shape[0]
res = cfa.aggregate_weights(clients_params)
mock_kmeans.assert_called_once_with(n_clusters=n_clusters, init='k-means++')
model_aggregator.fit.assert_called_once()
np.testing.assert_array_equal(clients_params_array, model_aggregator.fit.call_args[0][0])
assert isinstance(res, np.ndarray)
assert np.array_equal(res, centers)
```
#### File: test/federated_government/test_federated_images_classifier.py
```python
from shfl.federated_government.federated_images_classifier import FederatedImagesClassifier, ImagesDataBases
from shfl.model.deep_learning_model import DeepLearningModel
from shfl.federated_aggregator.federated_aggregator import FederatedAggregator
from shfl.private.federated_operation import FederatedData
from unittest.mock import Mock
import pytest
import random
import string
def test_images_classifier_iid():
example_database = list(ImagesDataBases.__members__.keys())[0]
fic = FederatedImagesClassifier(example_database)
for node in fic._federated_data:
assert isinstance(node._model, DeepLearningModel)
assert isinstance(fic._model, DeepLearningModel)
assert isinstance(fic._aggregator, FederatedAggregator)
assert isinstance(fic._federated_data, FederatedData)
assert fic._test_data is not None
assert fic._test_labels is not None
def test_images_classifier_noiid():
example_database = list(ImagesDataBases.__members__.keys())[0]
fic = FederatedImagesClassifier(example_database, False)
for node in fic._federated_data:
assert isinstance(node._model, DeepLearningModel)
assert isinstance(fic._model, DeepLearningModel)
assert isinstance(fic._aggregator, FederatedAggregator)
assert isinstance(fic._federated_data, FederatedData)
assert fic._test_data is not None
assert fic._test_labels is not None
def test_images_classifier_wrong_database():
letters = string.ascii_lowercase
wrong_database = ''.join(random.choice(letters) for i in range(10))
fic = FederatedImagesClassifier(wrong_database)
assert fic._test_data is None
with pytest.raises(AttributeError):
fic._model
fic._aggregator
fic._federated_data
def test_run_rounds():
example_database = list(ImagesDataBases.__members__.keys())[0]
fic = FederatedImagesClassifier(example_database)
fic.deploy_central_model = Mock()
fic.train_all_clients = Mock()
fic.evaluate_clients = Mock()
fic.aggregate_weights = Mock()
fic.evaluate_global_model = Mock()
fic.run_rounds(1)
fic.deploy_central_model.assert_called_once()
fic.train_all_clients.assert_called_once()
fic.evaluate_clients.assert_called_once_with(fic._test_data, fic._test_labels)
fic.aggregate_weights.assert_called_once()
fic.evaluate_global_model.assert_called_once_with(fic._test_data, fic._test_labels)
def test_run_rounds_wrong_database():
letters = string.ascii_lowercase
wrong_database = ''.join(random.choice(letters) for i in range(10))
fic = FederatedImagesClassifier(wrong_database)
fic.deploy_central_model = Mock()
fic.train_all_clients = Mock()
fic.evaluate_clients = Mock()
fic.aggregate_weights = Mock()
fic.evaluate_global_model = Mock()
fic.run_rounds(1)
fic.deploy_central_model.assert_not_called()
fic.train_all_clients.assert_not_called()
fic.evaluate_clients.assert_not_called()
fic.aggregate_weights.assert_not_called()
fic.evaluate_global_model.assert_not_called()
``` |
{
"source": "Joarrs/interpolazionedati",
"score": 3
} |
#### File: Joarrs/interpolazionedati/interpolazione.py
```python
import pandas as pd
import statistics
file_excel = 'dati.xls'
def crea_liste(excelFile):  # create the lists of times and coordinates
t_Action = pd.read_excel(excelFile, index_col=None, na_values=['NA'], usecols = "A").dropna().to_numpy()
x_Action = pd.read_excel(excelFile, index_col=None, na_values=['NA'], usecols = "B").dropna().to_numpy()
t_Vicon = pd.read_excel(excelFile, index_col=None, na_values=['NA'], usecols = "C").to_numpy()
x_Vicon = pd.read_excel(excelFile, index_col=None, na_values=['NA'], usecols = "D").to_numpy()
return t_Action, x_Action, t_Vicon, x_Vicon
def istantiVicini(tempo_action, t_vicon, x_vicon ):
distanze = []
x_vicine = []
print(tempo_action)
t_vicon = t_vicon.flatten()
x_vicon = x_vicon.flatten()
for element in t_vicon:
distanze.append(abs(element - tempo_action))
posizione_minima = distanze.index(min(distanze))
x_vicine.append(x_vicon[posizione_minima])
#print("prima ", t_vicon[posizione_minima:])
distanza_prima = abs(t_vicon[posizione_minima - 1] - tempo_action)
if posizione_minima + 1 < len(t_vicon):
distanza_dopo = abs(t_vicon[posizione_minima + 1] - tempo_action)
else:
distanza_dopo = distanza_prima
if distanza_prima <= distanza_dopo:
print("tempi Vicon vicini trovati :", t_vicon[posizione_minima - 1], t_vicon[posizione_minima])
x_vicine.append(x_vicon[posizione_minima - 1])
x_vicine.reverse()
else:
print("tempi Vicon vicini trovati :", t_vicon[posizione_minima], t_vicon[posizione_minima + 1])
if posizione_minima + 1 < len(t_vicon):
x_vicine.append(x_vicon[posizione_minima + 1])
else:
x_vicine.append(x_vicon[posizione_minima - 1])
x_vicine.reverse()
print(x_vicine)
return x_vicine
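# Editor's note (illustration, not in the original file): istantiVicini returns the x values
# of the two Vicon samples closest in time to the given Action Cam timestamp; the main loop
# below takes statistics.median of them, which for exactly two values is simply their mean.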
t_Action, x_Action, t_Vicon, x_Vicon = crea_liste(file_excel)
coordinate = []
"""for tempo in t_Action[0:4]:
x_vicini = istantiVicini(tempo, t_Vicon, x_Vicon)
#print(len(x_vicini))
#print("primo elemento di x_vicini = ", x_vicini[0], "secondo elemento: ", x_vicini[1], "media =", statistics.median(x_vicini))
coordinate.append(statistics.median(x_vicini))"""
for tempo in t_Action:
x_vicini = istantiVicini(tempo, t_Vicon, x_Vicon)
coordinate.append(statistics.median(x_vicini))
output = 'nuove_coordinate.xls'
df = pd.DataFrame(coordinate)
df.to_excel(output, index=False)
print(coordinate)
print("ci sono ", len(coordinate), " elementi interpolati e ", len(x_Action), " coordinate di Action Cam")
``` |
{
"source": "joar/rust-csv-py",
"score": 2
} |
#### File: rust-csv-py/rustcsv/__init__.py
```python
from __future__ import absolute_import
from typing import Union, BinaryIO
# Import the Rust extension module
from ._rustcsv import CSVReader as _RustCSVReader, CSVWriter, __build__
try:
from ._version import version
except ImportError:
version = "UNKNOWN"
__all__ = ["CSVReader", "CSVWriter", "__build__", "version"]
CSVReader = _RustCSVReader
# Can't subclass for better docstrings: https://github.com/PyO3/pyo3/issues/220
# class CSVReader(_RustCSVReader):
# def __new__(
# cls,
# source: Union[BinaryIO, str],
# delimiter: bytes = b",",
# terminator: bytes = b"\n",
# ):
# """
#
# Parameters
# ----------
# source
# :any:`binary file` or string to read CSV from.
# delimiter
# Byte to use as CSV field delimiter
# terminator
# Byte to use as CSV record terminator
#
# Returns
# -------
# CSVReader
# """
# self = super(CSVReader, cls).__new__(
# cls, path_or_fd=source, delimiter=delimiter, terminator=terminator
# )
#
# return self
```
#### File: rust-csv-py/rustcsv/__main__.py
```python
import click
from rustcsv import CSVReader
@click.group()
def cli():
pass
@cli.command()
@click.argument("file", type=click.File(mode="rb"))
def read(file):
for row in CSVReader(file):
print(row)
if __name__ == "__main__":
cli()
``` |
{
"source": "joas77/AnimeMovieDown",
"score": 3
} |
#### File: AnimeMovieDown/TelegramDownloader/TelegramDownloader.py
```python
import json
from telethon import TelegramClient, client, events
from telethon.tl.types import InputMessagesFilterDocument, InputMessagesFilterVideo
from telethon.utils import get_display_name
class TelegramDownloader:
def __init__(self, api_id, api_hash) -> None:
self._api_id = api_id
self._api_hash = api_hash
self._client = TelegramClient("Downloader",
self._api_id, self._api_hash)
def run(self):
with self._client:
self._client.loop.run_until_complete(self._main())
async def _main(self):
#TODO: remove dialogs limit
dialogs = await self._client.get_dialogs(43)
for d in dialogs:
name = get_display_name(d.entity)
if name in ("Fumetsu No Anata E"):
messages = self._client.iter_messages(name, filter=InputMessagesFilterDocument) #InputMessagesFilterVideo)
async for msg in messages:
print(get_display_name(msg.sender))
if hasattr(msg, "message"):
print(msg.message)
print("---------------")
def get_media(self, channel:str)->list:
pass
if __name__=="__main__":
# TODO: get api id and hash from config file
API_ID = "TELEGRAM_API_ID"
API_HASH = "TELEGRAM_API_HASH"
teldown = TelegramDownloader(API_ID, API_HASH)
teldown.run()
# TODO where is the script going to be executed
# how to set path
cfg_json_path = "./TelegramDownloader/config/channels.json"
with open(cfg_json_path) as json_file:
channels_cfg = json.load(json_file)
print(channels_cfg)
``` |
{
"source": "joas77/LectorNomina",
"score": 3
} |
#### File: LectorNomina/src/payroll_reader.py
```python
import xml.etree.ElementTree as ET
from zipfile import ZipFile
import os
import argparse
from payment import Payment
from payroll_plotter import PayrollPlotter
def get_concept(node, concept_tag, amount_tag, amount_tax_tag=None):
concept = node.get(concept_tag)
amount = float(node.get(amount_tag))
amount_taxed = float(node.get(amount_tax_tag)) if amount_tax_tag else 0.0
return (concept, amount + amount_taxed)
def get_date(node):
# TODO: return a date object
date = node.get("Fecha")
return date
def generate_payment(payroll_path):
nomina_path = payroll_path
tree = ET.parse(nomina_path)
root = tree.getroot()
perceptions = {}
deductions = {}
tag = "{http://www.sat.gob.mx/nomina12}" # TODO code a function to get tag
for node in tree.iter():
# TODO: create a dictionary with all strings keys
if tag+"Percepcion" == node.tag:
concept, amount = get_concept(node, "Concepto", "ImporteExento", "ImporteGravado")
perceptions[concept] = amount
elif tag+"Deduccion" == node.tag:
concept, amount = get_concept(node, "Concepto", "Importe")
deductions[concept] = amount
date = get_date(root)
payment = Payment(date, perceptions, deductions)
return payment
def find_xml(zipfile):
for file in zipfile.filelist:
if file.filename.endswith("xml"):
return file.filename
return None
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Parsea y gráfica archivos de nomina")
parser.add_argument("payroll_folder", type=str,
help="<nominas_folder>")
args = parser.parse_args()
payroll_folder = args.payroll_folder
payments = []
for file in os.listdir(payroll_folder):
file_path = payroll_folder + file
if file_path.endswith(".zip"):
zipfile = ZipFile(file_path)
xmlfile_path = zipfile.extract(find_xml(zipfile))
payments.append(generate_payment(xmlfile_path))
os.remove(xmlfile_path)
payments.sort(key = lambda x : x.date)
for payment in payments:
print(payment)
payroll_plt = PayrollPlotter(payments)
payroll_plt.plot()
``` |
{
"source": "joAschauer/evaluating_methods_for_reconstructing_large_gaps_in_historic_snow_depth_time_series",
"score": 2
} |
#### File: evaluating_methods_for_reconstructing_large_gaps_in_historic_snow_depth_time_series/src/cv_evaluate_monthly.py
```python
"""
evaluate monthly means and max
"""
import os
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from sklearn.metrics import r2_score, mean_squared_error
import scoring_utils as scu
import cv_results_database as db
import plotting_utils as pu
sns.set_color_codes(palette='deep')
sc = db.get_cv_results_as_df()
def scatterplot_true_pred_monthly(
methods_used,
station_grid='full',
filename=None,
stn=None,
equal_xy_axes=False):
fig, axes = plt.subplots(6, len(methods_used),
figsize=[len(methods_used)*2.3,6*2.55],
sharex=False, sharey=False)
#different markers and colors for different station grids:
markers={'full': "s",
'only_target_stations': "^"}
colors={'full': 'tab:orange',
'only_target_stations': "b"}
grids = ['full', 'only_target_stations']
gridlabel = {'full':'dense station network',
'only_target_stations':'evaluation stations only'}
months = {11: 'Nov',
12: 'Dec',
1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr'}
for row, month in enumerate([11,12,1,2,3,4]):
color = 'k'
for column, method in enumerate(methods_used):
if stn is None:
view = sc.loc[(sc['fill_method']==method) & (sc['station_grid']==station_grid)]
else:
view = sc.loc[(sc['fill_method']==method) & (sc['station_grid']==station_grid) & (sc['gap_stn']==stn)]
hs_true_files = view['HS_true_file'].tolist()
hs_pred_files = view['HS_pred_file'].tolist()
hs_true = pd.concat([pd.read_pickle(file) for file in hs_true_files],
axis=0)
hs_pred = pd.concat([pd.read_pickle(file) for file in hs_pred_files],
axis=0)
# remove nans
concated_series = pd.concat([hs_true, hs_pred], axis=1).dropna()
# filter month:
concated_series = concated_series.loc[concated_series.index.month==month]
hs_true = concated_series.iloc[:,0]
hs_pred = concated_series.iloc[:,1]
# markersize = 0.1 if stn is None else 2
if stn is None:
markersize = 0.1
else:
if hs_true.max() <= 300:
markersize = -0.0133*hs_true.max()+4.1
else:
markersize = 0.1
axes[row, column].scatter(
hs_true,
hs_pred,
s=markersize,
marker='o',
facecolor=color,
lw=0,
alpha=0.9,
label=station_grid)
try:
# linear fit to the scatterplot:
#obtain m (slope) and b(intercept) of linear regression line
m, b = np.polyfit(hs_true, hs_pred, 1)
# new x-vector
x_fitline = np.linspace(hs_true.min(), hs_true.max())
#add linear regression line to scatterplot
axes[row,column].plot(
x_fitline,
m*x_fitline+b,
linestyle='--',
color='k',
lw=0.8)
# coefficient of determination
r2 = r2_score(hs_true, hs_pred)
rmse = np.sqrt(mean_squared_error(hs_true, hs_pred))
maape = scu._maape_score(hs_true, hs_pred)
bias = scu._bias_score(hs_true, hs_pred)
plt.rcParams.update({
"text.usetex": True})
axes[row,column].text(
0.95,
0.05,
f"$r^2$ = {r2:.2f}\nRMSE = {rmse:.1f}\nBIAS = {bias:.2f}",
ha='right',
va='bottom',
transform=axes[row,column].transAxes,
fontsize=11
)
plt.rcParams.update({
"text.usetex": False})
except TypeError:
# only nans are in y_pred (for some stations/years for IDS)
pass
# y-labels
if column == 0:
axes[row, column].set_ylabel(f'modeled [cm] in\n{months[month]}', fontsize=11)
else:
axes[row, column].set_ylabel(None)
axes[row, column].tick_params(labelleft=False)
# x-labels
if row == 1:
axes[row, column].set_xlabel(f'measured [cm]',
fontsize=11)
else:
axes[row, column].set_xlabel(None)
axes[row, column].tick_params(labelbottom=False)
# titles
if row == 0:
axes[row, column].set_title(pu.METHOD_NAMES[method], fontsize=13)
ygmin = 0.; ygmax = 0.
xgmin = 0.; xgmax = 0.
for ax in axes.flatten():
#Get global minimum and maximum y values accross all axis
ymin, ymax = ax.get_ylim()
ygmin = min(ygmin,ymin)
ygmax = max(ygmax,ymax)
xmin, xmax = ax.get_xlim()
xgmin = min(xgmin,xmin)
xgmax = max(xgmax,xmax)
[ax.set_ylim((ygmin,ygmax)) for ax in axes.flatten()]
[ax.set_xlim((xgmin,xgmax)) for ax in axes.flatten()]
if equal_xy_axes:
gmin = min(xgmin,ygmin)
gmax = max(xgmax,ygmax)
for ax in axes.flatten():
ax.set_ylim((gmin,gmax))
ax.set_xlim((gmin,gmax))
ax.set_aspect(1, adjustable='box')
# draw x=y line:
for ax in axes.flatten():
ax.axline([0, 0], [1, 1], linestyle='-', color='k', lw=0.8)
plt.tight_layout()
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=300)
plt.close(fig)
else:
plt.show()
return None
standard_methods = [
'SingleStation_best_correlated_mean_ratio',
'Inverse distance squared',
'matiu vertical weighted_min_corr_-1.0',
'Elastic Net Regression',
'RandomForest_V3.5',
'SWE2HS_Snow17_shifted_dates']
scatterplot_true_pred_monthly(
methods_used=standard_methods,
station_grid='only_target_stations',
filename=None,
equal_xy_axes=True)
```
#### File: evaluating_methods_for_reconstructing_large_gaps_in_historic_snow_depth_time_series/src/HSmax_date_difference.py
```python
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from cv_results_database import get_cv_results_as_df
import plotting_utils as pu
sns.set_color_codes(palette='deep')
METHODS = [
'SingleStation_best_correlated_mean_ratio',
'Inverse distance squared',
'matiu vertical weighted_min_corr_-1.0',
'Elastic Net Regression',
'RandomForest_V3.5',
'SWE2HS_Snow17_shifted_dates',
# 'ERA5-land_RF_surrounding_gridcells_max_depth_70_n_estimators_200'
]
def calculate_HSmax_date_difference(HS_true_file, HS_pred_file):
hs_true = pd.read_pickle(HS_true_file)
hs_pred = pd.read_pickle(HS_pred_file)
true_max_date = hs_true.idxmax()
pred_max_date = hs_pred.idxmax()
try:
datediff = (true_max_date-pred_max_date).days
except TypeError:
# only nans in predicted series will cause idxmax to be
# nan and result in TypeError when nan is subtracted from timestamp
datediff = np.nan
return datediff
if __name__ == '__main__':
df = get_cv_results_as_df()
df = df.loc[df['gap_type']=='LOWO']
df = df.rename(columns={'bias': 'BIAS'})
df = df.loc[df.fill_method.isin(METHODS), :]
df['fill_method'] = df['fill_method'].map(pu.METHOD_NAMES)
df['HSmax_datediff'] = df.apply(lambda x: calculate_HSmax_date_difference(x.HS_true_file, x.HS_pred_file), axis=1)
# ax = df['HSmax_datediff'].plot.hist(bins=71)
# ax.set_xlabel("HSmax date measured - HSmax date predicted")
# plt.gcf().set_dpi(250)
# plt.show()
# g = sns.FacetGrid(df, col='fill_method')
# g.map(plt.hist, 'HSmax_datediff', bins=41)
# plt.gcf().set_dpi(250)
# plt.show()
fig, axs = plt.subplots(3,2, figsize=(5, 6), sharex=True, sharey=True)
for (method, data) in df.groupby('fill_method', sort=False):
if method=='BCS':
ax = axs[0,0]
if method=='IDW':
ax = axs[0,1]
if method=='WNR':
ax = axs[1,0]
if method=='ENET':
ax = axs[1,1]
if method=='RF':
ax = axs[2,0]
if method=='SM':
ax = axs[2,1]
# sns.histplot(data, x='HSmax_datediff', hue='station_grid', ax=ax, bins=21, stat='count', multiple="stack")
colors={'full': 'tab:orange',
'only_target_stations': "b"}
for grid, griddata in data.groupby('station_grid'):
ax.hist(griddata['HSmax_datediff'], bins=31, color=colors[grid], alpha=0.7, label=pu.GRIDLABELS[grid], edgecolor='grey')
ax.text(0.05, 0.9, method, ha='left', va='top', transform=ax.transAxes)
plt.tight_layout()
legend_kw = {}
top = axs[0,-1].get_position().ymax
right = axs[0,-1].get_position().xmax
legend_kw['bbox_to_anchor'] = [right, top+0.01]
legend_kw['borderaxespad'] = 0
legend_kw['edgecolor'] = 'black'
legend_kw['fancybox'] = False
legend_kw['framealpha'] = 1
# legend_kw['bbox_transform'] = fig.transFigure
legend_kw['loc'] = 4
legend_kw['ncol'] = 2
# 'fontsize': 11,
# 'frameon': False
handles, labels = axs[0,0].get_legend_handles_labels()
fig.legend(handles, labels, **legend_kw)
[ax.set_ylabel('# gaps') for ax in axs[:,0]]
fig.text(0.55, 0., 'HSmax date measured - HSmax date predicted [days]', ha='center', va='center')
plt.tight_layout()
fig.savefig('../results/revision/HSmax_date_differences.png', bbox_inches='tight', dpi=300)
plt.show()
description = df.groupby(['fill_method','station_grid'])['HSmax_datediff'].describe()
# sample = df.sort_values('HSmax_datediff', ascending=False).head(100)
# for x in sample.itertuples():
# hs_true = pd.read_pickle(x.HS_true_file)
# hs_pred = pd.read_pickle(x.HS_pred_file)
# pd.DataFrame({'true':hs_true, 'pred':hs_pred}, index=hs_true.index).plot()
# plt.show()
```
#### File: evaluating_methods_for_reconstructing_large_gaps_in_historic_snow_depth_time_series/src/scoring_utils.py
```python
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
def get_gap_HS_data(modeling_data, gap_period, gap_station):
"""
Get original HS data in the gap period. Only winter values (Nov-Apr) will
be returned.
"""
y_true = (modeling_data
.loc[modeling_data['stn']==gap_station, 'HS']
.loc[gap_period]
)
y_true = y_true.loc[y_true.index.month.isin([11,12,1,2,3,4])]
y_true.name = f'Measured HS Data'
return y_true
def get_station_altitude(modeling_data, gap_period, gap_station):
altitude = (modeling_data
.loc[modeling_data['stn']==gap_station, 'Z']
.loc[gap_period]
.iloc[0])
return altitude
def HSavg(series):
return series.mean()
def dHS1(series):
if series.isna().all():
result = np.nan
else:
result = np.count_nonzero(series.values >= 1)
return result
def dHSn(series, n):
"""
count number of days with HS above threshold n.
Parameters
----------
series : pd.Series
n : float
Returns
-------
result : float
"""
if series.isna().all():
result = np.nan
else:
result = np.count_nonzero(series.values >= n)
return result
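# Illustrative check (editor's addition): dHSn(pd.Series([0, 5, 12, 30]), n=10) == 2,
# i.e. two days with HS >= 10.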
def HSmax(series):
return series.max()
def _HSavg_diff(y_true, y_hat):
return np.mean(y_hat) - np.mean(y_true)
def _HSavg_abs_diff(y_true, y_hat):
return np.abs(np.mean(y_hat) -np.mean(y_true))
def _HSavg_relative_diff(y_true, y_hat):
return (np.mean(y_hat) - np.mean(y_true))/np.mean(y_true)
def _HSavg_relative_abs_diff(y_true, y_hat):
return np.abs(np.mean(y_hat) - np.mean(y_true))/np.mean(y_true)
def _dHS1_diff(y_true, y_hat):
return dHS1(y_hat)-dHS1(y_true)
def _dHS1_abs_diff(y_true, y_hat):
return np.abs(_dHS1_diff(y_true, y_hat))
def _dHS1_relative_diff(y_true, y_hat):
return (_dHS1_diff(y_true, y_hat)) / dHS1(y_true)
def _dHS1_relative_abs_diff(y_true, y_hat):
return (_dHS1_abs_diff(y_true, y_hat)) / dHS1(y_true)
def _HSmax_diff(y_true, y_hat):
return np.max(y_hat) - np.max(y_true)
def _HSmax_abs_diff(y_true, y_hat):
return np.abs(np.max(y_hat) -np.max(y_true))
def _HSmax_relative_diff(y_true, y_hat):
return (np.max(y_hat) - np.max(y_true))/np.max(y_true)
def _HSmax_relative_abs_diff(y_true, y_hat):
return np.abs(np.max(y_hat) - np.max(y_true))/np.max(y_true)
def _maape_score(y_true, y_hat):
"""
mean arctangent absolute percentage error (MAAPE) calculated by::
mean(arctan(abs(error / y_true)))
Reference:
<NAME>., & <NAME>. (2016). A new metric of absolute percentage error for
intermittent demand forecasts. International Journal of Forecasting, 32(3),
669-679.
"""
assert(len(y_true) == len(y_hat))
error = y_true-y_hat
# only divide if the error is not zero (leave it as zero, avoid 0/0), and don't
# divide if y is zero (avoid division by zero):
percentage_error = np.divide(error, y_true, out=np.zeros_like(y_true),
where=(error!=0) & (y_true!=0))
# if error is not zero and y is zero set percentage error to infinity
percentage_error[(error!=0) & (y_true==0)] = np.inf
return np.mean(np.arctan(np.abs(percentage_error)))
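# Illustrative check (editor's addition): for y_true = [2., 0.] and y_hat = [1., 0.]
# the percentage errors are [0.5, 0.0], so MAAPE = mean(arctan([0.5, 0.0])) ≈ 0.232.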
def _bias_score(y_true, y_hat):
assert(len(y_true) == len(y_hat))
error = y_hat-y_true
return np.average(error)
def get_climate_score_value(y_true, y_hat, metric):
func = {'HSavg_diff': _HSavg_diff,
'HSavg_abs_diff': _HSavg_abs_diff,
'HSavg_relative_diff': _HSavg_relative_diff,
'HSavg_relative_abs_diff': _HSavg_relative_abs_diff,
'dHS1_diff': _dHS1_diff,
'dHS1_abs_diff': _dHS1_abs_diff,
'dHS1_relative_diff': _dHS1_relative_diff,
'dHS1_relative_abs_diff': _dHS1_relative_abs_diff,
'HSmax_diff': _HSmax_diff,
'HSmax_abs_diff': _HSmax_abs_diff,
'HSmax_relative_diff': _HSmax_relative_diff,
'HSmax_relative_abs_diff': _HSmax_relative_abs_diff}
assert metric in func.keys()
try:
result = func[metric](y_true, y_hat)
except ZeroDivisionError: # exception for the relative errors
result = np.nan
return result
def get_daily_score_value(y_true, y_pred, metric):
assert metric in ['RMSE',
'RMSE_nonzero',
'RMSE_nonzero_true',
'RMSE_nonzero_pred',
'MAAPE',
'MAAPE_nonzero',
'MAAPE_nonzero_true',
'MAAPE_nonzero_pred',
'bias',
'r2_score',
'r2_score_nonzero',
'r2_score_nonzero_true',
'r2_score_nonzero_pred']
if 'nonzero' in metric:
data = pd.DataFrame({'y_true': y_true,
'y_pred': y_pred},
index = y_true.index)
if 'true' in metric:
data = data.loc[data['y_true']!=0, :]
elif 'pred' in metric:
data = data.loc[data['y_pred']!=0, :]
else: # no zero in true and pred
data = data.loc[data.all(axis=1)]
y_true = data['y_true']
y_pred = data['y_pred']
if y_true.size == 0 or y_pred.isna().all():
score_value = np.nan
elif 'RMSE' in metric:
score_value = np.sqrt(mean_squared_error(y_true.values,
y_pred.values))
elif 'MAAPE' in metric:
score_value = _maape_score(y_true.values,
y_pred.values)
elif 'bias' in metric:
score_value = _bias_score(y_true.values,
y_pred.values)
elif 'r2_score' in metric:
score_value = r2_score(y_true.values, y_pred.values)
return score_value
``` |
{
"source": "Joashc702/depthai-experiments",
"score": 2
} |
#### File: depthai-experiments/coronamask/main.py
```python
import logging
import cv2
from depthai_utils import DepthAI, DepthAIDebug
from config import MODEL_LOCATION, DEBUG
log = logging.getLogger(__name__)
class Main:
depthai_class = DepthAI
def __init__(self):
self.depthai = self.depthai_class(MODEL_LOCATION, 'people')
def parse_frame(self, frame, results):
pass
def run(self):
try:
log.info("Setup complete, parsing frames...")
for frame, results in self.depthai.capture():
self.parse_frame(frame, results)
finally:
del self.depthai
class MainDebug(Main):
depthai_class = DepthAIDebug
def parse_frame(self, frame, results):
super().parse_frame(frame, results)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
if __name__ == '__main__':
if DEBUG:
log.info("Setting up debug run...")
MainDebug().run()
else:
log.info("Setting up non-debug run...")
Main().run()
```
#### File: depthai-experiments/mjpeg-streaming/depthai.py
```python
import json
from pathlib import Path
import platform
import os
import subprocess
from time import time, sleep, monotonic
import cv2
import numpy as np
import depthai
import consts.resource_paths
from depthai_helpers import utils
from depthai_helpers.cli_utils import cli_print, parse_args, PrintColors
import socket
import socketserver
import threading
from PIL import Image
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from io import StringIO,BytesIO
class Streamer:
def __init__(self):
self.data_json = None
def get_data(self, data_input):
self.data_json = data_input
return self.data_json
def pass_data(self):
return json.dumps(self.data_json)
# TCPServer
class TCPServerRequest(socketserver.BaseRequestHandler):
def handle(self):
# Handle is called each time a client is connected
# When OpenDataCam connects, do not return - instead keep the connection open and keep streaming data
# First send HTTP header
header = 'HTTP/1.0 200 OK\r\nServer: Mozarella/2.2\r\nAccept-Range: bytes\r\nConnection: close\r\nMax-Age: 0\r\nExpires: 0\r\nCache-Control: no-cache, private\r\nPragma: no-cache\r\nContent-Type: application/json\r\n\r\n['
self.request.send(header.encode())
while True:
sleep(0.1)
json_string = self.server.mycustomadata
json_separator = ',\n'
json_to_send = json_string + json_separator
self.request.send(json_to_send.encode())
# HTTPServer MJPEG
class VideoStreamHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')
self.end_headers()
while True:
MJPEG_frame_RGB=cv2.cvtColor(MJPEG_frame,cv2.COLOR_BGR2RGB)
JPG = Image.fromarray(MJPEG_frame_RGB)
stream_file = BytesIO()
JPG.save(stream_file,'JPEG')
self.wfile.write("--jpgboundary".encode())
self.send_header('Content-type','image/jpeg')
self.send_header('Content-length',str(stream_file.getbuffer().nbytes))
self.end_headers()
JPG.save(self.wfile,'JPEG')
# sleep(0.01)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def decode_mobilenet_ssd(nnet_packet):
detections = []
# the result of the MobileSSD has detection rectangles (here: entries), and we can iterate through them
for _, e in enumerate(nnet_packet.entries()):
# for MobileSSD entries are sorted by confidence
# {id == -1} or {confidence == 0} is the stopper (special for OpenVINO models and MobileSSD architecture)
if e[0]['id'] == -1.0 or e[0]['confidence'] == 0.0 or e[0]['label'] > len(labels):
break
# save entry for further usage (as image package may arrive not the same time as nnet package)
detections.append(e)
return detections
def nn_to_depth_coord(x, y):
x_depth = int(nn2depth['off_x'] + x * nn2depth['max_w'])
y_depth = int(nn2depth['off_y'] + y * nn2depth['max_h'])
return x_depth, y_depth
def average_depth_coord(pt1, pt2):
factor = 1 - config['depth']['padding_factor']
x_shift = int((pt2[0] - pt1[0]) * factor / 2)
y_shift = int((pt2[1] - pt1[1]) * factor / 2)
avg_pt1 = (pt1[0] + x_shift), (pt1[1] + y_shift)
avg_pt2 = (pt2[0] - x_shift), (pt2[1] - y_shift)
return avg_pt1, avg_pt2
def show_mobilenet_ssd(entries_prev, frame, is_depth=0):
img_h = frame.shape[0]
img_w = frame.shape[1]
global config
# iterate through pre-saved entries & draw rectangle & text on image:
for e in entries_prev:
# the lower the confidence threshold, the more false positives we get
if e[0]['confidence'] > config['depth']['confidence_threshold']:
if is_depth:
pt1 = nn_to_depth_coord(e[0]['left'], e[0]['top'])
pt2 = nn_to_depth_coord(e[0]['right'], e[0]['bottom'])
color = (255, 0, 0) # bgr
avg_pt1, avg_pt2 = average_depth_coord(pt1, pt2)
cv2.rectangle(frame, avg_pt1, avg_pt2, color)
color = (255, 255, 255) # bgr
else:
pt1 = int(e[0]['left'] * img_w), int(e[0]['top'] * img_h)
pt2 = int(e[0]['right'] * img_w), int(e[0]['bottom'] * img_h)
color = (0, 0, 255) # bgr
x1, y1 = pt1
cv2.rectangle(frame, pt1, pt2, color)
# Handles the case where the TensorEntry object label is out of range
if e[0]['label'] > len(labels):
print("Label index=",e[0]['label'], "is out of range. Not applying text to rectangle.")
else:
pt_t1 = x1, y1 + 20
cv2.putText(frame, labels[int(e[0]['label'])], pt_t1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
pt_t2 = x1, y1 + 40
cv2.putText(frame, '{:.2f}'.format(100*e[0]['confidence']) + ' %', pt_t2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
if config['ai']['calc_dist_to_bb']:
pt_t3 = x1, y1 + 60
cv2.putText(frame, 'x:' '{:7.3f}'.format(e[0]['distance_x']) + ' m', pt_t3, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
pt_t4 = x1, y1 + 80
cv2.putText(frame, 'y:' '{:7.3f}'.format(e[0]['distance_y']) + ' m', pt_t4, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
pt_t5 = x1, y1 + 100
cv2.putText(frame, 'z:' '{:7.3f}'.format(e[0]['distance_z']) + ' m', pt_t5, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
return frame
def decode_age_gender_recognition(nnet_packet):
detections = []
for _, e in enumerate(nnet_packet.entries()):
if e[1]["female"] > 0.8 or e[1]["male"] > 0.8:
detections.append(e[0]["age"])
if e[1]["female"] > e[1]["male"]:
detections.append("female")
else:
detections.append("male")
return detections
def show_age_gender_recognition(entries_prev, frame):
# img_h = frame.shape[0]
# img_w = frame.shape[1]
if len(entries_prev) != 0:
age = (int)(entries_prev[0]*100)
cv2.putText(frame, "Age: " + str(age), (0, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
gender = entries_prev[1]
cv2.putText(frame, "G: " + str(gender), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
frame = cv2.resize(frame, (300, 300))
return frame
def decode_emotion_recognition(nnet_packet):
detections = []
for i in range(len(nnet_packet.entries()[0][0])):
detections.append(nnet_packet.entries()[0][0][i])
return detections
def show_emotion_recognition(entries_prev, frame):
# img_h = frame.shape[0]
# img_w = frame.shape[1]
e_states = {
0 : "neutral",
1 : "happy",
2 : "sad",
3 : "surprise",
4 : "anger"
}
if len(entries_prev) != 0:
max_confidence = max(entries_prev)
if(max_confidence > 0.7):
emotion = e_states[np.argmax(entries_prev)]
cv2.putText(frame, emotion, (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
frame = cv2.resize(frame, (300, 300))
return frame
def decode_landmarks_recognition(nnet_packet):
landmarks = []
for i in range(len(nnet_packet.entries()[0][0])):
landmarks.append(nnet_packet.entries()[0][0][i])
landmarks = list(zip(*[iter(landmarks)]*2))
return landmarks
def show_landmarks_recognition(entries_prev, frame):
img_h = frame.shape[0]
img_w = frame.shape[1]
if len(entries_prev) != 0:
for i in entries_prev:
try:
x = int(i[0]*img_h)
y = int(i[1]*img_w)
except:
continue
# # print(x,y)
cv2.circle(frame, (x,y), 3, (0, 0, 255))
frame = cv2.resize(frame, (300, 300))
return frame
# TCPServer initialize
server_TCP = socketserver.TCPServer(('127.0.0.1', 8070), TCPServerRequest)
th = threading.Thread(target=server_TCP.serve_forever)
def json_stream(frame_id, entries_prev):
img_h = frame.shape[0]
img_w = frame.shape[1]
json_dic = {"frame_id": frame_id, "object": []}
global config
# iterate through pre-saved entries & draw rectangle & text on image:
for e in entries_prev:
# the lower the confidence threshold, the more false positives we get
if e[0]['confidence'] > config['depth']['confidence_threshold']:
class_id = e[0]['label']
label_name = labels[int(e[0]['label'])]
center_x = 1 # replace with actual coordinates
center_y = 2 # replace with actual coordinates
width = 3 # replace with actual coordinates
height = 4 # replace with actual coordinates
confidence = e[0]['confidence']
data_json = send_json(json_dic, class_id, label_name, center_x, center_y, width, height, confidence)
streamer = Streamer()
streamer.get_data(data_json)
# start a thread so the TCP server and the video loop run at the same time
server_TCP.mycustomadata = streamer.pass_data()
# json_stream() is called once per frame; only start the streaming thread the first time
if not th.is_alive():
    th.daemon = True
    th.start()
def send_json(json_dic, class_id, label_name, center_x, center_y, width, height, confidence):
json_dic['object'].append(
{
'class_id': class_id,
'name': label_name,
'relative_coordinates': {
'center_x': center_x,
'center_y': center_y,
'width': width,
'height': height
},
'confidence': confidence
}
)
return json_dic
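# Example of the JSON built above (illustrative values only, editor's addition):
# {"frame_id": 42, "object": [{"class_id": 15, "name": "person",
#   "relative_coordinates": {"center_x": 1, "center_y": 2, "width": 3, "height": 4},
#   "confidence": 0.87}]}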
global args
try:
args = vars(parse_args())
except:
os._exit(2)
stream_list = args['streams']
if args['config_overwrite']:
args['config_overwrite'] = json.loads(args['config_overwrite'])
print("Using Arguments=",args)
if args['force_usb2']:
cli_print("FORCE USB2 MODE", PrintColors.WARNING)
cmd_file = consts.resource_paths.device_usb2_cmd_fpath
else:
cmd_file = consts.resource_paths.device_cmd_fpath
if args['dev_debug']:
cmd_file = ''
print('depthai will not load cmd file into device.')
calc_dist_to_bb = True
if args['disable_depth']:
calc_dist_to_bb = False
decode_nn=decode_mobilenet_ssd
show_nn=show_mobilenet_ssd
if args['cnn_model'] == 'age-gender-recognition-retail-0013':
decode_nn=decode_age_gender_recognition
show_nn=show_age_gender_recognition
calc_dist_to_bb=False
if args['cnn_model'] == 'emotions-recognition-retail-0003':
decode_nn=decode_emotion_recognition
show_nn=show_emotion_recognition
calc_dist_to_bb=False
if args['cnn_model'] in ['facial-landmarks-35-adas-0002', 'landmarks-regression-retail-0009']:
decode_nn=decode_landmarks_recognition
show_nn=show_landmarks_recognition
calc_dist_to_bb=False
if args['cnn_model']:
cnn_model_path = consts.resource_paths.nn_resource_path + args['cnn_model']+ "/" + args['cnn_model']
blob_file = cnn_model_path + ".blob"
suffix=""
if calc_dist_to_bb:
suffix="_depth"
blob_file_config = cnn_model_path + suffix + ".json"
blob_file_path = Path(blob_file)
blob_file_config_path = Path(blob_file_config)
if not blob_file_path.exists():
cli_print("\nWARNING: NN blob not found in: " + blob_file, PrintColors.WARNING)
os._exit(1)
if not blob_file_config_path.exists():
cli_print("\nWARNING: NN json not found in: " + blob_file_config, PrintColors.WARNING)
os._exit(1)
with open(blob_file_config) as f:
data = json.load(f)
try:
labels = data['mappings']['labels']
except:
print("Labels not found in json!")
print('depthai.__version__ == %s' % depthai.__version__)
print('depthai.__dev_version__ == %s' % depthai.__dev_version__)
if platform.system() == 'Linux':
ret = subprocess.call(['grep', '-irn', 'ATTRS{idVendor}=="03e7"', '/etc/udev/rules.d'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if(ret != 0):
cli_print("\nWARNING: Usb rules not found", PrintColors.WARNING)
cli_print("\nSet rules: \n"
"""echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"' | sudo tee /etc/udev/rules.d/80-movidius.rules \n"""
"sudo udevadm control --reload-rules && udevadm trigger \n"
"Disconnect/connect usb cable on host! \n", PrintColors.RED)
os._exit(1)
if not depthai.init_device(cmd_file, args['device_id']):
print("Error initializing device. Try to reset it.")
exit(1)
print('Available streams: ' + str(depthai.get_available_steams()))
# Do not modify the default values in the config Dict below directly. Instead, use the `-co` argument when running this script.
config = {
# Possible streams:
# ['left', 'right','previewout', 'metaout', 'depth_sipp', 'disparity', 'depth_color_h']
# If "left" is used, it must be in the first position.
# To test depth use:
# 'streams': [{'name': 'depth_sipp', "max_fps": 12.0}, {'name': 'previewout', "max_fps": 12.0}, ],
'streams': stream_list,
'depth':
{
'calibration_file': consts.resource_paths.calib_fpath,
'padding_factor': 0.3,
'depth_limit_m': 10.0, # In meters, for filtering purpose during x,y,z calc
'confidence_threshold' : 0.5, #Depth is calculated for bounding boxes with confidence higher than this number
},
'ai':
{
'blob_file': blob_file,
'blob_file_config': blob_file_config,
'calc_dist_to_bb': calc_dist_to_bb,
'keep_aspect_ratio': not args['full_fov_nn'],
},
'board_config':
{
'swap_left_and_right_cameras': args['swap_lr'], # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
'left_fov_deg': args['field_of_view'], # Same on 1097 and 1098OBC
'rgb_fov_deg': args['rgb_field_of_view'],
'left_to_right_distance_cm': args['baseline'], # Distance between stereo cameras
'left_to_rgb_distance_cm': args['rgb_baseline'], # Currently unused
'store_to_eeprom': args['store_eeprom'],
'clear_eeprom': args['clear_eeprom'],
'override_eeprom': args['override_eeprom'],
},
#'video_config':
#{
# 'rateCtrlMode': 'cbr',
# 'profile': 'h265_main', # Options: 'h264_baseline' / 'h264_main' / 'h264_high' / 'h265_main'
# 'bitrate': 8000000, # When using CBR
# 'maxBitrate': 8000000, # When using CBR
# 'keyframeFrequency': 30,
# 'numBFrames': 0,
# 'quality': 80 # (0 - 100%) When using VBR
#}
}
if args['board']:
board_path = Path(args['board'])
if not board_path.exists():
board_path = Path(consts.resource_paths.boards_dir_path) / Path(args['board'].upper()).with_suffix('.json')
if not board_path.exists():
print('ERROR: Board config not found: {}'.format(board_path))
os._exit(2)
with open(board_path) as fp:
board_config = json.load(fp)
utils.merge(board_config, config)
if args['config_overwrite'] is not None:
config = utils.merge(args['config_overwrite'],config)
print("Merged Pipeline config with overwrite",config)
if 'depth_sipp' in config['streams'] and ('depth_color_h' in config['streams'] or 'depth_mm_h' in config['streams']):
print('ERROR: depth_sipp is mutually exclusive with depth_color_h')
exit(2)
# del config["streams"][config['streams'].index('depth_sipp')]
# Append video stream if video recording was requested and stream is not already specified
video_file = None
if args['video'] is not None:
# open video file
try:
video_file = open(args['video'], 'wb')
if config['streams'].count('video') == 0:
config['streams'].append('video')
except IOError:
print("Error: couldn't open video file for writing. Disabled video output stream")
if config['streams'].count('video') == 1:
config['streams'].remove('video')
stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in config['streams']]
# create the pipeline, here is the first connection with the device
p = depthai.create_pipeline(config=config)
if p is None:
print('Pipeline is not created.')
exit(3)
nn2depth = depthai.get_nn_to_depth_bbox_mapping()
t_start = time()
frame_count = {}
frame_count_prev = {}
for s in stream_names:
frame_count[s] = 0
frame_count_prev[s] = 0
entries_prev = []
process_watchdog_timeout=10 #seconds
def reset_process_wd():
global wd_cutoff
wd_cutoff=monotonic()+process_watchdog_timeout
return
reset_process_wd()
frame_id = 0
# start MJPEG HTTP Server
global MJPEG_frame
server_HTTP = ThreadedHTTPServer(('localhost', 8090), VideoStreamHandler)
print("server started")
th2 = threading.Thread(target=server_HTTP.serve_forever)
th2.daemon = True
th2.start()
while True:
# retrieve data from the device
# data is stored in packets, there are nnet (Neural NETwork) packets which have additional functions for NNet result interpretation
nnet_packets, data_packets = p.get_available_nnet_and_data_packets()
packets_len = len(nnet_packets) + len(data_packets)
if packets_len != 0:
reset_process_wd()
else:
cur_time=monotonic()
if cur_time > wd_cutoff:
print("process watchdog timeout")
os._exit(10)
for _, nnet_packet in enumerate(nnet_packets):
entries_prev = decode_nn(nnet_packet)
frame_id += 1
for packet in data_packets:
if packet.stream_name not in stream_names:
continue # skip streams that were automatically added
packetData = packet.getData()
if packetData is None:
print('Invalid packet data!')
continue
elif packet.stream_name == 'previewout':
# the format of the previewout image is CHW (Channel, Height, Width), but OpenCV needs HWC, so we
# change shape (3, 300, 300) -> (300, 300, 3)
data0 = packetData[0,:,:]
data1 = packetData[1,:,:]
data2 = packetData[2,:,:]
frame = cv2.merge([data0, data1, data2])
nn_frame = show_nn(entries_prev, frame)
cv2.putText(nn_frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
cv2.imshow('previewout', nn_frame)
# pass MJPEG streaming
MJPEG_frame = nn_frame
elif packet.stream_name == 'left' or packet.stream_name == 'right' or packet.stream_name == 'disparity':
frame_bgr = packetData
cv2.putText(frame_bgr, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
cv2.putText(frame_bgr, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
if args['draw_bb_depth']:
show_nn(entries_prev, frame_bgr, is_depth=True)
cv2.imshow(packet.stream_name, frame_bgr)
elif packet.stream_name.startswith('depth'):
frame = packetData
if len(frame.shape) == 2:
if frame.dtype == np.uint8: # grayscale
cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
else: # uint16
frame = (65535 // frame).astype(np.uint8)
#colorize depth map, comment out code below to obtain grayscale
frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT)
# frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
else: # bgr
cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
cv2.putText(frame, "fps: " + str(frame_count_prev[packet.stream_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
if args['draw_bb_depth']:
show_nn(entries_prev, frame, is_depth=True)
cv2.imshow(packet.stream_name, frame)
elif packet.stream_name == 'jpegout':
jpg = packetData
mat = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
cv2.imshow('jpegout', mat)
elif packet.stream_name == 'video':
videoFrame = packetData
videoFrame.tofile(video_file)
elif packet.stream_name == 'meta_d2h':
str_ = packet.getDataAsStr()
dict_ = json.loads(str_)
print('meta_d2h Temp',
' CSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['css']),
' MSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['mss']),
' UPA:' + '{:6.2f}'.format(dict_['sensors']['temperature']['upa0']),
' DSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['upa1']))
frame_count[packet.stream_name] += 1
json_stream(frame_id, entries_prev)
t_curr = time()
if t_start + 1.0 < t_curr:
t_start = t_curr
for s in stream_names:
frame_count_prev[s] = frame_count[s]
frame_count[s] = 0
key = cv2.waitKey(1)
if key == ord('c'):
depthai.request_jpeg()
elif key == ord('q'):
break
del p  # to stop the pipeline, the pipeline object should be deleted, otherwise the device will continue working. This is required if you are going to add code after the main loop; otherwise you can omit it.
depthai.deinit_device()
# Close video output file if was opened
if video_file is not None:
video_file.close()
print('py: DONE.')
``` |
{
"source": "Joash-JW/Auto-DCA",
"score": 3
} |
#### File: Joash-JW/Auto-DCA/td.py
```python
from config import config
from broker import Broker
from requests import post, get
from requests.models import Response
class TD(Broker):
# TD Ameritrade Implementation. Paper trading is only available via thinkorswim.
def __init__(self):
self._place_order_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders'.format(config['TD_ID'])
self._refresh_token_url = 'https://api.tdameritrade.com/v1/oauth2/token'
def print_response(self, response: Response):
print('{name} Broker - HTTP_CODE={status_code}&MESSAGE={message}'.format(
name=self.name, status_code=response.status_code, message=response.text
))
def get_price(self):
# Refer to TD API - https://developer.tdameritrade.com/quotes/apis/get/marketdata/%7Bsymbol%7D/quotes
query_url = 'https://api.tdameritrade.com/v1/marketdata/{}/quotes'
params = {'apikey': config['TD_CONSUMER_KEY'] + '@AMER.OAUTHAP'}
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + config['TD_ACCESS_TOKEN']
}
# Attempt 1 to see response status code.
response = get(query_url.format(config['SYMBOL']), params=params, headers=headers)
# Refresh token if status code 401.
# From TD - indicating the caller must pass a valid Authorization in the HTTP authorization request header.
if response.status_code == 401:
self.refresh_token()
# Resend request using the new access token.
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + config['TD_ACCESS_TOKEN']
}
response = get(query_url.format(config['SYMBOL']), params=params, headers=headers)
assert response.status_code == 200 # only status code 200 is able to pass this step
return float(response.json()[config['SYMBOL']]['askPrice']) # return ask price
def refresh_token(self):
# Refer to TD API - https://developer.tdameritrade.com/authentication/apis/post/token-0
data = {
'grant_type': 'refresh_token',
'refresh_token': config['TD_REFRESH_TOKEN'],
'access_type': '',
'code': config['TD_CODE'],
'client_id': config['TD_CONSUMER_KEY'] + '@AMER.OAUTHAP',
'redirect_uri': config['TD_CALLBACK_URL']
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
response = post(self._refresh_token_url, headers=headers, data=data)
self.print_response(response)
if response.status_code == 200:
# New access token
config['TD_ACCESS_TOKEN'] = response.json()['access_token']
def place_order(self, quantity):
# Refer to TD API - https://developer.tdameritrade.com/account-access/apis/post/accounts/%7BaccountId%7D/orders-0
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + config['TD_ACCESS_TOKEN']
}
# This request does not actually place the order.
# Test request to ensure status code 200.
response = post(self._place_order_url, headers=headers)
# Refresh token if status code 401.
if response.status_code == 401:
self.refresh_token()
# Update with new access token after refresh.
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + config['TD_ACCESS_TOKEN']
}
# This specifies the order that will be placed.
data = {
'orderType': 'MARKET',
'session': 'NORMAL',
'duration': 'DAY',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [
{
'instruction': 'Buy',
'quantity': quantity,
'instrument': {
'symbol': config['SYMBOL'],
'assetType': 'EQUITY'
}
}
]
}
response = post(self._place_order_url, headers=headers, json=data)
self.print_response(response)
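# Hedged usage sketch (editor's addition; DCA_AMOUNT is a hypothetical config key):
#     td = TD()
#     price = td.get_price()
#     td.place_order(quantity=int(config['DCA_AMOUNT'] // price))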
``` |
{
"source": "Joash-JW/Boro",
"score": 3
} |
#### File: Joash-JW/Boro/boro.py
```python
from trading_calendar import TradingCalendar as tc
from pytz import timezone
from datetime import datetime, timedelta
class Boro():
def __init__(self):
self._dca_date = None
self._tc = tc()
@property
def dca_date(self):
return self._dca_date
@dca_date.setter
def dca_date(self, new_dca_date):
if new_dca_date.date() < self.get_lse_time_now().date():
raise ValueError("New DCA date is in the past")
print("Next DCA Date: " + new_dca_date.strftime("%Y-%m-%d %H:%M:%S"))
self._dca_date = new_dca_date
def get_lse_time_now(self):
""" Returns current time of the London Stock Exchange.
"""
return datetime.now(tz=timezone('Europe/London'))
def get_est_time_now(self):
""" Returns current time of the US Stock Exchange.
"""
return datetime.now(tz=timezone("US/Eastern"))
def set_next_dca_date(self, market):
""" Set the next Dollar-Cost Averaging date
"""
date = self.get_est_time_now() if market.upper() == 'US' else self.get_lse_time_now()
while True:
try:
self.dca_date = self._tc.compute_next_dca_date(date)
break
except ValueError:
print("Calculating next DCA Date based on " + date.strftime("%Y-%m-%d %H:%M:%S"))
date += timedelta(days=7) # search the next week
is_open = False
if market.upper() == 'US':
self._tc.set_us_market_calendar_cache(self.dca_date.year, self.dca_date.month)
is_open = self._tc.check_us_market_open(self.dca_date)
while not is_open:
# move DCA date to next earliest market open day
self.dca_date += timedelta(days=1)
is_open = self._tc.check_us_market_open(self.dca_date)
else:
# for UK market
# TODO: Add check for UK market open on a day
pass
```
#### File: Joash-JW/Boro/trading_calendar.py
```python
from config import config
from calendar import Calendar
from datetime import datetime
from requests import get
class TradingCalendar():
def __init__(self):
self._us_market_calendar_cache = None
def set_us_market_calendar_cache(self, year, month):
""" Set the US market calendar based on the given month and year.
This method sets the US market calendar based on the given month and year.
Since it is a cache of the calendar, it only queries the api when update is needed.
"""
if self._us_market_calendar_cache is None or self._us_market_calendar_cache['month'] != month or self._us_market_calendar_cache['year'] != year:
while True:
try:
print("Querying US market calendar date for year={year}, month={month}".format(year=year, month=month))
response = get(
'https://api.tradier.com/v1/markets/calendar',
params={'month': str(month), 'year': str(year)},
headers={'Authorization': 'Bearer <TOKEN>', 'Accept': 'application/json'}
)
assert response.status_code == 200
break
except AssertionError:
print("Unable to get US market calendar")
self._us_market_calendar_cache = response.json()['calendar']
def compute_next_dca_date(self, previous_date):
""" Returns the computed next Dollar-Cost Averaging date.
"""
next_dca_date = [
    date for date in Calendar().itermonthdays4(previous_date.year, previous_date.month)
    if date[1] == previous_date.month and date[3] == config['DCA_DAY']
][config['DCA_WEEK']]
return datetime(next_dca_date[0], next_dca_date[1], next_dca_date[2], config['DCA_HOUR'], config['DCA_MINUTE'])
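# Illustrative note (editor's addition; the calendar module uses Monday == 0): with
# DCA_DAY = 4 (Friday) and DCA_WEEK = 1, the Fridays of March 2021 are the 5th, 12th,
# 19th and 26th, so the result is 2021-03-12 at DCA_HOUR:DCA_MINUTE.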
def check_us_market_open(self, date):
""" Returns a boolean value true if the market is open on the given date and time, false otherwise.
"""
calendar = self._us_market_calendar_cache['days']['day']
calendar_index = self.binary_search(calendar, 0, len(calendar)-1, date)
day_open = True if calendar[calendar_index]['status'] == 'open' else False
if day_open:
hour_open = True if date < datetime.strptime(
date.strftime("%Y-%m-%d") + " " + calendar[calendar_index]['open']['end'], "%Y-%m-%d %H:%M"
) else False
return hour_open
else:
return False
def binary_search(self, date_arr, left, right, search_date):
""" Binary search for the date in the calendar cache as it is sorted. This is mainly for my practice.
"""
mid = (left + right) // 2
mid_date = datetime.strptime(date_arr[mid]['date'], '%Y-%m-%d')
if (mid_date.date() == search_date.date()):
return mid
elif (mid_date.date() < search_date.date()):
# mid_date is before search_date, hence look right
return self.binary_search(date_arr, mid+1, right, search_date)
else:
# search left
return self.binary_search(date_arr, left, mid-1, search_date)
``` |
{
"source": "Joash-JW/CZ3004-MDP-Image-Recognition",
"score": 3
} |
#### File: rpi/woImgReg/ArduinoV3.py
```python
import serial
import threading
import time
class ArduinoV3:
def __init__(self):
self.baudrate = 115200
self.serial = 0
self.connected = False
def connect(self):
try:
self.serial = serial.Serial("/dev/ttyACM0", self.baudrate, write_timeout = 0)
print("Connected to Arduino 0 successfully.")
self.connected = True
return 1
except:
try:
self.serial = serial.Serial("/dev/ttyACM1", self.baudrate, write_timeout = 0)
print("Connected to Arduino 1 successfully.")
self.connected = True
return 1
except Exception as e2:
print("Failed to connect to Arduino: %s" %str(e2))
self.connected = False
return 0
def readThread(self, pc, android):
while True:
try:
message = self.serial.readline()
print("Read from Arduino: %s" %str(message))
if len(message) <= 1:
continue
if (message[0] == 80):
pc.write(message[1:])
continue
if (message[0] == 68):
android.write(message[1:])
continue
except Exception as e:
print("Failed to read from Arduino: %s" %str(e))
self.connected = False
return
def write(self, message):
try:
self.serial.write(message)
print("Write to Arduino: %s" %str(message))
print()
except Exception as e:
print("Failed to write to Arduino: %s" %str(e))
```
#### File: rpi/woImgReg/RpiMainV3.py
```python
import _thread
import os
from ArduinoV3 import *
from PCV3 import *
from AndroidV3 import *
class Main:
def __init__(self):
os.system("sudo hciconfig hci0 piscan")
print("Please wait...")
self.arduino = ArduinoV3()
self.pc = PCV3()
self.android = AndroidV3()
def test(self):
while True:
if self.arduino.connected == False:
result = self.arduino.connect()
if (result == 0):
continue
try:
_thread.start_new_thread(self.arduino.readThread, (self.pc, self.android))
print("Arduino thread started.")
except Exception as e:
print("Arduino threading error: %s" %str(e))
if self.pc.connected == False:
self.pc.connect()
if self.pc.connected == True:
try:
_thread.start_new_thread(self.pc.readThread, (self.arduino, self.android))
print("PC thread started.")
except Exception as e:
print("PC threading error: %s" %str(e))
            if not self.android.connected:
                self.android.connect()
            if self.android.connected:
try:
_thread.start_new_thread(self.android.readThread, (self.arduino, self.pc))
print("Android thread started.")
except Exception as e:
print("Android threading error: %s" %str(e))
def disconnectAll(self):
try:
self.android.disconnect()
self.pc.disconnect()
except:
pass
if __name__ == "__main__":
a = Main()
try:
a.test()
except KeyboardInterrupt:
print("Terminating program...")
a.disconnectAll()
``` |
{
"source": "joasiee/elastix",
"score": 2
} |
#### File: thesispy/elastix_wrapper/watchdog.py
```python
from pathlib import Path
import re
import shutil
import threading
import time
import wandb
import os
import pandas as pd
import numpy as np
class SaveStrategy:
def save(self, headers, row, resolution) -> None:
pass
def save_custom(self, metric, value) -> None:
pass
def close(self) -> None:
pass
class SaveStrategyWandb(SaveStrategy):
def __init__(self, experiment, run_dir: Path, batch_size: int = 1) -> None:
wandb.init(project=experiment.project,
name=str(experiment.params), reinit=True)
wandb.config.update(experiment.params.params)
self.run_dir = run_dir
self.batch_size = batch_size
self._rowcount = 0
self._sum_time = 0
self._resolution = 0
self._buffer = (None, None)
def _reset_state(self):
self._rowcount = 0
self._sum_time = 0
self._resolution = 0
self._buffer = (None, None)
def _log_buffer(self):
if self._rowcount > 0:
headers, row = self._buffer
row[-1] = self._sum_time
headers = [f"R{self._resolution}/{header}" for header in headers]
metrics = dict(zip(headers, row))
wandb.log(metrics)
self._reset_state()
def save(self, headers, row, resolution) -> None:
self._rowcount += 1
self._sum_time += row[-1]
self._buffer = (headers, row)
self._resolution = resolution
if self._rowcount == self.batch_size:
self._log_buffer()
def save_custom(self, metric: str, value) -> None:
wandb.log({metric: value}, commit=False)
def close(self) -> None:
self._log_buffer()
print(self.run_dir)
wandb.save(
str((self.run_dir / "out"/ "*").resolve()), base_path=str(self.run_dir.parents[0].resolve())
)
wandb_dir = Path(wandb.run.dir)
wandb.finish()
shutil.rmtree(self.run_dir.absolute())
shutil.rmtree(wandb_dir.parent.absolute())
class Watchdog(threading.Thread):
def __init__(
self,
out_dir,
n_resolutions,
*args,
**kwargs,
):
super(Watchdog, self).__init__(*args, **kwargs)
self._stop_event = threading.Event()
self.set_input(out_dir, n_resolutions)
def set_strategy(self, strategy: SaveStrategy):
self.sv_strategy: SaveStrategy = strategy
def set_input(self, out_dir, n_resolutions):
self.out_dir = out_dir
self.n_resolutions = n_resolutions
def run(self):
line_counts = [0 for _ in range(self.n_resolutions)]
file_names = [
self.out_dir / f"IterationInfo.0.R{r}.txt"
for r in range(self.n_resolutions)
]
r = 0
while True:
            if not os.path.exists(file_names[r]):
                time.sleep(0.1)
                continue
            try:
                resolution_results = pd.read_csv(file_names[r], sep=" ")
            except pd.errors.EmptyDataError:
                time.sleep(0.1)
                continue
            if np.count_nonzero(resolution_results.isnull().values) > 1:
                time.sleep(0.1)
                continue
headers = resolution_results.columns.values
headers = [re.sub(r"\d:", "", header).lower() for header in headers]
values = resolution_results.values
len_values = values.shape[0]
len_diff = len_values - line_counts[r]
line_counts[r] = len_values
if len_diff > 0:
for row in values[len_values - len_diff :]:
self.sv_strategy.save(headers, row, r)
elif r < self.n_resolutions - 1 and os.path.exists(file_names[r + 1]):
r += 1
elif self._stop_event.is_set():
break
time.sleep(0.1)
def stop(self):
self._stop_event.set()
```
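A minimal usage sketch of the watchdog (the output directory, resolution count, and the print-only strategy below are illustrative, not part of the repository):
```python
from pathlib import Path
from thesispy.elastix_wrapper.watchdog import SaveStrategy, Watchdog

class PrintStrategy(SaveStrategy):
    """Toy strategy: print each iteration row instead of logging to wandb."""
    def save(self, headers, row, resolution) -> None:
        print(f"R{resolution}:", dict(zip(headers, row)))

wd = Watchdog(Path("output/run/out"), n_resolutions=3)  # folder where elastix writes IterationInfo.0.R*.txt
wd.set_strategy(PrintStrategy())
wd.start()      # poll the iteration files in a background thread
# ... run the registration here ...
wd.stop()       # ask the polling loop to exit once the remaining rows are flushed
wd.join()
```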
#### File: thesispy/experiments/dataset.py
```python
from pathlib import Path
from typing import Any, Dict, List
import itertools
import pickle
import numpy as np
import pandas as pd
import dictquery as dq
from thesispy.definitions import ROOT_DIR
DATASETS_PATH = ROOT_DIR / Path("datasets")
if not DATASETS_PATH.exists():
DATASETS_PATH.mkdir(parents=True)
class FinishedRun:
def __init__(
self, name: str, config: Dict[str, Any], metrics: pd.DataFrame
) -> None:
self.name = name
self.config = config
self.resolutions_train = []
self.resolutions_val = []
nr_resolutions = int(self.config["NumberOfResolutions"])
for r in range(0, nr_resolutions):
condition = (
~np.isnan(metrics[f"R{r}/metric"])
if nr_resolutions > 1
else metrics.index
)
indices = metrics.index[condition]
columns = ["_step", "_runtime", "_timestamp"] + [
c for c in metrics.columns if f"R{r}/" in c
]
metrics_r = metrics[columns]
metrics_r.columns = [c.replace(f"R{r}/", "") for c in metrics_r.columns]
self.resolutions_train.append(metrics_r.loc[indices].iloc[:-1])
self.resolutions_val.append(metrics_r.loc[indices].iloc[-1])
def query(self, query: str):
return dq.match(self.config, query)
class Dataset:
def __init__(self, project: str, runs: List[FinishedRun]) -> None:
self.runs: List[FinishedRun] = runs
self.project = project
def add_run(self, run: FinishedRun):
self.runs.append(run)
def filter(self, query: str):
return Dataset(self.project, [run for run in self.runs if run.query(query)])
def groupby(self, attrs: List[str]):
if len(attrs) == 0:
yield (), self.runs
else:
query_parts = [set() for _ in range(len(attrs))]
unique_values = [set() for _ in range(len(attrs))]
for i, attr in enumerate(attrs):
for run in self.runs:
if attr in run.config:
value = run.config[attr]
if isinstance(value, list):
value = tuple(value)
unique_values[i].add(value)
if isinstance(value, str):
query_parts[i].add(f"{attr} == '{run.config[attr]}'")
else:
query_parts[i].add(f"{attr} == {run.config[attr]}")
else:
unique_values[i].add(None)
query_parts[i].add(f"NOT {attr}")
for group, query_tuple in zip(
itertools.product(*unique_values), itertools.product(*query_parts)
):
                query = " AND ".join(query_tuple)
yield group, self.filter(query).runs
def aggregate(
self,
attrs: List[str] = [],
metrics: List[str] = ["metric"],
resolution: int = 0,
val: bool = True,
):
df = pd.DataFrame(columns=metrics)
for group, runs in self.groupby(attrs):
df_add = pd.DataFrame(columns=metrics)
for run in runs:
if val:
val_df = run.resolutions_val[resolution][metrics].to_frame()
val_df = val_df.transpose()
df_add = pd.concat([df_add, val_df])
else:
df_add = pd.concat(
[df_add, run.resolutions_train[resolution][metrics]]
)
for i, attr in enumerate(attrs):
df_add[attr] = str(group[i])
df = pd.concat([df, df_add])
return df
def save(self):
path = DATASETS_PATH / f"{self.project}.pkl"
with path.open("wb") as file:
pickle.dump(self, file)
@staticmethod
def load(project: str):
path = DATASETS_PATH / f"{project}.pkl"
with path.open("rb") as file:
return pickle.load(file)
```
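A minimal usage sketch of the dataset helpers (project name and config keys are illustrative; `metric` is the column name produced by the watchdog logs):
```python
from thesispy.experiments.dataset import Dataset

ds = Dataset.load("my_project")    # reads datasets/my_project.pkl
asgd = ds.filter("Optimizer == 'AdaptiveStochasticGradientDescent'")
print(len(asgd.runs), "runs use ASGD")
df = ds.aggregate(attrs=["Optimizer"], metrics=["metric"], resolution=0, val=True)
for optimizer, group in df.groupby("Optimizer"):
    print(optimizer, group["metric"].astype(float).mean())  # mean validation metric per optimizer
```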
#### File: thesispy/experiments/experiment.py
```python
import json
from pathlib import Path
import redis
from dotenv import load_dotenv
import os
from sshtunnel import SSHTunnelForwarder
from thesispy.elastix_wrapper import wrapper
from thesispy.elastix_wrapper.parameters import Parameters
from thesispy.elastix_wrapper.watchdog import SaveStrategyWandb
WANDB_ENTITY = "joasiee"
class Experiment:
def __init__(self, params: Parameters, project: str = None) -> None:
params.prune()
self.project = project
self.params = params
@classmethod
def from_json(cls, jsondump):
pyjson = json.loads(jsondump)
params = Parameters(pyjson["params"]).set_paths()
return cls(params, pyjson["project"])
def to_json(self):
return json.dumps({"project": self.project, "params": self.params.params})
def __str__(self) -> str:
return self.to_json()
class ExperimentQueue:
queue_id = "queue:experiments"
def __init__(self) -> None:
load_dotenv()
self.ssh_forwarding_enable()
self.client = redis.Redis(host="localhost", port=self.local_port, db=0)
def ssh_forwarding_enable(self):
self.sshserver = SSHTunnelForwarder(
os.environ["REDIS_HOST"],
ssh_username="root",
remote_bind_address=("127.0.0.1", 6379),
)
self.sshserver.start()
self.local_port = self.sshserver.local_bind_port
def push(self, experiment: Experiment) -> None:
self.client.rpush(ExperimentQueue.queue_id, experiment.to_json())
def pop(self) -> Experiment:
packed = self.client.lpop(ExperimentQueue.queue_id)
if packed:
return Experiment.from_json(packed)
return None
def peek(self) -> Experiment:
return self.client.lrange(ExperimentQueue.queue_id, 0, 0)
def size(self) -> int:
return self.client.llen(ExperimentQueue.queue_id)
def clear(self) -> None:
self.client.delete(ExperimentQueue.queue_id)
def run_experiment(experiment: Experiment):
run_dir = Path("output") / experiment.project / str(experiment.params)
batch_size = (
50
if experiment.params["Optimizer"] == "AdaptiveStochasticGradientDescent"
else 1
)
sv_strat = SaveStrategyWandb(experiment, run_dir, batch_size)
wrapper.run(experiment.params, run_dir, sv_strat)
if __name__ == "__main__":
expq = ExperimentQueue()
expq.clear()
print(expq.size())
``` |
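A minimal worker sketch for draining the queue (assumes the `.env` Redis/SSH settings used by `ExperimentQueue` are already configured):
```python
from thesispy.experiments.experiment import ExperimentQueue, run_experiment

queue = ExperimentQueue()
experiment = queue.pop()
while experiment is not None:   # run queued experiments one at a time until the queue is empty
    run_experiment(experiment)
    experiment = queue.pop()
```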
{
"source": "joast/wcmatch",
"score": 3
} |
#### File: wcmatch/tests/test_pathlib.py
```python
import contextlib
import pytest
import unittest
import os
from wcmatch import pathlib, glob, _wcparse
import pathlib as pypathlib
import pickle
import warnings
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""
Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change CWD to: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
class TestGlob(unittest.TestCase):
"""
Test file globbing.
NOTE: We are not testing the actual `glob` library, just the interface on the `pathlib` object and specifics
introduced by the particular function.
"""
def test_relative(self):
"""Test relative path."""
abspath = os.path.abspath('.')
p = pathlib.Path(abspath)
with change_cwd(os.path.dirname(abspath)):
results = list(p.glob('docs/**/*.md', flags=pathlib.GLOBSTAR))
self.assertTrue(len(results))
self.assertTrue(all([file.suffix == '.md' for file in results]))
def test_relative_exclude(self):
"""Test relative path exclude."""
abspath = os.path.abspath('.')
p = pathlib.Path(abspath)
with change_cwd(os.path.dirname(abspath)):
results = list(p.glob('docs/**/*.md|!**/index.md', flags=pathlib.GLOBSTAR | pathlib.NEGATE | pathlib.SPLIT))
self.assertTrue(len(results))
self.assertTrue(all([file.name != 'index.md' for file in results]))
def test_glob(self):
"""Test globbing function."""
p = pathlib.Path('docs')
results = list(p.glob('*.md'))
self.assertTrue(not results)
results = list(p.glob('**/*.md', flags=pathlib.GLOBSTAR))
self.assertTrue(len(results))
self.assertTrue(all([file.suffix == '.md' for file in results]))
def test_rglob(self):
"""Test globbing function."""
p = pathlib.Path('docs')
results = list(p.rglob('*.md'))
self.assertTrue(len(results))
self.assertTrue(all([file.suffix == '.md' for file in results]))
results = list(p.rglob('*.md'))
self.assertTrue(len(results))
self.assertTrue(all([file.suffix == '.md' for file in results]))
results = list(p.rglob('markdown/*.md'))
self.assertTrue(len(results))
self.assertTrue(all([file.suffix == '.md' for file in results]))
def test_integrity(self):
"""Test glob integrity, or better put, test the path structure comes out sane."""
orig = [pathlib.Path(x) for x in glob.iglob('docs/**/*.md', flags=glob.GLOBSTAR)]
results = list(pathlib.Path('docs').glob('**/*.md', flags=glob.GLOBSTAR))
self.assertEqual(orig, results)
orig = [pathlib.Path(x) for x in glob.iglob('**/*.md', flags=glob.GLOBSTAR)]
results = list(pathlib.Path('').glob('**/*.md', flags=glob.GLOBSTAR))
self.assertEqual(orig, results)
class TestPathlibGlobmatch:
"""
Tests that are performed against `globmatch`.
    Each case entry is a list of 3 to 5 parameters (the last two are optional).
* Pattern
* File name
* Expected result (boolean of whether pattern matched file name)
* Flags
* Force Windows or Unix (string with `windows` or `unix`)
The default flags are `NEGATE` | `EXTGLOB` | `BRACE`. Any flags passed through via entry are XORed.
So if any of the default flags are passed via an entry, they will be disabled. All other flags will
enable the feature.
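    For example, an entry that passes `pathlib.NEGATE` disables the default `NEGATE` flag for that
    case, while `pathlib.G` (`GLOBSTAR`) is not one of the defaults and is therefore enabled.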
NOTE: We are not testing the actual `globmatch` library, just the interface on the `pathlib` object.
"""
cases = [
['some/*/*/match', 'some/path/to/match', True, pathlib.G],
['some/**/match', 'some/path/to/match', False],
['some/**/match', 'some/path/to/match', True, pathlib.G],
# `pathlib` doesn't keep trailing slash, so we can't tell it's a directory
['some/**/match/', 'some/path/to/match/', False, pathlib.G],
['.', '.', True],
['.', '', True],
# `PurePath`
['some/*/*/match', 'some/path/to/match', True, pathlib.G, "pure"],
['some/**/match', 'some/path/to/match', False, 0, "pure"],
['some/**/match', 'some/path/to/match', True, pathlib.G, "pure"],
['some/**/match/', 'some/path/to/match/', False, pathlib.G, "pure"],
['.', '.', True, 0, "pure"],
['.', '', True, 0, "pure"],
# Force a specific platform with a specific `PurePath`.
['//?/C:/**/file.log', r'\\?\C:\Path\path\file.log', True, pathlib.G, "windows"],
['/usr/*/bin', '/usr/local/bin', True, pathlib.G, "unix"]
]
@classmethod
def setup_class(cls):
"""Setup default flag options."""
# The tests we scraped were written with this assumed.
cls.flags = pathlib.NEGATE | pathlib.EXTGLOB | pathlib.BRACE
@classmethod
def evaluate(cls, case):
"""Evaluate case."""
pattern = case[0]
name = case[1]
goal = case[2]
flags = cls.flags
path = None
platform = "auto"
if len(case) > 3:
flags ^= case[3]
if len(case) > 4:
if case[4] == "windows":
path = pathlib.PureWindowsPath(name)
platform = case[4]
elif case[4] == "unix":
path = pathlib.PurePosixPath(name)
platform = case[4]
elif case[4] == "pure":
path = pathlib.PurePath(name)
if path is None:
path = pathlib.Path(name)
print('PATH: ', str(path))
print("PATTERN: ", pattern)
print("FILE: ", name)
print("GOAL: ", goal)
print("FLAGS: ", bin(flags))
print("Platform: ", platform)
cls.run(path, pattern, flags, goal)
@classmethod
def run(cls, path, pattern, flags, goal):
"""Run the command."""
assert path.globmatch(pattern, flags=flags) == goal, "Expression did not evaluate as %s" % goal
@pytest.mark.parametrize("case", cases)
def test_cases(self, case):
"""Test ignore cases."""
self.evaluate(case)
class TestPathlibMatch(TestPathlibGlobmatch):
"""
Test match method.
NOTE: We are not testing the actual `globmatch` library, just the interface on the `pathlib` object and the
additional behavior that match injects (recursive logic).
"""
cases = [
['match', 'some/path/to/match', True],
['to/match', 'some/path/to/match', True],
['path/to/match', 'some/path/to/match', True],
['some/**/match', 'some/path/to/match', False],
['some/**/match', 'some/path/to/match', True, pathlib.G]
]
@classmethod
def run(cls, path, pattern, flags, goal):
"""Run the command."""
assert path.match(pattern, flags=flags) == goal, "Expression did not evaluate as %s" % goal
class TestRealpath(unittest.TestCase):
"""Test real path of pure paths."""
def test_real_directory(self):
"""Test real directory."""
p = pathlib.PurePath('wcmatch')
self.assertTrue(p.globmatch('*/', flags=pathlib.REALPATH))
self.assertTrue(p.globmatch('*', flags=pathlib.REALPATH))
def test_real_file(self):
"""Test real file."""
p = pathlib.PurePath('setup.py')
self.assertFalse(p.globmatch('*/', flags=pathlib.REALPATH))
self.assertTrue(p.globmatch('*', flags=pathlib.REALPATH))
class TestExceptions(unittest.TestCase):
"""Test exceptions."""
def test_bad_path(self):
"""Test bad path."""
with self.assertRaises(NotImplementedError):
obj = pathlib.PosixPath if os.name == 'nt' else pathlib.WindowsPath
obj('name')
def test_bad_realpath(self):
"""Test bad real path."""
with self.assertRaises(ValueError):
obj = pathlib.PurePosixPath if os.name == 'nt' else pathlib.PureWindowsPath
p = obj('wcmatch')
p.globmatch('*', flags=pathlib.REALPATH)
def test_absolute_glob(self):
"""Test absolute patterns in `pathlib` glob."""
with self.assertRaises(ValueError):
p = pathlib.Path('wcmatch')
list(p.glob('/*'))
def test_inverse_absolute_glob(self):
"""Test inverse absolute patterns in `pathlib` glob."""
with self.assertRaises(ValueError):
p = pathlib.Path('wcmatch')
list(p.glob('!/*', flags=pathlib.NEGATE))
class TestComparisons(unittest.TestCase):
"""Test comparison."""
def test_instance(self):
"""Test instance."""
p1 = pathlib.Path('wcmatch')
p2 = pypathlib.Path('wcmatch')
self.assertTrue(isinstance(p1, pathlib.Path))
self.assertTrue(isinstance(p1, pypathlib.Path))
self.assertFalse(isinstance(p2, pathlib.Path))
self.assertTrue(isinstance(p2, pypathlib.Path))
def test_equal(self):
"""Test equivalence."""
p1 = pathlib.Path('wcmatch')
p2 = pypathlib.Path('wcmatch')
p3 = pathlib.Path('docs')
self.assertTrue(p1 == p2)
self.assertFalse(p1 == p3)
self.assertFalse(p3 == p2)
def test_pure_equal(self):
"""Test equivalence."""
p1 = pathlib.PureWindowsPath('wcmatch')
p2 = pathlib.PurePosixPath('wcmatch')
p3 = pypathlib.PureWindowsPath('wcmatch')
p4 = pypathlib.PurePosixPath('wcmatch')
self.assertTrue(p1 != p2)
self.assertTrue(p3 != p4)
self.assertTrue(p1 == p3)
self.assertTrue(p2 == p4)
def test_flavour_equal(self):
"""Test that the same flavours equal each other, regardless of path type."""
p1 = pathlib.PurePath('wcmatch')
p2 = pathlib.Path('wcmatch')
p3 = pypathlib.PurePath('wcmatch')
p4 = pypathlib.Path('wcmatch')
self.assertTrue(p1 == p2)
self.assertTrue(p3 == p4)
self.assertTrue(p1 == p3)
self.assertTrue(p2 == p4)
self.assertTrue(p1 == p4)
self.assertTrue(p2 == p3)
def test_pickle(self):
"""Test pickling."""
p1 = pathlib.PurePath('wcmatch')
p2 = pathlib.Path('wcmatch')
p3 = pickle.loads(pickle.dumps(p1))
p4 = pickle.loads(pickle.dumps(p2))
self.assertTrue(type(p1) == type(p3))
self.assertTrue(type(p2) == type(p4))
self.assertTrue(type(p1) != type(p2))
self.assertTrue(type(p3) != type(p4))
class TestExpansionLimit(unittest.TestCase):
"""Test expansion limits."""
def test_limit_globmatch(self):
"""Test expansion limit of `globmatch`."""
with self.assertRaises(_wcparse.PatternLimitException):
pathlib.PurePath('name').globmatch('{1..11}', flags=pathlib.BRACE, limit=10)
def test_limit_match(self):
"""Test expansion limit of `match`."""
with self.assertRaises(_wcparse.PatternLimitException):
pathlib.PurePath('name').match('{1..11}', flags=pathlib.BRACE, limit=10)
def test_limit_glob(self):
"""Test expansion limit of `glob`."""
with self.assertRaises(_wcparse.PatternLimitException):
list(pathlib.Path('.').glob('{1..11}', flags=pathlib.BRACE, limit=10))
def test_limit_rglob(self):
"""Test expansion limit of `rglob`."""
with self.assertRaises(_wcparse.PatternLimitException):
list(pathlib.Path('.').rglob('{1..11}', flags=pathlib.BRACE, limit=10))
``` |
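For reference, a small standalone sketch of the flag-passing pattern these tests exercise (paths are illustrative):
```python
from wcmatch import pathlib

p = pathlib.Path("docs")
md_files = list(p.glob("**/*.md", flags=pathlib.GLOBSTAR))   # recursive glob, as in TestGlob above
print(len(md_files), "markdown files found")
print(pathlib.PurePath("some/path/to/match").globmatch("some/**/match", flags=pathlib.GLOBSTAR))  # True
```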
{
"source": "joathert/Spherical2TreeAttributes",
"score": 2
} |
#### File: Spherical2TreeAttributes/CompactAll.future/app.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
class MainWindow(QMainWindow):
'''
MainFrame
|-- MenuBar
|-- ProjectPanel
|-- TabPanel
|-- IndividualTab
|-- BasalAreaTab
|-- PlantFractionTab
'''
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle("Spherical2TreeAttributes (Beta 0.1)")
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
screen = QDesktopWidget().screenGeometry()
self.resize(screen.width(), screen.height())
self.move(0,0)
self.setWindowIcon(QIcon('./img/logo.png'))
self.setupUI()
self.functionConnector()
#self.glWidget = GLWidget()
def setupUI(self):
# Menu Bar UI
self.menuBar = MenuBar()
# Project Panel UI
self.projectPanel = ProjectPanel()
# Tabs UI
self.tabs = QTabWidget()
self.indTab = IndividualTreePanel()
self.baTab = BasalAreaPanel()
self.pfTab = PlantFractionPanel()
self.tabs.addTab(self.indTab, 'Individual DBH|HT')
self.tabs.addTab(self.baTab, 'Basal Area')
self.tabs.addTab(self.pfTab, 'Plant Fraction')
#self.tabs.setTabPosition(QTabWidget.West)
#self.tabs.setStyleSheet("QTabBar::tab {width: 50px}")
self.tabs.setStyleSheet("QTabBar::tab {height: 50px}")
# Pack these UIs together
self.mainWidget = QWidget()
wl = QHBoxLayout(self.mainWidget)
layout = QVBoxLayout()
layout.addWidget(self.menuBar, stretch=1)
layout.addLayout(self.projectPanel, stretch=7)
wl.addLayout(layout, stretch=1)
wl.addWidget(self.tabs, stretch=3)
self.setCentralWidget(self.mainWidget)
def functionConnector(self):
self.menuBar.actionNew.clicked.connect(self.newProject)
self.menuBar.actionOpen.clicked.connect(self.openProject)
self.menuBar.actionSave.clicked.connect(self.saveProject)
self.menuBar.actionQuit.clicked.connect(self.quitSoftware)
def newProject(self):
self.updateStatus("add new project")
def openProject(self):
self.updateStatus("open project")
def saveProject(self):
self.updateStatus("save project")
def quitSoftware(self):
qApp = QApplication.instance()
qApp.quit()
def updateStatus(self, string):
if len(string) >= 50:
self.projectPanel.statusBar.setText(string[:50] + '...')
else:
self.projectPanel.statusBar.setText(string)
class MenuBar(QTabWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.btnStyle='''
QPushButton {
background-color:white;
border: none;
padding: 5px;
}
QPushButton:hover {
border:1px solid;
}
'''
self.setupUI()
def setupUI(self):
self.fileTab = QWidget()
self.plotTab = QWidget()
self.helpTab = QWidget()
self.addTab(self.fileTab, 'File')
self.addTab(self.plotTab, 'Plot')
self.addTab(self.helpTab, 'Help')
self.setStyleSheet("QTabBar::tab { height: 50px}")
# File Tab
self.fileLayout = QGridLayout()
self.actionNew = QPushButton("&New")
self.actionNew.setIcon(QIcon("./img/new.png"))
self.actionNew.setShortcut("Ctrl+N")
self.actionNew.setStyleSheet(self.btnStyle)
self.actionOpen = QPushButton("&Open")
self.actionOpen.setIcon(QIcon("./img/open.png"))
self.actionOpen.setShortcut("Ctrl+O")
self.actionOpen.setStyleSheet(self.btnStyle)
self.actionSave = QPushButton("&Save")
self.actionSave.setEnabled(False)
self.actionSave.setIcon(QIcon("./img/save.png"))
self.actionSave.setShortcut("Ctrl+S")
self.actionSave.setStyleSheet(self.btnStyle)
self.actionSaveAs = QPushButton("Save As")
self.actionSaveAs.setEnabled(False)
self.actionSaveAs.setIcon(QIcon("./img/save.png"))
self.actionSaveAs.setShortcut("Ctrl+Alt+S")
self.actionSaveAs.setStyleSheet(self.btnStyle)
self.actionExport = QPushButton("Export")
self.actionExport.setEnabled(False)
self.actionExport.setIcon(QIcon("./img/export.png"))
self.actionExport.setStyleSheet(self.btnStyle)
self.actionQuit = QPushButton("Quit")
self.actionQuit.setEnabled(True)
self.actionQuit.setIcon(QIcon("./img/quit.png"))
self.actionQuit.setStyleSheet(self.btnStyle)
self.fileLayout.addWidget(self.actionNew, 0, 0, alignment=Qt.AlignLeft)
self.fileLayout.addWidget(self.actionOpen, 1, 0, alignment=Qt.AlignLeft)
self.fileLayout.addWidget(self.actionSave, 0, 1, alignment=Qt.AlignLeft)
self.fileLayout.addWidget(self.actionSaveAs, 1,1, alignment=Qt.AlignLeft)
self.fileLayout.addWidget(self.actionExport, 0, 2, alignment=Qt.AlignLeft)
self.fileLayout.addWidget(self.actionQuit, 1, 2, alignment=Qt.AlignLeft)
#self.fileLayout.addStretch(0)
self.fileTab.setLayout(self.fileLayout)
# Plot Tab
self.plotLayout = QGridLayout()
self.actionAdd = QPushButton("Single")
self.actionAdd.setIcon(QIcon("./img/add.png"))
self.actionAdd.setStyleSheet(self.btnStyle)
self.actionBatchAdd = QPushButton("Batch")
self.actionBatchAdd.setIcon(QIcon("./img/add.png"))
self.actionBatchAdd.setStyleSheet(self.btnStyle)
#self.labelAdd = QLabel('Add')
#self.labelAdd.setAlignment(Qt.AlignHCenter)
self.actionEditName = QPushButton("Name")
self.actionEditName.setIcon(QIcon("./img/edit.png"))
self.actionEditName.setStyleSheet(self.btnStyle)
self.actionEditEle = QPushButton("Elevation")
self.actionEditEle.setIcon(QIcon("./img/edit.png"))
self.actionEditEle.setStyleSheet(self.btnStyle)
self.actionEditImg = QPushButton("Image")
self.actionEditImg.setIcon(QIcon("./img/edit.png"))
self.actionEditImg.setStyleSheet(self.btnStyle)
#self.labelEdit = QLabel('Edit')
#self.labelEdit.setAlignment(Qt.AlignHCenter)
self.actionDel = QPushButton("Delete")
self.actionDel.setIcon(QIcon("./img/delete.png"))
self.actionDel.setStyleSheet(self.btnStyle)
self.plotLayout.addWidget(self.actionAdd, 0, 0, alignment=Qt.AlignLeft)
self.plotLayout.addWidget(self.actionBatchAdd, 1, 0, alignment=Qt.AlignLeft)
#self.plotLayout.addWidget(self.labelAdd, 3, 0)
self.plotLayout.addWidget(self.actionEditName, 0, 1, alignment=Qt.AlignLeft)
self.plotLayout.addWidget(self.actionEditEle, 1, 1, alignment=Qt.AlignLeft)
self.plotLayout.addWidget(self.actionEditImg, 0, 2, alignment=Qt.AlignLeft)
#self.plotLayout.addWidget(self.labelEdit, 3, 1)
self.plotLayout.addWidget(self.actionDel, 1, 2, alignment=Qt.AlignLeft)
self.plotTab.setLayout(self.plotLayout)
# Help Tab
self.helpLayout = QGridLayout()
self.actionApp = QPushButton("User Manual")
self.actionApp.setIcon(QIcon("./img/file.png"))
self.actionApp.setStyleSheet(self.btnStyle)
self.actionAuthor = QPushButton("About Author")
self.actionAuthor.setIcon(QIcon("./img/file.png"))
self.actionAuthor.setStyleSheet(self.btnStyle)
self.actionLab = QPushButton("About Lab")
self.actionLab.setIcon(QIcon("./img/file.png"))
self.actionLab.setStyleSheet(self.btnStyle)
self.helpLayout.addWidget(self.actionApp, 0, 0, alignment=Qt.AlignLeft)
self.helpLayout.addWidget(self.actionAuthor, 0, 1, alignment=Qt.AlignLeft)
self.helpLayout.addWidget(self.actionLab, 0, 2, alignment=Qt.AlignLeft)
#self.helpLayout.addStretch(0)
self.helpTab.setLayout(self.helpLayout)
class ProjectPanel(QVBoxLayout):
'''
ProjectPanel
|-- ImageInfo(QTreeWidget)
|-- PlotInfo(QTableWidget)
|-- ControlGroups(HorizontalLayout)
|-- AddPlot
|-- DelPlot
|-- EditPlot
'''
def __init__(self, parent=None):
super().__init__(parent)
self.setupUI()
def setupUI(self):
# Image Info Panel
self.projectTree = QTreeWidget()
self.projectTree.setColumnCount(2)
self.projectTree.setHeaderLabels(['Plot', 'Elevation', 'Image File Path'])
plot1 = QTreeWidgetItem(self.projectTree)
plot1.setText(0, 'Plot1')
img1 = QTreeWidgetItem(plot1)
img1.setText(0, 'Upper')
img1.setText(1, '2.6m')
img1.setText(2, 'D:/Test/test2.jpg')
img1.setIcon(0, QIcon('./img/img.png'))
img2 = QTreeWidgetItem(plot1)
img2.setText(0, 'Lower')
img2.setText(1, '1.6m')
img2.setText(2, 'D:/Test/test1.jpg')
img2.setIcon(0, QIcon('./img/img.png'))
#for i in range(0,3):
# self.projectTree.resizeColumnToContents(i)
self.projectTree.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.projectTree.header().setStretchLastSection(True)
self.projectTree.expandAll()
# Plot Info Panel
self.projectTableView = QTableView()
self.projectModel = QStandardItemModel(0, 5)
self.projectModel.setHorizontalHeaderLabels(['Plot', 'meanDBH', 'meanHT', 'standBA', 'PF'])
self.projectTableView.setModel(self.projectModel)
self.projectTableView.resizeColumnsToContents()
self.projectTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
# Control Panel
# self.projectButtonLayout = QHBoxLayout()
# self.addPlot = QPushButton('Add')
# self.delPlot = QPushButton('Del')
# self.editPlot = QPushButton('Edit')
# self.projectButtonLayout.addWidget(self.addPlot)
# self.projectButtonLayout.addWidget(self.delPlot)
# self.projectButtonLayout.addWidget(self.editPlot)
self.statusBar = QLabel('[AI]: Hello World!')
self.addWidget(QLabel('Plot Management'))
self.addWidget(self.projectTree, stretch=2)
#self.addLayout(self.projectButtonLayout)
self.addWidget(QLabel('Plot Overview'))
self.addWidget(self.projectTableView, stretch=1)
self.addWidget(self.statusBar)
class IndividualTreePanel(QWidget):
'''
IndividualTreePanel
|-- OpenGLContainer(VerticalLayout)
|-- UpperHorizontalLayout
| |-- VerticalBar(Zenith Angle)
| |-- OpenGLPanel(VerticalLayout)
| | |-- UpperGL
| | |-- LowerGL
| |-- VerticalBar(Zoom in/out)
| |-- HorzontalBar
|-- VertialLayout
|-- TreeInfoTable
|-- ButtonHorizontalLayout
|-- AddBtn
|-- DelBtn
'''
def __init__(self, parent=None):
super().__init__(parent)
self.setupUI()
self.functionConnector()
def setupUI(self):
self.layout = QHBoxLayout(self)
### OpenGLCtrl
self.openglCtrlLayout = QVBoxLayout()
self.xSlider = QSlider(Qt.Horizontal)
self.xSlider.setMinimum(0)
self.xSlider.setMaximum(360)
self.xSlider.setSingleStep(10)
self.xSlider.setValue(180)
self.xSlider.setTickPosition(QSlider.TicksBelow)
self.xSlider.setTickInterval(10)
self.ySlider = QSlider(Qt.Vertical)
self.ySlider.setMinimum(-90)
self.ySlider.setMaximum(90)
self.ySlider.setSingleStep(5)
self.ySlider.setValue(0)
self.ySlider.setTickPosition(QSlider.TicksLeft)
self.ySlider.setTickInterval(5)
self.zoomSlider = QSlider(Qt.Vertical)
self.zoomSlider.setMinimum(100)
self.zoomSlider.setMaximum(500)
self.zoomSlider.setSingleStep(10)
self.zoomSlider.setValue(100)
self.zoomSlider.setTickPosition(QSlider.TicksRight)
self.zoomSlider.setTickInterval(10)
self.glUp = GLWidget()
self.glUp.setEnabled(True)
#self.glDown = QOpenGLWidget()
self.glDown = GLWidget()
self.glDown.setEnabled(True)
self.upperImgName = QLabel('Upper Image (2.6m): [D:/test/test2.jpg]')
self.lowerImgName = QLabel('Lower Image (1.6m): [D:/test/test1.jpg]')
self.line = QFrame()
self.line.setFrameShape(QFrame.HLine)
self.line.setFrameShadow(QFrame.Sunken)
self.openglLayout = QVBoxLayout()
self.openglLayout.addWidget(self.upperImgName, stretch=1)
self.openglLayout.addWidget(self.glUp, stretch=20)
self.openglLayout.addWidget(self.line)
self.openglLayout.addWidget(self.lowerImgName, stretch=1)
self.openglLayout.addWidget(self.glDown, stretch=20)
self.openglLR = QHBoxLayout()
self.ySliderLabels = QVBoxLayout()
self.ySliderLabels.addWidget(QLabel('90°'), 1, Qt.AlignTop)
self.ySliderLabels.addWidget(QLabel('Zenith\nAngle'), 10, Qt.AlignVCenter)
self.ySliderLabels.addWidget(QLabel('-90°'), 1, Qt.AlignBottom)
self.zoomSliderLabels = QVBoxLayout()
self.zoomSliderLabels.addWidget(QLabel('500%'), 1, Qt.AlignTop)
self.zoomSliderLabels.addWidget(QLabel('Zoom\nRatio'), 10, Qt.AlignVCenter)
self.zoomSliderLabels.addWidget(QLabel('100%'), 1, Qt.AlignBottom)
self.openglLR.addLayout(self.ySliderLabels,2)
self.openglLR.addWidget(self.ySlider,1)
self.openglLR.addLayout(self.openglLayout,40)
self.openglLR.addWidget(self.zoomSlider,1)
self.openglLR.addLayout(self.zoomSliderLabels, 2)
self.openglCtrlLayout.addLayout(self.openglLR)
self.openglCtrlLayout.addWidget(self.xSlider)
self.xSliderLabels = QHBoxLayout()
self.xSliderLabels.addWidget(QLabel('0°'), 1, Qt.AlignLeft)
self.xSliderLabels.addWidget(QLabel('Azimuth Angle'), 10, Qt.AlignCenter)
self.xSliderLabels.addWidget(QLabel('360°'), 1, Qt.AlignRight)
self.openglCtrlLayout.addLayout(self.xSliderLabels)
## Results Layouts
self.individualTreeInfoLayout = QVBoxLayout()
### IndividualModel
self.individualModel = QStandardItemModel(0, 4)
self.individualModel.setHorizontalHeaderLabels(['Distance', '△H', 'DBH', 'HT'])
self.individualTableView = QTableView()
self.individualTableView.setModel(self.individualModel)
self.individualTableView.resizeColumnsToContents()
self.individualTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        ### DiameterModel
        self.diameterModel = QStandardItemModel(0, 3)
self.diameterModel.setHorizontalHeaderLabels(['No.', 'Diameter', 'at height'])
self.diameterTableView = QTableView()
self.diameterTableView.setModel(self.diameterModel)
self.diameterTableView.resizeColumnsToContents()
self.diameterTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.individualTreeInfoLayout.addWidget(QLabel('Individual Tree Panel'))
self.individualTreeInfoLayout.addWidget(self.individualTableView, stretch=2)
self.individualTreeInfoLayout.addWidget(QLabel('Diameter Panel'))
self.individualTreeInfoLayout.addWidget(self.diameterTableView, stretch=1)
self.layout.addLayout(self.openglCtrlLayout, stretch=2)
self.layout.addLayout(self.individualTreeInfoLayout, stretch=1)
def functionConnector(self):
self.xSlider.valueChanged.connect(self.glUp.setXRotation)
self.xSlider.valueChanged.connect(self.glDown.setXRotation)
self.glUp.xRotationChanged.connect(self.xSlider.setValue)
self.glDown.xRotationChanged.connect(self.xSlider.setValue)
self.ySlider.valueChanged.connect(self.glUp.setYRotation)
self.ySlider.valueChanged.connect(self.glDown.setYRotation)
self.glUp.yRotationChanged.connect(self.ySlider.setValue)
self.glDown.yRotationChanged.connect(self.ySlider.setValue)
#self.zSlider.valueChanged.connect(self.glUp.setZRotation)
#self.zSlider.valueChanged.connect(self.glDown.setZRotation)
#self.glUp.zRotationChanged.connect(self.zSlider.setValue)
#self.glDown.zRotationChanged.connect(self.zSlider.setValue)
    def setXRotation(self):
        print('setXRotation')
    def setYRotation(self):
        print('setYRotation')
    def setZRotation(self):
        print('setZRotation')
class GLWidget(QOpenGLWidget):
xRotationChanged = pyqtSignal(int)
yRotationChanged = pyqtSignal(int)
zRotationChanged = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.object = 0
self.xRot = 0
self.yRot = 0
self.zRot = 0
def getOpenglInfo(self):
info = f"Renderer: {glGetString(GL_RENDERER)}. Version:{glGetString(GL_VERSION)}"
return info
def setXRotation(self, angle):
window.updateStatus(f'XRoate:{angle}')
'''
angle = self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.xRotationChanged.emit(angle)
self.update()
'''
def setYRotation(self, angle):
window.updateStatus(f'YRoate:{angle}')
'''
angle = self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.yRotationChanged.emit(angle)
self.update()
'''
def setZRotation(self, angle):
window.updateStatus(f'ZRoate:{angle}')
'''
angle = self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.zRotationChanged.emit(angle)
self.update()
'''
def mousePressEvent(self, event):
self.lastPos = event.pos()
window.updateStatus(f'LastPosition:{self.lastPos}')
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
'''
if event.buttons() & Qt.LeftButton:
self.setXRotation(self.xRot + 8 * dy)
self.setYRotation(self.yRot + 8 * dx)
elif event.buttons() & Qt.RightButton:
self.setXRotation(self.xRot + 8 * dy)
self.setZRotation(self.zRot + 8 * dx)
'''
self.lastPos = event.pos()
window.updateStatus(f'dx:{dx}, dy:{dy}')
def initializeGL(self):
window.updateStatus(self.getOpenglInfo())
class BasalAreaPanel(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
baLayout = QHBoxLayout()
baLayout.addWidget(QLabel('Under Construction'))
self.setLayout(baLayout)
class PlantFractionPanel(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
pfLayout = QHBoxLayout()
pfLayout.addWidget(QLabel('Under Construction'))
self.setLayout(pfLayout)
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
    window.setStyleSheet("QMainWindow {background: rgba(255,255,255,230);}")
window.showMaximized()
sys.exit(app.exec_())
```
#### File: CompactAll.future/textures/glwf.py
```python
from OpenGL.GL import *
from OpenGL.GLU import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtOpenGL import *
import sys
class MainWindow(QWidget):
def __init__(self):
super(MainWindow, self).__init__()
self.widget = GLWidget(self)
#self.statusbar = QStatusBar()
#self.statusbar.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
#self.statusbar.showMessage("Click anywhere on the QGLWidget to see a pixel's RGBA value!")
layout = QVBoxLayout()
layout.addWidget(self.widget)
#layout.addWidget(self.statusbar)
layout.setContentsMargins(5, 5, 5, 5)
self.setLayout(layout)
class GLWidget(QGLWidget):
def __init__(self, parent):
QGLWidget.__init__(self, parent)
self.setMinimumSize(640, 480)
#LMB = left mouse button
#True: fires mouseMoveEvents even when not holding down LMB
#False: only fire mouseMoveEvents when holding down LMB
self.setMouseTracking(False)
def initializeGL(self):
glClearColor(0, 0, 0, 1)
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
def resizeGL(self, width, height):
#glViewport is needed for proper resizing of QGLWidget
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def paintGL(self):
#Renders a triangle... obvious (and deprecated!) stuff
w, h = self.width(), self.height()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glBegin(GL_TRIANGLES)
glColor3f(1, 0, 0)
glVertex3f(0, 0, 0)
glColor3f(0, 1, 0)
glVertex3f(w/2.0, h, 0)
glColor3f(0, 0, 1)
glVertex3f(w, 0, 0)
glEnd()
def mousePressEvent(self, event):
x, y = event.x(), event.y()
w, h = self.width(), self.height()
#required to call this to force PyQt to read from the correct, updated buffer
#see issue noted by @BjkOcean in comments!!!
glReadBuffer(GL_FRONT)
data = self.grabFrameBuffer()#builtin function that calls glReadPixels internally
data.save("test.png")
rgba = QColor(data.pixel(x, y)).getRgb()#gets the appropriate pixel data as an RGBA tuple
message = "You selected pixel ({0}, {1}) with an RGBA value of {2}.".format(x, y, rgba)
        statusbar = getattr(self.parent(), 'statusbar', None)  # statusbar creation is commented out in MainWindow above
        if statusbar is not None:
            statusbar.showMessage(message)
        else:
            print(message)  # fall back so a click does not raise AttributeError
def mouseMoveEvent(self, event):
pass
def mouseReleaseEvent(self, event):
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.setWindowTitle("Color Picker Demo")
window.show()
app.exec_()
```
#### File: Spherical2TreeAttributes/Individual Tree/app.py
```python
import sys
import os
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PIL import Image, ExifTags
#from scipy import ndimage
#from scipy.interpolate import interp1d
#from skimage.exposure import equalize_hist
import imageio
import random
from ba import in_tree_pixel
baf20 = in_tree_pixel(baf=20, img_width=5376)
class MainWindow(QMainWindow):
keyPressed = pyqtSignal(QEvent)
vLocked = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.coordinate = '(x, y)'
low = input("Please type the lower camera height in METER, default=1.6\n>>>")
try:
e1 = float(low)
except ValueError:
print('No valuable float numbers typed, used default 1.6m')
e1 = 1.6
high = input("Please type the higher camera height in METER, default=2.6\n>>>")
try:
e2 = float(high)
except ValueError:
            print('No valid float number typed, using default 2.6m')
e2 = 2.6
if e1 > e2:
e1, e2 = e2, e1
sector_func = input('Use Sector Sampling ? [Y/N]\n>>>')
sector_intensity = None
sector_num = 1
if sector_func in ['Y','y', 'yes']:
intense_loop = True
while intense_loop:
intensity_temp = input('[Sector] Please type the intensity '
'(the percentage of sector width, %, 0-100): ')
try:
intensity_temp = float(intensity_temp)
except ValueError:
                    print(f'[Sector] <{intensity_temp}> is not a valid float number, please type again.')
continue
                if 0 <= intensity_temp <= 100:
sector_intensity = intensity_temp
intense_loop = False
else:
print(f'[Sector] <{intensity_temp}> should range from 0 to 100, please type again')
continue
"""
num_loop = True
while num_loop:
number_temp = input('[Sector] Please type the number of sector (0-10, int): ')
try:
number_temp = int(number_temp)
except ValueError:
print(f'[Sector] <{number_temp}> is not a valuable integer number, please type again.')
continue
if 0 <= number_temp and number_temp * sector_intensity <= 50:
sector_num = number_temp
num_loop = False
else:
print(f'[Sector] <{number_temp}>*intensity({sector_intensity}) '
f'should range from 0 to 100, please type again')
continue
"""
if sector_intensity is None:
self.sector_range = None
print(f'App launch! With low camera {e1}m and high camera {e2}m')
else:
self.sector_range = self.getSectorStarts(sector_num, sector_intensity)
print(f'App launch! With low camera {e1}m and high camera {e2}m, '
f'and {sector_num} Sector(s) of {sector_intensity}% width')
self.e1 = e1
self.e2 = e2
self.setWindowTitle('IndividualDemo')
self.setupUI()
self.functionConnector()
self.initPlot()
self.initTree()
        self.addTree = -1  # -1: not adding; note this attribute shadows the addTree() method, which was already bound to the click signal in functionConnector above
def setupUI(self):
self.mainWidget = QWidget()
self.panel1 = ImgPanel(self, self.e1, self.sector_range)
self.panel2 = ImgPanel(self, self.e2, self.sector_range)
self.line = QFrame()
self.line.setFrameShape(QFrame.VLine)
self.line.setFrameShadow(QFrame.Sunken)
self.wl = QHBoxLayout(self.mainWidget)
self.wl.addWidget(self.panel1)
self.wl.addWidget(self.line)
self.wl.addWidget(self.panel2)
self.setCentralWidget(self.mainWidget)
def functionConnector(self):
self.panel1.xScrollChanged.connect(self.panel2.setXScroll)
self.panel2.xScrollChanged.connect(self.panel1.setXScroll)
self.panel1.yScrollChanged.connect(self.panel2.setYScroll)
self.panel2.yScrollChanged.connect(self.panel1.setYScroll)
self.panel1.exifData.connect(self.updatePlot)
self.panel2.exifData.connect(self.updatePlot)
self.keyPressed.connect(self.panel1.imgShow.changeDirection)
self.keyPressed.connect(self.panel2.imgShow.changeDirection)
self.panel1.imgShow.emitPoint.connect(self.addTree)
self.panel2.imgShow.emitPoint.connect(self.addTree)
self.vLocked.connect(self.panel1.imgShow.changeVLock)
self.vLocked.connect(self.panel2.imgShow.changeVLock)
def keyPressEvent(self, event):
if event.key() == Qt.Key_L:
v, okPressed = QInputDialog.getInt(self, "Get integer","Y pixel", 6000, 0, 13000, 1)
if okPressed:
self.vLocked.emit(v)
elif event.key() == Qt.Key_U:
self.vLocked.emit(-1)
elif event.key() == Qt.Key_N: # Add tree easy mode
self.addTree = 0
self.initTree()
self.changeDirection('NE')
self.showStep(f'[S1:{self.e1}Base]')
else:
self.keyPressed.emit(event)
def initPlot(self):
self.plot = {'GCP16':0,
'LatDeg16':0, 'LatMin16':0, 'LatSec16':0.0,
'LonDeg16':0, 'LonMin16':0, 'LonSec16':0.0,
'Altitude16':0.0, 'North16':0.0,
'GCP26':0,
'LatDeg26':0, 'LatMin26':0, 'LatSec26':0.0,
'LonDeg26':0, 'LonMin26':0, 'LonSec26':0.0,
'Altitude26':0.0, 'North26':0.0}
def updatePlot(self, ht, data_list):
if ht == self.e1:
self.plot['GCP16'] = data_list[0]
self.plot['LatDeg16'] = data_list[1]
self.plot['LatMin16'] = data_list[2]
self.plot['LatSec16'] = data_list[3]
self.plot['LonDeg16'] = data_list[4]
            self.plot['LonMin16'] = data_list[5]
            self.plot['LonSec16'] = data_list[6]
self.plot['Altitude16'] = data_list[7]
self.plot['North16'] = data_list[8]
else:
self.plot['GCP26'] = data_list[0]
self.plot['LatDeg26'] = data_list[1]
self.plot['LatMin26'] = data_list[2]
self.plot['LatSec26'] = data_list[3]
self.plot['LonDeg26'] = data_list[4]
            self.plot['LonMin26'] = data_list[5]
            self.plot['LonSec26'] = data_list[6]
self.plot['Altitude26'] = data_list[7]
self.plot['North26'] = data_list[8]
print(self.plot)
def initTree(self):
self.tree = {'16BX':0, '16BY':0, '16TX':0, '16TY':0,
'26BX':0, '26BY':0, '26TX':0, '26TY':0,
'16LX':0, '16LY':0, '16RX':0, '16RY':0,
'26LX':0, '26LY':0, '26RX':0, '26RY':0,
'Dist':0.0, 'DeltaH':0.0, 'HT':0.0, 'DBH':0.0, 'Gamma':0.0, 'Altitude':0.0}
def addTree(self, x, y):
# 0: Add 16Base
# 1: Add 26Base => Calculate Dist, DeltaH, Altitude, Gamma, 1.3m, set panel16, panel 26
# change direction to "W"
# 2: Add 16Left => change direction to "E"
# 3: Add 16Right => change direction to "W"
# 4: Add 26Left => change direction to "E"
# 5: Add 26Right => calculate DBH
# 6:Add 16Top
# 7: Add 26Top => calculate HT, change to -1
if self.addTree == 0:
self.tree['16BX'] = x
self.tree['16BY'] = y
self.addTree += 1
self.showStep(f'[S2: {self.e2}Base]')
elif self.addTree == 1:
self.tree['26BX'] = x
self.tree['26BY'] = y
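            # Both cameras have now marked the tree base; intersecting the two sight lines
            # (slopes k1, k2 from camera heights e1, e2) yields the horizontal distance and
            # the ground-height offset of the tree relative to the cameras.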
k1 = np.tan(-self.zenithRadians(self.tree['16BY']))
k2 = np.tan(-self.zenithRadians(self.tree['26BY']))
ix, iy = self.interactBase(k1, self.e1, k2, self.e2)
self.tree['Dist'] = -ix
self.tree['DeltaH'] = iy
self.tree['Altitude'] = (self.plot['Altitude16'] + self.plot['Altitude26']) / 2 + iy
gamma1 = self.horizonAngle(self.tree['16BX'], self.plot['GCP16'], self.plot['North16'])
gamma2 = self.horizonAngle(self.tree['26BX'], self.plot['GCP26'], self.plot['North26'])
self.tree['Gamma'] = (gamma1 + gamma2) / 2
dbh16pos = self.getDBHPosition(self.e1, ix, iy)
dbh26pos = self.getDBHPosition(self.e2, ix, iy)
self.panel1.imgShow.changeVLock(int(dbh16pos))
self.panel2.imgShow.changeVLock(int(dbh26pos))
self.changeDirection('NW')
self.showStep(f'[S3:{self.e1}Left]')
self.addTree += 1
elif self.addTree == 2:
self.tree['16LX'] = x
self.tree['16LY'] = y
self.changeDirection('NE')
self.showStep(f'[S4:{self.e1}Right]')
self.addTree += 1
elif self.addTree == 3:
self.tree['16RX'] = x
self.tree['16RY'] = y
self.changeDirection('NW')
self.showStep(f'[S5:{self.e2}Left]')
self.addTree += 1
elif self.addTree == 4:
self.tree['26LX'] = x
self.tree['26LY'] = y
self.changeDirection('NE')
self.showStep(f'[S6:{self.e2}Right]')
self.addTree += 1
elif self.addTree == 5:
self.tree['26RX'] = x
self.tree['26RY'] = y
self.changeDirection('NW')
dbh16 = self.getDBH(self.tree['16LX'], self.tree['16RX'], self.tree['Dist'])
dbh26 = self.getDBH(self.tree['26LX'], self.tree['26RX'], self.tree['Dist'])
self.tree['DBH'] = (dbh16 + dbh26) / 2
self.panel1.imgShow.changeVLock(-1)
self.panel2.imgShow.changeVLock(-1)
self.showStep(f'[S7:{self.e1}Top]')
self.addTree += 1
elif self.addTree == 6:
self.tree['16TX'] = x
self.tree['16TY'] = y
self.showStep(f'[S8:{self.e2}Top]')
self.addTree += 1
elif self.addTree == 7:
self.tree['26TX'] = x
self.tree['26TY'] = y
k1 = np.tan(-self.zenithRadians(self.tree['16TY']))
k2 = np.tan(-self.zenithRadians(self.tree['26TY']))
ht1 = - self.tree['Dist'] * k1 + self.e1 - self.tree['DeltaH']
ht2 = - self.tree['Dist'] * k2 + self.e2 - self.tree['DeltaH']
self.tree['HT'] = (ht1 + ht2) / 2
self.showStep('[Done&Paste]')
text = f'{self.e1}\t{self.e2}\t'
for key, value in self.tree.items():
text += f'{value}\t'
self.cb = QApplication.clipboard()
self.cb.clear(mode=self.cb.Clipboard)
self.cb.setText(text[:-1], mode=self.cb.Clipboard)
print(text)
self.addTree = -1
QMessageBox.information(self, 'Tree Info',
f'The info of this measured tree is:\n'
f"Distance\t{round(self.tree['Dist'],2)}\n"
f"ΔH \t{round(self.tree['DeltaH'],2)}\n"
f"HT \t{round(self.tree['HT'],2)}\n"
f"DBH \t{round(self.tree['DBH'],2)}\n"
f"North \t{round(self.tree['Gamma'],2)}\n"
f"Altitude\t{round(self.tree['Altitude'],2)}\n"
f"Please open Excel and Ctrl+V to paste the result\n"
f"Press 'N' to measure a new tree")
def changeDirection(self, direct='NE'):
self.panel1.imgShow.corner = direct
self.panel2.imgShow.corner = direct
self.panel1.imgShow.update()
self.panel2.imgShow.update()
def showStep(self, string):
self.panel1.updateProgress(string)
self.panel2.updateProgress(string)
@staticmethod
def interactBase(k1, b1, k2, b2):
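        # Intersection point of the two sight lines y = k1*x + b1 and y = k2*x + b2.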
x = (b2-b1)/(k1-k2)
y = k1*x + b1
return x, y
@staticmethod
def zenithRadians(ypos):
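        # Elevation angle of image row ypos: row 1344 (half of the 2688-px height) is the horizon; each row spans 180/2688 degrees.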
return np.radians((1344 - (ypos - 1))/2688*180)
@staticmethod
def horizonAngle(xpos, gcp, north):
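        # Azimuth of image column xpos: columns span 0-360 degrees over the 5376-px width; gcp undoes the
        # column shift applied during conversion and the EXIF north bearing is subtracted to reference true north.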
gamma = (xpos+gcp) / 5376 * 360 - north
if gamma < 0:
gamma += 360
if gamma > 360:
gamma -= 360
return gamma
@staticmethod
def getDBHPosition(e, bx, by):
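        # Image row at which breast height (1.3 m) appears on the trunk, given camera height e,
        # horizontal position bx (negative distance) and ground offset by.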
angles = -np.degrees(np.arctan((e-1.3-by)/(0-bx)))
pos = 1344 - angles / 180 * 2688
return pos
@staticmethod
def getDBH(lx, rx, dist):
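        # The trunk of radius r subtends omiga at the camera with sin(omiga/2) = r/(dist + r),
        # so r = dist*sin(omiga/2)/(1 - sin(omiga/2)); DBH = 2r, and *100 converts metres to cm.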
omiga = np.radians(abs(rx-lx) / 5376 * 360)
sin_half_omiga = np.sin(omiga / 2)
dbh = 2 * sin_half_omiga * dist / (1 - sin_half_omiga) * 100
return dbh
@staticmethod
def getSectorStarts(num, width):
total = 100
"""
res = set()
sector_range = []
for i in range(num):
temp = random.uniform(0, total - width)
#print(f'[Sector{i}] -> First guess start point <{temp}> in total {list((idx, idx + width) for idx in res)}')
#print(f'[Sector{i}] -> Check if overlapped {[(temp >= idx and temp <= idx + width) or (temp + width >= idx and temp + width <= idx + width) for idx in res]}')
while any((temp >= idx and temp <= idx + width) or (temp + width >= idx and temp + width <= idx + width) for idx in res):
temp = random.uniform(0, total - width)
#print(f"[Sector{i}] -> repeat guess start point <{temp}> in total {list((idx, idx + width) for idx in res)}")
res.add(temp)
for idx in res:
sector_range.append((idx, idx + width))
"""
st = random.uniform(0, total)
ed = (st + width) % total
sector_range = [(st, ed)]
return sector_range
class ImgPanel(QWidget):
xScrollChanged = pyqtSignal(int)
yScrollChanged = pyqtSignal(int)
exifData = pyqtSignal(float, list)
def __init__(self, parent=None, ht=1.6, sector_range=None):
super().__init__(parent)
self.refX = 0
self.refY = 0
self.scrollX = 0
self.scrollY = 0
self.ht = ht
self.sector_range = sector_range
self.converter = Converter(self)
self.setupUI(ht)
self.functionConnector()
def setupUI(self, ht):
self.layout = QVBoxLayout(self)
self.infoLayout = QHBoxLayout()
self.htName = QLabel(f"[{ht}m]:")
self.imgName = QLabel('D:/xxx.Jpg')
self.infoBtn = QPushButton('Info')
self.changeImgBtn = QPushButton('OpenImg')
self.convertBtn = QPushButton('Convert')
self.saveImgBtn = QPushButton('save')
self.imgShow = ImgShow(self)
#self.scrollArea = QScrollArea()
self.scrollArea = Scroller()
self.scrollArea.setWidget(self.imgShow)
self.hBar = self.scrollArea.horizontalScrollBar()
self.vBar = self.scrollArea.verticalScrollBar()
self.scrollX = self.hBar.value()
self.scrollY = self.vBar.value()
self.infoLayout.addWidget(self.htName)
self.infoLayout.addWidget(self.imgName)
self.infoLayout.addStretch(0)
self.infoLayout.addWidget(self.infoBtn)
self.infoLayout.addWidget(self.changeImgBtn)
self.infoLayout.addWidget(self.convertBtn)
self.infoLayout.addWidget(self.saveImgBtn)
self.layout.addLayout(self.infoLayout)
self.layout.addWidget(self.scrollArea)
def functionConnector(self):
self.imgShow.mouseClicked.connect(self.updateRef)
self.infoBtn.clicked.connect(self.showInfo)
self.changeImgBtn.clicked.connect(self.loadImg)
self.convertBtn.clicked.connect(self.convertImg)
self.saveImgBtn.clicked.connect(self.imgShow.saveImg)
self.hBar.valueChanged.connect(self.emitX)
self.vBar.valueChanged.connect(self.emitY)
self.converter.sigOut.connect(self.updateProgress)
self.imgShow.saver.sigOut.connect(self.updateProgress)
def emitX(self):
self.xScrollChanged.emit(self.hBar.value())
def emitY(self):
self.yScrollChanged.emit(self.vBar.value())
def setXScroll(self, value):
self.hBar.setValue(min(self.hBar.maximum(),value))
def setYScroll(self, value):
self.vBar.setValue(min(self.vBar.maximum(),value))
def wheelEvent(self, event):
if event.modifiers() == Qt.ShiftModifier:
self.hBar.wheelEvent(event)
else:
self.vBar.wheelEvent(event)
def getExifInfo(self):
self.show_str = ''
self.clip_str = ''
# gcp, lat.D, lat.M, lat.S, Lon.D, Lon.M, Lon.S, Altitude, North
# 0, 1, 2, 3, 4, 5, 6, 7, 8
self.data_list = [0, 0, 0, 0.0, 0, 0, 0.0, 0.0, None]
if self.imgShow.img_path is not None:
img = Image.open(self.imgShow.img_path)
exif_human = {ExifTags.TAGS[k]: v for k, v in img._getexif().items() if k in ExifTags.TAGS}
gps_info = exif_human['GPSInfo']
self.data_list[0] = self.refX
lat_label = gps_info[1] # N
lat_exif = gps_info[2] # ((45, 1), (56, 1), (4682, 100))
self.data_list[1] = lat_exif[0][0]
self.data_list[2] = lat_exif[1][0]
self.data_list[3] = lat_exif[2][0]/lat_exif[2][1]
self.show_str += f"{lat_exif[0][0]}°{lat_exif[1][0]}′{self.data_list[3]}″ {lat_label}\n"
lon_label = gps_info[3] # W
lon_exif = gps_info[4] # ((66, 1), (38, 1), (3938, 100))
self.data_list[4] = lon_exif[0][0]
self.data_list[5] = lon_exif[1][0]
            self.data_list[6] = lon_exif[2][0]/lon_exif[2][1]
self.show_str += f"{lon_exif[0][0]}°{lon_exif[1][0]}′{self.data_list[6]}″ {lon_label}\n"
alt_exif = gps_info[6] # (3512, 100)
self.data_list[7] = alt_exif[0]/alt_exif[1]
self.show_str += f"altitude:{self.data_list[7]}\n"
if 17 in gps_info.keys():
north_angle = gps_info[17] # (1125, 10)
self.data_list[8] = north_angle[0]/north_angle[1]
self.show_str += f"north:{self.data_list[8]}°"
else:
self.show_str += f"north: missing"
for i in self.data_list:
self.clip_str += f'{i}\t'
#self.clip_str = f'{self.refX}\t{lat_exif[0][0]}\t{lat_exif[1][0]}\t{lat_exif[2][0]/lat_exif[2][1]}\t{lon_exif[0][0]}\t{lon_exif[1][0]}\t{lon_exif[2][0]/lon_exif[2][1]}\t{alt_exif[0]/alt_exif[1]}'
def showInfo(self):
self.getExifInfo()
try:
QMessageBox.information(self, "GPS Info", self.show_str)
self.cb = QApplication.clipboard()
self.cb.clear(mode=self.cb.Clipboard)
self.cb.setText(self.clip_str[:-1], mode=self.cb.Clipboard)
except:
QMessageBox.information(self, "GPS Info", "Please use raw images!")
def loadImg(self, choose=True):
if not isinstance(choose, str):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self, 'QFileDialog.getOpenFileName()', '',
'Images (*.png *.jpeg *.jpg *.bmp *.gif)', options=options)
self.convertBtn.setEnabled(True)
else:
fileName=choose
if fileName:
image = QImage(fileName)
if image.isNull():
QMessageBox.information(self, "Image Viewer", "Cannot load %s." % fileName)
return
self.imgName.setText(fileName[:10]+'...'+fileName[-20:])
self.imgShow.img_path = fileName
self.imgShow.addImg = True
self.imgShow.update()
if 'converted_imgs/' not in fileName:
self.getExifInfo()
def convertImg(self):
if self.imgShow.img_path is not None:
self.converter.set_param(self.imgShow.img_path, append=self.ht,
zenith=89, equalize=False, gcp=self.refX,
sector_range=self.sector_range)
self.data_list[0] = self.refX
self.exifData.emit(self.ht, self.data_list)
self.converter.start()
else:
print('empty img')
def updateRef(self, X, Y):
self.refX = X
self.refY = Y
def updateProgress(self, percent):
if isinstance(percent, str):
self.htName.setText(percent)
else:
# finished processing
base = os.path.basename(self.imgShow.img_path)
file, ext = os.path.splitext(base)
#self.loadImg(f'converted_imgs/{file}_M{self.refX}{ext}')
self.loadImg(f'converted_imgs/{file}_D{self.refX}{ext}')
self.htName.setText(f"|{self.ht}m|:")
self.convertBtn.setEnabled(False)
class Scroller(QScrollArea):
def __init__(self):
QScrollArea.__init__(self)
def wheelEvent(self, ev):
if ev.type() == QEvent.Wheel:
ev.ignore()
class Converter(QThread):
sigOut = pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.img_path = None
self.mode = 'direct'
self.append = 1.6
self.zenith = 85
self.equalize = False
self.gcp = 0
def set_param(self, img_path, append=1.6, zenith=85, equalize=False, gcp=0, mode='direct', sector_range=None):
self.img_path = img_path
self.append = append
self.zenith = zenith
self.equalize = equalize
self.gcp = gcp
self.mode = mode
self.sector_range = sector_range
def run(self):
        '''Conversion thread; see mercator.py for documentation of the (disabled) mercator mode.'''
img = imageio.imread(self.img_path)
h, w, d = img.shape
self.sigOut.emit('[10%..]')
if self.mode == 'mercator':
'''
if self.equalize:
img = equalize_hist(img)
self.sigOut.emit('[20%..]')
h_id = np.arange(h)
w_id = np.arange(w)
self.sigOut.emit('[30%..]')
angle_id = h/2 - h_id - 0.5
angle = angle_id / angle_id.max() * 90 # degree
self.sigOut.emit('[40%..]')
select = abs(angle) <= self.zenith
select_angle = angle[select]
self.sigOut.emit('[45%..]')
select_img = img[select, :, :]
select_h, _, _ = select_img.shape # (2538, 5376, 3)
self.sigOut.emit('[50%..]')
mecator_coord = h / 2 * np.log(np.tan(np.deg2rad(45 + select_angle / 2)))
mecator_coord_zero = mecator_coord.max() - mecator_coord
self.sigOut.emit('[55%..]')
f = interp1d(mecator_coord_zero, np.arange(select_h), fill_value="extrapolate")
self.sigOut.emit('[60%..]')
xnew = np.arange(0, np.ceil(mecator_coord.max())*2, 1)
mecator_id = f(xnew) # related img_h id in raw image (85 degree selected)
self.sigOut.emit('[65%..]')
# table to refer mecator_id -> zenith angle
f_angle = interp1d(mecator_coord_zero, select_angle, fill_value="extrapolate")
mecator_angle = f_angle(xnew)
self.sigOut.emit('[70%..]')
ww, hh = np.meshgrid(w_id, mecator_id) # shape (8404, 5376)
self.sigOut.emit('[75%..]')
img_out = np.zeros((*hh.shape, 3))
for i in range(0 ,3):
img_out[:,:,i] = ndimage.map_coordinates(select_img[:,:,i],
np.array([hh,ww]),output=float,order=1)
self.sigOut.emit(f'[{70 + 9*(i+1)}%..]')
img_out = np.hstack((img_out[:,self.gcp:, :], img_out[:,0:self.gcp, :]))
self.sigOut.emit('[98%..]')
base = os.path.basename(self.img_path)
file, ext = os.path.splitext(base)
imageio.imwrite(f'converted_imgs/{file}_M{self.gcp}{ext}', img_out)
self.m2a = mecator_angle'''
pass
else:
self.sigOut.emit('[40%..]')
img_out = np.hstack((img[:,self.gcp:, :], img[:,0:self.gcp, :]))
# add sector here
if self.sector_range is not None:
or1 = [197,90,17]
or2 = [244,177,131]
or3 = [247,203,172]
or4 = [257,229,213]
for sector in self.sector_range:
st, ed = sector
st = st * w / 100
ed = ed * w / 100
img_out[:, int(max(st-3, 0)), :] = or4
img_out[:, int(max(st-2, 0)), :] = or3
img_out[:, int(max(st-1, 0)), :] = or2
img_out[:, int(max(st , 0)), :] = or1
img_out[:, int(min(ed+3, w-1)), :] = or4
img_out[:, int(min(ed+2, w-1)), :] = or3
img_out[:, int(min(ed+1, w-1)), :] = or2
img_out[:, int(min(ed, w-1)), :] = or1
self.sigOut.emit('[60%..]')
base = os.path.basename(self.img_path)
file, ext = os.path.splitext(base)
self.sigOut.emit('[80%..]')
imageio.imwrite(f'converted_imgs/{file}_D{self.gcp}{ext}', img_out)
self.sigOut.emit('[99%..]')
self.sigOut.emit(True)
class ImgShow(QWidget):
mouseClicked = pyqtSignal(object, object)
emitPoint = pyqtSignal(object, object)
def __init__(self, parent=None):
super().__init__(parent)
self.x = 0
self.y = 0
self.w = self.frameGeometry().width() # 100
self.h = self.frameGeometry().height() # 30
self.corner = 'NE' # North East(default)
self.vLock = -1
self.img_path = None
self.addImg = False
self.isMoving = True
self.leftPressed = False
self.setMinimumSize(5376, 2000)
self.tempPix = QPixmap(5376, 2000)
self.pix = QPixmap(5376, 2000)
self.pix.fill(Qt.white)
self.saver = Saver(self)
self.setMouseTracking(True)
self.setCursor(Qt.BlankCursor)
self.functionConnector()
def functionConnector(self):
pass
def paintEvent(self, event):
painter = QPainter(self)
if self.addImg:
imgPixmap = QPixmap.fromImage(QImage(self.img_path))
imgH = imgPixmap.height()
imgW = imgPixmap.width()
self.pix = QPixmap(imgW, imgH)
self.tempPix = QPixmap(imgW, imgH)
self.resize(imgW, imgH)
p = QPainter(self.pix)
p.setPen(QColor(255, 255,0))
p.drawPixmap(0, 0, imgPixmap)
p.drawLine(0, imgH/2, imgW, imgH/2)
self.addImg = False
fm = QFontMetrics(QFont('SimSun', 10))
pw = fm.width(f'{self.x},{self.y}')
ph = fm.height()
if self.corner == 'NE':
text_x, text_y = self.x, self.y
rect_x, rect_y = self.x-pw, self.y-20
elif self.corner == 'NW':
text_x, text_y = self.x-pw, self.y
rect_x, rect_y = self.x, self.y-20
elif self.corner == 'SE':
text_x, text_y = self.x, self.y+ph
rect_x, rect_y = self.x-pw, self.y-20
else:
text_x, text_y = self.x-pw, self.y+ph
rect_x, rect_y = self.x-pw, self.y-20
if self.isMoving:
# copy the existing pix so the previous crosshair overlay is effectively cleared
self.tempPix = self.pix.copy()
qp = QPainter(self.tempPix)
qp.setPen(QColor(255, 0,0))
qp.setFont(QFont('SimSun', 10))
qp.drawLine(self.x, 0, self.x, self.h)
qp.drawLine(0, self.y, self.w, self.y)
qp.drawText(text_x, text_y, f'{self.x},{self.y}')
qp.drawRect(rect_x, rect_y, pw, 20)
painter.drawPixmap(0, 0, self.tempPix)
else:
qp = QPainter(self.pix)
qp.setPen(QColor(255, 0,0))
qp.setFont(QFont('SimSun', 10))
qp.drawLine(self.x, self.y-20, self.x, self.y+20)
qp.drawLine(self.x-20, self.y, self.x+20, self.y)
qp.drawText(text_x, text_y, f'{self.x},{self.y}')
painter.drawPixmap(0, 0, self.pix)
def saveImg(self):
if self.img_path is not None:
self.saver.set_param(self.img_path, self.pix.toImage())
self.saver.start()
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.leftPressed = True
self.x = event.x()
if self.vLock == -1:
self.y = event.y()
else:
self.y = self.vLock
self.mouseClicked.emit(self.x, self.y)
self.w = self.frameGeometry().width() # 100
self.h = self.frameGeometry().height() # 30
self.isMoving=False
self.update()
def mouseMoveEvent(self,event):
if not self.leftPressed:
self.x = event.x()
if self.vLock == -1:
self.y = event.y()
else:
self.y = self.vLock
self.w = self.frameGeometry().width() # 100
self.h = self.frameGeometry().height() # 30
self.update()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
self.leftPressed = False
self.isMoving=True
if window.addTree == -1:
self.copy2Clipboard()
else:
self.emitPoint.emit(self.x, self.y)
def changeDirection(self, event):
if event.key() == Qt.Key_W:
if 'S' in self.corner:
self.corner = self.corner.replace('S', 'N')
elif event.key() == Qt.Key_S:
if 'N' in self.corner:
self.corner = self.corner.replace('N', 'S')
elif event.key() == Qt.Key_A:
if 'E' in self.corner:
self.corner = self.corner.replace('E', 'W')
elif event.key() == Qt.Key_D:
if 'W' in self.corner:
self.corner = self.corner.replace('W', 'E')
self.update()
def changeVLock(self, value):
self.vLock = value
self.update()
def copy2Clipboard(self):
text = f'{self.x}\t{self.y}'
self.cb = QApplication.clipboard()
self.cb.clear(mode=self.cb.Clipboard)
self.cb.setText(text, mode=self.cb.Clipboard)
class Saver(QThread):
sigOut = pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
# https://stackoverflow.com/questions/46945997/creating-qpixmaps-in-a-thread
# QPixmaps cannot be created outside the main thread,
# but it's pretty straightforward to instead use an image loader object,
# move it to one or more background threads, in which it loads a QImage,
# and then sends that to the main thread for later use
self.qimage = QImage()
self.img_path = 'result_imgs/rua.png'
def set_param(self, img_path, qimage):
self.img_path = img_path
self.qimage = qimage
def run(self):
self.sigOut.emit('[20%..]')
base = os.path.basename(self.img_path)
self.sigOut.emit('[40%..]')
file, ext = os.path.splitext(base)
self.sigOut.emit('[60%..]')
#self.pix.save(f"result_imgs/{file}_C{ext}", "PNG")
self.qimage.save(f"result_imgs/{file}_C{ext}", "PNG")
self.sigOut.emit('[Saved]')
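# A minimal usage sketch (not part of the original file): it illustrates the pattern the
# comment above describes - build the QImage on the GUI side, then hand it to the Saver
# thread so the disk write happens off the main thread. Names below mirror this module.
#
#   saver = Saver()
#   saver.sigOut.connect(print)                      # e.g. show the '[20%..]' progress strings
#   saver.set_param('result_imgs/demo.png', some_widget.pix.toImage())
#   saver.start()                                    # run() executes in the worker thread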
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.showMaximized()
sys.exit(app.exec_())
``` |
{
"source": "joatom/blog-resources",
"score": 3
} |
#### File: blog-resources/blog_translator/blog_translator.py
```python
from transformers import MarianMTModel, MarianTokenizer
def translate(blog_in, blog_out, header = []):
# load pretrained model and tokenizer
model_name = 'Helsinki-NLP/opus-mt-de-en'
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
# load the German blog post
f_in = open(blog_in, "r")
src_text = f_in.readlines()
f_in.close()
# preprocessing
## a bare line break (\n) gets translated to "I don't know.", so replace it with an explicit token first:
src_text = [s.replace('\n',' <<eol>>') for s in src_text]
## remove code block
code = []
inside_code_block = False
for i, line in enumerate(src_text):
if line.startswith('```') and not inside_code_block:
# entering codeblock
inside_code_block = True
code += [line]
src_text[i] = '<<code_block>>'
elif inside_code_block and not line.startswith('```'):
code += [line]
src_text[i] = '<<code_block>>'
elif inside_code_block and line.startswith('```'):
# leaving code block
code += [line]
src_text[i] = '<<code_block>>'
inside_code_block = False
# translate
translated = model.generate(**tokenizer.prepare_seq2seq_batch(src_text, return_tensors="pt"))
tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
# postprocessing
## replace code_blog tags with code
for i, line in enumerate(tgt_text):
if line == '<<code_block>>':
tgt_text[i] = code.pop(0)
## remove the eol (but keep empty list entries / lines)
tgt_text = [s.replace('<<eol>>', '',) for s in tgt_text]
## remove space between ] ( to get the md link syntax right
tgt_text = [s.replace('] (', '](',) for s in tgt_text]
# add header
tgt_text = header + tgt_text
# write english blog post
with open(blog_out, 'w') as f_out:
for line in tgt_text:
f_out.write("%s\n" % line)
f_out.close()
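# Hypothetical invocation (file names are placeholders, not from the original repo):
# translate a German markdown post to English while keeping fenced code blocks untouched.
#
#   if __name__ == '__main__':
#       translate('posts/2021-01-01-mein-post.md',
#                 'posts/2021-01-01-my-post-en.md',
#                 header=['---', 'lang: en', '---'])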
``` |
{
"source": "joatom/watchtrain",
"score": 3
} |
#### File: app/topics/topic.py
```python
from typing import List
from fastapi import WebSocket
from app.topics.agent import Agent
from app.topics.producer_agent import ProducerAgent
from app.topics.consumer_agent import ConsumerAgent
class Topic():
"""
Defines a topic and setup connection managers for producers and consumers.
"""
def __init__(self):
self.producer_manager = _ConnectionManager()
self.consumer_manager = _ConnectionManager()
async def connect(self, agent):
if isinstance(agent, ProducerAgent):
await self.producer_manager.connect(agent)
else:
await self.consumer_manager.connect(agent)
def disconnect(self, agent):
if isinstance(agent, ProducerAgent):
self.producer_manager.disconnect(agent)
else:
self.consumer_manager.disconnect(agent)
class _ConnectionManager:
"""
Connection Manager from the fastapi tutorial:
https://fastapi.tiangolo.com/advanced/websockets/#handling-disconnections-and-multiple-clients)
"""
def __init__(self):
self.active_connections: List[Agent] = []
async def connect(self, agent: Agent):
await agent.websocket.accept()
self.active_connections.append(agent)
def disconnect(self, agent: Agent):
self.active_connections.remove(agent)
async def send_personal_message(self, message: str, websocket: WebSocket):
await websocket.send_text(message)
async def broadcast(self, message: str, verbose: int = 0):
for connection in self.active_connections:
if connection.verbose >= verbose:
await connection.websocket.send_text(message)
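# Hypothetical wiring sketch (not from the original repo): it assumes ConsumerAgent simply
# wraps a WebSocket, which is not shown in this file.
#
#   from fastapi import FastAPI, WebSocket, WebSocketDisconnect
#
#   app = FastAPI()
#   topic = Topic()
#
#   @app.websocket('/consume')
#   async def consume(websocket: WebSocket):
#       agent = ConsumerAgent(websocket)          # assumed constructor
#       await topic.connect(agent)
#       try:
#           while True:
#               await websocket.receive_text()    # keep the socket open
#       except WebSocketDisconnect:
#           topic.disconnect(agent)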
``` |
{
"source": "joatuapp/joatu-django",
"score": 2
} |
#### File: joatu-django/demands/apps.py
```python
from django.apps import AppConfig
class DemandsConfig(AppConfig):
name = 'demands'
def ready(self):
import demands.signals
super(DemandsConfig, self).ready()
```
#### File: joatu-django/demands/signals.py
```python
from django.db.models.signals import post_save
from .models import Demand, DemandHub
from profiles.models import ProfileHub
from hubs.models import HubGeolocation
from utils.utils import coordinates_calculation, distance_calculation
def demand_created_or_updated(sender, update_fields, **kwargs):
instance = kwargs['instance']
if kwargs['created']:
lat_cal, lng_cal = coordinates_calculation(
instance.number,
instance.street,
instance.postal_code,
instance.city
)
DemandHub.objects.create(demand=instance, lat=lat_cal, lng=lng_cal)
demand_geo = DemandHub.objects.get(demand=instance)
hub_selected = ProfileHub.objects.get(profile=instance.requester).hub
hub_geo = HubGeolocation.objects.get(hub=hub_selected)
distance = distance_calculation(demand_geo, hub_geo)
demand_geo.hub = hub_selected
demand_geo.distance_km = distance
demand_geo.save()
post_save.connect(demand_created_or_updated, sender=Demand)
```
#### File: joatu-django/hubs/apps.py
```python
from django.apps import AppConfig
class HubsConfig(AppConfig):
name = 'hubs'
def ready(self):
super(HubsConfig, self).ready()
import hubs.signals
```
#### File: joatu-django/hubs/signals.py
```python
from django.db.models.signals import post_save
from utils.utils import coordinates_calculation
from profiles.models import ProfileGeolocation
from .models import HubGeolocation, Hub
def hub_created_or_updated(sender, update_fields, **kwargs):
instance = kwargs['instance']
lat_cal, lng_cal = coordinates_calculation(
instance.number,
instance.street,
instance.postal_code,
instance.city,
instance.country
)
if kwargs['created']:
HubGeolocation.objects.create(hub=instance, lat=lat_cal, lng=lng_cal)
else:
a = HubGeolocation.objects.filter(hub=instance)
if a.exists():
a.update(lat=lat_cal, lng=lng_cal)
else:
# the hub was updated before any geolocation record existed, so create one now
HubGeolocation.objects.create(hub=instance, lat=lat_cal, lng=lng_cal)
post_save.connect(hub_created_or_updated, sender=Hub)
```
#### File: joatu-django/joatu/middleware.py
```python
import re
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth import logout
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
EXEMPT_URLS= [re.compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS += [re.compile(url) for url in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware:
def __init__(self, get_response):
self.get_response= get_response
def __call__(self, request):
response = self.get_response(request)
return response
def process_view(self, request, view_func, view_args, view_kwargs):
assert hasattr(request, 'user')
path = request.path_info.lstrip('/')
#if not request.user.is_authenticated:
# if not any(url.match(path) for url in EXEMPT_URLS):
# return redirect(settings.LOGIN_URL)
url_is_exempt= any(url.match(path) for url in EXEMPT_URLS)
if path == reverse('accounts:logout').lstrip('/'):
logout(request)
if request.user.is_authenticated and url_is_exempt:
return redirect(settings.LOGIN_REDIRECT_URL)
elif request.user.is_authenticated or url_is_exempt:
return None
else:
return redirect(settings.LOGIN_URL)
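# Illustrative settings excerpt (an assumption, not taken from this project): the middleware
# above expects LOGIN_EXEMPT_URLS to be an iterable of regex strings matched against the
# request path with the leading slash stripped.
#
#   LOGIN_URL = '/accounts/login/'
#   LOGIN_EXEMPT_URLS = (
#       r'^accounts/signup/$',
#       r'^api/token/$',
#   )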
class ProfileRequiredMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.path == '/rest-auth/logout/':
return None
if request.user.is_authenticated:
if not request.user.profileIsCreated:
if request.path == '/profiles/create/' or request.path =='/api/profiles/create/':
return None
else:
return redirect('/profiles/create')
else:
return None
else:
return None
#def process_request( self, request ):
# print( "func" )
# print(request.user.profileIsCreated)
# if request.user.is_authenticated and not request.user.profileIsCreated:
# return redirect('/profiles/create/')
# else:
# return None
```
#### File: joatu-django/ledger/signals.py
```python
from django.db.models.signals import post_save
from .models import Transaction, TransactionIsOffer, Operations, ProfileWallet, TransactionIsDemand
from demands.models import Demand
def transaction_created(sender, update_fields, **kwargs):
instance = kwargs['instance']
if kwargs['created']:
if instance.transaction_type == 'OF':
TransactionIsOffer.objects.create(transaction=instance, offer=instance.transaction_id)
elif instance.transaction_type == 'DE':
a = Demand.objects.get(pk=instance.transaction_id)
TransactionIsDemand.objects.create(transaction=instance, demand=a)
else:
pass
wallet_from = ProfileWallet.objects.get(profile=instance.profile_from)
wallet_from.wallet -= instance.amount
wallet_from.save()
wallet_to = ProfileWallet.objects.get(profile=instance.profile_to)
wallet_to.wallet += instance.amount
wallet_to.save()
Operations.objects.create(
profile=instance.profile_from,
transaction=instance,
debit=instance.amount,
balance=wallet_from.wallet,
)
Operations.objects.create(
profile=instance.profile_to,
transaction=instance,
credit=instance.amount,
balance=wallet_to.wallet,
)
post_save.connect(transaction_created, sender=Transaction)
```
#### File: joatu-django/ledger/views.py
```python
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from rest_framework.generics import ListAPIView
from ledger.models import Operations
from profiles.models import Profile
from ledger.serializers import OperationsSerializers
# Create your views here.
class ListGlobalOperationsView(ListAPIView):
queryset=Operations.objects.all().order_by('start')
serializer_class=OperationsSerializers
def get_queryset(self):
operations = Operations.objects.all()
return operations
class ListUserOperationsView(ListAPIView):
queryset=Operations.objects.all().order_by('start')
serializer_class=OperationsSerializers
def get_queryset(self):
user = Profile.objects.get(user= self.request.user)
operations = Operations.objects.filter(profile=user)
return operations
```
#### File: joatu-django/projects/signals.py
```python
from django.db.models.signals import post_save
from django.db.models import ObjectDoesNotExist
from .models import Project, ProjectStatus, ProjectIsValidated, ProjectHub
from profiles.models import ProfileHub
from hubs.models import HubGeolocation
from utils.utils import coordinates_calculation, distance_calculation
def project_created_or_updated(sender, update_fields, **kwargs):
instance = kwargs['instance']
if kwargs['created']:
ProjectStatus.objects.create(project=instance)
ProjectIsValidated.objects.create(project=instance)
lat_cal, lng_cal = coordinates_calculation(
instance.number,
instance.street,
instance.postal_code,
instance.city
)
ProjectHub.objects.create(project=instance, lat=lat_cal, lng=lng_cal)
try:
project_geo = ProjectHub.objects.get(project=instance)
hub_selected = ProfileHub.objects.get(profile=instance.organizer).hub
hub_geo = HubGeolocation.objects.get(hub=hub_selected)
distance = distance_calculation(project_geo, hub_geo)
project_geo.hub = hub_selected
project_geo.distance_km = distance
project_geo.save()
except ObjectDoesNotExist:
pass
post_save.connect(project_created_or_updated, sender=Project)
```
#### File: rest_api/ledger/views.py
```python
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework.generics import ListAPIView
from ledger.models import Operations
from profiles.models import Profile
from .serializers import OperationsSerializer
class OperationsViewSet(ReadOnlyModelViewSet):
queryset = Operations.objects.all().order_by('id')
serializer_class = OperationsSerializer
class ListGlobalOperationsView(ListAPIView):
queryset = Operations.objects.all().order_by('start')
serializer_class = OperationsSerializer
def get_queryset(self):
operations = Operations.objects.all()
return operations
class ListUserOperationsView(ListAPIView):
queryset = Operations.objects.all().order_by('start')
serializer_class = OperationsSerializer
def get_queryset(self):
user = Profile.objects.get(user=self.request.user)
operations = Operations.objects.filter(profile=user)
return operations
```
#### File: rest_api/projects/serializers.py
```python
from rest_framework import serializers
from projects.models import (
Project,
ProjectVolunteers,
ProjectVolunteersRegistration,
ProjectAttendees,
ProjectAttendeesRegistration,
ProjectDiscussion,
ProjectAnswerDiscussion,
ProjectHub,
)
class ProjectVolunteersRegistrationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectVolunteersRegistration
fields = ('url', 'profile', 'project_volunteers', 'project_volunteers_ref')
def create(self, validated_data):
project_volunteers = ProjectVolunteers.objects.get(pk=validated_data['project_volunteers_ref'])
registration = ProjectVolunteersRegistration.objects.create(
project_volunteers=project_volunteers,
**validated_data
)
count = ProjectVolunteersRegistration.objects.filter(
project_volunteers=project_volunteers
).count()
project_volunteers.registered = count
project_volunteers.save()
return registration
class ProjectVolunteersSerializer(serializers.HyperlinkedModelSerializer):
volunteers_registration = ProjectVolunteersRegistrationSerializer(many=True, read_only=True)
class Meta:
model = ProjectVolunteers
fields = (
'url',
'id',
'project',
'role',
'description',
'seats',
'registered',
'minimum_registration',
'volunteers_registration',
)
read_only_fields = ('registered', 'project', 'id')
class ProjectAttendeesRegistrationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectAttendeesRegistration
fields = ('url', 'profile', 'project_attendees', 'project_attendees_ref')
def create(self, validated_data):
project_attendees = ProjectAttendees.objects.get(pk=validated_data['project_attendees_ref'])
registration = ProjectAttendeesRegistration.objects.create(project_attendees=project_attendees, **validated_data)
count = ProjectAttendeesRegistration.objects.filter(project_attendees=project_attendees).count()
project_attendees.registered = count
project_attendees.save()
return registration
class ProjectAttendeesSerializer(serializers.HyperlinkedModelSerializer):
attendees_registration = ProjectAttendeesRegistrationSerializer(many=True, read_only=True)
class Meta:
model = ProjectAttendees
fields = (
'url',
'id',
'project',
'seats',
'registered',
'attendees_registration',
'minimum_registration',
)
read_only_fields = ('registered', 'project',)
class ProjectAnswerDiscussionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectAnswerDiscussion
fields = ('url', 'id', 'discussion_ref', 'discussion', 'text', 'profile', 'created', 'updated')
read_only_fields = ('discussion', 'profile')
def create(self, validated_data):
project_discussion = ProjectDiscussion.objects.get(pk=validated_data['discussion_ref'])
answer = ProjectAnswerDiscussion.objects.create(discussion=project_discussion, **validated_data)
return answer
class ProjectDiscussionSerializer(serializers.HyperlinkedModelSerializer):
answer_discussion_project = ProjectAnswerDiscussionSerializer(many=True, read_only=True)
class Meta:
model = ProjectDiscussion
fields = (
'url',
'id',
'project',
'project_ref',
'title',
'text',
'profile',
'created',
'updated',
'answer_discussion_project',
)
read_only_fields = ('profile', 'project', 'id')
def create(self, validated_data):
project = Project.objects.get(pk=validated_data['project_ref'])
new_discussion = ProjectDiscussion.objects.create(project=project, **validated_data)
return new_discussion
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
attendees = ProjectAttendeesSerializer()
volunteers = ProjectVolunteersSerializer(many=True)
discussion_project = ProjectDiscussionSerializer(many=True, read_only=True)
### cause of the error :
#serializers.HyperlinkedRelatedField(
# many=True,
# view_name='discussion_project',
# read_only=True
#)
class Meta:
model = Project
fields = ('url', 'id', 'name', 'start',
'end', 'description', 'category',
'sub_category', 'oth_category', 'oth_sub_cat','place_name', 'number', 'street',
'postal_code', 'city', 'organizer', 'created',
'updated', 'project_type', 'attendees',
'volunteers', 'discussion_project')
read_only_fields = ('organizer', 'id')
def create(self, validated_data):
attendees_data = validated_data.pop('attendees')
volunteers_data = validated_data.pop('volunteers')
new_project = Project.objects.create(**validated_data)
if validated_data['project_type'] == 'CO':
ProjectAttendees.objects.create(project=new_project, **attendees_data)
elif validated_data['project_type'] == 'CP':
for volunteer_data in volunteers_data:
ProjectVolunteers.objects.create(project=new_project, **volunteer_data)
else:
ProjectAttendees.objects.create(project=new_project, **attendees_data)
for volunteer_data in volunteers_data:
ProjectVolunteers.objects.create(project=new_project, **volunteer_data)
return new_project
class ProjectShortSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Project
fields = ('url', 'id', 'name', 'start', 'created', 'updated',)
class ProjectHubSerializer(serializers.HyperlinkedModelSerializer):
project = ProjectSerializer()
class Meta:
model = ProjectHub
fields = ('project', 'distance_km', 'lat', 'lng')
```
#### File: rest_api/projects/views.py
```python
from django_filters import rest_framework as filters
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.status import HTTP_204_NO_CONTENT
from .serializers import (
ProjectSerializer,
ProjectVolunteersSerializer,
ProjectVolunteersRegistrationSerializer,
ProjectAttendeesSerializer,
ProjectAttendeesRegistrationSerializer,
ProjectDiscussionSerializer,
ProjectAnswerDiscussionSerializer,
)
from ..permissions import ProjectIsOwnerOrReadOnly
from profiles.models import Profile
from projects.models import (
Project,
ProjectVolunteers,
ProjectVolunteersRegistration,
ProjectAttendees,
ProjectAttendeesRegistration,
ProjectDiscussion,
ProjectAnswerDiscussion,
)
class ProjectViewSet(ModelViewSet):
queryset = Project.objects.all().order_by('name')
serializer_class = ProjectSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('organizer', 'name')
permission_classes = (IsAuthenticatedOrReadOnly,
ProjectIsOwnerOrReadOnly,)
def get_context_data(self, *args, **kwargs):
ctx = super(ProjectViewSet, self).get_context_data(*args, **kwargs)
profile = Profile.objects.get(user=self.request.user)
ctx["user_id"] = profile.pk
print(ctx)
return ctx
def create(self, request, *args, **kwargs):
response = super(ProjectViewSet, self).create(request, *args, **kwargs)
return response
def perform_create(self, serializer):
profile = Profile.objects.get(user=self.request.user)
serializer.save(organizer=profile)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
owner = False
profile = Profile.objects.get(user=self.request.user)
if instance.organizer == profile:
owner = True
response = {"user_Is_Owner": owner}
response.update(serializer.data)
return Response(response)
class ProjectVolunteersViewSet(ModelViewSet):
queryset = ProjectVolunteers.objects.all().order_by('id')
serializer_class = ProjectVolunteersSerializer
class ProjectVolunteersRegistrationViewSet(ModelViewSet):
queryset = ProjectVolunteersRegistration.objects.all().order_by('id')
serializer_class = ProjectVolunteersRegistrationSerializer
def perform_create(self, serializer):
profile = Profile.objects.get(user=self.request.user)
serializer.save(profile=profile)
def destroy(self, request, *args, **kwargs):
try:
instance = self.get_object()
project_volunteers = ProjectVolunteers.objects.get(pk=instance.project_volunteers_ref)
self.perform_destroy(instance)
count = ProjectVolunteersRegistration.objects.filter(project_volunteers=project_volunteers).count()
project_volunteers.registered = count
project_volunteers.save()
except Exception as e:
print(e)
pass
return Response(status=HTTP_204_NO_CONTENT)
class ProjectAttendeesViewSet(ModelViewSet):
queryset = ProjectAttendees.objects.all().order_by('id')
serializer_class = ProjectAttendeesSerializer
def create(self, request, *args, **kwargs):
response = super(ProjectAttendeesViewSet, self).create(request, *args, **kwargs)
return response
class ProjectAttendeesRegistrationViewSet(ModelViewSet):
queryset = ProjectAttendeesRegistration.objects.all().order_by('id')
serializer_class = ProjectAttendeesRegistrationSerializer
def perform_create(self, serializer):
profile = Profile.objects.get(user=self.request.user)
serializer.save(profile=profile)
def destroy(self, request, *args, **kwargs):
try:
instance = self.get_object()
project_attendees = ProjectAttendees.objects.get(pk=instance.project_attendees_ref)
self.perform_destroy(instance)
count = ProjectAttendeesRegistration.objects.filter(project_attendees=project_attendees).count()
project_attendees.registered = count
project_attendees.save()
except Exception as e:
print(e)
pass
return Response(status=HTTP_204_NO_CONTENT)
class ProjectDiscussionViewSet(ModelViewSet):
queryset = ProjectDiscussion.objects.all().order_by('id')
serializer_class = ProjectDiscussionSerializer
def perform_create(self, serializer):
profile = Profile.objects.get(user=self.request.user)
serializer.save(profile=profile)
class ProjectAnswerDiscussionViewSet(ModelViewSet):
queryset = ProjectAnswerDiscussion.objects.all().order_by('id')
serializer_class = ProjectAnswerDiscussionSerializer
def create(self, request, *args, **kwargs):
response = super(ProjectAnswerDiscussionViewSet, self).create(request, *args, **kwargs)
return response
def perform_create(self, serializer):
profile = Profile.objects.get(user=self.request.user)
serializer.save(profile=profile)
```
#### File: joatu-django/utils/utils.py
```python
from math import pi, acos, cos, sin
from decimal import Decimal
import requests
import urllib.parse
from django.conf import settings
def coordinates_calculation(number, street, postal_code, city, country=''):
api_key = settings.GOOGLE_API_KEY
main_api = "https://maps.googleapis.com/maps/api/geocode/json?"
address = number + ' ' + street + ' ' + postal_code + ' ' + city + ' ' + country
address = address.strip()
url = main_api + urllib.parse.urlencode({'address': address}) + '&key=' + api_key
json_data = requests.get(url).json()
if not json_data['results']:
return -1, -1
lat = Decimal(json_data['results'][0]['geometry']['location']['lat'])
lng = Decimal(json_data['results'][0]['geometry']['location']['lng'])
return lat, lng
def distance_calculation(offer, hub):
project_lat_conv = offer.lat * Decimal(pi) / 180
project_lng_conv = offer.lng * Decimal(pi) / 180
hub_lat_conv = hub.lat * Decimal(pi) / 180
hub_lng_conv = hub.lng * Decimal(pi) / 180
r = 6371
d = r * acos(
cos(project_lat_conv) * cos(hub_lat_conv) * cos(hub_lng_conv - project_lng_conv) +
sin(project_lat_conv) * sin(hub_lat_conv)
)
distance_km = round(Decimal(d), 3)
return distance_km
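# Quick sanity check (illustrative only, not from the original module): both helpers accept any
# objects exposing Decimal ``lat``/``lng`` attributes, so a SimpleNamespace works outside Django.
#
#   from types import SimpleNamespace
#   paris = SimpleNamespace(lat=Decimal('48.8566'), lng=Decimal('2.3522'))
#   london = SimpleNamespace(lat=Decimal('51.5074'), lng=Decimal('-0.1278'))
#   print(distance_calculation(paris, london))   # roughly 343-344 km by the spherical law of cosines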
``` |
{
"source": "joausaga/appointment-seeker",
"score": 3
} |
#### File: joausaga/appointment-seeker/notifier.py
```python
import logging
import smtplib, ssl
def notify_appointment(sender_email_address, sender_email_password,
email_host_server, email_host_server_port,
receiver_email_address):
message = """\
Subject: Hi there
This message is sent from Python."""
context = ssl.create_default_context()
with smtplib.SMTP_SSL(email_host_server, email_host_server_port, context=context) as server:
server.login(sender_email_address, sender_email_password)
server.sendmail(sender_email_address, receiver_email_address, message)
logging.info('Notification email sent')
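# Hypothetical call (all values are placeholders): SMTP_SSL connects on the host's SSL port,
# commonly 465, and the sender account must allow SMTP authentication.
#
#   notify_appointment('[email protected]', 'app-password',
#                      'smtp.example.com', 465,
#                      '[email protected]')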
``` |
{
"source": "Joaxin/django-vue-stories",
"score": 2
} |
#### File: lime/api/views.py
```python
from rest_framework import generics
from ..models import Message
from .serializers import MessageSerializer
from rest_framework import viewsets
class MessageViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows messages to be viewed or edited.
"""
queryset = Message.objects.filter(is_public=True)
http_method_names = ['get', 'post', 'head']
serializer_class = MessageSerializer
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
class MessageView(APIView):
permission_classes = [AllowAny]
def get(self, request, format=None):
content = {
'status': 'request was permitted'
}
return Response(content)
``` |
{
"source": "Joaxin/django-welogs",
"score": 2
} |
#### File: django-welogs/aphor/models.py
```python
from django.db import models
from django.utils import timezone
from django.conf import settings
from django.urls import reverse
from taggit.managers import TaggableManager
from markdownx.models import MarkdownxField
from slugify import slugify
# from users.models import CustomUser
# Create your models here.
# _*_ coding: utf-8 _*_
import os
import shutil
# Create your models here.
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.forms import ClearableFileInput, ModelForm, forms, TextInput
from django.template import loader
from django import forms
from django.utils.safestring import mark_safe
from skeleton.settings import MEDIA_ROOT, WEB_HOST_MEDIA_URL, TEMP_IMAGE_DIR, MODEL_IMAGE_DIR, MODEL_MEDIA_URL
class ImageInput(ClearableFileInput):
template_name = "aphor/img_upload.html"
def render(self, name, value, attrs=None, renderer=None):
context = self.get_context(name, value, attrs)
template = loader.get_template(self.template_name).render(context)
return mark_safe(template)
class UploadImageList(TextInput):
template_name = "aphor/img_upload_list.html"
def render(self, name, value, attrs=None, renderer=None):
context = self.get_context(name, value, attrs)
template = loader.get_template(self.template_name).render(context)
return mark_safe(template)
class UploadModel(models.Model):
images = models.FileField('图片', upload_to="static/upload_multi_img/")
images_list = models.CharField('', max_length=10000)
def save(self, *args, **kwargs):
# keep the images field itself out of the database, we only need the URL list
self.images = ""
model_images = []
# print(self.images_list)
# move the pictures from the temporary directory into the permanent directory
for root, dirs, files in os.walk(TEMP_IMAGE_DIR):
# print('files:', files)
for file in files:
if os.path.join(WEB_HOST_MEDIA_URL, file) in self.images_list:
shutil.move(TEMP_IMAGE_DIR + file, MODEL_IMAGE_DIR + file)
model_images.append(os.path.join(MODEL_MEDIA_URL, file))
# clear all pictures left in the temporary directory
shutil.rmtree(TEMP_IMAGE_DIR)
os.mkdir(TEMP_IMAGE_DIR)
# replace the model's original image URLs with the URLs under the permanent directory
self.images_list = str(model_images).replace('[', "").replace(']', '')
# the parent class save() must be called, otherwise the data is not persisted
super().save(*args, **kwargs)
# delete the image files belonging to a deleted model instance
@receiver(post_delete, sender=UploadModel)
def delete_upload_files(sender, instance, **kwargs):
image_list = getattr(instance, 'images_list', '')
if not image_list:
return
else:
# strip the stray quote characters around the URLs in image_list
list = image_list.replace("'", "").replace("'", "").split(",")
# print("000000", list)
# delete the image files belonging to the deleted model instance
for image in list:
# extract the file name
delete_image_name = image.split('/')[-1]
# print("9999999", delete_image_name)
os.remove(MODEL_IMAGE_DIR + delete_image_name)
class UploadForm(ModelForm):
images = forms.FileField(label="图片", widget=ImageInput, help_text="按住ctrl多选,最多9张", required=False)
images_list = forms.CharField(label='', widget=UploadImageList, help_text='', required=False)
class Meta:
model = UploadModel
fields = ['images', 'images_list']
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='published')
# Create your models here.
class Topic(models.Model):
topic_name = models.CharField(default="", max_length=50, verbose_name="话题名称")
topic_blogs_num = models.IntegerField(verbose_name="相关微博数", default=0, null=True, blank=True)
t_publish = models.DateTimeField(default=timezone.now, verbose_name="创建时间")
class Meta:
verbose_name = "微博话题"
verbose_name_plural = verbose_name
def __str__(self):
return self.topic_name[:20]
class MiniBlog(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="发布用戶",
related_name='miniblog_set',default=1)
STATUS_CHOICES = (('draft', 'Draft'), ('published', 'Published'))
status = models.CharField(verbose_name='发布状态',max_length=10, choices=STATUS_CHOICES, default='published')
t_publish = models.DateTimeField(verbose_name='发布时间',default=timezone.now)
t_update = models.DateTimeField(verbose_name='修改时间', auto_now=True)
content = MarkdownxField(verbose_name="正文 :", blank=True, max_length=2500)
tags = TaggableManager()
images = models.ForeignKey(UploadModel, on_delete=models.CASCADE, verbose_name="图集", null=True, blank=True )
num_click = models.IntegerField(verbose_name="微博点击数", default=0, null=True, blank=True)
num_likes = models.IntegerField(verbose_name="点赞数", default=0, null=True, blank=True)
num_comment = models.IntegerField(verbose_name="评论数", default=0, null=True, blank=True)
num_fav= models.IntegerField(verbose_name="收藏数", default=0, null=True, blank=True)
has_pic = models.IntegerField(verbose_name="是否有图", default=0, null=True, blank=True)
need_unfold = models.IntegerField(choices=((0, "不需展开"), (1, "需要展开且未展开"), (2, "需展开且已展开")), default = 0)
related_topic = models.ForeignKey(Topic, on_delete=models.CASCADE, null=True, blank=True, verbose_name="微博相关话题")
objects = models.Manager() # default manager
published = PublishedManager() # custom manager that only returns published posts
class Meta:
verbose_name = "微博"
verbose_name_plural = verbose_name
ordering = ('-t_publish',)
def __str__(self):
return '{} - {} : {}'.format(self.user,self.t_publish, self.content[:25])
def shorttext(self):
return self.content[:10] + "..."
def judgefold(self):
foldlen = 5
if len(self.content) < foldlen:
self.need_unfold = 0 # no fold needed
else:
self.need_unfold = 1
```
#### File: django-welogs/aphor/views.py
```python
import json
import os
import uuid
from django.contrib.auth.decorators import login_required
# from django.http import HttpResponse
from django.shortcuts import render, HttpResponse, HttpResponseRedirect
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from skeleton.settings import WEB_HOST_MEDIA_URL, TEMP_IMAGE_DIR
@login_required
@csrf_exempt
def upload_temp_image(request):
result = {}
if request.method == 'POST':
files = request.FILES
if files:
image_url_list = []
for file_name in files:
image_url_list.append(handle_uploaded_file(files.get(file_name))) # 处理上传文件
result = {'msg': 'success', "image_list": image_url_list, }
else:
result = {'msg': 'failed', "image_list": []}
return HttpResponse(json.dumps(result, ensure_ascii=False), content_type="application/json,charset=utf-8") # 返回json
# handle the uploaded file
def handle_uploaded_file(file):
# split the file name to extract the extension
extension = os.path.splitext(file.name)
# rename the file with uuid4 so files with identical names do not overwrite each other
# note: create media/tempimg under the project root first, or create the directory from Python
file_name = '{}{}'.format(uuid.uuid4(), extension[1])
with open(TEMP_IMAGE_DIR + file_name, 'wb+') as destination:
for chunk in file.chunks(): # chunked writes keep large files from exhausting memory
destination.write(chunk)
# return the image URL
return os.path.join(WEB_HOST_MEDIA_URL, file_name)
import re
from django.shortcuts import render, redirect
from django.db.models import Q
from django.contrib.auth import authenticate, login
from itertools import chain
# Create your views here.
from django.views.generic.base import View
from django.http import JsonResponse
from django.urls import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import MiniBlog, Topic
# from operations.models import BlogComment
# from users.models import UserProfile
# from operations.models import UserFollowed, UserFav, UserGood, UserMessage
# from .forms import BlogContentForm, SearchContentForm
# when the user is logged in, return posts from people they follow; otherwise return all posts
class MainpageView(View):
def get(self, request):
all_blogs = MiniBlog.published.all().order_by('-t_publish')
paginator = Paginator(all_blogs, 5)
page = request.GET.get('page')
# paginate the queryset
try:
blogs = paginator.page(page)
except PageNotAnInteger:
blogs = paginator.page(1)
except EmptyPage:
blogs = paginator.page(paginator.num_pages)
all_topic = Topic.objects.all().order_by('-topic_blogs_num')[:10]
if request.user.is_authenticated:
return render(request, 'aphor/stella.html', {'page': page,'blogs': blogs, 'all_topic': all_topic})
else:
return render(request, 'aphor/stella.html', {'page': page,'blogs': "请登录", 'all_topic': all_topic})
# from op.models import BlogComment
# class BlogTextPageView(View):
# def get(self, request, blog_id):
# blog = MiniBlog.objects.get(id=int(blog_id))
# blog.num_click += 1
# blog.save()
# # return HttpResponse("to be continued for your work微博+评论区")
# # (新增)
# comments = BlogComment.objects.filter(blog=blog)
# return render(request, 'aphor/stella_detail.html', {
# 'blog': blog,
# 'comments': comments
# })
from .forms import BlogContentForm
class SendBlogView(View):
def post(self, request):
blog_form = BlogContentForm(request.POST, request.FILES or None)
if blog_form.is_valid():
blog = MiniBlog()
print(blog.id)
blog.user = request.user
blog.content = request.POST.get("content", "")
blog.image = blog_form.cleaned_data["image"]
if blog.image:
blog.has_pic = 1
# turn @user mentions into profile links
re_str = blog.content
users_list = []
users_str = re.findall(r'@(.+?)\s', re_str)
users_str_1 = re.findall(r'^(.+?)@', re_str[::-1])
if users_str_1:
users_str.append(users_str_1[0][::-1])
for user_str in users_str:
user = UserProfile.objects.filter(nickname=user_str)
if user:
users_list.append(user[0])
re_str = re_str.replace('@' + user_str,
'<a href="/user/' + str(user[0].id) + '/">' + '@' + user_str + '</a>')
blog.content = re_str
blog.save()
print(blog.id)
# 新建@提醒
for user in users_list:
message = UserMessage()
message.user = user
message.blog_id = blog.id
message.save()
user.message_nums = UserMessage.objects.filter(user=user, has_read=False).count()
user.save()
# blog_form.save()
# self.topic_test(blog.content, blog)
# turn #topic# markers into topic links
# re_str = blog.content
# topics_str = re.findall(r'#(.+?)#', re_str)
# for topic_str in topics_str:
# topic = Topic.objects.filter(topic_name=topic_str)
# if topic:
# re_str = re_str.replace('#' + topic_str + '#', '<a href="/weibo/topic/' + str(
# topic[0].id) + '">' + '#' + topic_str + '#' + '</a>')
# blog.content = re_str
blog.save()
return HttpResponseRedirect(reverse('aphor:mainpage'))
# # def quote_list(request):
# # object_list = Quote.published.all()
# # paginator = Paginator(object_list, 3) # 每页显示3篇文章
# # page = request.GET.get('page')
# # try:
# # quotes = paginator.page(page)
# # except PageNotAnInteger:
# # # 如果page参数不是一个整数就返回第一页
# # quotes = paginator.page(1)
# # except EmptyPage:
# # # 如果页数超出总页数就返回最后一页
# # quotes = paginator.page(paginator.num_pages)
# # return render(request, 'aphor/quote/list.html', {'page': page, 'quotes': quotes})
# from django.views.generic import ListView
# class QuoteListView(ListView):
# queryset = Quote.published.all() # 实际上,可以不使用这个变量,通过指定model = Quote,这个CBV就会去进行Quote.objects.all()查询获得全部文章。
# # 如果不设置context_object_name参数,默认的变量名称是object_list
# context_object_name = 'quotes'
# paginate_by = 10
# template_name = 'aphor/list.html'
# def quote_list(request, tag_slug=None):
# object_list = Quote.published.all()
# tag = None
# if tag_slug:
# tag = get_object_or_404(Tag, slug=tag_slug)
# object_list = object_list.filter(tags__in=[tag])
# paginator = Paginator(object_list, 20) # 3 quotes in each page
# page = request.GET.get('page')
# try:
# quotes = paginator.page(page)
# except PageNotAnInteger:
# quotes = paginator.page(1)
# except EmptyPage:
# quotes = paginator.page(paginator.num_pages)
# return render(request, 'aphor/list.html', {'page': page, 'quotes': quotes, 'tag': tag})
# from .models import Quote, Comment
# from .forms import EmailQuoteForm, CommentForm
# from django.db.models import Count
# # Posts on the same subject tend to share tags, so we can recommend posts ranked by how many tags they share.
# # The steps are:
# # collect all tags of the current post
# # fetch every post carrying any of those tags
# # drop the current post from that list to avoid showing it again
# # order by the number of shared tags
# # break ties between posts with the same tag count by publish time
# # cap the total number of recommendations
# def quote_detail(request, quote_id, quote):
# quote = get_object_or_404(Quote, slug=quote, status="published", id=quote_id)
# # 列出文章对应的所有活动的评论
# comments = quote.comments.filter(active=True)
# # 以Comments类中定义的外键的related_name属性的名称作为管理器,对quote对象执行查询从而得到了所需的QuerySet。
# # 新增评论的功能, 初始化了一个new_comment变量为None,用于标记一个新评论是否被创建。
# new_comment = None
# if request.method == "POST":
# comment_form = CommentForm(data=request.POST)
# if comment_form.is_valid():
# # 通过表单直接创建新数据对象,但是不要保存到数据库中
# # commit=False,则数据对象会被创建但不会被写入数据库,便于在保存到数据库之前对对象进行一些操作
# new_comment = comment_form.save(commit=False)
# # 设置外键为当前文章
# new_comment.quote = quote
# # 将评论数据对象写入数据库
# new_comment.save()
# # save()方法仅对ModelForm生效,因为Form类没有关联到任何数据模型。
# else:
# comment_form = CommentForm() # 创建空白表单
# # 显示相近Tag的文章列表
# quote_tags_ids = quote.tags.values_list('id',flat=True)
# # 选出所有包含上述标签的文章并且排除当前文章
# similar_tags = Quote.published.filter(tags__in=quote_tags_ids).exclude(id=quote.id)
# similar_quotes = similar_tags.annotate(same_tags=Count('tags')).order_by('-same_tags','-t_publish')[:4]
# return render(request, 'aphor/detail.html',
# {'quote': quote,
# 'comments': comments,
# 'new_comment': new_comment,
# 'comment_form': comment_form,
# 'similar_quotes': similar_quotes})
# from .forms import EmailQuoteForm,SearchForm
# from django.core.mail import send_mail
# def quote_share(request, quote_id, quote):
# # 通过id 获取 quote 对象
# quote = get_object_or_404(Quote, slug=quote, id=quote_id, status='published')
# sent = False
# if request.method == "POST":
# # 表单被提交
# form = EmailQuoteForm(request.POST)
# if form.is_valid():
# # 表单字段通过验证
# cd = form.cleaned_data
# quote_url = request.build_absolute_uri(quote.get_absolute_url())
# subject = '{} ({}) recommends you reading "{}"'.format(cd['name'], cd['email'], quote.title)
# message = 'Read "{}" at <a href="{}"></a> \n\n{}\'s comments:{}'.format(quote.title, quote_url, cd['name'], cd['comments'])
# send_mail(subject, message, '<EMAIL>', [cd['to']], fail_silently=False)
# sent = True
# else:
# form = EmailQuoteForm()
# return render(request, 'aphor/share.html', {'quote': quote, 'form': form, 'sent': sent})
# # from django.contrib.quotegres.search import SearchVector, SearchQuery, SearchRank, TrigramSimilarity
# # def quote_search(request):
# # form = SearchForm()
# # query = None
# # results = []
# # if 'query' in request.GET:
# # form = SearchForm(request.GET)
# # if form.is_valid():
# # query = form.cleaned_data['query']
# # results = Quote.objects.annotate(
# # similarity=TrigramSimilarity('title', query),
# # ).filter(similarity__gt=0.3).order_by('-similarity')
# # return render(request,
# # 'aphor/search.html',
# # {'form': form,
# # 'query': query,
# # 'results': results})
# # from django.contrib.auth.mixins import LoginRequiredMixin
# # from django.urls import reverse_lazy
# # from django.views import generic
# # from django.contrib.messages.views import SuccessMessageMixin
# # from aphor.models import Quote, Person, Category
# # Create your views here.
# # class Master(generic.ListView):
# # def get_context_data(self, *, object_list=None, **kwargs):
# # context = super().get_context_data(**kwargs)
# # context['persons_widget'] = Person.objects.all()
# # context['categories_widget'] = Category.objects.all()
# # return context
# # class Index(Master):
# # model = Quote
# # ordering = ['-pk']
# # paginate_by = 1
# # context_object_name = 'quotes_object_list'
# # template_name = 'index.html'
# # class Persons(Master):
# # model = Person
# # paginate_by = 1
# # context_object_name = 'persons_object_list'
# # template_name = 'persons.html'
# # class Random(Index):
# # paginate_by = False
# # def get_queryset(self):
# # return Quote.objects.order_by("?")[:10]
# # class QuotesByPerson(Index):
# # def get_queryset(self):
# # return Quote.objects.filter(person=self.kwargs['person_pk'])
# # class QuotesByCategory(Index):
# # def get_queryset(self):
# # return Quote.objects.filter(category=self.kwargs['category_pk'])
# def QuotesByCategory(request, category_pk):
# category = get_object_or_404(Category, pk=category_pk)
# object_list = Quote.published.filter(category_id = category_pk)
# paginator = Paginator(object_list, 20) # 3 quotes in each page
# page = request.GET.get('page')
# try:
# quotes = paginator.page(page)
# except PageNotAnInteger:
# quotes = paginator.page(1)
# except EmptyPage:
# quotes = paginator.page(paginator.num_pages)
# return render(request, 'aphor/list.html', {'page': page, 'quotes': quotes, 'category': category, 'tag':None})
# # class CategoryCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView, Master):
# # fields = ["title"]
# # model = Category
# # success_url = '/create/category'
# # success_message = "Category was created successfully"
# # class PersonCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView, Master):
# # fields = ["full_name", "biography", "picture"]
# # model = Person
# # success_url = '/create/person'
# # success_message = 'Person was created successfully'
# # class QuoteCreateView(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView, Master):
# # fields = ["person", "category", "content"]
# # model = Quote
# # success_url = '/create/quote'
# # success_message = 'Quote was created successfully'
```
#### File: django-welogs/users/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.db import models
from datetime import datetime
# pillow and django-imagekit process the avatar and generate fixed-size thumbnails; templates render it with src="{{ user.avatar.url }}"
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
# extend Django's built-in User model fields
# subclassing AbstractUser (Django's built-in user class) lets us add profile fields; AbstractUser already provides: password, username, first_name, last_name, email, last_login, is_superuser, is_staff, is_active, date_joined
class CustomUser(AbstractUser):
nickname = models.CharField(max_length=30, blank=True, null=True, verbose_name='昵称')
birthday = models.DateField(verbose_name="生日", null=True, blank=True)
GENDER_CHOICES = (
('M', '男'),
('F', '女'),
('U','保密')
)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES,verbose_name="性别",
default='U')
address = models.CharField(max_length=200, verbose_name="地址", null=True, blank=True)
mobile = models.CharField(max_length=11, null=True, blank=True, verbose_name="手机")
# extra field: personal website
link = models.URLField('个人网址', blank=True, help_text='提示:网址必须填写以http开头的完整形式')
# extra field: avatar; upload_to must be a relative path (uploads land under media), only the image path is stored, read in templates via user.avatar.url
avatar = ProcessedImageField(upload_to='avatar',default='avatar/default.png',verbose_name='头像',
processors=[ResizeToFill(100, 100)], # size of the processed image
format='JPEG', # format of the processed image
options={'quality': 95} # quality of the processed image
)
fans_num = models.IntegerField(verbose_name="粉丝数", default=0)
blog_num = models.IntegerField(verbose_name="微博数", default=0)
follow_num = models.IntegerField(verbose_name="关注数", default=0)
t_publish = models.DateField(default=datetime.now, verbose_name="加入时间")
message_nums = models.IntegerField(verbose_name='未读消息数', default=0)
# manually save the avatar file (User.save() fails to store it when running under IIS)
def save_avatar(self,upload_image):
import os
import uuid
from django.conf import settings
# create a folder named after the user
upload_path = os.path.join(settings.MEDIA_ROOT, 'avatar', self.username)
if not os.path.exists(upload_path):
try:
os.makedirs(upload_path)
except:
pass
# generate a random file name
uuid_str_name = uuid.uuid4().hex+'.jpg'
# save the file
with open(os.path.join(upload_path,uuid_str_name), 'wb+') as file:
for chunk in upload_image.chunks():
file.write(chunk)
return uuid_str_name
# report whether this user's email address has been verified, so they can be reminded to verify it
def account_verified(self):
# is_authenticated comes from django.contrib.auth and is only meaningful for users logged in through Django's own login flow
if self.is_authenticated:
# assumption: EmailAddress is django-allauth's record of verified addresses
from allauth.account.models import EmailAddress
result = EmailAddress.objects.filter(email=self.email)
if len(result):
return result[0].verified
return False
# names shown in the admin site
class Meta:
verbose_name = '用户信息'
verbose_name_plural = verbose_name # plural form of the model name; without it Django appends an 's' automatically
ordering = ['-id']
# string displayed for records of this model in the admin
def __str__(self):
return self.username
# email verification code model
class EmailVerifyRecord(models.Model):
code = models.CharField(max_length=20, verbose_name="邮箱验证码")
email = models.CharField(max_length=40, verbose_name="邮箱")
send_type = models.CharField(choices=(("register", "注册"), ("findback", "找回密码")), max_length=20)
send_time = models.DateField(default=datetime.now, verbose_name="发送时间")
class Meta:
verbose_name = "邮箱验证码"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}({1})".format(self.code, self.email)
# carousel banner shown on the logged-out landing page
class Banner(models.Model):
title = models.CharField(max_length=100, verbose_name="标题")
index = models.IntegerField(default=1000, verbose_name="播放顺序")
t_publish = models.DateField(default=datetime.now, verbose_name="添加时间")
image = models.ImageField(upload_to="banner/%Y/%m", verbose_name="轮播图", max_length=100)
jmp_url = models.URLField(max_length=200, verbose_name="跳转地址")
class Meta:
verbose_name = "轮播图"
verbose_name_plural = verbose_name
def __str__(self):
return self.title
```
#### File: django-welogs/users/views.py
```python
import json
import base64
from django.shortcuts import render
from django.http import HttpResponse
from . import models
# Create your views here.
# django.contrib.auth handles users and permissions; auth can grant per-user permissions.
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
# with the login_required decorator, only logged-in users can access their profile page
@login_required
def profile(request):
# an AUTH_USER_MODEL instance representing the currently logged-in user.
user = request.user
return render(request, 'account/profile.html', {'user': user})
@login_required # only logged-in users may update their profile
@csrf_exempt # disable CSRF protection for this view even though the global middleware is enabled in settings.
def profile_update(request):
# request.is_ajax(): checks whether the request headers carry X-Requested-With
if request.is_ajax():
# request.POST.get('') returns None when the key is missing, while request.POST[] raises an error
key = request.POST.get('key')
if key == 'link':
link = request.POST['link']
username = request.POST['username']
models.CustomUser.objects.filter(email=username).update(link=link)
link = models.CustomUser.objects.filter(email=username).first().link
linkJson = {'link': link}
return HttpResponse(json.dumps(linkJson))
elif key == 'avatar':
username = request.POST['username']
# a ModelForm could replace this hand-written code for storing the uploaded file
user_profile = models.CustomUser.objects.filter(email=username).first()
user_profile.avatar = request.FILES.get('avatar')
user_profile.save()
url = user_profile.avatar.url
dataJson = {'url': url}
return HttpResponse(json.dumps(dataJson))
``` |
{
"source": "Joaxin/GitComments",
"score": 3
} |
#### File: Crawler/google/google.py
```python
import logging
import time
from urllib.parse import quote_plus

import chardet
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning

URL_SEARCH = "https://{domain}/search?hl={language}&q={query}&btnG=Search&gbv=1"
URL_NUM = "https://{domain}/search?hl={language}&q={query}&btnG=Search&gbv=1&num={num}"
def search_page(query, language='en', num=None, start=0, pause=2):
"""
Google search
:param query: Keyword
:param language: Language
:return: result
"""
time.sleep(pause)
# this function was lifted out of MagicGoogle's class; get_random_domain() is assumed to be
# supplied by the surrounding module and to return a random Google domain such as 'www.google.com'
domain = get_random_domain()
if num is None:
url = URL_SEARCH
url = url.format(
domain=domain, language=language, query=quote_plus(query))
else:
url = URL_NUM
url = url.format(
domain=domain, language=language, query=quote_plus(query), num=num)
try:
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
r = requests.get(url=url,
allow_redirects=False,
verify=False,
timeout=30)
charset = chardet.detect(r.content)
content = r.content.decode(charset['encoding'])
return content
except Exception as e:
logging.error(e)
return None
from MagicGoogle import MagicGoogle
import pprint
# Or PROXIES = None
PROXIES = [{
'http': 'http://192.168.2.207:1080',
'https': 'http://192.168.2.207:1080'
}]
# Or MagicGoogle()
mg = MagicGoogle(PROXIES)
# Crawling the whole page
result = mg.search_page(query='python')
# Crawling url
for url in mg.search_url(query='python'):
pprint.pprint(url)
# Output
# 'https://www.python.org/'
# 'https://www.python.org/downloads/'
# 'https://www.python.org/about/gettingstarted/'
# 'https://docs.python.org/2/tutorial/'
# 'https://docs.python.org/'
# 'https://en.wikipedia.org/wiki/Python_(programming_language)'
# 'https://www.codecademy.com/courses/introduction-to-python-6WeG3/0?curriculum_id=4f89dab3d788890003000096'
# 'https://www.codecademy.com/learn/python'
# 'https://developers.google.com/edu/python/'
# 'https://learnpythonthehardway.org/book/'
# 'https://www.continuum.io/downloads'
# Get {'title','url','text'}
for i in mg.search(query='python', num=1):
pprint.pprint(i)
# Output
# {'text': 'The official home of the Python Programming Language.',
# 'title': 'Welcome to Python .org',
# 'url': 'https://www.python.org/'}
# Author: howie6879
# Source: https://www.jianshu.com/p/a4d13ba26107 (Jianshu)
# Copyright remains with the author; contact the author and credit the source before reposting.
```
#### File: Crawler/pyquotes/scraper.py
```python
from bs4 import BeautifulSoup
import requests
import random
# website used :
parent_link = 'https://www.quoteload.com'
# for quote of the day :
day_quote = 'https://www.brainyquote.com/quote_of_the_day'
def get_quotes(person: (None, str) = None, category: (None, str) = None):
"""
usage : call with person name and category of quotes
returns : a list of tuple (quote,author)
"""
# function called without any argument:
if person is None and category is None:
print('too few arguments to get quotes')
return None # can be modified to throw some exception instead
result = []
if person is not None: # person's name is provided
# formatting the name to fit as per URL
Person = '-'.join(person.strip().split())
person_link = requests.get(
parent_link + '/quotes/authors/' + Person).text
soup_obj = BeautifulSoup(person_link, 'lxml')
for interest in soup_obj.find_all('div', class_='card-body text-center'):
quote = interest.p.find('a', class_='href-noshow').text
tag = interest.p.find('a', class_='category-tag').text
result.append((quote, person))
if category is not None: # category's name is provided
category_link = requests.get(
parent_link + '/quotes/categories/' + category
).text
soup_obj = BeautifulSoup(category_link, 'lxml')
for interest in soup_obj.find_all('div', class_='card-body text-center'):
quote = interest.p.find('a', class_='href-noshow').text
tag = interest.p.find('a', class_='category-tag').text
author = interest.p.find('a', class_='quote-author').text
if (quote, author) not in result:
result.append((quote, author))
return result # a list of tuples
def get_quote(person: (None, str) = None, category: (None, str) = None):
lst = get_quotes(person=person, category=category)
# error will be handled when list is empty (codes for the same to be added
# soon)
# random.choice avoids the IndexError that randint(0, len(lst)) could raise at the upper bound
return random.choice(lst)
def random_quote():
"""
usage : call without arguments
returns : tuple of random quote,author
"""
link = requests.get(day_quote).text
soup_obj = BeautifulSoup(link, 'lxml')
for interest in soup_obj.find_all('div', class_='clearfix'):
quote = interest.find('a', title='view quote').text
author = interest.find('a', title='view author').text
# return (quote, author)
return (quote, author)
# print(get_quote(person="<NAME>",category="legal"))
# print(get_quote(input('enter the name : ').strip()))
# print(random_quote())
```
#### File: Crawler/pyquotes/scrapper.py
```python
from bs4 import BeautifulSoup
import requests
import random
# website used :
parent_link = 'https://www.quoteload.com'
def get_quotes(person: (None, str) = None, category: (None, str) = None):
# function called without any argument:
if isinstance(person, type(None)) and isinstance(category, type(None)):
print('too few arguments to get quotes')
return None # can be modified to throw some exception instead
result = []
if not isinstance(person, type(None)): # person's name is provided
# formatting the name to fit as per URL
Person = '-'.join(person.strip().split())
person_link = requests.get(
parent_link + '/quotes/authors/' + Person).text
soup_obj = BeautifulSoup(person_link, 'lxml')
for interest in soup_obj.find_all('div', class_='card-body text-center'):
quote = interest.p.find('a', class_='href-noshow').text
tag = interest.p.find('a', class_='category-tag').text
result.append((quote, person))
if not isinstance(category, type(None)): # category's name is provided
category_link = requests.get(
parent_link + '/quotes/categories/' + category).text
soup_obj = BeautifulSoup(category_link, 'lxml')
for interest in soup_obj.find_all('div', class_='card-body text-center'):
quote = interest.p.find('a', class_='href-noshow').text
tag = interest.p.find('a', class_='category-tag').text
author = interest.p.find('a', class_='quote-author').text
if (quote, author) not in result:
result.append((quote, author))
return result # a list of tuples
def get_quote(person: (None, str) = None, category: (None, str) = None):
lst = get_quotes(person=person, category=category)
    return lst[random.randint(0, len(lst) - 1)]
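# Illustrative usage sketch (not part of the original module). The person and
# category values below are hypothetical examples of the expected inputs; the
# functions only return useful results if quoteload.com has matching pages.
def _example_usage():
    quotes = get_quotes(person='Albert Einstein')   # hypothetical author name; list of (quote, author) tuples
    one_quote = get_quote(category='legal')         # one random (quote, author) tuple
    return quotes, one_quote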
```
#### File: Intro/Game/Game2.py
```python
from abc import ABCMeta, abstractmethod
from random import randint, randrange
class Fighter(object, metaclass=ABCMeta):
"""战斗者"""
# 通过__slots__魔法限定对象可以绑定的成员变量
__slots__ = ('_name', '_hp')
def __init__(self, name, hp):
"""初始化方法
:param name: 名字
:param hp: 生命值
"""
self._name = name
self._hp = hp
@property
def name(self):
return self._name
@property
def hp(self):
return self._hp
@hp.setter
def hp(self, hp):
self._hp = hp if hp >= 0 else 0
@property
def alive(self):
return self._hp > 0
@abstractmethod
def attack(self, other):
"""攻击
:param other: 被攻击的对象
"""
pass
class Ultraman(Fighter):
"""奥特曼"""
__slots__ = ('_name', '_hp', '_mp')
def __init__(self, name, hp, mp):
"""初始化方法
:param name: 名字
:param hp: 生命值
:param mp: 魔法值
"""
super().__init__(name, hp)
self._mp = mp
def attack(self, other):
other.hp -= randint(15, 25)
def huge_attack(self, other):
"""究极必杀技(打掉对方至少50点或四分之三的血)
:param other: 被攻击的对象
:return: 使用成功返回True否则返回False
"""
if self._mp >= 50:
self._mp -= 50
injury = other.hp * 3 // 4
injury = injury if injury >= 50 else 50
other.hp -= injury
return True
else:
self.attack(other)
return False
def magic_attack(self, others):
"""魔法攻击
:param others: 被攻击的群体
:return: 使用魔法成功返回True否则返回False
"""
if self._mp >= 20:
self._mp -= 20
for temp in others:
if temp.alive:
temp.hp -= randint(10, 15)
return True
else:
return False
def resume(self):
"""恢复魔法值"""
incr_point = randint(1, 10)
self._mp += incr_point
return incr_point
def __str__(self):
        return '~~~Ultraman %s~~~\n' % self._name + \
               'HP: %d\n' % self._hp + \
               'MP: %d\n' % self._mp
class Monster(Fighter):
"""小怪兽"""
__slots__ = ('_name', '_hp')
def attack(self, other):
other.hp -= randint(10, 20)
def __str__(self):
        return '~~~Monster %s~~~\n' % self._name + \
               'HP: %d\n' % self._hp
def is_any_alive(monsters):
"""判断有没有小怪兽是活着的"""
for monster in monsters:
if monster.alive > 0:
return True
return False
def select_alive_one(monsters):
"""选中一只活着的小怪兽"""
monsters_len = len(monsters)
while True:
index = randrange(monsters_len)
monster = monsters[index]
if monster.alive > 0:
return monster
def display_info(ultraman, monsters):
"""显示奥特曼和小怪兽的信息"""
print(ultraman)
for monster in monsters:
print(monster, end='')
def main():
u = Ultraman('骆昊', 1000, 120)
m1 = Monster('狄仁杰', 250)
m2 = Monster('白元芳', 500)
m3 = Monster('王大锤', 750)
ms = [m1, m2, m3]
fight_round = 1
while u.alive and is_any_alive(ms):
        print('======== Round %02d ========' % fight_round)
        m = select_alive_one(ms)  # pick a monster that is still alive
        skill = randint(1, 10)  # use a random number to decide which skill to use
        if skill <= 6:  # 60% chance of a normal attack
            print('%s hit %s with a normal attack.' % (u.name, m.name))
u.attack(m)
            print('%s recovered %d magic points.' % (u.name, u.resume()))
        elif skill <= 9:  # 30% chance of a magic attack (it may fail if MP is insufficient)
if u.magic_attack(ms):
                print('%s used a magic attack.' % u.name)
else:
                print('%s failed to use magic.' % u.name)
        else:  # 10% chance of the ultimate attack (falls back to a normal attack if MP is insufficient)
if u.huge_attack(m):
                print('%s crushed %s with the ultimate attack.' % (u.name, m.name))
else:
                print('%s hit %s with a normal attack.' % (u.name, m.name))
                print('%s recovered %d magic points.' % (u.name, u.resume()))
        if m.alive > 0:  # if the selected monster is still alive, it strikes back at the Ultraman
            print('%s struck back at %s.' % (m.name, u.name))
m.attack(u)
        display_info(u, ms)  # display the status of the Ultraman and the monsters after each round
fight_round += 1
    print('\n======== Battle over! ========\n')
if u.alive > 0:
        print('Ultraman %s wins!' % u.name)
else:
        print('The monsters win!')
if __name__ == '__main__':
main()
```
#### File: Intro/math/quadratic.py
```python
import cmath
import re
def quadratic():
while True:
try:
L = re.sub(r'[^0-9\.-]', ' ', input('Enter three coefficients a,b,c: ').strip())
# re.sub returns str
a, b, c = [float(i) for i in L.split()]
if a == 0:
print("the equation is linear, not quadratic")
else:
# str.split returns a list
d = (b**2) - (4 * a * c)
sol1 = (-b - cmath.sqrt(d)) / (2 * a)
                sol2 = (-b + cmath.sqrt(d)) / (2 * a)
                print('{0}\n{1}'.format(sol1, sol2))
        except (TypeError, ValueError):
            print("You need 3 numbers, e.g. '2 5 1'")
            continue
        except Exception:
            print("An unknown error occurred")
            return
        ch = input("Please input 'c' to end or any other key to continue\n")
if ch !='c' and ch !='C':
pass
else:
break
quadratic()
```
#### File: Intro/oop/foo_inherit.py
```python
class A(object):
def foo(self):
print('foo of A')
class B(A):
pass
class C(A):
def foo(self):
        print('foo of C')
class D(B, C):
pass
class E(D):
def foo(self):
print('foo in E')
super().foo()
super(B, self).foo()
super(C, self).foo()
if __name__ == '__main__':
d = D()
d.foo()
e = E()
e.foo()
# foo of C
# foo in E
# foo of C
# foo of C
# foo of A
``` |
{
"source": "jobar8/interpies",
"score": 2
} |
#### File: interpies/interpies/segy_io.py
```python
import numpy as np
from obspy.io.segy.segy import _read_segy, BINARY_FILE_HEADER_FORMAT
# most useful trace header keys
STH_keys=[u'trace_sequence_number_within_line',
u'trace_sequence_number_within_segy_file',
u'scalar_to_be_applied_to_all_coordinates',
u'source_coordinate_x',
u'source_coordinate_y',
u'group_coordinate_x',
u'group_coordinate_y',
u'coordinate_units',
u'lag_time_A',
u'lag_time_B',
u'delay_recording_time',
u'number_of_samples_in_this_trace',
u'sample_interval_in_ms_for_this_trace',
u'x_coordinate_of_ensemble_position_of_this_trace',
u'y_coordinate_of_ensemble_position_of_this_trace',
u'for_3d_poststack_data_this_field_is_for_in_line_number',
u'for_3d_poststack_data_this_field_is_for_cross_line_number']
def load_SEGY_header(seis, keys=None):
'''
Load headers from an ObsPy SEGYFile object.
The headers are read from the so-called `binary header`. The function returns
a default selection of useful headers or pick from an optional list (keys).
Parameters
----------
seis : ObsPy SEGYFile object
This is created using the _read_segy function in obspy.io.segy.segy
keys : list of strings
List of headers to load. Must correspond to attributes as defined in ObsPy.
See BINARY_FILE_HEADER_FORMAT dictionary.
Returns
-------
SH : dictionary
A dictionary with the values associated with the selected headers.
'''
# read binary header
SHbin = seis.binary_file_header
# load selection of most useful headers if none requested already
if not keys:
keys = [header[1] for header in BINARY_FILE_HEADER_FORMAT if header[2]]
SH = {}
for key in keys:
SH[key] = SHbin.__getattribute__(key)
return SH
def load_SEGY_trace_header(traces,keys=None):
'''
Load trace headers from an ObsPy SEGYTrace object.
The function returns a default selection of useful headers or pick from
an optional list (keys).
Parameters
----------
traces : ObsPy SEGYTrace object
This is created from a SEGYFile object.
keys : list of strings
List of trace headers to load. Must correspond to attributes as defined
in ObsPy. See obspy.io.segy.header.TRACE_HEADER_FORMAT for a list of all
available trace header attributes or the segyio.STH_keys for a shorter list.
Returns
-------
STH : dictionary
A dictionary with the values associated with the selected headers. The values
are provided as Numpy arrays (vectors with ntraces elements).
'''
# load selection of most useful headers if none requested already
if not keys:
keys = STH_keys
STH = {}
for key in keys:
STH[key] = np.hstack([t.header.__getattr__(key) for t in traces])
return STH
def load_SEGY(filename, endian=None):
"""
Read and load data and headers from a SEGY file.
Usage
-----
data, SH, STH = load_SEGY(filename)
"""
# read file with obspy
seis = _read_segy(filename, endian=endian)
traces = seis.traces
ntraces = len(traces)
# Load SEGY header
SH = load_SEGY_header(seis)
# additional headers for compatibility with older segy module
SH['filename'] = filename
SH["ntraces"] = ntraces
SH["ns"] = SH['number_of_samples_per_data_trace']
SH["dt"] = SH['sample_interval_in_microseconds'] / 1000 # in milliseconds
# Load all the Trace headers in arrays
STH = load_SEGY_trace_header(traces)
# Load the data
data = np.vstack([t.data for t in traces]).T
return data, SH, STH
def load_SH_and_STH(filename, endian=None):
"""
Read and load headers from SEGY file. No data is loaded, saving time and memory.
Usage
-----
SH,STH = load_SH_and_STH(filename)
"""
# read file with obspy (headers only)
seis = _read_segy(filename,endian=endian,headonly=True)
traces = seis.traces
ntraces = len(traces)
# Load SEGY header
SH = load_SEGY_header(seis)
# additional headers for compatibility with older segy module
SH['filename'] = filename
SH["ntraces"] = ntraces
SH["ns"] = SH['number_of_samples_per_data_trace']
SH["dt"] = SH['sample_interval_in_microseconds'] / 1000 # in milliseconds
# Load all the Trace headers in arrays
STH = load_SEGY_trace_header(traces)
return SH, STH
```
#### File: interpies/interpies/spatial.py
```python
import subprocess
# import numpy
import numpy as np
# import GDAL modules
from osgeo import osr, ogr
def project_points(inputPoints, s_srs=4326, t_srs=23029):
'''
Reproject a set of points from one spatial reference to another.
Parameters
----------
s_srs : Integer
        Spatial reference system of the input (source) points. Must be defined as an EPSG code,
        e.g. 23029 for ED50 / UTM Zone 29N
t_srs : Integer
        Spatial reference system of the output (target) points. Must be defined as an EPSG code,
        e.g. 23029 for ED50 / UTM Zone 29N
Other example: WGS84 = EPSG:4326
See http://epsg.io/ for all the codes.
'''
# input SpatialReference
inSpatialRef = osr.SpatialReference()
inSpatialRef.ImportFromEPSG(s_srs)
# output SpatialReference
outSpatialRef = osr.SpatialReference()
outSpatialRef.ImportFromEPSG(t_srs)
# create the CoordinateTransformation
coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
# Loop through the points
outputPoints = []
for XY in inputPoints:
point = ogr.CreateGeometryFromWkt("POINT ({} {})".format(*XY))
point.Transform(coordTrans)
outputPoints.append([point.GetX(),point.GetY()])
return np.asarray(outputPoints)
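# Illustrative sketch (not in the original module): reproject a single point from
# WGS84 (EPSG:4326) to ED50 / UTM zone 29N (EPSG:23029). The coordinate value is
# a made-up example.
def _example_project_points():
    pts = np.array([[-9.0, 53.5]])  # hypothetical (x, y) pair
    return project_points(pts, s_srs=4326, t_srs=23029)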
def extent(xll, yll, cellsize, nrows, ncols, scale=1.,
registration='gridlines'):
'''
Return the extent (xmin,xmax,ymin,ymax) of an image given the coordinates of
the lower-left corner, the cellsize and the numbers of rows and columns.
Registration option controls whether the coordinates indicate the position
of the centre of the pixels ('pixels') or the corner ('gridlines').
Returns
-------
(xmin,xmax,ymin,ymax)
'''
if registration == 'gridlines':
xmin = xll*scale
xmax = (xll+(ncols-1)*cellsize)*scale
ymin = yll * scale
ymax = (yll + (nrows-1)*cellsize)*scale
else:
# This is the complete footprint of the grid considered as an image
xmin = (xll - cellsize/2.) *scale
xmax = (xmin + ncols*cellsize) *scale
ymin = (yll - cellsize/2.) * scale
ymax = (ymin + nrows*cellsize) *scale
return (xmin,xmax,ymin,ymax)
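# Worked example (illustrative, not in the original module): for a 10 x 10 grid
# with 1 m cells and lower-left corner at (0, 0),
#   extent(0, 0, 1, 10, 10, registration='gridlines') -> (0.0, 9.0, 0.0, 9.0)
#   extent(0, 0, 1, 10, 10, registration='pixels')    -> (-0.5, 9.5, -0.5, 9.5)
# i.e. pixel registration expands the bounds by half a cell on each side.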
def grid_to_coordinates(xll, yll, cellsize, nrows, ncols):
'''
Return vectors of x and y coordinates of the columns and rows of a grid.
The result does not depend on the registration (gridlines or pixels) of the
grid.
Returns
-------
x, y: vectors of length ncols and nrows, respectively.
'''
xmax = xll + ncols*cellsize
ymax = yll + nrows*cellsize
# 1-D arrays of coordinates (use linspace to avoid errors due to floating point rounding)
#x = np.arange(xll , xll+ncols*cellsize , cellsize)
#y = np.arange(ymax , yll - cellsize , -1*cellsize)
x = np.linspace(xll , xmax , num=ncols, endpoint=False)
y = np.linspace(yll , ymax , num=nrows, endpoint=False)
return x,y
def grid_to_points(xll, yll, cellsize, nrows, ncols, flipy=True):
'''
Return x and y coordinates of all the points of a grid.
The result does not depend on the registration (gridlines or pixels) of the
grid.
Returns
-------
X: numpy array of shape (n,2) where n = nrows * ncols
A two column array containing the two output vectors.
'''
x,y = grid_to_coordinates(xll,yll,cellsize,nrows,ncols)
if flipy:
y = np.flipud(y)
xGrid,yGrid = np.meshgrid(x,y)
return np.column_stack((xGrid.flatten(),yGrid.flatten()))
def warp(inputFile, outputFile, xsize, ysize, dst_srs, src_srs=None,
doClip=False, xmin=None, xmax=None, ymin=None, ymax=None,
method='bilinear'):
'''
Image reprojection and warping utility, with option to clip.
This function calls a GDAL executable.
Parameters
----------
inputFile: path to input file
outputFile: path to output file
dst_srs: string
target spatial reference set, for example .prj filename or "EPSG:n"
xsize: float
Output cell size in the x direction (in target georeferenced units)
ysize: float
Output cell size in the y direction (in target georeferenced units)
doClip: boolean
If True, the extent of the reprojected destination are clipped to the
bounding box defined by (xmin,xmax,ymin,ymax).
(xmin,xmax,ymin,ymax): floats
extents of output file (in target SRS) if clipping is required.
method: string, default is 'bilinear'
Resampling method to use. Most frequent methods are:
'near':
nearest neighbour resampling.
'bilinear':
bilinear resampling.
'cubic':
cubic resampling.
'cubicspline':
cubic spline resampling.
'lanczos':
Lanczos windowed sinc resampling.
'''
command = 'gdalwarp -overwrite'
if src_srs is not None:
command = command + ' -s_srs "{}"'.format(src_srs)
command = command + ' -t_srs "{}"'.format(dst_srs)
if doClip:
command = command + ' -te {} {} {} {}'.format(xmin, ymin, xmax, ymax)
command = command + ' -tr {} {}'.format(xsize, ysize)
command = command + ' -r {}'.format(method)
command = command + ' "{}" "{}"'.format(inputFile, outputFile)
print('GDAL command\n------------\n'+command)
print('\nOutput\n------')
# Run the command
try:
        retMessage = subprocess.check_output(command, shell=True)  # command is a single string, so run it through the shell
# remove 'b' letter at the beginning of the string
retMessage = retMessage.decode("utf-8")
except subprocess.CalledProcessError as err:
retMessage = "ERROR. GDAL returned code {}.\n{}\n".format(err.returncode, err.output.decode("utf-8"))
return retMessage
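# Illustrative call sketch (not in the original module). The file names, cell sizes
# and EPSG code are hypothetical; gdalwarp must be available on the system PATH.
def _example_warp():
    return warp('input_grid.tif', 'output_utm29.tif', xsize=50, ysize=50,
                dst_srs='EPSG:23029', method='bilinear')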
```
#### File: interpies/interpies/transforms.py
```python
import numpy as np
from scipy import signal
from scipy.ndimage import filters
#from scipy import interpolate
from scipy import ndimage as nd
# Import scikit-learn modules (used for the find_trend function)
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
### definitions
pi = np.pi
# kernels for convolution filters
derfilt3 = np.array([-0.5, 0, 0.5], np.float32)
derfilt5 = np.array([1, -8, 0, 8, -1], np.float32)/12 # Five-point stencil vector
prewitt1d = np.array([-1, 0, 1], np.float32)/2
#===============================================================================
# miscellaneous functions
#===============================================================================
def replace_edges(data, ncells=1):
"""Replace the values at the edges of an array with the values calculated
with reflection padding. Useful to correct edge effects due to convolution
filters.
"""
return np.pad(data[ncells:-ncells, ncells:-ncells],
ncells, mode='reflect', reflect_type='odd')
def fill_nodata(data, invalid=None):
"""Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell. Not very pretty but enough
for making sure the calculation works.
Parameters
----------
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'. True cells set where data
value should be replaced.
If None (default), use: invalid = np.isnan(data)
Returns
-------
Return a filled array.
Credits
-------
http://stackoverflow.com/a/9262129
"""
if np.any(np.isnan(data)):
if invalid is None:
invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid,
return_distances=False,
return_indices=True)
return data[tuple(ind)]
else:
return data
def simple_resample(data, sampling=2):
'''
Resample grid by simply picking cells at a given sampling rate.
The starting point is the lower-left corner of grid so the location
of the grid is unchanged.
'''
return np.flipud(np.flipud(data)[::sampling, ::sampling])
def find_trend(X, data, degree=1, returnModel=False):
'''
Calculate trend in 2D data. The fit is made with a polynomial function of
chosen degree. A least-square method is used for the fit.
'''
nrows, ncols = data.shape
# get location of NaNs
mask = np.isnan(data)
# Fit data with a polynomial surface (or a plane if degree=1)
model = Pipeline([('poly', PolynomialFeatures(degree)),
('linear', LinearRegression())])
model.fit(X[~mask.flatten(), :], data[~mask])
# calculate resulting trend
trend = model.predict(X).reshape((nrows, ncols))
if returnModel:
return model
else:
return trend
def stats(data):
'''
Return a list of descriptive statistical values.
'''
mean = np.nanmean(data)
sigma = np.nanstd(data)
minimum = np.nanmin(data)
maximum = np.nanmax(data)
return (mean, sigma, minimum, maximum)
#==============================================================================
# Derivatives with Savitzky-Golay coefficients
#==============================================================================
#-------------------------------------------
# Pre-calculated Savitzky-Golay coefficients
#-------------------------------------------
# <NAME>, Microsoft Research, August 2001
#
# SavGolSize<m>Order<n>X<i>Y<j> is a filter in row-major order for one polynomial with:
# filter size m x m
# polynomial order n
# filter for coefficient of term (x^i)(y^j)
# These are grouped by size
# http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/KRUMM1/SavGol.htm
# Size 2 Order 1
SavGolSize2Order1X0Y0 = np.array([0.25000000,0.25000000,
0.25000000,0.25000000]).reshape((2,2))
SavGolSize2Order1X1Y0 = np.array([-0.50000000,0.50000000,
-0.50000000,0.50000000]).reshape((2,2))
SavGolSize2Order1X0Y1 = np.array([-0.50000000,-0.50000000,
0.50000000,0.50000000]).reshape((2,2))
# Size 3 Order 1
SavGolSize3Order1X0Y0 = np.array([0.11111111,0.11111111,0.11111111,
0.11111111,0.11111111,0.11111111,
0.11111111,0.11111111,0.11111111]).reshape((3,3))
SavGolSize3Order1X1Y0 = np.array([-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667]).reshape((3,3))
SavGolSize3Order1X0Y1 = np.array([-0.16666667,-0.16666667,-0.16666667,
0.00000000,0.00000000,0.00000000,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
# Size 3 Order 2 ## can be used for quadratic polynomial fit
SavGolSize3Order2X0Y0 = np.array([-0.11111111,0.22222222,-0.11111111,
0.22222222,0.55555556,0.22222222,
-0.11111111,0.22222222,-0.11111111]).reshape((3,3))
SavGolSize3Order2X1Y0 = np.array([-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667]).reshape((3,3))
SavGolSize3Order2X2Y0 = np.array([0.16666667,-0.33333333,0.16666667,
0.16666667,-0.33333333,0.16666667,
0.16666667,-0.33333333,0.16666667]).reshape((3,3))
SavGolSize3Order2X0Y1 = np.array([-0.16666667,-0.16666667,-0.16666667,
0.00000000,0.00000000,0.00000000,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
SavGolSize3Order2X1Y1 = np.array([0.25000000,0.00000000,-0.25000000,
0.00000000,0.00000000,0.00000000,
-0.25000000,0.00000000,0.25000000]).reshape((3,3))
SavGolSize3Order2X0Y2 = np.array([0.16666667,0.16666667,0.16666667,
-0.33333333,-0.33333333,-0.33333333,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
#----------------------------------------
def savgol2d(degree, window_size):
'''
Calculate coefficients of two-dimensional Savitzky-Golay filters.
Derived from https://github.com/whatasunnyday/Savitzky-Golay-Filter
Checked against Krumm's coefficients (see list above).
Parameters
----------
degree: positive integer
The degree of the polynomial that is fitted to the data points. The
greater the degree, the larger the fitting window must be.
window_size: positive odd integer
The size of the square window that is used to calculate the fitting
polynomial.
Returns
-------
coeffs : 2D array of shape (n, `window_size**2`), where n is the number of
coefficients in a polynomial of degree `degree` with 2 variables (x and y).
    n is equal to (d+2)! / (2! * d!) = (d+1)(d+2)/2
Each of the n rows is a kernel of size `window_size` that can be used
to smooth 2D data (with the first one) or to calculate derivatives (with
the others).
'''
if not isinstance(degree, int) or degree < 0:
raise ValueError("Degree of polynomial must be a positive integer")
if not isinstance(window_size, int) or window_size % 2 == 0 or window_size < 0 :
raise ValueError("Window size must be a positive odd integer")
if window_size ** 2 < ((degree + 2) * (degree + 1)) / 2.0:
raise ValueError("Degree too high for window size")
# create dictionary of exponents
exps = [ {"x": k - n, "y": n } for k in range(degree + 1) for n in range(k + 1)]
# coordinates of points in window
n = np.arange(-(window_size - 1)//2, (window_size - 1)//2 + 1,
dtype = np.float64)
dx = np.tile(n, [window_size, 1]).reshape(window_size ** 2, )
dy = np.repeat(n, window_size)
# array
A = np.empty((window_size ** 2, len(exps)))
for i, exp in enumerate(exps):
A[:,i] = (dx ** exp["x"]) * (dy ** exp["y"])
return np.linalg.pinv(A)
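# Sanity-check sketch (illustrative, not in the original module): for degree 1 and a
# 3 x 3 window, the rows of savgol2d should reproduce Krumm's Size3Order1 kernels
# listed above (row 0 ~ X0Y0 smoothing, row 1 ~ X1Y0, row 2 ~ X0Y1).
def _check_savgol2d_against_krumm():
    coeffs = savgol2d(1, 3)
    smooth = coeffs[0].reshape((3, 3))  # ~ SavGolSize3Order1X0Y0 (all 1/9)
    ddx = coeffs[1].reshape((3, 3))     # ~ SavGolSize3Order1X1Y0
    ddy = coeffs[2].reshape((3, 3))     # ~ SavGolSize3Order1X0Y1
    return (np.allclose(smooth, SavGolSize3Order1X0Y0, atol=1e-6),
            np.allclose(ddx, SavGolSize3Order1X1Y0, atol=1e-6),
            np.allclose(ddy, SavGolSize3Order1X0Y1, atol=1e-6))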
#----------------------------------------
# Dictionary to associate types of derivative with Savitzky-Golay coefficients
# and parameters
sg_dicts = {}
sg_dicts['dx'] = {'index':1,'factor':1,'exponent':1,'flipfunc':np.fliplr}
sg_dicts['dy'] = {'index':2,'factor':-1,'exponent':1,'flipfunc':np.flipud}
sg_dicts['dx2'] = {'index':3,'factor':2,'exponent':2,'flipfunc':np.fliplr}
sg_dicts['dxdy'] = {'index':4,'factor':-1,'exponent':2,'flipfunc':lambda x: np.flipud(np.fliplr(x))}
sg_dicts['dy2'] = {'index':5,'factor':2,'exponent':2,'flipfunc':np.flipud}
def savgol_smooth(data, deg=3, win=5, doEdges=False):
'''
Smooth an array by 2D convolution with a Savitzky-Golay (SG) filter.
It works even if NaNs are present in the data.
The SG filter is controlled by two parameters, `deg` (degree) and `win` (window
size). The amount of smoothing will increase with `win` and decrease with
`deg`.
Parameters
----------
data: 2D array
Input data
deg: positive integer, default 3
The degree of the Savitzky-Golay filter. The greater the degree, the
larger the fitting window must be.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
    doEdges: boolean, default False
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
'''
    # retrieve Savitzky-Golay coefficients and make kernel
sg_coeffs = savgol2d(deg,win)
sg_kernel = sg_coeffs[0].reshape((win,win))
# calculate filtered result by convolution
convResult = signal.convolve2d(data,sg_kernel,mode='same',
boundary='symm')
# fill edges
if doEdges:
convResult = replace_edges(convResult, (win-1)//2)
return convResult
def savgol_deriv(data, cellsize, direction='dx', deg=3, win=5, doEdges=True):
'''
Calculate horizontal derivatives by convolution with a Savitzky-Golay (SG)
filter. It works even if NaNs are present in the data.
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
direction : {'dx','dy','dx2','dxdy','dy2'}, optional
Type of derivative. Default is 'dx', first horizontal derivative in the
x direction. The x axis is "West to East", i.e. along rows of the array.
The y axis is "South to North", i.e. along columns of the array.
deg: positive integer, default 3
The degree of the Savitzky-Golay filter. The greater the degree, the
larger the fitting window must be.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
'''
sg_dict = sg_dicts[direction]
index = sg_dict['index']
factor = sg_dict['factor']
exponent = sg_dict['exponent']
flipfunc = sg_dict['flipfunc']
    # retrieve Savitzky-Golay coefficients and make kernel
sg_coeffs = savgol2d(deg, win)
sg_kernel = flipfunc(sg_coeffs[index].reshape((win, win))) # flip for convolution
# calculate derivative by convolution
convResult = factor*signal.convolve2d(data, sg_kernel, mode='same',
boundary='symm')/cellsize**exponent
# fill edges
if doEdges:
convResult = replace_edges(convResult, (win-1)//2)
return convResult
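# Illustrative usage sketch (not in the original module): first horizontal derivatives
# of a synthetic grid. The 10 m cell size and the planar surface are made-up inputs.
def _example_savgol_deriv():
    yy, xx = np.mgrid[0:64, 0:64]
    grid = 0.5 * xx + 0.1 * yy  # synthetic planar surface
    ddx = savgol_deriv(grid, cellsize=10., direction='dx')  # ~ 0.05 everywhere on a plane
    ddy = savgol_deriv(grid, cellsize=10., direction='dy')
    return ddx, ddy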
#==============================================================================
# fs_deriv - 5-Tap and 7-tap 1st and 2nd discrete derivatives
#==============================================================================
# ** Adapted from Matlab code by <NAME> **
#
# These functions compute 1st and 2nd derivatives of an image using
# coefficients given by <NAME> Simoncelli (2004). The results are significantly
# more accurate than MATLAB's GRADIENT function on edges that are at angles
# other than vertical or horizontal. This in turn improves gradient orientation
# estimation enormously. If you are after extreme accuracy try using the 7-tap
# coefficients.
#
# Reference: <NAME> and <NAME> "Differentiation of Discrete
# Multi-Dimensional Signals" IEEE Trans. Image Processing. 13(4): 496-508 (2004)
#
# Copyright (c) 2010 <NAME>
# http://www.peterkovesi.com/matlabfns/index.html
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided "as is", without warranty of any kind.
# April 2010
def _conv1(a, h):
return np.convolve(a, h, mode='same')
def _conv2(h1, h2, A):
'''
Performs a 1D convolution down the columns using h1 then a 1D
convolution along the rows using h2.
'''
result = np.apply_along_axis(_conv1, 0, A, h1)
result = np.apply_along_axis(_conv1, 1, result, h2)
return result
def fs_coefficients(tap=5, direction='dx'):
'''
This function returns the 5-tap or 7-tap coefficients given by Farid
and Simoncelli (2004).
'''
if tap==5:
if direction in ['dx', 'dy', 'dxdy']:
# 5-tap 1st derivative coefficients. These are optimal if you are just
            # seeking the 1st derivatives.
p = np.array([0.037659, 0.249153, 0.426375, 0.249153, 0.037659])
d1 = np.array([0.109604, 0.276691, 0.000000, -0.276691, -0.109604])
d2 = 0
elif direction in ['dx2', 'dy2', 'dxdy']:
# 5-tap 2nd derivative coefficients. The associated 1st derivative
# coefficients are not quite as optimal as the ones above but are
# consistent with the 2nd derivative interpolator p and thus are
# appropriate to use if you are after both 1st and 2nd derivatives.
p = np.array([0.030320, 0.249724, 0.439911, 0.249724, 0.030320])
d1 = np.array([0.104550, 0.292315, 0.000000, -0.292315, -0.104550])
d2 = np.array([0.232905, 0.002668, -0.471147, 0.002668, 0.232905])
elif tap==7:
# 7-tap interpolant and 1st and 2nd derivative coefficients
p = np.array([0.004711, 0.069321, 0.245410,
0.361117, 0.245410, 0.069321, 0.004711])
d1 = np.array([0.018708, 0.125376, 0.193091,
0.000000, -0.193091, -0.125376, -0.018708])
d2 = np.array([0.055336, 0.137778, -0.056554,
-0.273118, -0.056554, 0.137778, 0.055336])
else:
raise ValueError('The tap value must be either 5 or 7.')
return p, d1, d2
def fs_deriv(data, cellsize, direction='dx', tap=5):
'''
Compute 1st or 2nd derivative of an array using the method of Farid and
Simoncelli (2004).
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
direction : {'dx','dy','dx2','dxdy','dy2'}, optional
Type of derivative. Default is 'dx', first horizontal derivative in the
x direction. The x axis is "West to East", i.e. along rows of the array.
The y axis is "South to North", i.e. along columns of the array.
tap: {5, 7}, default 5
Size of the kernel that is used to calculate the derivative by
convolution.
'''
# Compute coefficients
p, d1, d2 = fs_coefficients(tap, direction)
# Compute derivatives
if direction=='dx':
result = _conv2(p,d1,data)/cellsize
elif direction=='dy':
result = -1 * _conv2(d1,p,data)/cellsize # origin is in lower left corner
elif direction=='dx2':
result = _conv2(p,d2,data)/cellsize/cellsize
elif direction=='dy2':
result = _conv2(d2,p,data)/cellsize/cellsize
elif direction=='dxdy':
result = _conv2(p,d1,data)/cellsize
result = -1 * _conv2(d1,p,result)/cellsize
return result
#==============================================================================
# Fourier functions
#==============================================================================
def getk(nx, ny, dx, dy):
'''
Given the size `nx` and `ny` of a FFT and the spacing `dx` and `dy`
of the space domain grid, this routine returns the spatial
frequency grid components `kx`, `ky` and `k = sqrt(kx.^2 + ky.^2)`
Makes use of numpy function `fftfreq`.
Returns
-------
[kx,ky,k]
'''
# Discrete Fourier Transform sample frequencies
kx = 2*np.pi*np.fft.fftfreq(nx,dx)
ky = 2*np.pi*np.fft.fftfreq(ny,dy)
# Create matrices for 2D case
kx = np.tile(kx,(ny,1))
ky = np.tile(ky,(nx,1)).T
# calculate k
k=np.sqrt(kx**2+ky**2)
return [kx,ky,k]
def next_pow2(x):
'''
    n = next_pow2(x)
return the nearest power of 2 going upwards.
'''
return int(2.**np.ceil(np.log(x)/np.log(2)))
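# e.g. next_pow2(100) -> 128 and next_pow2(1000) -> 1024 (smallest power of 2 >= x, up to floating-point rounding)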
# Padding functions
def pad_next_pow2(data, mode='reflect', reflect_type='odd', smooth=False,
end_values=0):
'''
Pad to a square grid with 2**n number of cells in each dimension,
with 2**n being the next power of 2 relative to the size of the input array.
    Uses the numpy padding function (same mode, reflect_type and end_values
arguments).
Parameters
----------
data: 2D array
Input data.
mode : {'reflect', 'linear_ramp'}, optional
Mode used by secondary padding after tiling. See numpy pad function for
more information.
reflect_type : {'even', 'odd'}, optional
        Used in 'reflect' mode. The 'odd' style is the default, with the extended
part of the array created by subtracting the reflected values from
two times the edge value. For the 'even' style, the reflection is
unaltered around the edge value.
smooth : boolean, optional
option to apply a moving average smoothing function over
the edge of the grid.
default: False
Notes
-----
See https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html
'''
nrows,ncols = data.shape
nmax = max([nrows,ncols])
npts = next_pow2(nmax) # next 2^n number
cdiff = (npts - ncols) // 2
rdiff = (npts - nrows) // 2
# if (npts-nrows) is odd, add 1 row on the bottom side
r_remainder = np.mod((npts - nrows),2)
# if (npts-ncols) is odd, add 1 column on the right-hand side
c_remainder = np.mod((npts - ncols),2)
# apply padding
if mode in ['reflect','symmetric']:
padded = np.pad(data, ((rdiff, rdiff+r_remainder),(cdiff,cdiff+c_remainder)),
mode=mode,reflect_type=reflect_type)
else:
padded = np.pad(data, ((rdiff, rdiff+r_remainder),(cdiff,cdiff+c_remainder)),
mode=mode,end_values=(end_values,))
if smooth:
for i in range(-2,3):
padded[:,cdiff+i] = smoothing_average(padded, cdiff+i, axis='cols')
padded[:,ncols-1+cdiff+i] = smoothing_average(padded,
ncols-1+cdiff+i, axis='cols')
padded[rdiff+i,:] = smoothing_average(padded, rdiff+i, axis='rows')
padded[nrows-1+rdiff+i,:] = smoothing_average(padded,
nrows-1+rdiff+i, axis='rows')
return padded
def pad_full(data, mode='reflect', reflect_type='odd'):
'''
Combine tiling and padding.
Extend an array first by tiling symmetrical copies of the input
to a 3x3 array (in reflect mode) then pad with a linear ramp or by reflection
to the next power of 2.
Parameters
----------
data: 2D array
Input data
mode : {'reflect', 'linear_ramp'}, optional
Mode used by secondary padding after tiling. See numpy pad function for
more information.
reflect_type : {'even', 'odd'}, optional
        Used in 'reflect' mode. The 'odd' style is the default, with the extended
part of the array created by subtracting the reflected values from
two times the edge value. For the 'even' style, the reflection is
unaltered around the edge value.
See also
--------
Numpy pad :
https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html
'''
nrows, ncols = data.shape
# first 3x3 padding
data_pad = np.pad(data, ((nrows,nrows), (ncols,ncols)), mode='reflect',
reflect_type=reflect_type)
# additional padding to size = next power of 2
if mode == 'reflect':
data_pad = pad_next_pow2(data_pad, mode='reflect',
reflect_type=reflect_type)
else:
data_pad = pad_next_pow2(data_pad, mode='linear_ramp',
end_values=int(data_pad.mean())) # linear ramp
return data_pad
def pad_3x3(data, mode='reflect', reflect_type='odd'):
'''
Extend a matrix by tiling symmetrical copies of the input
Return a 3*nrows x 3*ncols array
'''
nrows, ncols = data.shape
# 3x3 padding
if mode == 'reflect':
data_pad = np.pad(data, ((nrows,nrows), (ncols,ncols)), mode=mode,
reflect_type=reflect_type)
else:
data_pad = np.pad(data, ((nrows,nrows), (ncols,ncols)), mode='linear_ramp',
end_values=int(np.nanmean(data))) # linear ramp
return data_pad
def unpad_next_pow2(data, nrows, ncols):
'''
    Retrieve the original array after padding with pad_next_pow2.
(nrows, ncols) is the shape of the array before padding.
'''
nmax = max([nrows,ncols])
npts = next_pow2(nmax)
cdiff = ((npts - ncols) // 2)
rdiff = ((npts - nrows) // 2)
return data[rdiff:nrows + rdiff,cdiff:ncols + cdiff]
def unpad_3x3(data):
'''
Retrieve the original matrix that was padded with 3x3 reflection padding
'''
return np.hsplit(np.vsplit(data, 3)[1], 3)[1]
def unpad_full(data, nrows, ncols):
'''
Retrieve the original matrix that was padded with pad_full reflection padding.
(nrows, ncols) is the shape of the array before padding.
'''
data_unpad = unpad_next_pow2(data, 3*nrows, 3*ncols)
return unpad_3x3(data_unpad) # remove 3x3 padding
# put everything together
def fourier_transform(data, cellsize, trans='dx', order=1, doEdges=True, ncells=2,
padding='full', mode='reflect', reflect_type='odd',
eps=1e-6, z=500):
'''
Calculate transforms in the frequency domain.
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
trans: string
One of the following string values:
'dx': horizontal derivative along the x-axis
'dy': horizontal derivative along the y-axis
'dxdy': horizontal derivatives along the x-axis and y-axis
'dz': vertical derivative
'vi': vertical integral
'upcont': upward continuation
order: float, default: 1
The order of differentiation or integration
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
ncells: int, default: 2
Number of cells at the edges of the output grid that are replaced using
padding if the `doEdges` option is True.
padding: string
Type of padding to apply to the input grid before the Fourier calculation.
Can be one of the following options:
'full': initial 3x3 padding (reflect) + ramp or reflection to next power of 2
'3x3': The entire array is duplicated and tiled in a 3x3 pattern
with the original array in the middle.
'pow2': the size of the array is increased by padding to the next
power of 2.
mode: string, default: 'reflect'
Option for padding the input array.
'reflect': Pads with the reflection of the array
'linear_ramp': Pads with a linear ramp between the array edge value
and the mean value of the array.
reflect_type: string, default: 'odd'
Used in reflection padding. Can be 'even' or 'odd'. See numpy function pad.
eps: float
Small number to replace zeros in frequency components k with when
the vertical integral is calculated.
z: float
Height used for upward continuation. Default is 500 m.
'''
nrows,ncols = data.shape
# save array mask before calculation
mask = np.isnan(data)
# Apply padding
padding = padding.lower()
if padding == 'full':
# initial 3x3 padding (reflect) + ramp or reflection to next power of 2
data_pad = pad_full(fill_nodata(data), mode=mode, reflect_type=reflect_type)
elif padding == '3x3':
# 3x3 reflection padding
data_pad = pad_3x3(fill_nodata(data), mode=mode, reflect_type=reflect_type)
elif padding == 'pow2':
# ramp or reflection to next power of 2
data_pad = pad_next_pow2(fill_nodata(data), mode=mode, reflect_type=reflect_type,
smooth=True, end_values=int(np.nanmean(data)))
else:
# no padding
data_pad = fill_nodata(data)
# Calculate the k matrix
(ny,nx) = data_pad.shape
[kx,ky,k] = getk(nx, ny, cellsize, cellsize)
# Apply transformation on padded data
trans = trans.lower()
if trans == 'dx':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*(1j*kx)**order))
elif trans == 'dy':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*(1j*ky)**order))
elif trans == 'dxdy':
fouTrans = np.real(np.fft.ifft2(
(np.fft.fft2(data_pad)*(1j*ky)**order)*(1j*kx)**order))
elif trans == 'dz':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*k**order))
elif trans == 'vi':
# remove zeros in k to avoid division by zero error
k[k==0] = eps
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*k**(-1*order)))
fouTrans = fouTrans - np.mean(fouTrans)
elif trans == 'upcont':
fouTrans = np.real(np.fft.ifft2(np.fft.fft2(data_pad)*(np.exp(-z*k))))
# remove padding
if padding == 'full':
fouTrans = unpad_full(fouTrans, nrows, ncols)
elif padding == '3x3':
fouTrans = unpad_3x3(fouTrans)
elif padding == 'pow2':
fouTrans = unpad_next_pow2(fouTrans, nrows, ncols)
# fill edges
if doEdges:
fouTrans = replace_edges(fouTrans, ncells)
# re-apply the mask
fouTrans[mask] = np.nan
return fouTrans
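# Illustrative usage sketch (not in the original module); the input grid and the
# 100 m cell size are hypothetical. The upward-continuation height z is in the same
# units as cellsize.
def _example_fourier_transform(grid):
    dz = fourier_transform(grid, cellsize=100., trans='dz', order=1)    # 1st vertical derivative
    up = fourier_transform(grid, cellsize=100., trans='upcont', z=500)  # continued 500 m upward
    return dz, up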
#===============================================================================
# ISVD (vertical derivative)
#===============================================================================
def isvd(data, cellsize, method='SG', order=1, deg=4, win=5, fs_tap=5,
doEdges=True, **kwargs):
''' Vertical derivatives with the ISVD (integrated second
vertical derivative) method.
Parameters
----------
data: 2D array
Input data
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
method: {'SG, 'FS', 'fourier'}, optional
The method to use for the calculation of the second horizontal
derivatives. The three options are:
- 'SG': Savitzky-Golay method
- 'FS': Farid and Simoncelli method
- 'fourier': fourier method
order: scalar, optional, default: 1
Order of differentiation. Must be either 1 or 2. If 1, then vertical
integration is first applied to the data.
deg: positive integer, default 4
The degree of the Savitzky-Golay filter if the SG method is used.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
fs_tap: {5, 7}, default 5
Size of the kernel that is used to calculate the derivatives with the
FS method.
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
kwargs : other keywords
Options to pass to the fourier transform.
Reference
---------
<NAME>., <NAME>., 2001. Detection of potential fields source boundaries
by enhanced horizontal derivative method. Geophys. Prospect. 49, 40–58.
'''
if order not in [1, 2]:
raise ValueError('Order must be 1 or 2.')
# save array mask before calculation
mask = np.isnan(data)
# fill no data areas (unchanged if no null cells)
data = fill_nodata(data)
if order==1:
# vertical integral
data = fourier_transform(data, cellsize, trans='vi', order=1)
# smoothing
if kwargs:
data = gauss(data, kwargs['sigma'])
# second derivatives
if method == 'SG':
data_dx2 = savgol_deriv(data, cellsize, direction='dx2', deg=deg,
win=win, doEdges=doEdges)
data_dy2 = savgol_deriv(data, cellsize, direction='dy2', deg=deg,
win=win, doEdges=doEdges)
elif method == 'FS':
data_dx2 = fs_deriv(data, cellsize, direction='dx2', tap=fs_tap)
data_dy2 = fs_deriv(data, cellsize, direction='dy2', tap=fs_tap)
elif method == 'fourier':
data_dx2 = fourier_transform(data, cellsize, trans='dx', order=2, **kwargs)
data_dy2 = fourier_transform(data, cellsize, trans='dy', order=2, **kwargs)
# return DZ using the Laplace equation
data_dz = -1*(data_dx2 + data_dy2)
# fill edges
if doEdges:
data_dz = replace_edges(data_dz, (win-1)//2)
# re-apply mask
data_dz[mask] = np.nan
return data_dz
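# Illustrative usage sketch (not in the original module): ISVD first vertical
# derivative using Savitzky-Golay second derivatives; the 50 m cell size is a
# made-up example.
def _example_isvd(grid):
    return isvd(grid, cellsize=50., method='SG', order=1, deg=4, win=5)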
#===============================================================================
# Various filters
#===============================================================================
def gauss(data, sigma=1):
return filters.gaussian_filter(data, sigma)
def smoothing_average(V, i, axis='cols'):
if axis == 'cols':
Vs = (V[:,i-2]+V[:,i-1]+V[:,i]+V[:,i+1]+V[:,i+2])/5.
else:
Vs = (V[i-2,:]+V[i-1,:]+V[i,:]+V[i+1,:]+V[i+2,:])/5.
return Vs
def laplacian(data, cellsize):
conv_filter = np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])
convResult = signal.convolve2d(data, conv_filter,
mode='valid',boundary='symm')/cellsize
return convResult
``` |
{
"source": "jobar8/segy2segy",
"score": 2
} |
#### File: segy2segy/core/segy_io.py
```python
import numpy as np
from obspy.io.segy.segy import _read_segy, BINARY_FILE_HEADER_FORMAT
# most useful trace header keys
STH_keys = [u'trace_sequence_number_within_line',
u'trace_sequence_number_within_segy_file',
u'scalar_to_be_applied_to_all_coordinates',
u'source_coordinate_x',
u'source_coordinate_y',
u'group_coordinate_x',
u'group_coordinate_y',
u'coordinate_units',
u'lag_time_A',
u'lag_time_B',
u'delay_recording_time',
u'number_of_samples_in_this_trace',
u'sample_interval_in_ms_for_this_trace',
u'x_coordinate_of_ensemble_position_of_this_trace',
u'y_coordinate_of_ensemble_position_of_this_trace',
u'for_3d_poststack_data_this_field_is_for_in_line_number',
u'for_3d_poststack_data_this_field_is_for_cross_line_number']
#==============================================================================
# loadSEGYHeader
#==============================================================================
def loadSEGYHeader(seis, keys=None):
'''
Load headers from an ObsPy SEGYFile object.
The headers are read from the so-called `binary header`. The function returns
a default selection of useful headers or pick from an optional list (keys).
Parameters
----------
seis : ObsPy SEGYFile object
This is created using the _read_segy function in obspy.io.segy.segy
keys : list of strings
List of headers to load. Must correspond to attributes as defined in ObsPy.
See BINARY_FILE_HEADER_FORMAT dictionary.
Returns
-------
SH : dictionary
A dictionary with the values associated with the selected headers.
'''
# read binary header
SHbin = seis.binary_file_header
# load selection of most useful headers if none requested already
if not keys:
keys = [header[1] for header in BINARY_FILE_HEADER_FORMAT if header[2]]
SH = {}
for key in keys:
SH[key] = SHbin.__getattribute__(key)
return SH
#===============================================================================
# loadSEGYTraceHeader
#===============================================================================
def loadSEGYTraceHeader(traces, keys=None):
'''
Load trace headers from an ObsPy SEGYTrace object.
The function returns a default selection of useful headers or pick from
an optional list (keys).
Parameters
----------
traces : ObsPy SEGYTrace object
This is created from a SEGYFile object.
keys : list of strings
List of trace headers to load. Must correspond to attributes as defined
in ObsPy. See obspy.io.segy.header.TRACE_HEADER_FORMAT for a list of all
available trace header attributes or the segyio.STH_keys for a shorter list.
Returns
-------
STH : dictionary
A dictionary with the values associated with the selected headers. The values
are provided as Numpy arrays (vectors with ntraces elements).
'''
# load selection of most useful headers if none requested already
if not keys:
keys = STH_keys
STH = {}
for key in keys:
STH[key] = np.hstack([t.header.__getattr__(key) for t in traces])
return STH
#===============================================================================
# loadSEGY
#===============================================================================
def loadSEGY(filename, endian=None):
"""
Read and load data and headers from a SEGY file.
Usage
-----
data,SH,STH = loadSEGY(filename)
"""
# read file with obspy
seis = _read_segy(filename, endian=endian)
traces = seis.traces
ntraces = len(traces)
# Load SEGY header
SH = loadSEGYHeader(seis)
# additional headers for compatibility with older segy module
SH['filename'] = filename
SH["ntraces"] = ntraces
SH["ns"] = SH['number_of_samples_per_data_trace']
SH["dt"] = SH['sample_interval_in_microseconds'] / 1000 # in milliseconds
# Load all the Trace headers in arrays
STH = loadSEGYTraceHeader(traces)
# Load the data
data = np.vstack([t.data for t in traces]).T
return data, SH, STH
#===============================================================================
# loadSHandSTH
#===============================================================================
def loadSHandSTH(filename, endian=None):
"""
Read and load headers from SEGY file. No data is loaded, saving time and memory.
Usage
-----
SH, STH = loadSHandSTH(filename)
"""
# read file with obspy (headers only)
seis = _read_segy(filename, endian=endian, headonly=True)
traces = seis.traces
ntraces = len(traces)
# Load SEGY header
SH = loadSEGYHeader(seis)
# additional headers for compatibility with older segy module
SH['filename'] = filename
SH["ntraces"] = ntraces
SH["ns"] = SH['number_of_samples_per_data_trace']
SH["dt"] = SH['sample_interval_in_microseconds'] / 1000 # in milliseconds
# Load all the Trace headers in arrays
STH = loadSEGYTraceHeader(traces)
return SH, STH
#===============================================================================
# writeSTH
#===============================================================================
def writeSTH(seis, STH_Key, newSTH):
"""
Write new trace header to a SEGY file, replacing the existing one.
***Not tested***
"""
traces = seis.traces
for i, trace in enumerate(traces):
trace.header.__setattr__(STH_Key, newSTH[i])
``` |
{
"source": "jobarber/progan",
"score": 2
} |
#### File: progan/progan/inferences.py
```python
import torch
from torchvision.utils import save_image
def make_inference(gmodelpath, num_inferences=32):
generator = torch.load('modeldata/' + gmodelpath).cuda()
with torch.no_grad():
for i in range(num_inferences):
latent = torch.randn((1, 512)).cuda()
sample = generator(latent, alpha=1.)
save_image(sample, 'inferences/real_inference{}.png'.format(i))
if __name__ == '__main__':
make_inference('gmodel32_744.3274536132812.pt', num_inferences=256)
```
#### File: progan/progan/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import kaiming_normal_std
# ----------------------------------------------------------------------
# Smaller Modules
# ----------------------------------------------------------------------
from torch.nn import init
class MiniBatchStd(nn.Module):
"""
Encourages generator to increase variation.
"""
def __init__(self):
super(MiniBatchStd, self).__init__()
def forward(self, x, group_size=16):
# group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
# s = x.shape # [NCHW] Input shape.
# y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) # [GMCHW] Split minibatch into M groups of size G.
# y = tf.cast(y, tf.float32) # [GMCHW] Cast to FP32.
# y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMCHW] Subtract mean over group.
# y = tf.reduce_mean(tf.square(y), axis=0) # [MCHW] Calc variance over group.
# y = tf.sqrt(y + 1e-8) # [MCHW] Calc stddev over group.
# y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) # [M111] Take average over fmaps and pixels.
# y = tf.cast(y, x.dtype) # [M111] Cast back to original data type.
# y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [N1HW] Replicate over group and pixels.
# return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
group_size = min(group_size, x.shape[0])
s = x.shape
y = x.view(group_size, -1, s[1], s[2], s[3]).float()
y -= torch.mean(y, axis=0, keepdim=True)
y = torch.mean(y ** 2, axis=0)
y = torch.sqrt(y + 1e-8)
y = torch.mean(y, axis=[1, 2, 3], keepdim=True)
y = y.repeat(group_size, 1, 1, 1)
y = y.expand(-1, 1, s[2], s[3])
return y # or combine here to return new fmap
# std = torch.std(x, dim=[0, 1])
# return std.expand(x.shape[0], 1, -1, -1) # returns shape N1HW (with C of 1)
class PixelNorm(nn.Module):
def __init__(self):
super(PixelNorm, self).__init__()
def forward(self, x):
# x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 1e-8)
class EqualizedConv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
super(EqualizedConv2d, self).__init__(*args, **kwargs)
self.weight.data = torch.randn(self.weight.data.shape)
self.std = kaiming_normal_std(self.weight.data, mode='fan_in', nonlinearity='relu')
self.bias.data = torch.zeros(self.bias.data.shape)
def forward(self, x):
return super(EqualizedConv2d, self).forward(x.mul(self.std))
class EqualizedLinear(nn.Linear):
def __init__(self, *args, **kwargs):
super(EqualizedLinear, self).__init__(*args, **kwargs)
self.weight.data = torch.randn(self.weight.data.shape)
self.std = kaiming_normal_std(self.weight.data, mode='fan_in', nonlinearity='relu')
self.bias.data = torch.zeros(self.bias.data.shape)
def forward(self, x):
return super(EqualizedLinear, self).forward(x.mul(self.std))
# ----------------------------------------------------------------------
# Medium modules
# ----------------------------------------------------------------------
class StartingGeneratorStack(nn.Module):
def __init__(self, resolution=4):
super(StartingGeneratorStack, self).__init__()
self.resolution = resolution
# layers
self.dense = EqualizedLinear(int(min(8192 / resolution, 512)),
int(min(8192 / resolution, 512)) * 16)
# output 512 × 4 × 4
self.conv1 = EqualizedConv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=1,
padding=(1, 1))
self.pixelnorm = PixelNorm()
# remember out channels
self.out_channels = self.conv1.out_channels
def forward(self, x):
# block 1
x = self.dense(x)
x = x.reshape(-1, 512, 4, 4)
x = self.pixelnorm(F.leaky_relu(x, negative_slope=0.2))
# block 2
x = self.pixelnorm(F.leaky_relu(self.conv1(x), negative_slope=0.2))
return x
class IntermediateGeneratorStack(nn.Module):
def __init__(self, resolution=8, in_channels=512):
super(IntermediateGeneratorStack, self).__init__()
self.resolution = resolution
self.in_channels = in_channels
# layers
self.upscale = nn.Upsample(scale_factor=2., mode='nearest')
self.conv1 = EqualizedConv2d(in_channels=self.in_channels,
out_channels=int(min(8192 / resolution, 512)),
kernel_size=(3, 3),
stride=1,
padding=1)
self.conv2 = EqualizedConv2d(in_channels=int(min(8192 / resolution, 512)),
out_channels=int(min(8192 / resolution, 512)),
kernel_size=(3, 3),
stride=1,
padding=1)
self.pixelnorm = PixelNorm()
# remember out channels
self.out_channels = self.conv2.out_channels
def forward(self, x):
x = self.upscale(x)
# block 1
x = self.pixelnorm(F.leaky_relu(self.conv1(x), negative_slope=0.2))
# block 2
x = self.pixelnorm(F.leaky_relu(self.conv2(x), negative_slope=0.2))
return x
class FinalDiscriminatorStack(nn.Module):
def __init__(self, resolution=4):
super(FinalDiscriminatorStack, self).__init__()
self.resolution = resolution
# layers
self.mini_batch = MiniBatchStd()
self.conv1 = EqualizedConv2d(in_channels=int(min(8192 / resolution, 512)) + 1,
out_channels=512,
kernel_size=(3, 3),
stride=1,
padding=1)
self.conv2 = EqualizedConv2d(in_channels=512,
out_channels=512,
kernel_size=(4, 4),
stride=1,
padding=1)
# self.dense1 = EqualizedLinear(int(min(8192 / resolution, 512)) * 16,
# min(int(8192 / resolution - 1), 512))
self.dense2 = EqualizedLinear(512 * 3 * 3, 1)
# remember out channels
self.in_channels = self.conv1.in_channels - 1 # remove extra channel added by minibatchstd
def forward(self, x):
new_channel = self.mini_batch(x)
x = torch.cat([x, new_channel], dim=1)
# block 1
x = F.leaky_relu(self.conv1(x), negative_slope=0.2)
x = F.leaky_relu(self.conv2(x), negative_slope=0.2)
x = x.reshape(-1, 512 * 3 * 3)
# block 2
# x = F.leaky_relu(self.dense1(x), negative_slope=0.2)
# block 3
x = self.dense2(x)
return x
class IntermediateDiscriminatorStack(nn.Module):
def __init__(self, resolution=1024, out_channels=512):
super(IntermediateDiscriminatorStack, self).__init__()
self.resolution = resolution
self.out_channels = out_channels
# layers
self.conv1 = EqualizedConv2d(in_channels=int(min(8192 / (resolution * 2), 512)),
out_channels=int(min(8192 / resolution, 512)),
kernel_size=(3, 3),
stride=1,
padding=1)
self.conv2 = EqualizedConv2d(in_channels=int(min(8192 / resolution, 512)),
out_channels=self.out_channels,
kernel_size=(3, 3),
stride=1,
padding=1)
self.downscale = nn.AvgPool2d(kernel_size=(2, 2))
self.in_channels = self.conv1.in_channels
def forward(self, x):
# block 1
x = F.leaky_relu(self.conv1(x), negative_slope=0.2)
# block 2
x = F.leaky_relu(self.conv2(x), negative_slope=0.2)
x = self.downscale(x)
return x
# ----------------------------------------------------------------------
# Larger modules
# ----------------------------------------------------------------------
class Generator(nn.Module):
def __init__(self, start_resolution=4):
super(Generator, self).__init__()
self.resolution = start_resolution
start_stack = StartingGeneratorStack(resolution=4)
self.modules_ = nn.ModuleList([start_stack])
self.next_in_channels = self.modules_[-1].out_channels
self.to_rgb = EqualizedConv2d(in_channels=self.modules_[-1].out_channels,
out_channels=3,
kernel_size=(1, 1),
stride=1,
padding=0)
# self.equalize_learning(self.to_rgb)
self.prior_to_rgb = None
def forward(self, x, alpha=1.):
# go through the modules
to_rgb = False
for i, module in enumerate(self.modules_):
alpha_applies = alpha < 1. and len(self.modules_) > 1
if alpha_applies:
if i == len(self.modules_) - 1:
old_x = self.prior_to_rgb(x)
old_x = module.upscale(old_x)
new_x = module(x)
new_x = self.to_rgb(new_x)
x = torch.lerp(old_x, new_x, alpha)
to_rgb = True
else:
x = module(x)
else:
x = module(x)
if not to_rgb:
x = self.to_rgb(x)
return torch.clamp(x, min=0., max=1.)
def increase_resolution(self):
self.resolution *= 2
new_module = IntermediateGeneratorStack(resolution=int(self.resolution),
in_channels=self.next_in_channels)
self.next_in_channels = new_module.out_channels
self.modules_.append(new_module)
self.prior_to_rgb = self.to_rgb
self.to_rgb = EqualizedConv2d(in_channels=self.modules_[-1].out_channels,
out_channels=3,
kernel_size=(1, 1),
stride=1,
padding=0)
if self.to_rgb.in_channels == self.prior_to_rgb.in_channels:
self.to_rgb.weight.data = self.prior_to_rgb.weight.data
else:
approximate_decrease = self.prior_to_rgb.in_channels // self.to_rgb.in_channels
new_weight = torch.zeros(self.to_rgb.weight.data.shape)
for n in range(self.to_rgb.weight.data.shape[0]):
for new_c, c in enumerate(range(0, self.to_rgb.weight.data.shape[1], approximate_decrease)):
mean = torch.mean(self.prior_to_rgb.weight.data[n, c:c + approximate_decrease, :, :],
dim=0)
new_weight[n, new_c] = mean
self.to_rgb.weight.data = new_weight
@property
def newest_params(self):
return self.modules_[-1].parameters()
class Discriminator(nn.Module):
def __init__(self, start_resolution=4):
super(Discriminator, self).__init__()
self.resolution = start_resolution
final_stack = FinalDiscriminatorStack(resolution=4)
self.modules_ = nn.ModuleList([final_stack])
self.from_rgb = EqualizedConv2d(in_channels=3,
out_channels=final_stack.in_channels,
kernel_size=(1, 1),
stride=1,
padding=0)
# self.equalize_learning(self.from_rgb)
self.downscale = nn.AvgPool2d(kernel_size=(2, 2))
self.prior_from_rgb = None
def forward(self, x, alpha=1.):
# go through the modules
for i, module in enumerate(self.modules_):
if alpha < 1. and len(self.modules_) > 1 and i == 0:
old_x = self.downscale(x)
old_x = F.leaky_relu(self.prior_from_rgb(old_x), negative_slope=0.2)
old_x = old_x * (1 - alpha)
new_x = F.leaky_relu(self.from_rgb(x), negative_slope=0.2)
new_x = module(new_x) * alpha
x = new_x + old_x
elif i == 0:
x = self.from_rgb(x)
x = module(x)
else:
x = module(x)
return x
def increase_resolution(self):
self.resolution *= 2
new_module = IntermediateDiscriminatorStack(resolution=int(self.resolution),
out_channels=int(min(8192 / self.resolution, 512)))
self.modules_.insert(0, new_module)
self.prior_from_rgb = self.from_rgb
self.from_rgb = EqualizedConv2d(in_channels=3,
out_channels=self.modules_[0].in_channels,
kernel_size=(1, 1),
stride=1,
padding=0)
@property
def newest_params(self):
return self.modules_[0].parameters()
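# Illustrative growth sketch (not part of the original module): how a training loop
# might grow both networks and fade in the new block via `alpha`. The batch size,
# latent size and alpha value are hypothetical.
def _example_progressive_step():
    g, d = Generator(), Discriminator()
    g.increase_resolution()      # 4x4 -> 8x8
    d.increase_resolution()
    latent = torch.randn(4, 512)
    fake = g(latent, alpha=0.5)  # blend upscaled old to_rgb output with the new block's output
    score = d(fake, alpha=0.5)
    return fake.shape, score.shape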
``` |
{
"source": "Jobava/mirror-mwclient",
"score": 3
} |
#### File: mirror-mwclient/mwclient/errors.py
```python
class MwClientError(RuntimeError):
pass
class MediaWikiVersionError(MwClientError):
pass
class APIDisabledError(MwClientError):
pass
class MaximumRetriesExceeded(MwClientError):
pass
class APIError(MwClientError):
def __init__(self, code, info, kwargs):
self.code = code
self.info = info
MwClientError.__init__(self, code, info, kwargs)
class InsufficientPermission(MwClientError):
pass
class UserBlocked(InsufficientPermission):
pass
class EditError(MwClientError):
pass
class ProtectedPageError(EditError, InsufficientPermission):
pass
class FileExists(EditError):
pass
class LoginError(MwClientError):
pass
class EmailError(MwClientError):
pass
class NoSpecifiedEmail(EmailError):
pass
class NoWriteApi(MwClientError):
pass
class InvalidResponse(MwClientError):
def __init__(self, response_text=None):
self.message = 'Did not get a valid JSON response from the server. Check that ' + \
'you used the correct hostname. If you did, the server might ' + \
'be wrongly configured or experiencing temporary problems.'
self.response_text = response_text
MwClientError.__init__(self, self.message, response_text)
def __str__(self):
return self.message
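# Editor's sketch (not part of mwclient): how these exception classes are typically raised
# and caught. The error code, info text and kwargs below are invented for the example.
if __name__ == '__main__':
    try:
        raise APIError('badtoken', 'Invalid token supplied', {'action': 'query'})
    except APIError as err:
        print('API error %s: %s' % (err.code, err.info))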
``` |
{
"source": "Jobava/mirror-pontoon",
"score": 2
} |
#### File: pontoon/administration/views.py
```python
import base64
import json
import logging
import os
import shutil
import traceback
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.db import transaction
from django.forms.models import inlineformset_factory
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render
from django.template.defaultfilters import slugify
from django.utils.datastructures import MultiValueDictKeyError
from pontoon.administration import files
from pontoon.base import utils
from pontoon.base.models import (
Entity,
Locale,
Project,
ProjectForm,
Resource,
Subpage,
Translation,
UserProfile,
get_projects_with_stats,
)
log = logging.getLogger('pontoon')
def admin(request, template='admin.html'):
"""Admin interface."""
log.debug("Admin interface.")
if not request.user.has_perm('base.can_manage'):
return render(request, '403.html', status=403)
projects = Project.objects.all().order_by("name")
data = {
'projects': get_projects_with_stats(projects),
'url_prefix': 'admin/projects',
}
return render(request, template, data)
def get_slug(request):
"""Convert project name to slug."""
log.debug("Convert project name to slug.")
if not request.user.has_perm('base.can_manage'):
log.error("Insufficient privileges.")
return HttpResponse("error")
if not request.is_ajax():
log.error("Non-AJAX request")
return HttpResponse("error")
try:
name = request.GET['name']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Name: " + name)
slug = slugify(name)
log.debug("Slug: " + slug)
return HttpResponse(slug)
def manage_project(request, slug=None, template='admin_project.html'):
"""Admin project."""
log.debug("Admin project.")
if not request.user.has_perm('base.can_manage'):
return render(request, '403.html', status=403)
SubpageInlineFormSet = inlineformset_factory(
Project, Subpage, extra=1, fields=('project', 'name', 'url'))
form = ProjectForm()
formset = SubpageInlineFormSet()
locales_selected = []
subtitle = 'Add project'
pk = None
project = None
message = 'Please wait while strings are imported from the repository.'
autoupdate = False
# Save project
if request.method == 'POST':
locales_selected = Locale.objects.filter(
pk__in=request.POST.getlist('locales'))
# Update existing project
try:
pk = request.POST['pk']
project = Project.objects.get(pk=pk)
form = ProjectForm(request.POST, instance=project)
# Needed if form invalid
formset = SubpageInlineFormSet(request.POST, instance=project)
subtitle = 'Edit project'
# Add a new project
except MultiValueDictKeyError:
form = ProjectForm(request.POST)
# Needed if form invalid
formset = SubpageInlineFormSet(request.POST)
autoupdate = True
if form.is_valid():
if project and set(project.locales.all()) != set(locales_selected):
autoupdate = True
project = form.save(commit=False)
formset = SubpageInlineFormSet(request.POST, instance=project)
if formset.is_valid():
project.save()
# http://bit.ly/1glKN50
form.save_m2m()
formset.save()
# Properly displays formset, but removes errors (if valid only)
formset = SubpageInlineFormSet(instance=project)
subtitle += '. Saved.'
pk = project.pk
if autoupdate:
messages.warning(request, message)
else:
subtitle += '. Error.'
else:
subtitle += '. Error.'
# If URL specified and found, show edit, otherwise show add form
elif slug is not None:
try:
project = Project.objects.get(slug=slug)
pk = project.pk
form = ProjectForm(instance=project)
formset = SubpageInlineFormSet(instance=project)
locales_selected = project.locales.all()
subtitle = 'Edit project'
if not Resource.objects.filter(project=project).exists():
autoupdate = True
messages.warning(request, message)
except Project.DoesNotExist:
form = ProjectForm(initial={'slug': slug})
# Override default label suffix
form.label_suffix = ''
data = {
'form': form,
'formset': formset,
'locales_selected': locales_selected,
'locales_available': Locale.objects.exclude(pk__in=locales_selected),
'REPOSITORY_TYPE_CHOICES': Project.REPOSITORY_TYPE_CHOICES,
'subtitle': subtitle,
'pk': pk,
'autoupdate': autoupdate,
}
# Set locale in Translate link
if project and locales_selected:
locale = utils.get_project_locale_from_request(
request, project.locales) or locales_selected[0].code
if locale:
data['translate_locale'] = locale
if Resource.objects.filter(project=project).exists():
data['ready'] = True
return render(request, template, data)
def delete_project(request, pk, template=None):
"""Delete project."""
try:
log.debug("Delete project.")
if not request.user.has_perm('base.can_manage'):
return render(request, '403.html', status=403)
with transaction.atomic():
project = Project.objects.get(pk=pk)
project.delete()
path = files.get_repository_path_master(project)
if os.path.exists(path):
shutil.rmtree(path)
return HttpResponseRedirect(reverse('pontoon.admin'))
except Exception as e:
log.error(
"Admin interface: delete project error.\n%s"
% unicode(e), exc_info=True)
messages.error(
request,
"There was an error during deleting this project.")
return HttpResponseRedirect(reverse(
'pontoon.admin.project',
args=[project.slug]))
def update_from_repository(request, template=None):
"""Update all project locales from repository."""
log.debug("Update all project locales from repository.")
if not request.user.has_perm('base.can_manage'):
return render(request, '403.html', status=403)
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
pk = request.POST['pk']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse(json.dumps({
'type': 'error',
'message': 'Project primary key not provided.',
}), content_type='application/json')
try:
project = Project.objects.get(pk=pk)
except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse(json.dumps({
'type': 'error',
'message': str(e),
}), content_type='application/json')
try:
files.update_from_repository(project)
files.extract_to_database(project)
except IOError as e:
log.error("IOError: " + str(e))
log.debug(traceback.format_exc())
return HttpResponse(json.dumps({
'type': 'error',
'message': str(e),
}), content_type='application/json')
except Exception as e:
log.error("Exception: " + str(e))
log.debug(traceback.format_exc())
return HttpResponse(json.dumps({
'type': 'error',
'message': str(e),
}), content_type='application/json')
return HttpResponse("200")
def update_from_transifex(request, template=None):
"""Update all project locales from Transifex repository."""
log.debug("Update all project locales from Transifex repository.")
if not request.user.has_perm('base.can_manage'):
return render(request, '403.html', status=403)
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
pk = request.POST['pk']
transifex_project = request.POST['transifex_project']
transifex_resource = request.POST['transifex_resource']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
try:
p = Project.objects.get(pk=pk)
except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
"""Check if user authenticated to Transifex."""
profile = UserProfile.objects.get(user=request.user)
username = request.POST.get(
'transifex_username', profile.transifex_username)
password = request.POST.get(
'transifex_password', base64.decodestring(profile.transifex_password))
if len(username) == 0 or len(password) == 0:
return HttpResponse("authenticate")
for l in p.locales.all():
"""Make GET request to Transifex API."""
response = utils.req('get', transifex_project, transifex_resource,
l.code, username, password)
"""Save or update Transifex data to DB."""
if hasattr(response, 'status_code') and response.status_code == 200:
entities = json.loads(response.content)
for entity in entities:
_save_entity(project=p, string=entity["key"],
comment=entity["comment"])
if len(entity["translation"]) > 0:
e = Entity.objects.get(project=p, string=entity["key"])
_save_translation(
entity=e, locale=l, string=entity["translation"])
log.debug("Transifex data for " + l.name + " saved to DB.")
else:
return HttpResponse(response)
"""Save Transifex username and password."""
if 'remember' in request.POST and request.POST['remember'] == "on":
profile.transifex_username = request.POST['transifex_username']
profile.transifex_password = base64.encodestring(
request.POST['transifex_password'])
profile.save()
return HttpResponse(response.status_code)
``` |
{
"source": "Jobayer-000/CuteNet",
"score": 2
} |
#### File: CuteNet/efficientnetv2/cutenet_model.py
```python
import copy
import itertools
import math
import os
from absl import logging
import numpy as np
import tensorflow as tf
import effnetv2_configs
import hparams
import utils
from tensorflow.keras.layers import (
Dropout,
Softmax,
LayerNormalization,
Conv2D,
Layer,
Dense,
Activation
)
from tensorflow import keras
from tensorflow.keras import Model, Sequential
import tensorflow_addons as tfa
import collections
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
class Mlp(tf.keras.layers.Layer):
def __init__(self, in_features, hidden_features=None, out_features=None, drop=0., prefix=''):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = Dense(hidden_features, name=f'{prefix}/mlp/fc1')
self.fc2 = Dense(out_features, name=f'{prefix}/mlp/fc2')
self.drop = Dropout(drop)
def call(self, x):
x = self.fc1(x)
x = tf.keras.activations.gelu(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
B, H, W, C = x.shape
x = tf.reshape(x, shape=[-1, H // window_size,
window_size, W // window_size, window_size, C])
x = tf.transpose(x, perm=[0, 1, 3, 2, 4, 5])
windows = tf.reshape(x, shape=[-1, window_size, window_size, C])
return windows
def window_reverse(windows, window_size, H, W, C):
x = tf.reshape(windows, shape=[-1, H // window_size,
W // window_size, window_size, window_size, C])
x = tf.transpose(x, perm=[0, 1, 3, 2, 4, 5])
x = tf.reshape(x, shape=[-1, H, W, C])
return x
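# Editor's note: minimal shape-check sketch for the two helpers above. The 56x56 grid,
# 96 channels and window_size=7 are illustrative values, not taken from any model config.
def _window_partition_roundtrip_example():
    x = tf.zeros([2, 56, 56, 96])                       # [B, H, W, C]
    windows = window_partition(x, 7)                    # [2 * (56 // 7) ** 2, 7, 7, 96]
    restored = window_reverse(windows, 7, 56, 56, 96)   # back to [2, 56, 56, 96]
    return windows.shape, restored.shape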
class WindowAttention(tf.keras.layers.Layer):
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., prefix=''):
super().__init__()
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.prefix = prefix
self.qkv = Dense(dim * 3, use_bias=qkv_bias,
name=f'{self.prefix}/attn/qkv')
self.attn_drop = Dropout(attn_drop)
self.proj = Dense(dim, name=f'{self.prefix}/attn/proj')
self.proj_drop = Dropout(proj_drop)
def build(self, input_shape):
self.relative_position_bias_table = self.add_weight(f'{self.prefix}/attn/relative_position_bias_table',
shape=(
(2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), self.num_heads),
initializer=tf.initializers.Zeros(), trainable=True)
coords_h = np.arange(self.window_size[0])
coords_w = np.arange(self.window_size[1])
coords = np.stack(np.meshgrid(coords_h, coords_w, indexing='ij'))
coords_flatten = coords.reshape(2, -1)
relative_coords = coords_flatten[:, :,
None] - coords_flatten[:, None, :]
relative_coords = relative_coords.transpose([1, 2, 0])
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1).astype(np.int64)
self.relative_position_index = tf.Variable(initial_value=tf.convert_to_tensor(
relative_position_index), trainable=False, name=f'{self.prefix}/attn/relative_position_index')
self.built = True
def call(self, x, mask=None):
B_, N, C = x.shape
qkv = tf.transpose(tf.reshape(self.qkv(
x), shape=[-1, N, 3, self.num_heads, C // self.num_heads]), perm=[2, 0, 3, 1, 4])
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ tf.transpose(k, perm=[0, 1, 3, 2]))
relative_position_bias = tf.gather(self.relative_position_bias_table, tf.reshape(
self.relative_position_index, shape=[-1]))
relative_position_bias = tf.reshape(relative_position_bias, shape=[
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1])
relative_position_bias = tf.transpose(
relative_position_bias, perm=[2, 0, 1])
attn = attn + tf.expand_dims(relative_position_bias, axis=0)
if mask is not None:
nW = mask.get_shape()[0] # tf.shape(mask)[0]
attn = tf.reshape(attn, shape=[-1, nW, self.num_heads, N, N]) + tf.cast(
tf.expand_dims(tf.expand_dims(mask, axis=1), axis=0), attn.dtype)
attn = tf.reshape(attn, shape=[-1, self.num_heads, N, N])
attn = tf.nn.softmax(attn, axis=-1)
else:
attn = tf.nn.softmax(attn, axis=-1)
attn = self.attn_drop(attn)
x = tf.transpose((attn @ v), perm=[0, 2, 1, 3])
x = tf.reshape(x, shape=[-1 , N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x
def drop_path(inputs, drop_prob, is_training):
if (not is_training) or (drop_prob == 0.):
return inputs
# Compute keep_prob
keep_prob = 1.0 - drop_prob
# Compute drop_connect tensor
random_tensor = keep_prob
shape = (tf.shape(inputs)[0],) + (1,) * \
(len(tf.shape(inputs)) - 1)
random_tensor += tf.random.uniform(shape, dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
output = tf.math.divide(inputs, keep_prob) * binary_tensor
return output
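# Editor's note: tiny usage sketch for the stochastic-depth helper above. With drop_prob=0.2
# and is_training=True, each sample is zeroed with probability 0.2 and surviving samples are
# scaled by 1 / 0.8 so the expectation is preserved; at eval time the input passes through.
def _drop_path_example():
    x = tf.ones([4, 8])
    out_eval = drop_path(x, drop_prob=0.2, is_training=False)   # identical to x
    out_train = drop_path(x, drop_prob=0.2, is_training=True)   # rows are all 0 or all 1.25
    return out_eval, out_train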
class DropPath(tf.keras.layers.Layer):
def __init__(self, drop_prob=None):
super().__init__()
self.drop_prob = drop_prob
def call(self, x, training=None):
return drop_path(x, self.drop_prob, training)
class SwinTransformerBlock(tf.keras.layers.Layer):
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, mlp_ratio=4.,
qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path_prob=0., norm_layer=LayerNormalization, prefix=''):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must be in the range [0, window_size)"
self.prefix = prefix
self.norm1 = norm_layer(epsilon=1e-5, name=f'{self.prefix}/norm1')
self.attn = WindowAttention(dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, prefix=self.prefix)
self.drop_path = DropPath(
drop_path_prob if drop_path_prob > 0. else 0.)
self.norm2 = norm_layer(epsilon=1e-5, name=f'{self.prefix}/norm2')
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
drop=drop, prefix=self.prefix)
def build(self, input_shape):
if self.shift_size > 0:
H, W = self.input_resolution
img_mask = np.zeros([1, H, W, 1])
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
img_mask = tf.convert_to_tensor(img_mask)
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = tf.reshape(
mask_windows, shape=[-1, self.window_size * self.window_size])
attn_mask = tf.expand_dims(
mask_windows, axis=1) - tf.expand_dims(mask_windows, axis=2)
attn_mask = tf.where(attn_mask != 0, -100.0, attn_mask)
attn_mask = tf.where(attn_mask == 0, 0.0, attn_mask)
self.attn_mask = tf.Variable(
initial_value=attn_mask, trainable=False, name=f'{self.prefix}/attn_mask')
else:
self.attn_mask = None
self.built = True
def call(self, x):
H, W = self.input_resolution
B, L, C = x.get_shape().as_list()
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = tf.reshape(x, shape=[-1, H, W, C])
# cyclic shift
if self.shift_size > 0:
shifted_x = tf.roll(
x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2])
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size)
x_windows = tf.reshape(
x_windows, shape=[-1, self.window_size * self.window_size, C])
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask)
# merge windows
attn_windows = tf.reshape(
attn_windows, shape=[-1, self.window_size, self.window_size, C])
shifted_x = window_reverse(attn_windows, self.window_size, H, W, C)
# reverse cyclic shift
if self.shift_size > 0:
x = tf.roll(shifted_x, shift=[
self.shift_size, self.shift_size], axis=[1, 2])
else:
x = shifted_x
x = tf.reshape(x, shape=[-1, H * W, C])
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchMerging(tf.keras.layers.Layer):
def __init__(self, input_resolution, dim, norm_layer=LayerNormalization, prefix=''):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = Dense(2 * dim, use_bias=False,
name=f'{prefix}/downsample/reduction')
self.norm = norm_layer(epsilon=1e-5, name=f'{prefix}/downsample/norm')
def call(self, x):
H, W = self.input_resolution
B, L, C = x.get_shape().as_list()
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
x = tf.reshape(x, shape=[-1, H, W, C])
x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
x = tf.concat([x0, x1, x2, x3], axis=-1)
x = tf.reshape(x, shape=[-1, (H // 2) * (W // 2), 4 * C])
x = self.norm(x)
x = self.reduction(x)
return x
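# Editor's note: shape sketch for PatchMerging above; the 56x56 token grid and 96 channels
# are illustrative. The layer halves each spatial side and doubles the channel width.
def _patch_merging_example():
    tokens = tf.zeros([2, 56 * 56, 96])                      # [B, H*W, C]
    merged = PatchMerging(input_resolution=(56, 56), dim=96)(tokens)
    return merged.shape                                      # [2, 28 * 28, 192]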
class BasicLayer(tf.keras.layers.Layer):
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path_prob=0., norm_layer=LayerNormalization, downsample=None, use_checkpoint=False, prefix=''):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = tf.keras.Sequential([SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (
i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path_prob=drop_path_prob[i] if isinstance(
drop_path_prob, list) else drop_path_prob,
norm_layer=norm_layer,
prefix=f'{prefix}/blocks{i}') for i in range(depth)])
if downsample is not None:
self.downsample = downsample(
input_resolution, dim=dim, norm_layer=norm_layer, prefix=prefix)
else:
self.downsample = None
def call(self, x):
x = self.blocks(x)
if self.downsample is not None:
x = self.downsample(x)
return x
class PatchEmbed(tf.keras.layers.Layer):
def __init__(self, img_size=(224, 224), patch_size=(4, 4), in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
patches_resolution = [img_size[0] //
patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = Conv2D(embed_dim, kernel_size=patch_size,
strides=patch_size, name='proj')
if norm_layer is not None:
self.norm = norm_layer(epsilon=1e-5, name='norm')
else:
self.norm = None
def call(self, x):
B, H, W, C = x.get_shape().as_list()
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
x = tf.reshape(
x, shape=[-1, (H // self.patch_size[0]) * (W // self.patch_size[0]), self.embed_dim])
if self.norm is not None:
x = self.norm(x)
return x
class ReversedPatchEmbed(Layer):
def __init__(self,patch_size=4, dim=256):
super().__init__()
self.trans_conv2d = tf.keras.layers.Conv2DTranspose(dim, patch_size, patch_size)
def call(self,input):
B, L, C = input.shape
x = tf.reshape(input, [-1, int(np.sqrt(L)), int(np.sqrt(L)), C])
x = self.trans_conv2d(x)
return x
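# Editor's note: shape sketch for ReversedPatchEmbed above; it maps a token sequence back to
# a spatial feature map with a transposed convolution. The sequence length must be a perfect
# square; the sizes below are illustrative.
def _reversed_patch_embed_example():
    tokens = tf.zeros([2, 48 * 48, 256])                 # [B, L, C]
    fmap = ReversedPatchEmbed(patch_size=4, dim=48)(tokens)
    return fmap.shape                                    # [2, 192, 192, 48]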
#-----------------------------------------------------EFFICIENTNET_PART---------------------------------------------------------------------------------
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
"""Initialization for convolutional kernels.
The main difference with tf.variance_scaling_initializer is that
tf.variance_scaling_initializer uses a truncated normal with an uncorrected
standard deviation, whereas here we use a normal distribution. Similarly,
tf.initializers.variance_scaling uses a truncated normal with
a corrected standard deviation.
Args:
shape: shape of variable
dtype: dtype of variable
partition_info: unused
Returns:
an initialization for the variable
"""
del partition_info
kernel_height, kernel_width, _, out_filters = shape
fan_out = int(kernel_height * kernel_width * out_filters)
return tf.random.normal(
shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
"""Initialization for dense kernels.
This initialization is equal to
tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
distribution='uniform').
It is written out explicitly here for clarity.
Args:
shape: shape of variable
dtype: dtype of variable
partition_info: unused
Returns:
an initialization for the variable
"""
del partition_info
init_range = 1.0 / np.sqrt(shape[1])
return tf.random.uniform(shape, -init_range, init_range, dtype=dtype)
def round_filters(filters, mconfig, skip=False):
"""Round number of filters based on depth multiplier."""
multiplier = mconfig.width_coefficient
divisor = mconfig.depth_divisor
min_depth = mconfig.min_depth
if skip or not multiplier:
return filters
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
return int(new_filters)
def round_repeats(repeats, multiplier, skip=False):
"""Round number of filters based on depth multiplier."""
if skip or not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
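# Editor's note (worked example, illustrative numbers): with width_coefficient=1.8 and
# depth_divisor=8, round_filters maps 32 filters to int(32 * 1.8 + 8 / 2) // 8 * 8 = 56,
# i.e. the scaled width is rounded to a nearby multiple of the divisor (never below min_depth).
# round_repeats with a depth multiplier of 2.2 maps 3 repeats to ceil(3 * 2.2) = 7.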
class SE(tf.keras.layers.Layer):
"""Squeeze-and-excitation layer."""
def __init__(self, mconfig, se_filters, output_filters, name=None):
super().__init__(name=name)
self._local_pooling = mconfig.local_pooling
self._data_format = mconfig.data_format
self._act = utils.get_act_fn(mconfig.act_fn)
# Squeeze and Excitation layer.
self._se_reduce = tf.keras.layers.Conv2D(
se_filters,
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=True,
name='conv2d')
self._se_expand = tf.keras.layers.Conv2D(
output_filters,
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=True,
name='conv2d_1')
def call(self, inputs):
h_axis, w_axis = [2, 3] if self._data_format == 'channels_first' else [1, 2]
if self._local_pooling:
se_tensor = tf.nn.avg_pool(
inputs,
ksize=[1, inputs.shape[h_axis], inputs.shape[w_axis], 1],
strides=[1, 1, 1, 1],
padding='VALID')
else:
se_tensor = tf.reduce_mean(inputs, [h_axis, w_axis], keepdims=True)
se_tensor = self._se_expand(self._act(self._se_reduce(se_tensor)))
logging.info('Built SE %s : %s', self.name, se_tensor.shape)
return tf.sigmoid(se_tensor) * inputs
class MBConvBlock(tf.keras.layers.Layer):
"""A class of MBConv: Mobile Inverted Residual Bottleneck.
Attributes:
endpoints: dict. A list of internal tensors.
"""
def __init__(self, block_args, mconfig, name=None):
"""Initializes a MBConv block.
Args:
block_args: BlockArgs, arguments to create a Block.
mconfig: GlobalParams, a set of global parameters.
name: layer name.
"""
super().__init__(name=name)
self._block_args = copy.deepcopy(block_args)
self._mconfig = copy.deepcopy(mconfig)
self._local_pooling = mconfig.local_pooling
self._data_format = mconfig.data_format
self._channel_axis = 1 if self._data_format == 'channels_first' else -1
self._act = utils.get_act_fn(mconfig.act_fn)
self._has_se = (
self._block_args.se_ratio is not None and
0 < self._block_args.se_ratio <= 1)
self.endpoints = None
# Builds the block according to the arguments.
self._build()
@property
def block_args(self):
return self._block_args
def _build(self):
"""Builds block according to the arguments."""
# pylint: disable=g-long-lambda
bid = itertools.count(0)
get_norm_name = lambda: 'tpu_batch_normalization' + ('' if not next(
bid) else '_' + str(next(bid) // 2))
cid = itertools.count(0)
get_conv_name = lambda: 'conv2d' + ('' if not next(cid) else '_' + str(
next(cid) // 2))
# pylint: enable=g-long-lambda
mconfig = self._mconfig
filters = self._block_args.input_filters * self._block_args.expand_ratio
kernel_size = self._block_args.kernel_size
# Expansion phase. Called if not using fused convolutions and expansion
# phase is necessary.
if self._block_args.expand_ratio != 1:
self._expand_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name=get_conv_name())
self._norm0 = utils.normalization(
mconfig.bn_type,
axis=self._channel_axis,
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups,
name=get_norm_name())
# Depth-wise convolution phase. Called if not using fused convolutions.
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=self._block_args.strides,
depthwise_initializer=conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name='depthwise_conv2d')
self._norm1 = utils.normalization(
mconfig.bn_type,
axis=self._channel_axis,
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups,
name=get_norm_name())
if self._has_se:
num_reduced_filters = max(
1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se = SE(self._mconfig, num_reduced_filters, filters, name='se')
else:
self._se = None
# Output phase.
filters = self._block_args.output_filters
self._project_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name=get_conv_name())
self._norm2 = utils.normalization(
mconfig.bn_type,
axis=self._channel_axis,
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups,
name=get_norm_name())
def residual(self, inputs, x, training, survival_prob):
if (self._block_args.strides == 1 and
self._block_args.input_filters == self._block_args.output_filters):
# Apply only if skip connection presents.
if survival_prob:
x = utils.drop_connect(x, training, survival_prob)
x = tf.add(x, inputs)
return x
def call(self, inputs, training, survival_prob=None):
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
survival_prob: float, between 0 to 1, drop connect rate.
Returns:
An output tensor.
"""
logging.info('Block %s input shape: %s (%s)', self.name, inputs.shape,
inputs.dtype)
x = inputs
if self._block_args.expand_ratio != 1:
x = self._act(self._norm0(self._expand_conv(x), training=training))
logging.info('Expand shape: %s', x.shape)
x = self._act(self._norm1(self._depthwise_conv(x), training=training))
logging.info('DWConv shape: %s', x.shape)
if self._mconfig.conv_dropout and self._block_args.expand_ratio > 1:
x = tf.keras.layers.Dropout(self._mconfig.conv_dropout)(
x, training=training)
if self._se:
x = self._se(x)
self.endpoints = {'expansion_output': x}
x = self._norm2(self._project_conv(x), training=training)
x = self.residual(inputs, x, training, survival_prob)
logging.info('Project shape: %s', x.shape)
return x
class FusedMBConvBlock(MBConvBlock):
"""Fusing the proj conv1x1 and depthwise_conv into a conv2d."""
def _build(self):
"""Builds block according to the arguments."""
# pylint: disable=g-long-lambda
bid = itertools.count(0)
get_norm_name = lambda: 'tpu_batch_normalization' + ('' if not next(
bid) else '_' + str(next(bid) // 2))
cid = itertools.count(0)
get_conv_name = lambda: 'conv2d' + ('' if not next(cid) else '_' + str(
next(cid) // 2))
# pylint: enable=g-long-lambda
mconfig = self._mconfig
block_args = self._block_args
filters = block_args.input_filters * block_args.expand_ratio
kernel_size = block_args.kernel_size
if block_args.expand_ratio != 1:
# Expansion phase:
self._expand_conv = tf.keras.layers.Conv2D(
filters,
kernel_size=kernel_size,
strides=block_args.strides,
kernel_initializer=conv_kernel_initializer,
padding='same',
use_bias=False,
name=get_conv_name())
self._norm0 = utils.normalization(
mconfig.bn_type,
axis=self._channel_axis,
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups,
name=get_norm_name())
if self._has_se:
num_reduced_filters = max(
1, int(block_args.input_filters * block_args.se_ratio))
self._se = SE(mconfig, num_reduced_filters, filters, name='se')
else:
self._se = None
# Output phase:
filters = block_args.output_filters
self._project_conv = tf.keras.layers.Conv2D(
filters,
kernel_size=1 if block_args.expand_ratio != 1 else kernel_size,
strides=1 if block_args.expand_ratio != 1 else block_args.strides,
kernel_initializer=conv_kernel_initializer,
padding='same',
use_bias=False,
name=get_conv_name())
self._norm1 = utils.normalization(
mconfig.bn_type,
axis=self._channel_axis,
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups,
name=get_norm_name())
def call(self, inputs, training, survival_prob=None):
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
survival_prob: float, between 0 to 1, drop connect rate.
Returns:
An output tensor.
"""
logging.info('Block %s input shape: %s', self.name, inputs.shape)
x = inputs
if self._block_args.expand_ratio != 1:
x = self._act(self._norm0(self._expand_conv(x), training=training))
logging.info('Expand shape: %s', x.shape)
self.endpoints = {'expansion_output': x}
if self._mconfig.conv_dropout and self._block_args.expand_ratio > 1:
x = tf.keras.layers.Dropout(self._mconfig.conv_dropout)(x, training)
if self._se:
x = self._se(x)
x = self._norm1(self._project_conv(x), training=training)
if self._block_args.expand_ratio == 1:
x = self._act(x) # add act if no expansion.
x = self.residual(inputs, x, training, survival_prob)
logging.info('Project shape: %s', x.shape)
return x
class Stem(tf.keras.layers.Layer):
"""Stem layer at the begining of the network."""
def __init__(self, mconfig, stem_filters, name=None):
super().__init__(name=name)
self._conv_stem = tf.keras.layers.Conv2D(
filters=round_filters(stem_filters, mconfig),
kernel_size=3,
strides=2,
kernel_initializer=conv_kernel_initializer,
padding='same',
data_format=mconfig.data_format,
use_bias=False,
name='conv2d')
self._norm = utils.normalization(
mconfig.bn_type,
axis=(1 if mconfig.data_format == 'channels_first' else -1),
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups)
self._act = utils.get_act_fn(mconfig.act_fn)
def call(self, inputs, training):
return self._act(self._norm(self._conv_stem(inputs), training=training))
class Head(tf.keras.layers.Layer):
"""Head layer for network outputs."""
def __init__(self, mconfig, name=None):
super().__init__(name=name)
self.endpoints = {}
self._mconfig = mconfig
self._conv_head = tf.keras.layers.Conv2D(
filters=round_filters(mconfig.feature_size or 1280, mconfig),
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer,
padding='same',
data_format=mconfig.data_format,
use_bias=False,
name='conv2d')
self._norm = utils.normalization(
mconfig.bn_type,
axis=(1 if mconfig.data_format == 'channels_first' else -1),
momentum=mconfig.bn_momentum,
epsilon=mconfig.bn_epsilon,
groups=mconfig.gn_groups)
self._act = utils.get_act_fn(mconfig.act_fn)
self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
data_format=mconfig.data_format)
if mconfig.dropout_rate > 0:
self._dropout = tf.keras.layers.Dropout(mconfig.dropout_rate)
else:
self._dropout = None
self.h_axis, self.w_axis = ([2, 3] if mconfig.data_format
== 'channels_first' else [1, 2])
def call(self, inputs, training):
"""Call the layer."""
outputs = self._act(self._norm(self._conv_head(inputs), training=training))
self.endpoints['head_1x1'] = outputs
if self._mconfig.local_pooling:
shape = outputs.get_shape().as_list()
kernel_size = [1, shape[self.h_axis], shape[self.w_axis], 1]
outputs = tf.nn.avg_pool(
outputs, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
self.endpoints['pooled_features'] = outputs
if self._dropout:
outputs = self._dropout(outputs, training=training)
self.endpoints['global_pool'] = outputs
if getattr(self, '_fc', None):  # no dense classification head is defined in this variant
outputs = tf.squeeze(outputs, [self.h_axis, self.w_axis])
outputs = self._fc(outputs)
self.endpoints['head'] = outputs
else:
outputs = self._avg_pooling(outputs)
self.endpoints['pooled_features'] = outputs
if self._dropout:
outputs = self._dropout(outputs, training=training)
self.endpoints['head'] = outputs
return outputs
class DenseWithBN(tf.keras.layers.Layer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.dense_lr = keras.layers.Dense(units)
self._norm = keras.layers.BatchNormalization()
def call(self, inputs):
return tf.nn.silu(self._norm(self.dense_lr(inputs)))
#------------------------------------------------------THE_MODEL----------------------------------------------------------------------
class CuteNetModel(tf.keras.Model):
"""A class implements tf.keras.Model.
Reference: https://arxiv.org/abs/1807.11626
"""
def __init__(self,
effnet_model_name='efficientnetv2-m',
effnet_model_config=None,
name='cutenet',
img_size=(384, 384), window_size=12, embed_dim=128,
depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32],
patch_size=(4, 4),in_chans=24,
mlp_ratio=4., qkv_bias=True, qk_scale=None, num_classes=1000,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=LayerNormalization, ape=False, patch_norm=True,
**kwargs):
"""Initializes an `Model` instance.
Args:
model_name: A string of model name for efficinetnet model.
model_config: A dict of efficientnet model configurations or a string of hparams.
name: A string of layer name.
Raises:
ValueError: when blocks_args is not specified as a list.
"""
super().__init__(name=name)
cfg = copy.deepcopy(hparams.base_config)
if effnet_model_name:
cfg.override(effnetv2_configs.get_model_config(effnet_model_name))
cfg.model.override(effnet_model_config)
self.cfg = cfg
self._mconfig = cfg.model
self.endpoints = None
self.in_chans = in_chans
self.img_size = img_size
self.patch_size = patch_size
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.drop_rate = drop_rate
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.norm_layer = LayerNormalization
self.ape = ape
self.patch_norm = patch_norm
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self._build()
def _build(self):
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim,
norm_layer=self.norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.patch_embed_1 = PatchEmbed(
img_size=(192,192), in_chans=self.in_chans, embed_dim=self.embed_dim*2,
norm_layer=self.norm_layer if self.patch_norm else None)
self.patch_embed_2 = PatchEmbed(
img_size=(96,96), in_chans=self.in_chans, embed_dim=self.embed_dim*4,
norm_layer=self.norm_layer if self.patch_norm else None)
self.patch_embed_3 = PatchEmbed(
img_size=(48,48), in_chans=self.in_chans, embed_dim=self.embed_dim*8,
norm_layer=self.norm_layer if self.patch_norm else None)
self.embeder = [self.patch_embed_1,self.patch_embed_2,self.patch_embed_3]
self.swin_concat_1 = tf.keras.layers.Concatenate()
self.swin_concat_2 = tf.keras.layers.Concatenate()
self.swin_concat_3 = tf.keras.layers.Concatenate()
self.swin_concat = [self.swin_concat_1, self.swin_concat_2, self.swin_concat_3]
self.effnet_concat_1 = tf.keras.layers.Concatenate()
self.effnet_concat_2 = tf.keras.layers.Concatenate()
self.effnet_concat_3 = tf.keras.layers.Concatenate()
self.effnet_concat = [self.effnet_concat_1, self.effnet_concat_2, self.effnet_concat_3]
self.reversed_embed_1 = ReversedPatchEmbed(dim=48)
self.reversed_embed_2 = ReversedPatchEmbed(dim=80)
self.reversed_embed_3 = ReversedPatchEmbed(dim=176)
self.reversed_embed = [self.reversed_embed_1, self.reversed_embed_2, self.reversed_embed_3]
self.effnet_dense_1 = DenseWithBN(48)
self.effnet_dense_2 = DenseWithBN(80)
self.effnet_dense_3 = DenseWithBN(176)
self.effnet_dense = [self.effnet_dense_1, self.effnet_dense_2, self.effnet_dense_3]
self.swin_dense_1 = DenseWithBN(self.embed_dim*2)
self.swin_dense_2 = DenseWithBN(self.embed_dim*4)
self.swin_dense_3 = DenseWithBN(self.embed_dim*8)
self.swin_dense = [self.swin_dense_1, self.swin_dense_2, self.swin_dense_3]
self.final_concat = tf.keras.layers.Concatenate()
# absolute position embedding
if self.ape:
initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=.02)
# TODO: Check to make sure that this variable is supposed to not be trainable
self.absolute_pos_embed = tf.Variable(initializer(shape = (1, num_patches, self.embed_dim)), trainable=False)
self.pos_drop = tf.keras.layers.Dropout(rate=self.drop_rate)
# stochastic depth
dpr = [x for x in np.linspace(0, self.drop_path_rate, sum(self.depths))] # stochastic depth decay rule
# build layers
self._swin_blocks = [tf.keras.models.Sequential(BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=self.depths[i_layer],
num_heads=self.num_heads[i_layer],
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
drop=self.drop_rate, attn_drop=self.attn_drop_rate,
drop_path_prob=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
norm_layer=self.norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None)) for i_layer in range(self.num_layers)]
# TODO: Check impact of epsilon
self.norm = tf.keras.layers.BatchNormalization()
self.avgpool = tf.keras.layers.GlobalAveragePooling1D()
"""Builds a model."""
self._effnet_blocks = []
# Stem part.
self._stem = Stem(self._mconfig, self._mconfig.blocks_args[0].input_filters)
# Builds blocks.
block_id = itertools.count(0)
block_name = lambda: 'blocks_%d' % next(block_id)
for block_args in self._mconfig.blocks_args:
assert block_args.num_repeat > 0
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(block_args.input_filters, self._mconfig)
output_filters = round_filters(block_args.output_filters, self._mconfig)
repeats = round_repeats(block_args.num_repeat,
self._mconfig.depth_coefficient)
block_args.update(
dict(
input_filters=input_filters,
output_filters=output_filters,
num_repeat=repeats))
# The first block needs to take care of stride and filter size increase.
conv_block = {0: MBConvBlock, 1: FusedMBConvBlock}[block_args.conv_type]
self._effnet_blocks.append(
conv_block(block_args, self._mconfig, name=block_name()))
if block_args.num_repeat > 1: # rest of blocks with the same block_arg
# pylint: disable=protected-access
block_args.input_filters = block_args.output_filters
block_args.strides = 1
# pylint: enable=protected-access
for _ in range(block_args.num_repeat - 1):
self._effnet_blocks.append(
conv_block(block_args, self._mconfig, name=block_name()))
# Head part.
self._head = Head(self._mconfig)
def get_model_with_inputs(self, inputs, **kargs):
model = tf.keras.Model(
inputs=[inputs], outputs=self.call(inputs, training=True))
return model
def call(self, inputs, training=False, with_endpoints=False):
"""Implementation of call().
Args:
inputs: input tensors.
training: boolean, whether the model is constructed for training.
with_endpoints: If true, return a list of endpoints.
Returns:
output tensors.
"""
outputs = None
self.endpoints = {}
reduction_idx = 0
# Calls Stem layers
outputs = self._stem(inputs, training)
swin_outputs = self.patch_embed(outputs)
if self.ape:
swin_outputs = swin_outputs + self.absolute_pos_embed
swin_outputs = self.pos_drop(swin_outputs)
logging.info('Built stem: %s (%s)', outputs.shape, outputs.dtype)
self.endpoints['stem'] = outputs
# Calls blocks.
for idx, block in enumerate(self._effnet_blocks):
is_reduction = False # reduction flag for blocks after the stem layer
if ((idx == len(self._effnet_blocks) - 1) or
self._effnet_blocks[idx + 1].block_args.strides > 1):
is_reduction = True
reduction_idx += 1
survival_prob = self._mconfig.survival_prob
if survival_prob:
drop_rate = 1.0 - survival_prob
survival_prob = 1.0 - drop_rate * float(idx) / len(self._effnet_blocks)
logging.info('block_%s survival_prob: %s', idx, survival_prob)
outputs = block(outputs, training=training, survival_prob=survival_prob)
self.endpoints['block_%s' % idx] = outputs
if is_reduction:
self.endpoints['reduction_%s' % reduction_idx] = outputs
if reduction_idx > 1:
swin_outputs = self._swin_blocks[reduction_idx-2](swin_outputs)
if reduction_idx <5:
effnet_embed = self.embeder[reduction_idx-2](outputs)
reversed_embed = self.reversed_embed[reduction_idx-2](swin_outputs)
outputs = self.effnet_concat[reduction_idx-2]([reversed_embed, outputs])
outputs = self.effnet_dense[reduction_idx-2](outputs)
swin_outputs = self.swin_concat[reduction_idx-2]([swin_outputs, effnet_embed])
swin_outputs = self.swin_dense[reduction_idx-2](swin_outputs)
if block.endpoints:
for k, v in block.endpoints.items():
self.endpoints['block_%s/%s' % (idx, k)] = v
if is_reduction:
self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v
self.endpoints['features'] = outputs
# Head to obtain the final feature.
outputs = self._head(outputs)
swin_outputs = self.avgpool(swin_outputs)
outputs = self.final_concat([outputs, swin_outputs])
self.endpoints.update(self._head.endpoints)
if with_endpoints: # Use for building sequential models.
return [outputs] + list(
filter(lambda endpoint: endpoint is not None, [
self.endpoints.get('reduction_1'),
self.endpoints.get('reduction_2'),
self.endpoints.get('reduction_3'),
self.endpoints.get('reduction_4'),
self.endpoints.get('reduction_5'),
]))
return outputs
def get_model(model_name,
model_config=None,
include_top=True,
weights='imagenet',
training=True,
with_endpoints=False,
**kwargs):
"""Get a EfficientNet V1 or V2 model instance.
This is a simply utility for finetuning or inference.
Args:
model_name: a string such as 'efficientnetv2-s' or 'efficientnet-b0'.
model_config: A dict of model configurations or a string of hparams.
include_top: whether to include the final dense layer for classification.
weights: One of None (random initialization),
'imagenet' (pretrained on ImageNet),
'imagenet21k' (pretrained on Imagenet21k),
'imagenet21k-ft1k' (pretrained on 21k and finetuned on 1k),
'jft' (trained with non-labelled JFT-300),
or the path to the weights file to be loaded. Defaults to 'imagenet'.
training: If true, all model variables are trainable.
with_endpoints: whether to return all intermediate endpoints.
**kwargs: additional parameters for keras model, such as name=xx.
Returns:
A single tensor if with_endpoints is False; otherwise, a list of tensors.
"""
net = EffNetV2Model(model_name, model_config, include_top, **kwargs)
net(tf.keras.Input(shape=(None, None, 3)),
training=training,
with_endpoints=with_endpoints)
if not weights: # pylint: disable=g-bool-id-comparison
return net
v2url = 'https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/v2/'
v1url = 'https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/'
v1jfturl = 'https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/'
pretrained_ckpts = {
# EfficientNet V2.
'efficientnetv2-s': {
'imagenet': v2url + 'efficientnetv2-s.tgz',
'imagenet21k': v2url + 'efficientnetv2-s-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-s-21k-ft1k.tgz',
},
'efficientnetv2-m': {
'imagenet': v2url + 'efficientnetv2-m.tgz',
'imagenet21k': v2url + 'efficientnetv2-m-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-m-21k-ft1k.tgz',
},
'efficientnetv2-l': {
'imagenet': v2url + 'efficientnetv2-l.tgz',
'imagenet21k': v2url + 'efficientnetv2-l-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-l-21k-ft1k.tgz',
},
'efficientnetv2-xl': {
# no imagenet ckpt.
'imagenet21k': v2url + 'efficientnetv2-xl-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-xl-21k-ft1k.tgz',
},
'efficientnetv2-b0': {
'imagenet': v2url + 'efficientnetv2-b0.tgz',
'imagenet21k': v2url + 'efficientnetv2-b0-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-b0-21k-ft1k.tgz',
},
'efficientnetv2-b1': {
'imagenet': v2url + 'efficientnetv2-b1.tgz',
'imagenet21k': v2url + 'efficientnetv2-b1-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-b1-21k-ft1k.tgz',
},
'efficientnetv2-b2': {
'imagenet': v2url + 'efficientnetv2-b2.tgz',
'imagenet21k': v2url + 'efficientnetv2-b2-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-b2-21k-ft1k.tgz',
},
'efficientnetv2-b3': {
'imagenet': v2url + 'efficientnetv2-b3.tgz',
'imagenet21k': v2url + 'efficientnetv2-b3-21k.tgz',
'imagenet21k-ft1k': v2url + 'efficientnetv2-b3-21k-ft1k.tgz',
},
# EfficientNet V1.
'efficientnet-b0': {
'imagenet': v1url + 'efficientnet-b0.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b0.tar.gz',
},
'efficientnet-b1': {
'imagenet': v1url + 'efficientnet-b1.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b1.tar.gz',
},
'efficientnet-b2': {
'imagenet': v1url + 'efficientnet-b2.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b2.tar.gz',
},
'efficientnet-b3': {
'imagenet': v1url + 'efficientnet-b3.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b3.tar.gz',
},
'efficientnet-b4': {
'imagenet': v1url + 'efficientnet-b4.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b4.tar.gz',
},
'efficientnet-b5': {
'imagenet': v1url + 'efficientnet-b5.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b5.tar.gz',
},
'efficientnet-b6': {
'imagenet': v1url + 'efficientnet-b6.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b6.tar.gz',
},
'efficientnet-b7': {
'imagenet': v1url + 'efficientnet-b7.tar.gz',
'jft': v1jfturl + 'noisy_student_efficientnet-b7.tar.gz',
},
'efficientnet-b8': {
'imagenet': v1url + 'efficientnet-b8.tar.gz',
},
'efficientnet-l2': {
'jft': v1jfturl + 'noisy_student_efficientnet-l2_475.tar.gz',
},
}
if model_name in pretrained_ckpts and weights in pretrained_ckpts[model_name]:
url = pretrained_ckpts[model_name][weights]
fname = os.path.basename(url).split('.')[0]
pretrained_ckpt = tf.keras.utils.get_file(fname, url, untar=True)
else:
pretrained_ckpt = weights
if tf.io.gfile.isdir(pretrained_ckpt):
pretrained_ckpt = tf.train.latest_checkpoint(pretrained_ckpt)
net.load_weights(pretrained_ckpt)
return net
``` |
{
"source": "Jobayer-000/DOLG-TensorFlow",
"score": 2
} |
#### File: DOLG-TensorFlow/layers/OrtholFusion.py
```python
import tensorflow as tf
from tensorflow.keras import layers
class OrthogonalFusion(layers.Layer):
def __init__(self, **kwargs):
super().__init__(name='OrthogonalFusion', **kwargs)
def call(self, inputs):
local_feat, global_feat = inputs
height = local_feat.shape[1]
width = local_feat.shape[2]
depth = local_feat.shape[3]
local_feat = tf.reshape(local_feat, [-1, height*width, depth])
local_feat = tf.transpose(local_feat, perm=[0, 2, 1])
projection = tf.matmul(
tf.expand_dims(global_feat, axis=1),
local_feat
)
projection = tf.matmul(
tf.expand_dims(global_feat, axis=2),
projection
)
# Both tensors are currently [B, depth, H*W]; move the channel axis back to the end
# before reshaping to the channels-last feature-map layout.
projection = tf.transpose(projection, perm=[0, 2, 1])
projection = tf.reshape(projection, [-1, height, width, depth])
global_feat_norm = tf.norm(global_feat, ord=2, axis=1)
projection = projection / tf.reshape(global_feat_norm*global_feat_norm, shape=[-1, 1, 1, 1])
local_feat = tf.transpose(local_feat, perm=[0, 2, 1])
local_feat = tf.reshape(local_feat, [-1, height, width, depth])
orthogonal_comp = local_feat - projection
global_feat = tf.expand_dims(tf.expand_dims(global_feat, axis=1), axis=1)
global_feat = tf.broadcast_to(global_feat, tf.shape(local_feat))
output = tf.concat([global_feat, orthogonal_comp], axis=-1)
return output
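# Editor's sketch (not part of the original layer): shape check for the fusion. The channel
# count doubles because the broadcast global descriptor is concatenated onto the orthogonal
# component. The batch and spatial sizes below are made up.
def _orthogonal_fusion_example():
    local_feat = tf.random.normal([2, 16, 16, 1024])    # [B, H, W, C] local branch
    global_feat = tf.random.normal([2, 1024])           # [B, C] global branch
    fused = OrthogonalFusion()([local_feat, global_feat])
    return fused.shape                                   # [2, 16, 16, 2048]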
``` |
{
"source": "jobc90/Protein-Resarch",
"score": 2
} |
#### File: presearch_trrosetta/prepare/create_dsmap.py
```python
import logging
import os
import sys
import numpy as np
import argparse
from PIL import Image
from Bio.PDB import MMCIFParser
from scipy.spatial import distance_matrix
import pdb
import tqdm
from presearch_trrosetta.utils.vocab import aa_dict
def save_fasta(res_name, fasta_path, seq):
with open(os.path.join(fasta_path, f'{res_name}.fasta'), mode='w') as obj:
obj.write(f'>{res_name}\n')
obj.write(seq)
def create_dsmap(cif_path,
dsmap_path,
fasta_path):
p = MMCIFParser()
for name in tqdm.tqdm(os.listdir(cif_path)):
structure = p.get_structure(name, f"{cif_path}/{name}")
for model in structure:
for chain in model:
pdb_id = os.path.splitext(name)[0]
res_name = f'{pdb_id.upper()}_{chain.id}'
coords = []
seqs = []
# TODO: decide how to handle HETATM records, UNK residues and chains with no secondary structure.
# Missing residues are not stored with the structure records; they are listed in a separate
# section of the mmCIF file, and Biopython skips them during parsing, so they never reach this loop.
# TODO: NMR entries contain many models of the same chain, so there are redundant chains and
# processing them takes correspondingly longer.
for amino in chain:
if amino.get_id()[0] == ' ':
coords.append(amino["CA"].get_coord())
if amino.get_resname()!='UNK':
seqs.append(aa_dict[amino.get_resname()])
logging.info(f"{res_name} - num of coords : {len(coords)}")
if len(coords)>0:
# save img
try:
coords = np.array(coords)
gt_distance_matrix = distance_matrix(coords, coords)
im = Image.fromarray(gt_distance_matrix.astype(np.int8))
im.save(os.path.join(dsmap_path, f'{res_name}.png'))
except Exception:
#pdb.set_trace()
logging.warning(f"check the {res_name}")
# save seq
save_fasta(res_name, fasta_path, ''.join(seqs))
def parse_args(args) :
parser = argparse.ArgumentParser()
parser.add_argument('--cif_path')
parser.add_argument('--dsmap_path')
parser.add_argument('--fasta_path')
return parser.parse_args(args)
def make_dirs(*path):
for _path in path :
os.makedirs(_path, exist_ok=True)
def main(args=None):
if args is None:
args = sys.argv[1:]
args = parse_args(args)
make_dirs(args.dsmap_path,args.fasta_path)
create_dsmap(args.cif_path,
args.dsmap_path,
args.fasta_path)
if __name__ == '__main__' :
main()
# todo : multiprocessing ?
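# Editor's note, example invocation (paths are illustrative):
#   python create_dsmap.py --cif_path ./cif --dsmap_path ./out/dsmap --fasta_path ./out/fasta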
``` |
{
"source": "jobcpf/cosy",
"score": 2
} |
#### File: cosy/bin/cosyd.py
```python
import sys
sys.path.append("/home/squirrel/dev/cosy/cosy") # append python project directory root
# Standard import
import time
import os.path
# Import custom modules
from cosy_daemon import Daemon
import cosy_run as crun
################## Variables #################################### Variables #################################### Variables ##################
from global_config import logging, now_file, BASE_SLEEP, PID_FILE
script_file = "%s: %s" % (now_file,os.path.basename(__file__))
func_name = 'cosy_daemon'
################## Classes ###################################### Classes ###################################### Classes ####################
class CosyDaemon(Daemon):
"""
Subclass Daemon class to run cosy scripts.
"""
def run(self):
idst = None
token3 = None
pol_sleep = BASE_SLEEP
while True:
# logging
now_file = time.strftime('%Y%m%d_%H%M%S')
logging.debug('%s:%s: >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> DAEMON (%s) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n' % (script_file,func_name,now_file))
print '%s: >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> DAEMON (%s) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<' % (func_name,now_file)
# call script run script
rbool, idst, token3, pol_sleep = crun.cosy_run(idst, token3)
# delay/re-run
time.sleep(pol_sleep)
################## Scripts ###################################### Scripts ###################################### Scripts ####################
if __name__ == "__main__":
daemon = CosyDaemon(PID_FILE)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
#logging.debug('%s:%s: Starting COSY Daemon' % (script_file,func_name))
daemon.start()
elif 'stop' == sys.argv[1]:
#logging.debug('%s:%s: Stopping COSY Daemon' % (script_file,func_name))
daemon.stop()
elif 'restart' == sys.argv[1]:
#logging.debug('%s:%s: Restarting COSY Daemon' % (script_file,func_name))
daemon.restart()
elif 'status' == sys.argv[1]:
#logging.debug('%s:%s: Status of COSY Daemon' % (script_file,func_name))
daemon.status()
elif 'test' == sys.argv[1]:
#logging.debug('%s:%s: Starting COSY Daemon in TEST mode (foreground)' % (script_file,func_name))
daemon.run()
else:
#logging.error('%s:%s: Unknown daemon command for COSY' % (script_file,func_name))
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|test)" % sys.argv[0]
sys.exit(2)
```
#### File: cosy/api/api_access.py
```python
import os
import sys
import time
import requests
from requests.auth import HTTPBasicAuth
# Import custom modules
import api.api_auth as apa
import data.data_api as datp
################## Variables #################################### Variables #################################### Variables ##################
from global_config import logging, now_file
script_file = "%s: %s" % (now_file,os.path.basename(__file__))
################## Functions ###################################### Functions ###################################### Functions ####################
def api_call(api_call, user_id = False, token3 = None, method = None, json = False):
"""
Retrieve data from API using token.
> 'api call identifier', [user_id], [token3], [method: PUT|POST|GET], [json data]
< response tuple: (True, json data, token3) or (False,http status code, token3)
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: API call (%s): %s' % (script_file,func_name,method,api_call))
try:
get_token = False
# test if token returned
if token3 is not None :
# build auth header
auth_header = {'Authorization': 'Bearer %s' % token3[1]}
# make request
if method == 'GET' :
#print json
r = requests.get(api_call, json=json, headers=auth_header)
elif method == 'POST' :
#print json
r = requests.post(api_call, json=json, headers=auth_header)
#print r.json()
elif method == 'PUT' :
#print json
r = requests.put(api_call, json=json, headers=auth_header)
#print r.json()
else :
#print json
r = requests.get(api_call, headers=auth_header)
#print r.json()
# check for unauthorised using token
if r.status_code == requests.codes.unauthorized:
logging.debug('%s:%s: API data retrieval unauthorised with token. Status Code: %s' % (script_file,func_name,r.status_code))
get_token = True
if token3 is None or get_token:
# get new token (refresh > u:p)
rbool, token3_or_error = apa.get_new_token(token3 = token3, user_id = user_id) # get new token and add to db
# check if token returned
if rbool:
# build auth header
auth_header = {'Authorization': 'Bearer %s' % token3_or_error[1]}
# make request
if method == 'GET' :
r = requests.get(api_call, json=json, headers=auth_header)
elif method == 'POST' :
r = requests.post(api_call, json=json, headers=auth_header)
elif method == 'PUT' :
r = requests.put(api_call, json=json, headers=auth_header)
else :
r = requests.get(api_call, headers=auth_header)
else:
logging.error('%s:%s: Could not retrieve new token. Error: %s' % (script_file,func_name,token3))
return (False, token3_or_error, None)
# capture status codes from all calls
if r.status_code == requests.codes.unauthorized: # 401
logging.error('%s:%s: API unauthorised with token. Status Code: %s, API Call: %s' % (script_file,func_name,r.status_code,api_call))
#print api_call, r.json()
return (False, r.status_code, token3)
elif r.status_code == requests.codes.internal_server_error:
logging.error('%s:%s: API caused an internal server error. Status Code: %s, API Call: %s' % (script_file,func_name,r.status_code,api_call))
#print api_call, r.json()
return (False, r.status_code, token3)
elif r.status_code == requests.codes.bad_request: # 400
logging.error('%s:%s: Bad request to API. Status Code: %s, API Call: %s' % (script_file,func_name,r.status_code,api_call))
print api_call, r.json()
return (False, r.status_code, token3)
elif r.status_code == requests.codes.not_found: # 404
logging.debug('%s:%s: Resource not found at API. Status Code: %s, API Call: %s' % (script_file,func_name,r.status_code,api_call))
#print "ERROR >>>>>>>>>>>>>", api_call, r.json()
return (False, r.status_code, token3)
elif r.status_code == requests.codes.forbidden: # 403
logging.error('%s:%s: Forbidden to access API. Status Code: %s, API Call: %s' % (script_file,func_name,r.status_code,api_call))
print api_call, r.json()
return (False, r.status_code, token3)
if r.headers['Content-Type'] in ['application/json'] :
# return data
return (True,r.json(),token3)
else:
logging.error('%s:%s: No valid JSON data retrieved from API' % (script_file,func_name))
#print "ERROR >>>>>>>>>>>>>", api_call
#print 'r.request.headers:',r.request.headers
#print 'r.headers:',r.headers
#print 'r.content:',r.content
#exit(1)
return (False, r.status_code, token3)
except requests.exceptions.ConnectionError as e:
logging.error('%s:%s: ConnectionError: %s' % (script_file,func_name,e))
return (False, 503, token3)
```
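Below is a minimal, hedged usage sketch of `api_call` as defined above. The module path, endpoint URL, `user_id` and the shape of the `token3` tuple are illustrative assumptions only (the code above indexes `token3[1]` as the bearer token), not values from the real deployment.
```python
# Hedged usage sketch of api_call() above; module path, URL, user_id and token3 are
# illustrative assumptions, not values taken from the real deployment.
import api.api_access as apac

token3 = None  # becomes a tuple whose index 1 holds the bearer token once issued
ok, payload_or_status, token3 = apac.api_call(
    "http://172.16.32.40:8000/api/0.1/env/reg/710011/",  # placeholder endpoint
    user_id=1,
    token3=token3,
    method="GET",
)
if ok:
    print(payload_or_status)  # decoded JSON body on success
else:
    print(payload_or_status)  # HTTP status code (or token error) on failure
```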
#### File: cosy/data/data_init.py
```python
import os
import sys
import time
from datetime import datetime
import sqlite3
#print "sqlite3 ", sqlite3.version, "run-time SQLite library version ",sqlite3.sqlite_version
# Import custom modules
################## Variables #################################### Variables #################################### Variables ##################
from global_config import logging, now_file, DB_API
script_file = "%s: %s" % (now_file,os.path.basename(__file__))
# define working database for module
db = DB_API
# common db operations
from data_common import create_connection, execute_sql
################## Functions ###################################### Functions ###################################### Functions ####################
def init_db(databases):
"""
Create databases for COSY.
> databases[list of db dict definitions]
< True, False
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Initiate databases' % (script_file,func_name))
# iterate databases & build
for db, db_sql_dict in databases.iteritems() :
# get execute order list from 'sort' value
exec_order = sorted(db_sql_dict, key=lambda x: db_sql_dict[x]['sort'])
# Get sorted order table build data
for table in exec_order:
# initiate CREATE TABLE sql
sql_str = "" + "CREATE TABLE IF NOT EXISTS %s (" % table
if 'pk' in db_sql_dict[table] :
for pk, detail in db_sql_dict[table]['pk'].iteritems():
sql_str = sql_str + "%s %s, " % (pk, detail)
if 'vfields' in db_sql_dict[table] :
for vfield, detail in db_sql_dict[table]['vfields'].iteritems():
sql_str += "%s %s, " % (vfield, detail)
if 'sfields' in db_sql_dict[table] :
for sfield, detail in db_sql_dict[table]['sfields'].iteritems():
sql_str += "%s %s, " % (sfield, detail)
if 'constraints' in db_sql_dict[table] :
for cfield, detail in db_sql_dict[table]['constraints'].iteritems():
sql_str += "%s %s, " % (cfield, detail)
# terminate sql statement
sql_str = sql_str[:-2]
sql_str += ");"
# create table
execute_sql(db, sql_str)
if 'commands' in db_sql_dict[table] :
for command, detail in db_sql_dict[table]['commands'].iteritems():
sql_str = "%s %s;" % (command, detail)
# create table
execute_sql(db, sql_str)
return True
def init_user(table,auth_json):
"""
Create initial user credentials for cosy API access.
TODO: Access user:password from secure file via ssh...
> table, json auth details
< user5 {'passwd': '', 'client_secret': '', 'id': , 'client_id': '', 'user': ''}
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Create initial user credentials in table: %s' % (script_file,func_name,table))
try :
# connect to / create db
conn = create_connection(db)
# generate values to insert
user_detail = (auth_json['user'],
auth_json['user'],
auth_json['passwd'],
auth_json['client_id'],
auth_json['client_secret'],
datetime.now())
# connection object as context manager
with conn:
cur = conn.cursor()
cur.execute("""INSERT OR REPLACE INTO {tn}(id, user, passwd, client_id, client_secret, create_date) VALUES (
(SELECT id FROM {tn} WHERE user = ?), ?, ?, ?, ?, ?
)""".format(tn=table),user_detail)
# get userID to return
userID = cur.lastrowid
except sqlite3.OperationalError as e:
logging.error('%s:%s: Table does not exist - database file missing?' % (script_file,func_name))
raise e
finally:
##### Test
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=table)):
# print row
conn.close()
# add user id to auth_json > user5
auth_json['id'] = userID
return auth_json
```
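A minimal sketch of the nested dict shape that `init_db` above iterates over; the import path, database file name, table and column definitions below are made up for illustration and are not the project's real `DATABASES` structure from `db_sql`.
```python
# Illustrative table-definition dict for init_db(); the module path, db file name,
# table and columns below are assumptions.
from data.data_init import init_db

DATABASES = {
    "cosy_data.db": {
        "control": {
            "sort": 1,
            "pk": {"id": "INTEGER PRIMARY KEY"},
            "vfields": {"sysID": "INTEGER", "status": "TEXT"},
            "sfields": {"last_config": "TIMESTAMP"},
            "constraints": {"UNIQUE": "(sysID)"},
        },
    },
}

init_db(DATABASES)  # builds "CREATE TABLE IF NOT EXISTS control (...)" per table
```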
#### File: cosy/data/data.py
```python
import os
import sys
import time
from datetime import datetime
import sqlite3
#print "sqlite3 ", sqlite3.version, "run-time SQLite library version ",sqlite3.sqlite_version
# Import custom modules
################## Variables #################################### Variables #################################### Variables ##################
from global_config import logging, now_file, DB_DATA, TB_COMM, TB_CEVENT, TB_CECONF
script_file = "%s: %s" % (now_file,os.path.basename(__file__))
# define working database for module
db = DB_DATA
# get database definition dict
from db_sql import DATABASES
from data_common import create_connection, insert_statement, dict_factory
################## Functions ###################################### Functions ###################################### Functions ####################
def insert_data(user_id, table, json):
"""
Insert or Replace data for database.
N.B. Causes id to increment if inserting without id and will reset defaults where fields not passed.
> user_id, table name, json data
< True, False
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Insert or Replace data to table: %s.%s' % (script_file,func_name,db,table))
try :
# build dynamic insert statement components
insert3 = insert_statement(user_id, db, table, json)
# connect to / create db
conn = create_connection(db)
#print insert3[0]
#print insert3[1]
#print insert3[2]
# connection object as context manager
with conn:
conn.executemany("INSERT OR REPLACE INTO {tn}({tf}) VALUES ({ih})".format(tn=table,tf=insert3[0],ih=insert3[1]),insert3[2])
        # connection object used with 'with' will roll back the db on exception and commit on completion
except sqlite3.IntegrityError as e:
logging.error('%s:%s: SQL IntegrityError: %s' % (script_file,func_name,e))
#raise e
return False # e.g. returns false if no control unit discovered...
except sqlite3.OperationalError as e:
logging.error('%s:%s: SQLite Operational Error: %s' % (script_file,func_name,e))
raise e
#return False
except sqlite3.ProgrammingError as e:
logging.error('%s:%s: SQLite Programming Error: %s' % (script_file,func_name,e))
raise e
#return False
finally:
##### Test
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=table)):
# print row
conn.close()
return True
def manage_control(table, sysID = None, method = 'Get idst', data = None):
"""
    Manage the control unit record (status / config / 'self' methods) and return idst for the local unit.
    > table, [sysID], [method: status|config|self], [data]
< idst, False
idst:{'status': u'OK',
'user_id': 1,
'status_bool': 1,
'URI': u'http://172.16.32.40:8000/api/0.1/env/reg/710011/',
'sysID': 710011,
'system_type': 31,
          'last_config': [date]}
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Manage control unit method: %s' % (script_file,func_name,method))
try :
# connect to / create db
conn = create_connection(db)
# connection object as context manager
with conn:
if method == 'status':
# status update
if data is None:
conn.execute("UPDATE {tn} SET status_bool = 1, status = 'OK' WHERE sysID = ?;".format(tn=table),(sysID,))
else:
conn.execute("UPDATE {tn} SET status_bool = 0, status = ? WHERE sysID = ?;".format(tn=table),(data,sysID))
elif method == 'config':
conn.execute("UPDATE {tn} SET last_config = ? WHERE sysID = ?;".format(tn=table),(datetime.now(),sysID))
elif method == 'self':
# enforce cuID
conn.execute("UPDATE {tn} SET self_bool = 0;".format(tn=table))
conn.execute("UPDATE {tn} SET self_bool = 1 WHERE sysID = ?;".format(tn=table),(sysID,))
###### Test
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=table)):
# print row
## get idst to return
# over write row_factory to return JSON
conn.row_factory = dict_factory
cur = conn.cursor()
# get data
cur.execute("SELECT user_id, sysID, system_type, status_bool, status, URI, last_config from {tn} WHERE self_bool = 1;".format(tn=table))
idst = cur.fetchone()
        # connection object used with 'with' will roll back the db on exception and commit on completion
except sqlite3.IntegrityError as e:
logging.error('%s:%s: SQL IntegrityError: %s' % (script_file,func_name,e))
raise e
#return False
except sqlite3.OperationalError as e:
logging.error('%s:%s: SQLite Operational Error: %s' % (script_file,func_name,e))
#raise e
return False
except sqlite3.ProgrammingError as e:
logging.error('%s:%s: SQLite Programming Error: %s' % (script_file,func_name,e))
raise e
#return False
finally:
conn.close()
return idst
def manage_comms(idst, data_json = False, method = None):
"""
Get comms for target system
> idst, [API sent confirmation transactionID list]
< True, (api_put, api_post, api_get)
TODO - currently one way - need both ways
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Manage communications data method %s' % (script_file,func_name,method))
try :
# connect to / create db
conn = create_connection(db)
####### Update comms queue from API call
if method == 'insert' :
with conn:
for json in data_json :
conn.execute("""INSERT OR REPLACE INTO {tn} (id, comm_sent, control_sys, transactionID, source, target, data, priority, URI, complete_req, complete, last_date, user_id)
VALUES (
(SELECT id FROM {tn} WHERE control_sys = {cs} AND transactionID = {ti}),
ifnull((SELECT comm_sent FROM {tn} WHERE control_sys = {cs} AND transactionID = {ti}),0),
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
""".format(tn=TB_COMM,cs=json['control_sys'],ti=json['transactionID']),(
json['control_sys'],
json['transactionID'],
json.get('source',None),
json.get('target',None),
json.get('data',None),
json.get('priority',None),
json.get('URI',None),
json.get('complete_req',0),
json.get('complete',0),
datetime.now(),
idst['user_id']
))
# return true
ret_val = True
####### Update comms queue and events
elif method == 'updatelist' :
# iterate transactions and update sent in comms queue and event TODO: update many without iterator
with conn:
for ident in data_json:
conn.execute("UPDATE {tn} SET comm_sent = 1, URI=?, complete = ? WHERE control_sys = ? AND transactionID = ?;".format(tn=TB_COMM),ident)
# remove URI
ident = ident[1:]
conn.execute("UPDATE {tn} SET link_confirmed = 1, complete = ? WHERE control_sys = ? AND transactionID = ?;".format(tn=TB_CEVENT),ident)
# return true
ret_val = True
####### Sync events and comms queue
else :
with conn :
### NEW COMMS QUEUE >> EVENTS
# get incomplete items from comms queue & insert into events where targeted
conn.execute("""INSERT INTO {tu} (control_sys, source, target, data, transactionID, priority, link_complete_req, last_date, user_id)
SELECT control_sys, source, target, data, transactionID, priority, complete_req, last_date, user_id
FROM {tn}
WHERE control_sys = ?
AND target = ?
AND NOT EXISTS (
SELECT a.*
FROM {tn} AS a
INNER JOIN {tu} AS b ON a.transactionID = b.transactionID AND a.control_sys = b.control_sys
);""".format(tn=TB_COMM,tu=TB_CEVENT),(idst['sysID'], idst['system_type']))
### NEW EVENTS >> COMMS QUEUE
## insert sample data
#conn.execute("""INSERT INTO {tu} (control_sys, source, target, data, priority, user_id, event_config, link_complete_req)
# VALUES
# (7010002, 31, 13, "data ish", 1, 1, 1, 1),
# (7010002, 31, 13, "more data wee", 1, 1, 1, 1),
# (7010002, 31, 13, "bingo even more data", 1, 1, 1, 1)
# ;""".format(tu=TB_CEVENT))
# generate transactionID in events
# TODO: test consistency of transactionID generated here and in Django
# TODO: update fails if no event type
# alternative: event['transactionID'] = "3%s%s%s" % (str(event['source']).zfill(2),str(event['event_type']).zfill(2),event['id'])
conn.execute("""UPDATE {tn}
SET transactionID = '3'||substr('00'||source,-2,2)||(SELECT substr('00'||event_type,-2,2) FROM {tn2} WHERE {tn}.event_config = {tn2}.id)||id
WHERE target NOT NULL
AND transactionID IS NULL
;""".format(tn=TB_CEVENT,tn2=TB_CECONF))
# get incomplete items from events & insert into comms queue where targeted
conn.execute("""INSERT INTO {tu} (control_sys, transactionID, source, target, data, priority, last_date, user_id, complete_req)
SELECT control_sys, transactionID, source, target, data, priority, last_date, user_id, link_complete_req
FROM {tn} AS a
WHERE a.control_sys = ?
AND a.source = ?
AND NULLIF(a.complete,0) IS NULL
AND a.transactionID IS NOT NULL
AND NOT EXISTS (
SELECT *
FROM {tn} AS b
INNER JOIN {tu} AS c ON b.control_sys = c.control_sys AND b.transactionID = c.transactionID
WHERE a.transactionID = b.transactionID
)
;""".format(tn=TB_CEVENT,tu=TB_COMM),(idst['sysID'], idst['system_type']))
### UPDATE EVENTS >> COMMS QUEUE
# update comms queue where events completed
conn.execute("""UPDATE {tu}
SET data = (SELECT data FROM {ts}
WHERE {ts}.control_sys = {tu}.control_sys
AND {ts}.transactionID = {tu}.transactionID
),
complete = 1
WHERE
EXISTS (
SELECT * FROM {ts}
WHERE {ts}.control_sys = {tu}.control_sys
AND {ts}.transactionID = {tu}.transactionID
AND {ts}.complete = 1
AND NULLIF({tu}.complete,0) IS NULL
);
""".format(tu=TB_COMM, ts=TB_CEVENT))
### UPDATE COMMS QUEUE >> EVENTS
# update comms queue where events completed
conn.execute("""UPDATE {tu}
SET data = (SELECT data FROM {ts}
WHERE {ts}.control_sys = {tu}.control_sys
AND {ts}.transactionID = {tu}.transactionID
),
complete = 1
WHERE
EXISTS (
SELECT * FROM {ts}
WHERE {ts}.control_sys = {tu}.control_sys
AND {ts}.transactionID = {tu}.transactionID
AND {ts}.complete = 1
AND NULLIF({tu}.complete,0) IS NULL
);
""".format(tu=TB_CEVENT, ts=TB_COMM))
#### Test
#print ">>>>>>>>>>>>>>>>>>>>>> EVENTS"
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=TB_CEVENT)):
# print row
#print ">>>>>>>>>>>>>>>>>>>>>> COMMS QUEUE"
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=TB_COMM)):
# print row
########### Return JSON comms queue for API PUT (UPDATE) and POST
# over write row_factory to return JSON
conn.row_factory = dict_factory
cur = conn.cursor()
### get JSON comms events that need API GET - e.g. events generated locally requiring completion at API
# define fields for select
#fields = "control_sys, meter, data, transactionID, source, target, priority, complete_req, complete, URI"
fields = "meter, transactionID, complete"
# get data for comms sync
cur.execute("""SELECT {tf} FROM {tn}
WHERE comm_sent = 1
AND complete_req = 1
AND NULLIF(complete,0) IS NULL
AND URI NOT NULL
AND target < ?
ORDER BY priority DESC;
""".format(tn=TB_COMM,tf=fields),(idst['system_type'],))
api_get = cur.fetchall()
### get JSON comms events that need API POST (e.g. originating below API)
# define fields for select
fields = "control_sys, meter, data, transactionID, source, target, priority, complete_req, complete"
# get data for comms sync
cur.execute("""SELECT {tf} FROM {tn}
WHERE NULLIF(comm_sent,0) IS NULL
AND URI IS NULL
AND target < ?
ORDER BY priority DESC;
""".format(tn=TB_COMM,tf=fields),(idst['system_type'],))
api_post = cur.fetchall()
### get JSON comms events that need API PUT (UPDATES) (e.g. originating from API)
# define fields for select
#fields = "sysID, meter, data, transactionID, source, target, priority, complete_req, complete, URI"
fields = "control_sys, transactionID, data, complete, complete_req, URI"
# get data for comms sync
cur.execute("""SELECT {tf} FROM {tn}
WHERE complete = 1
AND NULLIF(comm_sent,0) IS NULL
AND complete_req = 1
AND URI NOT NULL
ORDER BY priority DESC;
""".format(tn=TB_COMM,tf=fields))
api_put = cur.fetchall()
        # return put, post and get JSON
ret_val = (api_put, api_post, api_get)
        # connection object used with 'with' will roll back the db on exception and commit on completion
except sqlite3.IntegrityError as e:
logging.error('%s:%s: SQL IntegrityError: %s' % (script_file,func_name,e))
raise e
#return False
except sqlite3.OperationalError as e:
logging.error('%s:%s: SQLite Operational Error: %s' % (script_file,func_name,e))
raise e
#return False
except sqlite3.ProgrammingError as e:
logging.error('%s:%s: SQLite Programming Error: %s' % (script_file,func_name,e))
raise e
#return False
except ValueError as e:
logging.error('%s:%s: ValueError: %s' % (script_file,func_name,e))
raise e
#return False
finally:
#### Test
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=TB_CEVENT)):
# print row
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=TB_COMM)):
# print row
conn.close()
return ret_val
def get_policies(tb_pol, tb_ceconf, idst):
"""
Get policies associated with control unit
> policy table, event config table, idst
< policies, eventconfig
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Get policies (%s) and event config (%s)' % (script_file,func_name,tb_pol,tb_ceconf))
try :
# connect to / create db
conn = create_connection(db)
# over write row_factory to return JSON
conn.row_factory = dict_factory
cur = conn.cursor()
# get data
cur.execute("""SELECT id, name, policy_data, default_event
FROM {tn}
WHERE user_id = ?
;""".format(tn=tb_pol),(idst['user_id'],))
policies = cur.fetchall()
cur.execute("""SELECT *
FROM {tn}
WHERE user_id = ?
;""".format(tn=tb_ceconf),(idst['user_id'],))
eventconfig = cur.fetchall()
except sqlite3.OperationalError as e:
logging.debug('%s:%s: SQLite Operational Error: %s' % (script_file,func_name,e))
#raise e
return False
except sqlite3.ProgrammingError as e:
logging.error('%s:%s: SQLite Programming Error: %s' % (script_file,func_name,e))
raise e
#return False
finally:
##### Test
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=table)):
# print row
conn.close()
return policies, eventconfig
def manage_event(table, user_id, method = None, data = None):
"""
Manage events
> table, method (last_event, ...), data
< last event
"""
func_name = sys._getframe().f_code.co_name # Defines name of function for logging
logging.debug('%s:%s: Manage event method %s (%s)' % (script_file,func_name,method,table))
try :
# connect to / create db
conn = create_connection(db)
# over write row_factory to return JSON
conn.row_factory = dict_factory
# connection object as context manager
with conn:
cur = conn.cursor()
# get last event
if method == 'last_event':
# get data
cur.execute("""SELECT *
FROM {tn}
WHERE event_config = ?
ORDER BY last_date DESC
LIMIT 1
;""".format(tn=table),(data,))
rdata = cur.fetchone()
# insert event
else:
# build dynamic insert statement components
insert3 = insert_statement(user_id, db, table, data)
#print insert3[0]
#print insert3[1]
#print insert3[2][0]
cur.execute("""INSERT INTO {tn}({tf})
VALUES ({ih})
;""".format(tn=table,tf=insert3[0],ih=insert3[1]),insert3[2][0])
rdata = cur.lastrowid
        # connection object used with 'with' will roll back the db on exception and commit on completion
except sqlite3.IntegrityError as e:
logging.error('%s:%s: SQL IntegrityError: %s' % (script_file,func_name,e))
raise e
#return False
except sqlite3.OperationalError as e:
logging.debug('%s:%s: SQLite Operational Error: %s' % (script_file,func_name,e))
raise e
#return False
except sqlite3.ProgrammingError as e:
logging.error('%s:%s: SQLite Programming Error: %s' % (script_file,func_name,e))
raise e
#return False
finally:
##### Test
#for row in conn.execute('SELECT * FROM {tn}'.format(tn=table)):
# print row
conn.close()
return rdata
``` |
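A short, hedged sketch of how the helpers above might be driven together; the import path, table names and sysID are placeholders rather than values from the real COSY configuration.
```python
# Hypothetical driver for the data helpers above; table names and sysID are placeholders.
from data.data import manage_control, manage_comms, get_policies

idst = manage_control("control", sysID=710011, method="status")  # mark local unit OK, fetch idst
if idst:
    api_put, api_post, api_get = manage_comms(idst)               # sync events <-> comms queue
    policies, eventconfig = get_policies("policy", "ceventconfig", idst)
```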
{
"source": "jobdataexchange/competensor",
"score": 3
} |
#### File: competensor/utils/db.py
```python
from sqlalchemy.orm import Session
from contextlib import contextmanager
from models.models import engine, Pipeline
from copy import deepcopy
@contextmanager
def jdx_database_session_query_scope():
session = Session(engine)
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class JDXDatabase(object):
def __init__(self,
tablename='pipeline',
uri="postgresql+psycopg2://postgres:password@jdx-postgres:5432/jdx_reference_backend_application"):
self.tablename = tablename
self.uri = uri
def get_pipeline_from_job_description(self, job_description_id):
        # I don't know why this is throwing an error; it looks like
# querying by uuid can be problematic
#
# pipeline_query = self.session.query(self.Pipeline)\
# .filter_by(pipeline_id=job_description_id)
# pipeline = pipeline_query.one()
# for row in session.query(self.Pipeline).all():
# if job_description_id == vars(row)['pipeline_id']:
# return vars(row)
# return pipeline
with jdx_database_session_query_scope() as session:
pipeline_query = session.query(Pipeline).filter_by(
pipeline_id=job_description_id
)
pipeline = pipeline_query.one()
pipeline_copy = deepcopy(pipeline)
return pipeline_copy
def get_raw_text_from_pipeline(self, pipeline_id):
pipeline = self.get_pipeline_from_job_description(pipeline_id)
with jdx_database_session_query_scope() as session:
session.add(pipeline)
return pipeline.file_text
def get_context_object_json(self, pipeline_id):
pipeline = self.get_pipeline_from_job_description(pipeline_id)
with jdx_database_session_query_scope() as session:
session.add(pipeline)
return pipeline.context_object
```
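A brief usage sketch of `JDXDatabase` above, assuming the module is importable as `utils.db`; the pipeline id is a placeholder UUID standing in for a row in the `pipeline` table.
```python
# Hypothetical usage; the import path and pipeline id below are assumptions.
from utils.db import JDXDatabase

jdx_db = JDXDatabase()
pipeline_id = "00000000-0000-0000-0000-000000000000"
raw_text = jdx_db.get_raw_text_from_pipeline(pipeline_id)
context = jdx_db.get_context_object_json(pipeline_id)
print(len(raw_text) if raw_text else 0)
```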
#### File: utils/parser/clearinghouse.py
```python
import xlrd
from xlrd import open_workbook
import numpy as np
import pandas as pd
import re
def read_excel_with_formatting(file_path,
sheet_name,
skiprows=0,
competency_col=2):
    # adapted from https://stackoverflow.com/a/7991458/3662899
wb = open_workbook(file_path, formatting_info=True)
sheet = wb.sheet_by_name(sheet_name)
font = wb.font_list
value_arr = ['']*sheet.nrows
indent = [0]*sheet.nrows
numeric = ['']*sheet.nrows
bgcol = [0]*sheet.nrows
weight = [0]*sheet.nrows
tags = [""]*sheet.nrows
# cycle through all cells to get colors
for idx, row in enumerate(range(sheet.nrows)):
if idx < skiprows:
continue
for column in range(sheet.ncols):
cell = sheet.cell(row, column)
if xlrd.XL_CELL_EMPTY != cell.ctype:
value = str(cell.value)
tag = ''
if column == competency_col:
fmt = wb.xf_list[cell.xf_index]
bgcol[row] = fmt.background.background_colour_index
weight[row] = font[fmt.font_index].weight
indent[row] = fmt.alignment.indent_level
match = re.match(r'^([\d\.]*)(.*)', value)
if match.lastindex >= 2:
value_arr[row] = match.groups()[1].strip()
if 0 != match.lastindex:
tag = match.groups()[0]
numeric[row] = match.groups()[0]
prefix = ', '
if not tags[row].strip():
# first tag starts with a value, not a comma
prefix = ''
if not tag:
tag = re.match('^([\d\.]*)',
value).group()
                # sometimes the numeric tag is repeated in another column
                # (such as the competency column) so we do a quick check to avoid
# repeating numeric values
if tag.strip() and tags[row] != tag:
tags[row] += prefix + tag
df = pd.DataFrame({"background": bgcol,
"weight": weight,
"indent": indent,
"numeric": numeric,
"value": value_arr,
"tags": tags
})
df.replace('', np.nan, inplace=True)
df.dropna(inplace=True, subset=['value'])
return df
def induce_tags(df, include_section=True):
'''
    Assuming top-level tags, we uniquely tag the remaining rows
    by forward filling to preserve the top-level hierarchy
    and by appending the row number to make each row unique.
It is possible to do hierarchical tagging by inspecting the
weight and indent level but this is good enough for now.
ACE might request hierarchical tagging though.
'''
ret = df.replace('', np.nan)
ret.tags.fillna(method='ffill', inplace=True)
ret.tags =\
[', row='.join(elem) for elem in zip(ret.tags.astype(str),
(ret.index+1).astype(str))]
ret.dropna(subset=['value'], inplace=True)
if include_section:
ret.tags = 'section ' + ret.tags
return ret
``` |
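A short sketch of the intended call sequence for the two helpers above; the import path, workbook path, sheet name and `skiprows` value are assumptions. Note that `open_workbook(..., formatting_info=True)` only works for legacy `.xls` workbooks in xlrd.
```python
# Hypothetical invocation; import path, file name and sheet name are assumptions.
from utils.parser.clearinghouse import read_excel_with_formatting, induce_tags

df = read_excel_with_formatting("ace_framework.xls", "Competencies",
                                skiprows=1, competency_col=2)
tagged = induce_tags(df, include_section=True)
print(tagged[["tags", "value"]].head())
```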
{
"source": "jobdataexchange/jdx-api",
"score": 2
} |
#### File: jdxapi/routes/framework_selections.py
```python
from jdxapi.app import api, DB
from jdxapi.models import Pipeline, FrameworkRecommendation, Framework
from jdxapi.utils.logger_resource import LoggerResource
from flask import request, jsonify
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from jdxapi.utils.error import ApiError
from jdxapi.utils.functions import RequestHandler
import datetime
import jdxapi.utils.constants as c
from jdxapi.utils.functions import RequestHandler, ResponseHandler
@api.resource("/framework-selections")
class FrameworkSelections(LoggerResource):
def post(self):
req = request.get_json()
pipeline_id = RequestHandler.get_pipeline_id(req, True)
pipeline = Pipeline.get_pipeline_from_id(pipeline_id)
competencies = req[c.FRAMEWORKS].get(c.COMPETENCY)
occupations = req[c.FRAMEWORKS].get(c.OCCUPATION)
industries = req[c.FRAMEWORKS].get(c.INDUSTRY)
self.validate(pipeline_id, competencies)
frameworks = self.compile_frameworks(competencies, occupations, industries)
self.store_given_frameworks(pipeline, frameworks)
resp_data = self.create_response_data(pipeline_id)
response = ResponseHandler.create_response(resp_data, 200)
return response
def validate(self, pipeline_id, competencies):
if not pipeline_id:
raise ApiError(f"Must have a '{c.PIPELINE_ID}''", 422)
if not competencies:
raise ApiError("Must have at least one competency", 422)
def compile_frameworks(self, *framework_lists):
frameworks = []
for framework in framework_lists:
if framework:
frameworks += [*framework]
return frameworks
def store_given_frameworks(self, pipeline, frameworks):
# Pass frameworks to pipeline
given_frameworks = [item[c.FRAMEWORK_ID] for item in frameworks]
for framework_id in given_frameworks:
that_framework = Framework.get_framework_from_id(framework_id)
pipeline.frameworks.append(that_framework)
DB.session.commit()
def create_response_data(self, pipeline_id):
resp_data = {
c.PIPELINE_ID: str(pipeline_id),
c.TIMESTAMP: str(datetime.datetime.now()),
}
return resp_data
```
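An illustrative request body for the POST handler above. The actual JSON key spellings come from `jdxapi.utils.constants` (`c.PIPELINE_ID`, `c.FRAMEWORKS`, `c.COMPETENCY`, `c.OCCUPATION`, `c.INDUSTRY`, `c.FRAMEWORK_ID`), which are not shown here, so the camel-cased keys and UUIDs below are guesses for illustration only.
```python
# Assumed shape of a POST /framework-selections body; key names and UUIDs are guesses
# standing in for the constants defined in jdxapi.utils.constants.
example_request = {
    "pipelineID": "11111111-1111-1111-1111-111111111111",
    "frameworks": {
        "competency": [{"frameworkID": "22222222-2222-2222-2222-222222222222"}],
        "occupation": [],
        "industry": [],
    },
}
```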
#### File: jdxapi/utils/error.py
```python
from flask import jsonify
import logging
class ApiError(Exception):
def __init__(self, message, status_code=400, payload=None):
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.payload = payload or ()
def get_response(self):
print(self.payload)
logger = logging.getLogger('inputoutput')
logger.exception('ApiError:')
ret = dict(self.payload)
ret['message'] = self.message
        return jsonify(ret), self.status_code  # TODO: improve this so the API response can be logged
```
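A minimal sketch of wiring `ApiError` above into a Flask application via an error handler; the `app` object here is a stand-in, since the real project presumably registers this on its own application instance.
```python
# Hypothetical wiring; the real project would register this on its own Flask app.
from flask import Flask
from jdxapi.utils.error import ApiError

app = Flask(__name__)

@app.errorhandler(ApiError)
def handle_api_error(error):
    # ApiError.get_response() already returns a (jsonify(...), status_code) tuple
    return error.get_response()
```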
#### File: tests/models/test_framework_model.py
```python
from pocha import describe, it, before, after
from expects import expect, equal, be_none, be, be_above, be_true, have_keys
# from ..pocha_setup import application
# import uuid
from model_setup import setup_environment, restore_environment, get_app, get_db
from jdxapi.models import *
setup_environment = before(setup_environment)
restore_environment = after(restore_environment)
@describe('framework model')
def _():
# @describe('Pipeline.user_is_authorized_to_access_pipeline')
# def _():
# testDB = get_db()
# auth_user_token = <PASSWORD>()
# unauth_user_token = uuid.uuid4()
# new_pipeline = pipeline_model.Pipeline(
# user_token=auth_user_token,
# file_name="file_name",
# file_format="file_format",
# file_text="file_text"
# )
# testDB.session.add(new_pipeline)
# testDB.session.commit()
# pipeline_id = new_pipeline.pipeline_id
# @it('passes when a user access their pipeline')
# def _():
# result = pipeline_model.Pipeline.user_is_authorized_to_access_pipeline(
# auth_user_token,
# pipeline_id
# )
# expect(result).to(equal(True))
# @it('fails when a user access someone elses pipeline')
# def _():
# result = pipeline_model.Pipeline.user_is_authorized_to_access_pipeline(
# unauth_user_token,
# pipeline_id
# )
# expect(result).to(equal(False))
@it('does stuff', skip=True)
def _():
pass
``` |
{
"source": "JobDoesburg/landolfio",
"score": 2
} |
#### File: website/asset_media/admin.py
```python
from django.contrib import admin
from django.utils.safestring import mark_safe
from nested_admin.nested import NestedStackedInline, NestedModelAdmin
from asset_media.models import MediaSet, MediaItem
class AssetMediaItemInline(NestedStackedInline):
model = MediaItem
fk_name = "set"
extra = 0
fields = ["thumbnail", "media"]
readonly_fields = ["thumbnail"]
def thumbnail(self, obj):
return mark_safe(
'<a href={url} target="_blank"><img src="{url}" width="{width}" /></a>'.format(
url=obj.media.url, width=400,
)
)
@admin.register(MediaSet)
class MediaSetAdmin(NestedModelAdmin):
inlines = [AssetMediaItemInline]
class Media:
"""Necessary to use AutocompleteFilter."""
@admin.register(MediaItem)
class MediaItemAdmin(admin.ModelAdmin):
fields = ["set", "media", "file"]
readonly_fields = ["file"]
def file(self, obj):
return mark_safe(
'<a href={url} target="_blank"><img src="{url}" width="{width}"/></a>'.format(
url=obj.media.url, width=1080,
)
)
class Media:
"""Necessary to use AutocompleteFilter."""
```
#### File: website/asset_media/models.py
```python
import os
from django.db import models
from django.db.models import CASCADE, SET_NULL
from django.utils import timezone
from asset_events.models import Event
from assets.models import Asset
class MediaSet(models.Model):
class Meta:
verbose_name = "media set"
verbose_name_plural = "media sets"
date = models.DateTimeField(default=timezone.now, blank=False, null=False)
asset = models.ForeignKey(Asset, null=True, blank=True, on_delete=CASCADE)
event = models.OneToOneField(Event, null=True, blank=True, on_delete=SET_NULL)
remarks = models.TextField(null=True, blank=True)
def __str__(self):
if self.event:
return f"{self.asset} media - {self.event}"
return f"{self.asset} media"
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if self.event:
self.asset = self.event.asset
        super().save(force_insert, force_update, using, update_fields)
def get_upload_path(instance, filename):
return f"asset_media/{instance.set.asset.category}/{instance.set.asset.number}/{instance.set.asset.number}_{timezone.now().strftime('%Y-%m-%d_%H-%M-%S')}_{filename}"
class MediaItem(models.Model):
class Meta:
verbose_name = "media item"
verbose_name_plural = "media items"
set = models.ForeignKey(MediaSet, null=False, blank=False, on_delete=CASCADE)
media = models.FileField(null=False, blank=False, upload_to=get_upload_path)
def __str__(self):
return self.media.name
```
#### File: maintenance/models/maintenance_ticket.py
```python
from django.db import models
from django.db.models import PROTECT
from asset_events.models import SingleStatusChangingEvent
from assets.models import Asset
from maintenance.models import MaintenanceProvider
class AssetMaintenanceTicket(SingleStatusChangingEvent):
class Meta:
verbose_name = "maintenance ticket"
verbose_name_plural = "maintenance tickets"
input_statuses = [
Asset.UNDER_REVIEW,
Asset.AVAILABLE,
Asset.ISSUED_UNPROCESSED,
Asset.ISSUED_LOAN,
Asset.ISSUED_RENT,
]
def get_output_status(self):
return Asset.MAINTENANCE_EXTERNAL if self.maintenance_provider else Asset.MAINTENANCE_IN_HOUSE
maintenance_provider = models.ForeignKey(MaintenanceProvider, null=True, blank=True, on_delete=PROTECT)
class AssetMaintenanceReturn(SingleStatusChangingEvent):
class Meta:
verbose_name = "maintenance return"
verbose_name_plural = "maintenance returns"
input_statuses = [Asset.MAINTENANCE_IN_HOUSE, Asset.MAINTENANCE_EXTERNAL]
output_status = Asset.AVAILABLE
# TODO add FK to a maintenance invoice/report?
```
#### File: moneybird_accounting/models/tax_rate.py
```python
from django.db import models
from moneybird_accounting.models import MoneybirdReadOnlyResourceModel
class TaxRate(MoneybirdReadOnlyResourceModel):
class Meta:
verbose_name = "tax rate"
verbose_name_plural = "tax rates"
moneybird_resource_path_name = "tax_rates"
moneybird_resource_name = "tax_rate"
moneybird_data_fields = [
"name",
"percentage",
"show_tax",
"active",
] # TODO add taxratetype
name = models.CharField(blank=True, null=True, max_length=100)
percentage = models.DecimalField(blank=True, null=True, max_digits=5, decimal_places=2)
show_tax = models.BooleanField(blank=True, null=True)
active = models.BooleanField(blank=True, null=True)
def __str__(self):
return self.name
```
#### File: moneybird_accounting/models/workflow.py
```python
from django.db import models
from moneybird_accounting.models import MoneybirdReadOnlyResourceModel
class Workflow(MoneybirdReadOnlyResourceModel):
class Meta:
verbose_name = "workflow"
verbose_name_plural = "workflows"
moneybird_resource_path_name = "workflows"
moneybird_resource_name = "workflow"
moneybird_data_fields = [
"type",
"name",
"default",
"active",
"prices_are_incl_tax",
]
WORKFLOW_TYPE_INVOICE_WORKFLOW = "InvoiceWorkflow"
WORKFLOW_TYPE_ESTIMATE_WORKFLOW = "EstimateWorkflow"
WORKFLOW_TYPES = (
(WORKFLOW_TYPE_INVOICE_WORKFLOW, "Invoice Workflow"),
(WORKFLOW_TYPE_ESTIMATE_WORKFLOW, "Estimate Workflow"),
)
type = models.CharField(blank=True, null=True, choices=WORKFLOW_TYPES, max_length=20)
name = models.CharField(blank=True, null=True, max_length=100)
default = models.BooleanField(blank=True, null=True)
active = models.BooleanField(blank=True, null=True)
prices_are_incl_tax = models.BooleanField(blank=True, null=True)
def __str__(self):
return self.name
```
#### File: website/moneybird_accounting/moneybird_sync.py
```python
import logging
from typing import List, Type
import moneybird
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from moneybird import MoneyBird, TokenAuthentication
from moneybird_accounting.models import *
class MoneyBirdSynchronizationError(ValidationError):
pass
# TODO implement webhooks!
class MoneyBirdAPITalker:
_logger = logging.getLogger("django.moneybird")
_token = settings.MONEYBIRD_API_TOKEN
_administration_id = settings.MONEYBIRD_ADMINISTRATION_ID
_moneybird = MoneyBird(TokenAuthentication(_token))
@property
def moneybird(self):
"""An Moneybird API instance (can be adapted to support API sessions)."""
return self._moneybird
@property
def administration_id(self):
"""The administration_id to work with."""
return self._administration_id
def sync_objects(self, cls: Type[MoneybirdSynchronizableResourceModel]):
"""Synchronize all objects of a MoneybirdSynchronizableResourceModel."""
self._logger.info(f"Getting Moneybird {cls.get_moneybird_resource_path_name()} for synchronization")
data = self.moneybird.get(f"{cls.get_moneybird_resource_path_name()}/synchronization", self.administration_id,)
self._logger.info(f"Moneybird returned {len(data)} {cls.get_moneybird_resource_path_name()}")
to_delete = cls.objects.exclude(id__in=[x["id"] for x in data])
if len(to_delete) > 0:
self._logger.info(
f"Found {len(to_delete)} {cls.get_moneybird_resource_path_name()} to delete: {to_delete}"
)
for delete_object in to_delete:
delete_object.processed = True
delete_object.delete()
update_or_create = []
for obj_data in data:
try:
obj = cls.objects.get(id=obj_data["id"])
except cls.DoesNotExist:
self._logger.info(f"Found new {cls.get_moneybird_resource_name()} to create with id {obj_data['id']}")
update_or_create.append(obj_data["id"])
else:
if obj.version != obj_data["version"]:
self._logger.info(f"Found {obj} to be updated")
update_or_create.append(obj_data["id"])
return self.update_or_create_objects(cls, update_or_create)
def sync_objects_hard(self, cls: Type[MoneybirdSynchronizableResourceModel]):
self._logger.info(f"Performing hard sync on {cls} objects, setting version to None.")
cls.objects.update(version=None)
self.sync_objects(cls)
def sync_readonly_objects(self, cls: Type[MoneybirdReadOnlyResourceModel]):
data = self.moneybird.get(f"{cls.get_moneybird_resource_path_name()}", self.administration_id,)
ids = []
for obj in data:
ids.append(obj["id"])
cls.update_or_create_object_from_moneybird(obj)
for obj in cls.objects.all():
if obj.id not in ids:
obj.processed = True
obj.delete()
def sync_readwrite_objects(self, cls: Type[MoneybirdReadWriteResourceModel]):
data = self.moneybird.get(f"{cls.get_moneybird_resource_path_name()}", self.administration_id,)
ids = []
for obj in data:
ids.append(obj["id"])
cls.update_or_create_object_from_moneybird(obj)
for obj in cls.objects.all():
if obj.id not in ids:
obj.processed = True
obj.delete()
def update_or_create_objects(self, cls: Type[MoneybirdSynchronizableResourceModel], ids: List[str]):
"""Update or create Moneybird objects with certain ids."""
chunks = [ids[i : i + 100] for i in range(0, len(ids), 100)]
for chunk in chunks:
self._logger.info(
f"Getting {len(chunk)} Moneybird {cls.get_moneybird_resource_path_name()} by id to sync: {chunk}"
)
data = self.moneybird.post(
f"{cls.get_moneybird_resource_path_name()}/synchronization", {"ids": chunk}, self.administration_id,
)
self._logger.info(
f"Moneybird returned {len(data)} {cls.get_moneybird_resource_path_name()} to create or update: {data}"
)
for object_data in data:
self._logger.info(
f"Updating or creating {cls.get_moneybird_resource_path_name()} {object_data['id']}: {object_data}"
)
cls.update_or_create_object_from_moneybird(object_data)
return cls.objects.filter(id__in=ids)
def create_moneybird_resource(self, cls: Type[MoneybirdReadWriteResourceModel], data: Dict[str, Any]):
"""Create a new resource on Moneybird."""
data_filtered = dict([(x, data[x]) for x in data if x not in cls.get_moneybird_readonly_fields()])
try:
self._logger.info(f"Creating Moneybird {cls.get_moneybird_resource_path_name()} {id}: {data_filtered}")
reply = self.moneybird.post(
cls.get_moneybird_resource_path_name(),
{cls.get_moneybird_resource_name(): data_filtered},
self.administration_id,
)
self._logger.info(f"Moneybird returned {cls.get_moneybird_resource_name()}: {reply}")
return reply
except moneybird.api.MoneyBird.InvalidData as e:
raise IntegrityError(e.response["error"] if e.response["error"] else e.response)
def patch_moneybird_resource(
self, cls: Type[MoneybirdReadWriteResourceModel], id: str, data: Dict[str, Any],
):
"""Patch an existing Moneybird resource."""
data_filtered = dict([(x, data[x]) for x in data if x not in cls.get_moneybird_readonly_fields()])
try:
self._logger.info(f"Patching Moneybird {cls.get_moneybird_resource_path_name()} {id}: {data_filtered}")
reply = self.moneybird.patch(
f"{cls.get_moneybird_resource_path_name()}/{id}",
{cls.get_moneybird_resource_name(): data_filtered},
self.administration_id,
)
self._logger.info(f"Moneybird returned {cls.get_moneybird_resource_name()}: {reply}")
return reply
except moneybird.api.MoneyBird.InvalidData as e:
raise IntegrityError(e.response["error"] if e.response["error"] else e.response)
def delete_moneybird_resource(self, cls: Type[MoneybirdReadWriteResourceModel], id: str):
"""Delete an existing Moneybird resource."""
try:
self._logger.info(f"Deleting Moneybird {cls.get_moneybird_resource_path_name()} {id}")
return self.moneybird.delete(f"{cls.get_moneybird_resource_path_name()}/{id}", self.administration_id,)
except moneybird.api.MoneyBird.APIError as e:
if e.status_code == 204:
pass
else:
raise IntegrityError(e.response["error"] if e.response["error"] else e.response)
def full_sync(self, hard=False):
for cls in MoneybirdReadOnlyResourceModel.__subclasses__():
if not cls._meta.abstract:
self.sync_readonly_objects(cls)
for cls in MoneybirdReadWriteResourceModel.__subclasses__():
if not cls._meta.abstract:
self.sync_readwrite_objects(cls)
for cls in MoneybirdSynchronizableResourceModel.__subclasses__():
if not cls._meta.abstract:
if hard:
self.sync_objects_hard(cls)
else:
self.sync_objects(cls)
```
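A short usage sketch of the synchronizer above, e.g. from a Django management command or a scheduled task; it assumes `MONEYBIRD_API_TOKEN` and `MONEYBIRD_ADMINISTRATION_ID` are configured in the Django settings.
```python
# Hypothetical sync run; requires the Moneybird token and administration id in settings.
from moneybird_accounting.moneybird_sync import MoneyBirdAPITalker

talker = MoneyBirdAPITalker()
talker.full_sync()           # sync read-only, read-write and synchronizable resources
talker.full_sync(hard=True)  # reset stored versions first, forcing a full re-fetch
```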
#### File: website/moneybird_accounting/tests.py
```python
from django.test import TestCase
from moneybird_accounting.models import Contact
from moneybird_accounting.moneybird_sync import MoneyBirdAPITalker
class MoneyBirdAPITalkerTest(TestCase):
def setUp(self):
self.mb = MoneyBirdAPITalker()
def test_sync_contacts(self):
"""
- No contacts, after sync there are
- No updates, nothing touched
- Remove contact from Django, reappears
- Delete contact from Moneybird, disappears
- Update contact, after synced change cascaded
:return:
"""
self.mb.sync_objects(Contact)
# TODO write a lot of tests...
```
#### File: rentals/models/issuance_loan.py
```python
from django.db import models
from django.db.models import PROTECT
from asset_events.models import StatusChangingEvent, MultiAssetEvent
from assets.models import Asset
from moneybird_accounting.models import Contact
class SingleAssetLoan(StatusChangingEvent):
class Meta:
verbose_name = "asset loan"
verbose_name_plural = "asset loans"
input_statuses = [Asset.AVAILABLE, Asset.ISSUED_UNPROCESSED]
output_status = Asset.ISSUED_LOAN
loan = models.ForeignKey("AssetLoan", null=False, blank=False, on_delete=PROTECT)
@property
def contact(self):
        return self.loan.contact
def date(self):
        return self.loan.date
class AssetLoan(MultiAssetEvent):
class Meta:
verbose_name = "loan"
verbose_name_plural = "loans"
contact = models.ForeignKey(Contact, null=True, blank=True, on_delete=PROTECT)
assets = models.ManyToManyField(Asset, through=SingleAssetLoan)
def __str__(self):
return f"Loan {', '.join(self.assets.values_list('number', flat=True))}"
``` |