{
"source": "joaoeudes7/suggestclasses",
"score": 2
} |
#### File: suggestclasses/dados/dados.py
```python
import csv
import os
import urllib.request
import django
django.setup()
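# NOTE: django.setup() only succeeds if the DJANGO_SETTINGS_MODULE environment
# variable already points at the project's settings when this script starts.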
from core.models import Curso, Centro, Departamento, ComponenteCurricular, EstruturaCurricular, OrganizacaoCurricular
DADOS_PATH = '/home/taciano/dev/workspace/suggestclasses/dados'  # machine-specific path; adjust to your checkout
def main():
print("Lendo dados sobre o CERES/UFRN ...!")
os.chdir(DADOS_PATH)
print(os.getcwd())
downloads_csv()
centros() # Only the CERES centre is added.
departamentos()
cursos()
componentes()
estruturas()
organizacao()
def downloads_csv():
print("Download do CSV dos Departamentos do CERES/UFRN ...!")
url = 'http://dados.ufrn.br/dataset/da6451a5-1a59-4630-bdc2-97f6be4a59c2/resource/3f2e4e32-ef1a-4396-8037' \
'-cbc22a89d97f/download/unidades.csv'
file_name = 'unidades.csv'
urllib.request.urlretrieve(url, file_name)
print("Download do CSV dos Cursos do CERES/UFRN ...!")
url = 'http://dados.ufrn.br/dataset/08b0dc59-faa9-4281-bd1e-2a39f532489e/resource/949be3d1-e85b-4d0f-9f60' \
'-1d9a7484bb06/download/cursos-ufrn.csv'
file_name = 'cursos-ufrn.csv'
urllib.request.urlretrieve(url, file_name)
print("Download do CSV dos Componentes do CERES/UFRN ...!")
url = 'http://dados.ufrn.br/dataset/3fea67e8-6916-4ed0-aaa6-9a8ca06a9bdc/resource/9a3521d2-4bc5-4fda-93f0' \
'-f701c8a20727/download/componentes-curriculares-presenciais.csv'
file_name = 'componentes-curriculares-presenciais.csv'
urllib.request.urlretrieve(url, file_name)
print("Download do CSV das Estruturas Curriculares do CERES/UFRN ...!")
url = 'http://dados.ufrn.br/dataset/e7c24910-75c1-451b-9097-e4352488dd69/resource/94cc35b0-6560-44f3-8c67' \
'-98cff965f23c/download/estruturas-curriculares.csv'
file_name = 'estruturas-curriculares.csv'
urllib.request.urlretrieve(url, file_name)
print("Download do CSV dos Organização Curricular do CERES/UFRN ...!")
url = 'http://dados.ufrn.br/dataset/82aca3f1-f7ee-425e-bf1e-b6a1d6811bf4/resource/3f25d054-c5d2-4bf2-8cd4' \
'-8e0a2e4f63ce/download/curriculo-componente-graduacao.csv'
file_name = 'curriculo-componente-graduacao.csv'
urllib.request.urlretrieve(url, file_name)
def centros():
# Registering the CERES centre
centro = Centro(id_unidade=1482, codigo=1800, nome='Centro de Ensino Superior do Seridó',
sigla='CERES', endereco='Rua Joaquim Gregório, Penedo, Caicó - RN',
site='http://www.ceres.ufrn.br/')
centro.save()
def departamentos():
# Looking up the CERES centre
ceres = Centro.objects.get(id_unidade=1482)
with open('unidades.csv') as csvfile:
unidades = csv.reader(csvfile, delimiter=';')
next(unidades) # skip header
for row in unidades:
id_dep = row[0]
codigo_dep = row[1]
nome_dep = row[2]
sigla_dep = row[3]
municipio = row[6]
id_unidade_responsavel = row[9].strip()
tipo_unidade_organizacional = row[17].strip()
if id_unidade_responsavel == '1482' and (tipo_unidade_organizacional == 'DEPARTAMENTO'
or tipo_unidade_organizacional == 'ASSESSORIA'):
print(id_dep)
print(codigo_dep)
print(nome_dep)
print(sigla_dep)
print(municipio)
print(tipo_unidade_organizacional)
print(id_unidade_responsavel)
d = Departamento(id_unidade=id_dep, codigo=codigo_dep, nome=nome_dep, sigla=sigla_dep,
endereco=municipio,
centro=ceres)
d.save()
def cursos():
print("Criando cursos para o CERES ...!")
# Looking up the CERES centre
ceres = Centro.objects.get(id_unidade=1482)
with open('cursos-ufrn.csv') as csvfile:
cursos_ufrn = csv.reader(csvfile, delimiter=';')
next(cursos_ufrn) # skip header
for row in cursos_ufrn:
id_curso = row[0]
nome_curso = row[1]
nivel_ensino = row[5]
grau_academico = row[6]
modalidade_educacao = row[7]
turno = row[10]
id_unidade_responsavel = row[14]
if id_unidade_responsavel == '1482':
print(id_curso)
print(nome_curso)
print(nivel_ensino)
print(grau_academico)
print(modalidade_educacao)
print(turno)
print(id_unidade_responsavel)
c = Curso(codigo=id_curso, nome=nome_curso, nivel=nivel_ensino, grau=grau_academico,
modalidade=modalidade_educacao, turno=turno, centro=ceres)
c.save()
def componentes():
print("Criando Componentes para os Departamentos do CERES ...!")
with open('componentes-curriculares-presenciais.csv') as csvfile:
componentes_ceres = csv.reader(csvfile, delimiter=';')
next(componentes_ceres) # skip header
for row in componentes_ceres:
unidade_responsavel = row[5].strip()
if Departamento.objects.filter(nome=unidade_responsavel).exists():
depto = Departamento.objects.get(nome=unidade_responsavel)
print("Departamento " + depto.sigla)
id_componente = row[0]
tipo_componente = row[1]
codigo_componente = row[2]
nivel_componente = row[3]
nome_componente = row[4]
unidade_responsavel = row[5].strip()
ch_teorico = row[6]
ch_pratico = row[7]
ch_estagio = row[8]
ch_total = row[9]
ch_dedicada_docente = row[10]
ch_ead = row[11]
cr_max_ead = row[12]
equivalencia = row[16]
pre_requisito = row[17]
co_requisito = row[18]
ementa = row[19]
bibliografia = row[20]
objetivos = row[21]
conteudo = row[22]
competencias_habilidades = row[23]
referencias = row[24]
ano_programa = row[25]
periodo_programa = row[26]
modalidade = row[27]
curso_componente = row[28]
# if depto.id_unidade == 9726 or depto.id_unidade == 235:
print(id_componente)
cc = ComponenteCurricular(id_componente=id_componente, tipo=tipo_componente,
codigo=codigo_componente, nivel=nivel_componente, nome=nome_componente,
ch_teorica=ch_teorico, ch_pratica=ch_pratico, ch_estagio=ch_estagio,
ch_total=ch_total, ch_docente=ch_dedicada_docente, ch_ead=ch_ead,
cr_max_ead=cr_max_ead, equivalencia=equivalencia,
requisito=pre_requisito, corequisito=co_requisito, ementa=ementa,
modalidade=modalidade, departamento=depto)
cc.save()
def estruturas():
print("Criando Estruturas Curriculares para os Cursos do CERES ...!")
with open('estruturas-curriculares.csv') as csvfile:
estruturas_ceres = csv.reader(csvfile, delimiter=';')
next(estruturas_ceres) # skip header
for row in estruturas_ceres:
curso_ufrn = row[3]
if Curso.objects.filter(codigo=curso_ufrn).exists():
curso_ceres = Curso.objects.get(codigo=curso_ufrn)
print(curso_ceres)
id_curriculo = row[0]
codigo = row[1]
nome_matriz = row[2]
id_curso = row[3]
nome_curso = row[4]
semestre_conclusao_minimo = row[5] if row[5] != '' else None
semestre_conclusao_ideal = row[6] if row[6] != '' else None
semestre_conclusao_maximo = row[7] if row[7] != '' else None
meses_conclusao_minimo = row[8] if row[8] != '' else None
meses_conclusao_ideal = row[9] if row[9] != '' else None
meses_conclusao_maximo = row[10] if row[10] != '' else None
cr_total_minimo = row[11] if row[11] != '' else None
ch_total_minima = row[12] if row[12] != '' else None
ch_optativas_minima = row[13] if row[13] != '' else None
ch_complementar_minima = row[14] if row[14] != '' else None
max_eletivos = row[15] if row[15] != '' else None
ch_nao_atividade_obrigatoria = row[16] if row[16] != '' else None
cr_nao_atividade_obrigatorio = row[17] if row[17] != '' else None
ch_atividade_obrigatoria = row[18] if row[18] != '' else None
cr_minimo_semestre = row[19] if row[19] != '' else None
cr_ideal_semestre = row[20] if row[20] != '' else None
cr_maximo_semestre = row[21] if row[21] != '' else None
ch_minima_semestre = row[22] if row[22] != '' else None
ch_ideal_semestre = row[23] if row[23] != '' else None
ch_maxima_semestre = row[24] if row[24] != '' else None
periodo_entrada_vigor = row[25] if row[25] != '' else None
ano_entrada_vigor = row[26] if row[26] != '' else None
observacao = row[27]
ec = EstruturaCurricular(id_curriculo=id_curriculo, codigo=codigo, nome=nome_matriz,
semestre_conclusao_minimo=semestre_conclusao_minimo,
semestre_conclusao_ideal=semestre_conclusao_ideal,
semestre_conclusao_maximo=semestre_conclusao_maximo,
meses_conclusao_minimo=meses_conclusao_minimo,
meses_conclusao_ideal=meses_conclusao_ideal,
meses_conclusao_maximo=meses_conclusao_maximo,
cr_total_minimo=cr_total_minimo, ch_total_minima=ch_total_minima,
ch_optativas_minima=ch_optativas_minima,
ch_complementar_minima=ch_complementar_minima, max_eletivos=max_eletivos,
ch_nao_atividade_obrigatoria=ch_nao_atividade_obrigatoria,
cr_nao_atividade_obrigatorio=cr_nao_atividade_obrigatorio,
ch_atividade_obrigatoria=ch_atividade_obrigatoria,
cr_minimo_semestre=cr_minimo_semestre,
cr_ideal_semestre=cr_ideal_semestre, cr_maximo_semestre=cr_maximo_semestre,
ch_minima_semestre=ch_minima_semestre, ch_ideal_semestre=ch_ideal_semestre,
ch_maxima_semestre=ch_maxima_semestre,
periodo_entrada_vigor=periodo_entrada_vigor,
ano_entrada_vigor=ano_entrada_vigor, observacao=observacao, curso=curso_ceres)
ec.save()
def organizacao():
print("Criando Estruturas Curriculares para os Cursos do CERES ...!")
with open('curriculo-componente-graduacao.csv') as csvfile:
ccg = csv.reader(csvfile, delimiter=';')
next(ccg) # skip header
for row in ccg:
id_estrutura = row[1]
id_componente_curricular = row[2]
if EstruturaCurricular.objects.filter(id_curriculo=id_estrutura).exists():
ec = EstruturaCurricular.objects.get(id_curriculo=id_estrutura)
if ComponenteCurricular.objects.filter(id_componente=id_componente_curricular).exists():
cc = ComponenteCurricular.objects.get(id_componente=id_componente_curricular)
id_curriculo_componente = row[0]
id_curriculo = row[1]
id_componente_curricular = row[2]
semestre_oferta = row[3]
tipo_vinculo_componente = row[4]
nivel_ensino = row[5]
oc = OrganizacaoCurricular(id_curriculo_componente=id_curriculo_componente, estrutura=ec,
componente=cc, semestre=semestre_oferta,
tipo_vinculo=tipo_vinculo_componente, nivel=nivel_ensino)
oc.save()
if __name__ == "__main__":
main()
``` |
{
"source": "joaoevangelista/animated-octo-couscous",
"score": 3
} |
#### File: animated-octo-couscous/cars/toolbox.py
```python
def find_missing(df, token='?'):
col_acc = []
for c in df.columns:
if token in df[c].values:
col_acc.append(c)
print(*col_acc)
def show_object_columns(df):
return df.select_dtypes(include=['object'])
def to_category(df, columns):
for c in columns:
df[c] = df[c].astype('category')
return df
def convert_type(df, orig, to='float64'):
cols = df.select_dtypes(include=[orig]).columns
for c in cols:
df[c] = df[c].astype(to)
def show_nans(df):
return df.isnull().sum()
def learning_rate_gen(start=0.01, iterations=8):
    # Produce exactly `iterations` learning rates: the start value,
    # then a doubling at each subsequent step.
    acc = []
    for i in range(iterations):
        if i == 0:
            acc.append(start)
        else:
            acc.append(acc[-1] * 2)
    return acc
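# Quick sanity check (hypothetical usage, not part of the original module):
# learning_rate_gen(0.01, 4) -> [0.01, 0.02, 0.04, 0.08]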
``` |
{
"source": "joaofanti/TrabRedesIIFinal",
"score": 4
} |
#### File: TrabRedesIIFinal/Modelos/Game.py
```python
import sys
sys.path.insert(0, "Modelos/Mapa")
from Map import *
from Item import Item
"""
Define a classe que manipula a logica do jogo.
"""
class Game:
"""
Define um jogador do jogo.
"""
class Player:
"""
Cria uma nova instancia de jogador
"""
def __init__(self, name, addr, map):
self.Name = name
self.Addr = addr # player IP address
self.Room = 1 # the player always starts in room 1
self.Inventario = []
self.Inventario.append(Item("Mapa", map))
"""
Cria uma nova instancia de jogo.
"""
def __init__(self, map):
self.Map = map
self.Players = []
"""
Cria um novo jogador. Retorna falso se jogador ja existe. Retorna verdadeiro se jogador foi criado.
"""
def CriaJogador(self, playerId, addr):
if (self.getPlayer(playerId) != None):
return "FAIL"
self.Players.append(self.Player(playerId, addr, self.Map.showMap()))
return "OK"
"""
Examina a sala em que o jogador se encontra.
"""
def Examina(self, playerId):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
room = self.Map.getRoom(player.Room)
return room.ToString()
"""
Move o jogador para outra sala.
"""
def Move(self, playerId, direction):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
room = self.Map.getRoom(player.Room)
roomInDirection = room.GetRoomInDirection(direction)
if (roomInDirection != None):
if (room.CanMoveTo(direction)):
player.Room = roomInDirection
for item in player.Inventario:
if item.Name == "Mapa":
item.Description = self.Map.showMap(roomInDirection)
return "O jogador se moveu para a sala " + str(roomInDirection) + "."
else:
return "A porta esta fechada."
else:
return "Nao ha sala nesta direcao."
def Inventario(self, playerId):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
result = ""
ln = len(player.Inventario)
for i in range(0, ln):
result += player.Inventario[i].Name
if (i + 1 != ln):
result += " ; "
return result
def UsaItem(self, playerId, itemName, target = None):
player = self.getPlayer(playerId)
abriuPorta = False
if(player == None):
return "Player nao encontrado"
salaAtual = self.Map.getRoom(player.Room)
for item in player.Inventario:
if item.Name == itemName:
if "Nota" in str(item.Name):
return item.Description
elif item.Name == "Mapa":
return item.Description
elif item.Name == "ObjetoFinal":
if salaAtual.ID == 1:
return "Fim"
else:
return "Voce precisa estar na sala inicial para utilizar este objeto"
elif ("Chave" in str(item.Name)):
if target == None:
return "Escolha uma porta para abrir"
else:
for x in range(0, len(salaAtual.Doors)):
if str(x) == target:
abriuPorta = True
self.Map.getRoom(player.Room).Doors[x].OpenDoor()
if(abriuPorta == True):
return "Porta "+target+" foi aberta"
else:
return "Nao foi possivel abrir a porta "+target
return "Portas da sala "+str(salaAtual.ID)+" foram abertas"
else:
return "Item nao existente no inventario"
"""
Jogador pega um objeto que esta na sala atual
"""
def Pegar(self, playerId, objeto):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
salaAtual = self.Map.getRoom(player.Room)
if(salaAtual == None):
return "Sala nao encontrada"
objetoAdicionado = False
lenObjetos = len(salaAtual.Objects)
for x in range(0, lenObjetos):
objetoEncontrado = salaAtual.Objects[x]
if(str(objeto) == str(objetoEncontrado.Name)):
objetoAdicionado = True
del salaAtual.Objects[x]
player.Inventario.append(Item(objetoEncontrado.Name, objetoEncontrado.Description))
break
if(objetoAdicionado == True):
return "Objeto " + objeto + " adicionado ao inventario"
else:
return "Objeto " + objeto + " nao foi encontrado nesta sala"
"""
Larga objeto do inventario na sala atual
"""
def Largar(self, playerId, objeto):
player = self.getPlayer(playerId)
if(player == None):
return "Player nao encontrado"
salaAtual = self.Map.getRoom(player.Room)
objetoDeletado = False
for x in range(0, len(player.Inventario)):
itemPlayer = player.Inventario[x]
if(itemPlayer.Name == str(objeto)):
objetoDeletado = True
del player.Inventario[x]
salaAtual.Objects.append(Item(itemPlayer.Name, itemPlayer.Description))
if(objetoDeletado == True):
return "Objeto " + objeto + " adicionado a sala"
else:
return "Objeto " + objeto + " nao foi encontrado no inventario"
"""
Envia um texto para um jogador especifico
"""
def Cochichar(self, playerSource, text, playerTarget):
player = self.getPlayer(playerSource)
for x in range(0, len(self.Players)):
if(self.Players[x].Name == str(playerTarget)):
return (self.Players[x].Addr, text)
"""
Retorna os players presente na sala passada por parametro
"""
def getPlayersInRoom(self, room):
sala = self.Map.getRoom(room)
if(sala == None):
return "Sala nao encontrada"
playersNaSala = []
for x in range(0, len(self.Players)):
if(self.Players[x].Room == room):
playersNaSala.append(self.Players[x].Addr)
return playersNaSala
"""
Busca o jogador na lista de jogadores conectados ao jogo.
"""
def getPlayer(self, playerName):
for player in self.Players:
if player.Name == playerName:
return player
return None
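# Minimal usage sketch (hypothetical; the Map object comes from Modelos/Mapa/Map.py
# and the direction string is an assumption):
# game = Game(my_map)
# game.CriaJogador("alice", ("127.0.0.1", 5000))
# print(game.Examina("alice"))
# print(game.Move("alice", "norte"))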
```
#### File: TrabRedesIIFinal/Modelos/Jogador.py
```python
class Player:
    ID = ""  # Player identifier
    # NOTE: a mutable class attribute is shared by every instance; it is
    # safer to create the inventory inside __init__ instead.
    Inventario = []  # Inventory of the objects carried by the player
"""
Cria uma nova instancia de jogador.
"""
def __init__(self, numeroPlayer, sala, nome):
self.salaAtual = 4
self.numeroPlayer = numeroPlayer
self.nome = nome
self.sala = sala
self.chave = False
self.objetoFinal = False
self.botoesPressionados = { 1 : False,
2 : False,
3 : False,
4 : False,
5 : False }
self.sequenciaBotoes = []
```
#### File: Modelos/Mapa/Map.py
```python
"""
Define o mapa do jogo.
"""
class Map(object):
Rooms = [] # List of rooms
RoomDesign = "" # Room drawing used as a template
PlayerPlace = "PLAYER" # Text written where the player is located
"""
Cria uma nova instancia de Mapa
"""
def __init__(self, rooms, roomDesign):
self.Rooms = rooms
self.RoomDesign = roomDesign
"""
Retorna o mapa desenhado como ASCII com a posicao do jogador, se necessario.
"""
def showMap(self, playerPositionRoomId = -1):
if (playerPositionRoomId > 0):
return "Voce esta na sala " + str(playerPositionRoomId) + "\n" + self.RoomDesign
else:
return self.RoomDesign
def getRoom(self, roomId):
for room in self.Rooms:
if room.ID == roomId:
return room
return None
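# Hypothetical usage: Map(rooms, design).getRoom(1) returns the room whose
# ID is 1, or None when no such room exists.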
``` |
{
"source": "joaofbsm/battleship-intel",
"score": 4
} |
#### File: python/src/vertex.py
```python
class Vertex:
def __init__(self,
weight=None,
degree=0,
bipartite_set=None,
opening_time=None,
closing_time=None,
depth=0,
parent=None,
ancestors=None):
"""
Data structure to store vertex related information.
"""
# Weight of the vertex, which can be used to store other problem-specific information
self.weight = weight
# Number of edges connected to that vertex
self.degree = degree
# Opening time calculated by DFS
self.opening_time = opening_time
# Closing time calculated by DFS
self.closing_time = closing_time
# Index of the bipartite set this vertex belongs to, when its connected component is a bipartite graph
self.bipartite_set = bipartite_set
# This is the depth of this vertex considering the first vertex of its connected component as the root
self.depth = depth
# This is the parent of the vertex in the shortest path from it to the root
self.parent = parent
# Logarithmic ancestors of this vertex calculated with dynamic programming for LCA with Binary Lifting
self.ancestors = ancestors
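# Minimal usage sketch (hypothetical values):
# root = Vertex(weight=1.0)
# child = Vertex(weight=2.0, depth=1, parent=root)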
``` |
{
"source": "joaofbsm/neural-lang",
"score": 3
} |
#### File: neural-lang/code/nlp.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'MIT'
import logging
import gensim
import numpy as np
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
def split_corpus_file(path_to_file='', filename='corpus.txt',
proportions=(0.25, 0.5, 0.75, 1)):
"""
Split corpus into different sizes.
:param path_to_file: path to the corpus file.
:param filename: corpus file name.
:param proportions: proportions to apply to the size of corpus. One new file
for each value.
:return: None
"""
with open('{}{}'.format(path_to_file, filename), 'r') as f:
# Considering that you are using a corpus with only one line
corpus = f.read()
corpus = corpus.split(' ')
corpus_size = len(corpus)
for proportion in proportions:
splitting_point = round(corpus_size * proportion)
with open('{}{}{}'.format(path_to_file, proportion, filename), 'w+') as f:
f.write(' '.join(corpus[:splitting_point]))
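# Example (hypothetical paths): split_corpus_file('data/', 'corpus.txt') writes
# data/0.25corpus.txt, data/0.5corpus.txt, data/0.75corpus.txt and data/1corpus.txt.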
def prepare_validation_file(path_to_file='', filename='validation.txt',
prefix_filter=None, lowercase=False):
"""
Prepare the validation file for evaluation of analogy similarity.
All actions are optional.
:param path_to_file: path to the validation file.
:param filename: validation file name.
:param prefix_filter: prefix substring to filter line out of file.
:param lowercase: flag to convert all words in file to lowercase.
:return: None
"""
# Removes topic's headers
if prefix_filter is not None:
with open('{}{}'.format(path_to_file, filename), 'r') as old_file, \
open('{}prep_{}'.format(path_to_file, filename), 'w+') as new_file:
for line in old_file:
if not line.startswith(prefix_filter):
# Convert all words to lowercase
if lowercase:
line = line.lower()
new_file.write(line)
# Convert all words to lowercase in case there is no line filter
elif lowercase:
with open('{}{}'.format(path_to_file, filename), 'r') as old_file, \
open('{}prep_{}'.format(path_to_file, filename), 'w+') as new_file:
for line in old_file:
line = line.lower()
new_file.write(line)
def evaluate_analogies_distance(model, validation_path, validation_filename):
"""
Evaluate the mean cosine distance between predicted and expected analogy answers.
:param model: trained gensim word2vec model.
:param validation_path: path to the validation file.
:param validation_filename: validation file name.
:return: mean of the distances between predicted and correct words.
"""
oov_question = 0
oov_answer = 0
distances = []
with open(validation_path + validation_filename, 'r') as f:
for line in f:
words = line.split()
# Get word on top of the similarity to the resulting vector rank
try:
predicted = model.most_similar(positive=words[1:3],
negative=words[0],
topn=1)[0][0]
except KeyError:  # a question word is out of vocabulary
oov_question += 1
continue
# Calculate the distance between predicted and correct word
try:
distances.append(float(model.wv.distance(predicted, words[3])))
except KeyError:  # the answer word is out of vocabulary
oov_answer += 1
continue
mean_distances = np.mean(distances)
logging.info('Mean of analogies distance: {}'.format(mean_distances))
logging.info(('{} question words and {} answer words out of '
'vocabulary').format(oov_question, oov_answer))
return mean_distances
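# The validation file is expected to hold one analogy per line as four words
# "A B C D", where D is the expected answer to A : B :: C : D (see how
# `words` is indexed above).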
def train_models(corpus_path, corpus_filename, models_path, corpus_proportions,
context_sizes, training_algorithms):
"""
Train one word2vec model per (corpus proportion, context size, algorithm) triple.
:param corpus_path: path to the split corpus files.
:param corpus_filename: base corpus file name.
:param models_path: folder where trained models are saved.
:param corpus_proportions: corpus proportions produced by split_corpus_file.
:param context_sizes: word2vec window sizes to try.
:param training_algorithms: dict mapping algorithm name to the word2vec sg flag (0 = CBOW, 1 = skip-gram).
:return: None
"""
for corpus_proportion in corpus_proportions:
sentences = gensim.models.word2vec.LineSentence(
'{}{}{}'.format(corpus_path, corpus_proportion, corpus_filename)
)
for context_size in context_sizes:
for algorithm_name, sg in training_algorithms.items():
model = gensim.models.Word2Vec(
sentences=sentences,
window=context_size,
min_count=1,
workers=4,
sg=sg
)
model.save('{}{}-{}-{}.model'.format(
models_path,
corpus_proportion,
context_size,
algorithm_name)
)
def test_models(validation_path, validation_filename, models_path, results_path,
corpus_proportions, context_sizes, training_algorithms):
"""
Evaluate every trained model on the analogy task and write the results to disk.
:param validation_path: path to the validation file.
:param validation_filename: validation file name.
:param models_path: folder where the trained models were saved.
:param results_path: folder where result files are written.
:param corpus_proportions: corpus proportions used during training.
:param context_sizes: word2vec window sizes used during training.
:param training_algorithms: dict mapping algorithm name to the word2vec sg flag.
:return: None
"""
for corpus_proportion in corpus_proportions:
for context_size in context_sizes:
for algorithm_name, sg in training_algorithms.items():
model = gensim.models.Word2Vec.load('{}{}-{}-{}.model'.format(
models_path,
corpus_proportion,
context_size,
algorithm_name)
)
accuracy = model.wv.evaluate_word_analogies(
validation_path + validation_filename,
case_insensitive=True
)[0]
distance = evaluate_analogies_distance(
model,
validation_path,
'prep_' + validation_filename
)
with open('{}{}-{}-{}.txt'.format(
results_path,
corpus_proportion,
context_size,
algorithm_name),
'w+') as f:
f.write('accuracy={:.5g}\n'.format(accuracy))
f.write('distance={:.5g}'.format(distance))
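# Hypothetical driver (all paths and values are assumptions, not from the module):
# split_corpus_file('data/', 'corpus.txt')
# train_models('data/', 'corpus.txt', 'models/', (0.25, 0.5, 0.75, 1), (2, 5),
#              {'cbow': 0, 'skipgram': 1})
# test_models('data/', 'questions-words.txt', 'models/', 'results/',
#             (0.25, 0.5, 0.75, 1), (2, 5), {'cbow': 0, 'skipgram': 1})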
``` |
{
"source": "joaofbsm/tensor-network",
"score": 3
} |
#### File: tensor-network/implementation/main.py
```python
from __future__ import print_function
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
import matplotlib.pyplot as plt
import numpy as np
import keras
import sys
import utils
from collections import Counter
from copy import deepcopy
from imblearn.over_sampling import RandomOverSampler
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.utils import to_categorical
from sklearn.model_selection import StratifiedKFold
def train_network(X, y, train_index, test_index, parameters, extra=False):
"""Create and fit the MLP network
Arguments:
X -- Dataset input instances.
y -- Dataset output classes.
train_index -- K-fold generated train indexes.
test_index -- K-fold generated test indexes.
parameters -- Neural network model parameters.
Keyword arguments:
extra -- Flag for the presence of extra hidden layer (default: {False})
"""
model = Sequential() # Linear stack of layers
sgd = SGD(lr=parameters["l_rate"]) # SGD optimizer with a configurable learning rate
# Add hidden layer
model.add(Dense(parameters["hidden_size"], activation="sigmoid",
input_dim=parameters["input_size"]))
# Add extra layer if needed
if extra:
model.add(Dense(parameters["extra_size"], activation="sigmoid"))
# Add output layer
model.add(Dense(parameters["output_size"], activation="softmax"))
# Split input by k-fold generated indexes
train_X = X[train_index]
test_X = X[test_index]
# Convert output to one-hot encoding
train_y = to_categorical(y[train_index], num_classes=7)
test_y = to_categorical(y[test_index], num_classes=7)
# Compile model
model.compile(optimizer=sgd, loss="categorical_crossentropy",
metrics=["accuracy"])
# Fit model
results = model.fit(x=train_X, y=train_y,
batch_size=parameters["batch_size"],
epochs=parameters["n_epochs"],
validation_data=(test_X, test_y), shuffle=True)
return results.history
def execute_experiment(name, variations, X, y, parameters, kfold):
"""Train neural network for a set of different parameters and save results
Arguments:
name -- Name of the parameter to be varied.
variations -- Variations of the parameter.
X -- Dataset input instances.
y -- Dataset output classes.
parameters -- Neural network model parameters.
kfold -- Object used to create k-folds for crossvalidation.
"""
parameters = deepcopy(parameters)
accuracy = {}
for variation in variations:
parameters[name] = variation
accuracy_train = [] # Cross-validation train accuracy
accuracy_test = [] # Cross-validation test accuracy
for train_index, test_index in kfold.split(X, y):
if name == "extra_size":
results = train_network(X, y, train_index, test_index,
parameters, True)
else:
results = train_network(X, y, train_index, test_index,
parameters)
accuracy_train.append(results["acc"])
accuracy_test.append(results["val_acc"])
accuracy[variation] = {
"train_mean": np.mean(accuracy_train, axis=0),
"train_std": np.std(accuracy_train, axis=0),
"test_mean": np.mean(accuracy_test, axis=0),
"test_std": np.std(accuracy_test, axis=0)
}
utils.save_data(name, accuracy)
def balanced_experiment(X, y, parameters, kfold):
"""Oversample the dataset to analyze the performanced on a balanced one
Arguments:
X -- Dataset input instances.
y -- Dataset output classes.
parameters -- Neural network model parameters.
kfold -- Object used to create k-folds for crossvalidation.
"""
accuracy = {}
# Oversample data
X, y = RandomOverSampler().fit_sample(X, y)
accuracy_train = [] # Cross-validation train accuracy
accuracy_test = [] # Cross-validation test accuracy
for train_index, test_index in kfold.split(X, y):
results = train_network(X, y, train_index, test_index, parameters)
accuracy_train.append(results["acc"])
accuracy_test.append(results["val_acc"])
accuracy[parameters["hidden_size"]] = {
"train_mean": np.mean(accuracy_train, axis=0),
"train_std": np.std(accuracy_train, axis=0),
"test_mean": np.mean(accuracy_test, axis=0),
"test_std": np.std(accuracy_test, axis=0)
}
utils.save_data("balanced", accuracy)
def main(args):
X, y = utils.load_dataset(args[1])
parameters = {
"n_epochs": 200, # Number of epochs
"batch_size": 50, # Default batch size
"l_rate": 0.5, # Default learning rate
"input_size": 8, # Input layer size
"hidden_size": 50, # Default hidden layer size
"extra_size": 50, # Default extra hidden layer size
"output_size": 7 # Output layer size
}
experiments = {
"hidden_size": [5, 15, 50, 100], # Hidden layer sizes
"extra_size": [5, 15, 50, 100], # Extra layer sizes
"l_rate": [0.1, 0.5, 1, 10], # Learning rates
"batch_size": [1, 10, 50, 100] # Batch sizes
}
# Generator for 3-fold cross-validation
kfold = StratifiedKFold(n_splits=3, shuffle=True)
for key, value in experiments.items():
print("\nExecuting {}\n----------------------\n".format(key))
execute_experiment(key, value, X, y, parameters, kfold)
balanced_experiment(X, y, parameters, kfold)
if __name__ == "__main__":
main(sys.argv)
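# Assumed invocation (utils.load_dataset reads the dataset path from argv[1]):
# python main.py path/to/dataset.csv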
``` |
{
"source": "joaofbsm/upgraded-guacamole",
"score": 3
} |
#### File: upgraded-guacamole/feature_engineering/extractor.py
```python
from __future__ import print_function
import os
import sys
import MySQLdb
import pandas as pd
import numpy as np
from tqdm import tqdm
#==================================FUNCTIONS==================================#
def onehot_champions(match, db):
champions = pd.read_sql("SELECT id FROM Champion", db)
champions["pos"] = champions.index
champions = champions.set_index("id").to_dict()
blue_team = match[:5]
red_team = match[5:10]
blue_champions = np.zeros(len(champions["pos"]))
red_champions = np.zeros(len(champions["pos"]))
for _, player in blue_team.iterrows():
blue_champions[champions["pos"][player["championId"]]] = 1
for _, player in red_team.iterrows():
red_champions[champions["pos"][player["championId"]]] = 1
result = np.concatenate((blue_champions, red_champions))
return result
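# Layout note: with N rows in the Champion table the result has length 2*N;
# position i flags a blue-side pick and position N+i the same champion on red.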
def onehot_spells(match, db):
spells = pd.read_sql("SELECT id FROM SummonerSpell "
"WHERE name='Barrier' OR name='Cleanse' "
"OR name='Exhaust' OR name='Flash' OR name='Ghost' "
"OR name='Heal' OR name='Ignite' OR name='Smite' "
"OR name='Teleport'", db)
spells["pos"] = spells.index
spells = spells.set_index("id").to_dict()
blue_team = match[:5]
red_team = match[5:10]
blue_spells = np.zeros(len(spells["pos"]))
red_spells = np.zeros(len(spells["pos"]))
for _, player in blue_team.iterrows():
blue_spells[spells["pos"][player["spell1Id"]]] += 1
blue_spells[spells["pos"][player["spell2Id"]]] += 1
for _, player in red_team.iterrows():
red_spells[spells["pos"][player["spell1Id"]]] += 1
red_spells[spells["pos"][player["spell2Id"]]] += 1
result = np.concatenate((blue_spells, red_spells))
return result
def onehot_summoner_masteries_team(match, db, cursor):
masteries = pd.read_sql("SELECT id FROM Mastery", db)
masteries["pos"] = masteries.index
masteries = masteries.set_index("id").to_dict()
get_summoner_masteries = ("SELECT M.masteryId, M.rank "
"FROM MatchParticipant P, MatchMastery M, "
"MatchDetail D, MatchPlayer PL "
"WHERE PL.summonerId = %s "
"AND P._match_id = %s "
"AND PL._participant_id = P._id "
"AND P._id = M._participant_id "
"AND P._match_id = D.matchId AND D.mapId = 11 "
"ORDER BY P._match_id, PL.summonerId")
blue_team = match[:5]
red_team = match[5:10]
blue_summoner_masteries = np.zeros(45)
red_summoner_masteries = np.zeros(45)
for _, player in blue_team.iterrows():
cursor.execute(get_summoner_masteries, (player["summonerId"],
player["matchId"]))
summoner_masteries = list(cursor)
for mastery, rank in summoner_masteries:
blue_summoner_masteries[masteries["pos"][mastery]] += rank
for _, player in red_team.iterrows():
cursor.execute(get_summoner_masteries, (player["summonerId"],
player["matchId"]))
summoner_masteries = list(cursor)
for mastery, rank in summoner_masteries:
red_summoner_masteries[masteries["pos"][mastery]] += rank
results = np.concatenate((blue_summoner_masteries, red_summoner_masteries))
return results
def dmg_types_team(match, db):
champion_dmg = pd.read_sql("SELECT _champion_id, attack, defense, magic "
"FROM ChampionInfo "
"ORDER BY _champion_id", db)
champion_dmg = champion_dmg.set_index("_champion_id").T.to_dict("list")
blue_team = match[:5]
red_team = match[5:10]
blueteam_dmg = np.zeros((3))
redteam_dmg = np.zeros((3))
for _, player in blue_team.iterrows():
blueteam_dmg += champion_dmg[player["championId"]]
for _, player in red_team.iterrows():
redteam_dmg += champion_dmg[player["championId"]]
result = np.concatenate((blueteam_dmg, redteam_dmg))
return result
def dmg_types_percent_team(match, db):
champion_dmg = pd.read_sql("SELECT _champion_id, attack, magic "
"FROM ChampionInfo "
"ORDER BY _champion_id", db)
champion_dmg = champion_dmg.set_index("_champion_id").T.to_dict("list")
blue_team = match[:5]
red_team = match[5:10]
blueteam_dmg = np.zeros((2))
redteam_dmg = np.zeros((2))
for _, player in blue_team.iterrows():
blueteam_dmg += champion_dmg[player["championId"]]
total_dmg = np.sum(blueteam_dmg)
blueteam_dmg = 100 * np.around(np.divide(blueteam_dmg, total_dmg),
decimals=5)
for _, player in red_team.iterrows():
redteam_dmg += champion_dmg[player["championId"]]
total_dmg = np.sum(redteam_dmg)
redteam_dmg = 100 * np.around(np.divide(redteam_dmg, total_dmg),
decimals=5)
result = np.concatenate((blueteam_dmg, redteam_dmg))
return result
def mastery_scores_team(match, cursor):
get_mastery_scores = ("SELECT mastery "
"FROM SummonerMasteries "
"WHERE summId = %s")
blue_team = match[:5]
red_team = match[5:10]
mastery_scores = np.zeros(2)
for _, player in blue_team.iterrows():
cursor.execute(get_mastery_scores, [player["summonerId"]])
mastery_score = list(cursor)
if not mastery_score:
return None
mastery_score = mastery_score[0][0]
mastery_scores[0] += mastery_score
for _, player in red_team.iterrows():
cursor.execute(get_mastery_scores, [player["summonerId"]])
mastery_score = list(cursor)
if not mastery_score:
return None
mastery_score = mastery_score[0][0]
mastery_scores[1] += mastery_score
return mastery_scores
def champion_masteries_team(match, cursor):
get_champion_masteries = ("SELECT mastery "
"FROM SummonerChampMasteries "
"WHERE summId = %s AND championId = %s")
blue_team = match[:5]
red_team = match[5:10]
champion_masteries = np.zeros(2)
for _, player in blue_team.iterrows():
cursor.execute(get_champion_masteries, (player["summonerId"],
player["championId"]))
champion_mastery = list(cursor)
if not champion_mastery:
return None
champion_mastery = champion_mastery[0][0]
champion_masteries[0] += champion_mastery
for _, player in red_team.iterrows():
cursor.execute(get_champion_masteries, (player["summonerId"],
player["championId"]))
champion_mastery = list(cursor)
if not champion_mastery:
return None
champion_mastery = champion_mastery[0][0]
champion_masteries[1] += champion_mastery
return champion_masteries
def champion_masteries_summoner(match, cursor):
get_champion_masteries = ("SELECT mastery "
"FROM SummonerChampMasteries "
"WHERE summId = %s AND championId = %s")
blue_team = match[:5]
red_team = match[5:10]
blue_champion_masteries = np.zeros(5)
red_champion_masteries = np.zeros(5)
i = 0
for _, player in blue_team.iterrows():
cursor.execute(get_champion_masteries, (player["summonerId"],
player["championId"]))
champion_mastery = list(cursor)
if not champion_mastery:
return None
champion_mastery = champion_mastery[0][0]
blue_champion_masteries[i] = champion_mastery
i += 1
i = 0
for _, player in red_team.iterrows():
cursor.execute(get_champion_masteries, (player["summonerId"],
player["championId"]))
champion_mastery = list(cursor)
if not champion_mastery:
return None
champion_mastery = champion_mastery[0][0]
red_champion_masteries[i] = champion_mastery
i += 1
champion_masteries = np.concatenate((blue_champion_masteries,
red_champion_masteries))
return champion_masteries
def summoner_wins_and_rate_team(match, cursor):
get_history = ("SELECT wins, losses "
"FROM SummonerHistory "
"WHERE summId = %s")
blue_team = match[:5]
red_team = match[5:10]
blue_total = np.zeros(1)
red_total = np.zeros(1)
blue_wins = np.zeros(1)
red_wins = np.zeros(1)
blue_rate = np.zeros(1)
red_rate = np.zeros(1)
for _, player in blue_team.iterrows():
cursor.execute(get_history, [player["summonerId"]])
outcomes = list(cursor)[0]
if not outcomes:
continue
wins = outcomes[0]
losses = outcomes[1]
blue_total += wins + losses
blue_wins += wins
# Win rate as a percentage of games played
if blue_total > 0:
blue_rate = (blue_wins / (blue_total * 1.0)) * 100
for _, player in red_team.iterrows():
cursor.execute(get_history, [player["summonerId"]])
outcomes = list(cursor)[0]
if not outcomes:
continue
wins = outcomes[0]
losses = outcomes[1]
red_total += wins + losses
red_wins += wins
if red_total > 0:
red_rate = (red_wins / (red_total * 1.0)) * 100
result = np.concatenate((blue_rate, blue_wins, red_rate, red_wins))
return result
def champion_wins_and_rate_team(match, cursor):
get_history = ("SELECT wins, losses "
"FROM SummonerChampHistory "
"WHERE summId = %s AND championId = %s")
blue_team = match[:5]
red_team = match[5:10]
blue_total = np.zeros(1)
red_total = np.zeros(1)
blue_wins = np.zeros(1)
red_wins = np.zeros(1)
blue_rate = np.zeros(1)
red_rate = np.zeros(1)
for _, player in blue_team.iterrows():
cursor.execute(get_history, (player["summonerId"], player["championId"]))
outcomes = list(cursor)[0]
if not outcomes:
continue
wins = outcomes[0]
losses = outcomes[1]
blue_total += wins + losses
blue_wins += wins
if blue_total > 0:
blue_rate = (blue_wins / (blue_total * 1.0)) * 100
for _, player in red_team.iterrows():
cursor.execute(get_history, (player["summonerId"], player["championId"]))
outcomes = list(cursor)[0]
if not outcomes:
continue
wins = outcomes[0]
losses = outcomes[1]
red_total += wins + losses
red_wins += wins
if red_total > 0:
red_rate = (red_wins / (red_total * 1.0)) * 100
result = np.concatenate((blue_rate, blue_wins, red_rate, red_wins))
return result
def team_features_zero_to_ten(match, cursor):
get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen "
"FROM MatchParticipant PA, MatchPlayer PL, "
"MatchParticipantTimeline PT, "
"MatchParticipantTimelineData PTD "
"WHERE PL.summonerId = %s AND PA._match_id = %s "
"AND PL._participant_id = PA._id "
"AND PA._id = PT._participant_id "
"AND PT._id = PTD._timeline_id")
blue_team = match[:5]
red_team = match[5:10]
blue_zero_to_ten = np.zeros(4)
red_zero_to_ten = np.zeros(4)
for _, player in blue_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
blue_zero_to_ten[0] += features[2]
elif features[1] == "damageTakenPerMinDeltas":
blue_zero_to_ten[1] += features[2]
elif features[1] == "goldPerMinDeltas":
blue_zero_to_ten[2] += features[2]
elif features[1] == "xpPerMinDeltas":
blue_zero_to_ten[3] += features[2]
for _, player in red_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
red_zero_to_ten[0] += features[2]
elif features[1] == "damageTakenPerMinDeltas":
red_zero_to_ten[1] += features[2]
elif features[1] == "goldPerMinDeltas":
red_zero_to_ten[2] += features[2]
elif features[1] == "xpPerMinDeltas":
red_zero_to_ten[3] += features[2]
zero_to_ten = np.concatenate((blue_zero_to_ten, red_zero_to_ten))
return zero_to_ten
def team_features_zero_to_twenty(match, cursor):
get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen, "
"PTD.tenToTwenty "
"FROM MatchParticipant PA, MatchPlayer PL, "
"MatchParticipantTimeline PT, "
"MatchParticipantTimelineData PTD "
"WHERE PL.summonerId = %s AND PA._match_id = %s "
"AND PL._participant_id = PA._id "
"AND PA._id = PT._participant_id "
"AND PT._id = PTD._timeline_id")
blue_team = match[:5]
red_team = match[5:10]
blue_zero_to_twenty = np.zeros(4)
red_zero_to_twenty = np.zeros(4)
for _, player in blue_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
blue_zero_to_twenty[0] += features[2] + features[3]
elif features[1] == "damageTakenPerMinDeltas":
blue_zero_to_twenty[1] += features[2] + features[3]
elif features[1] == "goldPerMinDeltas":
blue_zero_to_twenty[2] += features[2] + features[3]
elif features[1] == "xpPerMinDeltas":
blue_zero_to_twenty[3] += features[2] + features[3]
for _, player in red_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
red_zero_to_twenty[0] += features[2] + features[3]
elif features[1] == "damageTakenPerMinDeltas":
red_zero_to_twenty[1] += features[2] + features[3]
elif features[1] == "goldPerMinDeltas":
red_zero_to_twenty[2] += features[2] + features[3]
elif features[1] == "xpPerMinDeltas":
red_zero_to_twenty[3] += features[2] + features[3]
zero_to_twenty = np.concatenate((blue_zero_to_twenty, red_zero_to_twenty))
return zero_to_twenty
def team_features_zero_to_thirty(match, cursor):
get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen, "
"PTD.tenToTwenty, PTD.twentyToThirty "
"FROM MatchParticipant PA, MatchPlayer PL, "
"MatchParticipantTimeline PT, "
"MatchParticipantTimelineData PTD "
"WHERE PL.summonerId = %s AND PA._match_id = %s "
"AND PL._participant_id = PA._id "
"AND PA._id = PT._participant_id "
"AND PT._id = PTD._timeline_id")
blue_team = match[:5]
red_team = match[5:10]
blue_zero_to_thirty = np.zeros(4)
red_zero_to_thirty = np.zeros(4)
for _, player in blue_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
blue_zero_to_thirty[0] += (features[2] + features[3]
+ features[4])
elif features[1] == "damageTakenPerMinDeltas":
blue_zero_to_thirty[1] += (features[2] + features[3]
+ features[4])
elif features[1] == "goldPerMinDeltas":
blue_zero_to_thirty[2] += (features[2] + features[3]
+ features[4])
elif features[1] == "xpPerMinDeltas":
blue_zero_to_thirty[3] += (features[2] + features[3]
+ features[4])
for _, player in red_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
red_zero_to_thirty[0] += (features[2] + features[3]
+ features[4])
elif features[1] == "damageTakenPerMinDeltas":
red_zero_to_thirty[1] += (features[2] + features[3]
+ features[4])
elif features[1] == "goldPerMinDeltas":
red_zero_to_thirty[2] += (features[2] + features[3]
+ features[4])
elif features[1] == "xpPerMinDeltas":
red_zero_to_thirty[3] += (features[2] + features[3]
+ features[4])
zero_to_thirty = np.concatenate((blue_zero_to_thirty, red_zero_to_thirty))
return zero_to_thirty
def team_features_zero_to_end(match, cursor):
get_features = ("SELECT PL.summonerId, PTD._type, PTD.zeroToTen, "
"PTD.tenToTwenty, PTD.twentyToThirty, PTD.thirtyToEnd "
"FROM MatchParticipant PA, MatchPlayer PL, "
"MatchParticipantTimeline PT, "
"MatchParticipantTimelineData PTD "
"WHERE PL.summonerId = %s AND PA._match_id = %s "
"AND PL._participant_id = PA._id "
"AND PA._id = PT._participant_id "
"AND PT._id = PTD._timeline_id")
blue_team = match[:5]
red_team = match[5:10]
blue_zero_to_end = np.zeros(4)
red_zero_to_end = np.zeros(4)
for _, player in blue_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
blue_zero_to_end[0] += (features[2] + features[3]
+ features[4] + features[5])
elif features[1] == "damageTakenPerMinDeltas":
blue_zero_to_end[1] += (features[2] + features[3]
+ features[4] + features[5])
elif features[1] == "goldPerMinDeltas":
blue_zero_to_end[2] += (features[2] + features[3]
+ features[4] + features[5])
elif features[1] == "xpPerMinDeltas":
blue_zero_to_end[3] += (features[2] + features[3]
+ features[4] + features[5])
for _, player in red_team.iterrows():
cursor.execute(get_features, (player["summonerId"],
player["matchId"]))
player_features = list(cursor)
if not player_features:
return None
for features in player_features:
if features[1] == "creepsPerMinDeltas":
red_zero_to_end[0] += (features[2] + features[3]
+ features[4] + features[5])
elif features[1] == "damageTakenPerMinDeltas":
red_zero_to_end[1] += (features[2] + features[3]
+ features[4] + features[5])
elif features[1] == "goldPerMinDeltas":
red_zero_to_end[2] += (features[2] + features[3]
+ features[4] + features[5])
elif features[1] == "xpPerMinDeltas":
red_zero_to_end[3] += (features[2] + features[3]
+ features[4] + features[5])
zero_to_end = np.concatenate((blue_zero_to_end, red_zero_to_end))
return zero_to_end
def remove_incomplete_instances(dataset):
incomplete_instances = []
for i, instance in enumerate(dataset):
complete = np.count_nonzero(instance)
if not complete:
incomplete_instances.append(i)
dataset = np.delete(dataset, incomplete_instances, axis=0)
print("\n\n", len(incomplete_instances), "incomplete instances removed.")
return dataset
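# Note: rows skipped with `continue` in the feature loop stay all-zero, so this
# filter also drops matches whose feature lookups failed.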
#===================================MODELS====================================#
def feature_testing(db, cursor):
"""
Size of features
----------------
onehot_champions: 272
onehot_spells: 18
onehot_summoner_masteries_team: 90
dmg_types_team = 6
dmg_types_percent_team = 4
mastery_scores_team = 2
mastery_scores_diff = 1
champion_masteries_team = 2
champion_masteries_summoner = 10
summoner_wins_and_rate_team = 4
champion_wins_and_rate_team = 4
zero_to_ten -> end = 8
zero_to_ten -> end_diff = 4
winner: 1
TOTAL: 418
"""
df = pd.read_sql("SELECT D.matchId, PL.summonerId, P.championId, P.teamId,"
" P.spell1Id, P.spell2Id, T.winner "
"FROM MatchParticipant P, MatchDetail D, MatchTeam T, "
"MatchPlayer PL "
"WHERE P._match_id = D.matchId AND D.mapId = 11 "
"AND D.matchId = T._match_id AND P.teamId = T.teamId "
"AND PL._participant_id = P._id "
"ORDER BY D.matchId, P.teamId", db)
dataset = np.zeros((df.shape[0] / 10, 407))
bar = tqdm(total=df.shape[0] / 10)
for i, player in enumerate(xrange(0, df.shape[0] - 10, 10)):
bar.update(1)
match = df[player:player + 10]
#=================================PRE=================================#
champions = onehot_champions(match, db)
spells = onehot_spells(match, db)
masteries = onehot_summoner_masteries_team(match, db, cursor)
dmg_types = dmg_types_team(match, db)
dmg_percent = dmg_types_percent_team(match, db)
mastery_scores = mastery_scores_team(match, cursor)
if mastery_scores is None:
continue
mastery_scores_diff = mastery_scores[0] - mastery_scores[1]
mastery_scores_diff = mastery_scores_diff[np.newaxis]
champion_team_masteries = champion_masteries_team(match, cursor)
if champion_team_masteries is None:
continue
champion_team_diff = champion_team_masteries[0] - champion_team_masteries[1]
champion_team_diff = champion_team_diff[np.newaxis]
champion_summ_masteries = champion_masteries_summoner(match, cursor)
if champion_summ_masteries is None:
continue
#historysumm = summoner_wins_and_rate_team(match, cursor)
#historysumm_wins_diff = np.zeros(2)
#historysumm_wins_diff[0] = historysumm[0] - historysumm[2]
#historysumm_wins_diff[1] = historysumm[1] - historysumm[3]
#historychamp = champion_wins_and_rate_team(match, cursor)
#historychamp_wins_diff = np.zeros(2)
#historychamp_wins_diff[0] = historychamp[0] - historychamp[2]
#historychamp_wins_diff[1] = historychamp[1] - historychamp[3]
#=================================IN==================================#
#zero_to_ = team_features_zero_to_thirty(match, cursor)
#if zero_to_ is None:
# continue
#zero_to_diff = zero_to_[:4] - zero_to_[4:]
winner = np.array(df["winner"].iloc[player])[np.newaxis]
# PRE
dataset[i] = np.concatenate((champions, spells, masteries, dmg_types,
                             dmg_percent, mastery_scores, mastery_scores_diff,
                             champion_team_masteries, champion_team_diff,
                             champion_summ_masteries, winner))
# IN
#dataset[i] = np.concatenate((zero_to_, zero_to_diff, winner))
dataset = remove_incomplete_instances(dataset)
return dataset
#====================================MAIN=====================================#
def main(args):
db = MySQLdb.connect(host="localhost", user="root", passwd="<PASSWORD>",
db="lol")
cursor = db.cursor()
db.set_character_set('utf8')
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
model_name = args[0]
feature_models = {"ft": feature_testing}
model = feature_models[model_name](db, cursor)
if model_name == "ft":
model_name = args[1]
np.savetxt(model_name + ".csv", model, delimiter=",", fmt="%.5g")
cursor.close()
db.close()
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "joao-fb/surf-reporter",
"score": 3
} |
#### File: joao-fb/surf-reporter/reporter.py
```python
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import datetime
import os
class Reporter:
def __init__(self, url, x_arg):
# Replace below path with the absolute path
# to chromedriver in your computer
options = webdriver.ChromeOptions()
environment = os.environ["environment"]
if environment == 'test':
CHROMEDRIVER_PATH = '/Applications/chromedriver'
elif environment == 'prod':
CHROMEDRIVER_PATH = os.environ["CHROMEDRIVER_PATH"]
chrome_bin = os.environ.get('GOOGLE_CHROME_BIN', "chromedriver")
options.binary_location = chrome_bin
options.add_argument("--disable-gpu")
options.add_argument("--no-sandbox")
options.add_argument('--headless')
options.add_argument("--disable-dev-shm-usage")
else:
CHROMEDRIVER_PATH = 'no path'
self.driver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, options=options)
self.driver.get(url)
wait = WebDriverWait(self.driver, 600)
wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))
def find_energies(self):
surf_guru = self.driver.page_source
soup = BeautifulSoup(surf_guru, features='html.parser')
html_energies = soup.findAll("label", {"class": "resumo_energia_en"})
week_energies = [html_energie.text for html_energie in html_energies]
return week_energies
def process_energies(self, raw_energies):
p_energies = {}
day = 0
for energy in raw_energies:
if 'J' in energy:
date = datetime.datetime.today()
date += datetime.timedelta(days=day)
report_date = date
energy = energy.replace('J', "")
energy = energy.replace('▼', "")
energy = energy.replace('▲', "")
energy = int(energy)
p_energies[report_date.strftime('%A, %d %b %Y')] = energy
day += 1
return p_energies
def __del__(self):
self.driver.quit()
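# Hypothetical usage (URL and XPath are assumptions, not part of the module):
# r = Reporter('https://example.com/forecast', "//label[@class='resumo_energia_en']")
# print(r.process_energies(r.find_energies()))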
``` |
{
"source": "JoaoFdC/PyroNear",
"score": 3
} |
#### File: pyronear/datasets/utils.py
```python
import requests
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
from pathlib import Path
from functools import partial
from tqdm import tqdm
from urllib.parse import urlparse
from torchvision.datasets.utils import check_integrity
def url_retrieve(url, outfile, timeout=4):
"""Download the content of an URL request to a specified location
Args:
url (str): URL to request
outfile (pathlib.Path): path of the file where the response will be saved
timeout (float, optional): number of seconds before the request times out
"""
response = requests.get(url, timeout=timeout, allow_redirects=True)
if response.status_code != 200:
raise requests.exceptions.ConnectionError(f'Error code {response.status_code} - could not download {url}')
outfile.write_bytes(response.content)
def get_fname(url, default_extension='jpg', max_base_length=50):
"""Find extension of file located by URL
Args:
url (str): URL of the file
default_extension (str, optional): default extension
max_base_length (int, optional): max base filename's length
Returns:
str: file name
"""
name_split = urlparse(url).path.rpartition('/')[-1].split('.')
# Check if viable extension
if len(name_split) > 1 and all(c.isalpha() for c in name_split[-1].lower()):
base, extension = '.'.join(name_split[:-1]), name_split[-1].lower()
# Fallback on default extension
else:
base, extension = name_split[-1], default_extension
# Check base length
if len(base) > max_base_length:
base = base[:max_base_length]
return f"{base}.{extension}"
def download_url(url, root, filename=None, md5=None, timeout=4,
retries=4, verbose=False, silent=False):
"""Download a file accessible via URL with mutiple retries
Args:
url (str or tuple<str, str>): URL to request
root (pathlib.Path): folder where the file will be saved in
filename (str, optional): name of the output file
md5 (str, optional): md5 for integrity verification
timeout (float, optional): number of seconds before the request times out
retries (int, optional): number of additional allowed download attempts
verbose (bool, optional): whether status can be displayed in console
silent (bool, optional): whether Exception should be raised upon download failure
"""
if isinstance(url, tuple):
url, filename = url
if not isinstance(url, str):
raise TypeError('expected argument url to be of type <str>')
# Root folder
root = Path(root).expanduser()
root.mkdir(parents=True, exist_ok=True)
if not filename:
filename = get_fname(url)
fpath = root.joinpath(filename)
# Download file
if check_integrity(fpath, md5):
if verbose:
print(f'Using downloaded and verified file: {fpath}')
else:
success = False
# Allow multiple retries
for idx in range(retries + 1):
try:
url_retrieve(url, fpath, timeout)
success = True
except Exception as e:
# Try switching to http
if url.startswith('https'):
try:
url_retrieve(url.replace('https:', 'http:'), fpath, timeout)
success = True
except Exception:
success = False
# Handle exception
if not success and (idx == retries):
if not silent:
raise e
elif verbose:
print(e)
if success:
break
def parallel(func, arr, threads=None, leave=False):
"""Download a file accessible via URL with mutiple retries
Args:
func (callable): function to be executed on multiple workers
arr (iterable): function argument's values
threads (int, optional): number of workers to be used for multiprocessing
leave (bool, optional): whether traces of progressbar should be kept upon termination
Returns:
list: list of function's results
"""
if threads is None:
threads = min(16, mp.cpu_count())
if threads < 2:
results = [func(arg) for arg in tqdm(arr, total=len(arr), leave=leave)]
else:
with ThreadPool(threads) as tp:
results = list(tqdm(tp.imap_unordered(func, arr), total=len(arr)))
if any([o is not None for o in results]):
return results
def download_urls(entries, root, timeout=4, retries=4, threads=None, silent=True):
"""Download multiple URLs a file accessible via URL with mutiple retries
Args:
entries (list<str, str>): URL and destination filen
root (pathlib.Path): folder where the files will be saved in
timeout (float, optional): number of seconds before the request times out
retries (int, optional): number of additional allowed download attempts
threads (int, optional): number of threads to be used for multiprocessing
silent (bool, optional): whether Exception should be raised upon download failure
"""
parallel(partial(download_url, root=root, timeout=timeout, retries=retries, silent=silent),
entries, threads=threads)
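# Hypothetical usage:
# entries = [('https://example.com/a.jpg', 'a.jpg'), ('https://example.com/b.jpg', 'b.jpg')]
# download_urls(entries, Path('./images'))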
```
#### File: pyronear/models/mobilenet.py
```python
from torchvision.models.mobilenet import MobileNetV2, model_urls as imagenet_urls
from torchvision.models.utils import load_state_dict_from_url
from .utils import cnn_model
__all__ = ['mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://srv-file7.gofile.io/download/RKagNy/mobilenet_v2-binary-classification.pth'
}
model_cut = -1
def mobilenet_v2(pretrained=False, progress=True, imagenet_pretrained=False,
num_classes=1, lin_features=512, dropout_prob=0.5,
bn_final=False, concat_pool=True, **kwargs):
r"""MobileNetV2 model from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2`
"""
# Model creation
base_model = MobileNetV2(num_classes=num_classes, **kwargs)
# Imagenet pretraining
if imagenet_pretrained:
if pretrained:
raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
state_dict = load_state_dict_from_url(imagenet_urls['mobilenet_v2'],
progress=progress)
# Remove FC params from dict
for key in ('classifier.1.weight', 'classifier.1.bias'):
state_dict.pop(key, None)
missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing):
raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
# Cut at last conv layers
model = cnn_model(base_model, model_cut, base_model.classifier[1].in_features, num_classes,
lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
# Parameter loading
if pretrained:
state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
progress=progress)
model.load_state_dict(state_dict)
return model
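# Hypothetical usage (downloads the ImageNet backbone weights on first call):
# model = mobilenet_v2(imagenet_pretrained=True, num_classes=1)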
``` |
{
"source": "JoaoFelipe-AlvesOliveira/LearningPython",
"score": 4
} |
#### File: LearningPython/Functions/exercicio_4.py
```python
def restringirDoaçaoMulher(peso, sexo):
    # Women may only donate when they weigh at least 50 kg.
    if sexo == "f" and peso < 50:
        resultado = "Você não pode doar sangue, pois está abaixo do peso"
    else:
        resultado = "Você pode doar sangue"
    return resultado

def restringirDoaçaoHomem(peso, sexo):
    # Men may only donate when they weigh at least 60 kg.
    if sexo == "m" and peso < 60:
        resultado = "Você não pode doar sangue, pois está abaixo do peso"
    else:
        resultado = "Você pode doar sangue"
    return resultado

peso = float(input("Informe o seu peso: "))
sexo = input("Informe o seu sexo apenas com as iniciais: ")
# Dispatch to the function that matches the reported sex.
if sexo == "f":
    resultado = restringirDoaçaoMulher(peso, sexo)
else:
    resultado = restringirDoaçaoHomem(peso, sexo)
print(resultado)
``` |
{
"source": "JoaoFelipe/apted",
"score": 2
} |
#### File: apted/apted/all_possible_mappings_ted.py
```python
from __future__ import (absolute_import, division)
from copy import copy
from .config import Config
from .node_indexer import NodeIndexer
class AllPossibleMappingsTED(object):
"""Implements an exponential algorithm for the tree edit distance. It
computes all possible TED mappings between two trees and calculated their
minimal cost."""
def __init__(self, tree1, tree2, config=None):
self.config = config or Config()
"""Config object that specifies how to calculate the edit distance"""
self.it1 = NodeIndexer(tree1, 0, self.config)
"""Stores the indexes of the first input tree"""
self.it2 = NodeIndexer(tree2, 1, self.config)
"""Stores the indexes of the second input tree"""
def compute_edit_distance(self):
"""Computes the tree edit distance between two trees by trying all
possible TED mappings. It uses the specified cost model."""
mappings = [
            mapping for mapping in self.generate_all_one_to_one_mappings()
if self.is_ted_mapping(mapping)
]
return self.get_min_cost(mappings)
    def generate_all_one_to_one_mappings(self):
        """Generate all possible 1-1 mappings.
        These mappings do not conform to TED conditions (sibling-order and
        ancestor-descendant).
        A mapping is a list of (node1, node2) pairs; None on either side
        marks an inserted or deleted node.
        Returns a list of all 1-1 mappings.
        """
mappings = [
[(node1, None) for node1 in self.it1.pre_ltr_info] +
[(None, node2) for node2 in self.it2.pre_ltr_info]
]
# For each node in the source tree
for node1 in self.it1.pre_ltr_info:
# Duplicate all mappings and store in mappings_copy
mappings_copy = [
copy(x) for x in mappings
]
# For each node in the destination tree
for node2 in self.it2.pre_ltr_info:
# For each mapping (produced for all n1 values smaller than
# current n1)
for mapping in mappings_copy:
# Produce new mappings with the pair (n1, n2) by adding this
# pair to all mappings where it is valid to add
element_add = True
# Verify if (n1, n2) can be added to mapping m.
# All elements in m are checked with (n1, n2) for possible
# violation
# One-to-one condition
for ele1, ele2 in mapping:
                        # node2 must not appear in any of the previous pairs
if ele1 and ele2 and ele2 is node2:
element_add = False
break
                    # New mappings must be produced by duplicating a previous
                    # mapping and extending it by (n1, n2)
if element_add:
m_copy = copy(mapping)
m_copy.append((node1, node2))
m_copy.remove((node1, None))
m_copy.remove((None, node2))
mappings.append(m_copy)
return mappings
def is_ted_mapping(self, mapping):
"""Test if a 1-1 mapping is a TED mapping"""
# pylint: disable=no-self-use, invalid-name
        # Validate each pair of pairs of mapped nodes in the mapping
for node_a1, node_a2 in mapping:
# Use only pairs of mapped nodes for validation.
if node_a1 is None or node_a2 is None:
continue
for node_b1, node_b2 in mapping:
# Use only pairs of mapped nodes for validation.
if node_b1 is None or node_b2 is None:
continue
# If any of the conditions below doesn't hold, discard m.
# Validate ancestor-descendant condition.
n1 = (
node_a1.pre_ltr < node_b1.pre_ltr and
node_a1.pre_rtl < node_b1.pre_rtl
)
n2 = (
node_a2.pre_ltr < node_b2.pre_ltr and
node_a2.pre_rtl < node_b2.pre_rtl
)
if (n1 and not n2) or (not n1 and n2):
# Discard the mapping.
# If this condition doesn't hold, the next condition
# doesn't have to be verified any more and any other
# pair doesn't have to be verified any more.
return False
                # Validate sibling-order condition
n1 = (
node_a1.pre_ltr < node_b1.pre_ltr and
node_a1.pre_rtl > node_b1.pre_rtl
)
n2 = (
node_a2.pre_ltr < node_b2.pre_ltr and
node_a2.pre_rtl > node_b2.pre_rtl
)
if (n1 and not n2) or (not n1 and n2):
# Discard the mapping.
return False
return True
def get_min_cost(self, mappings):
"""Given list of all TED mappings, calculate the cost of the
minimal-cost mapping."""
insert, delete = self.config.insert, self.config.delete
rename = self.config.rename
# Initialize min_cost to the upper bound
min_cost = float('inf')
# verify cost of each mapping
for mapping in mappings:
m_cost = 0
# Sum up edit costs for all elements in the mapping m.
for node1, node2 in mapping:
if node1 and node2:
m_cost += rename(node1.node, node2.node)
elif node1:
m_cost += delete(node1.node)
else:
m_cost += insert(node2.node)
# Break as soon as the current min_cost is exceeded.
# Only for early loop break.
if m_cost > min_cost:
break
# Store the minimal cost - compare m_cost and min_cost
min_cost = min(min_cost, m_cost)
return min_cost
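# Usage sketch (hedged -- it assumes the package's tree parser,
# apted.helpers.Tree.from_text, whose nodes are compatible with Config):
#   from apted.helpers import Tree
#   ted = AllPossibleMappingsTED(Tree.from_text("{a{b}{c}}"),
#                                Tree.from_text("{a{b{d}}}"))
#   print(ted.compute_edit_distance())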
``` |
{
"source": "JoaoFelipe/extensible_provn",
"score": 2
} |
#### File: extensible_provn/query/provn.py
```python
import dateutil.parser
from .querier import querier, var, BLANK
from ..utils import parsetime
@querier.prov("entity", ["id", "text"])
def entity(dot, eid, attrs=None, id_=None):
return [
eid,
querier.text("entity", [eid], attrs, id_)
]
@querier.prov("activity", ["id", "start", "end", "text"])
def activity(dot, aid, start_time=None, end_time=None, attrs=None, id_=None):
start = parsetime(start_time)
end = parsetime(end_time)
return [
aid, start, end,
querier.text("activity", [aid, start_time, end_time], attrs, id_)
]
@querier.prov("used", ["id", "activity", "entity", "time", "text"])
def used(dot, aid, eid=None, time=None, attrs=None, id_=None):
ti = parsetime(time)
return [
id_, aid, eid, ti,
querier.text("used", [aid, eid, time], attrs, id_)
]
@querier.prov("wasDerivedFrom", ["generated", "used", "activity", "generation", "use", "attrs", "text"])
def wasDerivedFrom(dot, egenerated=None, eused=None, aid=None, gid=None, uid=None, attrs=None, id_=None):
return [
egenerated, eused, aid, gid, uid,
attrs or {}, querier.text(
"wasDerivedFrom",
[egenerated, eused, aid, gid, uid], attrs, id_
)
]
@querier.prov("wasGeneratedBy", ["id", "entity", "activity", "time", "text"])
def wasGeneratedBy(dot, eid, aid=None, time=None, attrs=None, id_=None):
ti = parsetime(time)
return [
id_, eid, aid, ti,
querier.text("wasGeneratedBy", [eid, aid, time], attrs, id_)
]
@querier.prov("hadMember", ["collection", "entity", "text"])
def hadMember(dot, ecollection=None, eid=None, attrs=None, id_=None):
return [
ecollection, eid,
querier.text("hadMember", [ecollection, eid], attrs, id_)
]
@querier.prov("specializationOf", ["specific", "general", "text"])
def specializationOf(dot, specific=None, general=None, attrs=None, id_=None):
return [
specific, general,
querier.text("specializationOf", [specific, general], attrs, id_)
]
```
#### File: extensible_provn/view/intertwined_prov.py
```python
from .provn import prov
from .prov_dictionary import graph
from ..utils import unquote
from .. import utils
NAMESPACE = "https://dew-uff.github.io/versioned-prov/ns/intertwined#"
def intertwined(attrs, key, default="-"):
try:
return attrs[(key, "intertwined", NAMESPACE)]
except KeyError:
return default
def ns_intertwined(key):
return {
"intertwined:" + key,
NAMESPACE + key,
key
}
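# Note: `attrs` maps (name, prefix, namespace) tuples to values, as read by the
# `intertwined` helper above, e.g. attrs[("checkpoint", "intertwined", NAMESPACE)].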
@graph.prov("entity")
def entity(dot, eid, attrs=None, id_=None):
if prov(attrs, 'type') in ns_intertwined('Version'):
return dot.node(attrs, "version", eid)
return dot.node(attrs, "entity", eid)
@graph.prov("wasDerivedFrom")
def was_derived_from(dot, egenerated=None, eused=None, aid=None, gid=None, uid=None, attrs=None, id_=None):
if aid and gid and uid:
dot.used_required[(aid, eused)] = (uid, attrs)
dot.generated_required[(egenerated, aid)] = (gid, attrs)
if prov(attrs, 'type') in ns_intertwined('Reference'):
if intertwined(attrs, 'access', False):
return dot.arrow3(
attrs, "int_wasDerivedFrom",
egenerated, intertwined(attrs, 'collection'), eused,
"",
"der ac-{}\n{}".format(
intertwined(attrs, 'access'),
intertwined(attrs, 'checkpoint')
),
"[{}]".format(intertwined(attrs, 'key')),
)
return dot.arrow2(
attrs, "int_wasDerivedFrom",
egenerated, eused, "der ref\n{}".format(
intertwined(attrs, 'checkpoint')
),
extra="4"
)
return dot.arrow2(attrs, "wasDerivedFrom", egenerated, eused, "der")
@graph.prov("used")
def used(dot, aid, eid=None, time=None, attrs=None, id_=None):
dot.used.add((aid, eid))
checkpoint = intertwined(attrs, 'checkpoint', False)
if checkpoint:
return dot.arrow2(attrs, "int_used", aid, eid, "use\n{}".format(checkpoint))
return dot.arrow2(attrs, "used", aid, eid, "use")
@graph.prov("wasGeneratedBy")
def was_generated_by(dot, aid, eid=None, time=None, attrs=None, id_=None):
dot.used.add((aid, eid))
checkpoint = intertwined(attrs, 'checkpoint', False)
if checkpoint:
return dot.arrow2(attrs, "int_wasGeneratedBy", aid, eid, "gen\n{}".format(checkpoint))
return dot.arrow2(attrs, "wasGeneratedBy", aid, eid, "gen")
def _main():
"""Main function"""
graph.main()
if __name__ == "__main__":
_main()
```
#### File: extensible_provn/view/mutable_prov.py
```python
from .provn import graph
from ..utils import unquote
@graph.prov("value")
def value(dot, vid, attrs=None, id_=None):
return dot.node(attrs, "value", vid)
@graph.prov("accessed")
def accessed(dot, eid=None, vid=None, time=None, attrs=None, id_=None):
return dot.arrow2(attrs, "accessed", eid, vid, "access\n{}".format(time or "-"))
@graph.prov("accessedPart")
def accessed_part(dot, eid=None, wid=None, key=None, pid=None, time=None, attrs=None, id_=None):
key = unquote(key)
return dot.arrow2(attrs, "accessedPart", eid, pid, "part\n{}[{}]\n{}".format(
wid or "-", key or "-", time or "-"
))
@graph.prov("defined")
def defined(dot, eid=None, vid=None, time=None, attrs=None, id_=None):
return dot.arrow2(attrs, "defined", eid, vid, "defined\n{}".format(time or "-"))
@graph.prov("wasDefinedBy")
def was_defined_by(dot, vid=None, eid=None, time=None, attrs=None, id_=None):
return dot.arrow2(attrs, "wasDefinedBy", vid, eid, "def by\n{}".format(time or "-"))
@graph.prov("derivedByInsertion")
def derived_by_insertion(dot, eid=None, wid=None, changes=None, time=None, attrs=None, id_=None):
result = []
for pos, part in changes:
pos = unquote(pos)
result.append(dot.arrow2(
attrs, "derivedByInsertion",
wid, part, "der-ins-v\n[{}]\n{}".format(pos or "-", time or "-"),
extra="0"
))
result.append(dot.arrow2(
attrs, "derivedByInsertion",
part, eid, "der-ins-e\n[{}]\n{}".format(pos or "-", time or "-"),
extra="1"
))
result = [x for x in result if x]
if not result:
return None
return "\n".join(result)
@graph.prov("derivedByRemoval")
def derived_by_removal(dot, eid=None, wid=None, positions=None, time=None, attrs=None, id_=None):
result = []
for pos in positions:
pos = unquote(pos)
result.append(dot.arrow2(
attrs, "derivedByRemoval",
wid, eid, "der-rem\n[{}]\n{}".format(pos or "-", time or "-")
))
result = [x for x in result if x]
if not result:
return None
return "\n".join(result)
def _main():
"""Main function"""
graph.main()
if __name__ == "__main__":
_main()
```
#### File: view/style/nohighlight.py
```python
from .provtoolbox import ProvToolboxStyle
class NoHighlightStyle(ProvToolboxStyle):
def __init__(self):
super(NoHighlightStyle, self).__init__()
self.use_parsetime = False
self.hide_namespace = False
self.qualified_attr = True
self.labelsize = "14"
self.join(self.style, {
#"hadMember_label": lambda l, a: self.taillabel("[ ]", a),
"derivedByInsertionFrom1": {"arrowhead": "none"},
"derivedByInsertionFrom_label*": self.label,
"hadDictionaryMember_label*": self.label,
"value": {"fillcolor": "#FFFC87", "color": "#808080", "style": "filled"},
"accessed_label*": self.label,
"accessedPart_label*": self.label,
"defined_label*": self.label,
"wasDefinedBy_label*": self.label,
"derivedByInsertion1": {"style":"dashed"},
"derivedByInsertion_label*": self.label,
"derivedByRemoval_label*": self.label,
"version": {"fillcolor": "#FFFC87", "color": "#808080", "style": "filled"},
"int_wasDerivedFrom1": {"arrowhead": "none"},
"int_wasDerivedFrom0": {"style":"dashed"},
"int_wasDerivedFrom_label*": self.label,
"int_used_label*": self.label,
"int_wasGeneratedBy_label*": self.label,
"ver_wasDerivedFrom1": {"arrowhead": "none"},
"ver_wasDerivedFrom0": {"style":"dashed"},
"ver_hadMember_label*": self.label,
"ver_wasDerivedFrom_label*": self.label,
"ver_used_label*": self.label,
"ver_wasGeneratedBy_label*": self.label,
})
def label(self, label, attrs):
if label:
return {
"fontsize": self.labelsize,
"distance": "1.5",
"angle": "60.0",
"rotation": "20",
"label": label.replace('"', '\\"')
}
return {}
EXPORT = NoHighlightStyle
``` |
{
"source": "JoaoFelipe/nbsvg",
"score": 2
} |
#### File: nbsvg/components/cell.py
```python
from lxml.builder import E
from .base import StylizedElement
from .code import Code
from .markdown import Markdown
from .group import GroupSequence
from .output import display_data, stream_output, Error
from .text import Text
from .image import SVGGroup
def inout(number, ey, color, style, text=""):
inoutpositiony = getattr(style, 'inoutpositiony', style.fontsize + ey)
return E.g(
E.g(
E.text(
E.tspan(f"{text}[{number}]:", {'fill': f"{color}"}),
{
'x': f'{style.input_width - 3}',
'y': f'{inoutpositiony}',
'{http://www.w3.org/XML/1998/namespace}space': 'preserve',
'text-anchor': 'end'
}
), {
'font-family': 'monospace',
'font-size': f'{style.fontsize}px'
}
)
)
class CellInput(StylizedElement):
def __init__(self, number, text, **kwargs):
super().__init__(**kwargs)
self.number = number
self.text = text
def build(self, style):
style.input_width += 25 if style.showtext else 0
self.element = inout(
self.number, 6 + 5, "#307fc1", style,
text="In " if style.showtext else ""
)
code = Code(self.text).translate(style.input_width, 5)
self.element.append(code.do_build(style, width=style.width - style.input_width).element)
self.width = style.width
self.height = code.height + 10
def __repr__(self):
return f'CellInput({self.number!r}, {self.text!r})'
class CellOutput(StylizedElement):
def __init__(self, number, output, **kwargs):
super().__init__(**kwargs)
self.number = number
self.output = output
def build(self, style):
style.input_width += 25 if style.showtext else 0
self.element = inout(
self.number, 0, "#bf5b3d", style,
text="Out" if style.showtext else ""
)
output = self.output.translate(style.input_width + 7, 0)
self.element.append(output.do_build(style, width=style.width - style.input_width).element)
self.width = style.width
self.height = output.height + 5
def __repr__(self):
return f'CellOutput({self.number!r}, {self.output!r})'
class CellDisplay(StylizedElement):
def __init__(self, output, **kwargs):
super().__init__(**kwargs)
self.output = output
def build(self, style):
self.element = E.g()
output = self.output.translate(style.input_width + 7, 0)
self.element.append(output.do_build(style, width=style.width - style.input_width).element)
self.width = style.width
self.height = output.height
def __repr__(self):
return f'CellDisplay({self.output!r})'
class Cell(StylizedElement):
def __init__(self, cell, **kwargs):
super().__init__(**kwargs)
self.cell = cell
self._replace_cell = None
self._replace_input = None
self._remove_input = False
self._replace_outputs = None
self._remove_outputs = False
self._replace_result = None
self._replace_display = None
self._replace_execution_count = None
self._result_kwargs = {}
self._display_kwargs = {}
self.result = None
def replace_cell(self, component):
self._replace_cell = component
def replace_input(self, component):
self._replace_input = component
def remove_input(self):
self._remove_input = True
def replace_outputs(self, component):
self._replace_outputs = component
def remove_outputs(self):
self._remove_outputs = True
def replace_result(self, component):
self._replace_result = component
def replace_display(self, component):
self._replace_display = component
def result_kwargs(self, kwargs):
self._result_kwargs = kwargs
def display_kwargs(self, kwargs):
self._display_kwargs = kwargs
def replace_execution_count(self, ec):
self._replace_execution_count = ec
def build(self, style):
cell_type = self.cell.get('cell_type', '')
source = self.cell.get('source', '')
if self._replace_cell:
result = self._replace_cell.do_build(style)
elif cell_type == 'markdown':
result = Markdown(source).do_build(style)
elif cell_type == 'code':
result = GroupSequence(group_margin=0)
execution_count = self._replace_execution_count or self.cell.get('execution_count', ' ') or ' '
cell_input = self._replace_input or CellInput(execution_count, source)
if not self._remove_input:
result.add(cell_input)
if not self._remove_outputs:
if self._replace_outputs:
result.add(self._replace_outputs)
else:
for output in self.cell.get('outputs', []):
output_type = output.get('output_type', '')
if output_type == 'execute_result':
result.add(self._replace_result or CellOutput(
execution_count,
display_data(output.get('data', {}), **self._result_kwargs)
))
elif output_type == 'stream':
result.add(self._replace_display or CellDisplay(stream_output(output)))
elif output_type == 'display_data':
result.add(self._replace_display or CellDisplay(
display_data(output.get('data', {}), **self._display_kwargs)
))
elif output_type == 'error':
result.add(CellDisplay(Error(output)))
result = result.do_build(style)
else:
result = Text(source).do_build(style)
self.result = result
self.element = result.element
self.width = result.width
self.height = result.height
def __repr__(self):
return f'Cell({self.cell!r})'
def ellipsis():
return CellDisplay(SVGGroup(
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="16" data-icon="ui-components:ellipses"><g xmlns="http://www.w3.org/2000/svg" class="jp-icon3" fill="#616161"><circle cx="5" cy="12" r="2"></circle><circle cx="12" cy="12" r="2"></circle><circle cx="19" cy="12" r="2"></circle></g></svg>',
default_height=24
)).translate(-6, 0)
```
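A hedged rendering sketch for `Cell` (the import paths and the notebook-cell dict are illustrative assumptions; `do_build` comes from `StylizedElement`, as used throughout the file above):
```python
from nbsvg.components.cell import Cell  # hypothetical import path
from nbsvg.style import STYLE

cell_json = {
    "cell_type": "code",
    "execution_count": 1,
    "source": "print(1 + 1)",
    "outputs": [{"output_type": "stream", "name": "stdout", "text": "2\n"}],
}
cell = Cell(cell_json)
cell.do_build(STYLE)  # populates cell.element (an lxml node), cell.width, cell.height
```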
#### File: nbsvg/components/drawing.py
```python
from lxml.builder import E
from lxml import etree
from .base import StylizedElement
from .text import Text
class TextBox(StylizedElement):
def __init__(self, text, width=None, height=None, fill="white", stroke="blue", align='start', padding=5, **kwargs):
super().__init__(**kwargs)
self.text = text
self.width = width
self.height = height
self.fill = fill
self.stroke = stroke
self.padding = padding
self.align = align
def build(self, style):
align = self.align
text = Text(self.text, textanchor=align).do_build(style)
self.width = self.width or text.width + 2*self.padding
self.height = self.height or text.height + 2*self.padding
if align == 'start':
text = text.translate(self.padding, self.padding)
elif align == 'middle':
text = text.translate(self.width/2, self.padding)
elif align == 'end':
text = text.translate(self.width - self.padding, self.padding)
text = text.do_build(style)
self.element = E.g(
E.rect({
'x': '0', 'y': '0', 'width': f'{self.width - 1}', 'height': f'{self.height}',
'fill': self.fill, 'stroke': self.stroke
}),
text.element
)
class SVGNode(StylizedElement):
def __init__(self, text, width=1, height=1, **kwargs):
super().__init__(**kwargs)
self.text = text
self.width = width
self.height = height
def build(self, style):
self.element = etree.XML(self.text)
```
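A quick `TextBox` sketch under the same assumptions (import paths are hypothetical; `STYLE` is the shared style object defined in the style module shown below):
```python
from nbsvg.components.drawing import TextBox  # hypothetical import path
from nbsvg.style import STYLE

box = TextBox("hello", align="middle", fill="#eee", stroke="black")
box.do_build(STYLE)  # sizes the box around the text plus padding
print(box.width, box.height)
```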
#### File: nbsvg/components/html.py
```python
from .image import Image
from .base import StylizedElement
class HTML(StylizedElement):
def __init__(self, html, **kwargs):
super().__init__(**kwargs)
self.html = html
def build(self, style):
import imgkit
res = imgkit.from_string(self.html, False)
image = Image(res).do_build(style)
self.width = image.width
self.height = image.height
self.element = image.element
def __repr__(self):
return f'HTML({self.html!r})'
```
#### File: nbsvg/nbsvg/style.py
```python
from copy import copy
class Style:
old = []
input_width = 35
width = 700
fontsize = 10
showtext = False
code_padding = 7
group_margin = 10
fontfamily = 'monospace'
fontwidth_proportion = 0.6
textanchor = 'start'
table_bold_fontwidth_proportion = 0.67
table_fontwidth_proportion = 0.6
table_colpadding = 5
table_fontsize = 8
table_oversize_proportion = 1.3
table_fontfamily = '-apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'
markdown_fontfamily = '-apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'
p_fontsize = 9
p_oversize_proportion = 1.3
p_fontwidth_proportion = 0.6
h1_fontsize = 19
h1_oversize_proportion = 1.3
h1_fontwidth_proportion = 0.6
h2_fontsize = 16
h2_oversize_proportion = 1.3
h2_fontwidth_proportion = 0.6
h3_fontsize = 13
h3_oversize_proportion = 1.3
h3_fontwidth_proportion = 0.6
h4_fontsize = 11
h4_oversize_proportion = 1.3
h4_fontwidth_proportion = 0.6
h5_fontsize = 9
h5_oversize_proportion = 1.3
h5_fontwidth_proportion = 0.6
h6_fontsize = 8
h6_oversize_proportion = 1.3
h6_fontwidth_proportion = 0.6
def apply(self, kwargs, this=True):
result = self if this else copy(self)
result.old.append({})
for key, value in kwargs.items():
result.old[-1][key] = getattr(self, key, None)
setattr(result, key, value)
return result
def undo(self):
old = self.old.pop()
for key, value in old.items():
setattr(self, key, value)
def getsizeintable(self, text, bold):
prop = self.table_fontwidth_proportion
if bold:
prop = self.table_bold_fontwidth_proportion
return len(text) * prop * self.table_fontsize + self.table_colpadding
def fontlen(self, width, name):
return int(
width
// (getattr(self, f'{name}_fontsize') * getattr(self, f'{name}_fontwidth_proportion'))
* getattr(self, f'{name}_oversize_proportion')
)
class PilStyle(Style):
fonttype = "SEGOEUI.TTF"
bold_fonttype = "SEGOEUIB.TTF"
def getsizeintable(self, text, bold):
from PIL import ImageFont
cfont = ImageFont.truetype(self.fonttype, self.table_fontsize)
if bold:
cfont = ImageFont.truetype(self.bold_fonttype, self.table_fontsize)
return cfont.getsize(text)[0] + self.table_colpadding
STYLE = Style()
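# Sketch of the apply/undo contract used by components:
#   STYLE.apply({'fontsize': 12})  # temporarily override attributes
#   ...                            # build components with the modified style
#   STYLE.undo()                   # restore the previous values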
``` |
{
"source": "JoaoFelipe/PyPosAST",
"score": 3
} |
#### File: PyPosAST/pyposast/parser.py
```python
from __future__ import (absolute_import, division)
import bisect
import tokenize
from collections import OrderedDict, defaultdict
from .cross_version import StringIO
from .constants import (KEYWORDS, COMBINED_KEYWORDS, SEMI_KEYWORDS,
FUTURE_KEYWORDS, PAST_KEYWORKDS)
class ElementDict(OrderedDict):
def __init__(self, *args, **kwargs):
super(ElementDict, self).__init__(*args, **kwargs)
self._bkeys = None
def set_keys(self):
if not self._bkeys:
self._bkeys = list(self.keys())
def find_next(self, position, inclusive=False):
self.set_keys()
if inclusive:
position = (position[0], position[1] + 1)
index = bisect.bisect_left(self._bkeys, position)
key = self._bkeys[index]
value = self[key]
return key, value
def find_previous(self, position, inclusive=False):
self.set_keys()
if inclusive:
position = (position[0], position[1] + 1)
index = bisect.bisect_left(self._bkeys, position)
key = self._bkeys[index - 1]
value = self[key]
return key, value
def r_find_next(self, position):
key, value = self.find_next(position)
return value, key
def r_find_previous(self, position):
key, value = self.find_previous(position)
return value, key
class StackElement(dict):
def __init__(self, open_str, close_str):
super(StackElement, self).__init__()
self.open = open_str
self.close = close_str
self.stack = []
def check(self, t_string, t_srow_scol, t_erow_ecol):
if t_string == self.open:
self.stack.append(t_srow_scol)
elif t_string == self.close:
self[self.stack.pop()] = t_erow_ecol
def apply_delta(original, dline, doffset):
return (original[0] + dline, original[1] + doffset)
class TokenCollector(object):
def __init__(self):
self.stacks = self.parenthesis, self.sbrackets, self.brackets = [
StackElement(*x) for x in (('(', ')'), ('[', ']'), ('{', '}'))
]
self.strings, self.attributes, self.numbers = {}, {}, {}
self.operators = defaultdict(dict)
self.names = defaultdict(dict)
self.tokens = []
def loop(self, code, dline=0, doffset=0):
last = None
dots = 0 # number of dots
first_dot = None
f = StringIO(code)
for tok in tokenize.generate_tokens(f.readline):
self.tokens.append(tok)
t_type, t_string, t_srow_scol, t_erow_ecol, t_line = tok
            # Apply delta so positions from recursive f-string parsing line up with the outer code
t_srow_scol = apply_delta(t_srow_scol, dline, doffset)
t_erow_ecol = apply_delta(t_erow_ecol, dline, doffset)
tok = [t_type, t_string, t_srow_scol, t_erow_ecol, t_line]
            tok.append(False)  # whether the token must wait for the next token (future keyword handling)
if t_type == tokenize.OP:
for stack in self.stacks:
stack.check(t_string, t_srow_scol, t_erow_ecol)
if t_string == '.':
if not dots:
first_dot = tok
dots += 1
if dots == 3: # Python 2
self.operators['...'][t_erow_ecol] = first_dot[2]
dots = 0
first_dot = None
self.operators[t_string][t_erow_ecol] = t_srow_scol
elif t_type == tokenize.STRING:
if t_string.startswith('f'): # Python 3.6
inner = t_string[2:-1]
stack = []
for index, char in enumerate(inner):
if char == "{":
if not stack or stack[-1] != index - 1:
stack.append(index)
if char == "}" and stack:
oindex = stack.pop()
sub = inner[oindex + 1:index]
self.brackets.check(
"{",
apply_delta(t_srow_scol, 0, oindex + 2),
apply_delta(t_srow_scol, 0, oindex + 3),
)
self.brackets.check(
"}",
apply_delta(t_srow_scol, 0, index + 2),
apply_delta(t_srow_scol, 0, index + 3),
)
self.loop(sub, t_srow_scol[0] - 1, oindex + 2)
start = t_srow_scol
if last and last[0] == tokenize.STRING:
start = self.strings[last[3]]
del self.strings[last[3]]
self.strings[t_erow_ecol] = start
elif t_type == tokenize.NUMBER:
self.numbers[t_erow_ecol] = t_srow_scol
elif t_type == tokenize.NAME and t_string == 'elif':
self.operators['if'][t_erow_ecol] = t_srow_scol
elif t_type == tokenize.NAME and t_string in PAST_KEYWORKDS.keys():
if t_type == tokenize.NAME and t_string in KEYWORDS:
self.operators[t_string][t_erow_ecol] = t_srow_scol
if last and last[1] == PAST_KEYWORKDS[t_string]:
combined = "{} {}".format(last[1], t_string)
self.operators[combined][t_erow_ecol] = last[2]
elif t_string in FUTURE_KEYWORDS:
tok[5] = True
else:
self.operators[t_string][t_erow_ecol] = t_srow_scol
elif t_type == tokenize.NAME and t_string in FUTURE_KEYWORDS:
tok[5] = True
elif t_string in SEMI_KEYWORDS:
self.operators[t_string][t_erow_ecol] = t_srow_scol
elif t_type == tokenize.NAME and t_string in KEYWORDS:
self.operators[t_string][t_erow_ecol] = t_srow_scol
elif t_type == tokenize.NAME and dots == 1:
self.attributes[t_erow_ecol] = first_dot[2]
dots = 0
first_dot = None
if t_string != '.':
dots = 0
if last and last[1] in FUTURE_KEYWORDS and last[5]:
self.operators[last[1]][last[3]] = last[2]
if t_type == tokenize.NAME or t_string == 'None':
self.names[t_string][t_erow_ecol] = t_srow_scol
if t_type != tokenize.NL:
last = tok
def extract_tokens(code, return_tokens=False):
    # Should I implement an LL(1) parser?
toc = TokenCollector()
toc.loop(code)
if return_tokens:
return toc.tokens
result = [
ElementDict(sorted(toc.parenthesis.items())),
ElementDict(sorted(toc.sbrackets.items())),
ElementDict(sorted(toc.brackets.items())),
ElementDict(sorted(toc.strings.items())),
ElementDict(sorted(toc.attributes.items())),
ElementDict(sorted(toc.numbers.items())),
]
operators = {k: ElementDict(sorted(v.items()))
for k, v in toc.operators.items()}
names = {k: ElementDict(sorted(v.items()))
for k, v in toc.names.items()}
return result, operators, names
```
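A small probe of `extract_tokens` (hedged; based only on the return structure above -- each `ElementDict` maps a token's end position to its start position, with 1-indexed rows and 0-indexed columns):
```python
result, operators, names = extract_tokens("x = a + b")
parenthesis, sbrackets, brackets, strings, attributes, numbers = result
print(operators['='])  # ElementDict([((1, 3), (1, 2))]) -- end -> start
print(names['a'])      # ElementDict([((1, 5), (1, 4))])
```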
#### File: PyPosAST/tests/test_extra.py
```python
from __future__ import (absolute_import, division)
import ast
import textwrap
from .utils import get_nodes, NodeTestCase, only_python2, only_python3
class TestExtra(NodeTestCase):
def test_noworkflow_var(self):
code = """
def x(a=1):
return a
for j in range(3):
for i in range(j):
print(i)
i = i**2
i += 2
class A():
pass
a = x(a=2)
a = b = c = 1
a = range(5)
A.a = c
a[b] = b
e = b, c = c, 1
a, (b, c) = b, e
a += (lambda b: b)(a)
b = a
a = 2
c = {
'a': a,
'b': b
}
d = [a, b, c]
d[1] += 1
print(a)
print(b)
print(c)
a, b = 1, c
"""
code = textwrap.dedent(code)
nodes = get_nodes(code, ast.FunctionDef)
self.assertPosition(nodes[0], (2, 0), (3, 12), (2, 3))
nodes = get_nodes(code, ast.For)
self.assertPosition(nodes[0], (4, 0), (8, 14), (4, 3))
self.assertPosition(nodes[1], (5, 4), (8, 14), (5, 7))
nodes = get_nodes(code, ast.ClassDef)
self.assertPosition(nodes[0], (10, 0), (11, 8), (10, 5))
nodes = get_nodes(code, ast.Assign)
self.assertPosition(nodes[0], (7, 8), (7, 16), (7, 16))
self.assertPosition(nodes[1], (13, 0), (13, 10), (13, 10))
self.assertPosition(nodes[2], (14, 0), (14, 13), (14, 13))
self.assertPosition(nodes[3], (15, 0), (15, 12), (15, 12))
self.assertPosition(nodes[4], (16, 0), (16, 7), (16, 7))
self.assertPosition(nodes[5], (17, 0), (17, 8), (17, 8))
self.assertPosition(nodes[6], (18, 0), (18, 15), (18, 15))
self.assertPosition(nodes[7], (19, 0), (19, 16), (19, 16))
self.assertPosition(nodes[8], (21, 0), (21, 5), (21, 5))
self.assertPosition(nodes[9], (22, 0), (22, 5), (22, 5))
self.assertPosition(nodes[10], (23, 0), (26, 1), (26, 1))
self.assertPosition(nodes[11], (27, 0), (27, 13), (27, 13))
self.assertPosition(nodes[12], (34, 0), (34, 11), (34, 11))
nodes = get_nodes(code, ast.List)
self.assertPosition(nodes[0], (27, 4), (27, 13), (27, 13))
def test_update_parenthesis(self):
code = ("patterns('',\n"
" # url(r'^$', 'views.home', name='home')\n"
"\n"
" url(r'^index$', 'views.index', name='index'),\n"
" url(r'^root$', 'views.root', name='root'),\n"
" # url(r'^$', 'views.home', name='home'),\n"
")")
nodes = get_nodes(code, ast.Call)
self.assertPosition(nodes[0], (1, 0), (7, 1), (7, 1))
self.assertPosition(nodes[1], (4, 4), (4, 48), (4, 48))
self.assertPosition(nodes[2], (5, 4), (5, 45), (5, 45))
def test_assign_tuple(self):
code = ("abc.mno = func()\n"
"abc.pqr.ghi = ()\n"
"abc.jkl = b''")
nodes = get_nodes(code, ast.Assign)
self.assertPosition(nodes[0], (1, 0), (1, 16), (1, 16))
self.assertPosition(nodes[1], (2, 0), (2, 16), (2, 16))
self.assertPosition(nodes[2], (3, 0), (3, 13), (3, 13))
def test_relative_import_and_assign_attribute(self):
code = ("from ..a import b\n"
"abc.ghi = [jkl.mno.pqr(name=name) for name in 'abc']")
nodes = get_nodes(code, ast.Assign)
self.assertPosition(nodes[0], (2, 0), (2, 52), (2, 52))
def test_update_parenthesis2(self):
code = ("a = fn(\n"
" b=1,\n"
" c=[\n"
" c.d(\n"
" e='a',\n"
" f='b',\n"
" g=c,\n"
" )\n"
" ]\n"
"\n"
")")
nodes = get_nodes(code, ast.List)
self.assertPosition(nodes[0], (3, 6), (9, 5), (9, 5))
def test_attribute(self):
code = ("(abc.ghi(ijk=[abc.lmn, abc.opq]).\n"
" rst('a').uvw('a') |\n"
" abc.ghi(ijk=[abc.xyz]).\n"
" rst('a').uvw('a'),\n"
" ['b', 'c'],"
")")
nodes = get_nodes(code, ast.Attribute)
self.assertPosition(nodes[0], (1, 1), (2, 16), (2, 13))
self.assertPosition(nodes[1], (1, 1), (2, 7), (1, 33))
self.assertPosition(nodes[2], (1, 1), (1, 8), (1, 5))
self.assertPosition(nodes[3], (1, 14), (1, 21), (1, 18))
self.assertPosition(nodes[4], (1, 23), (1, 30), (1, 27))
self.assertPosition(nodes[5], (3, 1), (4, 16), (4, 13))
self.assertPosition(nodes[6], (3, 1), (4, 7), (3, 24))
self.assertPosition(nodes[7], (3, 1), (3, 8), (3, 5))
self.assertPosition(nodes[8], (3, 14), (3, 21), (3, 18))
def test_name(self):
code = (b"#bla\n"
b"abc")
nodes = get_nodes(code, ast.Name)
self.assertPosition(nodes[0], (2, 0), (2, 3), (2, 3))
```
#### File: PyPosAST/tests/test_misc.py
```python
from __future__ import (absolute_import, division)
import ast
from .utils import get_nodes, NodeTestCase
from .utils import only_python2, only_python3, only_python35, only_python36, only_python38
class TestMisc(NodeTestCase):
# pylint: disable=missing-docstring, too-many-public-methods
def test_index(self):
code = ("#bla\n"
"a[1]")
nodes = get_nodes(code, ast.Index)
self.assertPosition(nodes[0], (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice(self):
code = ("#bla\n"
"a[1:2:3]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (2, 7), (2, 4))
self.assertOperation(nodes[0].op_pos[0], (2, 3), (2, 4), (2, 4), ':')
self.assertOperation(nodes[0].op_pos[1], (2, 5), (2, 6), (2, 6), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice2(self):
code = ("#bla\n"
"a[:\\\n"
"2:3]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (3, 3), (2, 3))
self.assertOperation(nodes[0].op_pos[0], (2, 2), (2, 3), (2, 3), ':')
self.assertOperation(nodes[0].op_pos[1], (3, 1), (3, 2), (3, 2), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice3(self):
code = ("#bla\n"
"a[:\\\n"
":2]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (3, 2), (2, 3))
self.assertOperation(nodes[0].op_pos[0], (2, 2), (2, 3), (2, 3), ':')
self.assertOperation(nodes[0].op_pos[1], (3, 0), (3, 1), (3, 1), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice4(self):
code = ("#bla\n"
"a[:]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (2, 3), (2, 3))
self.assertOperation(nodes[0].op_pos[0], (2, 2), (2, 3), (2, 3), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice5(self):
code = ("#bla\n"
"a[::]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (2, 4), (2, 3))
self.assertOperation(nodes[0].op_pos[0], (2, 2), (2, 3), (2, 3), ':')
self.assertOperation(nodes[0].op_pos[1], (2, 3), (2, 4), (2, 4), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice6(self):
code = ("#bla\n"
"a[11:2\\\n"
":]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (3, 1), (2, 5))
self.assertOperation(nodes[0].op_pos[0], (2, 4), (2, 5), (2, 5), ':')
self.assertOperation(nodes[0].op_pos[1], (3, 0), (3, 1), (3, 1), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice7(self):
code = ("#bla\n"
"a[::None]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (2, 8), (2, 3))
self.assertOperation(nodes[0].op_pos[0], (2, 2), (2, 3), (2, 3), ':')
self.assertOperation(nodes[0].op_pos[1], (2, 3), (2, 4), (2, 4), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_slice8(self):
code = ("s = None\n"
"a[::]")
nodes = get_nodes(code, ast.Slice)
self.assertPosition(nodes[0], (2, 2), (2, 4), (2, 3))
self.assertOperation(nodes[0].op_pos[0], (2, 2), (2, 3), (2, 3), ':')
self.assertOperation(nodes[0].op_pos[1], (2, 3), (2, 4), (2, 4), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_ext_slice(self):
code = ("#bla\n"
"a[1:2,3]")
nodes = get_nodes(code, ast.ExtSlice)
self.assertPosition(nodes[0], (2, 2), (2, 7), (2, 6))
self.assertOperation(nodes[0].op_pos[0], (2, 5), (2, 6), (2, 6), ',')
self.assertNoBeforeInnerAfter(nodes[0])
def test_ext_slice2(self):
code = ("#bla\n"
"a[1:2:,3]")
nodes = get_nodes(code, ast.ExtSlice)
self.assertPosition(nodes[0], (2, 2), (2, 8), (2, 7))
self.assertOperation(nodes[0].op_pos[0], (2, 6), (2, 7), (2, 7), ',')
self.assertNoBeforeInnerAfter(nodes[0])
def test_ext_slice3(self):
code = ("#bla\n"
"a[3,1:2:]")
nodes = get_nodes(code, ast.ExtSlice)
self.assertPosition(nodes[0], (2, 2), (2, 8), (2, 4))
self.assertOperation(nodes[0].op_pos[0], (2, 3), (2, 4), (2, 4), ',')
self.assertNoBeforeInnerAfter(nodes[0])
def test_eq(self):
code = ("#bla\n"
"2 == 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
def test_not_eq(self):
code = ("#bla\n"
"2 != 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
def test_not_eq2(self):
""" Python 2 syntax """
code = ("#bla\n"
"2 != 4\n"
"5 != 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
comp2 = nodes[1].op_pos[0]
self.assertPosition(comp2, (3, 2), (3, 4), (3, 4))
self.assertNoBeforeInnerAfter(comp2)
@only_python2
def test_not_eq3(self):
""" Python 2 syntax """
code = ("#bla\n"
"2 <> 4\n"
"5 != 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
comp2 = nodes[1].op_pos[0]
self.assertPosition(comp2, (3, 2), (3, 4), (3, 4))
self.assertNoBeforeInnerAfter(comp2)
def test_lt(self):
code = ("#bla\n"
"2 < 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(comp)
def test_lte(self):
code = ("#bla\n"
"2 <= 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
def test_gt(self):
code = ("#bla\n"
"2 > 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(comp)
def test_gte(self):
code = ("#bla\n"
"2 >= 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
def test_is(self):
code = ("#bla\n"
"2 is 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
def test_is2(self):
code = ("#bla\n"
"(2)is(4)\n"
"(3)is(5)")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 3), (2, 5), (2, 5))
self.assertNoBeforeInnerAfter(comp)
comp2 = nodes[1].op_pos[0]
self.assertPosition(comp2, (3, 3), (3, 5), (3, 5))
self.assertNoBeforeInnerAfter(comp2)
def test_is_not(self):
code = ("#bla\n"
"2 is not 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 8), (2, 8))
self.assertNoBeforeInnerAfter(comp)
def test_in(self):
code = ("#bla\n"
"2 in 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(comp)
def test_not_in(self):
code = ("#bla\n"
"2 not in 4")
nodes = get_nodes(code, ast.Compare)
comp = nodes[0].op_pos[0]
self.assertPosition(comp, (2, 2), (2, 8), (2, 8))
self.assertNoBeforeInnerAfter(comp)
def test_comprehension(self):
code = ("#bla\n"
"[x\n"
" for x in l\n"
" if x]")
nodes = get_nodes(code, ast.comprehension)
self.assertPosition(nodes[0], (3, 1), (4, 5), (3, 4))
self.assertOperation(nodes[0].op_pos[0], (3, 1), (3, 4), (3, 4), 'for')
self.assertOperation(nodes[0].op_pos[1], (3, 7), (3, 9), (3, 9), 'in')
self.assertOperation(nodes[0].op_pos[2], (4, 1), (4, 3), (4, 3), 'if')
self.assertNoBeforeInnerAfter(nodes[0])
def test_comprehension2(self):
code = ("#bla\n"
"[x\n"
" for x in l\n"
" if x - 2\n"
" if x]")
nodes = get_nodes(code, ast.comprehension)
self.assertPosition(nodes[0], (3, 1), (5, 5), (3, 4))
self.assertOperation(nodes[0].op_pos[0], (3, 1), (3, 4), (3, 4), 'for')
self.assertOperation(nodes[0].op_pos[1], (3, 7), (3, 9), (3, 9), 'in')
self.assertOperation(nodes[0].op_pos[2], (4, 1), (4, 3), (4, 3), 'if')
self.assertOperation(nodes[0].op_pos[3], (5, 1), (5, 3), (5, 3), 'if')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python36
def test_comprehension3(self):
code = ("async def f():\n"
" [x\n"
" async for x in l\n"
" if x]")
nodes = get_nodes(code, ast.comprehension)
self.assertPosition(nodes[0], (3, 5), (4, 9), (3, 14))
self.assertOperation(nodes[0].op_pos[0], (3, 5), (3, 14), (3, 14), 'async for')
self.assertOperation(nodes[0].op_pos[1], (3, 17), (3, 19), (3, 19), 'in')
self.assertOperation(nodes[0].op_pos[2], (4, 5), (4, 7), (4, 7), 'if')
self.assertNoBeforeInnerAfter(nodes[0])
def test_comprehension4(self):
code = ("#bla\n"
"[(x)for(x)in(l)if(x)]\n"
"[(y)for(y)in(m)if(y)]")
nodes = get_nodes(code, ast.comprehension)
self.assertPosition(nodes[0], (2, 4), (2, 20), (2, 7))
self.assertOperation(nodes[0].op_pos[0], (2, 4), (2, 7), (2, 7), 'for')
self.assertOperation(nodes[0].op_pos[1], (2, 10), (2, 12), (2, 12), 'in')
self.assertOperation(nodes[0].op_pos[2], (2, 15), (2, 17), (2, 17), 'if')
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (3, 4), (3, 20), (3, 7))
self.assertOperation(nodes[1].op_pos[0], (3, 4), (3, 7), (3, 7), 'for')
self.assertOperation(nodes[1].op_pos[1], (3, 10), (3, 12), (3, 12), 'in')
self.assertOperation(nodes[1].op_pos[2], (3, 15), (3, 17), (3, 17), 'if')
self.assertNoBeforeInnerAfter(nodes[1])
@only_python3
def test_arg(self):
code = ("#bla\n"
"def f(x: 'a', y):\n"
" pass")
nodes = get_nodes(code, ast.arg)
self.assertPosition(nodes[0], (2, 6), (2, 12), (2, 12))
self.assertOperation(nodes[0].op_pos[0], (2, 7), (2, 8), (2, 8), ':')
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (2, 14), (2, 15), (2, 15))
self.assertNoBeforeInnerAfter(nodes[1])
def test_arguments(self):
code = ("#bla\n"
"lambda x, y=2, *z, **w : x")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 7), (2, 22), (2, 22))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 11), (2, 12), (2, 12), '=')
self.assertOperation(nodes[0].op_pos[2], (2, 13), (2, 14), (2, 14), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 15), (2, 16), (2, 16), '*')
self.assertOperation(nodes[0].op_pos[4], (2, 17), (2, 18), (2, 18), ',')
self.assertOperation(nodes[0].op_pos[5], (2, 19), (2, 21), (2, 21), '**')
self.assertPosition(nodes[0].vararg_node, (2, 16), (2, 17), (2, 17))
self.assertPosition(nodes[0].kwarg_node, (2, 21), (2, 22), (2, 22))
self.assertNoBeforeInnerAfter(nodes[0])
@only_python3
def test_arguments2(self):
code = ("#bla\n"
"lambda x, *, y=2: x")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 7), (2, 16), (2, 16))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 10), (2, 11), (2, 11), '*')
self.assertOperation(nodes[0].op_pos[2], (2, 11), (2, 12), (2, 12), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 14), (2, 15), (2, 15), '=')
self.assertNoBeforeInnerAfter(nodes[0])
def test_arguments3(self):
code = ("#bla\n"
"lambda : 2")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 8), (2, 8), (2, 8))
self.assertEqual(len(nodes[0].op_pos), 0)
self.assertNoBeforeInnerAfter(nodes[0])
def test_arguments4(self):
code = ("#bla\n"
"def f( x, y=2, *z, **w ):\n"
" x")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 7), (2, 22), (2, 22))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 11), (2, 12), (2, 12), '=')
self.assertOperation(nodes[0].op_pos[2], (2, 13), (2, 14), (2, 14), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 15), (2, 16), (2, 16), '*')
self.assertOperation(nodes[0].op_pos[4], (2, 17), (2, 18), (2, 18), ',')
self.assertOperation(nodes[0].op_pos[5], (2, 19), (2, 21), (2, 21), '**')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python3
def test_arguments5(self):
code = ("#bla\n"
"def f(x, *, y=2): x")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 15), (2, 15))
self.assertOperation(nodes[0].op_pos[0], (2, 7), (2, 8), (2, 8), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 9), (2, 10), (2, 10), '*')
self.assertOperation(nodes[0].op_pos[2], (2, 10), (2, 11), (2, 11), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 13), (2, 14), (2, 14), '=')
self.assertNoBeforeInnerAfter(nodes[0])
def test_arguments6(self):
code = ("#bla\n"
"def f( ):\n"
" 2")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 8), (2, 8), (2, 8))
self.assertEqual(len(nodes[0].op_pos), 0)
self.assertNoBeforeInnerAfter(nodes[0])
def test_arguments7(self):
code = ("#bla\n"
"def f():\n"
" 2")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 6), (2, 6))
self.assertEqual(len(nodes[0].op_pos), 0)
self.assertNoBeforeInnerAfter(nodes[0])
@only_python3
def test_arguments8(self):
code = ("#bla\n"
"def f(x, *, y, z=2): x")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 18), (2, 18))
self.assertOperation(nodes[0].op_pos[0], (2, 7), (2, 8), (2, 8), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 9), (2, 10), (2, 10), '*')
self.assertOperation(nodes[0].op_pos[2], (2, 10), (2, 11), (2, 11), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 13), (2, 14), (2, 14), ',')
self.assertOperation(nodes[0].op_pos[4], (2, 16), (2, 17), (2, 17), '=')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python38
def test_positional_only(self):
code = ("#bla\n"
"def f(p1, p2, /, p_or_kw, *, kw):\n"
" pass")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 31), (2, 31))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 12), (2, 13), (2, 13), ',')
self.assertOperation(nodes[0].op_pos[2], (2, 14), (2, 15), (2, 15), '/')
self.assertOperation(nodes[0].op_pos[3], (2, 15), (2, 16), (2, 16), ',')
self.assertOperation(nodes[0].op_pos[4], (2, 24), (2, 25), (2, 25), ',')
self.assertOperation(nodes[0].op_pos[5], (2, 26), (2, 27), (2, 27), '*')
self.assertOperation(nodes[0].op_pos[6], (2, 27), (2, 28), (2, 28), ',')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python38
def test_positional_only2(self):
code = ("#bla\n"
"def f(p1, p2=None, /, p_or_kw=None, *, kw):\n"
" pass")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 41), (2, 41))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 12), (2, 13), (2, 13), '=')
self.assertOperation(nodes[0].op_pos[2], (2, 17), (2, 18), (2, 18), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 19), (2, 20), (2, 20), '/')
self.assertOperation(nodes[0].op_pos[4], (2, 20), (2, 21), (2, 21), ',')
self.assertOperation(nodes[0].op_pos[5], (2, 29), (2, 30), (2, 30), '=')
self.assertOperation(nodes[0].op_pos[6], (2, 34), (2, 35), (2, 35), ',')
self.assertOperation(nodes[0].op_pos[7], (2, 36), (2, 37), (2, 37), '*')
self.assertOperation(nodes[0].op_pos[8], (2, 37), (2, 38), (2, 38), ',')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python38
def test_positional_only3(self):
code = ("#bla\n"
"def f(p1, p2=None, /, *, kw):\n"
" pass")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 27), (2, 27))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 12), (2, 13), (2, 13), '=')
self.assertOperation(nodes[0].op_pos[2], (2, 17), (2, 18), (2, 18), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 19), (2, 20), (2, 20), '/')
self.assertOperation(nodes[0].op_pos[4], (2, 20), (2, 21), (2, 21), ',')
self.assertOperation(nodes[0].op_pos[5], (2, 22), (2, 23), (2, 23), '*')
self.assertOperation(nodes[0].op_pos[6], (2, 23), (2, 24), (2, 24), ',')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python38
def test_positional_only4(self):
code = ("#bla\n"
"def f(p1, p2=None, /):\n"
" pass")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 20), (2, 20))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 12), (2, 13), (2, 13), '=')
self.assertOperation(nodes[0].op_pos[2], (2, 17), (2, 18), (2, 18), ',')
self.assertOperation(nodes[0].op_pos[3], (2, 19), (2, 20), (2, 20), '/')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python38
def test_positional_only5(self):
code = ("#bla\n"
"def f(p1, p2, /, p_or_kw):\n"
" pass")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 24), (2, 24))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 12), (2, 13), (2, 13), ',')
self.assertOperation(nodes[0].op_pos[2], (2, 14), (2, 15), (2, 15), '/')
self.assertOperation(nodes[0].op_pos[3], (2, 15), (2, 16), (2, 16), ',')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python38
def test_positional_only6(self):
code = ("#bla\n"
"def f(p1, p2, /):\n"
" pass")
nodes = get_nodes(code, ast.arguments)
self.assertPosition(nodes[0], (2, 6), (2, 15), (2, 15))
self.assertOperation(nodes[0].op_pos[0], (2, 8), (2, 9), (2, 9), ',')
self.assertOperation(nodes[0].op_pos[1], (2, 12), (2, 13), (2, 13), ',')
self.assertOperation(nodes[0].op_pos[2], (2, 14), (2, 15), (2, 15), '/')
self.assertNoBeforeInnerAfter(nodes[0])
def test_invert(self):
code = ("#bla\n"
"~a")
nodes = get_nodes(code, ast.UnaryOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 0), (2, 1), (2, 1))
self.assertNoBeforeInnerAfter(op)
def test_not(self):
code = ("#bla\n"
"not a")
nodes = get_nodes(code, ast.UnaryOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 0), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_usub(self):
code = ("#bla\n"
"-a")
nodes = get_nodes(code, ast.UnaryOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 0), (2, 1), (2, 1))
self.assertNoBeforeInnerAfter(op)
def test_uadd(self):
code = ("#bla\n"
"+a")
nodes = get_nodes(code, ast.UnaryOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 0), (2, 1), (2, 1))
self.assertNoBeforeInnerAfter(op)
def test_add(self):
code = ("#bla\n"
"a + a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_sub(self):
code = ("#bla\n"
"a - a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_mult(self):
code = ("#bla\n"
"a * a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
@only_python35
def test_matmult(self):
code = ("#bla\n"
"a @ a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_div(self):
code = ("#bla\n"
"a / a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_mod(self):
code = ("#bla\n"
"a % a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_pow(self):
code = ("#bla\n"
"a ** a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(op)
def test_lshift(self):
code = ("#bla\n"
"a << a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(op)
def test_rshift(self):
code = ("#bla\n"
"a >> a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(op)
def test_bitor(self):
code = ("#bla\n"
"a | a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_bitand(self):
code = ("#bla\n"
"a & a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 3), (2, 3))
self.assertNoBeforeInnerAfter(op)
def test_floordiv(self):
code = ("#bla\n"
"a // a")
nodes = get_nodes(code, ast.BinOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(op)
def test_and(self):
code = ("#bla\n"
"a and b")
nodes = get_nodes(code, ast.BoolOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 5), (2, 5))
self.assertNoBeforeInnerAfter(op)
def test_or(self):
code = ("#bla\n"
"a or b")
nodes = get_nodes(code, ast.BoolOp)
op = nodes[0].op_pos[0]
self.assertPosition(op, (2, 2), (2, 4), (2, 4))
self.assertNoBeforeInnerAfter(op)
def test_alias(self):
code = ("#bla\n"
"import a,\\\n"
"b as c")
nodes = get_nodes(code, ast.alias)
self.assertPosition(nodes[0], (2, 7), (2, 8), (2, 8))
self.assertEqual(len(nodes[0].op_pos), 0)
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (3, 0), (3, 6), (3, 6))
self.assertOperation(nodes[1].op_pos[0], (3, 2), (3, 4), (3, 4), 'as')
self.assertNoBeforeInnerAfter(nodes[1])
def test_excepthandler(self):
code = ("#bla\n"
"try:\n"
" a\n"
"except Exception1:\n"
" b\n"
"except Exception2:\n"
" c")
nodes = get_nodes(code, ast.excepthandler)
self.assertPosition(nodes[0], (4, 0), (5, 5), (4, 6))
self.assertOperation(nodes[0].op_pos[0], (4, 0), (4, 6), (4, 6), 'except')
self.assertOperation(nodes[0].op_pos[1], (4, 17), (4, 18), (4, 18), ':')
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (6, 0), (7, 5), (6, 6))
self.assertOperation(nodes[1].op_pos[0], (6, 0), (6, 6), (6, 6), 'except')
self.assertOperation(nodes[1].op_pos[1], (6, 17), (6, 18), (6, 18), ':')
self.assertNoBeforeInnerAfter(nodes[1])
@only_python2
def test_excepthandler2(self):
code = ("#bla\n"
"try:\n"
" a\n"
"except Exception1, target:\n"
" b")
nodes = get_nodes(code, ast.excepthandler)
self.assertPosition(nodes[0], (4, 0), (5, 5), (4, 6))
self.assertOperation(nodes[0].op_pos[0], (4, 0), (4, 6), (4, 6), 'except')
self.assertOperation(nodes[0].op_pos[1], (4, 17), (4, 18), (4, 18), ',')
self.assertOperation(nodes[0].op_pos[2], (4, 25), (4, 26), (4, 26), ':')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python2
def test_excepthandler3(self):
code = ("#bla\n"
"try:\n"
" a\n"
"except (Exception1, Exception2), target:\n"
" b")
nodes = get_nodes(code, ast.excepthandler)
self.assertPosition(nodes[0], (4, 0), (5, 5), (4, 6))
self.assertOperation(nodes[0].op_pos[0], (4, 0), (4, 6), (4, 6), 'except')
self.assertOperation(nodes[0].op_pos[1], (4, 31), (4, 32), (4, 32), ',')
self.assertOperation(nodes[0].op_pos[2], (4, 39), (4, 40), (4, 40), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_excepthandler4(self):
code = ("#bla\n"
"try:\n"
" a\n"
"except Exception1 as target:\n"
" b")
nodes = get_nodes(code, ast.excepthandler)
self.assertPosition(nodes[0], (4, 0), (5, 5), (4, 6))
self.assertOperation(nodes[0].op_pos[0], (4, 0), (4, 6), (4, 6), 'except')
self.assertOperation(nodes[0].op_pos[1], (4, 18), (4, 20), (4, 20), 'as')
self.assertOperation(nodes[0].op_pos[2], (4, 27), (4, 28), (4, 28), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_excepthandler5(self):
code = ("#bla\n"
"try:\n"
" a\n"
"except (Exception1, Exception2) as target:\n"
" b")
nodes = get_nodes(code, ast.excepthandler)
self.assertPosition(nodes[0], (4, 0), (5, 5), (4, 6))
self.assertOperation(nodes[0].op_pos[0], (4, 0), (4, 6), (4, 6), 'except')
self.assertOperation(nodes[0].op_pos[1], (4, 32), (4, 34), (4, 34), 'as')
self.assertOperation(nodes[0].op_pos[2], (4, 41), (4, 42), (4, 42), ':')
self.assertNoBeforeInnerAfter(nodes[0])
def test_excepthandler6(self):
code = ("#bla\n"
"try:\n"
" a\n"
"except(Exception1):\n"
" b\n"
"except(Exception2):\n"
" c")
nodes = get_nodes(code, ast.excepthandler)
self.assertPosition(nodes[0], (4, 0), (5, 5), (4, 6))
self.assertOperation(nodes[0].op_pos[0], (4, 0), (4, 6), (4, 6), 'except')
self.assertOperation(nodes[0].op_pos[1], (4, 18), (4, 19), (4, 19), ':')
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (6, 0), (7, 5), (6, 6))
self.assertOperation(nodes[1].op_pos[0], (6, 0), (6, 6), (6, 6), 'except')
self.assertOperation(nodes[1].op_pos[1], (6, 18), (6, 19), (6, 19), ':')
self.assertNoBeforeInnerAfter(nodes[1])
@only_python3
def test_withitem(self):
code = ("#bla\n"
"with x as a, y:\n"
" a")
nodes = get_nodes(code, ast.withitem)
self.assertPosition(nodes[0], (2, 5), (2, 11), (2, 6))
self.assertOperation(nodes[0].op_pos[0], (2, 7), (2, 9), (2, 9), 'as')
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (2, 13), (2, 14), (2, 14))
self.assertEqual(len(nodes[1].op_pos), 0)
self.assertNoBeforeInnerAfter(nodes[1])
@only_python3
def test_keyword(self):
code = ("#bla\n"
"@dec1\n"
"class a(metaclass=object):\n"
" pass")
nodes = get_nodes(code, ast.keyword)
self.assertPosition(nodes[0], (3, 8), (3, 24), (3, 18))
self.assertOperation(nodes[0].op_pos[0], (3, 17), (3, 18), (3, 18), '=')
self.assertNoBeforeInnerAfter(nodes[0])
def test_keyword2(self):
code = ("#bla\n"
"f(a=2)")
nodes = get_nodes(code, ast.keyword)
self.assertPosition(nodes[0], (2, 2), (2, 5), (2, 4))
self.assertOperation(nodes[0].op_pos[0], (2, 3), (2, 4), (2, 4), '=')
self.assertNoBeforeInnerAfter(nodes[0])
@only_python35
def test_keyword3(self):
code = ("#bla\n"
"f(x, a=2, **b, ** c)")
nodes = get_nodes(code, ast.keyword)
self.assertPosition(nodes[0], (2, 5), (2, 8), (2, 7))
self.assertOperation(nodes[0].op_pos[0], (2, 6), (2, 7), (2, 7), '=')
self.assertNoBeforeInnerAfter(nodes[0])
self.assertPosition(nodes[1], (2, 10), (2, 13), (2, 12))
self.assertOperation(nodes[1].op_pos[0], (2, 10), (2, 12), (2, 12), '**')
self.assertNoBeforeInnerAfter(nodes[1])
self.assertPosition(nodes[2], (2, 15), (2, 19), (2, 17))
self.assertOperation(nodes[2].op_pos[0], (2, 15), (2, 17), (2, 17), '**')
self.assertNoBeforeInnerAfter(nodes[2])
```
#### File: PyPosAST/tests/utils.py
```python
from __future__ import (absolute_import, division)
import unittest
from pyposast.cross_version import only_python2, only_python3
from pyposast.cross_version import only_python35, only_python36
from pyposast.cross_version import only_python38
from pyposast import get_nodes
class NodeTestCase(unittest.TestCase):
"""Base test case"""
# pylint: disable=invalid-name
def assertPosition(self, node, first, last, uid, messages=None):
"""Check node positions"""
# pylint: disable=no-self-use
node_first = (node.first_line, node.first_col)
node_last = (node.last_line, node.last_col)
messages = messages or []
if not node_first == first:
messages.append(
'first does not match: {} != {}'.format(node_first, first))
if not node_last == last:
messages.append(
'last does not match: {} != {}'.format(node_last, last))
if not node.uid == uid:
messages.append(
'uid does not match: {} != {}'.format(node.uid, uid))
if messages:
raise AssertionError('\n'.join(messages))
def assertOperation(self, node, first, last, uid, kind):
messages = []
if not node.kind == kind:
messages.append(
'kind does not match: {} != {}'.format(node.kind, kind)
)
self.assertPosition(node, first, last, uid, messages=messages)
def assertNoBeforeInnerAfter(self, node):
"""Check if node does not have pos_before, pos_inner, pos_after"""
self.assertFalse(hasattr(node, 'pos_before'))
self.assertFalse(hasattr(node, 'pos_inner'))
self.assertFalse(hasattr(node, 'pos_after'))
def assertSimpleInnerPosition(self, node, first, last):
"""Check pos_before, pos_inner, pos_after"""
node_first = (node.first_line, node.first_col)
node_last = (node.last_line, node.last_col)
self.assertPosition(node.pos_before, node_first, first, first)
self.assertPosition(node.pos_inner, first, last, last)
self.assertPosition(node.pos_after, last, node_last, node_last)
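# Usage sketch (an assumption, not part of the original module): exercising
# the position convention the assertions above encode.
if __name__ == "__main__":
    import ast
    for node in get_nodes("x = 1\ny = x + 2\n", ast.Name):
        # first/last are 1-based line, 0-based column pairs; uid is the anchor.
        print((node.first_line, node.first_col),
              (node.last_line, node.last_col), node.uid)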
``` |
{
"source": "joaoferreira7991/QS-Selenium",
"score": 3
} |
#### File: QS-Selenium/app/main.py
```python
import unittest
import page
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("--headless")
class Test01_Register(unittest.TestCase):
# This URL is the result of a successful sign up;
# we can use it to verify whether the test passed or not.
_expected_url = 'https://smart-home-assistant.herokuapp.com/sign_in'
# Setup functions called when initialized
def setUp(self):
self.driver = webdriver.Chrome('./driver/chromedriver.exe')
self.driver.get('https://smart-home-assistant.herokuapp.com/sign_up')
'''
Verify if a user cannot sign up when the email field
is empty.
'''
def test011_verify_email_field(self):
# get register page
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Insert username.
regPage.username_text_element = 'Tufao'
# Insert email.
regPage.email_text_element = ''
# Insert password
regPage.password_text_element = '<PASSWORD>'
# Insert confirm password
regPage.confirm_password_text_element = '<PASSWORD>'
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
'''
Verify if a user cannot sign up when the username field
is empty.
'''
def test012_verify_username_field(self):
# Get the sign up page.
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Insert username.
regPage.username_text_element = ''
# Insert email.
regPage.email_text_element = '<EMAIL>'
# Insert password.
regPage.password_text_element = '<PASSWORD>'
# Insert confirm password
regPage.confirm_password_text_element = '123'
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
'''
Verify if a user cannot sign up when the password field
is empty.
'''
def test013_verify_password_field(self):
# Get the sign up page.
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Insert username.
regPage.username_text_element = 'Tufao'
# Insert email.
regPage.email_text_element = '<EMAIL>'
# Insert empty password.
regPage.password_text_element = ''
# Insert confirm password
regPage.confirm_password_text_element = '<PASSWORD>'
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
'''
Verify if a user cannot sign up when the password confirm field
is empty.
'''
def test014_verify_password_confirm_field(self):
# Get the sign up page.
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Insert username.
regPage.username_text_element = 'Tufao'
# Insert email.
regPage.email_text_element = '<EMAIL>'
# Insert password.
regPage.password_text_element = '<PASSWORD>'
# Insert confirm password
regPage.confirm_password_text_element = ''
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
'''
Verify if a user cannot sign up when all fields are empty.
'''
def test015_empty_fields(self):
# Get the sign up page.
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
'''
Verify if a user will be able to sign up with a valid username, a valid email
and a valid password with an invalid password confirmation.
'''
def test016_invalid_password(self):
# Get the sign up page.
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Insert username.
regPage.username_text_element = 'Tufao'
# Insert email.
regPage.email_text_element = '<EMAIL>'
# Insert password.
regPage.password_text_element = '<PASSWORD>'
# Insert different password confirmation.
regPage.confirm_password_text_element = '<PASSWORD>'
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
'''
Verify if a user will be able to sign up with a valid username, a valid email
and a valid password/password confirmation.
'''
def test017_valid_sign_up(self):
# Get the sign up page.
regPage = page.RegisterPage(self.driver)
assert regPage.is_title_matches()
# Insert username.
regPage.username_text_element = 'Tufao'
# Insert email.
regPage.email_text_element = '<EMAIL>'
# Insert password.
regPage.password_text_element = '<PASSWORD>'
# Insert password confirmation.
regPage.confirm_password_text_element = '<PASSWORD>'
# Click button.
regPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# End of test
def tearDown(self):
self.driver.close()
class Test02_Login(unittest.TestCase):
# This URL is only accessible as a result of a successful login;
# we can use it to verify whether the test passed or not.
_expected_url = 'https://smart-home-assistant.herokuapp.com/index'
# Setup functions called when initialized
def setUp(self):
self.driver = webdriver.Chrome('./driver/chromedriver.exe')
self.driver.get('https://smart-home-assistant.herokuapp.com/sign_in')
def test020_verify_username_field(self):
# Get the login page elements.
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
def test021_verify_password_field(self):
# Enter the login page.
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username.
loginPage.username_text_element = 'Tufao'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
def test022_empty_fields(self):
# Enter the login page.
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare the current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
def test023_invalid_password(self):
# Enter the login page.
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertNotEqual(self.driver.current_url, self._expected_url)
def test024_valid_sign_in(self):
# Enter the login page.
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# End of test
def tearDown(self):
self.driver.close()
class Test03_Dashboard(unittest.TestCase):
# This URL is only accessible as a result of a successful login;
# we can use it to verify whether the test passed or not.
_expected_url = 'https://smart-home-assistant.herokuapp.com/index'
# Setup functions called when initialized
def setUp(self):
self.driver = webdriver.Chrome('./driver/chromedriver.exe')
self.driver.get('https://smart-home-assistant.herokuapp.com/sign_in')
# Actuator form related tests.
'''
Test if the user cannot submit the actuator form with all fields empty
'''
def test0300_actuator_form_all_fields_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add actuator button
dbPage.actuator_click_add_button()
# Insert empty name
dbPage.actuator_name_text_element = ''
# Insert empty IPv4 address
dbPage.actuator_ip_text_element = ''
# Click button
dbPage.actuator_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if both fields had been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.actuator_name_text_element)
self.assertEqual('Required Field.', dbPage.actuator_ip_text_element)
'''
Test if the user cannot submit the actuator form with an empty device name.
'''
def test0301_actuator_form_name_field_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add actuator button
dbPage.actuator_click_add_button()
# Insert empty name
dbPage.actuator_name_text_element = ''
# Insert valid IPv4 address
dbPage.actuator_ip_text_element = '192.168.1.65'
# Click button
dbPage.actuator_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if name field has been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.actuator_name_text_element)
'''
Test if the user cannot submit the actuator form with an empty IP field.
'''
def test0302_actuator_form_ip_field_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add actuator button
dbPage.actuator_click_add_button()
# Insert valid name
dbPage.actuator_name_text_element = 'smart switch'
# Insert empty IPv4 address
dbPage.actuator_ip_text_element = ''
# Click button
dbPage.actuator_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if ip field has been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.actuator_ip_text_element)
'''
Test if the user can submit an appropriate actuator form
'''
def test0303_actuator_form_success(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add actuator button
dbPage.actuator_click_add_button()
# Insert name
dbPage.actuator_name_text_element = 'smart switch'
# Insert valid IPv4 address
dbPage.actuator_ip_text_element = '192.168.1.65'
# Click button
dbPage.actuator_click_submit_button()
# Wait 1 second
time.sleep(1)
# Check if a new actuator frame was created.
assert dbPage.actuator_frame_exists()
# Controller form related tests.
def test0304_controller_form_all_fields_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert empty name
dbPage.controller_name_text_element = ''
# Insert empty red
dbPage.controller_red_text_element = ''
# Insert empty green
dbPage.controller_green_text_element = ''
# Insert empty blue
dbPage.controller_blue_text_element = ''
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if all fields had been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.controller_name_text_element)
self.assertEqual('Required Field.', dbPage.controller_red_text_element)
self.assertEqual('Required Field.', dbPage.controller_green_text_element)
self.assertEqual('Required Field.', dbPage.controller_blue_text_element)
def test0305_controller_form_name_field_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert empty name
dbPage.controller_name_text_element = ''
# Insert valid red pin
dbPage.controller_red_text_element = '17'
# Insert valid green pin
dbPage.controller_green_text_element = '27'
# Insert valid blue pin
dbPage.controller_blue_text_element = '22'
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if name field has been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.controller_name_text_element)
def test0306_controller_form_red_field_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert valid name
dbPage.controller_name_text_element = 'led strip'
# Insert empty red
dbPage.controller_red_text_element = ''
# Insert valid green pin
dbPage.controller_green_text_element = '27'
# Insert valid blue pin
dbPage.controller_blue_text_element = '22'
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if red field has been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.controller_red_text_element)
def test0307_controller_form_green_field_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert valid name
dbPage.controller_name_text_element = 'led strip'
# Insert valid red pin
dbPage.controller_red_text_element = '17'
# Insert empty green
dbPage.controller_green_text_element = ''
# Insert valid blue pin
dbPage.controller_blue_text_element = '22'
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if green field has been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.controller_green_text_element)
def test0308_controller_form_blue_field_empty(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert valid name
dbPage.controller_name_text_element = 'led strip'
# Insert valid red pin
dbPage.controller_red_text_element = '17'
# Insert valid green pin
dbPage.controller_green_text_element = '27'
# Insert empty blue
dbPage.controller_blue_text_element = ''
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if blue field has been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.controller_blue_text_element)
def test0309_controller_form_malformed_pin_values(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert valid name
dbPage.controller_name_text_element = 'led strip'
# Insert malformed red pin value
dbPage.controller_red_text_element = 'qwrwerqr'
# Insert malformed green pin value
dbPage.controller_green_text_element = 'qwerwerqw'
# Insert malformed blue pin value
dbPage.controller_blue_text_element = 'werwrw'
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Verify if pin fields have been filled with 'Required Field.'
self.assertEqual('Required Field.', dbPage.controller_red_text_element)
self.assertEqual('Required Field.', dbPage.controller_green_text_element)
self.assertEqual('Required Field.', dbPage.controller_blue_text_element)
def test0310_controller_form_success(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click the add controller button
dbPage.controller_click_add_button()
# Insert valid name
dbPage.controller_name_text_element = 'led strip'
# Insert valid red pin
dbPage.controller_red_text_element = '17'
# Insert valid green pin
dbPage.controller_green_text_element = '27'
# Insert valid blue pin
dbPage.controller_blue_text_element = '22'
# Click button
dbPage.controller_click_submit_button()
# Wait 1 second
time.sleep(1)
# Check if a new controller frame was created.
assert dbPage.controller_frame_exists()
# Actuator button related tests.
def test0311_actuator_frame_onoff_button(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click button, then wait for it to turn on
dbPage.actuator_frame_click_onoff()
time.sleep(5)
# Assert if color has changed
assert dbPage.is_actuator_on()
def test0312_actuator_frame_delete_button(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click delete button
dbPage.actuator_frame_click_delete()
# Check that the frame no longer exists
assert not dbPage.actuator_frame_exists()
# Controller button related tests.
def test0313_controller_frame_colorshift_button_while_off(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click colorshift button
dbPage.controller_frame_click_colorshift()
# Check if it changed color
assert not (dbPage.is_controller_colorshift_on())
def test0314_controller_frame_onoff_button(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '123'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click button, then wait for it to turn on
dbPage.controller_frame_click_onoff()
time.sleep(5)
# Assert if color has changed
assert dbPage.is_controller_on()
def test0315_controller_frame_colorshift_button_while_on(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click colorshift button
dbPage.controller_frame_click_colorshift()
time.sleep(5)
# Check if it changed color
assert dbPage.is_controller_colorshift_on()
def test0316_controller_frame_delete_button(self):
# Get sign in page
loginPage = page.LoginPage(self.driver)
assert loginPage.is_title_matches()
# Insert username that is valid for login.
loginPage.username_text_element = 'Tufao'
# Insert correct password.
loginPage.password_text_element = '<PASSWORD>'
# Click button.
loginPage.click_submit_button()
# Wait 1 second
time.sleep(1)
# Compare current url with the expected one.
self.assertEqual(self.driver.current_url, self._expected_url)
# Get dashboard page
dbPage = page.DashboardPage(self.driver)
assert dbPage.is_title_matches()
# Click delete button
dbPage.controller_frame_click_delete()
# Check that the frame no longer exists
assert not dbPage.controller_frame_exists()
# End of test
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main(verbosity=2)
``` |
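The tests above import a `page` module that is not included in this entry. Assignments like `regPage.username_text_element = 'Tufao'` suggest descriptor-based page objects in the classic Selenium page-object style. A minimal sketch of what such a module could look like follows — the class names match the imports, but every locator and title below is an assumption:
```python
# Hypothetical sketch of QS-Selenium/app/page.py; locators and titles are
# assumptions inferred from how the tests drive the pages.
from selenium.webdriver.common.by import By


class BasePageElement(object):
    """Descriptor that types into / reads back a named <input> element."""

    locator = None  # each subclass names the form field it wraps

    def __set__(self, obj, value):
        field = obj.driver.find_element(By.NAME, self.locator)
        field.clear()
        field.send_keys(value)

    def __get__(self, obj, owner):
        return obj.driver.find_element(By.NAME, self.locator).get_attribute("value")


class UsernameElement(BasePageElement):
    locator = "username"  # assumed field name


class PasswordElement(BasePageElement):
    locator = "password"  # assumed field name


class LoginPage(object):
    """Page object for the sign-in page; RegisterPage and DashboardPage
    would follow the same pattern with more elements."""

    username_text_element = UsernameElement()
    password_text_element = PasswordElement()

    def __init__(self, driver):
        self.driver = driver

    def is_title_matches(self):
        return "Smart Home" in self.driver.title  # assumed page title

    def click_submit_button(self):
        self.driver.find_element(By.XPATH, "//button[@type='submit']").click()
```
Because `BasePageElement` defines `__set__`, it is a data descriptor: assigning `loginPage.username_text_element = 'Tufao'` routes through `__set__`, which is what lets the tests read like plain attribute assignments.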
{
"source": "joaofig/auto-k-means",
"score": 3
} |
#### File: joaofig/auto-k-means/DistortionCurve.py
```python
class DistortionCurve:
"""Stores the distortion curve as a function of K, the number of clusters."""
def __init__(self):
self.distortions = dict()
def add(self, k, distortion):
self.distortions[k] = distortion
def get(self, k):
return self.distortions[k]
def __getitem__(self, item):
return self.distortions[item]
def __setitem__(self, key, value):
self.distortions[key] = value
```
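A short usage sketch (assuming scikit-learn's `KMeans`, which the estimators below rely on as well), filling the curve with inertia values:
```python
# Usage sketch: populate the curve with k-means inertia for k = 1..10.
import numpy as np
from sklearn.cluster import KMeans

X = np.random.default_rng(0).normal(size=(200, 2))
curve = DistortionCurve()
for k in range(1, 11):
    curve[k] = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X).inertia_
print(curve.get(3))
```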
#### File: auto-k-means/KEstimators/AsankaPerera.py
```python
import numpy as np
import math
from sklearn.cluster import MiniBatchKMeans
class KEstimator:
"""Estimates the K-Means K hyperparameter through geometrical analysis of the distortion curve"""
def __init__(self, cluster_fn=None):
self.K = 0
self.cluster = cluster_fn
@staticmethod
def distance_to_line(x0, y0, x1, y1, x2, y2):
"""
Calculates the distance from (x0,y0) to the
line defined by (x1,y1) and (x2,y2)
"""
dx = x2 - x1
dy = y2 - y1
return abs(dy * x0 - dx * y0 + x2 * y1 - y2 * x1) / \
math.sqrt(dx * dx + dy * dy)
def fit(self, X, tolerance=1e-3):
"""Fits the value of K"""
max_distance = -1
s_k_list = list()
sk0 = 0
for k in range(1, len(X) + 1):
sk1 = self.cluster(X, k)
s_k_list.append(sk1)
if k > 2 and abs(sk0 - sk1) < tolerance:
break
sk0 = sk1
s_k = np.array(s_k_list)
x0 = 1
y0 = s_k[0]
x1 = len(s_k)
y1 = 0
for k in range(1, len(s_k)):
dist = self.distance_to_line(k, s_k[k-1], x0, y0, x1, y1)
if dist > max_distance:
max_distance = dist
else:
self.K = k - 1
break
return self
def fit_s_k(self, s_k, tolerance=1e-3):
"""Fits the value of K using the s_k series"""
max_distance = float('-inf')
s_k_list = list()
sk0 = 0
# Fit the maximum K
for k in s_k:
sk1 = s_k[k]
s_k_list.append(sk1)
if k > 2 and abs(sk0 - sk1) < tolerance:
break
sk0 = sk1
s_k = np.array(s_k_list)
# Get the line endpoints
x0 = 1
y0 = s_k[0]
x1 = len(s_k)
y1 = 0
# Now find the largest distance
for k in range(1, len(s_k)):
dist = self.distance_to_line(k, s_k[k-1], x0, y0, x1, y1)
if dist > max_distance:
max_distance = dist
else:
self.K = k - 1
break
return self
```
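`distance_to_line` is the standard point-to-line distance `|dy*x0 - dx*y0 + x2*y1 - y2*x1| / sqrt(dx^2 + dy^2)`; a quick numeric check with illustrative values:
```python
# Numeric check of the point-to-line distance used above: the distance from
# the origin to the line through (0, 1) and (1, 0) should be 1/sqrt(2).
import math

d = KEstimator.distance_to_line(0, 0, 0, 1, 1, 0)
assert math.isclose(d, 1 / math.sqrt(2))
```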
#### File: auto-k-means/KEstimators/Radius.py
```python
import math
from sklearn.cluster import MiniBatchKMeans
class KEstimator:
def __init__(self):
self.K = 0
@staticmethod
def calculate_s_k(X, k):
km = MiniBatchKMeans(n_clusters=k, random_state=42).fit(X)
return km.inertia_ # -km.score(df) #
def fit(self, X, max_k=50, tolerance=3):
min_distance = 0
for k in range(1, len(X) + 1):
sk = self.calculate_s_k(X, k)
radius = math.sqrt(k * k + sk * sk)
if k == 1:
min_distance = radius
if radius <= min_distance:
min_distance = radius
self.K = k
elif k - self.K > tolerance:
break
```
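The radius estimator treats each `(k, s_k)` point as a vector from the origin and keeps the `k` with the smallest norm, stopping once `tolerance` further values of `k` pass without improvement. A usage sketch on synthetic, min-max normalized data (all parameter values are illustrative):
```python
# Usage sketch: estimate K on normalized blob data with three clusters.
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=600, centers=3, cluster_std=1.0, random_state=42)
X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
est = KEstimator()
est.fit(X, tolerance=3)
print(est.K)  # the estimated number of clusters
```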
#### File: joaofig/auto-k-means/whiteboard.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import torch
from sklearn.datasets import make_blobs  # samples_generator was removed in newer scikit-learn
from KEstimators import PhamDimovNguyen
from KEstimators import Riddle
from KEstimators import RiddleTorch
#
#
# def load_data_1024():
# df = pd.read_csv('data/data_1024.csv',
# delim_whitespace=True)
# df = df.drop('Driver_ID', axis=1)
# df = df.rename(index=str, columns={"Distance_Feature": "x", "Speeding_Feature": "y"})
# df = df[['x','y']]
# df = (df - df.min()) / (df.max() - df.min())
# return df
#
#
# def load_a1():
# df = pd.read_csv('data/a1.txt', names=['x', 'y'], delim_whitespace=True, dtype=np.float64)
# df = (df - df.min()) / (df.max() - df.min())
# return df
#
#
# def load_unbalance():
# df = pd.read_csv('data/unbalance.txt', names=['x', 'y'], delim_whitespace=True, dtype=np.float64)
# df = (df - df.min()) / (df.max() - df.min())
# return df
#
#
# def load_dim2():
# df = pd.read_csv('data/dim2.txt', names=['x', 'y'], delim_whitespace=True, dtype=np.float64)
# df = (df - df.min()) / (df.max() - df.min())
# return df
#
#
# def load_HTRU_2():
# df = pd.read_csv('data/HTRU2/HTRU_2.csv', dtype=np.float64)
# df = (df - df.min()) / (df.max() - df.min())
# return df
def load_data():
clusters = random.randint(1, 20)
cluster_std = random.uniform(0.5, 8)
print('K={0}'.format(clusters))
X, y = make_blobs(n_samples=200*clusters,
centers=clusters,
cluster_std=cluster_std,
n_features=2,
center_box=(-50.0, 50.0))
df = pd.DataFrame(data=X, columns=['x', 'y'])
return df
# from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
def calculate_s_k(X, k):
km = KMeans(n_clusters=k).fit(X)
return km.inertia_
def run():
pham_estimator = PhamDimovNguyen.KEstimator()
# asanka_estimator = AsankaPerera.KEstimator()
riddle_estimator = Riddle.KEstimator()
torch_estimator = RiddleTorch.KEstimator()
s_k_t = torch.zeros(51)
df = load_data()
s_k = dict()
dim = len(df.columns)
print(len(df))
x_range = range(1, 51)
for k in x_range:
km = KMeans(n_clusters=k).fit(df)
s_k[k] = km.inertia_
s_k_t[k] = km.inertia_
print(s_k[k])
# asanka_estimator.fit_s_k(s_k, tolerance=1e-3)
# print('Asanka : {0}'.format(asanka_estimator.K))
pham_estimator.fit_s_k(s_k, max_k=50, dim=dim)
print('PhamDN : {0}'.format(pham_estimator.K))
riddle_estimator.fit_s_k(s_k, max_k=50)
print('Riddle : {0}'.format(riddle_estimator.K))
torch_estimator.fit(s_k_t)
print('Torch : {0}'.format(torch_estimator.K))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(df.x, df.y)
ax1.set_title('Clusters')
ax2.plot(np.log(np.arange(1, 51, dtype=np.float32)), np.log(s_k_t.numpy()[1:]))
ax2.set_title('log-log Scree')
plt.show()
if __name__ == '__main__':
run()
``` |
{
"source": "joaofig/dublin-buses",
"score": 4
} |
#### File: joaofig/dublin-buses/explore.py
```python
import pandas as pd
import numpy as np
def load_day(day):
header = ['timestamp', 'line_id', 'direction', 'jrny_patt_id', 'time_frame', 'journey_id', 'operator',
'congestion', 'lon', 'lat', 'delay', 'block_id', 'vehicle_id', 'stop_id', 'at_stop']
types = {'timestamp': np.int64,
'journey_id': np.int32,
'congestion': np.int8,
'lon': np.float64,
'lat': np.float64,
'delay': np.int8,
'vehicle_id': np.int32,
'at_stop': np.int8}
file_name = 'data/siri.201301{0:02d}.csv'.format(day)
df = pd.read_csv(file_name, header=None, names=header, dtype=types, parse_dates=['time_frame'], infer_datetime_format=True)
null_replacements = {'line_id': 0, 'stop_id': 0}
df = df.fillna(value=null_replacements)
df['line_id'] = df['line_id'].astype(np.int32)
df['stop_id'] = df['stop_id'].astype(np.int32)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='us')
return df
def haversine_np(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
All args must be of equal length.
Taken from here: https://stackoverflow.com/questions/29545704/fast-haversine-approximation-python-pandas#29546836
"""
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
#c = 2 * np.arcsin(np.sqrt(a))
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
meters = 6372000.0 * c
return meters
def calculate_durations(data_frame, vehicle_id):
one_second = np.timedelta64(1000000000, 'ns')
dv = data_frame[data_frame['vehicle_id']==vehicle_id]
ts = dv.timestamp.values
dtd = ts[1:] - ts[:-1]
dt = np.zeros(len(dtd) + 1)
dt[1:] = dtd / one_second
return dt
def calculate_distances(data_frame, vehicle_id):
dv = data_frame[data_frame['vehicle_id']==vehicle_id]
lat = dv.lat.values
lon = dv.lon.values
dxm = haversine_np(lon[1:], lat[1:], lon[:-1], lat[:-1])
dx = np.zeros(len(dxm) + 1)
dx[1:] = dxm
return dx
def filter_columns(df):
columns = ['timestamp', 'direction', 'journey_id', 'congestion', 'lon', 'lat', 'delay', 'vehicle_id', 'stop_id', 'at_stop']
return df[columns]
def run():
days = None
for d in range(31):
print("Day {0}".format(d + 1))
day = filter_columns(load_day(d + 1))
day['dt'] = 0.0
day['dx'] = 0.0
day['speed'] = 0.0
if days is None:
days = day
else:
days = pd.concat([days, day])  # DataFrame.append was removed in pandas 2.x
vehicles = days['vehicle_id'].unique()
for v in vehicles:
print("Vehicle {0}".format(v))
vehicle_selector = days['vehicle_id'] == v
days.loc[vehicle_selector, 'dt'] = calculate_durations(days, v)
days.loc[vehicle_selector, 'dx'] = calculate_distances(days, v)
speed_selector = days['dt'] > 0
days.loc[speed_selector, 'speed'] = days[speed_selector].dx / days[speed_selector].dt * 3.6
# Filter invalid points (speeds over 100 km/h)
days = days[days['speed'] < 100.0]
days.to_csv('data/201301.csv', index=False)
if __name__ == "__main__":
run()
``` |
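The haversine distance is the backbone of the speed computation above; a quick sanity check, assuming `haversine_np` from this file is in scope (coordinates are illustrative Dublin-area values):
```python
# Sanity check for haversine_np: one degree of latitude is ~111 km
# anywhere on the sphere. Note the (lon, lat) argument order.
import numpy as np

d = haversine_np(np.array([-6.26]), np.array([53.35]),
                 np.array([-6.26]), np.array([54.35]))
print(d)  # ~111 km with the 6372 km Earth radius used above
```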
{
"source": "joaofig/geonaja",
"score": 3
} |
#### File: joaofig/geonaja/geonaja.py
```python
import urllib.request
import os.path
import zipfile
import numpy as np
import math
import joblib
from typing import List
class ElevationProvider(object):
"""Base elevation provider class"""
def __init__(self):
pass
@staticmethod
def get_tile_xy(latitude: float,
longitude: float) -> (int, int):
"""
Given the location's latitude and longitude, return the corresponding
elevation tile coordinates.
:param latitude: Location latitude in decimal degrees
:param longitude: Location longitude in decimal degrees
:return: Tuple containing the elevation tile coordinates
"""
x = int((longitude + 180.0) / 5.0) + 1
y = int(-latitude / 5.0) + 12
return x, y
@staticmethod
def get_tile_name_xy(x: int, y: int) -> str:
"""
Given the elevation tile coordinates, return the tile name.
:param x: Elevation tile x coordinate
:param y: Elevation tile y coordinate
:return: Elevation tile file name
"""
return "srtm_{0:02d}_{1:02d}".format(x, y)
def get_tile_name(self, latitude: float, longitude: float) -> str:
"""
Given the location's latitude and longitude, return the corresponding
elevation tile file name.
:param latitude: Location latitude in decimal degrees
:param longitude: Location longitude in decimal degrees
:return: Elevation tile file name
"""
x, y = self.get_tile_xy(latitude, longitude)
return self.get_tile_name_xy(x, y)
@staticmethod
def download_tile(tile_name: str, dir_name: str) -> str:
"""
Downloads an elevation tile into a cache directory.
:param tile_name: Elevation tile file name
:param dir_name: Cache directory
:return: Local file name
"""
zip_name = tile_name + ".zip"
url = "http://srtm.csi.cgiar.org/wp-content/uploads/files/" \
"srtm_5x5/ASCII/" + zip_name
file_name = os.path.join(dir_name, zip_name)
urllib.request.urlretrieve(url, file_name)
return file_name
class ElevationTile(object):
"""
Elevation tile - contains all the information of an elevation tile file
"""
def __init__(self, rows, cols, x_ll, y_ll, cell_size, x=-1, y=-1):
self.array = None
self.rows = rows
self.cols = cols
self.x_ll = x_ll
self.y_ll = y_ll
self.cell_size = cell_size
self.x = x
self.y = y
def get_row_col(self,
latitude: float,
longitude: float) -> (int, int):
"""
Given the location's latitude and longitude, return the corresponding
elevation tile cell coordinates.
:param latitude: Location latitude in decimal degrees
:param longitude: Location longitude in decimal degrees
:return: The array coordinates of the elevation value
"""
row = self.rows - math.trunc((latitude - self.y_ll) /
self.cell_size + 0.5)
col = math.trunc((longitude - self.x_ll) / self.cell_size + 0.5)
return row, col
def get_elevation(self,
latitude: float,
longitude: float) -> np.int32:
"""
Gets the elevation of the tile element corresponding to the given
location's latitude and longitude.
:param latitude: Location latitude in decimal degrees
:param longitude: Location longitude in decimal degrees
:return: Elevation in meters at the corresponding cell
"""
row, col = self.get_row_col(latitude, longitude)
return self.array[row, col]
def create_array(self):
"""
Creates the elevation array
:return: None
"""
if self.array is None:
self.array = np.zeros((self.rows, self.cols), dtype=np.int32)
class FileElevationProvider(ElevationProvider):
"""
A simple elevation provider that does not preprocess the downloaded tiles.
Tile files are stored as the original zipped trio of files, and decoded
upon file load. This process is slow when loading from file.
"""
def __init__(self, cache_dir):
self.cache_dir = cache_dir
self.tile_dict = {}
@staticmethod
def parse_text(content: List[str]) -> ElevationTile:
rows = 0
cols = 0
x_ll = 0.0
y_ll = 0.0
cell = 0.0
for i in range(6):
line = content[i].decode("utf-8")
items = line.split()
if items[0] == "ncols":
cols = int(items[1])
elif items[0] == "nrows":
rows = int(items[1])
elif items[0] == "xllcorner":
x_ll = float(items[1])
elif items[0] == "yllcorner":
y_ll = float(items[1])
elif items[0] == "cellsize":
cell = float(items[1])
tile = ElevationTile(rows, cols, x_ll, y_ll, cell)
# Read in all the elevation values
tile.create_array()
for i in range(6, len(content)):
line = content[i].decode("utf-8")
row = np.fromstring(line, dtype=np.int16, count=cols, sep=' ')
tile.array[i - 6, :] = row
return tile
def get_tile(self, tile_name: str) -> ElevationTile:
"""
Retrieves the tile, either from the web or any of the caches.
:param tile_name: Elevation tile name.
:return: Elevation tile
"""
tile = None
if tile_name in self.tile_dict:
tile = self.tile_dict[tile_name]
else:
file_name = os.path.join(self.cache_dir, tile_name + ".zip")
if not os.path.exists(file_name):
self.download_tile(tile_name, self.cache_dir)
if os.path.exists(file_name):
with zipfile.ZipFile(file_name) as z:
with z.open(tile_name + ".asc") as asc:
content = asc.readlines()
tile = self.parse_text(content)
self.tile_dict[tile_name] = tile
return tile
def get_elevation(self,
latitude: float,
longitude: float) -> int:
"""
Given a location latitude and longitude, retrieve the elevation
in meters. This is the main entry point for the whole feature.
:param latitude: Location latitude in decimal degrees
:param longitude: Location longitude in decimal degrees
:return: Elevation in meters, or -9999 when no tile is available
"""
tile_name = self.get_tile_name(latitude, longitude)
tile = self.get_tile(tile_name)
if tile is not None:
return tile.get_elevation(latitude, longitude)
else:
return -9999
class JoblibElevationProvider(FileElevationProvider):
"""
A more sophisticated elevation provider that preprocesses the downloaded
elevation tiles and saves them to cache using the Joblib package.
By avoiding the decompressing and decoding steps when loading from file,
this class achieves much better performance. Note that when downloading
a tile for the first time, the performance penalty is slightly higher than
the previous one.
"""
def __init__(self, cache_dir):
super().__init__(cache_dir)
def get_tile(self, tile_name: str) -> ElevationTile:
"""
Retrieves the tile, either from the web or any of the caches.
:param tile_name: Elevation tile name.
:return: Elevation tile
"""
if tile_name in self.tile_dict:
tile = self.tile_dict[tile_name]
else:
elev_file_name = os.path.join(self.cache_dir, tile_name + ".elev")
if not os.path.exists(elev_file_name):
self.download_tile(tile_name, self.cache_dir)
file_name = os.path.join(self.cache_dir, tile_name + ".zip")
with zipfile.ZipFile(file_name) as z:
with z.open(tile_name + ".asc") as asc:
content = asc.readlines()
tile = self.parse_text(content)
self.tile_dict[tile_name] = tile
os.remove(file_name)
joblib.dump(tile, elev_file_name)
else:
tile = joblib.load(elev_file_name)
return tile
if __name__ == "__main__":
elevation = JoblibElevationProvider(".")
print(elevation.get_elevation(34.1225696, -118.2181179))
print(elevation.get_elevation(34.0095999, -117.53678559999999))
print(elevation.get_elevation(37.6047911, -122.0384952))
``` |
{
"source": "joaofig/geo-spoke",
"score": 3
} |
#### File: geo-spoke/geo/geospoke.py
```python
import numpy as np
import pandas as pd
import h3.api.numpy_int as h3
import multiprocessing as mp
import math
import geo.geomath as gm
from functools import partial
from timeit import default_timer as timer
class GeoBrute(object):
def __init__(self, locations: np.ndarray):
self.lats = locations[:, 0]
self.lons = locations[:, 1]
def query_radius(self,
location: np.ndarray,
r: float) -> (np.ndarray, np.ndarray):
"""
Selects the indices of the points that lie within a given distance from
a given location.
:param location: Location to query in [lat, lon] format
:param r: Radius in meters
:return: Array of indices
"""
lat = location[0, 0]
lon = location[0, 1]
dist = gm.vec_haversine(self.lats, self.lons, lat, lon)
return np.argwhere(dist <= r)
def query_knn(self, location: np.array, k: int) -> np.array:
dist = gm.vec_haversine(self.lats, self.lons,
location[0], location[1])
idx = np.argsort(dist)
return idx[:k], dist[idx[:k]]
def get_slice(dim: int, i: int, k: int) -> np.ndarray:
return slice(max(0, i - k), min(dim - 1, i + k) + 1)
def calculate_sorted_distances(latitudes, longitudes, lat, lon):
dist = gm.vec_haversine(latitudes, longitudes, lat, lon)
idx = np.argsort(dist)
return idx, dist[idx]
class GeoSpoke(object):
def __init__(self, locations: np.ndarray):
self.lats = locations[:, 0]
self.lons = locations[:, 1]
min_lat, max_lat = self.lats.min(), self.lats.max()
min_lon, max_lon = self.lons.min(), self.lons.max()
h = gm.num_haversine(min_lat, min_lon, max_lat, min_lon)
w = gm.num_haversine(min_lat, min_lon, min_lat, max_lon)
self.density = locations.shape[0] / (w * h)
if max_lat > 0:
self.lat0 = self.lat1 = min_lat - 90
else:
self.lat0 = self.lat1 = max_lat + 90
self.lon0 = (max_lon - min_lon) / 2 - 45
self.lon1 = self.lon0 + 90
self.idx0, self.sorted0 = calculate_sorted_distances(self.lats, self.lons, self.lat0, self.lon0)
self.idx1, self.sorted1 = calculate_sorted_distances(self.lats, self.lons, self.lat1, self.lon1)
def query_radius(self,
location: np.ndarray,
r: float) -> np.ndarray:
"""
Selects the indices of the points that lie within a given distance from
a given location.
:param location: Location to query in [lat, lon] format
:param r: Radius in meters
:return: Array of indices
"""
lat = location[0]
lon = location[1]
d0 = gm.num_haversine(lat, lon, self.lat0, self.lon0)
d1 = gm.num_haversine(lat, lon, self.lat1, self.lon1)
i0 = np.searchsorted(self.sorted0, d0 - r)
i1 = np.searchsorted(self.sorted0, d0 + r)
match0 = self.idx0[i0:i1 + 1]
i0 = np.searchsorted(self.sorted1, d1 - r)
i1 = np.searchsorted(self.sorted1, d1 + r)
match1 = self.idx1[i0:i1 + 1]
intersect = np.intersect1d(match0, match1)
dist = gm.vec_haversine(self.lats[intersect],
self.lons[intersect],
lat, lon)
return intersect[dist <= r]
def query_knn(self, location: np.ndarray, k: int) -> (np.ndarray, np.ndarray):
lat = location[0]
lon = location[1]
d0 = gm.num_haversine(lat, lon, self.lat0, self.lon0)
d1 = gm.num_haversine(lat, lon, self.lat1, self.lon1)
r = math.sqrt(k / self.density) * 2.0
intersect = np.zeros(0)
while intersect.shape[0] < k:
s0 = np.searchsorted(self.sorted0, [d0 - r, d0 + r])
s1 = np.searchsorted(self.sorted1, [d1 - r, d1 + r])
intersect = np.intersect1d(self.idx0[s0[0]:s0[1] + 1],
self.idx1[s1[0]:s1[1] + 1],
assume_unique=True)
r *= 4
dist = gm.vec_haversine(self.lats[intersect],
self.lons[intersect],
lat, lon)
idx = np.argsort(dist)
return intersect[idx][:k], dist[idx[:k]]
def geo_to_h3_array(locations, resolution: int = 12):
hexes = [h3.geo_to_h3(locations[i, 0], locations[i, 1], resolution) for i in range(locations.shape[0])]
return hexes
class H3Index(object):
def __init__(self, locations: np.ndarray, resolution=10):
self.locations = locations
self.h3res = resolution
cpus = mp.cpu_count()
arrays = np.array_split(locations, cpus)
fn = partial(geo_to_h3_array, resolution=resolution)
with mp.Pool(processes=cpus) as pool:
results = pool.map(fn, arrays)
flattened = [item for sublist in results for item in sublist]
self.h3arr = np.array(flattened, dtype=np.uint64)
self.h3idx = np.argsort(self.h3arr)
def query_radius(self,
location: np.ndarray,
r: float) -> np.ndarray:
edge_len = h3.edge_length(self.h3res, unit="m")
idx = h3.geo_to_h3(location[0], location[1], self.h3res)
ring = h3.k_ring(idx, 1 + int(round(r / edge_len)))
i0 = np.searchsorted(self.h3arr, ring, side='left', sorter=self.h3idx)
i1 = np.searchsorted(self.h3arr, ring, side='right', sorter=self.h3idx)
indices = np.hstack([np.arange(i, j) for i, j in zip(i0, i1) if i != j])
idx = self.h3idx[indices]
dist = gm.vec_haversine(self.locations[idx, 0], self.locations[idx, 1],
location[0], location[1])
return self.h3idx[indices[np.argwhere(dist <= r).ravel()]]
def query_knn(self, location: np.ndarray, k: int) -> (np.ndarray, np.ndarray):
idx = h3.geo_to_h3(location[0], location[1], self.h3res)
i = 0
indices = np.zeros(0, dtype=np.uint64)
ring = np.zeros(0, dtype=np.uint64)
while indices.shape[0] < k:
i += 2
k_ring = h3.k_ring(idx, i)
ring = np.setdiff1d(k_ring, ring, assume_unique=True)
i0 = np.searchsorted(self.h3arr, ring, side='left', sorter=self.h3idx)
i1 = np.searchsorted(self.h3arr, ring, side='right', sorter=self.h3idx)
indices = np.hstack((indices,
np.hstack([np.arange(i, j, dtype=np.uint64)
for i, j in zip(i0, i1) if i != j])))
idx = self.h3idx[indices]
dist = gm.vec_haversine(self.locations[idx, 0],
self.locations[idx, 1],
location[0], location[1])
dist_idx = np.argsort(dist)
return idx[dist_idx[:k]], dist[dist_idx[:k]]
def main():
import folium
from folium.vector_layers import CircleMarker
# np.random.randint(111)
"""
For testing purposes only
:return:
"""
columns_to_read = ['Timestamp', 'LineID', 'Direction', 'PatternID',
'JourneyID', 'Congestion', 'Lon', 'Lat',
'Delay', 'BlockID', 'VehicleID', 'StopID', 'AtStop']
df = pd.read_parquet("../data/sir010113-310113.parquet",
columns=columns_to_read)
positions = df[['Lat', 'Lon']].to_numpy()
random_indexes = np.random.randint(low=0, high=positions.shape[0], size=100)
random_locations = positions[random_indexes]
start = timer()
geo_query = GeoSpoke(positions)
end = timer()
print("GeoSpoke initialization took {} seconds".format(end - start))
start = timer()
h3_index = H3Index(positions, resolution=10)
end = timer()
print("H3Index initialization took {} seconds".format(end - start))
geo_brute = GeoBrute(positions)
#
# pt = np.array([[53.3520802, -6.2883607]])
ind = np.zeros(0)
start = timer()
for pt in random_locations: # [random_locations[0]]:
ind = geo_query.query_radius(pt, r=100.0)
end = timer()
print(ind.shape[0], np.sort(ind))
print("GeoSpoke radius query took {} seconds".format(end - start))
print("--------------")
start = timer()
for pt in random_locations: # [random_locations[0]]:
ind = h3_index.query_radius(pt, r=100.0)
end = timer()
print(ind.shape[0], np.sort(ind))
print("H3Index radius query took {} seconds".format(end - start))
print("--------------")
print(" ")
print("KNN - GeoSpoke ------")
start = timer()
for pt in random_locations:
knn0, dist0 = geo_query.query_knn(pt, 20)
end = timer()
print("Timer: {}".format(end - start))
print(knn0)
print(dist0)
print("--------------")
print(" ")
print("KNN - H3Index ------")
start = timer()
for pt in random_locations:
knn0, dist0 = h3_index.query_knn(pt, 20)
end = timer()
print("Timer: {}".format(end - start))
print(knn0)
print(dist0)
print("--------------")
# print("KNN - GeoBrute ------")
# start = timer()
# knn1, dist1 = geo_brute.query_knn(random_locations[0].ravel(), 20)
# end = timer()
# print("Timer: {}".format(end - start))
# print(knn1)
# print(dist1)
# m = folium.Map(location=pt)
# for idx in knn0:
# CircleMarker(positions[idx], radius=1, color="#ff0000").add_to(m)
# for idx in knn1:
# CircleMarker(positions[idx], radius=1, color="#0000ff").add_to(m)
# CircleMarker(pt, radius=1, color="#000000").add_to(m)
# m.save("./map.html")
if __name__ == "__main__":
main()
``` |
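Both `GeoSpoke` and `H3Index` call into a `geo.geomath` module that is not part of this entry. Below is a sketch consistent with the call sites above — argument order `(lat1, lon1, lat2, lon2)` and distances in meters — where the 6,371 km Earth radius is an assumption (the Dublin-buses entry above used 6,372 km):
```python
# Hypothetical sketch of geo/geomath.py, inferred from the call sites above.
import numpy as np


def vec_haversine(lat1, lon1, lat2, lon2):
    """Vectorized haversine distance in meters (inputs in decimal degrees)."""
    lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
    a = np.sin((lat2 - lat1) / 2.0) ** 2 + \
        np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2.0) ** 2
    return 6_371_000.0 * 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))


def num_haversine(lat1, lon1, lat2, lon2):
    """Scalar haversine distance in meters."""
    return float(vec_haversine(np.float64(lat1), np.float64(lon1),
                               np.float64(lat2), np.float64(lon2)))
```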
{
"source": "joaofig/strat-group-spl",
"score": 3
} |
#### File: joaofig/strat-group-spl/k_fold_split.py
```python
import numpy as np
from numpy.random import default_rng
from collections import namedtuple
from tools import History, get_sort_index, hash_solution
from tools import calculate_cost, calculate_costs, calculate_cost_gradients
from tools import cosine_similarity, get_similarities
RANDOM_SEED = 5005 # 2310
Solution = namedtuple("Solution", "cost index")
def generate_problem(num_groups: int, num_classes: int,
min_group_size: int, max_group_size: int,
max_group_percent: float) -> np.ndarray:
"""
Generates a problem matrix from the given parameters.
:param num_groups: The number of data groups.
:param num_classes: The number of classes.
:param min_group_size: The minimum group size.
:param max_group_size: The maximum group size.
:param max_group_percent: The maximum class percent.
:return: The problem matrix (N,C).
"""
rng = default_rng(seed=RANDOM_SEED)
problem = np.zeros((num_groups, num_classes))
problem[:, 0] = rng.integers(low=min_group_size,
high=max_group_size,
size=num_groups)
for i in range(1, num_groups):
top_i = max_group_percent * problem[i, 0]
for j in range(1, num_classes):
h = top_i - problem[i, 1:j+1].sum()
problem[i, j] = rng.integers(low=0, high=h)
return problem
def print_problem(problem, solution=None):
print("")
if solution is None:
for g in problem:
print("\t".join([str(int(n)) for n in g]))
else:
for g in zip(problem, solution):
print("{0}\t{1}".format("\t".join([str(int(n)) for n in g[0]]), g[1]))
def print_solution(problem, solution, k):
"""
Prints the solution to the console.
:param problem: The problem matrix.
:param solution: The solution vector.
:param k: The number of folds.
"""
num_classes = problem.shape[1]
total_count = problem[:, 0].sum()
print(calculate_cost(problem, solution, k))
print("")
print("K-Fold Partitioning")
print(total_count, total_count / k)
for i in range(k):
index = solution == i
print(i+1, problem[index, 0].sum())
for j in range(1, num_classes):
print("")
print("Class {0}".format(j))
for i in range(k):
index = solution == i
print(i+1, problem[index, j].sum() / problem[index, 0].sum())
# @jit(nopython=True)
def add_solution(tabu, solution_arr, cs, k, k0, k1, arr_ix):
"""
:param tabu: Set of hashes of solutions already visited
:param solution_arr: The current solution vector
:param cs: The cosine similarity matrix (K, N)
:param k: The number of folds in the problem (integer)
:param k0: First fold index of the selected pair
:param k1: Second fold index of the selected pair
:param arr_ix: A pre-calculated integer range [0..N)
:return: Tuple of (solution vector, whether a new solution was added)
"""
sort_ix0 = get_sort_index(solution_arr, cs, k0, arr_ix)
sort_ix1 = get_sort_index(solution_arr, cs, k1, arr_ix)
n0 = sort_ix0.shape[0]
n1 = sort_ix1.shape[0]
i0, i1 = 0, 0
solution_added = False
while not solution_added and i0 < sort_ix0.shape[0] and i1 < sort_ix1.shape[0]:
solution = np.copy(solution_arr)
if n0 > 0 and n1 > 0:
if cs[k0, sort_ix0[i0]] > cs[k1, sort_ix1[i1]]:
solution[sort_ix0[i0]] = k1
i0 += 1
else:
solution[sort_ix1[i1]] = k0
i1 += 1
elif n0 > 0:
solution[sort_ix0[i0]] = k1
i0 += 1
else:
solution[sort_ix1[i1]] = k0
i1 += 1
h = hash_solution(solution, k)
if h not in tabu:
tabu.add(h)
solution_added = True
solution_arr = solution
return solution_arr, solution_added
def main():
history = History()
num_groups = 20 # Number of groups to simulate
num_classes = 2 # Number of classes
max_group_size = 10000 # Maximum group size
max_group_percent = 0.4 # Maximum proportion for each class
k = 5 # Number of folds
max_empty_iterations = 10000
max_intensity_iterations = 10
min_cost = 1000
terminated = False
tabu = set()
problem = generate_problem(num_groups, num_classes,
min_group_size=10,
max_group_size=max_group_size,
max_group_percent=max_group_percent)
rng = default_rng(seed=RANDOM_SEED)
solution = rng.integers(low=0, high=k, size=num_groups)
# solution = np.zeros(problem.shape[0], dtype=int)
arr_ix = np.arange(num_groups, dtype=int)
tabu.add(hash_solution(solution, k))
incumbent_solution = solution
incumbent_cost = calculate_cost(problem, incumbent_solution, k)
print(incumbent_cost)
history.add(solution)
n = 0
n_intensity = 0
solution_added = False
while not terminated:
cost_grad = calculate_cost_gradients(problem, solution, k)
cs = cosine_similarity(problem, cost_grad)
sims = get_similarities(cost_grad)
for sim in sims:
k0, k1 = int(sim[1]), int(sim[2]) # Cast the indices from double to int
solution, solution_added = add_solution(tabu, solution, cs, k, k0, k1, arr_ix)
if solution_added:
break # Breaks the 'sims' loop
if not solution_added:
print("Solution not added!")
cost = calculate_cost(problem, solution, k)
if cost < incumbent_cost:
print(cost)
incumbent_cost = cost
incumbent_solution = solution
n = 0
n_intensity = 0
history.add(solution)
n += 1
n_intensity += 1
if n > max_empty_iterations or incumbent_cost < min_cost:
terminated = True
print(calculate_costs(problem, incumbent_solution, k))
print(len(tabu))
print_solution(problem, incumbent_solution, k)
# print_problem(problem, incumbent_solution)
if __name__ == "__main__":
main()
```
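`main` also pulls a `History` class from `tools` that is absent from the `functions.py` excerpt below. Judging from its only use, `history.add(solution)`, a sketch as small as this would satisfy the interface (an assumption, not the project's actual class):
```python
# Hypothetical sketch of tools.History, inferred from history.add(solution).
import numpy as np


class History:
    """Keeps copies of every improving solution, in order of discovery."""

    def __init__(self):
        self.solutions = []

    def add(self, solution: np.ndarray) -> None:
        self.solutions.append(np.copy(solution))
```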
#### File: strat-group-spl/tools/functions.py
```python
import numpy as np
from numba import jit
@jit(nopython=True)
def index_to_str(idx):
"""
Generates a string representation from an index array.
:param idx: The NumPy boolean index array.
:return: The string representation of the array.
"""
num_chars = (idx.shape[0] + 5) // 6  # ceil(n / 6) so trailing bits are not dropped
s = ""
for i in range(num_chars):
b = i * 6
six = idx[b:b+6]
c = 0
for j in range(six.shape[0]):
c = c * 2 + int(six[j])
s = s + chr(c+32)
return s
@jit(nopython=True)
def hash_solution(solution: np.ndarray, k: int) -> str:
"""
Calculates a string hash for the solution.
:param solution: The solution vector.
:param k: The number of folds.
:return: The string hash.
"""
s = ""
for i in range(k-1):
s = s + index_to_str(solution == i)
return s
@jit(nopython=True)
def is_in(element, test_elements):
"""
Predicate to test the inclusion of items in the first array on the second
:param element: Array whose elements we want to test the inclusion for
:param test_elements: Target array
:return: Boolean array of the same size as `element` with the element-wise inclusion test results
"""
unique = set(test_elements)
result = np.zeros_like(element, dtype=np.bool_)
for i in range(element.shape[0]):
result[i] = element[i] in unique
return result
@jit(nopython=True)
def get_sort_index(solution: np.ndarray, cs: np.ndarray, k: int, arr_ix: np.ndarray) -> np.ndarray:
"""
:param solution: The solution vector.
:param cs: The cosine similarity matrix.
:param k: The selected fold index [0..K)
:param arr_ix: A pre-calculated integer range [0..N).
    :return: Sort indices into fold k, most dissimilar entries first.
"""
sort_ix = np.zeros((0,), dtype=np.int_)
solution_indices_for_k = solution == k
n = solution_indices_for_k.sum()
# Check if there are any indexes for fold k
if n > 0:
# Get the descending sort indices for the similarities of fold k.
# Lower similarities mean larger differences.
sort_ix = np.flip(np.argsort(cs[k]))
# Filter the solution indices that belong to fold k.
sort_ix = sort_ix[is_in(sort_ix, arr_ix[solution_indices_for_k])]
return sort_ix
@jit(nopython=True)
def cosine_similarity(problem: np.ndarray, cost_grad: np.ndarray) -> np.ndarray:
"""
Calculates the cosine similarity vector between the problem array
and the cost gradient vector.
:param problem: The problem array.
:param cost_grad: The cost gradient matrix.
:return: The cosine similarity vector.
"""
k = cost_grad.shape[0]
s = np.zeros((k, problem.shape[0]))
c = problem
norm_c = np.zeros(problem.shape[0])
for i in range(problem.shape[0]):
norm_c[i] = np.linalg.norm(c[i])
for i in range(k):
g = cost_grad[i]
a = np.dot(c, g)
b = np.multiply(norm_c, np.linalg.norm(g))
s[i, :] = np.divide(a, b)
return s
@jit(nopython=True)
def vector_similarity(v0: np.ndarray, v1: np.ndarray) -> float:
"""
Calculates the cosine similarity between two vectors.
:param v0: Vector
:param v1: Vector
:return: Similarity scalar.
"""
a = np.dot(v0, v1)
b = np.linalg.norm(v0) * np.linalg.norm(v1)
return a / b * np.linalg.norm(v0 - v1)
@jit(nopython=True)
def get_lowest_similarity(cost_grads):
"""
Calculates the fold index pair with the lowest cost gradient
cosine similarity.
    :param cost_grads: The (K, C) cost gradient matrix.
:return: Fold index pair with the lowest cosine similarity.
"""
sim = 1.0
p = (-1, -1)
n = cost_grads.shape[0]
for i in range(n):
for j in range(i + 1, n):
s = vector_similarity(cost_grads[i], cost_grads[j])
if s < sim:
sim = s
p = (i, j)
return p
@jit(nopython=True)
def get_similarities(cost_grads):
"""
Calculates the similarity array between all pairs of cost gradients
:param cost_grads: The cost gradient matrix
:return: The sorted similarity array (K,3) containing rows of
[similarity, i, j] with i != j
"""
n = cost_grads.shape[0]
k_count = int(n * (n - 1) / 2)
sims = np.zeros((k_count, 3))
k = 0
for i in range(n - 1):
for j in range(i + 1, n):
s = vector_similarity(cost_grads[i], cost_grads[j])
sims[k, 0] = s
sims[k, 1] = i
sims[k, 2] = j
k += 1
return sims[sims[:, 0].argsort()]
@jit(nopython=True)
def calculate_costs(problem: np.ndarray, solution: np.ndarray, k: int) -> np.ndarray:
"""
Calculates the cost vector for the given solution.
:param problem: The problem matrix.
:param solution: The solution vector.
:param k: The number of folds.
:return: The K-dimensional cost vector.
"""
c = problem.shape[1]
costs = np.zeros(k)
total_count = problem[:, 0].sum()
for i in range(k):
index = solution == i
costs[i] = 0.5 * (problem[index, 0].sum() - total_count / k) ** 2
stratum_sum = problem[index, 0].sum()
for j in range(1, c):
r = problem[:, j].sum() / total_count
costs[i] += 0.5 * (problem[index, j].sum() - r * stratum_sum) ** 2
return costs
@jit(nopython=True)
def calculate_cost(problem: np.ndarray, solution: np.ndarray, k: int) -> float:
"""
Calculates the overall cost as the L2 norm of the cost vector.
:param problem: The problem matrix.
:param solution: The solution vector.
:param k: The number of folds.
:return: The scalar cost.
"""
return np.linalg.norm(calculate_costs(problem, solution, k))
@jit(nopython=True)
def calculate_cost_gradients(problem: np.ndarray, solution: np.ndarray, k: int) -> np.ndarray:
"""
Computes the K cost gradients.
:param problem: The problem matrix.
:param solution: The solution vector.
:param k: The number of folds.
:return: The (K,C) gradient matrix.
"""
c = problem.shape[1]
gradients = np.zeros((k, c))
total_count = problem[:, 0].sum()
for i in range(k):
index = solution == i
gradients[i, 0] = problem[index, 0].sum() - total_count / k
stratum_sum = problem[index, 0].sum()
for j in range(1, c):
r = problem[:, j].sum() / total_count
gradients[i, j] = problem[index, j].sum() - r * stratum_sum
return gradients
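# Added sketch (illustrative toy data, not from the original experiments):
# column 0 is the group size and column 1 a per-class count, so a perfectly
# stratified 2-fold split of these four identical-size groups costs 0.
def _demo_calculate_cost():
    problem = np.array([[10.0, 4.0], [10.0, 4.0], [10.0, 2.0], [10.0, 2.0]])
    solution = np.array([0, 1, 0, 1], dtype=np.int64)
    print(calculate_cost(problem, solution, 2))  # expected: 0.0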
``` |
{
"source": "joaofig/ved-explore",
"score": 2
} |
#### File: ved-explore/tools/generate_traces.py
```python
from collections import Counter
import numpy as np
from tqdm.auto import tqdm
from db.api import VedDb, TraceDb
from numba import jit
from pyquadkey2 import quadkey
def qk_int_tostr(qk_int, z):
qk_str = ""
for i in range(z):
qk_str = "0123"[qk_int % 4] + qk_str
qk_int = qk_int >> 2
return qk_str
def qk_str_toint(qk_str):
qk_int = 0
for c in qk_str:
qk_int = (qk_int << 2) + int(c)
return qk_int
def qk_toint(qk):
return qk_str_toint(str(qk))
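def _demo_quadkey_roundtrip():
    # Added sketch (illustrative values): packing a quadkey string into an
    # int and back; the zoom level restores the leading zeros the int drops.
    assert qk_str_toint("0123") == 27
    assert qk_int_tostr(27, 4) == "0123"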
def get_master_tile(qk, z):
qk_str = qk_int_tostr(qk, z)
tile = quadkey.from_str(qk_str[:len(qk_str) - 8])
return tile
@jit(nopython=True)
def bresenham_pairs(x0: int, y0: int,
x1: int, y1: int) -> np.ndarray:
"""Generates the diagonal coordinates
Parameters
----------
x0 : int
Origin x value
y0 : int
Origin y value
x1 : int
Target x value
y1 : int
Target y value
Returns
-------
np.ndarray
Array with the diagonal coordinates
"""
dx = abs(x1 - x0)
dy = abs(y1 - y0)
dim = max(dx, dy)
pairs = np.zeros((dim, 2), dtype=np.int64)
x, y = x0, y0
sx = -1 if x0 > x1 else 1
sy = -1 if y0 > y1 else 1
if dx > dy:
err = dx // 2
for i in range(dx):
pairs[i, 0] = x
pairs[i, 1] = y
err -= dy
if err < 0:
y += sy
err += dx
x += sx
else:
err = dy // 2
for i in range(dy):
pairs[i, 0] = x
pairs[i, 1] = y
err -= dx
if err < 0:
x += sx
err += dy
y += sy
return pairs
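def _demo_bresenham_pairs():
    # Added sketch (illustrative): the rasterised run between two tile
    # coordinates. Note the end point itself is not emitted (dim steps),
    # which is fine here because consecutive segments share endpoints.
    print(bresenham_pairs(0, 0, 4, 2))  # [[0 0] [1 0] [2 1] [3 1]]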
def create_pixel_table(db):
if not db.table_exists("pixel"):
sql = """
CREATE TABLE pixel (
pixel_id INTEGER PRIMARY KEY ASC,
qk INTEGER NOT NULL,
                intensity FLOAT NOT NULL
);
"""
db.execute_sql(sql)
# sql = "CREATE INDEX idx_pixel_qk ON pixel (qk);"
# db.execute_sql(sql)
def get_moves(db):
sql = "select vehicle_id, day_num from move"
moves = db.query(sql)
return moves
def get_move_points(db, move):
sql = """
select latitude, longitude
from signal
where vehicle_id = ? and day_num = ?
order by time_stamp;
"""
points = db.query(sql, move)
return points
def get_unique_points(points):
unique_pts = []
last_pt = None
for pt in points:
if last_pt is None or last_pt != pt:
unique_pts.append(pt)
last_pt = pt
return unique_pts
def add_pixel(db, px, intensity):
sql = "select n from l26 where qk=?"
res = db.query(sql, parameters=[px])
if len(res) == 0:
sql = "insert into l26 (qk, tile, n) values (?, ?, ?)"
db.execute_sql(sql, [px, px // 256, intensity])
else:
sql = "update l26 set n = ? where qk = ?"
        db.execute_sql(sql, [res[0][0] + intensity, px])  # query rows are tuples; take the scalar n
def get_level_list(d, z):
l = [(qk, qk_toint(get_master_tile(qk, z)), d[qk]) for qk in d.keys()]
return l
def get_parent_level(d):
parents = dict()
for qk in d.keys():
p = qk // 4
if p in parents:
parents[p] += d[qk]
else:
parents[p] = d[qk]
return parents
def insert_all_levels(db, counter):
level = counter
level_ranges = dict()
for z in tqdm(range(26, 8, -1)):
level_ranges[z] = (min(level.values()), max(level.values()))
db.insert_qk_intensities(z, get_level_list(level, z))
level = get_parent_level(level)
return level_ranges
def main():
trace_db = TraceDb(folder="../db")
ved_db = VedDb(folder="../db")
counter = Counter()
create_pixel_table(trace_db)
moves = get_moves(ved_db)
for move in tqdm(moves):
points = get_unique_points(get_move_points(ved_db, move))
tiles = [quadkey.from_geo((p[0], p[1]), level=26).to_tile() for p in points]
for i in range(len(tiles) - 1):
x0, y0 = tiles[i][0]
x1, y1 = tiles[i+1][0]
line = bresenham_pairs(x0, y0, x1, y1)
pixels = [qk_str_toint(quadkey.from_tile((p[0], p[1]), 26).key) for p in line]
counter.update(pixels)
level_ranges = insert_all_levels(trace_db, counter)
trace_db.insert_level_ranges([(k, v[0], v[1]) for k, v in level_ranges.items()])
if __name__ == "__main__":
main()
``` |
{
"source": "Joao-Filh0/Python--Flask--Pix",
"score": 3
} |
#### File: Joao-Filh0/Python--Flask--Pix/main.py
```python
from flask import Flask,request,jsonify
from flask_cors import CORS
import os
from pix import *
app = Flask(__name__)
CORS(app)
dataResponses = []
# Route used for webhook testing
@app.route("/",methods = ['GET'])
def get_data():
if len(dataResponses) == 0:
return jsonify({"message" : "Nada no momento "}), 200
return jsonify(dataResponses),200
@app.route("/cob",methods = ["POST"])
def web_hooks():
pix = PixModel()
result = request.get_json()
cob = pix.cob(uid = result.get("uid"), data = result.get("data"))
dataResponses.append(cob)
return jsonify({
"message" : "sucess" ,"payload" : cob }),201
if __name__== '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(debug=True,host="0.0.0.0",port=port,threaded=True)
#,host="0.0.0.0"
```
#### File: Joao-Filh0/Python--Flask--Pix/pix.py
```python
import json,requests
from constants import permissions
from constants import urls
from constants import body
from utils import qrcode
from utils import creator
class PixModel:
def __init__(self):
        # Auth request to obtain the access token
self.ACESS_TOKEN = self.auth(body.cob_data).get("access_token")
def auth(self,data):
r = requests.post(url= urls.auth_url,
data = json.dumps(data) ,
headers = {'content-type': 'application/json',
'Authorization': permissions.BASIC_CREDENTIAL},
cert= permissions.CERTIFICATES)
return json.loads(r.text)
def cob(self,uid,data):
txid =creator.txid(uid)
r = requests.put(url= urls.cob_url+txid,
data=json.dumps(data),
headers={'content-type': 'application/json',
'Authorization': f"Bearer {self.ACESS_TOKEN}"},
cert= permissions.CERTIFICATES)
response = json.loads(r.text)
if(r.status_code == 201):
self.webhooks_config(key=response.get("chave"))
return {
"txid" : txid,
"qr_code" : qrcode.build_payload(response)
}
return {}
def webhooks_config(self,key):
requests.put(url= urls.webhooks_config_url+key,data=json.dumps(body.webhook_data),
headers={'content-type': 'application/json',
'Authorization': f"Bearer {self.ACESS_TOKEN}"},
cert= permissions.CERTIFICATES)
return
```
#### File: Python--Flask--Pix/utils/creator.py
```python
import libscrc
from datetime import datetime
from random import randint,choice
def txid(userID):
alphabet = "abcdefghijklmnopqrstuvwxyz"
now = str(datetime.now()).replace("-",'').replace(".",'').replace(":",'').replace(" ",'')[2:14]
byts = bytes(userID, 'utf-8')
userID = hex(libscrc.xmodem(byts,0xFFFF)).upper().replace("0X","")
id = ""
for i in range(0,15):
sort = randint(0,9)
if(randint(0,len(id))%2 ==0):
id = id+str(sort)
else:
sort = randint(0,25)
if(sort%2==0):
id = id+choice(alphabet).upper()
else:
id = id+choice(alphabet)
return userID+now+id
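def _demo_txid():
    # Added sketch (illustrative): the generated id is a CRC16 of the user
    # id, a 12-digit timestamp, then 15 random alphanumerics, which keeps it
    # roughly inside the 26-35 character window Pix txids allow.
    print(txid("user-123"))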
``` |
{
"source": "joaofilipevaz/CSM-Trab3",
"score": 3
} |
#### File: joaofilipevaz/CSM-Trab3/trab3.py
```python
import cv2
import numpy as np
# import matplotlib.pyplot as plt
from trab3Test import K3, K5
from time import time
from os import path
import matplotlib
# 1
"""
Construa uma função (codificador) que para cada bloco de 8×8 da imagem original efectue a DCT bidimensional.
Veja a imagem com o conjunto dos blocos após a transformada.
Construa uma função (descodificador) que faça a DCT inversa.
Verifique que a imagem é igual à original.
"""
def codificador(bloco, k1, alfa):
    # forward 2-D DCT
    bloco_dct = cv2.dct(bloco-128)
    # quantisation
    dct_quant = np.round(bloco_dct / (k1 * alfa))
    return dct_quant
def descodificador(bloco_desc_dct, k1, alfa):
    # dequantisation followed by the inverse 2-D DCT (IDCT2D)
    bloco_rec = np.round((k1 * alfa) * bloco_desc_dct)
    return np.round(cv2.idct(bloco_rec)+128)
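# Added sketch (illustrative): with a flat quantisation matrix and alfa=1 the
# encode/decode pair is near-lossless; the residue comes from rounding only.
def _demo_dct_roundtrip():
    k_flat = np.ones((8, 8), dtype=np.float32)
    bloco = np.random.randint(0, 256, (8, 8)).astype(np.float32)
    rec = descodificador(codificador(bloco, k_flat, 1.0), k_flat, 1.0)
    print np.abs(bloco - rec).max()  # small, bounded by the rounding error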
# 2
"""
Construa uma função (codificador) que para cada bloco de 8 × 8 de coeficientes da transformação efectuada faça
a divisão pela matriz de quantificação (tabela K1 no anexo da norma) multiplicada por um factor de qualidade
q (ver pág. 230 do livro "Tecnologias de Compressão Multimédia").
Veja a imagem com o conjunto dos blocos após a quantificação.
Construa uma função (descodificador) que realize a operação inversa da quantificação.
Junte estas funções às já realizadas e verifique para diferentes factores de qualidade qual a SNR e veja a imagem
descodificada.
"""
# 3
"""
Construa uma função (codificador) que faça a codificação diferencial dos coeficientes DC após a quantificação.
Construa a função inversa para o descodificador.
"""
def dpcm(bloco_dct):
    # control list with the differential DC values
    dc = [bloco_dct[0][0][0]]
    # copy of the original array into a new array
    bloco_dct_dpcm = np.copy(bloco_dct)
    # DPCM of the DC component
    for i in xrange(1, len(bloco_dct)):
        diff = bloco_dct[i][0][0] - bloco_dct[i-1][0][0]
        bloco_dct_dpcm[i][0][0] = diff
        dc.append(diff)
    return bloco_dct_dpcm, dc
def desc_dpcm(bloco_dct_dpcm, dc):
    bloco_dct = np.copy(bloco_dct_dpcm)
    dcanterior = 0
    # inverse DPCM of the DC component
    for i in xrange(0, len(dc)):
        bloco_dct[i][0][0] = dc[i] + dcanterior
        dcanterior = bloco_dct[i][0][0]
    return bloco_dct
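# Added sketch (illustrative data): the DPCM pair above is exactly invertible.
def _demo_dpcm_roundtrip():
    blocos = np.random.randint(-50, 50, (4, 8, 8)).astype(np.float32)
    codificados, dc = dpcm(blocos)
    print np.abs(desc_dpcm(codificados, dc) - blocos).max()  # expected: 0.0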
# 4
"""
Construa uma função (codificador) que crie um array com a indexação em zig-zag dos coeficientes AC após a
quantificação e crie um array com os pares (zero run length, nonzero value).
Construa a função inversa para o descodificador.
Junte estas funções às já realizadas e veja a imagem descodificada.
"""
def zig_zag(bloco_dct_dpcm, zigzag, debug, test_block):
    zigzag_order = zigzag.ravel().astype(np.int8)
    # 2-D list with the AC coefficients of each block
    bloco_dct_dpcm_zz = []
    # temporary array holding the values sorted into zig-zag order
    temp = np.zeros(64)
    for i in xrange(len(bloco_dct_dpcm)):
        # grab the current 8x8 block
        bloco_1d = bloco_dct_dpcm[i][:][:].ravel()
        # list of (zero run length, nonzero value) pairs
        ac = []
        if i == test_block and debug:
            print bloco_1d
            print zigzag_order
        for z in xrange(0, len(bloco_1d)):
            # store the value at the index given by the zig-zag order
            temp[zigzag_order[z]] = bloco_1d[z]
        # helper variable counting consecutive zeros
        zeros = 0
        if i == test_block and debug:
            print temp
        eob = False
        for t in xrange(1, len(temp), 1):
            # detect the end of the block
            if (temp[t] == 0) and zeros >= 15:
                ac.append((0, 0))
                eob = True
                break
            # cap runs at 15 consecutive zeros to avoid clashing with the Huffman coding
            elif temp[t] == 0 and zeros < 15:
                zeros += 1
            else:
                # append a (run length, value) tuple
                ac.append((zeros, int(temp[t])))
                zeros = 0
        if not eob:
            ac.append((0, 0))
        if i == test_block and debug:
            print ac
        bloco_dct_dpcm_zz.append(ac)
    return bloco_dct_dpcm_zz
def zag_zig(acs, zigzag, debug, test_block):
    ind_O = zigzag.reshape(64, order='F').astype('int')
    # output list of 8x8 blocks
    bloco_dct_dpcm = []
    for i in xrange(len(acs)):
        ac_block = acs[i]
        if i == test_block and debug:
            print ac_block
        temp = np.zeros(64)
        ultima_pos = 0
        for z in xrange(len(ac_block)):
            zeros = ac_block[z][0]
            value = ac_block[z][1]
            if value != 0:
                if zeros+1+ultima_pos >= 64:
                    print ac_block[z]
                temp[zeros+1+ultima_pos] = value
                ultima_pos += zeros+1
        if i == test_block and debug:
            print "temp array"
            print temp
            print ind_O
        bloco_1d_ordenado = temp[ind_O].reshape((8, 8), order='F')
        if i == test_block and debug:
            print bloco_1d_ordenado
        bloco_dct_dpcm.append(bloco_1d_ordenado)
    return bloco_dct_dpcm
# 5
"""
Construa uma função que dados os arrays das alíneas anteriores use as tabelas do código de Huffman (tabela K3
e K5) e grave num ficheiro a sequência de bits correspondente. (não é necessário usar o formato JFIF)
"""
def codifica_huff(bloco_dct_dpcm_zz, bloco_dct_dpcm, debug):
    # output bit stream
    bit_stream = ""
    # header with the number of 8x8 blocks to read
    bit_stream += '{0:032b}'.format(len(bloco_dct_dpcm_zz))
    for i in xrange(len(bloco_dct_dpcm)):
        # DC component value
        dc = int(bloco_dct_dpcm[i][0][0])
        if dc != 0:
            # the Size field states how many bits encode the amplitude field
            size = len('{0:b}'.format(abs(dc)))
        else:
            size = 0
        # Huffman-encode the size into the bit stream (table K3)
        bit_stream += K3[size]  # + " "
        if size != 0:
            # the amplitude is the DC value in binary
            amp_dc = ones_complement(dc, size)
            # append the value directly to the bit stream, without Huffman coding
            bit_stream += amp_dc  # + " "
        # AC component handling
        for z in xrange(len(bloco_dct_dpcm_zz[i])):
            # number of consecutive zeros
            runlength = bloco_dct_dpcm_zz[i][z][0]
            # value of the nonzero coefficient
            value = bloco_dct_dpcm_zz[i][z][1]
            if value != 0:
                # the value is further split into size and amplitude, as for DC
                size = len('{0:b}'.format(abs(value)))
                amp_ac = ones_complement(value, size)
                # the (runlength, size) pair is Huffman-encoded with table K5
                bit_stream += K5[(runlength, size)]  # + " "
                # the amplitude is appended without Huffman coding
                bit_stream += amp_ac  # + " "
            else:
                size = 0
                # the (runlength, size) pair is Huffman-encoded with table K5
                bit_stream += K5[(runlength, size)]  # + " "
    if debug:
        print bit_stream
        print len(bit_stream)
    # reuse the file-writing function from the previous assignment
    escrever(bit_stream, "Lena_Cod.huf")
    return bit_stream
# 6
"""
Construa uma função que leia o ficheiro gravado e retorne os arrays com os coeficientes AC e DC.
"""
def le_huff(test_block):
    # list with the DC coefficients
    dc = []
    # list with the AC coefficients of the current block
    ac = []
    # bit sequence with the encoded message
    seqbits = ler("Lena_Cod.huf")
    # read the leading 32-bit field with the number of 8x8 blocks to read
    n_blocos = int(seqbits[0:32], 2)
    seqbits = seqbits[32:]
    print "the number of blocks is " + str(n_blocos)
    # 2-D list with the AC coefficients of all blocks
    bloco_dct_dpcm_zz = []
    # counter for (15, 0) runs to be stripped out
    count = 0
    # read the encoded bits while there is data left
    for z in xrange(n_blocos):
        if count == test_block:
            print bloco_dct_dpcm_zz[test_block]
        # end-of-block flag
        eob = False
        # read the DC
        for k in K3:
            # check the leading prefix against this dictionary entry
            if seqbits.startswith(K3[k]):
                # slice the bit message so we always read from the start
                seqbits = seqbits[len(K3[k]):]
                if k > 0:
                    # append the value to the dc list
                    dc.append(read_ones_complement(seqbits[0:k]))
                    # remove the value just read from the message
                    seqbits = seqbits[k:]
                else:
                    dc.append(0)
                # print "DC =" + str(dc)
                break
        while not eob:
            for y in K5:
                # check the leading prefix against this dictionary entry
                if seqbits.startswith(K5[y]):
                    # we obtain runlength and size
                    runlength = y[0]
                    size = y[1]
                    # slice the bit message so we always read from the start
                    seqbits = seqbits[len(K5[y]):]
                    if K5[y] == "1010":
                        eob = True
                        ac.append((0, 0))
                        bloco_dct_dpcm_zz.append(ac)
                        ac = []
                        break
                    if size != 0:
                        # read the value
                        amp_ac = read_ones_complement(seqbits[0:size])
                        # remove the value just read from the message
                        seqbits = seqbits[size:]
                        ac.append((runlength, amp_ac))
    return dc, bloco_dct_dpcm_zz, n_blocos
# 7
"""
Junte estas funções às já realizadas e veja a imagem descodificada.
Para diferentes factores de qualidade meça a relação sinal-ruído e a taxa de compressão obtida. Represente um
gráfico onde se apresente a taxa de compressão em função do SNR.
"""
# 8
"""
No mesmo gráfico compare o seu compressor de imagem com outros existentes para várias qualidades.
"""
# 9
"""
O relatório deve conter uma descrição breve das funções realizadas e uma tabela com todos os resultados da
SNR, taxa de compressão, tempo de compressão e descompressão.
"""
# splits the image into an array of 8x8 blocks
def create_8x8block(array):
    # True when both dimensions are divisible by 8
    mod8 = (array.shape[0] % 8) == 0 and (array.shape[1] % 8) == 0
    # list of 8x8 blocks
    lista_blocos = []
    if not mod8:
        print "Array dimensions are not a multiple of 8"
    for i in xrange(0, array.shape[0], 8):
        for z in xrange(0, array.shape[1], 8):
            block = array[i:(i+8), z:(z+8)]
            lista_blocos.append(block.astype(np.float32))
    return lista_blocos
# performs the inverse process, restoring the image to its original shape
def revert_to_original_block(lista_blocos, original_shape):
    array_original = np.zeros(original_shape)
    count = 0
    for i in xrange(0, array_original.shape[0], 8):
        for z in xrange(0, array_original.shape[1], 8):
            array_original[i:(i+8), z:(z+8)] = lista_blocos[count]
            count += 1
    return array_original
def quality_factor(q_factor):
if q_factor <= 50:
factor = 50.0 / q_factor
else:
factor = 2.0 - (q_factor * 2.0) / 100.0
return factor
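# Illustrative values (added note): quality_factor(50) == 1.0,
# quality_factor(90) == 0.2 and quality_factor(10) == 5.0, so lower quality
# factors scale the K1 matrix up and quantise more coarsely.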
# computes the binary value, using the inverse (ones' complement) for negative numbers
def ones_complement(value, size):
    if value >= 0:
        return '{0:b}'.format(value)
    else:
        bit_lenght = "{" + "0:0{}b".format(str(size)) + "}"
        return bit_lenght.format(2**size - 1 - abs(value))
# performs the inverse procedure, returning the number encoded by the binary string
def read_ones_complement(bin_number):
    bin_number = list(bin_number)
    if bin_number[0] == "1":
        bin_number = ''.join(bin_number)
        return int(bin_number, 2)
    else:
        for i in xrange(len(bin_number)):
            if bin_number[i] == "0":
                bin_number[i] = "1"
            else:
                bin_number[i] = "0"
        bin_number = ''.join(bin_number)
        return -int(bin_number, 2)
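# Added sketch (illustrative): the amplitude coding above round-trips for
# positive and negative values of the matching bit size.
def _demo_ones_complement():
    for v in (5, -5, 1, -1):
        size = len('{0:b}'.format(abs(v)))
        assert read_ones_complement(ones_complement(v, size)) == v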
# funções de leitura e escrita obtidas do trabalho anterior
"""
Elabore uma função ("escrever") que dada uma sequência de bits (mensagem codificada) e o nome do ficheiro,
escreva a sequência de bits para o ficheiro.
"""
def escrever(seqbits, nomeficheiro):
    # byte array that will be written to the file
    array_bytes = bytearray()
    # make the bit count a multiple of 8 by adding the necessary padding bits
    # the modulo-8 remainder tells how many bits are "free"
    n_bits_livres = len(seqbits) % 8
    if n_bits_livres != 0:
        # fill the rest of the byte with 1s
        seqbits += '1' * (8 - n_bits_livres)
    # record how many stuffing bits were used so they can be removed on read
    seqbits += '{0:08b}'.format(n_bits_livres)
    # convert the bits to bytes
    for i in range(len(seqbits) / 8):
        # 8-bit segment = 1 byte
        substring = seqbits[i * 8: i * 8 + 8]
        # append the segment to the array
        array_bytes.append(int(substring, base=2))
    # open the file in write mode
    f = open("{}".format(nomeficheiro), "wb")
    # write the bytes to the file
    for byte in bytes(array_bytes):
        f.write(byte)
    # close the write stream
    f.close()
    print "Wrote {} bits to the file".format(len(seqbits))
"""
Elabore uma função ("ler") que dado o nome do ficheiro, leia uma sequência de bits (mensagem codificada)
contida no ficheiro.
"""
def ler(nomeficheiro):
    # bit sequence with the encoded message
    seqbits = ""
    # 'with' guarantees exception handling and an implicit close
    with open("{}".format(nomeficheiro), "rb") as f:
        # read one byte
        byte = f.read(1)
        while byte:
            # append the byte's bits to the bit sequence
            seqbits += '{0:08b}'.format(ord(byte))
            byte = f.read(1)
    print "Read {} bits from the file".format(len(seqbits))
    # check how many bits were used for stuffing
    n_bits_livres = int(seqbits[-8:], 2)
    if n_bits_livres != 0:
        # drop the stuffing-info field and the stuffing bits themselves
        seqbits = seqbits[:-((8-n_bits_livres) + 8)]
    else:
        # drop only the stuffing-info field (no stuffing bits were added)
        seqbits = seqbits[:-8]
    print len(seqbits)
    return seqbits
# helper function to compute the SNR between the original and the compressed image
def calculoSNR(imgOrig, imgComp):
PSinal = np.sum(imgComp**2.0)
PRuido = np.sum((imgOrig-imgComp)**2.0)
args = PSinal / PRuido
return np.round(10.0*np.log10(args), 3)
def main():
    print "========================================================================================================"
    print "================================ Lena file analysis ================================"
    # flag controlling the printing of debug data
    debug = False
    test_block = 4095
    # quality factor q
    q = 90
    alfa = quality_factor(q)
    # K1 quantisation matrix (table K1 - luminance quantisation matrix)
    k1 = np.zeros((8, 8))
k1[0] = [16, 11, 10, 16, 24, 40, 51, 61]
k1[1] = [12, 12, 14, 19, 26, 58, 60, 55]
k1[2] = [14, 13, 16, 24, 40, 57, 69, 56]
k1[3] = [14, 17, 22, 29, 51, 87, 80, 62]
k1[4] = [18, 22, 37, 56, 68, 109, 103, 77]
k1[5] = [24, 35, 55, 64, 81, 104, 113, 92]
k1[6] = [49, 64, 78, 87, 103, 121, 120, 101]
k1[7] = [72, 92, 95, 98, 112, 100, 103, 99]
# zig-zag order
zigzag = np.zeros((8, 8))
zigzag[0] = [0, 1, 5, 6, 14, 15, 27, 28]
zigzag[1] = [2, 4, 7, 13, 16, 26, 29, 42]
zigzag[2] = [3, 8, 12, 17, 25, 30, 41, 43]
zigzag[3] = [9, 11, 18, 24, 31, 40, 44, 53]
zigzag[4] = [10, 19, 23, 32, 39, 45, 52, 54]
zigzag[5] = [20, 22, 33, 38, 46, 51, 55, 60]
zigzag[6] = [21, 34, 37, 47, 50, 56, 59, 61]
zigzag[7] = [35, 36, 48, 49, 57, 58, 62, 63]
x = cv2.imread("samples/lena.tiff", cv2.IMREAD_GRAYSCALE)
# cv2.imwrite("samples/lena_gray.jpeg", x.astype(np.uint8))
lista_blocos = create_8x8block(x)
bloco_dct = []
t0 = time()
    # DCT and quantisation
    for i in xrange(len(lista_blocos)):
        bloco = codificador(lista_blocos[i], k1, alfa)
        bloco_dct.append(bloco)
    t1 = time()
    print "DCT and quantisation took {} seconds".format(round(t1 - t0, 3))
    # DC coefficient coding
    bloco_dct_dpcm, dc_cod = dpcm(bloco_dct)
    t2 = time()
    print "DC coding took {} seconds".format(round(t2 - t1, 3))
    if debug:
        print lista_blocos[test_block]
        print bloco_dct_dpcm[test_block]
    # AC coding
    bloco_dct_dpcm_zz = zig_zag(bloco_dct_dpcm, zigzag, debug, test_block)
    t3 = time()
    print "AC coding took {} seconds".format(round(t3 - t2, 3))
    # Huffman coding and writing to file
    bitstream_cod = codifica_huff(bloco_dct_dpcm_zz, bloco_dct_dpcm, debug)
    t4 = time()
    print "The entropy coding stage (Huffman) took {} seconds".format(round(t4 - t3, 3))
    # show the image
x_desc = revert_to_original_block(bloco_dct_dpcm, x.shape)
#cv2.imshow("Lena dc Q = {}.jpeg".format(q), x_desc.astype(np.uint8))
#cv2.waitKey(0) & 0xFF
cv2.imwrite("Lena dc Q = {}.jpeg".format(q), x_desc.astype(np.uint8))
    if debug:
        print "Is the bit stream valid? = " + str(bitstream_cod == ler("Lena_Cod.huf"))
    # read the file back and rebuild the AC and DC coefficients
    dc, ac, n_blocos = le_huff(test_block)
    if debug:
        print "decoded DC equals the encoded DC = " + str(dc == dc_cod)
    t5 = time()
    print "Reading the file and rebuilding the AC and DC coefficients took {} " \
          "seconds".format(round(t5 - t4, 3))
    # AC decoding
    bloco_desc_dct_dpcm = zag_zig(ac, zigzag, debug, test_block)
    t6 = time()
    print "AC decoding took {} seconds".format(round(t6 - t5, 3))
    # DC decoding
    bloco_desc_dct = desc_dpcm(bloco_desc_dct_dpcm, dc)
    t7 = time()
    print "DC decoding took {} seconds".format(round(t7 - t6, 3))
if debug:
print bloco_desc_dct_dpcm[test_block]
print bloco_dct[test_block]
    bloco_rec = []
    # dequantisation and inverse DCT
    for i in xrange(n_blocos):
        bloco = descodificador(bloco_desc_dct[i], k1, alfa)
        bloco_rec.append(bloco)
    t8 = time()
    print "The inverse DCT took {} seconds".format(round(t8 - t7, 3))
if debug:
print lista_blocos[test_block]
print np.rint(bloco_rec[test_block])
print lista_blocos[test_block]-bloco_rec[test_block]
x_rec = revert_to_original_block(bloco_rec, x.shape)
cv2.imshow("Lena descodificada Q = {}".format(q), x_rec.astype(np.uint8))
cv2.waitKey(0) & 0xFF
cv2.imwrite("Lena descodificada Q = {}.jpg".format(q), x_rec.astype(np.uint8))
print "factor q = " + str(q)
print "alfa = " + str(alfa)
print "SNR = " + str(calculoSNR(x, x_rec))
    size_ini = path.getsize("samples/lena.tiff")
    size_end = path.getsize("Lena descodificada Q = {}.jpg".format(q))
    print "The original file size is {} KB".format(round(size_ini / 1024., 2))
    print "The encoded file size is {} KB".format(round(size_end / 1024., 2))
    print "The compression ratio achieved was {}".format(1. * size_ini / size_end)
    print "The compression saved {} KB".format(round((size_ini - size_end) / 1024., 2))
print "========================================================================================================"
print "========================================================================================================"
print "========================================================================================================"
print
print
main()
``` |
{
"source": "joaofilipevaz/IASA-TP3",
"score": 3
} |
#### File: agente_prosp/controlo_delib/modelo_mundo.py
```python
from psa.psa5.util import dirmov
from agente_prosp.controlo_delib.operador_mover import OperadorMover
class ModeloMundo:
def __init__(self):
self.__estado = None
self.__estados = []
self.__operadores = [OperadorMover(self, ang) for ang in dirmov()]
self.__alterado = False
        # element map (starts empty)
self.__elementos = {}
@property
def estado(self):
return self.__estado
@property
def alterado(self):
return self.__alterado
def obter_elem(self, estado):
return self.__elementos.get(estado)
def actualizar(self, percepcao):
        # agent position
if self.__elementos != percepcao.imagem:
self.__elementos = percepcao.imagem
self.__alterado = True
self.__estados = self.__elementos.keys()
else:
self.__alterado = False
self.__estado = percepcao.posicao
def operadores(self):
return self.__operadores
def estados(self):
return self.__estados
def mostrar(self, vis):
lista_obs = ["alvo", "obstaculo"]
alvos_obs = {key: value for (key, value) in self.__elementos.items() if value in lista_obs}
vis.elementos(alvos_obs)
```
#### File: src/agente_prosp/controlo.py
```python
import abc
class Controlo(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def processar(self, percepcao):
"""accao"""
``` |
{
"source": "joaofilis/Banco_Gringotes-",
"score": 4
} |
#### File: Banco_Gringotes-/Banco gringotes comeback !/financiamento.py
```python
def finan():
    i = 0
    tot_renda = 0
    print('OK, but before financing I need some information')
    pess_rend = int(input('How many people in your household have an income? '))
    while pess_rend > i:
        rend_familia = int(input('Income of this household member: '))
        tot_renda = tot_renda + rend_familia  # accumulate the household income
        i += 1  # advance the counter, otherwise this loop never ends
    temp_serv = int(input('How many years have you been employed? '))
    print('The total household income is R${:.2f}'.format(tot_renda))
    print('Welcome to online financing (be careful with what you type here)')
    # simple boolean conditions drive the approval decision below
    print('What would you like to finance?')
    print('1 - Real estate  2 - Vehicles  3 - Nothing, I came here by mistake')
    escolha = int(input('Go ahead, choose: '))
    if (escolha == 1) or (escolha == 2):
        val_fin = int(input('Enter the financing amount: '))
        val_entrada = int(input('Enter the down payment: '))
        temp_fin = int(input('Enter the financing term: '))
        tot_fin = (val_fin - val_entrada) * 1.8  # financing total with interest
        val_par = tot_fin / temp_fin  # installment value
        rend = (tot_renda * 40) / 100  # 40% share of the household income
        val_tot = val_fin - val_entrada  # amount financed
        if (tot_fin < rend) or (temp_serv <= 3):
            print('Financing amount not approved by the system')
        elif (tot_fin > rend) or (temp_serv > 3):
            print('Congratulations, Banco Gringotes approves your financing!')
        else:
            print('Something in your financing request is wrong')
    else:
        print('Thank you for using Banco Gringotes')
``` |
{
"source": "joaofonseca9/mimic3-benchmarks-TBI_edit",
"score": 2
} |
#### File: mimic3-benchmarks-TBI_edit/mimic3models/preprocessing.py
```python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
import math
import platform
import pickle
import json
import os
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from fancyimpute import IterativeImputer
from sklearn.linear_model import BayesianRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter("ignore", category=ConvergenceWarning)
class Discretizer:
def __init__(self, timestep=0.8, store_masks=True, impute_strategy='zero', start_time='zero',
config_path=os.path.join(os.path.dirname(__file__), 'resources/discretizer_config.json')):
with open(config_path) as f:
config = json.load(f)
self._id_to_channel = config['id_to_channel']
self._channel_to_id = dict(zip(self._id_to_channel, range(len(self._id_to_channel))))
self._is_categorical_channel = config['is_categorical_channel']
self._possible_values = config['possible_values']
self._normal_values = config['normal_values']
self._header = ["Hours"] + self._id_to_channel
self._timestep = timestep
self._store_masks = store_masks
self._start_time = start_time
self._impute_strategy = impute_strategy
# for statistics
self._done_count = 0
self._empty_bins_sum = 0
self._unused_data_sum = 0
def transform(self, X, header=None, end=None):
if header is None:
header = self._header
assert header[0] == "Hours"
eps = 1e-6
N_channels = len(self._id_to_channel)
ts = [float(row[0]) for row in X]
for i in range(len(ts) - 1):
assert ts[i] < ts[i+1] + eps
if self._start_time == 'relative':
first_time = ts[0]
elif self._start_time == 'zero':
first_time = 0
else:
raise ValueError("start_time is invalid")
if end is None:
max_hours = max(ts) - first_time
else:
max_hours = end - first_time
N_bins = int(max_hours / self._timestep + 1.0 - eps)
# print('N_bins:',N_bins)
# print('N_channels:',N_channels)
cur_len = 0
begin_pos = [0 for i in range(N_channels)]
end_pos = [0 for i in range(N_channels)]
for i in range(N_channels):
channel = self._id_to_channel[i]
begin_pos[i] = cur_len
if self._is_categorical_channel[channel]:
end_pos[i] = begin_pos[i] + len(self._possible_values[channel])
else:
end_pos[i] = begin_pos[i] + 1
cur_len = end_pos[i]
data = np.zeros(shape=(N_bins, cur_len), dtype=float)
mask = np.zeros(shape=(N_bins, N_channels), dtype=int)
original_value = [["" for j in range(N_channels)] for i in range(N_bins)]
total_data = 0
unused_data = 0
def write(data, bin_id, channel, value, begin_pos):
channel_id = self._channel_to_id[channel]
if self._is_categorical_channel[channel]:
category_id = self._possible_values[channel].index(value)
N_values = len(self._possible_values[channel])
one_hot = np.zeros((N_values,))
one_hot[category_id] = 1
for pos in range(N_values):
data[bin_id, begin_pos[channel_id] + pos] = one_hot[pos]
else:
data[bin_id, begin_pos[channel_id]] = float(value)
for row in X:
t = float(row[0]) - first_time
if t > max_hours + eps:
continue
bin_id = int(t / self._timestep - eps)
assert 0 <= bin_id < N_bins
for j in range(1, len(row)):
if row[j] == "":
continue
channel = header[j]
channel_id = self._channel_to_id[channel]
total_data += 1
if mask[bin_id][channel_id] == 1:
unused_data += 1
mask[bin_id][channel_id] = 1
# if row[j]=='10k':
# # print('Found value ',row[j],' in channel ', channel)
# row[j]='10'
                # prevent mis-imputed data from raising an error
if not self._is_categorical_channel[channel]:
try:
value=float(row[j])
except:
row[j] = float(self._normal_values[channel])
write(data, bin_id, channel, row[j], begin_pos)
original_value[bin_id][channel_id] = row[j]
# impute missing values
if self._impute_strategy not in ['zero', 'normal_value', 'previous', 'next', 'mice']:
raise ValueError("impute strategy is invalid")
if self._impute_strategy in ['normal_value', 'previous']:
prev_values = [[] for i in range(len(self._id_to_channel))]
for bin_id in range(N_bins):
for channel in self._id_to_channel:
channel_id = self._channel_to_id[channel]
if mask[bin_id][channel_id] == 1:
prev_values[channel_id].append(original_value[bin_id][channel_id])
continue
if self._impute_strategy == 'normal_value':
imputed_value = self._normal_values[channel]
if self._impute_strategy == 'previous':
if len(prev_values[channel_id]) == 0:
imputed_value = self._normal_values[channel]
else:
imputed_value = prev_values[channel_id][-1]
write(data, bin_id, channel, imputed_value, begin_pos)
if self._impute_strategy == 'next':
prev_values = [[] for i in range(len(self._id_to_channel))]
for bin_id in range(N_bins-1, -1, -1):
for channel in self._id_to_channel:
channel_id = self._channel_to_id[channel]
if mask[bin_id][channel_id] == 1:
prev_values[channel_id].append(original_value[bin_id][channel_id])
continue
if len(prev_values[channel_id]) == 0:
imputed_value = self._normal_values[channel]
else:
imputed_value = prev_values[channel_id][-1]
write(data, bin_id, channel, imputed_value, begin_pos)
if self._impute_strategy == 'mice':
mice_imputer = IterativeImputer(max_iter=20, random_state=42, sample_posterior=True)
categorical=[]
for chan in self._is_categorical_channel:
if self._is_categorical_channel[chan]:
categorical.append(chan)
cat_channels=list(self._possible_values.values())
cat_channels = [x for x in cat_channels if x != []]
columnTransformer = ColumnTransformer([('ohe', OneHotEncoder(categories=cat_channels,handle_unknown='ignore',sparse=False),categorical)], remainder='passthrough')
X_df=pd.DataFrame(X[:,1:],columns=header[1:])
X_df[X_df==""]=np.nan
X_ohe = columnTransformer.fit_transform(X_df)
X_ohe=pd.DataFrame(X_ohe,columns=columnTransformer.get_feature_names_out())
all_missing=[]
for col in X_ohe:
if X_ohe[col].isnull().all():
all_missing.append(col)
for ft_name in self._normal_values:
for missing_ft in all_missing:
if ft_name in missing_ft:
X_ohe[missing_ft]=float(self._normal_values[ft_name])
X_mice=mice_imputer.fit_transform(X_ohe)
X_mice=np.concatenate((X[:,0].reshape(-1,1),X_mice),axis=1)
for row in X_mice:
t = float(row[0]) - first_time
if t > max_hours + eps:
continue
bin_id = int(t / self._timestep - eps)
assert 0 <= bin_id < N_bins
data[bin_id,:]=row[1:]
empty_bins = np.sum([1 - min(1, np.sum(mask[i, :])) for i in range(N_bins)])
self._done_count += 1
self._empty_bins_sum += empty_bins / (N_bins + eps)
self._unused_data_sum += unused_data / (total_data + eps)
if self._store_masks:
data = np.hstack([data, mask.astype(np.float32)])
# create new header
new_header = []
for channel in self._id_to_channel:
if self._is_categorical_channel[channel]:
values = self._possible_values[channel]
for value in values:
new_header.append(channel + "->" + value)
else:
new_header.append(channel)
if self._store_masks:
for i in range(len(self._id_to_channel)):
channel = self._id_to_channel[i]
new_header.append("mask->" + channel)
new_header = ",".join(new_header)
return (data, new_header)
def print_statistics(self):
print("statistics of discretizer:")
print("\tconverted {} examples".format(self._done_count))
print("\taverage unused data = {:.2f} percent".format(100.0 * self._unused_data_sum / self._done_count))
print("\taverage empty bins = {:.2f} percent".format(100.0 * self._empty_bins_sum / self._done_count))
class Normalizer:
def __init__(self, fields=None):
self._means = None
self._stds = None
self._fields = None
if fields is not None:
self._fields = [col for col in fields]
self._sum_x = None
self._sum_sq_x = None
self._count = 0
def _feed_data(self, x):
x = np.array(x)
self._count += x.shape[0]
if self._sum_x is None:
self._sum_x = np.sum(x, axis=0)
self._sum_sq_x = np.sum(x**2, axis=0)
else:
self._sum_x += np.sum(x, axis=0)
self._sum_sq_x += np.sum(x**2, axis=0)
def _save_params(self, save_file_path):
eps = 1e-7
with open(save_file_path, "wb") as save_file:
N = self._count
self._means = 1.0 / N * self._sum_x
# print(self._means)
self._stds = np.sqrt(1.0/(N - 1) * (self._sum_sq_x - 2.0 * self._sum_x * self._means + N * self._means**2))
# print(self._stds)
self._stds[self._stds < eps] = eps
self._stds=np.nan_to_num(self._stds, nan=eps)
print(self._stds)
pickle.dump(obj={'means': self._means,
'stds': self._stds},
file=save_file,
protocol=2)
def load_params(self, load_file_path):
with open(load_file_path, "rb") as load_file:
if platform.python_version()[0] == '2':
dct = pickle.load(load_file)
else:
dct = pickle.load(load_file, encoding='latin1')
self._means = dct['means']
self._stds = dct['stds']
def transform(self, X):
if self._fields is None:
fields = range(X.shape[1])
else:
fields = self._fields
ret = 1.0 * X
for col in fields:
ret[:, col] = (X[:, col] - self._means[col]) / self._stds[col]
return ret
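def _demo_normalizer():
    # Added sketch (illustrative data): feed one batch, persist the params,
    # reload them and standardise; each column of the result then has zero
    # mean and unit sample variance.
    import tempfile
    norm = Normalizer()
    data = np.array([[1.0, 10.0], [3.0, 30.0], [5.0, 50.0]])
    norm._feed_data(data)
    params = os.path.join(tempfile.gettempdir(), 'demo_normalizer.pkl')
    norm._save_params(params)
    norm.load_params(params)
    print(norm.transform(data))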
``` |
{
"source": "joaofracasso/banknoteBrazil",
"score": 3
} |
#### File: src/modeling/predict_model.py
```python
import io
import torchvision.transforms as transforms
from PIL import Image
import onnxruntime as ort
import numpy as np
class_map = {
0: "10 Reais Frente",
1: "10 Reais Verso",
2: "20 Reais Frente",
3: "20 Reais Verso",
4: "2 Reais Frente",
5: "2 Reais Verso",
6: "50 Reais Frente",
7: "50 Reais Verso",
8: "5 Reais Frente",
9: "5 Reais Verso"
}
def transform_image(image_bytes):
my_transforms = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image = Image.open(io.BytesIO(image_bytes))
return my_transforms(image).unsqueeze_(0)
def get_prediction(image_bytes, inference_session):
tensor = transform_image(image_bytes=image_bytes)
outputs = inference_session.run(None, {'input.1': tensor.numpy()})
y_hat = np.argmax(outputs[0], axis=1)[0]
return class_map[y_hat]
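# Note (added): 'input.1' is the input tensor name this model's ONNX export
# happens to use; if the export changes, look it up dynamically with
# inference_session.get_inputs()[0].name instead of hard-coding it.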
if __name__ == "__main__":
ort_session = ort.InferenceSession('app/models/banknote_best.onnx')
filename = [
"data/validation/2reaisFrente/compressed_0_1835891.jpeg",
'data/validation/2reaisVerso/compressed_0_3752849.jpeg',
"data/validation/5reaisFrente/compressed_0_1986857.jpeg",
"data/validation/5reaisVerso/compressed_0_4651610.jpeg",
"data/validation/10reaisFrente/compressed_0_2854543.jpeg",
"data/validation/10reaisVerso/compressed_0_2175135.jpeg",
'data/validation/20reaisFrente/compressed_0_1516768.jpeg',
'data/validation/20reaisVerso/compressed_0_3080811.jpeg',
'data/validation/50reaisFrente/compressed_0_1478513.jpeg',
'data/validation/50reaisVerso/compressed_0_3923784.jpeg']
for img in filename:
with open(img, 'rb') as f:
image_bytes = f.read()
tensor = get_prediction(image_bytes, ort_session)
print(tensor)
``` |
{
"source": "joaofracasso/e2e-asr-pt-br",
"score": 3
} |
#### File: src/datasets/alcaim.py
```python
import os
from typing import Tuple, Union
from pathlib import Path
import re
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
from unidecode import unidecode
URL = "alcaim"
FOLDER_IN_ARCHIVE = "alcaim"
_CHECKSUMS = {
"http://www.openslr.org/resources/12/dev-clean.tar.gz":
"76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3"
}
def load_alcaim_item(fileid: str,
ext_audio: str,
                     ext_txt: str) -> Tuple[Tensor, int, str]:
file_text = os.path.splitext(fileid)[0] + ext_txt
# Load audio
waveform, sample_rate = torchaudio.load(fileid)
# Load text
with open(file_text, 'r', encoding='utf8') as ft:
utterance = ft.readlines()[0].strip()
return (
waveform,
sample_rate,
re.sub('[^A-Za-z ]+', '', unidecode(utterance)),
)
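# Expected on-disk layout (an assumption inferred from the glob pattern in
# Alcaim.__init__ below, not from official documentation):
#   <root>/alcaim/<speaker>/<utterance>.wav
#   <root>/alcaim/<speaker>/<utterance>.txt   (first line = transcript)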
class Alcaim(Dataset):
"""Create a Dataset for alcaim.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to dowload.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"alcaim"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".txt"
_ext_audio = ".wav"
def __init__(self,
root: Union[str, Path],
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False) -> None:
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
self._path = os.path.join(root, folder_in_archive)
#if download:
# if not os.path.isdir(self._path):
# if not os.path.isfile(archive):
# checksum = _CHECKSUMS.get(URL, None)
# download_url(URL, root, hash_value=checksum)
# extract_archive(archive)
self._walker = sorted(str(p) for p in Path(self._path).glob('*/*' + self._ext_audio))
    def __getitem__(self, n: int) -> Tuple[Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            tuple: ``(waveform, sample_rate, utterance)``
"""
fileid = self._walker[n]
return load_alcaim_item(fileid, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._walker)
``` |
{
"source": "joaofracasso/semanticGP",
"score": 3
} |
#### File: semanticGP/semanticGP/primitives.py
```python
import operator
import math
import numpy as np
def add(a, b):
try: return operator.add(a, b)
except (OverflowError, ValueError): return 1.0
def sub(a, b):
try: return operator.sub(a, b)
except (OverflowError, ValueError): return 1.0
def mul(a, b):
try:
result = operator.mul(a, b)
if abs(result) > 1e43:
return 1.0
else:
return result
except (OverflowError, ValueError): return 1.0
def div(left, right):
if (abs(right) > 1e-43):
result = left / right
return result
else:
return 1.0
def pow(x, y):
try:
result = math.pow(x, y)
if abs(result) > 1e43:
return 1.0
else:
return result
except (OverflowError, ValueError): return 1.0
def exp(value):
if value < 1e2:
return math.exp(value)
else:
return 1.0
def log(value):
if (value > 1e-100):
return math.log(value)
else:
return 1.0
def sin(x):
try: return math.sin(x)
except (OverflowError, ValueError): return 1.0
def cos(x):
try: return math.cos(x)
except (OverflowError, ValueError): return 1.0
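# Added sketch (illustrative): the protected operators keep GP evaluation
# total by returning 1.0 instead of raising on invalid or overflowing input.
def _demo_protected_ops():
    assert div(1.0, 0.0) == 1.0      # protected division
    assert log(-3.0) == 1.0          # protected logarithm
    assert pow(10.0, 1000.0) == 1.0  # overflow clamped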
``` |
{
"source": "joaofrancese/heavy-destruction",
"score": 2
} |
#### File: Panda/src/cameraHandler.py
```python
from pandac.PandaModules import Point3, TransparencyAttrib, CardMaker
from direct.interval.IntervalGlobal import Sequence, LerpPosInterval, LerpColorInterval
from direct.gui.OnscreenImage import OnscreenImage
from pandaUtils import hideMouse
# Adapted from: http://www.panda3d.org/forums/viewtopic.php?t=9143
class CameraHandler():
def __init__(self, character):
self.character = character
# Setup mouse
base.disableMouse()
hideMouse(True)
self.mouseSensitivity = 0.1
base.taskMgr.doMethodLater(0.1, self.prepareCameraTask, "prepare-camera")
# Setup camera
base.camera.reparentTo(self.character.node)
base.camera.setPos(0, 0, 0)
base.camera.lookAt(0, 1, 0)
# Create target
self.target = OnscreenImage(image = "media/target.png", pos = (0, 0, 0))
self.target.setTransparency(TransparencyAttrib.MAlpha)
self.target.setScale(0.1)
self.target.setSa(0.5)
# Create overlay
self.overlayCard = CardMaker("overlayCard")
self.overlayCard.setFrameFullscreenQuad()
self.overlay = base.render2d.attachNewNode(self.overlayCard.generate())
self.overlay.setTransparency(TransparencyAttrib.MAlpha)
self.overlay.setColor(0,0,0,0)
# Setup interval sequences
self.shakeSequence = None
self.flashSequence = None
def shake(self, amplitude = (1,0,0), duration = 1.0, swings = 1):
if self.shakeSequence != None:
self.shakeSequence.finish()
self.shakeSequence = Sequence()
swings = int(swings)
duration = float(duration)
dt = duration / (swings * 4)
ds = Point3(amplitude)
for i in range(swings):
self.shakeSequence.append(LerpPosInterval(base.camera, dt, ds*-1))
self.shakeSequence.append(LerpPosInterval(base.camera, dt*2, ds))
self.shakeSequence.append(LerpPosInterval(base.camera, dt, Point3(0, 0, 0)))
self.shakeSequence.start()
def flash(self, color = (1,1,1,1), duration = 1.0, fadeIn = 0.2):
if self.flashSequence != None:
self.flashSequence.finish()
self.flashSequence = Sequence()
dtIn = float(duration) * fadeIn
dtOut = duration - dtIn
if dtIn > 0:
self.flashSequence.append(LerpColorInterval(self.overlay, dtIn, color))
if dtOut > 0:
self.flashSequence.append(LerpColorInterval(self.overlay, dtOut, (0,0,0,0), color))
self.flashSequence.start()
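    # Illustrative usage (added note; the magnitudes are assumptions, not
    # tuned values): a short hit reaction could combine both effects, e.g.
    #   self.shake(amplitude=(0.5, 0, 0.2), duration=0.3, swings=2)
    #   self.flash(color=(1, 0, 0, 0.4), duration=0.5)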
def prepareCameraTask(self, task):
base.win.movePointer(0, base.win.getXSize()/2, base.win.getYSize()/2)
base.taskMgr.add(self.controlCameraTask, "camera-control")
return task.done
def controlCameraTask(self, task):
char = self.character.node
# Get current mouse location.
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
# Rotate character based on mouse coordinates.
if base.win.movePointer(0, base.win.getXSize()/2, base.win.getYSize()/2):
char.setP((char.getP() - (y - base.win.getYSize()/2)*self.mouseSensitivity) % 360)
char.setH((char.getH() - (x - base.win.getXSize()/2)*self.mouseSensitivity) % 360)
# Don't let the camera loop over. Allowed range is 0-90 (up) and 360-270 (down).
if char.getP() > 90 and char.getP() < 180:
char.setP(90)
elif char.getP() < 270 and char.getP() >= 180:
char.setP(270)
return task.cont
```
#### File: Panda/src/character.py
```python
from pandac.PandaModules import OdeBody, OdeMass, OdeBoxGeom, Vec3, Point3
from objects.gameObject import GameObject
from pandaUtils import SoundWrapper, sign
class Character(GameObject):
def __init__(self, world):
GameObject.__init__(self, world)
# Set the speed parameters
self.vel = Vec3(0, 0, 0)
self.strafespeed = 20.0
self.forwardspeed = 32.0
self.backspeed = 24.0
self.jumpspeed = 20
self.wasJumping = False
# Set character dimensions
self.size = (4.0, 3.0, 10.0)
self.eyeHeight = 9.0
self.offset = Point3(0, 0, self.eyeHeight - (self.size[2]/2))
# Create character node
self.node = base.render.attachNewNode("character")
self.node.setPos(0, 0, self.eyeHeight)
self.node.lookAt(0, 1, self.eyeHeight)
# Create physics representation
self.mass = OdeMass()
self.mass.setBox(50, *self.size)
self.body = OdeBody(world.world)
self.body.setMass(self.mass)
self.updatePhysicsFromPos()
self.body.setData(self)
self.geom = OdeBoxGeom(world.space, Vec3(*self.size))
self.geom.setBody(self.body)
world.space.setSurfaceType(self.geom, world.surfaces["box"])
# Adjust collision bitmasks.
self.geom.setCategoryBits(GameObject.bitmaskCharacter)
self.geom.setCollideBits(GameObject.bitmaskAll & ~GameObject.bitmaskBullet)
# Setup event handling
self.keys = [0, 0, 0, 0, 0]
self.setupKeys()
base.taskMgr.add(self.moveTask, "character-move")
# Create footsteps sound
self.footstepsSound = base.loader.loadSfx("media/footsteps.wav")
self.footstepsSound.setLoop(1)
self.footsteps = SoundWrapper(self.footstepsSound)
# Create other sounds.
self.jumpSound = base.loader.loadSfx("media/jump_start.wav")
self.landSound = base.loader.loadSfx("media/jump_fall.wav")
def updatePosFromPhysics(self):
self.node.setPos(render, self.body.getPosition() + self.offset)
self.body.setAngularVel(0, 0, 0)
def updatePhysicsFromPos(self):
self.body.setPosition(self.node.getPos() - self.offset)
self.body.setQuaternion(self.node.getQuat())
def getDir(self):
return base.render.getRelativeVector(self.node, (0, 1, 0))
def moveTo(self, pos):
self.node.setPos(pos)
self.updatePhysicsFromPos()
def recoil(self, mag):
vel = self.body.getLinearVel()
diff = self.getDir() * mag
# Limit recoil
if sign(vel[0]) != sign(diff[0]) and abs(vel[0]) > 15: diff[0] = 0
if sign(vel[1]) != sign(diff[1]) and abs(vel[1]) > 15: diff[1] = 0
diff[2] = 0
self.body.setLinearVel(vel - diff)
def jump(self):
vel = self.body.getLinearVel()
self.body.setLinearVel(vel[0], vel[1], vel[2] + self.jumpspeed)
self.jumpSound.play()
def isJumping(self):
return abs(self.body.getLinearVel()[2]) > 0.05
def setKey(self, button, value):
self.keys[button] = value
def setupKeys(self):
base.accept("w", self.setKey, [0, 1]) #forward
base.accept("s", self.setKey, [1, 1]) #back
base.accept("a", self.setKey, [2, 1]) #strafe left
base.accept("d", self.setKey, [3, 1]) #strafe right
base.accept("space", self.setKey, [4, 1]) #jump
base.accept("w-up", self.setKey, [0, 0])
base.accept("s-up", self.setKey, [1, 0])
base.accept("a-up", self.setKey, [2, 0])
base.accept("d-up", self.setKey, [3, 0])
base.accept("space-up", self.setKey, [4, 0])
def moveTask(self, task):
# Initialize variables
elapsed = globalClock.getDt()
x = 0.0
y = 0.0
jumping = self.isJumping()
# Calculate movement vector.
if self.keys[0] != 0:
y = self.forwardspeed
if self.keys[1] != 0:
y = -self.backspeed
if self.keys[2] != 0:
x = -self.strafespeed
if self.keys[3] != 0:
x = self.strafespeed
self.vel = Vec3(x, y, 0)
self.vel *= elapsed
# Move the character along the ground.
hpr = self.node.getHpr()
self.node.setP(0)
self.node.setR(0)
self.node.setPos(self.node, self.vel)
self.updatePhysicsFromPos()
self.node.setHpr(hpr)
# Play landing sound (if applicable).
if self.wasJumping and not jumping:
pass #Landing detection not working.
#self.landSound.play()
# Jump (if applicable).
if self.keys[4] and not jumping:
self.jump()
self.wasJumping = jumping
# Play footsteps if walking.
if not jumping and (self.keys[0] != 0 or self.keys[1] != 0 or self.keys[2] != 0 or self.keys[3] != 0):
self.footsteps.resume()
else:
self.footsteps.pause()
return task.cont
```
#### File: src/objects/box.py
```python
from pandac.PandaModules import OdeBody, OdeMass, OdeBoxGeom, Vec3, Texture, TextureStage
from gameObject import GameObject
from cement import Cement
from random import random
from pandaUtils import makeVec4Color
from vecUtils import vecFromList, getNeutralDir
#more imports at the end of the file
class Box(GameObject):
disableGracePeriod = 20
def __init__(self, world, parent, color, pos, dir, size, density, unglueThreshold=None, shatterLimit=None, shatterThreshold=None):
GameObject.__init__(self, world)
if unglueThreshold == None: unglueThreshold = 20
if shatterLimit == None: shatterLimit = 0
if shatterThreshold == None: shatterThreshold = 30
self.size = size
self.density = density
self.dir = dir
self.parent = parent
self.color = color = makeVec4Color(color)
self.node = parent.attachNewNode("")
self.node.setPos(*pos)
self.node.setColorScale(color)
self.node.setScale(*size)
self.node.lookAt(self.node, *dir)
self.model = loader.loadModel("models/box.egg")
self.model.reparentTo(self.node)
self.model.setScale(2.0)
self.model.setPos(-1.0, -1.0, -1.0)
self.applyTexture()
self.mass = OdeMass()
self.mass.setBox(density, Vec3(*size) * 2)
self.body = OdeBody(world.world)
self.body.setMass(self.mass)
self.body.setPosition(self.node.getPos())
self.body.setQuaternion(self.node.getQuat())
self.body.setData(self)
self.geom = OdeBoxGeom(world.space, Vec3(*size) * 2.0)
self.geom.setBody(self.body)
world.space.setSurfaceType(self.geom, world.surfaces["box"])
# Adjust collision bitmasks.
self.geom.setCategoryBits(GameObject.bitmaskBox)
self.geom.setCollideBits(GameObject.bitmaskAll & ~GameObject.bitmaskTileGlued)
# Tile, cement and shatter variables.
self.tiles = []
self.cements = []
self.disableCount = 0
self.unglueThreshold = unglueThreshold
self.shatterLimit = shatterLimit
self.shatterThreshold = shatterThreshold
def applyTexture(self):
self.texture = loader.loadTexture("media/brick_wall.tga")
self.texture.setWrapU(Texture.WMRepeat)
self.texture.setWrapV(Texture.WMRepeat)
self.model.setTexture(self.texture, 1)
self.model.setTexScale(TextureStage.getDefault(), max(self.size[0], self.size[1]), self.size[2])
def addTile(self, tile):
if tile not in self.tiles:
self.tiles.append(tile)
def removeTile(self, tile):
if tile in self.tiles:
self.tiles.remove(tile)
def addCement(self, cement):
if cement not in self.cements:
self.cements.append(cement)
def removeCement(self, cement):
if cement in self.cements:
self.cements.remove(cement)
def destroy(self):
for tile in self.tiles:
tile.unglue()
for cement in self.cements:
cement.destroy()
GameObject.destroy(self)
def makeTiles(self, xNeg=False, xPos=False, yNeg=False, yPos=False, zNeg=False, zPos=False, thickness=0.1, unglueThreshold=None, shatterLimit=None, shatterThreshold=None):
if xNeg: Tile(self, self.color, (-1,0,0), thickness, self.density, unglueThreshold, shatterLimit, shatterThreshold)
if xPos: Tile(self, self.color, ( 1,0,0), thickness, self.density, unglueThreshold, shatterLimit, shatterThreshold)
if yNeg: Tile(self, self.color, (0,-1,0), thickness, self.density, unglueThreshold, shatterLimit, shatterThreshold)
if yPos: Tile(self, self.color, (0, 1,0), thickness, self.density, unglueThreshold, shatterLimit, shatterThreshold)
if zNeg: Tile(self, self.color, (0,0,-1), thickness, self.density, unglueThreshold, shatterLimit, shatterThreshold)
if zPos: Tile(self, self.color, (0,0, 1), thickness, self.density, unglueThreshold, shatterLimit, shatterThreshold)
def onCollision(self, otherBody, entry):
if otherBody.isEmpty():
return
self.disableCount = 0
speed = otherBody.getData().body.getLinearVel()
        # if otherBody.getData().__class__ == Bullet: print speed.length(), self.shatterThreshold, self.shatterLimit
if self.active and speed.length() >= self.shatterThreshold and self.shatterLimit > 0:
adj = otherBody.getData().body.getMass().getMagnitude() / self.body.getMass().getMagnitude()
speedMag = speed.length() * adj
speedBase = ((speed * adj) + (self.body.getLinearVel() * 2) / 3)
self.shatter(speedMag, speedBase)
def shatter(self, speedMag, speedBase):
#print 'box shatter' #########
self.destroy()
taskMgr.add(self.spawnTask, "box-spawn", extraArgs=[speedMag, speedBase])
# Graphic (camera shake) and sound effects
self.world.game.cameraHandler.shake((0,0,2), 0.1)
sound = base.loader.loadSfx("media/shatter.wav")
sound.setVolume(1.5)
sound.play()
def spawnTask(self, speedMag, speedBase):
pos = self.node.getPos()
size = Vec3(self.size) / 2
w = 1
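        # Spawn eight half-size boxes, one per octant of the destroyed box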
for i in [-w, w]:
for j in [-w, w]:
for k in [-w, w]:
box = Box(self.world, self.parent, self.color,
(pos[0] + (i * size[0]), pos[1] + (j * size[1]), pos[2] + (k * size[2])),
self.dir, size, self.density, self.unglueThreshold, self.shatterLimit - 1, self.shatterThreshold * 1)
speed = (speedBase * (1.5 + random())) + (Vec3(i,j,k) * speedMag * (1 + random()))
speed = speed / 2.0
box.body.setLinearVel(speed)
box.body.setAngularVel(speedMag * random(), speedMag * random(), speedMag * random())
taskMgr.add(box.disableOnStopTask, "box-disableOnStop")
def disableOnStopTask(self, task):
if self.body.getLinearVel().length() > 0.1 or self.body.getAngularVel().length() > 0.1:
self.disableCount = 0
return task.cont
elif self.disableCount < Box.disableGracePeriod:
self.disableCount += 1
return task.cont
else:
self.visibleAfterDestroy = True
if DEBUG: self.node.setColorScale(1.0, 2.0, 1.0, 0.5)
self.destroy()
return task.done
def createWall(world, parent, color, pos, size, density, quantity, shatterLimit=None, tileThickness=None, tileShatterLimit=None):
boxes = []
diffBase = vecFromList(size) * 2
dir = getNeutralDir()
for i in range(quantity[0]):
boxes.append([])
for j in range(quantity[1]):
boxes[i].append([])
for k in range(quantity[2]):
diff = Vec3(diffBase[0]*i, diffBase[1]*j, diffBase[2]*k)
box = Box(world, parent, color, Vec3(*pos) + diff, dir, size, density, shatterLimit=shatterLimit)
boxes[i][j].append(box)
if tileThickness != None:
box.makeTiles( xNeg = i == 0, xPos = i == quantity[0] - 1,
yNeg = j == 0, yPos = j == quantity[1] - 1,
zNeg = False, zPos = k == quantity[2] - 1,
thickness = tileThickness, shatterLimit = tileShatterLimit)
if i > 0: Cement(boxes[i][j][k], boxes[i-1][j][k])
if j > 0: Cement(boxes[i][j][k], boxes[i][j-1][k])
if k > 0: Cement(boxes[i][j][k], boxes[i][j][k-1])
                if k == 0: Cement(boxes[i][j][k], None)  # anchor the bottom layer to the world
return boxes
from tile import Tile
```
#### File: src/objects/cement.py
```python
from pandac.PandaModules import OdeFixedJoint
class Cement:
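    # Glues two game objects together with a fixed ODE joint; passing None for
    # either side anchors the other object to the static world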
def __init__(self, one, two):
b1 = None
b2 = None
self.one = one
if one != None:
one.addCement(self)
b1 = one.body
self.two = two
if two != None:
two.addCement(self)
b2 = two.body
        self.joint = OdeFixedJoint((one if one is not None else two).world.world)
self.joint.attach(b1, b2)
self.joint.set()
self.active = True
def destroy(self):
if not self.active:
return
self.joint.destroy()
if self.one != None:
self.one.removeCement(self)
if self.two != None:
self.two.removeCement(self)
self.active = False
```
#### File: src/objects/gameObject.py
```python
from pandac.PandaModules import Quat, BitMask32
import threading
class GameObject:
bitmaskAll = BitMask32.allOn()
bitmaskDefault = BitMask32.bit(0)
bitmaskBullet = BitMask32.bit(1)
bitmaskCharacter = BitMask32.bit(2)
bitmaskBox = BitMask32.bit(3)
bitmaskTileGlued = BitMask32.bit(4)
bitmaskTile = BitMask32.bit(5)
def __init__(self, world):
world.addObject(self)
self.world = world
self.body = None
self.geom = None
self.node = None
self.active = True
self.visibleAfterDestroy = False
self.dissipateCountdown = 0
def updatePosFromPhysics(self):
if self.body != None:
self.node.setPosQuat(render, self.body.getPosition(), Quat(self.body.getQuaternion()))
def getMomentum(self):
mass = self.body.getMass().getMagnitude()
speed = self.body.getLinearVel().length()
return mass * speed
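    # dissipate() damps this body's velocities once, then ignores further calls
    # for `interval` physics steps; dissipateRecover() ticks the countdown back
    # down via the world's after-step hook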
def dissipate(self, factor, interval=5):
if self.body == None or self.dissipateCountdown > 0:
return
self.body.setLinearVel(self.body.getLinearVel() * factor)
self.body.setAngularVel(self.body.getAngularVel() * factor)
self.dissipateCountdown = interval + 1
self.dissipateRecover()
def dissipateRecover(self):
self.dissipateCountdown -= 1
if self.dissipateCountdown > 0:
self.world.performAfterStep(self.dissipateRecover, [])
def onCollision(self, otherBody, entry):
pass
def destroy(self):
self.active = False
self.world.removeObject(self)
def doDestroy(self):
if self.node != None and not self.visibleAfterDestroy:
self.node.detachNode()
if self.geom != None:
self.geom.disable()
if self.body != None:
self.body.disable()
return
# This is the code that actually removes the elements instead of just disabling them.
# But it doesn't work.
obj = self
print obj #DEBUG
print threading.current_thread() #DEBUG
print "0", #DEBUG
if obj.node != None:
obj.node.detachNode()
obj.node = None
print "1", #DEBUG
if obj.geom != None:
self.world.space.remove(obj.geom) #Probably unnecessary
print "2", #DEBUG
if obj.geom != None:
obj.geom.destroy()
obj.geom = None
print "3", #DEBUG
if obj.body != None:
obj.body.destroy()
obj.body = None
print "4" #DEBUG
```
#### File: src/objects/sphere.py
```python
from pandac.PandaModules import OdeBody, OdeMass, OdeSphereGeom
from gameObject import GameObject
from objects.ripple import Ripple
class Sphere(GameObject):
def __init__(self, world, parent, color, pos, dir, radius, density, posParent=None):
GameObject.__init__(self, world)
self.node = parent.attachNewNode("")
if posParent == None:
self.node.setPos(*pos)
else:
self.node.setPos(posParent, *pos)
self.node.setColor(*color)
self.node.setScale(radius)
self.node.lookAt(self.node, *dir)
self.parent = parent
self.color = color
self.scale = radius
self.model = loader.loadModel("models/smiley.egg")
self.model.reparentTo(self.node)
self.mass = OdeMass()
self.mass.setSphere(density, radius)
self.body = OdeBody(world.world)
self.body.setMass(self.mass)
self.body.setPosition(self.node.getPos())
self.body.setQuaternion(self.node.getQuat())
self.body.setData(self)
self.geom = OdeSphereGeom(world.space, radius)
self.geom.setBody(self.body)
world.space.setSurfaceType(self.geom, world.surfaces["sphere"])
def onCollision(self, otherBody, entry):
if otherBody.isEmpty(): # Collision on a wall
geom = entry.getContactGeom(0)
Ripple(self.parent, self.color, geom.getPos(), geom.getNormal() * -1, self.scale * 2.5)
```
#### File: Panda/src/pandaUtils.py
```python
from pandac.PandaModules import AudioSound, WindowProperties, Vec4, loadPrcFileData
def setWindowTitle(title):
wp = WindowProperties()
wp.setTitle(title)
base.win.requestProperties(wp)
def preSetWindowIcon(filename):
# Must be called before ShowBase().
loadPrcFileData("", "icon-filename " + filename)
def centerWindow():
curProps = base.win.getProperties()
wp = WindowProperties()
wp.setOrigin((base.pipe.getDisplayWidth() - curProps.getXSize()) / 2,
(base.pipe.getDisplayHeight() - curProps.getYSize()) / 2)
base.win.requestProperties(wp)
def hideMouse(hidden):
wp = WindowProperties()
wp.setCursorHidden(hidden)
base.win.requestProperties(wp)
def toggleFullscreen():
wp = WindowProperties()
wp.setFullscreen(not base.win.isFullscreen())
base.win.requestProperties(wp)
def makeVec4Color(color):
return Vec4(color[0], color[1], color[2], color[3] if len(color) == 4 else 1.0)
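# Note: sign() below relies on Python 2's cmp(); a Python 3 equivalent would be
# (num > 0) - (num < 0).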
def sign(num):
return cmp(num, 0)
class SoundWrapper():
def __init__(self, sound):
self.sound = sound
self.pos = 0
def play(self):
self.sound.play()
def stop(self):
self.sound.stop()
def pause(self):
if self.sound.status() == AudioSound.PLAYING:
self.pos = self.sound.getTime()
self.sound.stop()
def resume(self):
if self.sound.status() == AudioSound.READY:
self.sound.setTime(self.pos)
self.sound.play()
```
#### File: Panda/src/scene.py
```python
from pandac.PandaModules import Point3, Vec3, VBase4, PointLight, AmbientLight, Fog
from objects.plane import Plane
from objects.sphere import Sphere
from objects.spinner import Spinner
from objects.box import createWall
class Scene:
def __init__(self, world, parent, cameraHandler, character):
self.world = world
self.parent = parent
self.objects = []
self.boundaries = []
self.cameraHandler = cameraHandler
self.character = character
self.bgm = base.loader.loadSfx("media/ambient.ogg")
self.bgm.setVolume(2.0)
self.bgm.setLoop(True)
self.bgm.play()
def createBoundaries(self, sizeVector, centerPos):
x, y, z = centerPos
sx, sy, sz = sizeVector
b = []
b.append(Plane(self.world, self.parent, (x, y, z - sz), (0, 0,-1), sx, sy, (0.7, 0.7, 0.8, 0.0), "floor", "media/floor_quake.png"))
b.append(Plane(self.world, self.parent, (x, y, z + sz), (0, 0, 1), sx, sy, (0.4, 0.4, 0.4, 0.0), "ceiling", "media/floor_metal_plate.tga"))
b.append(Plane(self.world, self.parent, (x - sx, y, z), (-1, 0,0), sy, sz, (0.38, 0.40, 0.35, 0.0), "left", "media/brick_wall.tga"))
b.append(Plane(self.world, self.parent, (x + sx, y, z), ( 1, 0,0), sy, sz, (0.38, 0.38, 0.38, 0.0), "right", "media/brick_wall.tga"))
b.append(Plane(self.world, self.parent, (x, y - sy, z), (0,-1, 0), sx, sz, (0.35, 0.40, 0.35, 0.0), "front", "media/brick_wall.tga"))
b.append(Plane(self.world, self.parent, (x, y + sy, z), (0, 1, 0), sx, sz, (0.35, 0.40, 0.40, 0.0), "back", "media/brick_wall.tga"))
return b
def setupLighting(self, sizeVector, centerPos):
x, y, z = centerPos
sx, sy, sz = (Vec3(sizeVector) * 0.8)
# Point lights, one in each ceiling corner.
for i in (x-sx, x+sx):
for j in (y-sy, y+sy):
for k in (z+sz,):
self.addPointLight((i, j, k))
# Ambient light.
c = 0.4
lightA = AmbientLight("light-ambient")
lightA.setColor(VBase4(c, c, c, 1))
lightANode = render.attachNewNode(lightA)
render.setLight(lightANode)
# Fog.
fog = Fog("fog")
fog.setColor(1, 1, 1)
fog.setExpDensity(0.002)
render.setFog(fog)
def addPointLight(self, pos):
k = 12
light = PointLight("light-point")
light.setColor(VBase4(k, k, k, 1))
light.setAttenuation(Point3(0.2, 0.1, 0.01))
node = render.attachNewNode(light)
node.setPos(*pos)
render.setLight(node)
class FallingBalls(Scene):
def __init__(self, world, parent, cameraHandler, character):
Scene.__init__(self, world, parent, cameraHandler, character)
self.sphere1 = Sphere(world, parent, (0.7, 0.4, 0.4), (-5, 0, 7), ( 1, 0, 0), 0.9, 30)
self.sphere1.body.setLinearVel(-1.0, 0.0, 0.0)
self.sphere2 = Sphere(world, parent, (0.4, 0.7, 0.4), ( 0, 0, 12), ( 0, 1, 0), 1.2, 30)
self.sphere2.body.setLinearVel(0.0, 0.0, -15.0)
self.sphere3 = Sphere(world, parent, (0.4, 0.4, 0.7), ( 5, 0, 4), (-1, 0, 0), 1.0, 30)
self.sphere3.body.setLinearVel(5.0, 0.0, 15.0)
self.spinner1 = Spinner(world, parent, (0.25, 0.19, 0.13), (-4, 0, 1), (0, 0, -1), (0, -1, 0), (2.0, 1.0, 4.0), 50)
self.spinner2 = Spinner(world, parent, (0.25, 0.19, 0.13), ( 4, 0, 1), (0, 0, -1), (0, -1, 0), (2.0, 1.0, 4.0), 50)
self.createBoundaries((30, 20, 20), (0, 0, 5))
self.setupLighting((30, 20, 20), (0, 0, 5))
class BasicWall(Scene):
def __init__(self, world, parent, cameraHandler, character):
Scene.__init__(self, world, parent, cameraHandler, character)
# Setup the containing area.
volume = (60, 60, 18)
center = (0, 0, 18)
self.createBoundaries(volume, center)
self.setupLighting(volume, center)
character.moveTo((0, 0, 15))
# Create the wall itself.
color = (0.8, 0.8, 0.8)
size = (3, 1, 2)
pos = (-20, 30, size[2])
density = 10
quantity = (4, 2, 3)
shatterLimit = 2
tileThickness = 0.05
tileShatterLimit = 1
createWall(world, parent, color, pos, size, density, quantity, shatterLimit, tileThickness, tileShatterLimit)
``` |
{
"source": "JoaoFranciscoAmorim/exercicios-de-Python",
"score": 4
} |
#### File: exercicios-de-Python/Exercicios_Python/funcaoquecalculaarea.py
```python
def area(a, b):
    s = a * b
    print('A área do terreno {} por {} é igual a {:.2f} metros quadrados.'.format(a, b, s))
larg = float(input('Largura (em metros): '))
comprimento = float(input('Comprimento (em metros): '))
area(larg, comprimento)
``` |
{
"source": "joao-frohlich/BCC",
"score": 4
} |
#### File: ANN/e01/e01.py
```python
def f(x):
return x ** 5 - 4 * x - 3
def verificacao_bolzano(a, b):
print("*** Verificação Bolzano ***")
fa = f(a)
fb = f(b)
print("f(a)= %.4f\nf(b)= %.4f" % (fa, fb))
if f(a) * f(b) < 0:
print("Como a f(a)*f(b) < 0 ∃ x | f(x) = 0")
return True
print("Como a f(a)*f(b) >= 0 ∄ x | f(x) = 0")
return False
def bissecao(a, b, max_iter):
print("\n*** Método da Bisseção ***")
print("Procurando uma raiz no intervalo [%.3f,%.3f]" % (a, b))
print("Iteração | (x , y)")
fa = f(a)
for i in range(max_iter):
p = a + (b - a) / 2
print("%d | ( %.6f , %.6f )" % (i + 1, p, f(p)))
fp = f(p)
if fa * fp > 0:
a = p
fa = fp
else:
b = p
return p
def bissecao2(a, b, epsilon):
cont = 1
fa = f(a)
while (b - a) / 2 >= epsilon:
p = a + (b - a) / 2
fp = f(p)
if fa * fp > 0:
a = p
fa = fp
else:
b = p
cont += 1
return cont
a = 0.646
b = 2.431
max_iter = 8
epsilon = 10 ** (-14)
if __name__ == "__main__":
if verificacao_bolzano(a, b):
raiz = bissecao(a, b, max_iter)
cont = bissecao2(a, b, epsilon)
print("\nRaiz encontrada após %d iterações = %.6f" % (max_iter, raiz))
print("Iterações para erro menor que 10e-14 = %d" % cont)
else:
print("O intervalo não possui raiz")
```
#### File: ANN/e02/e02.py
```python
def f(x):
return x ** 3 - 4 * x - 1
def f_(x):
return 3 * (x ** 2) - 4
def bolzano(a, b):
if (f(a) * f(b)) < 0:
return True
return False
def newton(p, n):
for i in range(n):
p = p - (f(p) / f_(p))
return p
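# Newton's update is p_{n+1} = p_n - f(p_n) / f'(p_n); the secant method below
# replaces f'(p_n) with the slope through the last two iterates,
# (f(p_n) - f(p_{n-1})) / (p_n - p_{n-1}).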
def secantes(pontos, n):
for i in range(1, n - 1):
pontos.append(
(pontos[i - 1] * f(pontos[i]) - pontos[i] * f(pontos[i - 1]))
/ (f(pontos[i]) - f(pontos[i - 1]))
)
return pontos[-1]
def posicao_falsa(p0, p1, n):
if bolzano(p0, p1):
p3 = p0 - f(p0) * (p1 - p0) / (f(p1) - f(p0))
for i in range(n):
if bolzano(p0, p3):
p0 = p3
else:
p1 = p3
p3 = p0 - f(p0) * (p1 - p0) / (f(p1) - f(p0))
return p3
else:
print("Reprovado no teorema de Bolzano")
# Question 1
p = float(input("Informe o p0: "))
n = int(input("Informe a quantidade de iterações: "))
ans = newton(p, n)
print("{:.8}".format(ans))
# Question 2
p0 = float(input("Informe o p0: "))
p1 = float(input("Informe o p1: "))
n = int(input("Informe a quantidade de iterações: "))
pontos = [p0, p1]
ans = secantes(pontos, n)
print("{:.7}".format(ans))
# Question 3
p0 = float(input("Informe o p0: "))
p1 = float(input("Informe o p1: "))
n = int(input("Informe a quantidade de iterações: "))
ans = posicao_falsa(p0, p1, n)
print("{:.7}".format(ans))
```
#### File: ANN/e04/e04.py
```python
from pprint import pprint
from numpy import array, diag, diagflat, dot, linalg
def jacobi(A, b, N, x):
D_ = diagflat(diag(A))
L_U = A - D_
D_ = linalg.inv(D_)
for i in range(N):
x = dot(dot(-D_, L_U), x) + dot(D_, b)
print("X(%d) = " % (i + 1), end="")
print(x)
return x
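# Convergence note (an aside, not an original comment): Jacobi converges from
# any starting guess when A is strictly diagonally dominant, which holds for
# the A below (|3| > 1+1, |4| > 1+1, |5| > 1+1).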
A = array([[3, -1, 1], [1, 4, 1], [1, 1, 5]])
b = array([3, 8, -2])
chute = array([0, -1, 2])
sol = jacobi(A, b, N=9, x=chute)
print("A:", end=" ")
pprint(A)
print("b:", end=" ")
print(b)
print("x:", end=" ")
print(sol)
```
#### File: CAL/extra/render_graph.py
```python
from graphviz import Graph
################################################
# Author: <NAME>
# Name: Graph renderer
################################################
def save_graph(dot, name):
dot.format = "pdf"
dot.render(name)
def render_graph(g_list, size):
dot = Graph()
for idx in range(size):
dot.node(str(idx))
for u, v, w in g_list:
dot.edge(str(u), str(v), str(w))
save_graph(dot, "initial_graph")
def load_graph(path):
    g_list = []
    with open(path, "r") as f:  # close the file automatically
        size, num_edges = map(int, f.readline().split())
        for _ in range(num_edges):
            edge = list(map(int, f.readline().split()))
            g_list.append(edge)
    return g_list, size
if __name__ == "__main__":
g_list, size = load_graph(input("Input filename-> "))
render_graph(g_list, size)
```
#### File: CAL/extra/render_mst.py
```python
from graphviz import Graph
################################################
# Author: <NAME>
# Name: MST renderer
################################################
def save_graph(dot, name):
dot.format = "pdf"
dot.render(name)
def render_graph(g_list, size, mst_edges, mst_nodes):
dot = Graph()
for idx in range(size):
if idx in mst_nodes:
dot.node(str(idx), style="filled", color="lightgrey")
else:
dot.node(str(idx))
for u, v, w in g_list:
if [u, v, w] in mst_edges:
dot.edge(str(u), str(v), str(w), color="red")
else:
dot.edge(str(u), str(v), str(w))
save_graph(dot, "mst_graph")
def load_graph(path):
    g_list = []
    mst_edges, mst_nodes = [], set()
    with open(path, "r") as f:  # close the file automatically
        size, num_edges, mst_size = map(int, f.readline().split())
        for _ in range(num_edges):
            edge = list(map(int, f.readline().split()))
            g_list.append(edge)
        for _ in range(mst_size):
            edge = list(map(int, f.readline().split()))
            mst_edges.append(edge)
            mst_nodes.add(edge[0])
            mst_nodes.add(edge[1])
    return g_list, size, mst_edges, mst_nodes
if __name__ == "__main__":
g_list, size, mst_edges, mst_nodes = load_graph(input("Input filename-> "))
render_graph(g_list, size, mst_edges, mst_nodes)
```
#### File: CGR/T2/snowman.py
```python
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from colors import *
from shapes import SolidCylinder
angle_h = 300.0
angle_v = 0
spin = True
turn_h = 1
turn_v = 0
render_quality = 100 # max = 100
def keyboard(key, x, y):
global spin, turn_h, turn_v, angle_h, angle_v, render_quality
key = ord(key)
    # Esc quits
if key == 27:
exit(0)
elif key == ord("s"):
spin = not spin
elif key == ord("a"):
turn_h = -1
elif key == ord("d"):
turn_h = 1
elif key == ord("w"):
turn_v = 0.5
elif key == ord("x"):
turn_v = -0.5
    # Render-quality controls
    elif key == ord("+"):
        render_quality += 5
    elif key == ord("-"):
        render_quality = max(4, render_quality - 2)  # clamp so the spheres stay drawable
else:
angle_h = 300.0
angle_v = 0
turn_h = 1
turn_v = 0
render_quality = 100
def display():
global spin, angle_h, angle_v
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glClearColor(*blue_sky)
glLoadIdentity()
gluLookAt(4.2, 1.4, 0.0, 0.0, 0.4, 0.0, 0.0, 1.0, 0.0)
    # Camera control
if spin:
angle_h += turn_h * 0.5
angle_v += turn_v
angle_h %= 360
angle_v %= 360
    # Object angle
glRotatef(angle_h, 0, 1, 0)
glRotatef(angle_v, 1, 0, 1)
glPushMatrix()
    # Shift to the centre of the frame
glTranslatef(0.0, 0.2, 0.0)
# Base
glPushMatrix()
glTranslatef(0.0, -0.298, 0.0)
glRotatef(90, 1, 0, 0)
glColor3f(*grass)
glutSolidCylinder(1.13, 0.002, 100, 1)
glPopMatrix()
    # Snow color
glColor3f(*white)
    # Bottom (largest) ball
glTranslatef(0.0, 0.0, 0.0)
glutSolidSphere(0.45, render_quality, render_quality * 2)
    # Middle ball
glTranslatef(0.0, 0.45, 0.0)
glutSolidSphere(0.3, render_quality, render_quality * 2)
    # Head ball
glTranslatef(0.0, 0.35, 0.0)
glutSolidSphere(0.25, render_quality, render_quality * 2)
# <NAME>
glPushMatrix()
glRotatef(26.0, 0, 1, 0)
glTranslatef(0.21, 0.08, 0.0)
glColor3f(0.1, 0.1, 0.1)
glutSolidSphere(0.04, 8, 8)
glPopMatrix()
# <NAME>
glPushMatrix()
glRotatef(-26.0, 0, 1, 0)
glTranslatef(0.21, 0.08, 0.0)
glutSolidSphere(0.04, 8, 8)
glPopMatrix()
    # Nose
glPushMatrix()
glTranslatef(0.22, -0.01, 0.0)
glRotatef(90, 0, 1, 0)
glColor3f(*red)
glutSolidCone(0.03, 0.18, 8, 6)
glPopMatrix()
    # Little hat
glColor3f(*black)
glPushMatrix()
glTranslatef(0, 0.25, 0)
SolidCylinder(0.2, 0.03, render_quality)
glColor3f(*red)
glTranslatef(0, 0.08, 0)
SolidCylinder(0.11, 0.05, render_quality)
glColor3f(*dark_gray)
SolidCylinder(0.1, 0.3, render_quality)
glPopMatrix()
    # Snow-globe base
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPushMatrix()
glTranslatef(0.0, -1.1, 0.0)
glColor3f(*dark_gray)
glRotatef(90, 1, 0, 0)
glutSolidCylinder(1.23, 0.4, 100, 1)
glPopMatrix()
glPushMatrix()
glTranslatef(0.0, -1.13, 0.0)
glColor3f(*light_gray)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glutSolidCylinder(1.3, 0.4, 100, 1)
glPopMatrix()
glTranslatef(0.0, -0.08, 0.0)
glColor3f(*dark_gray)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glutSolidCylinder(1.308, 0.24, 100, 1)
glPopMatrix()
glPopMatrix()
    # Glass globe
    # Inner shell
glPushMatrix()
glColor4f(0.8, 0.8, 0.8, 0.05)
glTranslatef(0.0, -0.5, 0.0)
glRotatef(90.0, 1, 0, 0)
glutSolidSphere(1.3, render_quality, render_quality)
glPopMatrix()
    # Outer shell
glPushMatrix()
glColor4f(0.8, 0.8, 0.8, 0.1)
glTranslatef(0.0, -0.5, 0.0)
glRotatef(90.0, 1, 0, 0)
glutSolidSphere(1.4, render_quality, render_quality)
glPopMatrix()
glDisable(GL_BLEND)
glPopMatrix()
glFlush()
glutSwapBuffers()
width = 800
height = 600
glutInit()
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(width, height)
glutCreateWindow("Boneco de Neve - <NAME>")
glutDisplayFunc(display)
glutIdleFunc(display)
glutKeyboardFunc(keyboard)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
aspect = width / height
gluPerspective(45, aspect, 0.01, 100.0)
glMatrixMode(GL_MODELVIEW)
glShadeModel(GL_SMOOTH)
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
glClearColor(0.0, 0.0, 0.0, 1.0)
# Material settings
mat_shininess = [15.0]
mat_specular = [0.75, 0.75, 0.75, 0.75]
# Lighting settings
light_ambient = [0.6, 0.6, 0.6, 1.0]
light_diffuse = [0.8, 0.8, 0.8, 0.8]
light_specular = [1.0, 1.0, 1.0, 0.3]
light_position = [6.0, 6.0, 2.0, 0.0]
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular)
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess)
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient)
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular)
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)
glutMainLoop()
```
#### File: COMP/W01/class_DFA.py
```python
class DFA:
def __init__(
self, name, alphabet, states, delta_function, start_state, final_states
):
self.name = name
self.alphabet = alphabet
self.states = states
self.delta_function = delta_function
self.start_state = start_state
self.final_states = final_states
        self.current_state = start_state
        self.current_letter = None
        self.valid = True
def transition_to_state_with_input(self, letter):
if self.valid:
if (self.current_state, letter) not in self.delta_function.keys():
self.valid = False
return
self.current_state = self.delta_function[(self.current_state, letter)]
self.current_letter = letter
else:
return
def in_accept_state(self):
return self.current_state in self.final_states and self.valid
def go_to_initial_state(self):
self.current_letter = None
self.valid = True
self.current_state = self.start_state
    def run_with_word(self, word):
        self.go_to_initial_state()
        for letter in word:
            self.transition_to_state_with_input(letter)
        return self.in_accept_state()
    def run_with_letters(self, word):
        self.go_to_initial_state()
        for letter in word:
            self.run_with_letter(letter)
            if not self.valid:
                return
def run_with_letter(self, letter):
self.transition_to_state_with_input(letter)
return self.current_state
def __len__(self):
return len(self.states)
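# A minimal usage sketch (illustrative, not part of the original file):
#
#     ends_in_one = DFA(
#         name="ends-in-1",
#         alphabet={"0", "1"},
#         states={"q0", "q1"},
#         delta_function={("q0", "0"): "q0", ("q0", "1"): "q1",
#                         ("q1", "0"): "q0", ("q1", "1"): "q1"},
#         start_state="q0",
#         final_states={"q1"},
#     )
#     ends_in_one.run_with_word("10101")  # -> True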
```
#### File: BCC/LFA/class_regex.py
```python
from utils import read_regex
class Regex:
def __init__(self, file):
        self.regex = read_regex(file)
        self.word = ""
def run_regex(self, raw):
raw = list(raw)
self.word = self.regex.format(raw[0], raw[2], raw[1])
return self.word
def __str__(self):
return self.word
```
#### File: web/project/__init__.py
```python
import os
from time import sleep
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from project.stdout import log
from project.twitter_stream import Extractor
from project.utils import gen_hash
# Flask main file. It generates the HTML pages and reads the input data.
# Global vars
tweets = list()
ids = set()
extractors = set()
limit = 5
def add_tweet(tweet):
"""
    Append a (tweet, tag) tuple to the tweets list.
    The tweet ID is added to an IDs set to prevent duplicated tweets
"""
global tweets
tweets.append((tweet[1], tweet[0]))
ids.add(tweet[1].get_id())
def update(extractors):
"""
    Iterate over the extractors, updating the tweets extracted by each one
"""
global tweets
for extractor in extractors:
log("Retrieving data for tag:", str(extractor.hashtag_to_track))
extractor.run()
sleep(1)
for stream in extractor.streams:
for tweet in stream.listener.tweets:
if not tweet[1].get_id() in ids:
add_tweet((*extractor.hashtag_to_track, tweet[1]))
tweets.sort(reverse=True)
class App:
def __init__(self):
self.app = Flask(__name__)
self.tags = set()
@self.app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
                # Deal with the different buttons pressed
if "tag_input" in request.form.keys():
# Add new tag from input field
data = list(request.form["tag_input"].split(";"))
for tag in data:
if tag not in self.tags:
self.tags.add(tag)
ex = Extractor([tag])
extractors.add(ex)
elif "tag_button" in request.form.keys():
                    # Remove the tag whose button was pressed
data = request.form["tag_button"]
self.tags.remove(data)
# Remove tweets from that tag
to_delete = list()
for i in range(len(tweets)):
if tweets[i][1] == data:
to_delete.append(i)
for i in to_delete[::-1]:
tweets.pop(i)
# Destroys that tag stream
to_delete = list()
for i in extractors:
if data in i.hashtag_to_track:
for stream in i.streams:
log("Closing ", data, " stream")
stream.disconnect()
to_delete.append(i)
for i in to_delete[::-1]:
extractors.remove(i)
else:
return redirect(request.url)
sleep(1)
            # Cap how many tweets the feed shows before refreshing it
if len(extractors):
while len(tweets) >= limit:
tweets.pop(0)
update(extractors)
return render_template(
"index.html", tweets=tweets, tags=self.tags, recovery_code=""
)
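        # Recovery flow: POSTing a known code restores its tag set and spawns
        # fresh extractors, while a GET mints (and persists) a code for the
        # current tags so the session can be restored later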
@self.app.route("/recovery", methods=["GET", "POST"])
def recovery():
if request.method == "POST":
recovery_code = request.form["hash_input"]
query = db_select(recovery_code)
if query:
self.tags = query
for tag in self.tags:
ex = Extractor([tag])
extractors.add(ex)
recovery_code = ""
else:
recovery_code = gen_hash(self.tags)
query = db_select(recovery_code)
if not query:
db_insert(recovery_code, self.tags)
return render_template(
"index.html", tweets=tweets, tags=self.tags, recovery_code=recovery_code
)
def run(self):
port = int(os.environ.get("PORT", 5000))
self.app.run(host="0.0.0.0", port=port)
app = App().app
app.config.from_object("project.config.Config")
db = SQLAlchemy(app)
class Tag(db.Model):
__tablename__ = "tags"
id = db.Column(db.Integer, primary_key=True)
tags = db.Column(db.String(256), unique=True, nullable=False)
def __init__(self, idx, tags):
self.id = idx
self.tags = tags
def db_insert(pk, data):
instance = Tag(pk, ";".join(data))
db.session.add(instance)
db.session.commit()
def db_select(pk):
query = Tag.query.get(pk)
if query is None:
return False
else:
return set(query.tags.split(";"))
```
#### File: web/project/user.py
```python
class User:
"""
User class stores user data and have get methods
"""
def __init__(self, author):
self.name = author.name
self.username = author.screen_name
self.user_url = "https://twitter.com/" + self.username
def get_name(self):
return self.name
def get_username(self):
return self.username
def get_user_url(self):
return self.user_url
def __str__(self):
return "@" + self.username
```
#### File: TEG/past_works/gera_grafo.py
```python
import random as r
from pprint import pprint
import math as m
def cria_ponto():
return round(r.uniform(0, 1), 4)
def gera_x(n):
x = set()
while len(x) < n:
x.add(cria_ponto())
return x
def gera_y(n):
y = set()
while len(y) < n:
y.add(cria_ponto())
return y
def gera_coordenadas(n):
pontos = []
for x, y in zip(gera_x(n), gera_y(n)):
pontos.append((x, y))
return pontos
def gera_grafo(vertices, v):
arestas = set()
    while len(arestas) < m.ceil(
        v * (v - 1) / 3
    ):  # randomly generate v*(v-1)/3 edges, rounded up to the next integer
v1, v2 = r.randrange(v), r.randrange(v)
        if v1 != v2:  # make sure no vertex gets an edge to itself
arestas.add((v1, v2))
    grafo = [[0 for _ in range(v)] for _ in range(v)]  # initialise grafo[n][m] = 0
    for de, para in arestas:  # wire up the edges
        grafo[de][para] = 1
        grafo[para][de] = 1
return grafo
def pesa_grafo(grafo, vertices):
    grafo_pesado = [
        [-1 for _ in range(len(grafo))] for _ in range(len(grafo))
    ]  # list comprehension building grafo_pesado[n][m] with every entry set to -1
    for i in range(len(grafo)):  # iterate over the rows
        for j in range(len(grafo[i])):  # iterate over the columns
            if grafo[i][j] == 1:
                dist = distancia(
                    vertices[j], vertices[i]
                )  # distance between the two vertices
                grafo_pesado[j][i] = dist  # keep the graph undirected
                grafo_pesado[i][j] = dist  # keep the graph undirected
    return grafo_pesado
def distancia(p1, p2):
return float(str(m.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2))[:6])
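# Note: the str()[:6] slice above truncates the Euclidean distance
# sqrt((x2 - x1)**2 + (y2 - y1)**2) to six characters instead of rounding;
# round(value, 4) would be the more conventional choice.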
n = int(input())
vertices = gera_coordenadas(n)
grafo = gera_grafo(vertices, n)
grafo_pesado = pesa_grafo(grafo, vertices)
pprint(grafo)
pprint(grafo_pesado)
``` |
{
"source": "joao-frohlich/covid-sc",
"score": 2
} |
#### File: hospitals/admin/hospital.py
```python
from django.contrib import admin
from apps.hospitals.models import Hospital, HospitalBed
class HospitalBedInline(admin.TabularInline):
model = HospitalBed
def get_extra(self, request, obj=None, **kwargs):
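        # Show three blank bed rows when creating a hospital, none when editing an existing one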
extra = 3
extra_on_edit = 0
return extra_on_edit if obj else extra
class HospitalAdmin(admin.ModelAdmin):
empty_value_display = '--'
fieldsets = [
(None, {
'fields': ['acronym', ('name', 'city'), ('phonenumber', 'email')],
'classes': ('wide', 'extrapretty'),
}),
]
inlines = [HospitalBedInline]
list_display = ['upper_case_acronym', 'upper_case_name', 'city', 'phonenumber', 'email']
ordering = ['acronym', 'name']
search_fields = ['acronym', 'name']
autocomplete_fields = ['city']
def upper_case_acronym(self, obj):
return obj.acronym.upper()
upper_case_acronym.short_description = 'acronym'
def upper_case_name(self, obj):
return obj.name.capitalize()
upper_case_name.short_description = 'name'
admin.site.register(Hospital, HospitalAdmin)
``` |
{
"source": "JoaoFula/SnakeGame",
"score": 3
} |
#### File: JoaoFula/SnakeGame/textbox.py
```python
import pygame as pg
pg.init()
class Textbox:
def __init__(self, surface, x, y, sizex=50, sizey=50, caption="",
text="", font_size=22, font_color=(0, 0, 0), text_offset=(-35, 1),
outline_color=(255,255,255), color_active=(25,25,25), color_inactive=(10,10,10)):
self.surface = surface
self.color_active = color_active
self.color_inactive = color_inactive
self.color = color_inactive
self.x = x
self.y = y
self.text = text
self.sizex = sizex
self.sizey = sizey
self.caption = caption
self.outline_color = outline_color
self.font_size = font_size
self.font_color = font_color
self.text_offset = text_offset
self.checkbox_obj = pg.Rect(self.x, self.y, 12, 12)
self.checkbox_outline = self.checkbox_obj.copy()
self.active = False
def _draw_button_text(self):
self.font = pg.font.Font(None, self.font_size)
self.font_surf = self.font.render(self.caption, True, self.font_color)
w, h = self.font.size(self.caption)
self.font_pos = (self.x + 12 / 2 - w / 2 + self.text_offset[0], self.y + 12 / 2 - h / 2 + self.text_offset[1])
self.surface.blit(self.font_surf, self.font_pos)
def render_textbox(self):
pg.draw.rect(self.surface, (255,255,255), self.checkbox_obj)
pg.draw.rect(self.surface, self.outline_color, self.checkbox_outline, 1)
font = pg.font.Font(None, self.font_size)
txt_surface = font.render(self.text, True, self.color)
width = max(200, txt_surface.get_width() + 10)
self.checkbox_obj.w = width
# Blit the text.
self.surface.blit(txt_surface, (self.checkbox_obj.x + 5, self.checkbox_obj.y ))
# Blit the input_box rect.
#pg.draw.rect( self.surface, self.color, self.checkbox_obj, 2)
self._draw_button_text()
def update_textbox(self, event_object):
if event_object.type == pg.MOUSEBUTTONDOWN:
if self.checkbox_obj.collidepoint(event_object.pos):
self.active = True
else:
self.active = False
self.color = self.color_active if self.active else self.color_inactive
if event_object.type == pg.KEYDOWN:
if self.active:
if event_object.key == pg.K_RETURN:
print(self.text)
elif event_object.key == pg.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.text += event_object.unicode
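# A minimal event-loop sketch (illustrative, not part of the original file):
#
#     screen = pg.display.set_mode((640, 480))
#     box = Textbox(screen, 100, 100, caption="Name:")
#     running = True
#     while running:
#         for event in pg.event.get():
#             if event.type == pg.QUIT:
#                 running = False
#             box.update_textbox(event)
#         screen.fill((30, 30, 30))
#         box.render_textbox()
#         pg.display.flip()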
``` |
{
"source": "joaofurini/GuerreiroDasEstradas",
"score": 3
} |
#### File: joaofurini/GuerreiroDasEstradas/carroInimigo.py
```python
from random import randint
import time
class CarroInimigo:
def __init__(self, poderDeFogo, blindagem):
self.__carro = {
"Poder De Fogo": poderDeFogo,
"Blindagem": blindagem
}
def getPoderDeFogo(self):
return self.__carro['Poder De Fogo']
def getBlindagem(self):
return self.__carro['Blindagem']
def tomaTiro(self):
danoTiro = randint(0, 6)
if danoTiro == 0:
print("A blindagem do inimigo foi mais forte")
print("Voce deu {} de dano".format(danoTiro))
time.sleep(1.5)
else:
self.__carro['Blindagem'] -= danoTiro
print("\nVOCE ACERTOU TIROS NO CARRO DO INIMIGO")
print("e tirou {} da blindagem dele".format(danoTiro))
time.sleep(1.5)
```
#### File: joaofurini/GuerreiroDasEstradas/inimigo.py
```python
from random import randint
import time
class Inimigo:
def __init__(self, habilidade, energia):
        self.__inimigo = {
            "Habilidade": habilidade,
            "Energia": energia
        }
def getHabilidade(self):
return self.__inimigo['Habilidade']
def getEnergia(self):
return self.__inimigo['Energia']
def setEnergia(self, value):
self.__inimigo['Energia'] = value
def perdeuRound(self):
self.__inimigo['Energia'] -=1
def mostraEstado(self):
print("\n==========================================================")
print("ATUALMENTE SEU INIMIGO SE ENCONTRA NO SEGUINTE ESTADO:")
for key, value in self.__inimigo.items():
print('{} - {}'.format(key, value))
print("\n==========================================================")
def tomaTiro(self):
danoTiro = randint(0, 6)
if danoTiro == 0:
print("Voce teria acertado se o inimigo nao tivesse desviado de ultima hora ")
print("Voce tirou {} de dano".format(danoTiro))
time.sleep(1.5)
elif danoTiro == 6:
self.__inimigo['Energia'] =0
print("Voce acertou um tiro na cabeca do inimigo e ele esta morto")
else:
self.__inimigo['Energia'] -= danoTiro
print("\nVOCE ACERTOU UM TIRO")
print("e tirou {} de energia do inimigo".format(danoTiro))
``` |
{
"source": "JoaoG250/AracompWeb",
"score": 2
} |
#### File: aracomp/enigmas/views.py
```python
from django.views import View
from django.contrib import messages
from django.views.generic import TemplateView
from django.shortcuts import render, redirect, get_object_or_404
from core.models import Config
from enigmas.models import VencedorEnigmas, Enigma
from enigmas.forms import RespostaEnigmaForm, EnigmaVencedorForm
def get_respostas(enigma):
respostas = list(Enigma.objects.order_by('enigma')[:enigma - 1].values_list('resposta', flat=True))
for i in range(len(respostas) + 1, 6):
respostas.append('????')
return respostas
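# get_respostas returns the answers of the enigmas solved so far, padded with
# '????' placeholders up to the five slots the template displays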
class EnigmaView(View):
def post(self, request, *args, **kwargs):
        # Check whether the treasure hunt has already started
        start = Config.objects.filter(name='enigmas_start').first()
        if not start or not start.active:
            return redirect('home')
        # Look the enigma number up before validating the form so it is also
        # available when the form fails validation
        enigma = request.session.get('enigma', 1)
        form = RespostaEnigmaForm(request.POST)
        if form.is_valid():
            resposta = form.cleaned_data['resposta']
            # Check whether every enigma has already been solved
            if enigma > 5:
                return redirect('enigmas')
            # Check the answer
            enigma_obj = get_object_or_404(Enigma, enigma=enigma)
            if enigma_obj.checar_resposta(resposta):
                enigma += 1
            else:
                messages.warning(request, 'Resposta errada :(, tente novamente.')
            # Update the enigma session variable
            request.session['enigma'] = enigma
            return redirect('enigmas')
context = {
'form': form,
'respostas': get_respostas(enigma),
}
return render(request, 'enigmas/enigma.html', context)
def get(self, request, *args, **kwargs):
        # Check whether the treasure hunt has already started
start = Config.objects.filter(name='enigmas_start').first()
if not start or not start.active:
return redirect('home')
enigma = request.session.get('enigma', 1)
if enigma > 5:
if VencedorEnigmas.objects.count() > 0:
return redirect('enigmas-finalizado')
return redirect('enigmas-vencedor')
else:
enigma_obj = get_object_or_404(Enigma, enigma=enigma)
context = {
'form': RespostaEnigmaForm(),
'vencedor': VencedorEnigmas.objects.first(),
'enigma': enigma_obj,
'respostas': get_respostas(enigma),
}
return render(request, 'enigmas/enigma.html', context)
class EnigmaFinalizadoView(View):
def get(self, request, *args, **kwargs):
enigma = request.session.get('enigma', 1)
if VencedorEnigmas.objects.count() > 0 and enigma > 5:
context = {
'vencedor': VencedorEnigmas.objects.first(),
'respostas': get_respostas(enigma),
}
return render(request, 'enigmas/finalizado.html', context)
else:
return redirect('enigmas')
class EnigmaVencedorView(View):
def post(self, request, *args, **kwargs):
enigma = request.session.get('enigma', 1)
if VencedorEnigmas.objects.count() == 0 and enigma > 5:
form = EnigmaVencedorForm(request.POST)
if form.is_valid():
form.save()
                messages.success(request, 'Você é o ganhador da caça ao tesouro. Parabéns!!')
return redirect('enigmas-finalizado')
context = {
'form': form,
'respostas': get_respostas(enigma),
}
return render(request, 'enigmas/vencedor.html', context)
else:
return redirect('enigmas')
def get(self, request, *args, **kwargs):
enigma = request.session.get('enigma', 1)
if VencedorEnigmas.objects.count() == 0 and enigma > 5:
context = {
'form': EnigmaVencedorForm(),
'respostas': get_respostas(enigma),
}
return render(request, 'enigmas/vencedor.html', context)
else:
return redirect('enigmas')
class EnigmaReset(View):
def get(self, request, *args, **kwargs):
request.session['enigma'] = 1
return redirect('enigmas')
```
#### File: aracomp/posts/models.py
```python
from django.db import models
from django.templatetags.static import static
from django.contrib.staticfiles import finders
from django.core.exceptions import SuspiciousOperation
class Post(models.Model):
OPTIONS_POST_TYPE = [
('P', 'Palestras'),
('H', 'Hackathon'),
('M', 'Minicursos'),
('R', 'Mesas Redondas'),
('A', 'Maratona'),
('O', 'Oficinas'),
('B', 'BATE-PAPO')
]
post_type = models.CharField('Tipo de Postagem', max_length=1, choices=OPTIONS_POST_TYPE)
title = models.CharField('Título', max_length=120)
description = models.TextField('Descrição', max_length=800)
content = models.TextField('Conteúdo', max_length=3000)
image = models.CharField('Path da imagem em static', max_length=200)
created = models.DateField('Data de Criação', auto_now_add=True)
class Meta:
verbose_name = 'Postagem'
verbose_name_plural = 'Postagens'
def save(self, *args, **kwargs):
self.image = self.image.replace('/static/', '')
try:
if finders.find(self.image):
self.image = static(self.image)
else:
self.image = static('posts/default/post_image.png')
except SuspiciousOperation:
self.image = static('posts/default/post_image.png')
super(Post, self).save(*args, **kwargs)
def __str__(self):
return f'{self.get_post_type_display()}: {self.title[:50]}'
``` |
{
"source": "JoaoG250/Docker-Nuxt-Django",
"score": 2
} |
#### File: api/api/settings.py
```python
import os
from pathlib import Path
from django.core.exceptions import ImproperlyConfigured
def get_env_value(env_variable):
"""Função para recuperar variável de ambiente.
Args:
env_variable (str): Nome da variável.
Raises:
ImproperlyConfigured: Levanta a exceção caso a variável não exista.
Returns:
str: Valor da variável de ambiente.
"""
try:
return str(os.environ[env_variable]).strip()
except KeyError:
error_msg = f"Set the {env_variable} environment variable"
raise ImproperlyConfigured(error_msg)
ALLOWED_HOSTS = []
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
SECRET_KEY = get_env_value("NUXTDJANGO_SECRET_KEY")
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": get_env_value("NUXTDJANGO_DATABASE_NAME"),
"USER": get_env_value("NUXTDJANGO_DATABASE_USER"),
"PASSWORD": get_env_value("NUXTDJANGO_DATABASE_PASSWORD"),
"HOST": get_env_value("NUXTDJANGO_DATABASE_HOST"),
"PORT": get_env_value("NUXTDJANGO_DATABASE_PORT"),
}
}
if get_env_value("NUXTDJANGO_PRODUCTION") == "FALSE":
DEBUG = True
elif get_env_value("NUXTDJANGO_PRODUCTION") == "TRUE":
ALLOWED_HOSTS = ["*"]
DEBUG = False
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"corsheaders",
"core",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "api.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "api.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "pt-br"
TIME_ZONE = "America/Sao_Paulo"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles/")
# Media files
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# CORS configuration
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000",
]
``` |
{
"source": "JoaoG250/Todo-REST",
"score": 2
} |
#### File: todo_rest/todo/serializers.py
```python
from django.urls import reverse
from rest_framework import serializers
from todo.models import Todo
class TodoSerializer(serializers.ModelSerializer):
url = serializers.SerializerMethodField()
class Meta:
model = Todo
fields = ('id', 'url', 'titulo', 'descricao', 'concluido')
    def get_url(self, obj):
        # SerializerMethodField resolves to get_<field_name> by default
        return reverse('todo-detail', args=[obj.id])
``` |
{
"source": "joaog314/uff-projects",
"score": 3
} |
#### File: 2020-2-uff-lrp/lista-2/ex-2.py
```python
def numbers(n):
return (2**n + 1)**2
entr = None
i = 1
test_list = []
while entr!=-1:
entr = int(input())
if -1 < entr <= 15:
test_list.append(entr)
for n in test_list:
    print('Teste ' + str(i))
print(numbers(n))
print('')
i += 1
``` |
{
"source": "joaogabriel15/spfc",
"score": 2
} |
#### File: spfc/home/views.py
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from .models import Civil, Documento, TipoDocumento
# Create your views here.
@login_required()
def home(request):
return render(request, 'home/index.html')
def pesquisa_id(request, civil_id):
civil_dados = Civil.objects.get(id=civil_id)
documento_dados = Documento.objects.all()
documento_dados_civil = documento_dados.filter(civil=civil_dados)
documento_tipo_dados = TipoDocumento.objects.all()
return render(request, 'home/index.html',{
'civil':civil_dados,
'documentos':documento_dados_civil,
'tipos':documento_tipo_dados,
})
``` |
{
"source": "joaogabrielcarnevale/secure-pipelines-in-aws-workshop",
"score": 2
} |
#### File: code/SecGuardRails/cfn_validate_lambda.py
```python
from __future__ import print_function
from boto3.session import Session
import json
import boto3
import zipfile
import tempfile
import botocore
import traceback
import re
import time
print('Loading function')
cf = boto3.client('cloudformation')
code_pipeline = boto3.client('codepipeline')
def find_artifact(artifacts, name):
"""Finds the artifact 'name' among the 'artifacts'
Args:
artifacts: The list of artifacts available to the function
name: The artifact we wish to use
Returns:
The artifact dictionary found
Raises:
Exception: If no matching artifact is found
"""
for artifact in artifacts:
if artifact['name'] == name:
return artifact
raise Exception('Input artifact named "{0}" not found in event'.format(name))
def get_template(s3, artifact, file_in_zip):
"""Gets the template artifact
Downloads the artifact from the S3 artifact store to a temporary file
then extracts the zip and returns the file containing the CloudFormation
template.
Args:
artifact: The artifact to download
file_in_zip: The path to the file within the zip containing the template
Returns:
The CloudFormation template as a string
Raises:
Exception: Any exception thrown while downloading the artifact or unzipping it
"""
    bucket = artifact['location']['s3Location']['bucketName']
    key = artifact['location']['s3Location']['objectKey']
with tempfile.NamedTemporaryFile() as tmp_file:
print("Retrieving s3://" + bucket + "/" + key)
s3.download_file(bucket, key, tmp_file.name)
with zipfile.ZipFile(tmp_file.name, 'r') as zip:
zip.printdir()
return zip.read(file_in_zip)
def put_job_success(job, message):
"""Notify CodePipeline of a successful job
Args:
job: The CodePipeline job ID
message: A message to be logged relating to the job status
Raises:
Exception: Any exception thrown by .put_job_success_result()
"""
print('Putting job success')
print(message)
code_pipeline.put_job_success_result(jobId=job)
def put_job_failure(job, message):
"""Notify CodePipeline of a failed job
Args:
job: The CodePipeline job ID
message: A message to be logged relating to the job status
Raises:
Exception: Any exception thrown by .put_job_failure_result()
"""
print('Putting job failure')
print(message)
code_pipeline.put_job_failure_result(jobId=job, failureDetails={'message': message, 'type': 'JobFailed'})
def continue_job_later(job, message):
"""Notify CodePipeline of a continuing job
This will cause CodePipeline to invoke the function again with the
supplied continuation token.
Args:
job: The JobID
message: A message to be logged relating to the job status
Raises:
Exception: Any exception thrown by .put_job_success_result()
"""
# Use the continuation token to keep track of any job execution state
# This data will be available when a new job is scheduled to continue the current execution
continuation_token = json.dumps({'previous_job_id': job})
print('Putting job continuation')
print(message)
code_pipeline.put_job_success_result(jobId=job, continuationToken=continuation_token)
def get_user_params(job_data):
print(job_data)
"""Decodes the JSON user parameters and validates the required properties.
Args:
job_data: The job data structure containing the UserParameters string which should be a valid JSON structure
Returns:
The JSON parameters decoded as a dictionary.
Raises:
Exception: The JSON can't be decoded or a property is missing.
"""
try:
# Get the user parameters which contain the artifact and file settings
user_parameters = job_data['actionConfiguration']['configuration']['UserParameters']
decoded_parameters = json.loads(user_parameters)
except Exception as e:
# We're expecting the user parameters to be encoded as JSON
# so we can pass multiple values. If the JSON can't be decoded
# then fail the job with a helpful message.
raise Exception('UserParameters could not be decoded as JSON')
if 'input' not in decoded_parameters:
# Validate that the artifact name is provided, otherwise fail the job
# with a helpful message.
raise Exception('Your UserParameters JSON must include the artifact name')
if 'file' not in decoded_parameters:
# Validate that the template file is provided, otherwise fail the job
# with a helpful message.
raise Exception('Your UserParameters JSON must include the template file name')
if 'output' not in decoded_parameters:
# Validate that the template file is provided, otherwise fail the job
# with a helpful message.
raise Exception('Your UserParameters JSON must include the output bucket')
return decoded_parameters
def setup_s3_client(job_data):
"""Creates an S3 client
Uses the credentials passed in the event by CodePipeline. These
credentials can be used to access the artifact bucket.
Args:
job_data: The job data structure
Returns:
An S3 client with the appropriate credentials
"""
key_id = job_data['artifactCredentials']['accessKeyId']
key_secret = job_data['artifactCredentials']['secretAccessKey']
session_token = job_data['artifactCredentials']['sessionToken']
session = Session(
aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
aws_session_token=session_token)
return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))
def get_rules():
# Find table
client = boto3.client('dynamodb')
response = client.list_tables()
logTable = ""
for i in range(len(response['TableNames'])):
if "AWS-devsecops" in response['TableNames'][i]:
logTable = response['TableNames'][i]
# Verify that rules are created and if not, create them
response = client.scan(
TableName=logTable,
AttributesToGet=[
'rule',
]
)
    if len(response['Items']) == 0:
        add_rules(logTable)
        time.sleep(45)
        # Re-scan so the freshly seeded rules are picked up below
        response = client.scan(
            TableName=logTable,
            AttributesToGet=[
                'rule',
            ]
        )
# Get all rules from DDB.
# Rules have rule, ruledata, type and weight
rules = dict()
sgRules = []
ec2Rules = []
volRules = []
for n in range(len(response['Items'])):
rule = client.get_item(
TableName=logTable,
Key={
'rule': {'S':response['Items'][n]['rule']['S']}
},
ConsistentRead=True
)['Item']
if rule['category']['S'] == "SecurityGroup":
sgRules.append(rule)
elif rule['category']['S'] == "EC2Instance":
ec2Rules.append(rule)
elif rule['category']['S'] == "Volume":
volRules.append(rule)
rules['sgRules'] = sgRules
rules['ec2Rules'] = ec2Rules
rules['volRules'] = volRules
return rules
def add_rules(logTable):
client = boto3.client('dynamodb')
client.put_item(
TableName=logTable,
Item={
'rule' : {'S': "IngressOpenToWorld"},
'category' : {'S': "SecurityGroup"},
'ruletype' : {'S': "regex"},
'ruledata' : {'S': "^.*Ingress.*((0\.){3}0\/0)"},
'riskvalue' : {'N': "100"},
'active' : {'S': "Y"}
}
)
client.put_item(
TableName=logTable,
Item={
'rule' : {'S': "SSHOpenToWorld"},
'category' : {'S': "SecurityGroup"},
'ruletype' : {'S': "regex"},
'ruledata' : {'S': "^.*Ingress.*(([fF]rom[pP]ort|[tT]o[pP]ort).\s*:\s*u?.(22).*[cC]idr[iI]p.\s*:\s*u?.((0\.){3}0\/0)|[cC]idr[iI]p.\s*:\s*u?.((0\.){3}0\/0).*([fF]rom[pP]ort|[tT]o[pP]ort).\s*:\s*u?.(22))"},
'riskvalue' : {'N': "100"},
'active' : {'S': "Y"}
}
)
client.put_item(
TableName=logTable,
Item={
'rule' : {'S': "AllowHttp"},
'category' : {'S': "SecurityGroup"},
'ruletype' : {'S': "regex"},
'ruledata' : {'S': "^.*Ingress.*[fF]rom[pP]ort.\s*:\s*u?.(80)"},
'riskvalue' : {'N': "3"},
'active' : {'S': "N"}
}
)
client.put_item(
TableName=logTable,
Item={
'rule' : {'S': "ForbiddenAMIs"},
'category' : {'S': "EC2Instance"},
'ruletype' : {'S': "regex"},
'ruledata' : {'S': "^.*ImageId.\s*:\s*u?.(ami-7a11e211|ami-08111162|ami-f6035893)"},
'riskvalue' : {'N': "10"},
'active' : {'S': "N"}
}
)
client.put_item(
TableName=logTable,
Item={
'rule' : {'S': "VolumesNotEncrypted"},
'category' : {'S': "Volume"},
'ruletype' : {'S': "regex"},
'ruledata' : {'S': "^.*Encrypted.?\s*:\s*u?.?false"},
'riskvalue' : {'N': "90"},
'active' : {'S': "Y"}
}
)
def evaluate_template(rules, template):
# Validate rules and increase risk value
risk = 0
# Extract Security Group Resources
sgResources = []
ec2Resources = []
volumeResources = []
failedRules = []
jsonTemplate = json.loads(template)
print(json.dumps(jsonTemplate, sort_keys=True, indent=4, separators=(',', ': ')))
print(rules)
for key in jsonTemplate['Resources'].keys():
if "EC2::SecurityGroup" in jsonTemplate['Resources'][key]['Type']:
sgResources.append(jsonTemplate['Resources'][key])
elif "EC2::Instance" in jsonTemplate['Resources'][key]['Type']:
ec2Resources.append(jsonTemplate['Resources'][key])
elif "EC2::Volume" in jsonTemplate['Resources'][key]['Type']:
volumeResources.append(jsonTemplate['Resources'][key])
for n in range(len(sgResources)):
for m in range(len(rules['sgRules'])):
if rules['sgRules'][m]['active']['S'] == "Y":
if re.match(rules['sgRules'][m]['ruledata']['S'], str(sgResources[n])):
risk = risk + int(rules['sgRules'][m]['riskvalue']['N'])
failedRules.append(str(rules['sgRules'][m]['rule']['S']))
print("Matched rule: " + str(rules['sgRules'][m]['rule']['S']))
print("Resource: " + str(sgResources[n]))
print("Riskvalue: " + rules['sgRules'][m]['riskvalue']['N'])
print("")
for n in range(len(ec2Resources)):
for m in range(len(rules['ec2Rules'])):
if rules['ec2Rules'][m]['active']['S'] == "Y":
if re.match(rules['ec2Rules'][m]['ruledata']['S'], str(ec2Resources[n])):
risk = risk + int(rules['ec2Rules'][m]['riskvalue']['N'])
failedRules.append(str(rules['ec2Rules'][m]['rule']['S']))
print("Matched rule: " + str(rules['ec2Rules'][m]['rule']['S']))
print("Resource: " + str(ec2Resources[n]))
print("Riskvalue: " + rules['ec2Rules'][m]['riskvalue']['N'])
print("")
for n in range(len(volumeResources)):
for m in range(len(rules['volRules'])):
if rules['volRules'][m]['active']['S'] == "Y":
if re.match(rules['volRules'][m]['ruledata']['S'], str(volumeResources[n])):
risk = risk + int(rules['volRules'][m]['riskvalue']['N'])
failedRules.append(str(rules['volRules'][m]['rule']['S']))
print("Matched rule: " + str(rules['volRules'][m]['rule']['S']))
print("Resource: " + str(volumeResources[n]))
print("Riskvalue: " + rules['volRules'][m]['riskvalue']['N'])
print("")
print("Risk value: " +str(risk))
return risk, failedRules
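# An illustrative aside (not an original comment): the SSHOpenToWorld rule, for
# example, fires on a resource whose string form looks roughly like
#     "...SecurityGroupIngress... 'FromPort': '22' ... 'CidrIp': '0.0.0.0/0'..."
# because each rule's regex is matched against str(resource).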
def s3_next_step(s3, bucket, risk, failedRules, template, job_id):
# Store data in temporary physical file
s3Client = boto3.client('s3', config=botocore.client.Config(signature_version='s3v4'))
tmp_file = tempfile.NamedTemporaryFile()
tmp_zip = tempfile.NamedTemporaryFile()
# for item in template:
# tmp_file.write(item)
tmp_file.write(template)
tmp_file.flush()
# Process file based on risk value
if risk < 5:
with zipfile.ZipFile(tmp_zip.name, 'w') as zip:
zip.write(tmp_file.name, "valid.template.json")
zip.close()
s3Client.upload_file( # Add encryption support
tmp_zip.name,
bucket,
'valid.template.zip')
tmp_file.close()
put_job_success(job_id, 'Job succesful, minimal or no risk detected.')
elif 5 <= risk < 50:
with zipfile.ZipFile(tmp_zip.name, 'w') as zip:
zip.write(tmp_file.name, "flagged.template.json")
zip.close()
s3Client.upload_file( # Add encryption support
tmp_zip.name,
bucket,
'flagged.template.zip')
tmp_file.close()
put_job_success(job_id, 'Job succesful, medium risk detected, manual approval needed.')
elif risk >= 50:
tmp_file.close()
print("High risk file, fail pipeline")
put_job_failure(job_id, 'Function exception: Failed filters ' + str(failedRules))
return 0
def lambda_handler(event, context):
"""The Lambda function handler
Validate input template for security vulnerables. Route as appropriate based on risk assesment.
Args:
event: The event passed by Lambda
context: The context passed by Lambda
"""
# Extract the Job ID
job_id = event['CodePipeline.job']['id']
# Extract the Job Data
job_data = event['CodePipeline.job']['data']
try:
# Print the entire event for tracking
print("Received event: " + json.dumps(event, indent=2))
# Extract the params
params = get_user_params(job_data)
# Get the list of artifacts passed to the function
input_artifacts = job_data['inputArtifacts']
input_artifact = params['input']
template_file = params['file']
output_bucket = params['output']
# Get the artifact details
input_artifact_data = find_artifact(input_artifacts, input_artifact)
# Get S3 client to access artifact with
s3 = setup_s3_client(job_data)
# Get the JSON template file out of the artifact
template = get_template(s3, input_artifact_data, template_file)
print("Template: " + str(template))
# Get validation rules from DDB
rules = get_rules()
# Validate template from risk perspective. FailedRules can be used if you wish to expand the script to report failed items
risk, failedRules = evaluate_template(rules, template)
# Based on risk, store the template in the correct S3 bucket for future process
s3_next_step(s3, output_bucket, risk, failedRules, template, job_id)
except Exception as e:
# If any other exceptions which we didn't expect are raised
# then fail the job and log the exception message.
print('Function failed due to exception.')
print(e)
traceback.print_exc()
put_job_failure(job_id, 'Function exception: ' + str(e))
print('Function complete.')
return "Complete."
``` |
{
"source": "joaogabrielferr/AlgosandDataStructures",
"score": 4
} |
#### File: AlgosandDataStructures/AlgosandDataStructures/BinarySearch.py
```python
def Binarysearch(arr,value):
ini = 0
end = len(arr) - 1
while ini<=end:
        middle = ini + (end - ini) // 2
if arr[middle] == value:
return middle
if arr[middle] > value:
end = middle - 1
else: ini = middle + 1
return -1
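# A minimal usage sketch (the example values are assumptions, not part of the
# original file). Binary search requires the input list to be sorted.
if __name__ == "__main__":
    data = [1, 3, 5, 7, 9, 11]
    assert Binarysearch(data, 7) == 3   # found at index 3
    assert Binarysearch(data, 4) == -1  # absent values return -1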
```
#### File: AlgosandDataStructures/AlgosandDataStructures/Stack.py
```python
class Stack():
    def __init__(self):
        # instance attributes: class-level mutable attributes would be
        # shared between all Stack instances, which is a bug
        self.stack = []
        self.numelements = 0
    def push(self, value):
        self.stack.append(value)
        self.numelements += 1
    def pop(self):
        if self.numelements == 0:
            raise Exception("Trying to pop on an empty stack")
        self.numelements -= 1
        return self.stack.pop()
    def empty(self):
        return self.numelements == 0
def top(self):
if self.numelements == 0:
raise Exception("Trying to get the top of a empty stack")
else:
return self.stack[self.numelements-1]
def size(self):
return self.numelements
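# A minimal usage sketch (the example values are assumptions, not part of the
# original file):
if __name__ == "__main__":
    s = Stack()
    s.push(1)
    s.push(2)
    assert s.top() == 2   # peek without removing
    assert s.pop() == 2   # LIFO order
    assert s.size() == 1
    assert not s.empty()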
``` |
{
"source": "joaogabrieljs/document-layout-analysis-app",
"score": 3
} |
#### File: project/api/dla_api.py
```python
from subprocess import check_output
import base64
import numpy as np
from flask import Blueprint, jsonify, request
import matplotlib.pyplot as plt
from io import BytesIO
from project.predictor import make_predictions
from pdf2image import convert_from_bytes
from PyPDF2 import PdfFileReader, PdfFileWriter
dla_blueprint = Blueprint("", __name__)
@dla_blueprint.route("/analyse-image-json", methods=["POST"])
def analyse_image_json():
# Read pdf from request and convert to PyPdf2 PdfFileReader
pdfFileFromRequest = request.files["pdf_file"].read()
pdfFile = PdfFileReader(BytesIO(pdfFileFromRequest))
# Resize all pdf pages
for pageNumber in range(pdfFile.getNumPages()):
pdfFile.getPage(pageNumber).scaleTo(400, 700)
pdfWriter = PdfFileWriter()
#pdfWriter.addAttachment('pdfResized.pdf', pdfFile)
pdfWriter.addPage(pdfFile.getPage(0))
with open('pdfResized.pdf', 'wb') as f:
pdfWriter.write(f)
imagesFromPdf = convert_from_bytes(BytesIO(pdfFileFromRequest).read(), size=(400, 700))
for image in imagesFromPdf:
openCVImage = np.array(image)
openCVImage = openCVImage[:, :, ::-1].copy() # Convert RGB to BGR
jsonData = make_predictions(openCVImage, True)
predictions = jsonData.get('predictions')
boundingBoxes = sort_bounding_boxes(predictions.get('pred_boxes'))
jsonParagraphs = {}
for index, boundingBox in enumerate(boundingBoxes):
paragraph = {}
pointX = int(boundingBox[0])
pointY = int(boundingBox[1])
width = int(boundingBox[2]) - pointX
height = int(boundingBox[3]) - pointY
paragraph['text'] = check_output(['pdftotext', '-x', str(pointX), '-y', str(pointY), '-W', str(width), '-H', str(height), 'pdfResized.pdf', '-enc', 'UTF-8', '-'])
paragraph['score'] = predictions.get('scores')[index]
paragraph['type'] = get_prediction_type(predictions.get('pred_classes')[index])
jsonParagraphs[index] = paragraph
return jsonify(jsonParagraphs)
def sort_bounding_boxes(boundingBoxes):
npArrayBoundingBoxes = np.array(boundingBoxes)
index = np.lexsort((npArrayBoundingBoxes[:,0], npArrayBoundingBoxes[:,1]))
sortedBoundingBoxes = npArrayBoundingBoxes[index]
return sortedBoundingBoxes
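# Note on np.lexsort: the *last* key in the tuple is the primary sort key, so
# the boxes above are ordered top-to-bottom (by y, column 1) and then
# left-to-right (by x, column 0) within a row -- i.e. reading order.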
def get_prediction_type(type):
switcher = {
0: 'text',
1: 'title',
2: 'figure',
3: 'table'
}
return switcher.get(type, 'nothing')
``` |
{
"source": "JoaoGabriel-Lima/motivational-bot",
"score": 3
} |
#### File: JoaoGabriel-Lima/motivational-bot/frog2.py
```python
username = input("Coloque o nome do usuário: ")
def start_frog():
input("Aperte Enter após o Código QR")
name = username
msg = "<Status> Servidor Iniciado! Aguarde"
count = 1
user = driver.find_element_by_xpath("//span[@title='{}']".format(name))
user.click()
msg_box = driver.find_element_by_xpath("//*[@id='main']/footer/div[1]/div[2]/div/div[2]")
for index in range(count):
msg_box.send_keys(msg)
driver.find_element_by_xpath("//*[@id='main']/footer/div[1]/div[3]/button").click()
print("Success")
def send_time():
# from datetime import datetime
# now = datetime.now()
# current_time = now.strftime("%H:%M:%S")
from getword import frase_motivacional
frase_motivacional()
current_time = frase_motivacional.frase
name = username
msg = name + " não fique triste! Fique com uma frase motivacional: " + current_time
count = 1
user = driver.find_element_by_xpath("//span[@title='{}']".format(name))
user.click()
msg_box = driver.find_element_by_xpath("//*[@id='main']/footer/div[1]/div[2]/div/div[2]")
for index in range(count):
msg_box.send_keys(msg)
driver.find_element_by_xpath("//*[@id='main']/footer/div[1]/div[3]/button").click()
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://web.whatsapp.com/")
driver.maximize_window()
start_frog()
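# Note: send_time() is defined above but never called in this script. One
# plausible way to wire it up (an assumption, not part of the original code):
# import time
# while True:
#     send_time()
#     time.sleep(3600)  # send a motivational phrase every hour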
``` |
{
"source": "joaogabrielrc/fusion",
"score": 2
} |
#### File: apps/core/forms.py
```python
from django import forms
from django.core.mail.message import EmailMessage
from django.forms.fields import EmailField
class ContactForm(forms.Form):
name = forms.CharField(label='Nome', max_length=100)
email = forms.EmailField(label='E-mail', max_length=100)
subject = forms.CharField(label='Assunto', max_length=100)
message = forms.CharField(label='Mensagem', widget=forms.Textarea())
def send_mail(self):
        # Retrieve the cleaned data from the form
name = self.cleaned_data['name']
email = self.cleaned_data['email']
subject = self.cleaned_data['subject']
message = self.cleaned_data['message']
content = f'Nome: {name}\nE-mail: {email}\nAssunto: {subject}\nMensagem: {message}'
mail = EmailMessage(
subject=subject,
body=content,
from_email='<EMAIL>',
to=['<EMAIL>'],
headers={'Reply-To': email}
)
mail.send()
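# A minimal usage sketch from a view (hypothetical view code, not part of this
# file): the form must be validated first so that cleaned_data is populated
# before send_mail() reads from it.
#
# def contact(request):
#     form = ContactForm(request.POST or None)
#     if form.is_valid():
#         form.send_mail()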
``` |
{
"source": "joaogarciadelima/oopy",
"score": 4
} |
#### File: oopy/caes/cao_abc.py
```python
import abc
class CaoBase(metaclass=abc.ABCMeta):
qt_patas = 4
carnivoro = True
nervoso = False
def __init__(self, nome):
self.nome = nome
@abc.abstractmethod
def latir(self, vezes=1):
        '''print the bark to standard output'''
def detectar_intruso(self):
self.latir()
        # some further action...
def __str__(self):
return self.nome
def __repr__(self):
return 'Cao(%r)' % self.nome
def __eq__(self, outro):
return (isinstance(outro, Cao) and
self.__dict__ == outro.__dict__)
class Cao(CaoBase):
def latir(self, vezes=1):
        # when nervous, barks twice as much
vezes = vezes + (self.nervoso * vezes)
print(self.nome + ':' + ' Au!' * vezes)
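# A minimal usage sketch (assumed example, not part of the original file):
# CaoBase cannot be instantiated because latir() is abstract, while the
# concrete subclass Cao can.
if __name__ == '__main__':
    rex = Cao('Rex')
    rex.latir(2)          # prints: Rex: Au! Au!
    try:
        CaoBase('Toto')   # TypeError: can't instantiate abstract class
    except TypeError as exc:
        print(exc)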
```
#### File: joaogarciadelima/oopy/carta.py
```python
class Carta(object):
def __init__(self, valor, naipe):
self.valor = valor
self.naipe = naipe
def __repr__(self):
return 'Carta(valor=%r, naipe=%r)' % (self.valor, self.naipe)
```
#### File: exercicios/tombola/tombola_bug.py
```python
class Tombola(object):
    '''BUGGY IMPLEMENTATION!!!'''
def __init__(self, seq):
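        # BUG: stores a reference to the caller's sequence instead of a copy
        # (compare with the fixed version below, which uses list(seq))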
self.itens = seq
def carregar(self, seq):
self.itens.extend(seq)
def sortear(self):
return self.itens.pop()
def carregada(self):
return bool(self.itens)
```
#### File: exercicios/tombola/tombola.py
```python
from random import shuffle
class Tombola(object):
    '''Draws items without repeating'''
def carregar(self, seq):
self.itens = list(seq)
def misturar(self):
shuffle(self.itens)
def sortear(self):
return self.itens.pop()
def carregada(self):
return bool(self.itens)
```
#### File: joaogarciadelima/oopy/tombola.py
```python
from random import shuffle
class Tombola:
"""sorteia itens sem repetir"""
def __init__(self):
self.itens = []
def carregada(self):
return bool(self.itens)
def carregar(self, itens):
self.itens = list(itens)
def misturar(self):
shuffle(self.itens)
def sortear(self):
return self.itens.pop()
``` |
{
"source": "JoaoGasparini/ADS2D",
"score": 2
} |
#### File: JoaoGasparini/ADS2D/teste.py
```python
import pytest
from principal import soma
def test_soma():
assert soma(3, 2) == 5
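# For reference, the function under test presumably lives in principal.py as
# something like this (an assumption -- that file is not shown here):
# def soma(a, b):
#     return a + b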
``` |
{
"source": "joaogbcravo/gocdapi",
"score": 3
} |
#### File: gocdapi/gocdapi/pipeline_groups.py
```python
from gocdapi.pipeline_group import PipelineGroup
from gocdapi.gobase import GoBase
from gocdapi.custom_exceptions import GoCdApiException
class PipelineGroups(dict, GoBase):
"""
Class to hold information on a collection of PipelineGroups objects
This class acts like a dictionary
"""
def __init__(self, go_server):
"""Inits PipelineGroups objects.
Args:
go_server (Go): A Go object which this PipelineGroups belongs to.
"""
dict.__init__(self)
GoBase.__init__(self, go_server, path='go/api/config/pipeline_groups/')
def __getitem__(self, group_name):
"""Custom __getitem__ method
        Overrides the default __getitem__ method from the dict class to raise a custom exception when the item doesn't exist
        Args:
            group_name (str): the name of the group of pipelines to look for
        Return:
            PipelineGroup: the PipelineGroup named 'group_name'
        Raises:
            GoCdApiException: When no PipelineGroup named 'group_name' was found
"""
try:
self.repoll()
return dict.__getitem__(self, group_name)
except KeyError:
raise GoCdApiException("No PipelineGroup with name %s connected to server." % group_name)
def __str__(self):
"""Returns a pretty representation of the object
Returns:
str: representation of the object
"""
return 'Pipelines Groups @ %s' % self.go_server.baseurl
def _poll(self):
"""Will get information of all PipelineGroups in the Go server.
Uses _data attribute populated by inherited methods, creating PipelineGroups objects with that information.
The PipelineGroups's objects are saved as a pair (key,value) with their name as key.
"""
data = self.load_json_data(self._data)
for item in data:
pipeline_group = PipelineGroup(self.go_server, item)
self[pipeline_group.name] = pipeline_group
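# A minimal usage sketch (the construction of the go_server object is assumed
# here; it is not shown in this file). Since PipelineGroups subclasses dict,
# groups are looked up by name, and a missing name raises GoCdApiException:
#
# groups = PipelineGroups(go_server)
# build_group = groups['my-group']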
``` |
{
"source": "joaogehlen91/django-easy-tenants",
"score": 2
} |
#### File: django-easy-tenants/tests/test_easy_tenants.py
```python
from easy_tenants import get_current_tenant, get_tenant_model, tenant_context
from tests.models import StoreTenant
def test_get_tenant_model():
assert StoreTenant == get_tenant_model()
def test_get_and_set_current_tenant_thread_local(db):
tenant = StoreTenant.objects.create()
with tenant_context(tenant):
assert tenant == get_current_tenant()
def test_tenant_context(db):
tenant1 = StoreTenant.objects.create()
tenant2 = StoreTenant.objects.create()
with tenant_context(tenant1):
with tenant_context(tenant2):
assert tenant2 == get_current_tenant()
assert tenant1 == get_current_tenant()
``` |
{
"source": "JoaoGF21/apache-spark-programming-with-databricks",
"score": 3
} |
#### File: Solutions/ASP 3 - Functions/ASP 3.5L - Sort Day Lab.py
```python
from pyspark.sql.functions import approx_count_distinct, avg, col, date_format, to_date
df = (spark
.read
.format("delta")
.load(events_path)
.withColumn("ts", (col("event_timestamp") / 1e6).cast("timestamp"))
.withColumn("date", to_date("ts"))
.groupBy("date").agg(approx_count_distinct("user_id").alias("active_users"))
.withColumn("day", date_format(col("date"), "E"))
.groupBy("day").agg(avg(col("active_users")).alias("avg_users"))
)
display(df)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1. Define UDF to label day of week
# MAGIC
# MAGIC Use the **`label_day_of_week`** function provided below to create the UDF **`label_dow_udf`**
# COMMAND ----------
def label_day_of_week(day: str) -> str:
dow = {"Mon": "1", "Tue": "2", "Wed": "3", "Thu": "4",
"Fri": "5", "Sat": "6", "Sun": "7"}
return dow.get(day) + "-" + day
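# Worked example (assumed input): label_day_of_week("Mon") returns "1-Mon", so
# sorting on the label string also orders the days Monday-first.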
# COMMAND ----------
# ANSWER
label_dow_udf = spark.udf.register("label_dow", label_day_of_week)
# COMMAND ----------
# MAGIC %md ### 2. Apply UDF to label and sort by day of week
# MAGIC - Update the **`day`** column by applying the UDF and replacing this column
# MAGIC - Sort by **`day`**
# MAGIC - Plot as a bar graph
# COMMAND ----------
# ANSWER
final_df = (df
.withColumn("day", label_dow_udf(col("day")))
.sort("day")
)
display(final_df)
# COMMAND ----------
# MAGIC %md ### Clean up classroom
# COMMAND ----------
classroom_cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
``` |
{
"source": "JoaoGFarias/CucumberWithDataSauce",
"score": 2
} |
#### File: FeatureFileProcessor/exceptions/no_data_file_exception.py
```python
class NoDataFileException(Exception):
    def __init__(self, message="No data file", errors=None):
        # avoid a mutable default argument for the error list
        super().__init__(message, errors or [])
```
#### File: FeatureFileProcessor/file_models/feature_file.py
```python
from .scenario import Scenario
from functools import reduce
class FeatureFile(object):
def __init__(self, title, data_path, file_scenarios=[]):
self.title = title
self.scenarios = [Scenario(file_scenario, data_path)
for file_scenario in file_scenarios]
def feature_title(self):
return self.title
def number_of_scenarios(self):
return len(self.scenarios)
def scenario_at(self, position):
return self.scenarios[position-1]
def feature_file_as_text(self):
printable_scenarios = [scenario.printable_scenario()
for scenario in self.scenarios]
scenarios_as_text = ["\n" + "\n".join(scenario)
for scenario in printable_scenarios]
return self.feature_title() + "\n" + "\n".join(scenarios_as_text)
```
#### File: FeatureFileProcessor/folder_processor/processor.py
```python
import shutil
import os
import itertools as it
from pathlib import Path
class FolderProcessor(object):
def __init__(self, target_folder, data_folder, file_processor=None):
self.target_folder = target_folder
self.data_folder = data_folder
self.file_processor = file_processor
def prepare_target_folder(self):
try:
self.delete_target_folder()
except FileNotFoundError as e:
pass
os.makedirs(self.target_folder)
def process_data_folder(self):
for path in list(Path(self.data_folder).glob('**/*.feature')):
parsed_file = self.process_file(path).feature_file_as_text()
target_file_path = self.discover_target_file_path(path)
Path.mkdir(target_file_path.parent, parents=True, exist_ok=True)
with open(target_file_path, "w+") as text_file:
text_file.write(parsed_file)
def process_file(self, file_path):
text = self.file_processor.read_file(file_path)
self.file_processor.create_scenarios(text, file_path.parent)
return self.file_processor.parsed_feature()
def discover_target_file_path(self, path):
path_parts = path.parts
data_folder_parts = Path(self.data_folder).parts
internal_file_path = [
x for x, y
in it.zip_longest(path_parts, data_folder_parts)
if x != y and x is not None]
result = Path(self.target_folder)
for folder in internal_file_path:
result = result.joinpath(folder)
return result
def delete_target_folder(self):
shutil.rmtree(self.target_folder)
```
#### File: tests/test_data/test_data_interface.py
```python
import os
from .simple_file.simple_file_data import SimpleFileData
from .without_data_file.without_data_file import WithoutDataFile
import json
class TestDataInterface(object):
def __init__(self):
self._setup_base_path()
self._setup_target_path()
self._setup_files()
def _setup_base_path(self):
self.base_path = os.path.join(
os.path.abspath(
os.path.join(
os.path.dirname(__file__))))
def _setup_target_path(self):
self.target_path = os.path.join(self.base_path, "target_data")
def _setup_files(self):
with open(os.path.join(self.base_path, 'files.json')) as f:
files = json.load(f)['files']
for key in files.keys():
setattr(self, key, files[key])
def getFileData(self, fileName):
return {
self.SIMPLE_FILE_DATA: SimpleFileData(self.base_path),
self.WITHOUT_DATA_FILE_DATA: WithoutDataFile(self.base_path),
}[fileName]
```
#### File: CucumberWithDataSauce/tests/test_folder_processor.py
```python
from .context import FolderProcessor
from .context import FeatureFileProcessor
import os
import unittest
from nose2dep.core import depends
import shutil
from .test_data.test_data_interface import TestDataInterface
class FolderProcessorTestSuite(unittest.TestCase):
@classmethod
def setUpClass(self):
self.testData = TestDataInterface()
self.base_path = self.testData.base_path
self.target_path = self.testData.target_path
def setUp(self):
self.file_processor = FolderProcessor(
data_folder=self.base_path,
target_folder=self.target_path,
file_processor=FeatureFileProcessor(self.base_path))
def tearDown(self):
try:
shutil.rmtree(self.target_path)
except FileNotFoundError as e:
pass
def test_prepare_target_folder(self):
self.file_processor.prepare_target_folder()
self.assertTrue(os.path.exists(self.target_path))
self.assertTrue(os.path.isdir(self.target_path))
self.assertEqual(os.listdir(self.target_path), [])
@depends(before=test_prepare_target_folder)
def test_data_flow(self):
self.file_processor.prepare_target_folder()
self.file_processor.process_data_folder()
self.assertEqual(
self.number_files_in_directory(self.target_path),
3)
@depends(before=test_prepare_target_folder)
def test_deletes_target_folder(self):
self.file_processor.prepare_target_folder()
self.file_processor.delete_target_folder()
self.assertFalse(os.path.exists(self.target_path))
def number_files_in_directory(self, target_path):
return sum([len(files) for _, _, files in os.walk(self.target_path)])
```
#### File: CucumberWithDataSauce/tests/test_scenario.py
```python
from .context import FeatureFileProcessor
from .context import FeatureFile
from .context import Scenario
from .context import NoDataFileException
import unittest
from nose2dep.core import depends
from .test_data.test_data_interface import TestDataInterface
class FeatureFileProcessorTestSuite(unittest.TestCase):
@classmethod
def setUpClass(self):
self.testData = TestDataInterface()
self.simpleFileData = self.testData.getFileData(
self.testData.SIMPLE_FILE_DATA)
self.base_path = self.simpleFileData.base_path
self.withoutDataFile = self.testData.getFileData(
self.testData.WITHOUT_DATA_FILE_DATA)
self.scenario_witout_data = self.withoutDataFile.scenario_text(1)
def setUp(self):
self.file_processor = FeatureFileProcessor(self.base_path)
def test_prints_with_data_table(self):
printable_scenario = self.__create_scenario_at_position__(
1).printable_scenario()
expected_scenario = Scenario(
self.simpleFileData.printable_table_text(1),
self.base_path)
self.assertEqual(expected_scenario, printable_scenario)
def test_prints_without_scenario_outline(self):
scenario = Scenario(self.scenario_witout_data, self.base_path)
printable_scenario = scenario.printable_scenario()
self.assertEqual(scenario, printable_scenario)
def __create_scenario_at_position__(self, scenario_position):
return Scenario(
self.simpleFileData.scenario_text(scenario_position),
self.base_path)
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "JoaoGFarias/locust-plugins",
"score": 2
} |
#### File: locust-plugins/examples/pgreader.py
```python
import os
from locust_plugins.postgresreader import PostgresReader
from locust import HttpLocust, task, TaskSet
from locust.wait_time import constant
customer_reader = PostgresReader(f"env='{os.environ['LOCUST_TEST_ENV']}' AND tb=0 AND lb=1")
class UserBehavior(TaskSet):
@task
def my_task(self):
customer = customer_reader.get()
self.client.get(f"/?ssn={customer['ssn']}")
customer_reader.release(customer)
class MyHttpLocust(HttpLocust):
task_set = UserBehavior
wait_time = constant(0)
host = "http://example.com"
```
#### File: locust-plugins/examples/socketio.py
```python
from locust import task
from locust.core import TaskSet
from locust_plugins.locusts import SocketIOLocust
from locust.wait_time import constant
class UserBehaviour(TaskSet):
@task
def my_task(self):
# example of subscribe
self.locust.send('42["subscribe",{"url":"/sport/matches/11995208/draws","sendInitialUpdate": true}]')
# you can do http in the same taskset as well
self.client.get("/")
# wait for pushes, while occasionally sending heartbeats, like a real client would
self.locust.sleep_with_heartbeat(10)
class MySocketIOLocust(SocketIOLocust):
    task_set = UserBehaviour
    wait_time = constant(0)
    host = "http://example.com"
```
#### File: locust-plugins/examples/timescale_listener.py
```python
from locust_plugins.listeners import TimescaleListener
from locust import HttpLocust, TaskSet, task
from locust.wait_time import constant
TimescaleListener("example", "env1")
class MyTaskSet(TaskSet):
@task
def index(self):
self.client.post("/authentication/1.0/getResults", {"username": "something"})
class MyHttpLocust(HttpLocust):
task_set = MyTaskSet
wait_time = constant(1)
host = "http://example.com"
``` |
{
"source": "JoaoGFarias/Projects",
"score": 3
} |
#### File: Numbers/AlarmClock/AlarmClock.py
```python
import pygame
import threading
def play_sound():
""" Plays a loaded sound and waits 5 seconds to do so again """
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pass
threading.Timer(5,play_sound).start()
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)
pygame.mixer.music.load("you_suffer.wav")
play_sound()
```
#### File: Python/Numbers/FactorialFinder.py
```python
def factIter(num):
"""
Calculates the factorial of a given natural number interactively.
Keyword arguments:
num -- the natural number
"""
if num == 0 :
return 1
result = 1
for i in range(1 , num+1):
result = result * i
return result
def factRecur(num):
"""
Calculates the factorial of a given natural number recursively.
Keyword arguments:
num -- the natural number
"""
def factRecurAux(acc,num):
if num == 1:
return acc
else:
return factRecurAux(acc*num, num-1)
if num == 0:
return 1
else:
return factRecurAux(1,num)
assert factIter(0) == 1
assert factIter(1) == 1
assert factIter(5) == 120
assert factIter(15) == 1307674368000
assert factRecur(0) == 1
assert factRecur(1) == 1
assert factRecur(5) == 120
assert factRecur(15) == 1307674368000
``` |
{
"source": "joaogilberto23/ProjetoReiDoCangaco",
"score": 3
} |
#### File: ProjetoReiDoCangaco/models/categoria_model.py
```python
from sql_alchemy import db
class CategoriaModel(db.Model):
__tablename__ = 'categoria'
cod_categoria = db.Column(db.Integer, primary_key=True)
nome_categoria = db.Column(db.String(50), nullable=False)
def __init__(self, nome_categoria):
self.nome_categoria = nome_categoria
def json(self):
return {
'cod_categoria': self.cod_categoria,
'nome_categoria': self.nome_categoria
}
@classmethod
def find_categoria(cls, cod_categoria):
categoria = cls.query.filter_by(cod_categoria=cod_categoria).first()
if categoria:
return categoria
return None
@classmethod
def find_categoria_nome(cls, nome_categoria):
categoria = cls.query.filter_by(nome_categoria=nome_categoria).first()
if categoria:
return categoria
return None
def save_categoria(self):
db.session.add(self)
db.session.commit()
def update_categoria(self, cod_categoria, nome_categoria):
self.cod_categoria = cod_categoria
self.nome_categoria = nome_categoria
def delete_categoria(self):
db.session.delete(self)
db.session.commit()
```
#### File: ProjetoReiDoCangaco/models/produto_model.py
```python
from sql_alchemy import db
from models.fornecedor_model import FornecedorModel
from models.categoria_model import CategoriaModel
class ProdutoModel(db.Model):
__tablename__ = 'produto'
id_produto = db.Column(db.Integer, primary_key=True)
cod_produto = db.Column(db.String(50), nullable=False)
nome_produto = db.Column(db.String(150), nullable=False)
valor_produto = db.Column(db.Float(precision=2), nullable=False)
ativo = db.Column(db.String(3), nullable=False)
cod_categoria = db.Column(
db.Integer, db.ForeignKey('categoria.cod_categoria'))
cod_fornecedor = db.Column(
db.Integer, db.ForeignKey('fornecedor.cod_fornecedor'))
def __init__(self, cod_produto, nome_produto,
valor_produto, ativo, cod_categoria, cod_fornecedor):
self.cod_produto = cod_produto
self.nome_produto = nome_produto
self.valor_produto = valor_produto
self.ativo = ativo
self.cod_categoria = cod_categoria
self.cod_fornecedor = cod_fornecedor
def getCodProduto(self):
return self.cod_produto
def json(self):
return {
'id_produto': self.id_produto,
'cod_produto': self.cod_produto,
'nome_produto': self.nome_produto,
'valor_produto': self.valor_produto,
'ativo': self.ativo,
'cod_categoria': self.cod_categoria,
'cod_fornecedor': self.cod_fornecedor
}
@classmethod
def find_produto_by_cod(cls, cod_produto):
produto = cls.query.filter_by(cod_produto=cod_produto).first()
if produto:
return produto
return None
@classmethod
def find_produto(cls, id_produto):
produto = cls.query.filter_by(id_produto=id_produto).first()
if produto:
return produto
return None
    @classmethod
    def find_produto_categoria(cls, cod_categoria):
        produto = cls.query.filter_by(cod_categoria=cod_categoria).first()
        if produto:
            return produto
        return None
    @classmethod
    def find_produto_fornecedor(cls, cod_fornecedor):
        produto = cls.query.filter_by(cod_fornecedor=cod_fornecedor).first()
        if produto:
            return produto
        return None
def save_produto(self):
db.session.add(self)
db.session.commit()
def update_produto(self, id_produto, cod_produto, nome_produto,
valor_produto, ativo, cod_categoria, cod_fornecedor):
self.id_produto = id_produto
self.cod_produto = cod_produto
self.nome_produto = nome_produto
self.valor_produto = valor_produto
self.ativo = ativo
self.cod_categoria = cod_categoria
self.cod_fornecedor = cod_fornecedor
def delete_produto(self):
db.session.delete(self)
db.session.commit()
def __json_categoria(self, cod_categoria):
categoria = CategoriaModel.find_categoria(cod_categoria)
return categoria.json()
def __json_fornecedor(self, cod_fornecedor):
fornecedor = FornecedorModel.find_fornecedor(cod_fornecedor)
return fornecedor.json()
``` |
{
"source": "joaogodinho/price_fetcher",
"score": 2
} |
#### File: price_fetcher/price_fetcher/pipelines.py
```python
from scrapy.exceptions import DropItem
class DuplicatesPipeline(object):
def __init__(self):
self.ids_seen = set()
def process_item(self, item, spider):
if item['url'] in self.ids_seen:
raise DropItem("Duplicate item found: %s" % item)
else:
self.ids_seen.add(item['url'])
return item
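# Usage note: Scrapy only runs this pipeline if it is enabled in settings.py,
# e.g. (the priority value 300 is an arbitrary assumption):
# ITEM_PIPELINES = {
#     "price_fetcher.pipelines.DuplicatesPipeline": 300,
# }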
```
#### File: price_fetcher/spiders/alientech.py
```python
import math
import scrapy
import json
from lxml import etree
from price_fetcher.items import ProductItem
class AlienTechSpider(scrapy.Spider):
name = 'AlienTech'
allowed_domains = ['alientech.pt']
url_format = 'https://www.alientech.pt/toogas/product/ajax/action/index/uniqueSearchId/{}/page/{}?q=%25&price=20%2C999999999&order=price-high-to-low'
added_urls = set()
start_urls = [
'https://www.alientech.pt/toogas/product/ajax/action/index/uniqueSearchId/1/page/1?q=%25&price=20%2C999999999&order=price-high-to-low',
]
def parse(self, response):
response = json.loads(response.body_as_unicode())
products = etree.HTML(response['products'])
for sel in products.xpath('//div[contains(@class, "item")]/form/div[@class="box-product"]'):
item = ProductItem()
temp = sel.xpath('div/div[@class="prod-name"]')[0]
item['name'] = temp.xpath('a/text()')[0].strip()
item['url'] = temp.xpath('a/@href')[0]
item['part_number'] = temp.xpath('small/text()')[0].strip()
prices = sel.xpath('div/div/div[@class="block-price"]')[0]
sale_price = prices.xpath('span[contains(@class, "prod-old-price")]/text()')[0].strip().split(' ')[0]
sale_price = sale_price.replace(',', '')
norm_price = prices.xpath('span[contains(@class, "prod-price")]/text()')[0].strip().split(' ')[0]
norm_price = norm_price.replace(',', '')
if float(sale_price) != float(norm_price):
item['sale_price'] = sale_price
item['on_sale'] = True
item['price'] = norm_price
else:
item['sale_price'] = 0
item['on_sale'] = False
item['price'] = norm_price
yield item
for numb in range(1, int(response['max_pages']) + 1):
url = self.url_format.format(response['uniqueSearchId'], numb)
if numb not in self.added_urls:
self.added_urls.add(numb)
print(url)
yield scrapy.Request(url)
``` |
{
"source": "JoaoGranja/CarND-Advanced-Lane-Lines",
"score": 3
} |
#### File: JoaoGranja/CarND-Advanced-Lane-Lines/line.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
from helper_functions import *
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self, image_shape, debug = False):
# HYPERPARAMETERS
# Number of sliding windows
self.nwindows = 12
# Width of the windows +/- margin
self.margin = 50 #100
# Minimum number of pixels found to recenter window
self.minpix = 50
# Iteration number to average the polynomial coefficients
self.n_iteration = 10
# Image size
self.image_height = image_shape[0]
self.image_width = image_shape[1]
# y values of the line, spaced by 1 pixel
self.line_y = np.linspace(0, self.image_height-1, self.image_height )
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = []
#average x values of the fitted line over the last n iterations
self.bestx = None
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
#polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
#radius of curvature of the line in some units
self.radius_of_curvature = None
#distance in meters of vehicle center from the line
self.line_base_pos = None
#difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0], dtype='float')
#x values for detected line pixels
self.allx = None
#y values for detected line pixels
self.ally = None
self.debug = debug
def find_lane_pixels(self, binary_warped, base, out_img = None):
"""
Find the x and y pixels for the lane line.
Sliding windows will be used around starting points (base) passed as argument
"""
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//self.nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
x_current = base
# Create empty lists to receive the lane pixel indices
lane_inds = []
counter_empty_win = 0
dx_current = []
#last_y = binary_warped.shape[0] - window_height
#self.line_y = np.linspace(0, self.image_height-1, self.image_height )
# Step through the windows one by one
for window in range(self.nwindows):
# Identify window boundaries in x and y
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_x_low = x_current - self.margin
win_x_high = x_current + self.margin
## For Visualization - Draw the windows on the visualization image ##
if out_img is not None:
cv2.rectangle(out_img,(win_x_low,win_y_low),
(win_x_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_inds = ((nonzerox>=win_x_low) & (nonzerox<win_x_high) &
(nonzeroy>=win_y_low) & (nonzeroy<win_y_high)).nonzero()[0]
# Append these indices to the lists
lane_inds.append(good_inds)
# If > minpix pixels, recenter next window on their mean position
if len(good_inds) > self.minpix:
new_x_current = np.int(np.mean(nonzerox[good_inds]))
dx_current.append(new_x_current - x_current)
x_current = np.int(new_x_current + sum(dx_current)/len(dx_current))
#last_y = win_y_low
counter_empty_win = 0
else:
if len(dx_current) > 0:
x_current = np.int(x_current + sum(dx_current)/len(dx_current))
counter_empty_win +=1
                # if 4 consecutive windows contain too few pixels, stop searching further
if counter_empty_win == 4:
self.allx = []
self.ally = []
#self.line_y = np.linspace(last_y, self.image_height-1, (self.image_height-win_y_high) )
return
if ((x_current - self.margin ) <= 0) or ((x_current + self.margin)>= self.image_width):
#self.line_y = np.linspace(win_y_high, self.image_height-1, (self.image_height-win_y_high) )
if self.debug:
print("The curve crosses the lateral boundaries")
break
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
lane_inds = np.concatenate(lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
self.allx = nonzerox[lane_inds]
self.ally = nonzeroy[lane_inds]
def update_poly(self, order = 2, out_img = None):
"""
Append the polynomial x points to a list "recent_xfitted" and calculate the average x values of the fitted line over the last 'self.n_iteration' iterations.
"""
if len(self.allx) > 0 and len(self.ally)>0:
# Fit a polynomial to each using `np.polyfit'
self.current_fit = np.polyfit(self.ally, self.allx, order )
# Get the difference in fit coefficients between last and new fits
if self.best_fit is not None:
self.diffs = (self.best_fit - self.current_fit) / self.best_fit
# Generate x and y values for plotting
try:
x = self.current_fit[order]
for i in range(order):
x += self.current_fit[i]*self.line_y**(order-i)
if len(self.recent_xfitted) == self.n_iteration:
self.recent_xfitted.pop(0)
if (len(self.recent_xfitted) > 1) and (len(x) < len(self.recent_xfitted[0])):
if self.debug:
print("Before concatenating x values", len(x), len(self.recent_xfitted[0]))
x = np.concatenate([np.array([x[0]]*(len(self.recent_xfitted[0]) - len(x))), x])
self.recent_xfitted.append(x)
except TypeError:
# Avoids an error
print('The function failed to fit a line!')
self.detected = False
# Calculate the average x values of the fitted line over the last 'self.n_iteration' iterations
self.bestx = np.mean(self.recent_xfitted, axis = 0)
self.best_fit = np.polyfit(self.line_y, self.bestx, order )
self.detected = True
else:
if self.debug:
print('No x, y points fitting the line')
self.detected = False
## For Visualization ##
if out_img is not None:
# Colors in the left and right lane regions
out_img[self.ally, self.allx] = [255, 0, 0]
# Plots the left and right polynomials on the lane lines
plt.plot(self.bestx, self.line_y, color='yellow')
def first_fit_polynomial(self, binary_warped, basex, order = 2, out_img = None):
"""
Fit a polynomial with order "order" for the lane line based on the x,y pixels which fall on sliding windows.
"""
# Find the lane pixels first
self.find_lane_pixels(binary_warped, basex, out_img)
# Update the polynomial
self.update_poly(order, out_img)
def search_around_poly(self, binary_warped, order, out_img = None):
"""
Fit a polynomial with order "order" for the lane line based on the x,y pixels which are around a lane line detected on previous frame
"""
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Set the area of search based on activated x-values
# within the +/- margin of our polynomial function
x_current = 0
for i in range(order+1):
x_current += self.best_fit[i]*nonzeroy**(order-i)
win_x_low = x_current - self.margin
win_x_high = x_current + self.margin
lane_inds = ((nonzerox>=win_x_low) & (nonzerox<win_x_high)).nonzero()[0]
# Again, extract left and right line pixel positions
self.allx = nonzerox[lane_inds]
self.ally = nonzeroy[lane_inds]
self.update_poly(order, out_img)
## For Visualization ##
if out_img is not None:
# Create an image to draw on and an image to show the selection window
window_img = np.zeros_like(out_img)
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
line_window1 = np.array([np.transpose(np.vstack([self.bestx-self.margin, self.line_y]))])
line_window2 = np.array([np.flipud(np.transpose(np.vstack([self.bestx+self.margin,
self.line_y])))])
line_pts = np.hstack((line_window1, line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([line_pts]), (0,255, 0))
out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
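# A minimal per-frame driving sketch (hypothetical calling code, not part of
# this class): use the sliding-window search on the first frame or after
# losing the line, and otherwise search around the previous fit.
#
# line = Line(image_shape=(720, 1280))
# if not line.detected:
#     line.first_fit_polynomial(binary_warped, basex, order=2)
# else:
#     line.search_around_poly(binary_warped, order=2)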
``` |
{
"source": "JoaoGranja/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: JoaoGranja/CarND-Behavioral-Cloning-P3/model.py
```python
import os, csv, cv2
import numpy as np
from scipy import ndimage
import tensorflow as tf
import sklearn
from math import ceil
from random import shuffle
import matplotlib.pyplot as plt
### ---------------------------------------------- Data Generator ------------------------------------------ ###
def generator(samples, batch_size=32):
correction = [0, 0.2, -0.2]
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
labels = []
for batch_sample in batch_samples:
for i in range(3):
filename = batch_sample[i].split('/')[-1]
if len(batch_sample[i].split('/')) > 2: # The training data from Udacity has a different format
data_dir = batch_sample[i].split('/')[3]
else:
data_dir = "data"
current_path = "/opt/carnd_p3/" + data_dir +'/IMG/' + filename
image = ndimage.imread(current_path)
yuv=cv2.cvtColor(image,cv2.COLOR_RGB2YUV)
images.append(yuv)
                    labels.append(float(batch_sample[3]) + correction[i])
## Data Augmentation
augmented_images, augmented_labels = [], []
for image, label in zip(images, labels):
augmented_images.append(image)
augmented_labels.append(label)
augmented_images.append(cv2.flip(image,1))
augmented_labels.append(label*-1.0)
X_train = np.array(augmented_images)
y_train = np.array(augmented_labels)
yield sklearn.utils.shuffle(X_train, y_train)
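            # Note: each source row contributes 3 camera images (center, left,
            # right) and flipping doubles that, so every yielded batch holds up
            # to 6x batch_size samples.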
### ---------------------------------------------- Loading Data ------------------------------------------ ###
# Loading data from several sources
source_paths = ["/opt/carnd_p3/data_29_06/"]
samples = []
for path in source_paths:
with open(path+"driving_log.csv") as csvfile:
reader = csv.reader(csvfile)
if path == "/opt/carnd_p3/data/":
next(reader)
for line in reader:
samples.append(line)
### ---------------------------------------------- Traning and Validation Data Split ------------------------------------------ ###
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print("Train samples length is", len(train_samples))
print("Validation samples length is", len(validation_samples))
###--------------------------------- Neural Network Model ------------------------------------------------------ ###
from keras.models import Model, Sequential
from keras.layers import Dense, Lambda, Flatten, Conv2D, MaxPooling2D, Activation, Cropping2D
from keras.layers import Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
debug = True
batch_size = 32
epochs = 5
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
# Build a Sequential Model
model = Sequential()
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3)))
model.add(Lambda(lambda x: (x - 128) / 128))
# Conv 1
model.add(Conv2D(filters=24, kernel_size=(5,5), strides=(2,2), padding='valid'))
model.add(Activation('relu'))
# Conv 2
model.add(Conv2D(filters=36, kernel_size=(5,5), strides=(2,2), padding='valid'))
model.add(Activation('relu'))
# Conv 3
model.add(Conv2D(filters=48, kernel_size=(5,5), strides=(2,2), padding='valid'))
model.add(Activation('relu'))
# Conv 4
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Conv 5
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
model.add(Flatten())
# Fully Connected 1
model.add(Dense(1000))
# Fully Connected 2
model.add(Dense(100))
# Fully Connected 3
model.add(Dense(1))
# compile the model
model.compile(optimizer='adam', loss='mse')
if debug:
print("Model summary:")
# Check the summary of this new model to confirm the architecture
model.summary()
### --------------------------------- Train and save the model ------------------------------------------------------ ###
tensorboard_callback = TensorBoard(log_dir="./logs")
history_object = model.fit_generator(train_generator,
steps_per_epoch=ceil(len(train_samples)/batch_size),
validation_data=validation_generator,
validation_steps=ceil(len(validation_samples)/batch_size),
epochs=epochs, verbose=1,
callbacks=[tensorboard_callback])
# Save the model
model.save('model.h5')
### ---------------------------------------------- Plot Training and Validation Results ----------------------- ###
if debug:
# print the keys contained in the history object
print(history_object.history.keys())
# plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig(os.path.join("examples", "model_loss"))
plt.show()
plt.close()
``` |
{
"source": "joaogui1/evoflow",
"score": 2
} |
#### File: benchmark/benchmark/logger.py
```python
import json
from termcolor import cprint
import numpy as np
from tabulate import tabulate
from time import time
from pathlib import Path
from evoflow import __version__ as evoflow_version
class Logger():
def __init__(self, system_info, backend):
self.system_info = system_info
self.ts = int(time())
self.backend = backend
if len(system_info['gpu']):
gpu = system_info['gpu'][0]['name'].lower().replace(' ', '_')
else:
gpu = "CPU"
result_dir = Path("results/%s" % (evoflow_version))
if not result_dir.exists():
result_dir.mkdir()
fname = result_dir / ("%s_%s_%s.json" % (backend, gpu, self.ts))
self.out = open(str(fname), 'w+')
self.rows = []
        cprint('Bench results will be saved here: %s' % fname, 'green')
def record_test(self, test_type, group, name, timings, num_runs,
num_generations, shape):
shape = list(shape)
record = {
'ts': self.ts,
'system': self.system_info,
'backend': self.backend,
'evoflow_version': evoflow_version,
'test_type': test_type,
'group': group,
'name': name,
'generations': num_generations,
'num_runs': num_runs,
'shape': shape,
}
record['input_size'] = int(np.prod(shape))
record['timings'] = {
"avg": float(np.average(timings)),
"max": float(np.max(timings)),
"min": float(np.min(timings)),
"std": float(np.std(timings)),
"raw": timings
}
self.out.write(json.dumps(record) + '\n')
self.rows.append([
group, name, shape,
round(record['timings']['min'], 3),
round(record['timings']['avg'], 3),
round(record['timings']['max'], 3),
round(record['timings']['std'], 3)
])
def summary(self):
print(
tabulate(
self.rows,
headers=['group', 'name', 'shape', 'min', 'avg', 'max',
'std']))
```
#### File: benchmark/ops/reverse.py
```python
from evoflow.ops import Reverse1D, Reverse2D
from evoflow.ops import Reverse3D
def bench_reverse(population):
# setup
shape = population.shape
population_fraction = 1
# select the right op
if len(shape) == 2:
mutations_probability = 0.5
OP = Reverse1D
elif len(shape) == 3:
mutations_probability = (0.5, 0.5)
OP = Reverse2D
elif len(shape) == 4:
mutations_probability = (0.5, 0.5, 0.5)
OP = Reverse3D
else:
raise ValueError("too many dimensions")
OP(population_fraction=population_fraction,
mutations_probability=mutations_probability)(population)
```
#### File: benchmark/ops/uniform_crossover.py
```python
from evoflow.ops import UniformCrossover1D, UniformCrossover2D
from evoflow.ops import UniformCrossover3D
def bench_uniform_crossover(population):
# setup
shape = population.shape
population_fraction = 1
# select the right op
if len(shape) == 2:
mutations_probability = 0.5
OP = UniformCrossover1D
elif len(shape) == 3:
mutations_probability = (0.5, 0.5)
OP = UniformCrossover2D
elif len(shape) == 4:
mutations_probability = (0.5, 0.5, 0.5)
OP = UniformCrossover3D
else:
raise ValueError("too many dimensions")
OP(population_fraction=population_fraction,
mutations_probability=mutations_probability)(population)
```
#### File: benchmark/benchmark/setup.py
```python
from termcolor import cprint
from .system import get_system_info
def setup(backend):
# disable gpu if needed - must be as early as possible.
if backend == 'tensorflow-cpu':
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU')
        cprint('Tensorflow set to CPU', 'green')
# setting backend
from evoflow.config import set_backend
if backend in ['tensorflow-cpu', 'tensorflow-gpu']:
set_backend('tensorflow')
else:
set_backend(backend)
cprint('Requested backend: %s' % backend, 'magenta')
if backend in ['tensorflow-gpu', 'cupy']:
gpu_enable = True
else:
gpu_enable = False
sys_info = get_system_info(gpu_enable)
return sys_info
```
#### File: evoflow/backend/tensorflow.py
```python
import tensorflow as tf
from tensorflow.errors import InvalidArgumentError
from .common import _infer_dtype
from evoflow.config import intx, floatx
import numpy
from evoflow.backend.tf_ops.assign import assign # noqa: F401
from evoflow.backend.tf_ops.assign import assign2 # noqa: F401
# - tf specific functions
# ! don't import them in __init__
RAND_GENERATOR = tf.random.get_global_generator()
def get_num_gpu():
"Return the number of GPU available to Tensorflow"
return len(tf.config.get_visible_devices('GPU'))
# -initialization-
def tensor(a, dtype=None):
"""Converts an object to a tensor.
Args:
a: The source object.
dtype: Data type specifier. It is inferred from the input by default.
Returns:
ndarray: An array on the current device. If ``a`` is already on
the device, no copy is performed.
"""
if is_tensor(a):
return a
if not dtype and not isinstance(a, numpy.ndarray):
# automatic inference based of floatx and intx settings
dtype = _infer_dtype(a)
return tf.convert_to_tensor(a, dtype=dtype)
def copy(tensor):
"""Copy a tensor
Args:
tensor (ndarray): tensor to copy
Returns
ndarray: copied tensor
"""
return tf.identity(tensor)
def zeros(shape, dtype=float):
"""Returns a new Tensor of given shape and dtype, filled with zeros.
Args:
shape (int or tuple of ints): Dimensionalities of the array.
        dtype (str, optional): Data type specifier. Defaults to float.
    Returns:
        ndarray: A tensor filled with zeros.
"""
return tf.zeros(shape, dtype=dtype)
def ones(shape, dtype=float):
"""Returns a new Tensor of given shape and dtype, filled with ones.
Args:
shape (int or tuple of ints): Dimensionalities of the array.
dtype: Data type specifier.
Returns:
        ndarray: A tensor filled with ones.
"""
return tf.ones(shape, dtype=dtype)
def fill(shape, fill_value):
"""Returns a new Tensor of given shape and dtype, filled with the provided
value.
Args:
shape (int or tuple of ints): Dimensionalities of the array.
fill_value (int): The value to fill with.
Returns:
ndarray: An tensor filled with zeros.
"""
return tf.fill(shape, fill_value)
def normal(shape, mean=0, stddev=1.0):
"""Draw random samples from a normal (Gaussian) distribution.
Args:
shape (int or tuple of ints): Dimensionalities of the array.
mean (float): Mean value. Default to 0.0.
stddev (float): Standard deviations. Default to 1.0.
Returns:
ndarray: Drawn samples from the parameterized normal distribution.
"""
return RAND_GENERATOR.normal(shape, mean=mean, stddev=stddev)
def range(start, stop=None, step=1, dtype=intx()):
"""Creates a sequence of numbers.
    Creates a sequence of numbers that begins at `start` and extends by
    increments of `step` up to but not including `stop`.
    Args:
        start (int): Initial value. Optional. Defaults to 0.
        stop (int, optional): End value.
        step (int, optional): Spacing between values. Defaults to 1.
        dtype (str, optional): Tensor type. Defaults to intx().
Returns:
Tensor: Tensor that contains the requested range.
"""
return tf.range(start, limit=stop, delta=step, dtype=dtype)
# - Reduce -
def prod(tensor, axis=None, keepdims=False):
"""Returns the product of an array along a given axis.
Args:
        tensor (ndarray): Array to take the product of.
        axis (int): Along which axis to take the product. The flattened array
        is used by default. Defaults to None.
        keepdims (bool): If ``True``, the axis is kept as an axis of
        size one. Default to False.
    Returns:
        ndarray: The product of ``tensor``, along the axis if specified.
"""
return tf.math.reduce_prod(tensor, axis=axis, keepdims=keepdims)
def max(tensor, axis=None, keepdims=False):
"""Returns the maximum of an array or the maximum along a given axis.
Note::
        When at least one element is NaN, the corresponding max value will be
NaN.
Args:
tensor (ndarray): Array to take the maximum.
axis (int): Along which axis to take the maximum. The flattened array
is used by default. Defaults to None.
keepdims (bool): If ``True``, the axis is kept as an axis of
size one. Default to False.
Returns:
ndarray: The maximum of ``tensor``, along the axis if specified.
"""
return tf.math.reduce_max(tensor, axis=axis, keepdims=keepdims)
def min(tensor, axis=None, keepdims=False):
"""Returns the minimum of an array or the maximum along an axis.
Note::
When at least one element is NaN, the corresponding min value will be
NaN.
Args:
tensor (ndarray): Array to take the maximum.
axis (int): Along which axis to take the maximum. The flattened array
is used by default. Defaults to None.
keepdims (bool): If ``True``, the axis is kept as an axis of
size one. Default to False.
Returns:
ndarray: The maximum of ``tensor``, along the axis if specified.
"""
return tf.math.reduce_min(tensor, axis=axis, keepdims=keepdims)
def sum(tensor, axis=None, keepdims=False):
"""Returns the sum of an array along given axes.
Args:
tensor (ndarray): Array to sum reduce.
axis (int or sequence of ints): Axes along which the sum is taken.
keepdims (bool): If ``True``, the specified axes are remained as axes
of length one.
Returns:
ndarray: The sum of ``tensor``, along the axis if specified.
"""
return tf.math.reduce_sum(tensor, axis=axis, keepdims=keepdims)
def mean(tensor, axis=None, keepdims=False):
"""Returns the sum of an array along given axes.
Args:
tensor (ndarray): Array to mean reduce.
axis (int or sequence of ints): Axes along which the sum is taken.
keepdims (bool): If ``True``, the specified axes are remained as axes
of length one.
Returns:
ndarray: The mean of ``tensor``, along the axis if specified.
"""
return tf.math.reduce_mean(tensor, axis=axis, keepdims=keepdims)
def sqrt(tensor):
"""Computes element-wise square root of the input tensor.
Args:
tensor (ndarray): tensor
Returns:
tensor: square root of the input tensor.
"""
return tf.math.sqrt(tensor)
# - Manipulation -
def reverse(tensor, axis):
"""Reverses specific dimensions of a tensor.
Args:
tensor (tensor): tensor to reverse
axis (tensor): axis or tuple of axis
"""
return tf.reverse(tensor, axis)
def roll(tensor, shift, axis):
"""Rolls the elements of a tensor along an axis.
Args:
tensor (tensor): tensor to roll
shift (tensor): offset to roll
axis (tensor): axis to shift by
Returns:
        tensor: the rolled tensor
"""
return tf.roll(tensor, shift, axis)
def tile(tensor, reps):
"""Construct a tensor by repeating tensor the number of times given by reps.
Args:
        tensor (ndarray): Tensor to transform.
reps (int or tuple): The number of repeats.
Returns:
ndarray: Transformed tensor with repeats.
"""
return tf.tile(tensor, reps)
def concatenate(tup, axis=0):
"""Joins tensors along a given axis.
Args:
tup (sequence of arrays): Tensors to be joined. Tensors must have the
same cardinality except for the specified axis.
axis (int or None): Axis to joint along. If None, tensors are
flattened before use. Default is 0.
Returns:
ndarray: Joined array.
"""
return tf.concat(tup, axis=axis)
# - Utils -
def transpose(a):
"Transpose the tensor"
return tf.transpose(a)
def cast(tensor, dtype):
"""Cast
Args:
tensor (Tensor): tensor to cast.
dtype (str): type to cast. Usually floatx() or intx()
Returns:
ndarray: Tensor casted in the requested format.
"""
return tf.cast(tensor, dtype)
def dtype(tensor):
""""Returns the dtype of a tensor as a string.
Args:
tensor (tensor): Tensor
Returns:
str: type of the tensor as string. e.g int32.
"""
return tensor.dtype.name
def flatten(tensor):
"""Returns a copy of the tensor flatten into one dimension.
Args:
tensor (ndarray): tensor to flatten
Returns:
ndarray: flattened tensor
"""
# note: unsure if that is the fastest way. maybe compute flat_shape in
# pure python
# flat_shape = prod(tensor.shape)
return tf.reshape(tensor, [-1])
def as_numpy_array(t):
"""Convert tensor to a numpy array.
    Useful when interfacing with other libraries to have a unified input
to them.
Args:
t (ndarray): tensor to convert
Returns:
numpy.ndarray: Tensor as numpy array
"""
if not is_tensor(t):
dtype = _infer_dtype(t)
if isinstance(t, list):
t = tensor(t)
else:
t = tensor([t])
else:
dtype = t.dtype.name
return t.numpy().astype(dtype)
def reshape(tensor, shape):
"reshape tensor"
return tf.reshape(tensor, shape)
def is_tensor(a):
"check if the given object is a tensor"
if isinstance(a, tf.Tensor):
return True
    elif isinstance(a, tf.TensorShape):
return True
else:
return False
def tensor_equal(tensor1, tensor2):
"""True if two tensors are exactly equals
Args:
tensor1 (ndarray): tensor to compare.
tensor2 (ndarray): tensor to compare.
Returns:
Bool: True if exactly equal, False otherwise.
"""
return tf.reduce_all(tf.equal(tensor1, tensor2))
def assert_near(a, b, absolute_tolerance=0, relative_tolerance=0):
"""
Returns True if two arrays are element-wise equal within a tolerance.
Args:
a (ndarray): Tensor with a last dimension of at least k size.
b (ndarray): Tensor with a last dimension of at least k size.
absolute_tolerance (float): Default to 0
relative_tolerance (float): Default to 0
Returns:
bool: True if the two arrays are equal within the given tolerance; False otherwise.
Note:
This function return True if the following equation is satisfied:
`absolute(a - b) <= (absolute_tolerance + relative_tolerance * absolute(b))` # noqa
"""
try:
tf.debugging.assert_near(a,
b,
atol=absolute_tolerance,
rtol=relative_tolerance)
except InvalidArgumentError:
return False
return True
# - Math -
def _is_scalar(a):
    if isinstance(a, int):
        return True
    elif isinstance(a, float):
        return True
    elif is_tensor(a):
        if a.shape == ():
            return True
    return False
def dot(t1, t2):
"""Return the dot product of two arrays
Args:
t1 (ndarray): Left tensor
t2 (ndarray): Right tensor
Return:
ndarray: tensor containing the dot product
"""
    # scalar
if (_is_scalar(t1) or _is_scalar(t2)):
return t1 * t2
# return tf.reduce_sum(tf.multiply(t1, t2))
elif len(t1.shape) == 1 or len(t2.shape) == 1:
return tf.reduce_sum(tf.multiply(t1, t2), axis=-1)
else:
return tf.matmul(t1, t2)
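# Worked examples (assumed values): dot(2, tensor([1, 2])) broadcasts to
# [2, 4]; dot(tensor([1, 2]), tensor([3, 4])) reduces to 11; 2-D inputs fall
# through to tf.matmul.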
def add(tensor1, tensor2):
"""Add two tensors
Args:
tensor1 (ndarray): Left tensor.
tensor2 (ndarray): right tensor.
"""
return tf.add(tensor1, tensor2)
def subtract(tensor1, tensor2):
"""Substract two tensors
Args:
tensor1 (ndarray): Left tensor.
tensor2 (ndarray): right tensor.
"""
return tf.subtract(tensor1, tensor2)
def multiply(tensor1, tensor2):
"""multiply two tensors
Args:
tensor1 (ndarray): Left tensor.
tensor2 (ndarray): right tensor.
"""
return tf.multiply(tensor1, tensor2)
def divide(numerator, denominator):
"""divide a tensor by another
Args:
tensor1 (ndarray): numerator tensor.
tensor2 (ndarray): denominator tensor.
"""
return tf.cast(tf.divide(numerator, denominator), floatx())
def mod(numerator, denominator):
"""Compute the reminder of the divisin of a tensor by another
Args:
tensor1 (ndarray): numerator tensor.
tensor2 (ndarray): denominator tensor.
"""
return tf.math.mod(numerator, denominator)
def clip(tensor, min_val=0, max_val=None, out=None):
"""Clips the values of a tensor to a given interval. For example,
if an interval of [0, 1] is specified, values smaller than 0 become 0,
and values larger than 1 become 1.
Efficient version of ``max(min(a, max_val), min_val)``
Args:
tensor (ndarray): The input Tensor.
        min_val (scalar, ndarray or None): The left side of the interval. When
        None, ignored. Defaults to 0.
max_val (scalar, ndarray or None): The right side of the interval. When
None ignored. Defaults to None.
Returns:
ndarray: Clipped tensor.
"""
return tf.clip_by_value(tensor,
clip_value_min=min_val,
clip_value_max=max_val)
def abs(tensor):
"Calculate the absolute value element-wise."
return tf.math.abs(tensor)
def broadcasted_norm(tensor):
"Norm broadcasted accross dimensions"
norm = cast(tf.abs(tensor), intx())
norm = norm**2
norm = tf.reduce_sum(norm, axis=-1)
norm = sqrt(cast(norm, floatx()))
return norm
# population_norm = B.sum(B.abs(flat_pop)**2, axis=-1)**0.5
def norm(tensor, ord='euclidean', axis=None, keepdims=False):
"""Return one of eight different matrix norms, or one of an infinite
number of vector norms (described below), depending on the value of
the `ord` parameter.
Args:
tensor (ndarray): Array to take the norm from. If ``axis`` is None,
the ``tensor`` must be 1D or 2D.
        ord (non-zero int, inf, -inf, 'fro'): Norm type. Euclidean by default.
See `tf.norm` for explanation:
https://www.tensorflow.org/api_docs/python/tf/norm
axis (int, 2-tuple of ints, None): `axis` along which the norm is
computed.
keepdims (bool): If this is set ``True``, the axes which are normed
over are left in the resulting tensor with a size of one.
Returns:
ndarray: Norm of the tensor.
"""
return tf.norm(tensor, ord=ord, axis=axis, keepdims=keepdims)
# - Randomness -
def randint(low, high=0, shape=None, dtype=intx()):
"""Returns a scalar or an array of integer values over [low, high)
Args:
        low (int): If high is 0 (the default), `low` is the upper bound of
            the interval and the lower bound is set to 0. If high is set,
            `low` is the lower bound of the interval.
        high (int, optional): Upper bound of the interval. Defaults to 0.
        shape (None or int or tuple of ints, optional): The shape of the
            returned value. Defaults to None.
        dtype (str, optional): Data type specifier. Defaults to intx().
    Returns:
        int or ndarray of ints: If shape is None, a single sampled integer.
        If shape is an int, a 1D array of that length. Otherwise, an array
        with the given shape.
"""
    # just one number
    if not shape:
        if high == 0:
            high = low
            low = 0
        return RAND_GENERATOR.uniform(shape=(1, ),
                                      minval=low,
                                      maxval=high,
                                      dtype=dtype)[0]
if isinstance(shape, int):
shape = (shape, )
return RAND_GENERATOR.uniform(shape=shape,
minval=low,
maxval=high,
dtype=dtype)
def shuffle(t, axis=0):
"""Shuffle tensor along the given axis. Other axis remain in
place.
Args:
tensor (ndarray): tensor to shuffle.
axis (int, optional): axis to shuffle on. Default to 0.
Returns:
None: in place shuffling
"""
if not axis:
# ! tensorflow don't do in place shuffling
return tf.random.shuffle(t)
else:
        # FIXME: this is a hack; going through numpy is likely to be slow
t = as_numpy_array(t)
rng = numpy.random.default_rng()
rng.shuffle(t, axis=axis)
return tensor(t)
def full_shuffle(t):
"""Shuffle in place a tensor along all of its axis
Args:
t (ndarray): tensor to shuffle.
Returns:
tensor: shuffled tensor
We need to use transpose
Shuffle()
2D
[1, 0] -> [1, 0] : Dim 1
[1, 0] -> [0, 1] : restore order
3D
Transpose -> new order
[1, 0, 2] -> [1, 0, 2] : Dim 1
[2, 1, 0] -> [2, 0, 1] : Dim 2
[1, 2, 0] -> [0, 1, 2] : restore order
4D
Transpose -> new order
[1, 0, 2, 3] -> [1, 0, 2, 3] : Dim 1
[2, 1, 0, 3] -> [2, 0, 1, 3] : Dim 2
[3, 1, 2, 0] -> [3, 0, 1, 2] : Dim 3
[1, 2, 3, 0] -> [0, 1, 2, 3] : restore order
5D+
nope - might consider patching tf.shuffle at that point :)
"""
    # ! don't use the variable name `tensor` as it shadows the tensor() helper
dims = len(t.shape)
t = tf.random.shuffle(t) # always shuffle dim0
if dims == 2:
# dim 1
t = tf.transpose(t, [1, 0])
t = tf.random.shuffle(t)
# restore
t = tf.transpose(t, [1, 0])
elif dims == 3:
# dim 1
t = tf.transpose(t, [1, 0, 2])
t = tf.random.shuffle(t)
# dim 2
t = tf.transpose(t, [2, 1, 0])
t = tf.random.shuffle(t)
# restore
t = tf.transpose(t, [1, 2, 0])
elif dims == 4:
# dim 1
t = tf.transpose(t, [1, 0, 2, 3])
t = tf.random.shuffle(t)
# dim 2
t = tf.transpose(t, [2, 1, 0, 3])
t = tf.random.shuffle(t)
# dim 3
t = tf.transpose(t, [3, 1, 2, 0])
t = tf.random.shuffle(t)
# restore
t = tf.transpose(t, [1, 2, 3, 0])
elif dims > 4:
print('tensor shape', t.shape)
raise ValueError('Tensor Rank > 4 -- not implemented')
return t
    # FIXME: this is a hack; going through numpy is likely to be slow
# t = as_numpy_array(t)
# rng = numpy.random.default_rng()
# for idx in range(len(t.shape)):
# rng.shuffle(t, axis=idx)
# return tensor(t)
# - Indexing -
def take(tensor, indices, axis=None):
"""Takes elements of a Tensor at specified indices along a specified axis
Args:
tensor (ndarray): Tensor to extract elements from.
indices (int or array-like): Indices of elements that this function
takes.
axis (int, optional): The axis along which to select indices from.
The flattened input is used by default. Defaults to None.
Returns:
ndarray: Tensor containing the values from the specified indices.
"""
return tf.gather(tensor, indices, axis=axis)
def top_k_indices(tensor, k, axis=-1):
"""
    Finds the indices of the k largest entries along the last axis.
    Args:
        tensor (ndarray): Tensor with a last dimension of at least k size.
        k (int): number of elements to return.
        axis (int, optional): kept for API symmetry; tf.math.top_k only
            supports the last axis. Defaults to -1.
"""
return tf.math.top_k(tensor, k)[1]
def bottom_k_indices(tensor, k, axis=-1):
"""
    Finds the indices of the k smallest entries along the last axis.
    Args:
        tensor (ndarray): Tensor with a last dimension of at least k size.
        k (int): number of elements to return.
        axis (int, optional): kept for API symmetry; tf.math.top_k only
            supports the last axis. Defaults to -1.
    """
    # negate the tensor so that top_k picks out the smallest entries
return tf.math.top_k(tf.multiply(tensor, -1), k)[1]
def unique_with_counts(tensor):
"""Finds unique elements and return them along side their position and
counts.
Args:
tensor (Tensor): 1D tensor to analyze.
Returns:
values (Tensor): unique values founded
indexes (Tensor): index of the value sorted
counts (Tensor): Tensor containing the count for each value.
"""
return tf.unique_with_counts(tensor)
# - Statistical -
def bincount(tensor, weights=None, minlength=None):
"""Count number of occurrences of each value in array of non-negative ints.
Args:
tensor (ndarray): Input tensor.
        weights (ndarray): Weights tensor with the same shape as
            `tensor`. Defaults to None.
        minlength (int): A minimum number of bins for the output array.
"""
return tf.math.bincount(tensor, weights=weights, minlength=minlength)
```
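A quick usage sketch of the ops above (hedged: it assumes the file is importable as `evoflow.backend.tensorflow`, the import path used by the test files below):
```python
# Usage sketch for the backend ops above; assumes the module is
# importable as evoflow.backend.tensorflow (as in the tests below).
import evoflow.backend.tensorflow as B

t = B.randint(0, 10, shape=(4, 5))     # random ints in [0, 10)
t = B.clip(t, min_val=2, max_val=8)    # clamp every value into [2, 8]
t = B.full_shuffle(t)                  # shuffle rows, then columns
# rows whose first gene is largest, via top_k_indices + take
rows = B.take(t, B.top_k_indices(t[:, 0], k=2), axis=0)
values, indexes, counts = B.unique_with_counts(B.tensor([1, 1, 2, 3]))
```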
#### File: backend/tf_ops/randint.py
```python
import tensorflow as tf
RAND_GENERATOR = tf.random.get_global_generator()
@tf.function()
def randint2(low, high=None, shape=None, dtype='int32'):
"""Returns a scalar or an array of integer values over [low, high)
Args:
        low (int): If high is None, `low` is the upper bound of the
            interval and the lower bound is set to 0. If high is set,
            `low` is the lower bound of the interval.
        high (int, optional): Upper bound of the interval. Defaults to None.
        shape (None or int or tuple of ints, optional): The shape of the
            returned value. Defaults to None.
        dtype (str, optional): Data type specifier. Defaults to 'int32'.
    Returns:
        int or ndarray of ints: If shape is None, a single sampled integer.
        If shape is an int, a 1D array of that length. Otherwise, an array
        with the given shape.
"""
    if high is None:
        # randint2(n) samples from [0, n), matching the docstring
        low, high = 0, low
    # just one number
    if not shape:
        return RAND_GENERATOR.uniform(shape=(1, ),
                                      minval=low,
                                      maxval=high,
                                      dtype=dtype)[0]
if isinstance(shape, int):
shape = (shape, )
return RAND_GENERATOR.uniform(shape=shape,
minval=low,
maxval=high,
dtype=dtype)
```
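The module-level generator is the point of this variant: `tf.function` tracing would freeze an op-level seed, while a shared stateful generator keeps retraced calls random. A hedged sketch of the expected behaviour:
```python
# Sketch: repeated calls to the traced function still produce fresh
# draws because they share the stateful global generator.
a = randint2(0, 10, shape=(3,))
b = randint2(0, 10, shape=(3,))
# Two independent draws over [0, 10)^3 are very unlikely to be equal,
# so this assertion holds in practice (probabilistic, not guaranteed).
assert not bool(tf.reduce_all(tf.equal(a, b)))
```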
#### File: evoflow/evoflow/config.py
```python
_INTX = 'int32'
_FLOATX = 'float32'
_BACKEND = None
def floatx():
"""Returns the default float type, as a string.
E.g. `'float16'`, `'float32'`, `'float64'`.
Returns:
str: the current default float type.
Example:
>>> B.floatx()
'float32'
"""
return _FLOATX
def set_floatx(value):
"""Sets the default float type.
Note: It is not recommended to set this to float16 for training,
as this will likely cause numeric stability issues
Args:
value (str): `'float16'`, `'float32'`, or `'float64'`.
Example:
>>> evoflow.backend.floatx()
'float32'
>>> evoflow.backend.set_floatx('float64')
>>> evoflow.backend.floatx()
'float64'
>>> evoflow.backend.set_floatx('float32')
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
def intx():
"""Returns the default int type, as a string.
    E.g. `'int8'`, `'uint8'`, `'int32'`.
Returns:
str: the current default int type.
Example:
>>> B.intx()
'int32'
"""
return _INTX
def set_intx(value):
"""Sets the default int type.
Args:
value (str): default int type to use.
One of `{'int8', 'uint8', 'int16', 'uint16', 'int32', 'int64'}`
    Example:
        >>> evoflow.backend.intx()
        'int32'
        >>> evoflow.backend.set_intx('uint8')
        >>> evoflow.backend.intx()
        'uint8'
        >>> evoflow.backend.set_intx('int32')
Raises:
ValueError: In case of invalid value.
"""
global _INTX
if value not in {'int8', 'uint8', 'int16', 'uint16', 'int32', 'int64'}:
raise ValueError('Unknown intx type: ' + str(value))
_INTX = str(value)
def get_backend():
"Return the backend used"
return _BACKEND
def set_backend(name):
"""Set Geneflow backend to be a given framework
Args:
name(str): Name of the backend. {cupy, numpy, tensorflow}. Default
to tensorflow.
See:
`load_backend.py` for the actual loading code.
"""
global _BACKEND
if name not in {'cupy', 'numpy', 'tensorflow'}:
raise ValueError('Unknown backend: ' + str(name))
_BACKEND = name
```
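A small sketch of how the dtype switches are meant to be used (the `evoflow.config` import path is inferred from the file location above):
```python
# Sketch: temporarily switching the default dtypes for an experiment.
from evoflow.config import floatx, set_floatx, intx, set_intx

set_floatx('float64')
set_intx('int64')
assert floatx() == 'float64' and intx() == 'int64'
set_floatx('float32')   # restore the library defaults
set_intx('int32')
```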
#### File: evoflow/engine/evoflow.py
```python
import networkx as nx
from termcolor import cprint
from tabulate import tabulate
from collections import defaultdict
from tqdm.auto import tqdm
from evoflow.utils import box
from evoflow.io import print_debug
from .results import Results
from .callback_collection import CallbackCollection
class EvoFlow(object):
def __init__(self, inputs, outputs, debug=False):
if not inputs:
raise ValueError("Inputs can't be empty")
if not outputs:
raise ValueError("Ouputs can't be empty")
        # FIXME: check we have the same number of inputs and outputs,
        # and that they have the same shape, because the outputs are used
        # as the next inputs
# set debug
self.debug = debug
# graph underlying structure
self.graph = nx.DiGraph()
# tracking structures
self.idx2op = {} # op object
self.idx2results = {} # op computation result
self.idx2input_idx = defaultdict(set) # ops used as inputs
        self.idx2output_ops = defaultdict(set)  # ops used as outputs
self.inputs_idx = [] # track which op idx are inputs
self.outputs_idx = [] # track what op idx are outputs
self.fitness = None
self.compiled = False
self._results = None
self.callback_collection = None
# storing inputs tensors
self.inputs = box(inputs)
for ipt in self.inputs:
self.inputs_idx.append(ipt.idx)
# output
self.outputs = box(outputs)
for output in self.outputs:
self.outputs_idx.append(output.idx)
# build forward graph
for output in self.outputs:
self._add_op_to_graph(output, None, self.debug)
# FIXME: check that the graph is fully connected from input to output
        # coerce the execution path to a list so it can be reused across batches
self.execution_path = list(nx.topological_sort(self.graph))
def compile(self, selection_strategy, fitness_functions):
"""Configure evoluationary model for training
"""
# FIXME: check args validity
self.selection_strategy = selection_strategy
self.fitness_functions = box(fitness_functions)
self.compiled = True
self._results = Results(debug=self.debug)
def evolve(self, populations, generations=1, callbacks=None, verbose=1):
        if not self.compiled:
            raise ValueError("compile() must be run before using the graph")
self.callback_collection = CallbackCollection(callbacks)
populations = box(populations)
self.print_debug("Initial Populations", populations)
        if not len(populations) == len(self.inputs):
            raise ValueError('The number of populations must be equal '
                             'to the number of inputs')
# assign initial value to inputs
current_populations = []
for pop_idx, ipt in enumerate(self.inputs):
self.inputs[pop_idx].assign(populations[pop_idx])
pop = self.inputs[pop_idx].get()
current_populations.append(pop)
num_populations = len(current_populations)
self.print_debug('Initial current_populations', current_populations)
# callbacks
self.callback_collection.on_evolution_begin(current_populations)
# progress bar
if verbose:
pb = tqdm(total=generations, unit='generation')
# evolve loop
for generation_idx in range(generations):
# callbacks
self.callback_collection.on_generation_begin(generation_idx)
# perform evolution
evolved_populations = self.perform_evolution()
# assign evolved populations
self.print_debug(generation_idx, 'evolved pop',
evolved_populations)
fitness_scores_list = [] # keep track of fitness scores
metrics_list = [] # keep track of the metrics scores
for pop_idx in range(num_populations):
                # fetch the current population and related information
current_pop = current_populations[pop_idx]
evolved_pop = evolved_populations[pop_idx]
fitness_function = self.fitness_functions[pop_idx]
self.print_debug('current_population', pop_idx, current_pop)
self.print_debug('evolved_population', pop_idx, evolved_pop)
# select population
new_population, fitness_scores, metrics = self.selection_strategy( # noqa
fitness_function, current_pop, evolved_pop)
# tracks metrics
metrics_list.append(metrics)
fitness_scores_list.append(fitness_scores)
# update population tensor
self.inputs[pop_idx].assign(new_population)
# track current population
current_populations[pop_idx] = new_population
# record fitness scores
self._results.record_fitness(fitness_scores_list)
latest_metrics = self._results.get_latest_metrics(flatten=True)
# callbacks
self.callback_collection.on_generation_end(generation_idx,
latest_metrics,
fitness_scores_list,
populations)
# progress bar
if verbose:
formatted_metrics = {}
for name, value in latest_metrics.items():
name = name.lower().replace(' ', '_')
formatted_metrics[name] = value
pb.set_postfix(formatted_metrics)
pb.update()
if verbose:
pb.close()
# final callback
self.callback_collection.on_evolution_end(current_populations)
# record last population
self._results.set_population(current_populations)
# return last evolution
return self._results
def perform_evolution(self):
"""
Evolve population
Args:
populations (list): populations to evolve.
"""
        # single batch. FIXME: move to a batch function, as we also need
        # one for evaluate
self.print_debug('execution path:', self.execution_path)
for op_idx in self.execution_path:
op = self.idx2op[op_idx]
self.print_debug('%s(%s)' % (op.idx, self.idx2input_idx[op.idx]))
# fetching inputs values
inputs = []
for input_idx in self.idx2input_idx[op_idx]:
inputs.append(self.idx2results[input_idx])
# execute the op
self.idx2results[op_idx] = op._call_from_graph(inputs)
self.print_debug('idx2results:', self.idx2results.keys())
# collect results
results = []
for op_idx in self.outputs_idx:
results.append(self.idx2results[op_idx])
return results
def summary(self):
"print a summary of the data flow"
rows = []
for op_idx in self.execution_path:
op = self.idx2op[op_idx]
# inputs
if len(self.idx2input_idx[op_idx]):
inputs = ",".join([o for o in self.idx2input_idx[op_idx]])
else:
inputs = ''
# the op id and its shape
op_info = "%s (%s)" % (op.idx, op.op_type)
# shape
op_shape = "%s" % str(op.get_output_shapes())
# output
            # if len(self.idx2output_ops[op_idx]):
            #     outputs = ",".join([o for o in self.idx2output_ops[op_idx]])  # noqa E501
# else:
# outputs = '>>'
rows.append([op_info, op_shape, inputs])
print(tabulate(rows, headers=['OP (type)', 'Output Shape', 'Inputs']))
def _add_op_to_graph(self, op, output_op=None, debug=0):
"""
Recursively insert nodes in the DAG
Take the opportunity to compute outbound nodes and various variables
Args:
op (Op): the operation to insert
output_op (Op): the output_op to add.
debug (int): print debug
"""
self.idx2op[op.idx] = op
self.graph.add_node(op.idx)
# recording inbound op if any
if output_op:
            self.idx2output_ops[op.idx].add(output_op.idx)
# Reversing the graph and storing it - notice:in_op become op
for in_op in op.input_ops:
self.idx2input_idx[op.idx].add(in_op.idx)
if debug:
cprint("[edge] %s->%s" % (in_op.idx, op.idx), 'yellow')
self.graph.add_edge(in_op.idx, op.idx)
self._add_op_to_graph(in_op, op, self.debug) # up the chain
def print_debug(self, *msg):
"output debug message"
if self.debug:
            print_debug('EvoFlow', *msg)
    def history(self):
        "Return the Results object produced by the last evolve() run."
        return self._results
```
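For orientation, a condensed sketch of the intended call flow; it mirrors the `test_direct_2d` integration test included further down in this dump:
```python
# Condensed EvoFlow usage sketch (see test_direct_2d below for the
# original); the op and fitness imports follow that test file.
from evoflow.engine import EvoFlow
from evoflow.ops import Input, UniformCrossover2D
from evoflow.selection import SelectFittest
from evoflow.fitness import Sum
from evoflow.population import randint_population

SHAPE = (32, 8, 8)  # (population size, genes, genes)
population = randint_population(SHAPE, 10)
inputs = Input(shape=SHAPE)
outputs = UniformCrossover2D()(inputs)
ef = EvoFlow(inputs, outputs)
ef.compile(SelectFittest(), Sum(max_sum_value=8))
results = ef.evolve(population, generations=10, verbose=0)
```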
#### File: evoflow/engine/results.py
```python
from time import time
from collections import defaultdict
import altair as alt  # dynamic graphs
from matplotlib.pylab import plt # static graph
from tabulate import tabulate
import pandas as pd
import evoflow.backend as B
from evoflow.utils import unbox
from evoflow.io import print_debug
class Results(object):
def __init__(self, debug=False):
self.start_time = time()
self._populations = None
self._fitness_scores = None
        self._metrics_latest = defaultdict(dict)  # convenience holder
self._metrics_history = defaultdict(lambda: defaultdict(list))
self.debug = debug
def get_populations(self):
return unbox(self._populations)
def set_population(self, populations):
"""Assign unboxed population
Args:
populations (list): list of population tensors
Note: the whole class assumes populations as a list so don't set
unboxed results or it will break everything.
"""
if self.debug:
print_debug(populations)
self._populations = populations
def display_populations(self,
top_k=10,
max_chromosome_len=1024,
rounding=None):
for pop_idx, population in enumerate(self._populations):
num_chromosomes = min(len(population), top_k)
heatmap = []
gene_max = 0
for cidx, chromosome in enumerate(population):
genes = []
for gene in chromosome[:max_chromosome_len]:
if isinstance(rounding, type(None)):
genes.append(str(gene))
elif rounding > 0:
genes.append(round(float(gene), rounding))
else:
genes.append(int(gene))
gene_max = max(gene_max, gene)
heatmap.append(genes)
if cidx > num_chromosomes:
break
fig, ax = plt.subplots()
im = ax.imshow(heatmap,
interpolation='nearest',
cmap="viridis",
aspect='auto')
# gradient bar
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel('Gene value', rotation=-90, va="bottom")
plt.title(f'Population - top {num_chromosomes}',
fontsize=16,
fontweight='bold')
plt.xlabel("Genes")
plt.ylabel("Chromosome")
# fig.tight_layout()
plt.show()
def top_k(self, top_k=10, max_chromosome_len=20, precision=None):
"""Display the top k chromosome.
Args:
            top_k (int, optional): Number of chromosomes to display.
            Defaults to 10.
            max_chromosome_len (int, optional): how many genes to display
            per chromosome.
            precision (int, optional): how many digits to display per
            gene. Defaults to None; if None, the full value is shown.
FIXME: use HTML while in notebook
"""
for pop_idx, population in enumerate(self._populations):
rows = []
for cidx, chromosome in enumerate(population):
genes = []
for gene in chromosome[:max_chromosome_len]:
if isinstance(precision, type(None)):
genes.append(str(gene))
elif precision > 0:
genes.append(str(round(float(gene), precision)))
else:
genes.append(str(int(gene)))
if len(genes) != len(chromosome):
genes.append(' ...')
genes = " ".join(genes)
row = [self._fitness_scores[pop_idx][cidx], genes]
if cidx == top_k:
break
rows.append(row)
headers = ['fit score', 'genes [:%d]' % max_chromosome_len]
print(tabulate(rows, headers=headers))
def plot_fitness(self, static=False):
"""Plots the various metrics"""
if static:
return self._build_plot_static('Fitness function')
else:
return self._build_plot('Fitness function')
def plot(self, group_name, static=False):
"""Plots group metrics"""
if static:
return self._build_plot_static(group_name)
else:
return self._build_plot(group_name)
def _build_plot_static(self, metric_group):
metrics = self.get_metrics_history()
if metric_group not in metrics:
raise ValueError("can't find metric group")
for name, values in metrics[metric_group].items():
plt.plot(values, label=name)
plt.legend(loc='best')
plt.title(metric_group, fontsize=16, fontweight='bold')
plt.xlabel("Generations")
plt.show()
return None
def _build_plot(self, metric_group):
"Build an altair plot for a given metric group"
metrics = self.get_metrics_history()
if metric_group not in metrics:
raise ValueError("can't find metric group")
data = metrics[metric_group]
rows = []
for name, values in data.items():
for idx, val in enumerate(values):
rows.append({'generation': idx, 'metric': name, 'value': val})
metrics_pd = pd.DataFrame(rows)
        chart = alt.Chart(metrics_pd, title=metric_group).mark_line()
        chart = chart.encode(x='generation', y='value', color='metric')
        chart = chart.configure_title(fontSize=16,
                                      offset=5,
                                      orient='top',
                                      anchor='middle')
return chart
def get_metrics_history(self):
"""Get the last evolution metrics values
Returns:
dict: name: value as list(float).
"""
return self._metrics_history
def get_latest_metrics(self, flatten=False):
"""Get the last evolution metrics values
Args:
flatten (bool, optional): Return metrics as a flat dictionary
instead of a nested one
Returns:
dict: name:value as float.
"""
if flatten:
metrics = {}
for group_name, group_data in self._metrics_latest.items():
for metric, value in group_data.items():
k = "%s_%s" % (group_name, metric)
metrics[k] = value
return metrics
else:
return self._metrics_latest
def record_metrics(self, metrics_list):
"""Record metrics and track their history
Args:
metrics (list(dict(dict))): group of metrics to track. Of the form:
[group][metric] = float(value)
"""
for pop_idx, metrics in enumerate(metrics_list):
for group, data in metrics.items():
for name, value in data.items():
                    # only suffix if there is more than one population
                    if len(metrics_list) > 1:
                        name += "_%s" % pop_idx
self._metrics_history[group][name].append(value)
self._metrics_latest[group][name] = value
def record_fitness(self, fitness_scores):
"""Compute and record fitness related metrics
Args:
fitness_scores (list(ndarray)): tensor holding fitness scores.
"""
self._fitness_scores = fitness_scores
# update history
for pop_idx, fit_scores in enumerate(fitness_scores):
METRICS = [['mean', B.mean], ['max', B.max], ['min', B.min]]
for name, fn in METRICS:
                # only suffix if there is more than one population
if len(fitness_scores) > 1:
name += "_%s" % pop_idx
# compute result
value = float(fn(fit_scores))
self._metrics_history['Fitness function'][name].append(value)
self._metrics_latest['Fitness function'][name] = value
```
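A minimal sketch of the fitness-recording path; with a single population the metric names carry no `_<pop_idx>` suffix, so the flattened keys come out as below:
```python
# Sketch: how record_fitness() feeds the flattened metrics view.
import evoflow.backend as B

results = Results()
results.record_fitness([B.tensor([3.0, 1.0, 2.0])])  # one population
print(results.get_latest_metrics(flatten=True))
# -> {'Fitness function_mean': 2.0,
#     'Fitness function_max': 3.0,
#     'Fitness function_min': 1.0}
```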
#### File: evoflow/selection/select_fittest.py
```python
import evoflow.backend as B
from evoflow.engine import SelectionStrategy
class SelectFittest(SelectionStrategy):
"Select the fittest member of the population"
def __init__(self, mode='max', **kwargs):
"""[summary]
Args:
mode (str, optional): one of `{min', 'max'}`. In 'min' mode,
the fitness function will select individual with the lowest fitness
value; in 'max' mode it will select the one with the highest
values. Defaults to 'max'.
"""
if mode not in ['min', 'max']:
raise ValueError('mode must be either max or min')
self.mode = mode
super(SelectFittest, self).__init__('select_fittest', **kwargs)
def call(self, fitness_function, current_population, evolved_population):
"""Select the most fit individuals from the combined current and
evolved population.
Args:
fitness_function (function): User provided function that return
the fitness value for each chromosome of a population as a Tensor.
current_population: Tensor containing the population prior to
evolution.
evolved_population: Tensor containing the population after the
            evolution.
"""
population_size = current_population.shape[0]
merged_population = B.concatenate(
[current_population, evolved_population])
# fitness computation
fitness_scores = fitness_function(merged_population)
metrics = fitness_function.get_metrics()
# population selection
if self.mode == 'max':
indices = B.top_k_indices(fitness_scores, k=population_size)
else:
indices = B.bottom_k_indices(fitness_scores, k=population_size)
selected_pop = B.take(merged_population, indices, axis=0)
return selected_pop, B.take(fitness_scores, indices, axis=0), metrics
```
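A tiny numeric walk-through of the selection semantics above, expressed with the same backend ops:
```python
# Sketch: 'max'-mode selection over a merged population of 4 candidates.
import evoflow.backend as B

current = B.tensor([[5.0], [1.0]])
evolved = B.tensor([[3.0], [7.0]])
merged = B.concatenate([current, evolved])       # 4 candidates
scores = B.tensor([5.0, 1.0, 3.0, 7.0])          # stand-in fitness values
survivors = B.take(merged, B.top_k_indices(scores, k=2), axis=0)
# survivors == [[7.0], [5.0]]: the two fittest across both generations
```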
#### File: tests/backend/test_manipulation.py
```python
from evoflow.utils import slices2array
def test_roll(backends):
inputs = [1, 2, 3]
expected = [3, 1, 2]
for B in backends:
tensor = B.tensor(inputs)
result = B.roll(tensor, 1, axis=0)
expected_tensor = B.tensor(expected)
assert B.tensor_equal(result, expected_tensor)
def test_roll_2d(backends):
inputs = [[1, 2, 3], [1, 2, 3]]
expected = [[3, 1, 2], [3, 1, 2]]
for B in backends:
tensor = B.tensor(inputs)
result = B.roll(tensor, 1, axis=1)
expected_tensor = B.tensor(expected)
assert B.tensor_equal(result, expected_tensor)
def test_assign(backends):
inputs = [[11, 12, 13], [21, 22, 23], [31, 32, 33]]
values = [[1, 1], [1, 1]]
slices = (slice(1, 3), slice(1, 3))
tslices = slices2array(slices)
expected = [[11, 12, 13], [21, 1, 1], [31, 1, 1]]
for B in backends:
tval = B.tensor(values)
tensor = B.tensor(inputs)
result = B.assign(tensor, tval, tslices)
expected_tensor = B.tensor(expected)
assert B.tensor_equal(result, expected_tensor)
def test_assign2d(backends):
inputs = [
[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
]
values = [[1, 1], [1, 1]]
slices = (slice(0, 1), slice(1, 3), slice(1, 3))
tslices = slices2array(slices)
expected = [
[[11, 12, 13], [21, 1, 1], [31, 1, 1]],
[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
]
for B in backends:
print('--%s--' % B)
tval = B.tensor(values)
tensor = B.tensor(inputs)
result = B.assign(tensor, tval, tslices)
expected_tensor = B.tensor(expected)
print(result)
assert B.tensor_equal(result, expected_tensor)
def test_concatenate(backends):
inputs = [1, 2, 3]
inputs2 = [4, 5, 6]
expected = [1, 2, 3, 4, 5, 6]
for B in backends:
tensor = B.tensor(inputs)
tensor2 = B.tensor(inputs2)
result = B.concatenate([tensor, tensor2])
expected_tensor = B.tensor(expected)
assert B.tensor_equal(result, expected_tensor)
```
#### File: tests/backend/test_random.py
```python
from termcolor import cprint
def arr2d():
return [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
def arr2dlarge():
return [[1, 2, 3, 10, 11, 12], [4, 5, 6, 13, 14, 15],
[7, 8, 9, 16, 17, 18]]
def test_full_shuffle_shape(backends):
    # use arrays where each dimension is different to make sure we get back
    # the same shape. This might seem odd, but there is a real risk of
    # breakage due to how the tf op is implemented
SHAPES = [(10, ), (10, 20), (20, 10), (10, 20, 30), (30, 10, 20),
(10, 20, 30, 40), (40, 30, 20, 10)]
for B in backends:
print('\n==[Backend]==\n')
for shape in SHAPES:
t = B.normal(shape)
assert t.shape == shape
t = B.full_shuffle(t)
assert t.shape == shape
print('shape', shape, 't.shape', t.shape)
def test_shuffle_axis0(backends):
for B in backends:
t = B.tensor(arr2d())
# give it 20 tries to ensure consistency
for _ in range(20):
t = B.shuffle(t)
if t[0][0] != 1 or t[1][0] != 4:
break
assert t[0][0] in [1, 4, 7]
assert t[1][0] in [1, 4, 7]
assert t[2][0] in [1, 4, 7]
assert t[0][0] != 1 or t[1][0] != 4
def test_shuffle_axis1(backends):
for B in backends:
t = B.tensor(arr2d())
cprint(t, 'blue')
# give it 20 tries to ensure consistency
for _ in range(20):
t = B.shuffle(t, axis=1)
if t[0][0] != 1 or t[1][0] != 4:
break
cprint(t, 'green')
assert t[0][0] in [1, 2, 3]
assert t[1][0] in [4, 5, 6]
assert t[2][0] in [7, 8, 9]
assert t[0][0] != 1 or t[1][0] != 4
def test_full_shuffle(backends):
for B in backends:
t = B.tensor(arr2d())
cprint(t, 'blue')
# give it multiple try as identity is a valid shuffle
for _ in range(20):
t = B.full_shuffle(t)
if (t[0][0] != 1 or t[1][0] != 4) and (t[0][1] != 2
or t[1][1] != 5): # noqa
break
cprint(t, 'green')
assert (t[0][0] != 1 or t[1][0] != 4)
assert (t[0][1] != 2 or t[1][1] != 5)
def test_randint_1D(backends):
for B in backends:
print(B)
t = B.randint(0, 11, shape=10)
print(t)
assert len(t) == 10
assert B.max(t) <= 10
assert B.min(t) >= 0
def test_single_number(backends):
for B in backends:
t = B.randint(11)
assert t <= 10
assert t >= 0
t = B.randint(5, 11)
assert t <= 10
assert t >= 5
def test_randint_2D(backends):
for B in backends:
t = B.randint(0, 11, shape=(10, 20))
assert t.shape == (10, 20)
assert B.max(t) <= 10
assert B.min(t) >= 0
```
#### File: tests/backend/test_reduce.py
```python
def test_prod(backends):
inputs = [1, 2, 3, 4]
for B in backends:
tensor = B.tensor(inputs)
assert B.prod(tensor) == 24
def test_sum(backends):
inputs = [1, 2, 3, 4]
for B in backends:
tensor = B.tensor(inputs)
assert B.sum(tensor) == 10
def test_max(backends):
inputs = [1, 2, 3, 4]
for B in backends:
tensor = B.tensor(inputs)
assert B.max(tensor) == 4
def test_min(backends):
inputs = [1, 2, 3]
for B in backends:
tensor = B.tensor(inputs)
assert B.min(tensor) == 1
def test_mean(backends):
inputs = [1, 2, 3]
for B in backends:
tensor = B.tensor(inputs)
assert B.mean(tensor) == 2
```
#### File: tests/engine/test_inputs.py
```python
from evoflow.ops import Input
from evoflow import backend as B
def test_call_vs_get():
shape = (128, 64)
population = B.randint(1, 10, shape=shape)
inputs = Input(shape)
inputs.assign(population)
assert B.tensor_equal(inputs.get(), inputs.call(''))
def test_1d():
shape = (128, 64)
population = B.randint(1, 10, shape=shape)
inputs = Input(shape)
inputs.assign(population)
assert B.tensor_equal(inputs.get(), population)
def test_2d():
shape = (128, 64, 64)
population = B.randint(1, 10, shape=shape)
inputs = Input(shape)
inputs.assign(population)
assert B.tensor_equal(inputs.get(), population)
def test_non_tensor_input():
shape = (2, 4)
population = [[1, 2, 3, 4], [1, 2, 3, 4]]
inputs = Input(shape)
inputs.assign(population)
res = inputs.get()
assert B.is_tensor(res)
```
#### File: tests/ops/test_single_crossover.py
```python
from copy import copy
from termcolor import cprint
from evoflow import backend as B # noqa: F402
from evoflow.ops import SingleCrossover1D, SingleCrossover2D
from evoflow.ops import SingleCrossover3D
def test_ND():
"test various tensor size random"
TEST_INPUTS = [
[SingleCrossover1D, (2, 4), 0.5],
[SingleCrossover2D, (2, 4, 4), (0.5, 0.5)],
[SingleCrossover3D, (2, 4, 4, 4), (0.5, 0.5, 0.5)],
]
for inputs in TEST_INPUTS:
OP = inputs[0]
pop_shape = inputs[1]
mutations_probability = inputs[2]
population_fraction = 1
population = B.randint(0, 1024, pop_shape)
# eager
RM = OP(population_fraction=population_fraction,
mutations_probability=mutations_probability)
population = RM(population)
assert B.is_tensor(population)
assert population.shape == pop_shape
# graph
RM = OP(population_fraction=population_fraction,
mutations_probability=mutations_probability)
population = RM._call_from_graph(population)
assert B.is_tensor(population)
assert population.shape == pop_shape
def test_1D_shape():
POPULATION_SHAPE = (64, 16)
population = B.randint(0, 1024, POPULATION_SHAPE)
population_fraction = 0.5
crossover_size_fraction = 0.2
original_population = copy(population)
population = SingleCrossover1D(population_fraction,
crossover_size_fraction,
debug=1)(population)
cprint(population, 'cyan')
cprint(original_population, 'yellow')
assert population.shape == POPULATION_SHAPE
# measuring mutation rate
diff = B.clip(abs(population - original_population), 0, 1)
cprint('diff', 'cyan')
cprint(diff, 'cyan')
# row test
num_ones_in_row = 0
for col in diff:
num_ones = list(col).count(1)
num_ones_in_row = max(num_ones, num_ones_in_row)
max_one_in_row = POPULATION_SHAPE[1] * crossover_size_fraction
assert num_ones_in_row <= max_one_in_row
assert num_ones_in_row
# col
diff = B.transpose(diff)
num_ones_in_col = 0
for col in diff:
num_ones_in_col = max(list(col).count(1), num_ones_in_col)
max_one_in_col = POPULATION_SHAPE[0] * population_fraction
assert max_one_in_col - 3 <= num_ones_in_col <= max_one_in_col
```
#### File: tests/tensorflow_compile/test_randint.py
```python
import evoflow.backend.tensorflow as B
# from evoflow.backend.tf_ops.assign import assign as assign2
import tensorflow as tf
from perfcounters import PerfCounters
@tf.function()
def randint_tf(low, high, shape):
return B.randint(low, high, shape)
@tf.function(experimental_compile=True)
def randint_xla(low, high, shape):
return B.randint(low, high, shape)
def test_randint_fn():
NUM_TESTS = 3
low = 10
high = 1000
shape = (100, 100, 100)
randint_tf(low, high, shape)
randint_xla(low, high, shape)
v = randint_tf(low, high, shape)
v2 = randint_tf(low, high, shape)
assert not B.tensor_equal(v, v2)
v = randint_xla(low, high, shape)
v2 = randint_xla(low, high, shape)
assert not B.tensor_equal(v, v2)
cnts = PerfCounters()
cnts.start('basic')
for _ in range(NUM_TESTS):
B.randint(low, high, shape)
cnts.stop('basic')
cnts.start('tf_fn')
for _ in range(NUM_TESTS):
randint_tf(low, high, shape)
cnts.stop('tf_fn')
cnts.start('tf_xla')
for _ in range(NUM_TESTS):
randint_xla(low, high, shape)
cnts.stop('tf_xla')
cnts.report()
```
#### File: tests/test_integration/test_direct_2d_tensors.py
```python
from evoflow.engine import EvoFlow
from evoflow.ops import Input, RandomMutations2D, UniformCrossover2D
from evoflow.selection import SelectFittest
from evoflow.fitness import Sum
from evoflow.population import randint_population
import evoflow.backend as B
def test_direct_2d():
NUM_EVOLUTIONS = 2
POPULATION_SIZE = 3
GENE_SIZE = 4
MAX_VAL = 10
SHAPE = (POPULATION_SIZE, GENE_SIZE, GENE_SIZE)
population = randint_population(SHAPE, MAX_VAL)
fitness_function = Sum(max_sum_value=GENE_SIZE)
evolution_strategy = SelectFittest()
inputs = Input(shape=SHAPE)
# x = RandomMutations2D(max_gene_value=1, min_gene_value=0)(inputs)
outputs = UniformCrossover2D()(inputs)
ef = EvoFlow(inputs, outputs, debug=True)
ef.compile(evolution_strategy, fitness_function)
ef.evolve(population, generations=NUM_EVOLUTIONS, verbose=0)
if __name__ == "__main__":
test_direct_2d()
```
#### File: evoflow/tests/test_population.py
```python
from evoflow.population import randint_population, uniform_population
import evoflow.backend as B
def test_randintpop():
shape = (100, 100, 10)
pop = randint_population(shape, 42, 1)
assert pop.shape == shape
assert B.max(pop) == 42
assert B.min(pop) == 1
assert pop.dtype == B.intx()
def test_uniformpop():
shape = (100, 100)
pop = uniform_population(shape)
assert pop.shape == shape
assert B.max(pop) == 99
assert B.min(pop) == 0
for chrm in pop:
_, _, count = B.unique_with_counts(chrm)
assert B.max(count) == 1
``` |
{
"source": "joaogui1/objax",
"score": 3
} |
#### File: objax/tests/testrandom.py
```python
import unittest
import numpy as np
import scipy.stats
import objax
class TestRandom(unittest.TestCase):
def helper_test_randint(self, shape, low, high):
"""Helper function to test objax.random.randint."""
value = objax.random.randint(shape, low, high)
self.assertEqual(value.shape, shape)
self.assertTrue(np.all(value >= low))
self.assertTrue(np.all(value < high))
def test_randint(self):
"""Test for objax.random.randint."""
self.helper_test_randint(shape=(3, 4), low=1, high=10)
self.helper_test_randint(shape=(5,), low=0, high=5)
self.helper_test_randint(shape=(), low=-5, high=5)
def helper_test_normal(self, shape, stddev):
"""Helper function to test objax.random.normal."""
value = objax.random.normal(shape, stddev=stddev)
self.assertEqual(value.shape, shape)
def test_normal(self):
"""Test for objax.random.normal."""
self.helper_test_normal(shape=(4, 2, 3), stddev=1.0)
self.helper_test_normal(shape=(2, 3), stddev=2.0)
self.helper_test_normal(shape=(5,), stddev=2.0)
self.helper_test_normal(shape=(), stddev=10.0)
value = np.array(objax.random.normal((1000, 100)))
self.assertAlmostEqual(value.mean(), 0, delta=0.01)
self.assertAlmostEqual(value.std(), 1, delta=0.01)
value = np.array(objax.random.normal((1000, 100), mean=0, stddev=2))
self.assertAlmostEqual(value.mean(), 0, delta=0.01)
self.assertAlmostEqual(value.std(), 2, delta=0.01)
value = np.array(objax.random.normal((1000, 100), mean=1, stddev=1.5))
self.assertAlmostEqual(value.mean(), 1, delta=0.01)
self.assertAlmostEqual(value.std(), 1.5, delta=0.01)
def helper_test_truncated_normal(self, shape, stddev, bound):
"""Helper function to test objax.random.truncated_normal."""
value = objax.random.truncated_normal(shape, stddev=stddev, lower=-bound, upper=bound)
self.assertEqual(value.shape, shape)
self.assertTrue(np.all(value >= -bound * stddev))
self.assertTrue(np.all(value <= bound * stddev))
def test_truncated_normal(self):
"""Test for objax.random.truncated_normal."""
self.helper_test_truncated_normal(shape=(5, 7), stddev=1.0, bound=2.0)
self.helper_test_truncated_normal(shape=(4,), stddev=2.0, bound=4.0)
self.helper_test_truncated_normal(shape=(), stddev=1.0, bound=4.0)
value = np.array(objax.random.truncated_normal((1000, 100)))
truncated_std = scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1)
self.assertAlmostEqual(value.mean(), 0, delta=0.01)
self.assertAlmostEqual(value.std(), truncated_std, delta=0.01)
self.assertAlmostEqual(value.min(), -1.9, delta=0.1)
self.assertAlmostEqual(value.max(), 1.9, delta=0.1)
value = np.array(objax.random.truncated_normal((1000, 100), stddev=2, lower=-3, upper=3))
truncated_std = scipy.stats.truncnorm.std(a=-3, b=3, loc=0., scale=2)
self.assertAlmostEqual(value.mean(), 0, delta=0.01)
self.assertAlmostEqual(value.std(), truncated_std, delta=0.01)
self.assertAlmostEqual(value.min(), -5.9, delta=0.1)
self.assertAlmostEqual(value.max(), 5.9, delta=0.1)
value = np.array(objax.random.truncated_normal((1000, 100), stddev=1.5))
truncated_std = scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.5)
self.assertAlmostEqual(value.mean(), 0, delta=0.01)
self.assertAlmostEqual(value.std(), truncated_std, delta=0.01)
self.assertAlmostEqual(value.min(), -2.9, delta=0.1)
self.assertAlmostEqual(value.max(), 2.9, delta=0.1)
def helper_test_uniform(self, shape):
"""Helper function to test objax.random.uniform."""
value = objax.random.uniform(shape)
self.assertEqual(value.shape, shape)
self.assertTrue(np.all(value >= 0.0))
self.assertTrue(np.all(value < 1.0))
def test_uniform(self):
"""Test for objax.random.uniform."""
self.helper_test_uniform(shape=(4, 3))
self.helper_test_uniform(shape=(5,))
self.helper_test_uniform(shape=())
def test_generator(self):
"""Test for objax.random.Generator."""
g1 = objax.random.Generator(0)
g2 = objax.random.Generator(0)
g3 = objax.random.Generator(1)
value1 = objax.random.randint((3, 4), low=0, high=65536, generator=g1)
value2 = objax.random.randint((3, 4), low=0, high=65536, generator=g2)
value3 = objax.random.randint((3, 4), low=0, high=65536, generator=g3)
self.assertTrue(np.all(value1 == value2))
self.assertFalse(np.all(value1 == value3))
g4 = objax.random.Generator(123)
value = [objax.random.randint(shape=(1,), low=0, high=65536, generator=g4) for _ in range(2)]
self.assertNotEqual(value[0], value[1])
if __name__ == '__main__':
unittest.main()
``` |
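The generator test above pins down objax's reproducibility contract; a minimal sketch of relying on it in user code:
```python
# Sketch: replaying a random stream by re-seeding a Generator.
import objax

g = objax.random.Generator(42)
draw = objax.random.randint((2, 2), low=0, high=10, generator=g)
g_again = objax.random.Generator(42)
replay = objax.random.randint((2, 2), low=0, high=10, generator=g_again)
assert (draw == replay).all()   # same seed, same stream
```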
{
"source": "joaogui1/parallax",
"score": 2
} |
#### File: parallax/parallax/core.py
```python
import itertools
from collections import namedtuple
from dataclasses import dataclass
from typing import Any, Union, Tuple
from frozendict import frozendict
import jax
class ParamInit:
initializer : Any # function (rng, shape) |-> tensor
shape : Tuple[int]
def __init__(self, shape, initializer):
self.shape = shape
self.initializer = initializer
def instantiate(self, rng):
"""Returns a tensor created according to this init."""
return self.initializer(key=rng, shape=self.shape)
def __repr__(self):
return "ParamInit(" + ", ".join([str(d) for d in self.shape]) + ")"
Parameter = Union[ParamInit, jax.interpreters.xla.DeviceArray]
@jax.tree_util.register_pytree_node_class
@dataclass
class ParameterTuple:
parameters : Tuple[Parameter]
def __init__(self, parameters):
self.parameters = tuple(parameters)
def instantiate(self, rng):
rngs = jax.random.split(rng, len(self.parameters))
return ParameterTuple(p.instantiate(rng) for p, rng in zip(self.parameters, rngs))
def __repr__(self):
return "ParameterTuple(" + ", ".join([repr(p) for p in self.parameters]) + ")"
def __iter__(self):
return self.parameters.__iter__()
def tree_flatten(self):
aux = [self.__class__]
leaves = []
for p in self.parameters:
if isinstance(p, ParameterTuple):
l, a = p.tree_flatten()
leaves += l
aux.append((len(l), a))
else:
leaves.append(p)
aux.append(None)
return leaves, aux
@classmethod
def tree_unflatten(cls, aux, leaves):
parameters = []
i = 0
for p in aux[1:]:
if p is None:
parameters.append(leaves[i])
i += 1
else:
nleaves, a = p
parameters.append(
cls.tree_unflatten(a, leaves[i:i+nleaves])
)
i += nleaves
assert i == len(leaves)
return cls(parameters)
def _recursive_all_annotations(cls):
d = frozendict()
for c in cls.__mro__[::-1]:
if "__annotations__" in c.__dict__:
d = d.copy(**c.__annotations__)
return d
@jax.tree_util.register_pytree_node_class
class Module:
_is_initialized: bool = False
mode : str
rng : jax.interpreters.xla.DeviceArray
_parameters : Tuple[Union[Parameter, ParameterTuple]]
_modules : Tuple[Union["Module", "ModuleTuple"]] # apparently that's the best we can do for recursive types :(
_constants : Tuple[Any]
ModField = namedtuple("ModField", ["name", "type"])
def __init__(self):
self._is_initialized = False
self.mode = "train"
self.rng = None
self._register_fields()
def __setattr__(self, name, value):
if self._is_initialized:
raise Exception(f"Can't set {name}, class is already initialized!")
elif name not in _recursive_all_annotations(self.__class__).keys():
raise Exception(f"Field {name} was not declared in {self.__class__} or ancestors!")
else:
self.__dict__[name] = value
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
jax.tree_util.register_pytree_node_class(cls)
@classmethod
def _user_fields(cls):
return sorted([
cls.ModField(k, v)
for k, v in _recursive_all_annotations(cls).items()
if k not in ["_is_initialized", "mode", "rng", "_parameters",
"_modules", "_constants"]
], key=lambda f: f.name)
@classmethod
def _new_from(cls, **kwargs):
obj = cls.__new__(cls)
for k, v in kwargs.items():
obj.__dict__[k] = v
obj._register_fields()
return obj
def _updated_with(self, **kwargs):
obj = self.__class__.__new__(self.__class__)
for k, v in self.__dict__.items():
if k in kwargs:
obj.__dict__[k] = kwargs[k]
else:
obj.__dict__[k] = v
return obj
def _register_fields(self):
super().__setattr__('_modules',
tuple([f.name
for f in self._user_fields()
if not f.type == Parameter and
not issubclass(f.type, ParameterTuple) and
(issubclass(f.type, ModuleTuple) or
issubclass(f.type, Module))]))
super().__setattr__('_parameters',
tuple([f.name
for f in self._user_fields()
if f.type == Parameter or
issubclass(f.type, ParameterTuple)]))
super().__setattr__('_constants',
tuple([f.name
for f in self._user_fields()
if not f.type == Parameter and
not issubclass(f.type, ParameterTuple) and
not issubclass(f.type, ModuleTuple) and
not issubclass(f.type, Module)
]))
def _all_constants(self):
d = frozendict({"_is_initialized" : self._is_initialized,
"mode" : self.mode,
"rng" : self.rng})
for c in self._constants:
d = d.copy(**{c : self.__dict__[c]})
return d
def split(self, num_splits):
rngs = jax.random.split(self.rng, num_splits)
return [self._updated_with(rng=rng) for rng in rngs]
def initialized(self, rng):
d = self._all_constants().copy(_is_initialized = True)
rng_p, rng_m = jax.random.split(rng)
rngs = jax.random.split(rng_p, len(self._parameters))
for p, rng in zip(self._parameters, rngs):
assert isinstance(self.__dict__[p], ParamInit) or \
isinstance(self.__dict__[p], ParameterTuple)
d = d.copy(**{p : self.__dict__[p].instantiate(rng)})
rngs = jax.random.split(rng_m, len(self._modules))
for m, rng in zip(self._modules, rngs):
if isinstance(self.__dict__[m], Module):
assert not self.__dict__[m]._is_initialized
d = d.copy(**{m: self.__dict__[m].initialized(rng)})
return self.__class__._new_from(**d)
def new_state(self, rng, mode=None):
d = frozendict({"rng": rng, "mode": mode or self.mode})
rngs = jax.random.split(rng, len(self._modules))
for m, rng in zip(self._modules, rngs):
d = d.copy(**{m: self.__dict__[m].new_state(rng, mode)})
return self._updated_with(**d)
def __call__(self, *args):
return self.forward(*args)
def grad(self, input):
return jax.grad(self.__class__.forward)(self, input)
def tree_flatten(self):
flat_module_names = tuple(self._modules)
flat_modules = [self.__dict__[m].tree_flatten()
for m in flat_module_names]
flat_parameter_names = tuple(self._parameters)
flat_parameters = [self.__dict__[p].tree_flatten()
if isinstance(self.__dict__[p], ParameterTuple)
else ([self.__dict__[p]], None)
for p in flat_parameter_names]
leaves = tuple(itertools.chain(
*[leaves for (leaves, _) in flat_modules + flat_parameters],
))
aux = (
self.__class__,
self._all_constants(),
[
(name, len(leaves), aux)
for name, (leaves, aux) in zip(
flat_module_names + flat_parameter_names,
flat_modules + flat_parameters
)
],
)
return (leaves, aux)
@classmethod
def tree_unflatten(cls, aux, leaves):
_cls, d, aux_fields = aux
assert cls == _cls
i = 0
add_d = {}
for name, n_leaves, aux in aux_fields:
if aux is None:
assert n_leaves == 1
add_d[name] = leaves[i]
else:
add_d[name] = aux[0].tree_unflatten(aux, leaves[i:i+n_leaves])
i += n_leaves
assert i == len(leaves)
d = d.copy(**add_d)
return cls._new_from(**d)
def modules(self):
for f in self._modules:
yield f, self.__dict__[f]
def _get_name(self):
return self.__class__.__name__
def extra_repr(self):
return ""
def __repr__(self):
"Mimic the pytorch pretty printer"
def _addindent(s_, numSpaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split('\n')
child_lines = []
for key, module in self.modules():
mod_str = repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append('(' + key + '): ' + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + (" uninit " if not self._is_initialized else "") + '('
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(lines) + '\n'
main_str += ')'
return main_str
@jax.tree_util.register_pytree_node_class
@dataclass
class ModuleTuple:
modules : Tuple[Module]
def __init__(self, modules):
self.modules = tuple(modules)
def initialized(self, rng):
rngs = jax.random.split(rng, len(self.modules))
return ModuleTuple(m.initialized(rng) for m, rng in zip(self.modules, rngs))
def new_state(self, rng, mode=None):
rngs = jax.random.split(rng, len(self.modules))
return ModuleTuple(
m.new_state(rng, mode)
for m, rng in zip(self.modules, rngs)
)
def __repr__(self):
return "ModuleTuple(" + ", ".join([repr(m) for m in self.modules]) + ")"
def __iter__(self):
return self.modules.__iter__()
def tree_flatten(self):
aux = [self.__class__]
leaves = []
for m in self.modules:
l, a = m.tree_flatten()
leaves += l
aux.append((m.__class__, len(l), a))
return leaves, aux
@classmethod
def tree_unflatten(cls, aux, leaves):
modules = []
i = 0
for m in aux[1:]:
child_cls, nleaves, a = m
modules.append(
child_cls.tree_unflatten(a, leaves[i:i+nleaves])
)
i += nleaves
assert i == len(leaves)
return cls(modules)
@jax.tree_util.register_pytree_node_class
class OptState:
def __init__(self, state, _update, _get_params):
self.state = state
self._get_params = _get_params
self._update = _update
def updated(self, i, grad):
return OptState(self._update(i, grad, self.state), self._update, self._get_params)
def get(self):
return self._get_params(self.state)
def tree_flatten(self):
aux = [self._update, self._get_params]
leaves = [self.state]
return leaves, aux
@classmethod
def tree_unflatten(cls, aux, leaves):
return cls(leaves[0], aux[0], aux[1])
class Optimizer:
def __init__(self, hooks):
self._hooks = hooks
def initialized(self, module, rng):
return OptState(self._hooks[0](module.initialized(rng)),
self._hooks[1], self._hooks[2])
```
#### File: parallax/parallax/layers.py
```python
import jax
import jax.nn.initializers as init
from .core import *
class Dense(Module):
# All parameter-holders are explicitly declared.
weight : Parameter
bias : Parameter
    # __init__ declares the parameter shapes and binds lazy initializers.
def __init__(self, in_size, out_size):
super().__init__()
self.weight = ParamInit((out_size, in_size), init.xavier_normal())
self.bias = ParamInit((out_size,), init.normal())
# Forward is just like standard pytorch.
def forward(self, input):
return self.weight @ input + self.bias
# Hook for pretty printing
def extra_repr(self):
return "%d, %d"%(self.weight.shape[1], self.weight.shape[0])
class Dropout(Module):
# Arbitrary constants allowed.
rate : float
def __init__(self, rate):
super().__init__()
self.rate = rate
    def forward(self, input):
        # RNG state is use-once or split. Attached to tree.
        state = self.rng
        if self.mode == "train":
            # note: `rate` is the keep probability, not the drop rate
            keep = jax.random.bernoulli(state, self.rate, input.shape)
            return jax.numpy.where(keep, input / self.rate, 0)
else:
return input
class BinaryNetwork(Module):
# No difference between modules and parameters
dense1 : Dense
dense2 : Dense
dense3 : Dense
dropout : Dropout
def __init__(self, input_size, hidden_size):
super().__init__()
self.dense1 = Dense(input_size, hidden_size)
self.dense2 = Dense(hidden_size, hidden_size)
self.dense3 = Dense(hidden_size, 1)
self.dropout = Dropout(0.2)
def forward(self, input):
# Standard usage works out of the box.
x = jax.numpy.tanh(self.dense1(input))
# Stochastic modules (have random seed already)
x = self.dropout(x)
# Shared params / recurrence only requires split to change RNG
x = jax.numpy.tanh(self.dense2(x))
x = jax.numpy.tanh(self.dense2(x))
return jax.nn.sigmoid(self.dense3(jax.numpy.tanh(x)))[0]
class LSTMCell(Module):
weight_ih : Parameter
linear_hh : Dense
def __init__(self, input_size, hidden_size):
super().__init__()
self.weight_ih = ParamInit((input_size, 4 * hidden_size), init.normal())
self.linear_hh = Dense(input_size, 4 * hidden_size)
def forward(self, input, h, c):
ifgo = self.weight_ih.T @ input + self.linear_hh(h)
i, f, g, o = jax.numpy.split(ifgo, indices_or_sections=4, axis=-1)
i = jax.nn.sigmoid(i)
f = jax.nn.sigmoid(f)
g = jax.numpy.tanh(g)
o = jax.nn.sigmoid(o)
new_c = f * c + i * g
new_h = o * jax.numpy.tanh(new_c)
return (new_h, new_c)
class MultiLayerLSTM(Module):
# Dynamic number of parameters and modules
cells : ModuleTuple
c_0s : ParameterTuple
def __init__(self, n_layers, n_hidden):
"""For simplicity, have everything have the same dimension."""
super().__init__()
self.cells = ModuleTuple([
LSTMCell(n_hidden, n_hidden)
for _ in range(n_layers)
])
self.c_0s = ParameterTuple([
ParamInit((n_hidden,), init.normal())
for _ in range(n_layers)
])
@property
def hc_0s(self):
return tuple((jax.numpy.tanh(c_0), c_0) for c_0 in self.c_0s)
@jax.jit # a lot faster (try it without!)
def forward(self, input, hcs):
new_hcs = []
for cell, hc in zip(self.cells, hcs):
input, c = cell(input, *hc)
new_hcs.append((input, c))
return tuple(new_hcs)
``` |
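A short sketch of the lifecycle these layers assume: declare the module (parameters stay lazy `ParamInit`s), call `initialized()` to realize them, then `new_state()` to attach RNGs before the forward pass:
```python
# Sketch: building and running a module from this file.
import jax
import jax.numpy as jnp

net = BinaryNetwork(input_size=10, hidden_size=32)  # lazy ParamInits
net = net.initialized(jax.random.PRNGKey(0))        # params realized
net = net.new_state(jax.random.PRNGKey(1))          # RNG for Dropout
y = net(jnp.ones(10))                               # scalar in (0, 1)
```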
{
"source": "joaogui1/rlax-1",
"score": 2
} |
#### File: rlax/_src/multistep_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from rlax._src import multistep
from rlax._src import test_util
class LambdaReturnsTest(parameterized.TestCase):
def setUp(self):
super(LambdaReturnsTest, self).setUp()
self.lambda_ = 0.75
self.r_t = np.array(
[[1.0, 0.0, -1.0, 0.0, 1.0], [0.5, 0.8, -0.7, 0.0, 2.1]])
self.discount_t = np.array(
[[0.5, 0.9, 1.0, 0.5, 0.8], [0.9, 0.5, 0.3, 0.8, 0.7]])
self.v_t = np.array(
[[3.0, 1.0, 5.0, -5.0, 3.0], [-1.7, 1.2, 2.3, 2.2, 2.7]])
self.expected = np.array(
[[1.6460547, 0.72281253, 0.7375001, 0.6500001, 3.4],
[0.7866317, 0.9913063, 0.1101501, 2.834, 3.99]],
dtype=np.float32)
@test_util.parameterize_vmap_variant()
def test_lambda_returns_batch(self, variant):
"""Tests for a full batch."""
lambda_returns = variant(multistep.lambda_returns, lambda_=self.lambda_)
# Compute lambda return in batch.
actual = lambda_returns(self.r_t, self.discount_t, self.v_t)
# Test return estimate.
np.testing.assert_allclose(self.expected, actual, rtol=1e-5)
class DiscountedReturnsTest(parameterized.TestCase):
def setUp(self):
super(DiscountedReturnsTest, self).setUp()
self.r_t = np.array(
[[1.0, 0.0, -1.0, 0.0, 1.0], [0.5, 0.8, -0.7, 0.0, 2.1]])
self.discount_t = np.array(
[[0.5, 0.9, 1.0, 0.5, 0.8], [0.9, 0.5, 0.3, 0.8, 0.7]])
self.v_t = np.array(
[[3.0, 1.0, 5.0, -5.0, 3.0], [-1.7, 1.2, 2.3, 2.2, 2.7]])
self.bootstrap_v = np.array([v[-1] for v in self.v_t])
self.expected = np.array(
[[1.315, 0.63000005, 0.70000005, 1.7, 3.4],
[1.33592, 0.9288, 0.2576, 3.192, 3.9899998]],
dtype=np.float32)
@test_util.parameterize_vmap_variant()
def test_discounted_returns_batch(self, variant):
"""Tests for a single element."""
discounted_returns = variant(multistep.discounted_returns)
# Compute discounted return.
actual_scalar = discounted_returns(self.r_t, self.discount_t,
self.bootstrap_v)
actual_vector = discounted_returns(self.r_t, self.discount_t, self.v_t)
# Test output.
np.testing.assert_allclose(self.expected, actual_scalar, rtol=1e-5)
np.testing.assert_allclose(self.expected, actual_vector, rtol=1e-5)
class TDErrorTest(parameterized.TestCase):
def setUp(self):
super(TDErrorTest, self).setUp()
self.r_t = np.array(
[[1.0, 0.0, -1.0, 0.0, 1.0], [0.5, 0.8, -0.7, 0.0, 2.1]])
self.discount_t = np.array(
[[0.5, 0.9, 1.0, 0.5, 0.8], [0.9, 0.5, 0.3, 0.8, 0.7]])
self.rho_tm1 = np.array(
[[0.5, 0.9, 1.3, 0.2, 0.8], [2., 0.1, 1., 0.4, 1.7]])
self.values = np.array(
[[3.0, 1.0, 5.0, -5.0, 3.0, 1.], [-1.7, 1.2, 2.3, 2.2, 2.7, 2.]])
@test_util.parameterize_vmap_variant()
def test_importance_corrected_td_errors_batch(self, variant):
"""Tests equivalence to computing the error from a the lambda-return."""
# Vmap and optionally compile.
lambda_returns = variant(multistep.lambda_returns)
td_errors = variant(multistep.importance_corrected_td_errors)
# Compute multistep td-error with recursion on deltas.
td_direct = td_errors(self.r_t, self.discount_t, self.rho_tm1,
np.ones_like(self.discount_t), self.values)
# Compute off-policy corrected return, and derive td-error from it.
ls_ = np.concatenate((self.rho_tm1[:, 1:], [[1.], [1.]]), axis=1)
td_from_returns = self.rho_tm1 * (
lambda_returns(self.r_t, self.discount_t, self.values[:, 1:], ls_) -
self.values[:, :-1])
# Check equivalence.
np.testing.assert_allclose(td_direct, td_from_returns, rtol=1e-5)
if __name__ == '__main__':
absltest.main()
```
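For reference, `lambda_returns` as exercised above follows the standard backward recursion `G_t = r_t + d_t * ((1 - lambda) * v_t + lambda * G_{t+1})`, bootstrapping from the final value. A hypothetical single-sequence reference (not part of rlax):
```python
# Hypothetical reference implementation of the lambda-return recursion
# (single sequence); rlax's version is vectorized and jit-friendly.
import numpy as np

def lambda_returns_ref(r_t, discount_t, v_t, lambda_=1.0):
    g = v_t[-1]                      # bootstrap from the final value
    out = np.zeros_like(r_t)
    for t in reversed(range(len(r_t))):
        g = r_t[t] + discount_t[t] * ((1 - lambda_) * v_t[t] + lambda_ * g)
        out[t] = g
    return out
```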
#### File: rlax/_src/perturbations_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from rlax._src import perturbations
from rlax._src import test_util
class GaussianTest(parameterized.TestCase):
def setUp(self):
super(GaussianTest, self).setUp()
self._num_actions = 3
self._rng_key = jax.random.PRNGKey(42)
@test_util.parameterize_variant()
def test_deterministic(self, variant):
"""Check that noisy and noisless actions match for zero stddev."""
add_noise = variant(perturbations.add_gaussian_noise)
# Test that noisy and noisless actions match for zero stddev
for _ in range(10):
action = np.random.normal(0., 1., self._num_actions)
# Test output.
self._rng_key, key = jax.random.split(self._rng_key)
noisy_action = add_noise(key, action, 0.)
np.testing.assert_allclose(action, noisy_action)
class OrnsteinUhlenbeckTest(parameterized.TestCase):
def setUp(self):
super(OrnsteinUhlenbeckTest, self).setUp()
self._num_actions = 3
self._rng_key = jax.random.PRNGKey(42)
@test_util.parameterize_variant()
def test_deterministic(self, variant):
"""Check that noisy and noisless actions match for zero stddev."""
add_noise = variant(perturbations.add_ornstein_uhlenbeck_noise)
# Test that noisy and noisless actions match for zero stddev
noise_tm1 = np.zeros((self._num_actions,))
for _ in range(10):
action = np.random.normal(0., 1., self._num_actions)
# Test output.
self._rng_key, key = jax.random.split(self._rng_key)
noisy_action = add_noise(key, action, noise_tm1, 1., 0.)
noise_tm1 = action - noisy_action
np.testing.assert_allclose(action, noisy_action)
if __name__ == '__main__':
absltest.main()
```
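The zero-noise assertions above follow directly from the Ornstein-Uhlenbeck update; a hedged sketch of the standard form (rlax's exact parameterization may differ in details):
```python
# Hedged sketch of a standard OU perturbation step: with damping=1 and
# stddev=0 the noise term vanishes, which is what the tests assert.
import jax

def ou_noise_ref(key, action, noise_tm1, damping, stddev):
    eps = jax.random.normal(key, action.shape)
    noise_t = (1.0 - damping) * noise_tm1 + stddev * eps
    return action + noise_t
```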
#### File: rlax/_src/test_util.py
```python
import functools
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
def parameterize_variant(*testcases):
"""A decorator to test each test case with all variants.
This decorator is an enhanced version of `parameterized.named_parameters`.
The variant factory is appended to the end of the tuple of the function
parameters.
Args:
*testcases: Tuples to pass to `parameterized.named_parameters`.
An empty list of testcases will produce one test for each variant.
Returns:
A test generator to test each test case with all variants.
"""
factories = _get_variant_factories()
return _enhance_named_parameters(factories, testcases)
def parameterize_vmap_variant(*testcases):
"""A decorator to test each test case with all variants of vmap.
This decorator is an enhanced version of `parameterized.named_parameters`.
The variant factory is appended to the end of the tuple of the function
parameters.
Args:
*testcases: Tuples to pass to `parameterized.named_parameters`.
An empty list of testcases will produce one test for each variant.
Returns:
A test generator to test each test case with all variants.
"""
factories = _get_vmap_variant_factories()
return _enhance_named_parameters(factories, testcases)
def _enhance_named_parameters(factories, testcases):
"""Calls parameterized.named_parameters() with enhanced testcases."""
if not testcases:
testcases = [("variant",)]
enhanced_testcases = []
for testcase in testcases:
name = testcase[0]
test_args = tuple(testcase[1:])
for variant_name, raw_factory in factories.items():
variant_factory = _produce_variant_factory(raw_factory)
# The variant_factory will be the last argument.
case = (name + "_" + variant_name,) + test_args + (variant_factory,)
enhanced_testcases.append(case)
return parameterized.named_parameters(
*enhanced_testcases)
def _produce_variant_factory(raw_factory):
def variant_factory(fn, *args, **kwargs):
return raw_factory(functools.partial(fn, *args, **kwargs))
return variant_factory
def _get_variant_factories():
factories = dict(
nodevice=_without_device,
jit=lambda f: _without_device(jax.jit(f)),
device=_with_device,
device_jit=lambda f: _with_device(jax.jit(f)),
)
return factories
def _get_vmap_variant_factories():
"""Returns factories for variants operating on batch data."""
factories = dict(
jit_vmap=lambda f: _without_device(jax.jit(jax.vmap(f))),
device_vmap=lambda f: _with_device(jax.vmap(f)),
device_jit_vmap=lambda f: _with_device(jax.jit(jax.vmap(f))),
iteration=lambda f: _with_iteration(_without_device(f)),
iteration_jit=lambda f: _with_iteration(_without_device(jax.jit(f))),
iteration_device=lambda f: _with_iteration(_with_device(f)),
iteration_device_jit=lambda f: _with_iteration(_with_device(jax.jit(f))),
)
return factories
def strict_zip(*args):
"""A strict `zip()` that requires sequences with the same length."""
expected_len = len(args[0])
for arg in args:
np.testing.assert_equal(len(arg), expected_len)
return zip(*args)
def _with_iteration(fn):
"""Uses iteration to produce vmap-like output."""
def wrapper(*args):
outputs = []
# Iterating over the first axis.
for inputs in strict_zip(*args):
outputs.append(fn(*inputs))
return jax.tree_util.tree_multimap(lambda *x: jnp.stack(x), *outputs)
return wrapper
def _with_device(fn):
"""Puts all inputs to a device."""
def wrapper(*args):
converted = jax.device_put(args)
return fn(*converted)
return wrapper
def _without_device(fn):
"""Moves all inputs outside of a device."""
def wrapper(*args):
def get(x):
if isinstance(x, jnp.DeviceArray):
return jax.device_get(x)
return x
converted = jax.tree_util.tree_map(get, args)
return fn(*converted)
return wrapper
```
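A minimal sketch of how these helpers are consumed (the test class and `double` function below are illustrative, not part of rlax): `parameterize_variant` appends a variant factory as the final test argument, and calling it on a function yields the no-device/jit/device-wrapped version under test.

```python
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from rlax._src import test_util

class DoubleTest(parameterized.TestCase):

  @test_util.parameterize_variant(("small", 2.0), ("large", 200.0))
  def test_double(self, x, variant):
    # `variant` wraps the function under test (e.g. with jax.jit).
    double = variant(lambda v: 2.0 * v)
    np.testing.assert_allclose(double(x), 2.0 * x)

if __name__ == "__main__":
  absltest.main()
```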
#### File: rlax/_src/vtrace_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from rlax._src import distributions
from rlax._src import test_util
from rlax._src import vtrace
class VTraceTest(parameterized.TestCase):
def setUp(self):
super(VTraceTest, self).setUp()
behavior_policy_logits = np.array(
[[[8.9, 0.7], [5.0, 1.0], [0.6, 0.1], [-0.9, -0.1]],
[[0.3, -5.0], [1.0, -8.0], [0.3, 1.7], [4.7, 3.3]]],
dtype=np.float32)
target_policy_logits = np.array(
[[[0.4, 0.5], [9.2, 8.8], [0.7, 4.4], [7.9, 1.4]],
[[1.0, 0.9], [1.0, -1.0], [-4.3, 8.7], [0.8, 0.3]]],
dtype=np.float32)
actions = np.array([[0, 1, 0, 0], [1, 0, 0, 1]], dtype=np.int32)
self._rho_t = distributions.categorical_importance_sampling_ratios(
target_policy_logits, behavior_policy_logits, actions)
self._rewards = np.array([[-1.3, -1.3, 2.3, 42.0], [1.3, 5.3, -3.3, -5.0]],
dtype=np.float32)
self._discounts = np.array([[0., 0.89, 0.85, 0.99], [0.88, 1., 0.83, 0.95]],
dtype=np.float32)
self._values = np.array([[2.1, 1.1, -3.1, 0.0], [3.1, 0.1, -1.1, 7.4]],
dtype=np.float32)
self._bootstrap_value = np.array([8.4, -1.2], dtype=np.float32)
self._inputs = [
self._rewards, self._discounts, self._rho_t,
self._values, self._bootstrap_value]
self._clip_rho_threshold = 1.0
self._clip_pg_rho_threshold = 5.0
self._lambda = 1.0
self._expected_td = np.array(
[[-1.6155143, -3.4973226, 1.8670533, 5.0316002e1],
[1.4662437, 3.6116405, -8.3327293e-5, -1.3540000e1]],
dtype=np.float32)
self._expected_pg = np.array(
[[-1.6155143, -3.4973226, 1.8670534, 5.0316002e1],
[1.4662433, 3.6116405, -8.3369283e-05, -1.3540000e+1]],
dtype=np.float32)
@test_util.parameterize_vmap_variant()
def test_vtrace_td_error_and_advantage(self, variant):
"""Tests for a full batch."""
vtrace_td_error_and_advantage = variant(
vtrace.vtrace_td_error_and_advantage,
clip_rho_threshold=self._clip_rho_threshold, lambda_=self._lambda)
# Get function arguments.
r_t, discount_t, rho_t, v_tm1, bootstrap_value = self._inputs
v_t = np.concatenate([v_tm1[:, 1:], bootstrap_value[:, None]], axis=1)
# Compute vtrace output.
vtrace_output = vtrace_td_error_and_advantage(
v_tm1, v_t, r_t, discount_t, rho_t)
# Test output.
np.testing.assert_allclose(
self._expected_td, vtrace_output.errors, rtol=1e-3)
np.testing.assert_allclose(
self._expected_pg, vtrace_output.pg_advantage, rtol=1e-3)
@test_util.parameterize_vmap_variant()
def test_lambda_q_estimate(self, variant):
"""Tests for a full batch."""
lambda_ = 0.8
vtrace_td_error_and_advantage = variant(
vtrace.vtrace_td_error_and_advantage,
clip_rho_threshold=self._clip_rho_threshold, lambda_=lambda_)
# Get function arguments.
r_t, discount_t, rho_t, v_tm1, bootstrap_value = self._inputs
v_t = np.concatenate([v_tm1[:, 1:], bootstrap_value[:, None]], axis=1)
# Compute vtrace output.
vtrace_output = vtrace_td_error_and_advantage(
v_tm1, v_t, r_t, discount_t, rho_t)
expected_vs = vtrace_output.errors + v_tm1
clipped_rho_t = np.minimum(self._clip_rho_threshold, rho_t)
vs_from_q = v_tm1 + clipped_rho_t * (vtrace_output.q_estimate - v_tm1)
# Test output.
np.testing.assert_allclose(expected_vs, vs_from_q, rtol=1e-3)
if __name__ == '__main__':
absltest.main()
``` |
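For intuition on the `rho_t` term built in `setUp` above, here is a hedged one-step sketch (values illustrative): the ratios are pi(a|s) / mu(a|s), computed from the two sets of logits and the taken actions.

```python
import numpy as np
from rlax._src import distributions

# One timestep, one batch element, two actions; action 0 is taken.
pi_logits = np.array([[[2.0, 1.0]]], dtype=np.float32)  # target policy
mu_logits = np.array([[[1.0, 1.0]]], dtype=np.float32)  # behaviour policy
actions = np.array([[0]], dtype=np.int32)
rho_t = distributions.categorical_importance_sampling_ratios(
    pi_logits, mu_logits, actions)
# softmax(pi)[0] ~= 0.731 and softmax(mu)[0] = 0.5, so rho_t ~= 1.46.
```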
{
"source": "joaogui1/sonnet",
"score": 2
} |
#### File: sonnet/src/axis_norm_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from sonnet.src import axis_norm
from sonnet.src import initializers
from sonnet.src import test_utils
import tensorflow as tf
class LayerNormTest(test_utils.TestCase, parameterized.TestCase):
def testSimpleCase(self):
layer = axis_norm.LayerNorm([1, 2], create_scale=False, create_offset=False)
inputs = tf.ones([2, 3, 3, 5])
outputs = layer(inputs).numpy()
for x in np.nditer(outputs):
self.assertEqual(x, 0.0)
def testSimpleCaseVar(self):
layer = axis_norm.LayerNorm([1, 2],
create_scale=True,
create_offset=True,
scale_init=initializers.Constant(0.5),
offset_init=initializers.Constant(2.0))
inputs = tf.ones([2, 3, 3, 5])
outputs = layer(inputs).numpy()
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
def testSimpleCaseNCHWVar(self):
layer = axis_norm.LayerNorm([1, 2],
create_scale=True,
create_offset=True,
scale_init=initializers.Constant(0.5),
offset_init=initializers.Constant(2.0),
data_format="NCHW")
inputs = tf.ones([2, 5, 3, 3])
outputs = layer(inputs).numpy()
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
def testDataFormatAgnosticVar(self):
c_last_layer = axis_norm.LayerNorm([1, 2],
create_scale=True,
create_offset=True)
c_first_layer = axis_norm.LayerNorm([2, 3],
create_scale=True,
create_offset=True,
data_format="NCHW")
inputs = tf.random.uniform([3, 4, 4, 5], 0, 10)
c_last_output = c_last_layer(inputs)
inputs = tf.transpose(inputs, [0, 3, 1, 2])
c_first_output = c_first_layer(inputs)
c_first_output = tf.transpose(c_first_output, [0, 2, 3, 1])
self.assertAllClose(c_last_output.numpy(), c_first_output.numpy())
def testSimpleCaseTensor(self):
layer = axis_norm.LayerNorm([1, 2], create_scale=False, create_offset=False)
inputs = tf.ones([2, 3, 3, 5])
scale = tf.constant(0.5, shape=(5,))
offset = tf.constant(2.0, shape=(5,))
outputs = layer(inputs, scale, offset).numpy()
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
def testSimpleCaseNCHWTensor(self):
layer = axis_norm.LayerNorm([1, 2],
data_format="NCHW",
create_scale=False,
create_offset=False)
inputs = tf.ones([2, 5, 3, 3])
scale = tf.constant(0.5, shape=(5, 1, 1))
offset = tf.constant(2.0, shape=(5, 1, 1))
outputs = layer(inputs, scale, offset).numpy()
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
def testDataFormatAgnosticTensor(self):
c_last_layer = axis_norm.LayerNorm([1, 2],
create_scale=False,
create_offset=False)
c_first_layer = axis_norm.LayerNorm([2, 3],
data_format="NCHW",
create_scale=False,
create_offset=False)
inputs = tf.random.uniform([3, 4, 4, 5], 0, 10)
scale = tf.random.normal((5,), mean=1.0)
offset = tf.random.normal((5,))
c_last_output = c_last_layer(inputs, scale, offset)
inputs = tf.transpose(inputs, [0, 3, 1, 2])
scale = tf.reshape(scale, (5, 1, 1))
offset = tf.reshape(offset, (5, 1, 1))
c_first_output = c_first_layer(inputs, scale, offset)
c_first_output = tf.transpose(c_first_output, [0, 2, 3, 1])
self.assertAllClose(c_last_output.numpy(), c_first_output.numpy())
@parameterized.parameters("NHW", "HWC", "channel_last")
def testInvalidDataFormat(self, data_format):
with self.assertRaisesRegexp(
ValueError,
"Unable to extract channel information from '{}'.".format(data_format)):
axis_norm.LayerNorm(
3, data_format=data_format, create_scale=False, create_offset=False)
@parameterized.parameters("NCHW", "NCW", "channels_first")
def testValidDataFormatChannelsFirst(self, data_format):
test = axis_norm.LayerNorm(
3, data_format=data_format, create_scale=False, create_offset=False)
self.assertEqual(test._channel_index, 1)
@parameterized.parameters("NHWC", "NWC", "channels_last")
def testValidDataFormatChannelsLast(self, data_format):
test = axis_norm.LayerNorm(
3, data_format=data_format, create_scale=False, create_offset=False)
self.assertEqual(test._channel_index, -1)
@parameterized.named_parameters(("String", "foo"), ("ListString", ["foo"]))
def testInvalidAxis(self, axis):
with self.assertRaisesRegexp(
ValueError, "`axis` should be an int, slice or iterable of ints."):
axis_norm.LayerNorm(axis, create_scale=False, create_offset=False)
def testNoScaleAndInitProvided(self):
with self.assertRaisesRegexp(
ValueError, "Cannot set `scale_init` if `create_scale=False`."):
axis_norm.LayerNorm(
3,
create_scale=False,
create_offset=True,
scale_init=initializers.Ones())
def testNoOffsetBetaInitProvided(self):
with self.assertRaisesRegexp(
ValueError, "Cannot set `offset_init` if `create_offset=False`."):
axis_norm.LayerNorm(
3,
create_scale=True,
create_offset=False,
offset_init=initializers.Zeros())
def testCreateScaleAndScaleProvided(self):
layer = axis_norm.LayerNorm([2], create_scale=True, create_offset=False)
with self.assertRaisesRegexp(
ValueError, "Cannot pass `scale` at call time if `create_scale=True`."):
layer(tf.ones([2, 3, 4]), scale=tf.ones([4]))
def testCreateOffsetAndOffsetProvided(self):
layer = axis_norm.LayerNorm([2], create_offset=True, create_scale=False)
with self.assertRaisesRegexp(
ValueError,
"Cannot pass `offset` at call time if `create_offset=True`."):
layer(tf.ones([2, 3, 4]), offset=tf.ones([4]))
def testSliceAxis(self):
slice_layer = axis_norm.LayerNorm(
slice(1, -1), create_scale=False, create_offset=False)
axis_layer = axis_norm.LayerNorm((1, 2),
create_scale=False,
create_offset=False)
inputs = tf.random.uniform([3, 4, 4, 5], 0, 10)
scale = tf.random.normal((5,), mean=1.0)
offset = tf.random.normal((5,))
slice_outputs = slice_layer(inputs, scale, offset)
axis_outputs = axis_layer(inputs, scale, offset)
self.assertAllEqual(slice_outputs.numpy(), axis_outputs.numpy())
def testRankChanges(self):
layer = axis_norm.LayerNorm((1, 2), create_scale=False, create_offset=False)
inputs = tf.ones([2, 3, 3, 5])
scale = tf.constant(0.5, shape=(5,))
offset = tf.constant(2.0, shape=(5,))
layer(inputs, scale, offset)
with self.assertRaisesRegexp(
ValueError,
"The rank of the inputs cannot change between calls, the original"):
layer(tf.ones([2, 3, 3, 4, 5]), scale, offset)
def testWorksWithFunction(self):
layer = axis_norm.LayerNorm((1, 2), create_scale=False, create_offset=False)
function_layer = tf.function(layer)
inputs = tf.ones([2, 3, 3, 5])
scale = tf.constant(0.5, shape=(5,))
offset = tf.constant(2.0, shape=(5,))
outputs = layer(inputs, scale, offset)
function_outputs = function_layer(inputs, scale, offset)
self.assertAllEqual(outputs.numpy(), function_outputs.numpy())
def testShapeAgnostic(self):
layer = axis_norm.LayerNorm((1, 2), create_scale=False, create_offset=False)
inputs_spec = tf.TensorSpec([None, None, None, None], dtype=tf.float32)
params_spec = tf.TensorSpec([None], dtype=tf.float32)
function_layer = tf.function(layer).get_concrete_function(
inputs_spec, params_spec, params_spec)
scale = tf.constant(0.5, shape=(5,))
offset = tf.constant(2.0, shape=(5,))
outputs = function_layer(tf.ones([2, 3, 3, 5]), scale, offset)
self.assertEqual(outputs.shape, [2, 3, 3, 5])
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
scale = tf.constant(0.5, shape=(3,))
offset = tf.constant(2.0, shape=(3,))
outputs = function_layer(tf.ones([3, 4, 6, 3]), scale, offset)
self.assertEqual(outputs.shape, [3, 4, 6, 3])
for x in np.nditer(outputs):
self.assertEqual(x, 2.0)
def test5DDataFormatAgnostic(self):
c_last_layer = axis_norm.LayerNorm([1, 2, 3],
create_scale=False,
create_offset=False)
c_first_layer = axis_norm.LayerNorm([2, 3, 4],
create_scale=False,
create_offset=False,
data_format="NCDHW")
inputs = tf.random.uniform([3, 4, 4, 4, 5], 0, 10)
scale = tf.random.normal((5,), mean=1.0)
offset = tf.random.normal((5,))
c_last_output = c_last_layer(inputs, scale, offset)
inputs = tf.transpose(inputs, [0, 4, 1, 2, 3])
scale = tf.reshape(scale, [-1, 1, 1, 1])
offset = tf.reshape(offset, [-1, 1, 1, 1])
c_first_output = c_first_layer(inputs, scale, offset)
c_first_output = tf.transpose(c_first_output, [0, 2, 3, 4, 1])
self.assertAllClose(
c_last_output.numpy(), c_first_output.numpy(), atol=1e-5, rtol=1e-5)
def test3DDataFormatAgnostic(self):
c_last_layer = axis_norm.LayerNorm([1],
create_scale=False,
create_offset=False)
c_first_layer = axis_norm.LayerNorm([2],
create_scale=False,
create_offset=False,
data_format="NCW")
inputs = tf.random.uniform([3, 4, 5], 0, 10)
scale = tf.random.normal((5,), mean=1.0)
offset = tf.random.normal((5,))
c_last_output = c_last_layer(inputs, scale, offset)
inputs = tf.transpose(inputs, [0, 2, 1])
scale = tf.reshape(scale, [-1, 1])
offset = tf.reshape(offset, [-1, 1])
c_first_output = c_first_layer(inputs, scale, offset)
c_first_output = tf.transpose(c_first_output, [0, 2, 1])
self.assertAllClose(
c_last_output.numpy(), c_first_output.numpy(), atol=1e-5, rtol=1e-5)
def testInstanceNormCorrectAxis(self):
layer = axis_norm.InstanceNorm(create_scale=True, create_offset=True)
inputs = tf.ones([3, 4, 5, 6])
layer(inputs)
self.assertEqual(layer._axis, (1, 2))
def testInstanceNormCorrectNCW(self):
layer = axis_norm.InstanceNorm(
create_scale=True, create_offset=True, data_format="channels_first")
inputs = tf.ones([3, 4, 5, 6])
layer(inputs)
self.assertEqual(layer._axis, (2, 3))
if __name__ == "__main__":
# tf.enable_v2_behavior()
tf.test.main()
```
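A hedged NumPy sketch of the statistic the tests above exercise: with `create_scale=False` and `create_offset=False`, `LayerNorm([1, 2])` standardizes each example over axes 1 and 2 (the epsilon value below is an assumption, not taken from the module).

```python
import numpy as np

x = np.random.uniform(0, 10, size=(2, 3, 3, 5)).astype(np.float32)
mean = x.mean(axis=(1, 2), keepdims=True)
var = x.var(axis=(1, 2), keepdims=True)
eps = 1e-5  # assumed small epsilon, as in typical layer norm implementations
normalized = (x - mean) / np.sqrt(var + eps)
# Constant inputs (tf.ones) therefore normalize to ~0, as testSimpleCase checks.
```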
#### File: sonnet/src/batch_norm.py
```python
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from sonnet.src import base
from sonnet.src import initializers
from sonnet.src import metrics
from sonnet.src import moving_averages
from sonnet.src import once
from sonnet.src import types
from sonnet.src import utils
import tensorflow as tf
from typing import Optional, Text, Tuple
class BaseBatchNorm(base.Module):
r"""Batch normalization module.
This implements normalization across the batch and spatial dimensions.
It maintains moving averages of the mean and variance which can be
used to normalize at test time. The constructor is generic and
requires the user to pass in objects to compute these.
At training time we use the batch statistics for that batch and these are then
used to update the moving averages.
At test time we can either use the moving averages of the batch statistics
(``test_local_stats=False``) or we can use the local statistics
(``test_local_stats=True``).
It transforms the input ``x`` into:
.. math::
\d{outputs} = \d{scale} \dfrac{x - \mu}{\sigma + \epsilon} + \d{offset}
Where :math:`\mu` and :math:`\sigma` are respectively the mean and standard
deviation of ``x``. Note that this module automatically uses the fused batch
norm op if the data format is ``NHWC``.
There are many different variations for how users want to manage scale and
offset if they require them at all. These are:
- No scale/offset in which case ``create_*`` should be set to ``False`` and
``scale``/``offset`` aren't passed when the module is called.
- Trainable scale/offset in which case ``create_*`` should be set to
``True`` and again ``scale``/``offset`` aren't passed when the module is
called. In this case this module creates and owns the ``scale``/``offset``
variables.
- Externally generated ``scale``/``offset``, such as for conditional
normalization, in which case ``create_*`` should be set to ``False`` and
then the values fed in at call time.
Attributes:
scale: If ``create_scale``, a trainable :tf:`Variable` holding the current
scale after the module is connected for the first time.
offset: If ``create_offset``, a trainable :tf:`Variable` holding the current
offset after the module is connected for the first time.
"""
def __init__(self,
create_scale: bool,
create_offset: bool,
moving_mean: metrics.Metric,
moving_variance: metrics.Metric,
eps: types.FloatLike = 1e-5,
scale_init: Optional[initializers.Initializer] = None,
offset_init: Optional[initializers.Initializer] = None,
data_format: Text = "channels_last",
name: Optional[Text] = None):
"""Constructs a ``BaseBatchNorm`` module.
Args:
create_scale: whether to create a trainable scale per channel applied
after the normalization.
create_offset: whether to create a trainable offset per channel applied
after normalization and scaling.
moving_mean: A metric which tracks the moving average of the mean which
can be used to normalize at test time.
moving_variance: A metric which tracks the moving average of the variance
which can be used to normalize at test time.
eps: Small epsilon to avoid division by zero variance. Defaults to
``1e-5``.
scale_init: Optional initializer for the scale variable. Can only be set
if ``create_scale=True``. By default scale is initialized to ``1``.
offset_init: Optional initializer for the offset variable. Can only be set
if ``create_offset=True``. By default offset is initialized to ``0``.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default it is ``channels_last``.
name: Name of the module.
"""
super(BaseBatchNorm, self).__init__(name=name)
self._eps = eps
self.moving_mean = moving_mean
self.moving_variance = moving_variance
self._data_format = data_format
self._channel_index = utils.get_channel_index(data_format)
self._create_scale = create_scale
self._create_offset = create_offset
if not self._create_scale and scale_init is not None:
raise ValueError("Cannot set `scale_init` if `create_scale=False`")
self._scale_init = scale_init or initializers.Ones()
if not self._create_offset and offset_init is not None:
raise ValueError("Cannot set `offset_init` if `create_offset=False`")
self._offset_init = offset_init or initializers.Zeros()
@utils.smart_autograph
def __call__(self,
inputs: tf.Tensor,
is_training: types.BoolLike,
test_local_stats: types.BoolLike = False,
scale: Optional[tf.Tensor] = None,
offset: Optional[tf.Tensor] = None):
"""Returns normalized inputs.
Args:
inputs: An n-D tensor of the data_format specified above on which the
transformation is performed.
is_training: Whether the module should be connected in training mode,
meaning the moving averages are updated.
test_local_stats: Whether local batch statistics should be used when
``is_training=False``. If not, moving averages are used. By default
``False``.
scale: A tensor up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the scale applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_scale=True``.
offset: A tensor up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the offset applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_offset=True``.
Returns:
An n-d tensor of the same shape as inputs that has been normalized.
"""
use_batch_stats = is_training or test_local_stats
if self._create_scale:
if scale is not None:
raise ValueError(
"Cannot pass `scale` at call time if `create_scale=True`.")
if self._create_offset:
if offset is not None:
raise ValueError(
"Cannot pass `offset` at call time if `create_offset=True`.")
self._initialize(inputs)
if scale is None:
scale = self.scale
if offset is None:
offset = self.offset
mean, variance = self._moments(inputs, use_batch_stats)
if self._fused:
out, mean, variance, _, _ = tf.raw_ops.FusedBatchNormV2(
x=inputs,
mean=mean,
variance=variance,
scale=scale,
offset=offset,
is_training=use_batch_stats,
epsilon=self._eps,
data_format=self._fused_data_format)
else:
out = tf.nn.batch_normalization(
inputs,
mean=mean,
variance=variance,
scale=scale,
offset=offset,
variance_epsilon=self._eps)
if is_training:
self._update_statistics(mean, variance)
return out
@once.once
def _initialize(self, inputs: tf.Tensor):
input_shape = inputs.shape
rank = len(input_shape)
self._fused = (rank == 4 and self._channel_index == -1)
self._fused_data_format = "NHWC" if self._channel_index == -1 else "NCHW"
if self._channel_index < 0:
channel_index = self._channel_index + rank
else:
channel_index = self._channel_index
self._axis = tuple(i for i in range(rank) if i != channel_index)
# Ensure all the variables are created on the first call
mean, variance = tf.nn.moments(inputs, self._axis, keepdims=True)
self.shape = mean.shape
self.moving_mean.initialize(mean)
self.moving_variance.initialize(variance)
dtype = inputs.dtype
if self._channel_index == -1:
params_shape = [inputs.shape[-1]]
else: # self._channel_index == 1
params_shape = [inputs.shape[1]] + [1] * (rank - 2)
# Creates scale and offset parameters - required for fused_batch_norm
# trainable set to with_scale and with_offset which gives no-op if false
self.scale = tf.Variable(
self._scale_init(params_shape, dtype),
name="scale",
trainable=self._create_scale)
self.offset = tf.Variable(
self._offset_init(params_shape, dtype),
name="offset",
trainable=self._create_offset)
if self._fused:
with tf.init_scope():
self._fused_constant = tf.constant([])
def _moments(self, inputs: tf.Tensor,
use_batch_stats: types.BoolLike) -> Tuple[tf.Tensor, tf.Tensor]:
if use_batch_stats:
if self._fused:
# The raw ops version of fused batch norm calculates the mean and
# variance internally but requires tensors to be passed in.
mean = self._fused_constant
variance = self._fused_constant
else:
mean, variance = tf.nn.moments(inputs, self._axis, keepdims=True)
else: # use moving stats
mean = self.moving_mean.value
variance = self.moving_variance.value
if self._fused:
mean = tf.squeeze(mean)
variance = tf.squeeze(variance)
return mean, variance
def _update_statistics(self, mean, variance):
if self._fused:
mean = tf.reshape(mean, self.shape)
variance = tf.reshape(variance, self.shape)
self.moving_mean.update(mean)
self.moving_variance.update(variance)
class BatchNorm(BaseBatchNorm):
"""Batch normalization with exponential moving average for test statistics.
See :class:`BaseBatchNorm` for details.
Attributes:
scale: If ``create_scale=True``, a trainable :tf:`Variable` holding the
current scale after the module is connected for the first time.
offset: If ``create_offset``, a trainable :tf:`Variable` holding the current
offset after the module is connected for the first time.
"""
def __init__(self,
create_scale: bool,
create_offset: bool,
decay_rate: float = 0.999,
eps: types.FloatLike = 1e-5,
scale_init: Optional[initializers.Initializer] = None,
offset_init: Optional[initializers.Initializer] = None,
data_format: Text = "channels_last",
name: Optional[Text] = None):
"""Constructs a ``BatchNorm`` module.
Args:
create_scale: whether to create a trainable scale per channel applied
after the normalization.
create_offset: whether to create a trainable offset per channel applied
after normalization and scaling.
decay_rate: Decay rate of the exponential moving averages of the mean and
variance.
eps: Small epsilon to avoid division by zero variance. Defaults to
``1e-5``.
scale_init: Optional initializer for the scale variable. Can only be set
if ``create_scale=True``. By default scale is initialized to ``1``.
offset_init: Optional initializer for the offset variable. Can only be set
if ``create_offset=True``. By default offset is initialized to ``0``.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default it is ``channels_last``.
name: Name of the module.
"""
with tf.name_scope(name or "batch_norm"):
moving_mean = moving_averages.ExponentialMovingAverage(decay_rate)
moving_variance = moving_averages.ExponentialMovingAverage(decay_rate)
super(BatchNorm, self).__init__(
create_scale=create_scale,
create_offset=create_offset,
moving_mean=moving_mean,
moving_variance=moving_variance,
eps=eps,
scale_init=scale_init,
offset_init=offset_init,
data_format=data_format,
name=name)
```
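A minimal usage sketch for the module above (shapes and hyperparameters are illustrative): training calls use batch statistics and update the moving averages, while evaluation calls use the moving averages unless `test_local_stats=True`.

```python
import tensorflow as tf
import sonnet as snt

bn = snt.BatchNorm(create_scale=True, create_offset=True, decay_rate=0.99)
x = tf.random.uniform([8, 4, 4, 3])
y_train = bn(x, is_training=True)   # batch stats; moving averages updated
y_eval = bn(x, is_training=False)   # moving-average stats
```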
#### File: src/conformance/goldens.py
```python
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import abc
from absl.testing import parameterized
import numpy as np
import six
import sonnet as snt
import tensorflow as tf
from typing import Text, Tuple, Sequence
_all_goldens = []
def named_goldens() -> Sequence[Tuple[Text, "Golden"]]:
return ((name, cls()) for _, name, cls in list_goldens())
def all_goldens(test_method):
return parameterized.named_parameters(named_goldens())(test_method)
def _register_golden(module_cls, golden_name):
def registration_fn(golden_cls):
_all_goldens.append((module_cls, golden_name, golden_cls))
golden_cls.name = golden_name
return golden_cls
return registration_fn
def list_goldens():
return list(_all_goldens)
def range_like(t, start=0):
"""Returns a tensor with sequential values of the same dtype/shape as `t`.
>>> range_like(tf.ones([2, 2]))
<tf.Tensor: ... shape=(2, 2), dtype=float32, numpy=
array([[ 0., 1.],
[ 2., 3.]], dtype=float32)>
>>> range_like(tf.ones([2, 2]), start=5)
<tf.Tensor: ... shape=(2, 2), dtype=float32, numpy=
array([[ 5., 6.],
[ 7., 8.]], dtype=float32)>
Args:
t: A tensor like object (with shape and dtype).
start: Value to start the range from.
Returns:
A `tf.Tensor` with sequential element values the same shape/dtype as `t`.
"""
return tf.reshape(
tf.cast(
tf.range(start,
np.prod(t.shape, dtype=int) + start), dtype=t.dtype),
t.shape)
@six.add_metaclass(abc.ABCMeta)
class Golden(object):
"""Represents a golden checkpoint file."""
@abc.abstractmethod
def create_module(self):
"""Should create a new module instance and return it."""
pass
@abc.abstractmethod
def create_all_variables(self, module):
"""Create all variables for the given model and return them."""
pass
@abc.abstractmethod
def forward(self, module):
"""Return the output from calling the module with a fixed input."""
pass
@six.add_metaclass(abc.ABCMeta)
class AbstractGolden(Golden):
"""Abstract base class for golden tests of single input modules."""
deterministic = True
has_side_effects = False
# Tolerance to be used for assertAllClose calls on TPU, where lower precision
# can mean results differ more.
tpu_atol = 1e-3
@abc.abstractproperty
def input_spec(self):
pass
@abc.abstractproperty
def num_variables(self):
pass
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x)
def create_all_variables(self, module):
self.forward(module)
variables = module.variables
assert len(variables) == self.num_variables, (
"Expected %d params, got %d %r" %
(self.num_variables, len(variables), variables))
return variables
# pylint: disable=missing-docstring
@_register_golden(snt.Linear, "linear_1x1")
class Linear1x1Test(AbstractGolden):
create_module = lambda _: snt.Linear(1)
input_spec = tf.TensorSpec([128, 1])
num_variables = 2
@_register_golden(snt.Linear, "linear_nobias_1x1")
class LinearNoBias1x1(AbstractGolden):
create_module = lambda _: snt.Linear(1, with_bias=False)
input_spec = tf.TensorSpec([1, 1])
num_variables = 1
@_register_golden(snt.Conv1D, "conv1d_3x3_2x2")
class Conv1D(AbstractGolden):
create_module = lambda _: snt.Conv1D(output_channels=3, kernel_shape=3)
input_spec = tf.TensorSpec([1, 2, 2])
num_variables = 2
@_register_golden(snt.Conv2D, "conv2d_3x3_2x2")
class Conv2D(AbstractGolden):
create_module = lambda _: snt.Conv2D(output_channels=3, kernel_shape=3)
input_spec = tf.TensorSpec([1, 2, 2, 2])
num_variables = 2
@_register_golden(snt.Conv3D, "conv3d_3x3_2x2")
class Conv3D(AbstractGolden):
create_module = lambda _: snt.Conv3D(output_channels=3, kernel_shape=3)
input_spec = tf.TensorSpec([1, 2, 2, 2, 2])
num_variables = 2
@_register_golden(snt.Conv1DTranspose, "conv1d_transpose_3x3_2x2")
class Conv1DTranspose(AbstractGolden):
create_module = (
lambda _: snt.Conv1DTranspose(output_channels=3, kernel_shape=3))
input_spec = tf.TensorSpec([1, 2, 2])
num_variables = 2
@_register_golden(snt.Conv2DTranspose, "conv2d_transpose_3x3_2x2")
class Conv2DTranspose(AbstractGolden):
create_module = (
lambda _: snt.Conv2DTranspose(output_channels=3, kernel_shape=3))
input_spec = tf.TensorSpec([1, 2, 2, 2])
num_variables = 2
@_register_golden(snt.Conv3DTranspose, "conv3d_transpose_3x3_2x2")
class Conv3DTranspose(AbstractGolden):
create_module = (
lambda _: snt.Conv3DTranspose(output_channels=3, kernel_shape=3))
input_spec = tf.TensorSpec([1, 2, 2, 2, 2])
num_variables = 2
@_register_golden(snt.DepthwiseConv2D, "depthwise_conv2d_3x3_2x2")
class DepthwiseConv2D(AbstractGolden):
create_module = lambda _: snt.DepthwiseConv2D(kernel_shape=3)
input_spec = tf.TensorSpec([1, 2, 2, 2])
num_variables = 2
@_register_golden(snt.nets.MLP, "mlp_3x4x5_1x3")
class MLP(AbstractGolden):
create_module = (lambda _: snt.nets.MLP([3, 4, 5]))
input_spec = tf.TensorSpec([1, 3])
num_variables = 6
@_register_golden(snt.nets.MLP, "mlp_nobias_3x4x5_1x3")
class MLPNoBias(AbstractGolden):
create_module = (lambda _: snt.nets.MLP([3, 4, 5], with_bias=False))
input_spec = tf.TensorSpec([1, 3])
num_variables = 3
@_register_golden(snt.nets.Cifar10ConvNet, "cifar10_convnet_2x3_2x2_1x3x3x2")
class Cifar10ConvNet(AbstractGolden):
create_module = (
lambda _: snt.nets.Cifar10ConvNet(output_channels=(2, 3), strides=(2, 2)))
input_spec = tf.TensorSpec([1, 3, 3, 2])
num_variables = 22
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=False, test_local_stats=True)["logits"]
@_register_golden(snt.LayerNorm, "layer_norm_1_1x3_2")
class LayerNorm(AbstractGolden):
create_module = (
lambda _: snt.LayerNorm(1, create_scale=True, create_offset=True))
input_spec = tf.TensorSpec([1, 3, 2])
num_variables = 2
@_register_golden(snt.InstanceNorm, "instance_norm_1_1x3_2")
class Instance(AbstractGolden):
create_module = (
lambda _: snt.InstanceNorm(create_scale=True, create_offset=True))
input_spec = tf.TensorSpec([1, 3, 2])
num_variables = 2
@_register_golden(snt.GroupNorm, "group_norm_2_1x3x4")
class GroupNorm(AbstractGolden):
create_module = (
lambda _: snt.GroupNorm(2, create_scale=True, create_offset=True))
input_spec = tf.TensorSpec([1, 3, 4])
num_variables = 2
@_register_golden(snt.BaseBatchNorm, "base_batch_norm_1x2x2x3")
class BaseBatchNorm(AbstractGolden):
create_module = (
lambda _: snt.BaseBatchNorm(True, False, FooMetric(), FooMetric())) # pytype: disable=wrong-arg-types
input_spec = tf.TensorSpec([1, 2, 2, 3])
num_variables = 2
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=False, test_local_stats=True)
@_register_golden(snt.BaseBatchNorm, "base_batch_norm_scale_offset_1x2x2x3")
class BaseBatchNormScaleOffset(AbstractGolden):
create_module = (
lambda _: snt.BaseBatchNorm(True, False, FooMetric(), FooMetric())) # pytype: disable=wrong-arg-types
input_spec = tf.TensorSpec([1, 2, 2, 3])
num_variables = 2
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=False, test_local_stats=True)
@_register_golden(snt.BatchNorm, "batch_norm_1x2x2x3")
class BatchNorm(AbstractGolden):
create_module = (lambda _: snt.BatchNorm(True, True))
input_spec = tf.TensorSpec([1, 2, 2, 3])
num_variables = 8
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=False, test_local_stats=True)
@_register_golden(snt.BatchNorm, "batch_norm_scale_offset_1x2x2x3")
class BatchNormScaleOffset(AbstractGolden):
create_module = (lambda _: snt.BatchNorm(True, True))
input_spec = tf.TensorSpec([1, 2, 2, 3])
num_variables = 8
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=False, test_local_stats=True)
@_register_golden(snt.ExponentialMovingAverage, "ema_2")
class ExponentialMovingAverage(AbstractGolden):
create_module = (lambda _: snt.ExponentialMovingAverage(decay=0.9))
input_spec = tf.TensorSpec([2])
num_variables = 3
has_side_effects = True
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x)
@_register_golden(snt.BatchNorm, "batch_norm_training_1x2x2x3")
class BatchNormTraining(AbstractGolden):
create_module = (lambda _: snt.BatchNorm(True, True))
input_spec = tf.TensorSpec([1, 2, 2, 3])
num_variables = 8
has_side_effects = True
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=True)
@_register_golden(snt.distribute.CrossReplicaBatchNorm,
"cross_replica_batch_norm_1x2x2x3")
class CrossReplicaBatchNorm(AbstractGolden):
create_module = (
lambda _: snt.BaseBatchNorm(True, False, FooMetric(), FooMetric()))
input_spec = tf.TensorSpec([1, 2, 2, 3])
num_variables = 2
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=False, test_local_stats=True)
@_register_golden(snt.Dropout, "dropout")
class DropoutVariableRate(AbstractGolden):
create_module = lambda _: snt.Dropout(rate=tf.Variable(0.5))
input_spec = tf.TensorSpec([3, 3, 3])
num_variables = 1
deterministic = False
def forward(self, module):
tf.random.set_seed(3)
x = range_like(self.input_spec, start=1)
return module(x, is_training=True)
class AbstractRNNGolden(AbstractGolden):
def forward(self, module):
# Small inputs to ensure that tf.tanh and tf.sigmoid don't saturate.
x = 1.0 / range_like(self.input_spec, start=1)
batch_size = self.input_spec.shape[0]
prev_state = module.initial_state(batch_size)
y, next_state = module(x, prev_state)
del next_state
return y
@_register_golden(snt.Conv1DLSTM, "conv1d_lstm_3x3_2x2")
class Conv1DLSTM(AbstractRNNGolden):
input_spec = tf.TensorSpec([1, 2, 2])
num_variables = 3
def create_module(self):
return snt.Conv1DLSTM(
input_shape=self.input_spec.shape[1:],
output_channels=3,
kernel_shape=3)
@_register_golden(snt.Conv2DLSTM, "conv2d_lstm_3x3_2x2")
class Conv2DLSTM(AbstractRNNGolden):
input_spec = tf.TensorSpec([1, 2, 2, 2])
num_variables = 3
def create_module(self):
return snt.Conv2DLSTM(
input_shape=self.input_spec.shape[1:],
output_channels=3,
kernel_shape=3)
@_register_golden(snt.Conv3DLSTM, "conv3d_lstm_3x3_2x2")
class Conv3DLSTM(AbstractRNNGolden):
input_spec = tf.TensorSpec([1, 2, 2, 2, 2])
num_variables = 3
def create_module(self):
return snt.Conv3DLSTM(
input_shape=self.input_spec.shape[1:],
output_channels=3,
kernel_shape=3)
@_register_golden(snt.GRU, "gru_1")
class GRU(AbstractRNNGolden):
create_module = lambda _: snt.GRU(hidden_size=1)
input_spec = tf.TensorSpec([1, 128])
num_variables = 3
@_register_golden(snt.LSTM, "lstm_1")
class LSTM(AbstractRNNGolden):
create_module = lambda _: snt.LSTM(hidden_size=1)
input_spec = tf.TensorSpec([1, 128])
num_variables = 3
@_register_golden(snt.LSTM, "lstm_8_projected_1")
class LSTMWithProjection(AbstractRNNGolden):
create_module = lambda _: snt.LSTM(hidden_size=8, projection_size=1)
input_spec = tf.TensorSpec([1, 128])
num_variables = 4
@_register_golden(snt.UnrolledLSTM, "unrolled_lstm_1")
class UnrolledLSTM(AbstractRNNGolden):
create_module = lambda _: snt.UnrolledLSTM(hidden_size=1)
input_spec = tf.TensorSpec([1, 1, 128])
num_variables = 3
@_register_golden(snt.VanillaRNN, "vanilla_rnn_8")
class VanillaRNN(AbstractRNNGolden):
create_module = lambda _: snt.VanillaRNN(hidden_size=8)
input_spec = tf.TensorSpec([1, 128])
num_variables = 3
@_register_golden(snt.TrainableState, "trainable_state")
class TrainableState(AbstractGolden):
create_module = lambda _: snt.TrainableState(tf.zeros([1]))
input_spec = tf.TensorSpec(())
num_variables = 1
@_register_golden(snt.Bias, "bias_3x3x3")
class BiasTest(AbstractGolden):
create_module = lambda _: snt.Bias()
input_spec = tf.TensorSpec([1, 3, 3, 3])
num_variables = 1
@_register_golden(snt.Embed, "embed_100_100")
class EmbedTest(AbstractGolden):
create_module = lambda _: snt.Embed(vocab_size=100, embed_dim=100)
input_spec = tf.TensorSpec([10], dtype=tf.int32)
num_variables = 1
@_register_golden(snt.Mean, "mean_2x2")
class MeanTest(AbstractGolden):
create_module = lambda _: snt.Mean()
input_spec = tf.TensorSpec([2, 2])
num_variables = 2
has_side_effects = True
@_register_golden(snt.Sum, "sum_2x2")
class SumTest(AbstractGolden):
create_module = lambda _: snt.Sum()
input_spec = tf.TensorSpec([2, 2])
num_variables = 1
has_side_effects = True
@_register_golden(snt.nets.ResNet, "resnet50")
class ResNet(AbstractGolden):
create_module = (lambda _: snt.nets.ResNet([1, 1, 1, 1], 9))
input_spec = tf.TensorSpec([1, 8, 8, 3])
num_variables = 155
has_side_effects = True
def forward(self, module):
x = range_like(self.input_spec, start=1)
return module(x, is_training=True)
@_register_golden(snt.nets.VectorQuantizer, "vqvae")
class VectorQuantizerTest(AbstractGolden):
def create_module(self):
return snt.nets.VectorQuantizer(
embedding_dim=4, num_embeddings=6, commitment_cost=0.25)
# Input can be any shape as long as final dimension is equal to embedding_dim.
input_spec = tf.TensorSpec([2, 3, 4])
def forward(self, module):
x = range_like(self.input_spec)
return module(x, is_training=True)
# Numerical results can be quite different on TPU, be a bit more loose here.
tpu_atol = 4e-2
num_variables = 1
@_register_golden(snt.nets.VectorQuantizerEMA, "vqvae_ema_train")
class VectorQuantizerEMATrainTest(AbstractGolden):
def create_module(self):
return snt.nets.VectorQuantizerEMA(
embedding_dim=5, num_embeddings=7, commitment_cost=0.5, decay=0.9)
# Input can be any shape as long as final dimension is equal to embedding_dim.
input_spec = tf.TensorSpec([2, 5])
def forward(self, module):
x = range_like(self.input_spec)
return module(x, is_training=True)
# Numerical results can be quite different on TPU, be a bit more loose here.
tpu_atol = 4e-2
num_variables = 7 # 1 embedding, then 2 EMAs each of which contain 3.
has_side_effects = True
@_register_golden(snt.nets.VectorQuantizerEMA, "vqvae_ema_eval")
class VectorQuantizerEMAEvalTest(AbstractGolden):
def create_module(self):
return snt.nets.VectorQuantizerEMA(
embedding_dim=3, num_embeddings=4, commitment_cost=0.5, decay=0.9)
# Input can be any shape as long as final dimension is equal to embedding_dim.
input_spec = tf.TensorSpec([2, 3])
def forward(self, module):
x = range_like(self.input_spec)
return module(x, is_training=False)
# Numerical results can be quite different on TPU, be a bit more loose here.
tpu_atol = 4e-2
num_variables = 7 # 1 embedding, then 2 EMAs each of which contain 3.
has_side_effects = False # only has side effects when is_training==True
# pylint: enable=missing-docstring
class FooMetric(snt.Metric):
"""Used for testing a class which uses Metrics."""
def initialize(self, x):
pass
def reset(self):
pass
def update(self, x):
pass
```
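Registering a new golden follows the pattern above; a hypothetical entry (module choice, name string, and shapes are illustrative) would look like this if added alongside the others in goldens.py:

```python
@_register_golden(snt.Linear, "linear_5x5_example")
class LinearExample(AbstractGolden):
  create_module = lambda _: snt.Linear(5)
  input_spec = tf.TensorSpec([1, 5])
  num_variables = 2  # weight matrix + bias
```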
#### File: sonnet/src/deferred_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sonnet.src import base
from sonnet.src import deferred
from sonnet.src import test_utils
import tensorflow as tf
class DeferredTest(test_utils.TestCase):
def test_target(self):
target = ExampleModule()
mod = deferred.Deferred(lambda: target)
self.assertIs(mod.target, target)
def test_only_computes_target_once(self):
target = ExampleModule()
targets = [target]
mod = deferred.Deferred(targets.pop)
for _ in range(10):
# If target was recomputed more than once pop should fail.
self.assertIs(mod.target, target)
self.assertEmpty(targets)
def test_attr_forwarding_fails_before_construction(self):
mod = deferred.Deferred(ExampleModule)
with self.assertRaises(AttributeError):
getattr(mod, "foo")
def test_getattr(self):
mod = deferred.Deferred(ExampleModule)
mod()
self.assertIs(mod.w, mod.target.w)
def test_setattr(self):
mod = deferred.Deferred(ExampleModule)
mod()
new_w = tf.ones_like(mod.w)
mod.w = new_w
self.assertIs(mod.w, new_w)
self.assertIs(mod.target.w, new_w)
def test_setattr_on_target(self):
mod = deferred.Deferred(ExampleModule)
mod()
w = tf.ones_like(mod.w)
mod.w = None
# Assigning to the target directly should reflect in the parent.
mod.target.w = w
self.assertIs(mod.w, w)
self.assertIs(mod.target.w, w)
def test_delattr(self):
mod = deferred.Deferred(ExampleModule)
mod()
self.assertTrue(hasattr(mod.target, "w"))
del mod.w
self.assertFalse(hasattr(mod.target, "w"))
def test_alternative_forward(self):
mod = deferred.Deferred(AlternativeForwardModule, call_methods=("forward",))
self.assertEqual(mod.forward(), 42)
def test_alternative_forward_call_type_error(self):
mod = deferred.Deferred(AlternativeForwardModule, call_methods=("forward",))
msg = "'AlternativeForwardModule' object is not callable"
with self.assertRaisesRegexp(TypeError, msg):
mod()
def test_name_scope(self):
mod = deferred.Deferred(ExampleModule)
mod()
self.assertEqual(mod.name_scope.name, "deferred/")
self.assertEqual(mod.target.name_scope.name, "example_module/")
def test_str(self):
m = ExampleModule()
d = deferred.Deferred(lambda: m)
self.assertEqual("Deferred(%s)" % m, str(d))
def test_repr(self):
m = ExampleModule()
d = deferred.Deferred(lambda: m)
self.assertEqual("Deferred(%r)" % m, repr(d))
class ExampleModule(base.Module):
def __init__(self):
super(ExampleModule, self).__init__()
self.w = tf.Variable(1.)
def __str__(self):
return "ExampleModuleStr"
def __repr__(self):
return "ExampleModuleRepr"
def __call__(self):
return self.w
class AlternativeForwardModule(base.Module):
def forward(self):
return 42
if __name__ == "__main__":
# tf.enable_v2_behavior()
tf.test.main()
```
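A short sketch of the behaviour these tests pin down (module layout assumed from the imports above): construction of the target is delayed until the first call, after which attribute access forwards to it.

```python
import tensorflow as tf
from sonnet.src import base, deferred

class Example(base.Module):
  def __init__(self):
    super(Example, self).__init__()
    self.w = tf.Variable(1.)
  def __call__(self):
    return self.w

mod = deferred.Deferred(Example)
_ = mod()     # target constructed here, on the first call
print(mod.w)  # forwarded to the underlying Example instance
```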
#### File: sonnet/src/embed_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from sonnet.src import embed
from sonnet.src import initializers
from sonnet.src import test_utils
import tensorflow as tf
class EmbedTest(test_utils.TestCase, parameterized.TestCase):
@parameterized.parameters([1, 10, 100])
def test_vocab_size(self, vocab_size):
e = embed.Embed(vocab_size=vocab_size)
self.assertEqual(e.vocab_size, vocab_size)
self.assertEqual(e.embeddings.shape[0], vocab_size)
@parameterized.parameters([1, 10, 100])
def test_embed_dim(self, embed_dim):
e = embed.Embed(vocab_size=100, embed_dim=embed_dim)
self.assertEqual(e.embed_dim, embed_dim)
self.assertEqual(e.embeddings.shape[1], embed_dim)
@parameterized.parameters([(1, 1), (10, 10), (100, 100)])
def test_existing_vocab(self, vocab_size, embed_dim):
existing_vocab = tf.ones([vocab_size, embed_dim])
e = embed.Embed(existing_vocab=existing_vocab)
self.assertEqual(e.vocab_size, vocab_size)
self.assertEqual(e.embed_dim, embed_dim)
self.assertAllEqual(e.embeddings.read_value(), existing_vocab)
@parameterized.parameters([True, False])
def test_densify_gradients(self, densify_gradients):
e = embed.Embed(1, densify_gradients=densify_gradients)
with tf.GradientTape() as tape:
y = e([0])
dy = tape.gradient(y, e.embeddings)
if densify_gradients:
self.assertIsInstance(dy, tf.Tensor)
else:
self.assertIsInstance(dy, tf.IndexedSlices)
def test_initializer(self):
e = embed.Embed(1, 1, initializer=initializers.Constant(28.))
self.assertAllEqual(e.embeddings.read_value(), [[28.]])
def test_pinned_to_cpu(self):
with tf.device("CPU"):
e = embed.Embed(1)
spec = tf.DeviceSpec.from_string(e.embeddings.device)
self.assertEqual(spec.device_type, "CPU")
@parameterized.parameters([True, False])
def test_trainable(self, trainable):
e = embed.Embed(1, trainable=trainable)
self.assertEqual(e.embeddings.trainable, trainable)
@parameterized.parameters([tf.float32, tf.float16])
def test_dtype(self, dtype):
if dtype == tf.float16 and self.primary_device == "TPU":
self.skipTest("float16 embeddings not supported on TPU.")
e = embed.Embed(1, dtype=dtype)
self.assertEqual(e.embeddings.dtype, dtype)
def test_name(self):
e = embed.Embed(1, name="my_embedding")
self.assertEqual(e.name, "my_embedding")
self.assertEqual(e.embeddings.name, "my_embedding/embeddings:0")
if __name__ == "__main__":
# tf.enable_v2_behavior()
tf.test.main()
```
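For reference, a hedged sketch of the lookup these tests cover: integer ids index rows of the `[vocab_size, embed_dim]` embedding matrix.

```python
import tensorflow as tf
from sonnet.src import embed

e = embed.Embed(vocab_size=100, embed_dim=16)
ids = tf.constant([3, 7, 42])
vectors = e(ids)  # shape [3, 16]: one embedding row per id
```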
#### File: sonnet/src/sequential_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from sonnet.src import sequential
from sonnet.src import test_utils
import tensorflow as tf
input_parameters = parameterized.parameters(object(), ([[[1.]]],), ({1, 2, 3},),
None, "str", 1)
class SequentialTest(test_utils.TestCase, parameterized.TestCase):
@input_parameters
def test_empty(self, value):
net = sequential.Sequential()
self.assertIs(net(value), value)
@input_parameters
def test_empty_drops_varargs_varkwargs(self, value):
net = sequential.Sequential()
self.assertIs(net(value, object(), keyword=object()), value)
@input_parameters
def test_identity_chain(self, value):
net = sequential.Sequential([identity, identity, identity])
self.assertIs(net(value), value)
def test_call(self):
seq = sequential.Sequential([append_character(ch) for ch in "rocks!"])
self.assertEqual(seq("Sonnet "), "Sonnet rocks!")
def test_varargs_varkwargs_to_call(self):
layer1 = lambda a, b, c: ((a + b + c), (c + b + a))
layer2 = lambda a: a[0] + "," + a[1]
net = sequential.Sequential([layer1, layer2])
self.assertEqual(net("a", "b", c="c"), "abc,cba")
def identity(v):
return v
def append_character(c):
return lambda v: v + c
if __name__ == "__main__":
# tf.enable_v2_behavior()
tf.test.main()
``` |
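A compact sketch of the semantics under test: `Sequential` threads a value through each layer in order, and (as `test_varargs_varkwargs_to_call` shows) extra `*args`/`**kwargs` only reach the first layer.

```python
from sonnet.src import sequential

net = sequential.Sequential([lambda x: x + 1, lambda x: x * 2])
assert net(3) == 8  # (3 + 1) * 2
```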
{
"source": "joaoguilherme1/convert-files",
"score": 2
} |
#### File: tudoparapdf/main/views.py
```python
from django.http import FileResponse, HttpResponse
from django.shortcuts import render
from django.template.response import TemplateResponse
import PyPDF2
import pyheif
from PIL import Image
import zipfile as zp
def home(request):
return TemplateResponse(request, 'home.html', {})
def pdf_merge(request):
if request.method == 'GET':
return TemplateResponse(request, 'pdf_merge.html', {})
elif request.method == 'POST':
final_name = request.POST['final_name']
arqpdf_1 = request.FILES.getlist('arqpdf_1')
arqpdf_2 = request.FILES.getlist('arqpdf_2')
name_dir_with_main_dir = f'/tmp/{final_name}.pdf'
dados_arq1 = PyPDF2.PdfFileReader(open(arqpdf_1[0].temporary_file_path(), "rb"))
dados_arq2 = PyPDF2.PdfFileReader(open(arqpdf_2[0].temporary_file_path(), "rb"))
merge = PyPDF2.PdfFileMerger()
merge.append(dados_arq1)
merge.append(dados_arq2)
merge.write(name_dir_with_main_dir)
final = open(name_dir_with_main_dir, "rb")
return FileResponse(final)
def pdf_exclude_and_merge(request):
if request.method == 'GET':
return TemplateResponse(request, 'pdf_exclude_and_merge.html', {})
elif request.method == 'POST':
nome_arquivo_saida = request.POST['nome_arquivo_saida']
pagina_pra_tirar = request.POST['pagina_pra_tirar']
arquivo_entrada = request.FILES.getlist('arquivo_entrada')
mid_arquivo = '/tmp/midlevel.pdf'
arquivo_pra_merge = request.FILES.getlist('arquivo_pra_merge')
name_dir_with_main_dir = f'/tmp/{nome_arquivo_saida}.pdf'
# remove the requested page from the input file
data = PyPDF2.PdfFileWriter()
pdfdata = PyPDF2.PdfFileReader(arquivo_entrada[0].temporary_file_path())
for i in range(pdfdata.getNumPages()):
if str(i) not in pagina_pra_tirar:
page = pdfdata.getPage(i)
data.addPage(page)
with open(mid_arquivo, "wb") as f:
data.write(f)
# merge the remaining pages with the second file
dados_arq1 = PyPDF2.PdfFileReader(open(mid_arquivo, "rb"))
dados_arq2 = PyPDF2.PdfFileReader(open(arquivo_pra_merge[0].temporary_file_path(), "rb"))
merge = PyPDF2.PdfFileMerger()
merge.append(dados_arq1)
merge.append(dados_arq2)
merge.write(name_dir_with_main_dir)
final = open(name_dir_with_main_dir, "rb")
return FileResponse(final)
def csv(request):
return TemplateResponse(request, 'csv.html', {})
def docx(request):
return TemplateResponse(request, 'docx.html', {})
def heic_to_jpeg(request):
if request.method == 'GET':
return render(request, 'heic_to_jpeg.html')
elif request.method == 'POST':
name_files = request.POST['nome_arquivo']
name_dir = request.POST['nome_pasta']
images_heic = request.FILES.getlist('file[]')
# Apply each default independently, then build the output path.
if name_files == '':
name_files = 'helpmydoc_arquivo'
if name_dir == '':
name_dir = 'Pasta_helpmydoc'
name_dir_with_main_dir = f'/tmp/{name_dir}.zip'
if len(images_heic) == 0 or len(images_heic) > 20:
return HttpResponse(status=403)
else:
middle_zipfile = zp.ZipFile(name_dir_with_main_dir, 'w')
for i in range(len(images_heic)):
if images_heic[i].size > 4000000:
return HttpResponse(status=403)
else:
heif_file = pyheif.read(images_heic[i].temporary_file_path())
heif_file_to_jpeg = Image.frombytes(
heif_file.mode,
heif_file.size,
heif_file.data,
"raw",
heif_file.mode,
heif_file.stride,
)
heif_file_to_jpeg.save(f'/tmp/{name_files}_{i}.jpeg', "JPEG")
middle_zipfile.write(f'/tmp/{name_files}_{i}.jpeg')
middle_zipfile.close()
final_zipfile = open(name_dir_with_main_dir, 'rb')
return FileResponse(final_zipfile)
``` |
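The original record does not include the URL configuration; a hypothetical `urls.py` wiring these views (route strings, names, and the `main` module path are assumptions) might look like:

```python
from django.urls import path
from main import views

urlpatterns = [
    path('', views.home, name='home'),
    path('pdf-merge/', views.pdf_merge, name='pdf_merge'),
    path('pdf-exclude-and-merge/', views.pdf_exclude_and_merge,
         name='pdf_exclude_and_merge'),
    path('heic-to-jpeg/', views.heic_to_jpeg, name='heic_to_jpeg'),
]
```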
{
"source": "joaoguilherme1/DekatrianDateConversion",
"score": 4
} |
#### File: joaoguilherme1/DekatrianDateConversion/date_conversion.py
```python
def Dek2week(dekDay, dekMonth, dekYear):
"""
Returns the Gregorian week day from a Dekatrian date.
1 = Sunday; 2 = Monday; 3 = Tuesday ... 7 = Saturday.
"""
weekDay = ((WeekdayOnFirstAuroran(dekYear)
+ DekatrianWeek(dekDay, dekMonth) - 2) % 7) + 1
if dekMonth == 0:
weekDay = ((weekDay - 3 + dekDay) % 7) + 1
return weekDay
def DekatrianWeek(dekDay, dekMonth):
"""
Returns the Dekatrian week day from a Dekatrian date.
Here we can see the elegance of Dekatrian, since it's not necessary to
inform the year. Actually, barely it's necessary to inform the month,
as it's only needed to check if that is an Achronian day.
0 = Achronian; 1 = first week day; 2 = second week day ... 7 = seventh.
"""
if dekMonth == 0:
return 0
else:
dekWeekDay = ((dekDay-1) % 7) + 1
return dekWeekDay
def WeekdayOnFirstAuroran(dekYear):
"""
Returns the Gregorian week day for the 1st of Auroran of a given year.
"""
weekDay = ((1 + 5*((dekYear) % 4) + 4*((dekYear) % 100)
+ 6*((dekYear) % 400)) % 7) + 1
return weekDay
def CheckLeapYear(dekYear):
if (dekYear % 4 == 0) and (dekYear % 100 != 0 or dekYear % 400 == 0):
return 1
else:
return 0
def YearDayOnDekaDate(dekDay, dekMonth, dekYear):
"""
Returns the day of the year from a Dekatrian date.
Achronian is the day 1.
Sinchronian is day 2 when it exists.
"""
if dekMonth == 0:
return dekDay
else:
return (CheckLeapYear(dekYear)) + 1 + (dekMonth-1)*28 + dekDay
def YearDayOnGregDate(day, month, year):
"""
Returns the day of the year from a Gregorian date.
Jan 1 is the day 1.
Dec 31 is the day 365 or 366, depending on whether it's a leap year
"""
Jan = 31
Fev = 28 + CheckLeapYear(year)
Mar = 31
Apr = 30
Mai = 31
Jun = 30
Jul = 31
Ago = 31
Set = 30
Out = 31
Nov = 30
Dez = 31
Meses = (Jan, Fev, Mar, Apr, Mai, Jun, Jul, Ago, Set, Out, Nov, Dez)
i = 0
days = 0
while i < (month-1):
days += Meses[i]
i += 1
return days + day
def Dek2Greg(dekDay, dekMonth, dekYear):
"""
Returns a Gregorian date from a Dekatrian date.
"""
YearDay = YearDayOnDekaDate(dekDay, dekMonth, dekYear)
Jan = 31
Fev = 28 + CheckLeapYear(dekYear)
Mar = 31
Apr = 30
Mai = 31
Jun = 30
Jul = 31
Ago = 31
Set = 30
Out = 31
Nov = 30
Dez = 31
Meses = (Jan, Fev, Mar, Apr, Mai, Jun, Jul, Ago, Set, Out, Nov, Dez)
for mes, dias in enumerate(Meses, start=1):
if YearDay > dias:
YearDay -= dias
else:
break
return (YearDay, mes, dekYear)
def Greg2Dek(day, month, year):
"""
Returns a Dekatrian date from a Gregorian date
"""
YearDay = YearDayOnGregDate(day, month, year)
LeapYear = CheckLeapYear(year)
if YearDay > (1 + LeapYear):
YearDay -= 1 + LeapYear
dekMonth = int((YearDay-1) / 28) + 1
dekDay = (YearDay-1) % 28 + 1
else:
dekMonth = 0
dekDay = day
return (dekDay, dekMonth, year)
if __name__ == "__main__":
# Examples #
print("Dekatrian 28\\13\\2015 falls on Greg week day: "
+ str(Dek2week(28, 13, 2015)))
print("Dekatrian 1\\0\\2016 falls on Greg week day: "
+ str(Dek2week(1, 0, 2016)))
print("Dekatrian 2\\0\\2016 falls on Greg week day: "
+ str(Dek2week(2, 0, 2016)))
print("Dekatrian 1\\1\\2016 falls on Greg week day: "
+ str(Dek2week(1, 1, 2016)))
print("Achronian corresponds to Dekatrian week day: "
+ str(DekatrianWeek(1, 0)))
print("Dekatrian 1\\1\\2016 happens on Gregorian week day: "
+ str(WeekdayOnFirstAuroran(2016)))
print("Dekatrian 3\\1\\2017 is the year day: "
+ str(YearDayOnDekaDate(3, 1, 2017)))
print("Dekatrian 10\\10\\2017 corresponds to Gregorian "
+ str(Dek2Greg(10, 10, 2017)))
print("Gregorian 29/12/2016 is the year day: "
+ str(YearDayOnGregDate(29, 12, 2016)))
print("Gregorian 3/1/2016 corresponds to Dekatrian: "
+ str(Greg2Dek(3, 1, 2016)))
``` |
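A quick round-trip check consistent with the functions above (import path assumed from the file header; 2017 is not a leap year, so day 1 is the single Achronian day):

```python
from date_conversion import Dek2Greg, Greg2Dek

assert Dek2Greg(10, 10, 2017) == (20, 9, 2017)
assert Greg2Dek(20, 9, 2017) == (10, 10, 2017)
```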
{
"source": "joaoguilherme1/dekatrian-date-conversion",
"score": 3
} |
#### File: dekatrian-date-conversion/dekatrian_date_conversion/base.py
```python
"""
Some functions to convert between Dekatrian and Gregorian calendar.
@author: Pena
11\10\2017
dekatrian.com
"""
import calendar
def dek_to_week(dek_day: int, dek_month: int, dek_year: int) -> int:
"""Returns the Gregorian week day from a Dekatrian date.
Args:
dek_day (int): Day of the month.
dek_month (int): Month of the year.
dek_year (int): Year.
Return:
int: The week day.
Example: 1 = Sunday; 2 = Monday; 3 = Tuesday ... 7 = Saturday.
"""
week_day = (
(
week_day_on_first_auroran(dek_year)
+ dekatrian_week(dek_day, dek_month)
- 2
)
% 7
) + 1
if dek_month == 0:
week_day = ((week_day - 3 + dek_day) % 7) + 1
return week_day
def dekatrian_week(dek_day: int, dek_month: int) -> int:
"""Returns the Dekatrian week day from a Dekatrian date.
Here we can see the elegance of Dekatrian, since it's not necessary to
inform the year. The month is barely necessary either: it's only used to
check whether the day is an Achronian day.
Args:
dek_day (int): Day of the month.
dek_month (int): Month of the year.
Return:
int: The week day.
Example: 0 = Achronian; 1 = first week day; 2 = second week day ... 7 = seventh.
"""
if dek_month == 0:
return 0
else:
dek_week_day = ((dek_day - 1) % 7) + 1
return dek_week_day
def week_day_on_first_auroran(dek_year: int) -> int:
"""Returns the Gregorian week day for the first Auroran of a given year
Args:
dek_year (int): Year.
Return:
int: The week day.
Example: 1 = Sunday; 2 = Monday; 3 = Tuesday ... 7 = Saturday.
"""
week_day = (
(
1
+ 5 * ((dek_year) % 4)
+ 4 * ((dek_year) % 100)
+ 6 * ((dek_year) % 400)
)
% 7
) + 1
return week_day
def year_day_on_deka_date(dek_day: int, dek_month: int, dek_year: int) -> int:
"""Returns the day of the year from a Dekatrian date.
Achronian is the day 1.
Sinchronian is day 2 when it exists.
Args:
dek_day (int): Day of the month.
dek_month (int): Month of the year.
dek_year (int): Year.
Return:
int: The day of the year.
"""
if dek_month == 0:
return dek_day
else:
return (calendar.isleap(dek_year)) + 1 + (dek_month - 1) * 28 + dek_day
def year_day_on_greg_date(day: int, month: int, year: int) -> int:
"""Returns the day of the year from a Gregorian date.
Example: Jan 1 is day 1;
Dec 31 is day 365 or 366, depending on whether it's a leap year.
Args:
day (int): Day of the month. Example: Jan 1 is the day 1.
month (int): Month of the year.
year (int): Year.
Return:
int: The day of the year.
"""
JAN = 31
FEB = 28 + calendar.isleap(year)
MAR = 31
APR = 30
MAY = 31
JUN = 30
JUL = 31
AUG = 31
SEP = 30
OCT = 31
NOV = 30
DEC = 31
gregorian_calendar_months = (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
) # TODO: change this to a dictionary
i = 0
days = 0
while i < (month - 1):
days += gregorian_calendar_months[i]
i += 1
return days + day
def dek_to_greg(dek_day: int, dek_month: int, dek_year: int) -> tuple:
"""Returns a Gregorian date from a Dekatrian date.
Args:
dek_day (int): Day of the month.
dek_month (int): Month of the year.
dek_year (int): Year.
Return:
tuple: A tuple with the day, month and year.
"""
year_day = year_day_on_deka_date(dek_day, dek_month, dek_year)
JAN = 31
FEB = 28 + calendar.isleap(dek_year)
MAR = 31
APR = 30
MAY = 31
JUN = 30
JUL = 31
AUG = 31
SEP = 30
OCT = 31
NOV = 30
DEC = 31
gregorian_calendar_months = (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
    )  # TODO: change this to a dictionary
for month, days in enumerate(gregorian_calendar_months, start=1):
if year_day > days:
year_day -= days
else:
break
return (year_day, month, dek_year)
def greg_to_dek(day: int, month: int, year: int) -> tuple:
"""Returns a Dekatrian date from a Gregorian date
Args:
day (int): Day of the month.
month (int): Month of the year.
year (int): Year.
Return:
tuple: A tuple with the day, month and year.
"""
year_day = year_day_on_greg_date(day, month, year)
if year_day > (1 + calendar.isleap(year)):
year_day -= 1 + calendar.isleap(year)
        dek_month = (year_day - 1) // 28 + 1
dek_day = (year_day - 1) % 28 + 1
else:
dek_month = 0
dek_day = day
return (dek_day, dek_month, year)
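# A minimal, self-contained sanity check (illustrative; not part of the original
# module). The expected values follow from the conversion rules documented in the
# docstrings above: Achronian is day 1 of the Dekatrian year, followed by thirteen
# 28-day months.
if __name__ == '__main__':
    assert greg_to_dek(3, 1, 2016) == (1, 1, 2016)     # Jan 3, 2016 is the first Auroran of a leap year.
    assert dek_to_greg(1, 1, 2016) == (3, 1, 2016)     # Round trip back to the Gregorian date.
    assert year_day_on_greg_date(29, 12, 2016) == 364  # Dec 29 is day 364 of the leap year 2016.
    assert week_day_on_first_auroran(2017) == 2        # Jan 2, 2017 (the first Auroran) was a Monday.
    assert dekatrian_week(5, 0) == 0                   # Any day in month 0 is Achronian.
    assert dek_to_week(10, 10, 2017) == 4              # Dekatrian 10\10\2017 falls on a Wednesday.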
``` |
{
"source": "JoaoGustavoRogel/spider-music-api",
"score": 3
} |
#### File: api/tests/test_endpoint_spotify_api.py
```python
import mock
import unittest
from src.models.SpotifyAPICrawler import ConcreteFactorySpotifyAPICrawler, ConcreteSpotifyAPICrawler
from src.routers.endpoints import spotify_api
class TestSpotifyAPIRouter(unittest.TestCase):
@mock.patch("src.models.SpotifyAPICrawler.ConcreteSpotifyAPICrawler")
def test_get_data_chart(self, mock_crawler):
mock_crawler().get_data.return_value = "testando o endpoint"
extract_data = spotify_api.query_crawler_track_by_id("random_id")
self.assertEqual(extract_data["data"], "testando o endpoint")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joaoh1/GamesKeeper",
"score": 2
} |
#### File: GamesKeeper/plugins/core.py
```python
import yaml
import re
import requests
import functools
import pprint
import os
import base64
import psycopg2
import time
from datetime import datetime, timedelta
from disco.types.permissions import Permissions
from disco.api.http import APIException
from disco.bot import Bot, Plugin, CommandLevels
from disco.bot.command import CommandEvent
from disco.types.message import MessageEmbed, MessageTable
from disco.types.user import GameType, Status, Game
from disco.types.channel import ChannelType
from disco.util.sanitize import S
from GamesKeeper.db import init_db, database
from GamesKeeper.models.guild import Guild
from GamesKeeper import bot_config, update_config
PY_CODE_BLOCK = '```py\n{}\n```'
TEMP_BOT_ADMINS = [
104376018222972928,
142721776458137600,
248245568004947969,
298516367311765505
]
def game_checker(string):
games = {
'ttt': 'ttt',
'hm': 'hm',
'c4': 'c4',
'uno': 'uno',
'2048': '2048',
'twentyfourtyeight': '2048',
'connect 4': 'c4',
'connect four': 'c4',
'connectfour': 'c4',
'connect4': 'c4',
'hangman': 'hm',
'hang man': 'hm',
'tic-tac-toe': 'ttt',
'tic tac toe': 'ttt',
}
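    # e.g. game_checker('Connect Four') -> 'c4'; names not in the table return None.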
name = games.get(string.lower(), None)
return name
class CorePlugin(Plugin):
def load(self, ctx):
init_db()
# self.bot.add_plugin = self.our_add_plugin
self.guilds = ctx.get('guilds', {})
super(CorePlugin, self).load(ctx)
def cooldown_check(self, user):
return False
#Basic command handler
@Plugin.listen('MessageCreate')
def on_message_create(self, event):
if event.message.channel.type == ChannelType.DM:
return
event.bot_admin = event.message.author.id in TEMP_BOT_ADMINS
event.user_level = 0
has_admin = False
new_setup = False
guild = None
if event.message.guild:
try:
guild = Guild.using_id(event.guild.id)
except Guild.DoesNotExist:
guild = self.fresh_start(event, event.guild.id)
new_setup = True
if len(event.message.member.roles) > 0:
for x in event.message.member.roles:
role = event.message.guild.roles.get(x)
if role.permissions.can(Permissions.ADMINISTRATOR):
event.user_level = 100
has_admin = True
if guild.referee_role:
if not has_admin and guild.referee_role in event.message.member.roles:
event.user_level = 50
if event.message.author.bot:
return
# Grab the list of commands
commands = list(self.bot.get_commands_for_message(False, {}, guild.prefix, event.message))
#Used for cmd cooldowns
user_ignores_cooldowns = self.cooldown_check(event.message.author.id)
#Sorry, nothing to see here :C
if not len(commands):
return
for command, match in commands:
needed_level = 0
if command.level:
needed_level = command.level
cooldown = 0
if hasattr(command.plugin, 'game'):
if not guild.check_if_listed(game_checker(command.plugin.game), 'enabled'):
return
if command.level == -1 and not event.bot_admin:
return
if not event.bot_admin and event.user_level < needed_level:
continue
try:
command_event = CommandEvent(command, event.message, match)
command_event.bot_admin = event.bot_admin
command_event.user_level = event.user_level
command_event.db_guild = guild
if command.args:
if len(command_event.args) < command.args.required_length:
self.dis_cmd_help(command, command_event, event, guild)
return
command.plugin.execute(command_event)
except:
self.log.exception('Command error:')
return event.reply('It seems that an error has occured! :(')
if new_setup:
event.message.reply('Hey! I\'ve noticed that I\'m new to the server and have no config, please check out `{}settings` to edit and setup the bot.'.format(guild.prefix))
return
def dis_cmd_help(self, command, command_event, event, guild_obj):
embed = MessageEmbed()
embed.title = 'Command: {}{}'.format('{} '.format(command.group) if hasattr(command, 'group') and command.group != None else '', command.name)
helpstr = command.get_docstring()
embed.description = helpstr
event.message.channel.send_message('', embed=embed)
@Plugin.command('help', '[command:str...]')
def cmd_help(self, event, command=None):
"""
        This is the help command! Use it to get more info about specific commands.
Usage: `help [Command Name]`
To get general info, just type `help`
"""
if command is None:
embed = MessageEmbed()
embed.title = 'GamesKeeper Help'
embed.description = '**To get help with a certain command please use `{prefix}help Command`**\n** **\nFor help with settings please type `{prefix}help settings`'.format(prefix=event.db_guild.prefix)
return event.msg.reply('', embed=embed)
elif command == 'settings' and (event.user_level == 100 or event.bot_admin):
embed = MessageEmbed()
embed.title = 'GamesKeeper Settings Help'
description = [
'To change most settings, the command group is `update`',
'\♦ To change **Prefix**, use `{}update prefix`'.format(event.db_guild.prefix),
'\♦ To change **Games Category**, use `{}update gc`'.format(event.db_guild.prefix),
'\♦ To change the **Referee** role, use `{}update ref`'.format(event.db_guild.prefix),
'\♦ To update **Spectator** roles, use `{}update addspec/rvmspec`'.format(event.db_guild.prefix),
'\♦ To **Enable/Disable Games**, use `{}games enable/disable`'.format(event.db_guild.prefix),
]
embed.description = '\n'.join(description)
return event.msg.reply('', embed=embed)
elif command == 'settings' and (event.user_level != 100 or not event.bot_admin):
return event.msg.reply('`Error:` Command Not Found')
else:
commands = list(self.bot.commands)
for cmd in commands:
if cmd.name != command:
continue
elif cmd.level == -1 and not event.bot_admin:
continue
else:
embed = MessageEmbed()
embed.title = 'Command: {}{}'.format('{} '.format(cmd.group) if hasattr(cmd, 'group') and cmd.group != None else '', cmd.name)
helpstr = cmd.get_docstring()
embed.description = helpstr
return event.msg.reply('', embed=embed)
return event.msg.reply('`Error:` Command Not Found')
@Plugin.command('ping', level=-1)
def cmd_ping(self, event):
"""
Allow us to do what you wish you could do to your pings.
"""
return event.msg.reply('YEET!')
@Plugin.command('level', level=-1)
def cmd_level(self, event):
"""
Dev command to get a user level.
"""
        if event.user_level == 0:
return event.msg.reply('>:C (0)')
else:
return event.msg.reply(event.user_level)
    # Massive function that checks for the first run and, if so, creates blank servers for all the emojis.
@Plugin.listen('Ready')#, priority=Priority.BEFORE)
def on_ready(self, event):
        if not bot_config.first_run:
return
else:
def gen_invite(channel):
invite = channel.create_invite(max_age=0, max_uses=0, unique=True, reason='First run invite generation.')
invite_url = 'https://discord.gg/{code}'.format(code=invite.code)
return invite_url
server_one = self.client.api.guilds_create(name='GamesKeeper Emojis (1/2)')
server_two = self.client.api.guilds_create(name='GamesKeeper Emojis (2/2)')
server_one_channel = server_one.create_text_channel(name='GamesKeeper')
server_two_channel = server_two.create_text_channel(name='GamesKeeper')
server_one_invite = gen_invite(server_one_channel)
server_two_invite = gen_invite(server_two_channel)
uno_emojis = {}
server_one_path = './assets/server_one_emojis'
server_two_path = './assets/server_two_emojis'
for emoji in os.listdir(server_one_path):
with open('{}/{}'.format(server_one_path, emoji), 'rb') as emoji_image:
encoded_string = base64.encodebytes(emoji_image.read())
emoji_image_string = encoded_string.decode()
name = emoji.replace('.png', '')
emoji = self.client.api.guilds_emojis_create(server_one.id, 'Setting up Uno Cards!', name=name, image='data:image/png;base64,{}'.format(emoji_image_string))
uno_emojis[emoji.name] = '{name}:{emoji_id}'.format(name=emoji.name, emoji_id=emoji.id)
for emoji in os.listdir(server_two_path):
with open('{}/{}'.format(server_two_path, emoji), 'rb') as emoji_image:
encoded_string = base64.encodebytes(emoji_image.read())
emoji_image_string = encoded_string.decode()
name = emoji.replace('.png', '')
emoji = self.client.api.guilds_emojis_create(server_two.id, 'Setting up Uno Cards!', name=name, image='data:image/png;base64,{}'.format(emoji_image_string))
uno_emojis[emoji.name] = '{name}:{emoji_id}'.format(name=emoji.name, emoji_id=emoji.id)
with open("config.yaml", 'r') as config:
current_config = yaml.safe_load(config)
emote_server_info = {
'invites': {
'server_one': server_one_invite,
'server_two': server_two_invite
},
'IDs': {
'server_one': server_one.id,
'server_two': server_two.id
}
}
current_config['emoji_servers'] = emote_server_info
current_config['uno_emojis'] = uno_emojis
current_config['first_run'] = False
with open("config.yaml", 'w') as f:
yaml.safe_dump(current_config, f)
    # For developer use; also made by b1nzy (the only eval command for Disco that we know of).
@Plugin.command('eval', level=-1)
def command_eval(self, event):
"""
This a Developer command which allows us to run code without having to restart the bot.
"""
ctx = {
'bot': self.bot,
'client': self.bot.client,
'state': self.bot.client.state,
'event': event,
'msg': event.msg,
'guild': event.msg.guild,
'channel': event.msg.channel,
'author': event.msg.author
}
        # Multiline eval
src = event.codeblock
if src.count('\n'):
lines = list(filter(bool, src.split('\n')))
if lines[-1] and 'return' not in lines[-1]:
lines[-1] = 'return ' + lines[-1]
lines = '\n'.join(' ' + i for i in lines)
code = 'def f():\n{}\nx = f()'.format(lines)
local = {}
try:
exec(compile(code, '<eval>', 'exec'), ctx, local)
except Exception as e:
event.msg.reply(PY_CODE_BLOCK.format(type(e).__name__ + ': ' + str(e)))
return
result = pprint.pformat(local['x'])
else:
try:
result = str(eval(src, ctx))
except Exception as e:
event.msg.reply(PY_CODE_BLOCK.format(type(e).__name__ + ': ' + str(e)))
return
if len(result) > 1990:
event.msg.reply('', attachments=[('result.txt', result)])
else:
event.msg.reply(PY_CODE_BLOCK.format(result))
@Plugin.command('sql', level=-1)
def command_sql(self, event):
"""
This a Developer command which allows us to run Database commands without having to interact with the actual database directly.
"""
conn = database.obj.get_conn()
try:
tbl = MessageTable(codeblock=False)
with conn.cursor() as cur:
start = time.time()
cur.execute(event.codeblock.format(e=event))
dur = time.time() - start
if not cur.description:
return event.msg.reply('_took {}ms - no result_'.format(int(dur * 1000)))
tbl.set_header(*[desc[0] for desc in cur.description])
for row in cur.fetchall():
tbl.add(*row)
result = tbl.compile()
if len(result) > 1900:
return event.msg.reply(
'_took {}ms_'.format(int(dur * 1000)),
attachments=[('result.txt', result)])
event.msg.reply('```' + result + '```\n_took {}ms_\n'.format(int(dur * 1000)))
except psycopg2.Error as e:
event.msg.reply('```{}```'.format(e.pgerror))
def fresh_start(self, event, guild_id):
new_guild = Guild.create(
guild_id = guild_id,
owner_id = event.guild.owner_id,
prefix = "+",
games_catergory = None,
spectator_roles = [],
enabled_games = 0,
referee_role = None,
role_allow_startgames = None,
booster_perks = False,
)
return new_guild
``` |
{
"source": "joaoh1/Inktober",
"score": 2
} |
#### File: backend/discord_events/on_command_error.py
```python
import logging
import traceback
import discord
from discord.ext import commands
from bot import Bot as Client
log = logging.getLogger(__name__)
class Errors(commands.Cog):
def __init__(self, bot):
self.bot: Client = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError):
ctx.message: discord.Message
error_server: discord.Guild = self.bot.get_guild(404070984769994753)
error_channel: discord.TextChannel = error_server.get_channel(404074085522472961)
trace = traceback.format_exception(type(error), error, error.__traceback__, limit=15)
log.error("".join(trace))
log.error(ctx.command)
log.error(ctx.invoked_with)
log.error(ctx.bot)
log.error(ctx.args)
if ctx.guild is not None:
server = ctx.guild.name
server_id = ctx.guild.id
channel_id = ctx.channel.id
else:
server = None
server_id = None
channel_id = None
await error_channel.send(
"_ _\nInvoked With: {}\nArgs: {}\nServer: {} {}\nChannel: {}\nUser: {}#{} {}\n```{}```".format(
repr(ctx.invoked_with),
repr(ctx.args),
repr(server),
repr(server_id),
repr(channel_id),
repr(ctx.author.name),
ctx.author.discriminator,
repr(ctx.author.id),
"".join(trace)))
def setup(bot):
bot.add_cog(Errors(bot))
``` |
{
"source": "joaoh82/leetcode_with_python",
"score": 4
} |
#### File: joaoh82/leetcode_with_python/3sum.py
```python
from typing import List

class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
nums.sort()
triplets = []
for i in range(len(nums)-2):
if i > 0 and nums[i] == nums[i-1]:
continue
leftIdx = i+1
rightIdx = len(nums)-1
while leftIdx < rightIdx:
currentSum = nums[i] + nums[leftIdx] + nums[rightIdx]
if currentSum < 0:
leftIdx += 1
elif currentSum > 0:
rightIdx -= 1
else:
triplets.append((nums[i], nums[leftIdx], nums[rightIdx]))
while leftIdx < rightIdx and nums[leftIdx] == nums[leftIdx + 1]:
leftIdx += 1
while leftIdx < rightIdx and nums[rightIdx] == nums[rightIdx - 1]:
rightIdx -= 1
leftIdx += 1
rightIdx -= 1
return triplets
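
# e.g. Solution().threeSum([-1, 0, 1, 2, -1, -4]) returns [(-1, -1, 2), (-1, 0, 1)]
# (the sorted two-pointer scan emits each unique triplet once, as tuples).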
```
#### File: joaoh82/leetcode_with_python/chucked_palindrome.py
```python
def solution(s):
    size = len(s)
    if not s:
        return 0
    for i in range(size // 2):
        if s[:i + 1] == s[size - 1 - i:]:
            return 2 + solution(s[i + 1: size - 1 - i])
    return 1
print(solution("valve"))
print(solution("voabcvo"))
print(solution("vovo"))
print(solution("volvolvo"))
print(solution("volvol"))
print(solution("aaaaaa"))
```
#### File: joaoh82/leetcode_with_python/invert_binary_tree.py
```python
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution:
def invertTree(self, node: TreeNode) -> TreeNode:
if not node:
return None
node.left, node.right = self.invertTree(node.right), self.invertTree(node.left)
return node
```
#### File: joaoh82/leetcode_with_python/longest_palindromic_substring.py
```python
class Solution:
def longestPalindrome(self, s: str) -> str:
# Return if string is empty of 1 letter
if len(s) < 2:
return s
longestPal = ""
for i in range(len(s)):
j = i + 1
# While j is less the length of string
# and longest palindrome length is less or equal to substring s[i:]
while j <= len(s) and len(longestPal) <= len(s[i:]):
# if substring of s[i:j] is a palindrome
# and substring is longer then longest palindrome so far
if s[i:j] == s[i:j][::-1] and len(s[i:j]) > len(longestPal):
longestPal = s[i:j]
j += 1
return longestPal
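
# e.g. Solution().longestPalindrome("babad") returns "bab" under this left-to-right scan
# ("aba" is equally long but is found later, so it never replaces "bab").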
```
#### File: joaoh82/leetcode_with_python/move_zeros.py
```python
from typing import List

class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
lastZeroFound = 0
for i in range(0, len(nums)):
if nums[i] != 0:
nums[i], nums[lastZeroFound] = nums[lastZeroFound], nums[i]
lastZeroFound += 1
``` |
{
"source": "joaohenggeler/software-vulnerability-collection-scripts",
"score": 3
} |
#### File: software-vulnerability-collection-scripts/Scripts/build_raw_dataset_from_database.py
```python
import os
import sys
from collections import namedtuple
import pandas as pd # type: ignore
from modules.common import log, GLOBAL_CONFIG, CURRENT_TIMESTAMP, get_list_index_or_default, get_path_in_data_directory, get_path_in_output_directory
from modules.database import Database
from modules.project import Project
from modules.sats import Sat
####################################################################################################
def build_raw_dataset_from_database() -> None:
with Database() as db:
CodeUnit = namedtuple('CodeUnit', ['Kind', 'MetricsTablePrefix', 'ProcedureName', 'ProcedureScriptPath'])
FILE_UNIT_INFO = CodeUnit('file', 'FILES_', 'BUILD_FILE_DATASET', get_path_in_data_directory('create_build_file_dataset_procedure.sql'))
FUNCTION_UNIT_INFO = CodeUnit('function', 'FUNCTIONS_', 'BUILD_FUNCTION_DATASET', get_path_in_data_directory('create_build_function_dataset_procedure.sql'))
CLASS_UNIT_INFO = CodeUnit('class', 'CLASSES_', 'BUILD_CLASS_DATASET', get_path_in_data_directory('create_build_class_dataset_procedure.sql'))
UNIT_INFO_LIST = [FILE_UNIT_INFO, FUNCTION_UNIT_INFO, CLASS_UNIT_INFO]
for unit_info in UNIT_INFO_LIST:
success, _ = db.execute_script(unit_info.ProcedureScriptPath)
if not success:
log.error(f'Failed to create the procedure "{unit_info.ProcedureName}" using the script "{unit_info.ProcedureScriptPath}".')
return
project_list = Project.get_project_list_from_config()
sat_list = Sat.get_sat_info_from_config()
for unit_info in UNIT_INFO_LIST:
if not GLOBAL_CONFIG['allowed_code_units'].get(unit_info.Kind):
log.info(f'Skipping the {unit_info.Kind} metrics at the user\'s request')
continue
for project in project_list:
unit_metrics_table = f'{unit_info.MetricsTablePrefix}{project.database_id}'
log.info(f'Building the {project} {unit_info.Kind} dataset using the table {unit_metrics_table}.')
output_csv_path = get_path_in_output_directory(f'raw-dataset-{unit_info.Kind}-{project.database_id}-{project.short_name}-{CURRENT_TIMESTAMP}.csv')
escaped_output_csv_path = output_csv_path.replace('\\', '\\\\')
filter_ineligible_samples = GLOBAL_CONFIG['dataset_filter_samples_ineligible_for_alerts']
filter_commits_without_alerts = GLOBAL_CONFIG['dataset_filter_commits_without_alerts']
allowed_sat_name_list = ','.join([sat.database_name for sat in sat_list])
success, _ = db.call_procedure( unit_info.ProcedureName,
unit_metrics_table, escaped_output_csv_path,
filter_ineligible_samples, filter_commits_without_alerts,
allowed_sat_name_list)
if success:
# @Hack: Change the resulting CSV file's permissions and owner since it would
# otherwise be associated with the user running the MySQL Daemon process (mysqld).
if sys.platform != 'win32':
username = GLOBAL_CONFIG['account_username']
password = GLOBAL_CONFIG['account_password']
log.info(f'Changing the raw dataset\'s file permissions and owner to "{username}".')
os.system(f'echo "{password}" | sudo -S chmod 0664 "{output_csv_path}"')
os.system(f'echo "{password}" | sudo -S chown "{username}:{username}" "{output_csv_path}"')
# Add some class label columns to the dataset. These include:
# 1. Binary - neutral (0) or vulnerable (1). In this case, vulnerable samples belong to any category.
                    # 2. Multiclass - neutral (0), vulnerable without a category (1), or vulnerable with a specific category (2 to N).
# 3. Grouped Multiclass - same as the multiclass label, but any vulnerability category (2 to N) is set to a new
# label if the number of samples in each category falls below a given threshold.
vulnerability_categories = list(GLOBAL_CONFIG['vulnerability_categories'].keys())
def assign_label(row: pd.Series) -> int:
""" Assigns each sample a label given the rules above. """
label = int(row['Affected'])
if label == 1:
category_index = get_list_index_or_default(vulnerability_categories, row['VULNERABILITY_CATEGORY'])
if category_index is not None:
label = category_index + 2
return label
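                    # e.g. if 'Memory Corruption' were the first configured category (hypothetical
                    # name; the real list comes from GLOBAL_CONFIG), an affected sample in it would
                    # get label 2, an affected sample with an unlisted category keeps label 1, and
                    # unaffected samples stay 0.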
dataset = pd.read_csv(output_csv_path, dtype=str)
dataset['multiclass_label'] = dataset.apply(assign_label, axis=1)
dataset['binary_label'] = dataset['multiclass_label']
is_category = dataset['multiclass_label'] > 1
dataset.loc[is_category, 'binary_label'] = 1
# Overwrite the dataset on disk.
dataset.to_csv(output_csv_path, index=False)
log.info(f'Built the raw dataset to "{output_csv_path}" successfully.')
else:
log.error(f'Failed to build the raw dataset to "{output_csv_path}".')
##################################################
build_raw_dataset_from_database()
log.info('Finished running.')
print('Finished running.')
```
#### File: software-vulnerability-collection-scripts/Scripts/fix_neutral_code_unit_status_in_affected_files_and_file_timeline.py
```python
import pandas as pd # type: ignore
from modules.common import log, deserialize_json_container, serialize_json_container
from modules.project import Project
####################################################################################################
total_rows = 0
total_functions = 0
total_classes = 0
def set_status_to_neutral(code_unit_list: list, is_function: bool) -> None:
""" Sets the vulnerability status of a function or class to neutral if it was vulenrable. """
global total_functions, total_classes
for unit in code_unit_list:
if unit['Vulnerable'] == 'Yes':
unit.update({'Vulnerable': 'No'})
if is_function:
total_functions += 1
else:
total_classes += 1
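# e.g. set_status_to_neutral([{'Vulnerable': 'Yes'}], True) rewrites the entry to
# {'Vulnerable': 'No'} in place and increments total_functions by one.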
project_list = Project.get_project_list_from_config()
for project in project_list:
for input_csv_path in project.find_output_csv_files('affected-files'):
log.info(f'Fixing the neutral code unit status for the project "{project}" using the information in "{input_csv_path}".')
affected_files = pd.read_csv(input_csv_path, dtype=str)
for index, row in affected_files.iterrows():
neutral_function_list = deserialize_json_container(row['Neutral File Functions'], [])
neutral_class_list = deserialize_json_container(row['Neutral File Classes'], [])
set_status_to_neutral(neutral_function_list, True) # type: ignore[arg-type]
set_status_to_neutral(neutral_class_list, False) # type: ignore[arg-type]
affected_files.at[index, 'Neutral File Functions'] = serialize_json_container(neutral_function_list) # type: ignore[arg-type]
affected_files.at[index, 'Neutral File Classes'] = serialize_json_container(neutral_class_list) # type: ignore[arg-type]
total_rows += 1
affected_files.to_csv(input_csv_path, index=False)
for input_csv_path in project.find_output_csv_files('file-timeline'):
log.info(f'Fixing the neutral code unit status for the project "{project}" using the information in "{input_csv_path}".')
timeline = pd.read_csv(input_csv_path, dtype=str)
is_neutral = (timeline['Affected'] == 'Yes') & (timeline['Vulnerable'] == 'No')
for index, row in timeline[is_neutral].iterrows():
neutral_function_list = deserialize_json_container(row['Affected Functions'], [])
neutral_class_list = deserialize_json_container(row['Affected Classes'], [])
set_status_to_neutral(neutral_function_list, True) # type: ignore[arg-type]
set_status_to_neutral(neutral_class_list, False) # type: ignore[arg-type]
timeline.at[index, 'Affected Functions'] = serialize_json_container(neutral_function_list) # type: ignore[arg-type]
timeline.at[index, 'Affected Classes'] = serialize_json_container(neutral_class_list) # type: ignore[arg-type]
total_rows += 1
timeline.to_csv(input_csv_path, index=False)
result = f'Finished running. Updated {total_rows} rows including {total_functions} functions and {total_classes} classes.'
log.info(result)
print(result)
```
#### File: software-vulnerability-collection-scripts/Scripts/insert_alerts_in_database.py
```python
import glob
import os
from itertools import chain, zip_longest
from tempfile import TemporaryDirectory
from typing import cast, Optional, Tuple
from zipfile import ZipFile
import numpy as np # type: ignore
import pandas as pd # type: ignore
from modules.common import log, GLOBAL_CONFIG, delete_file, extract_numeric
from modules.database import Database
from modules.project import Project
from modules.sats import Sat, CppcheckSat, FlawfinderSat
####################################################################################################
def insert_alerts_in_database() -> None:
with Database(buffered=True) as db:
success, error_code = db.execute_query('SELECT SAT_NAME, SAT_ID FROM SAT;')
if not success:
log.error(f'Failed to query the SAT IDs with the error code {error_code}.')
return
sat_id_from_name = {row['SAT_NAME']: row['SAT_ID'] for row in db.cursor}
log.info(f'Found the following SATs: {sat_id_from_name}')
project_list = Project.get_project_list_from_config()
for project in project_list:
cppcheck = CppcheckSat(project)
flawfinder = FlawfinderSat(project)
file_metrics_table = 'FILES_' + str(project.database_id)
function_metrics_table = 'FUNCTIONS_' + str(project.database_id)
class_metrics_table = 'CLASSES_' + str(project.database_id)
SELECT_FILE_ID_QUERY = f'''
SELECT F.ID_File FROM {file_metrics_table} AS F
INNER JOIN EXTRA_TIME_FILES AS E ON F.ID_File = E.ID_File
INNER JOIN PATCHES AS P ON E.P_ID = P.P_ID
WHERE P.R_ID = %(R_ID)s AND P.P_COMMIT = %(P_COMMIT)s
AND F.FilePath = %(FILE_PATH)s AND F.Occurrence = %(P_OCCURRENCE)s;
'''
with TemporaryDirectory() as temporary_directory_path:
sat_list = Sat.get_sat_info_from_config()
for sat in sat_list:
sat_id = sat_id_from_name.get(sat.database_name)
if sat_id is None:
log.error(f'The SAT "{sat}" does not exist in the database.')
continue
cached_rule_ids: dict = {}
def find_zipped_csv_files(commit_type: str) -> list:
""" Finds the paths to any zipped CSV files that belong to this project, SAT and commit type (current or previous). """
# E.g. "mozilla/cppcheck/complete_scan/current_commit/part1/cppcheck-195-305babb41123e575e6fd6bf4ea4dab2716ce1ecc.csv.zip"
# E.g. "linux/flawfinder/complete_scan/previous_commit/part4/flawfinder-358-fc1ca73b3758f0c419b46cfeb2a951de22007d90-1.csv.zip"
base_directory = f'{project.github_data_name}/{sat.github_data_name}/complete_scan/{commit_type}'
data_path = os.path.join(GLOBAL_CONFIG['data_repository_path'], base_directory, '**', '*.csv.zip')
data_path = os.path.normpath(data_path)
data_file_list = glob.glob(data_path, recursive=True)
data_file_list = list(filter(os.path.isfile, data_file_list))
data_file_list = sorted(data_file_list, reverse=True)
return data_file_list
neutral_file_list = find_zipped_csv_files('current_commit')
vulnerable_file_list = find_zipped_csv_files('previous_commit')
# Create a list of alternating zipped CSV paths, regardless of each list's length.
zip_file_list: list = list(filter(None, chain.from_iterable(zip_longest(neutral_file_list, vulnerable_file_list))))
log.info(f'Starting the insertion of {len(zip_file_list)} CSV alert files.')
for i, zip_file_path in enumerate(zip_file_list):
zip_filename = os.path.basename(zip_file_path)
# E.g. "cppcheck-195-305babb41123e575e6fd6bf4ea4dab2716ce1ecc.csv.zip"
# E.g. "flawfinder-358-fc1ca73b3758f0c419b46cfeb2a951de22007d90-1.csv.zip"
# Note the "-1" in the previous_commit. This means we are in "previous_commit" and the real commit hash
# is the one immediately before the one shown here.
_, _, commit_hash = zip_filename.split('-', 2)
commit_hash, _, _ = commit_hash.rsplit('.', 2)
                        occurrence = 'before' if commit_hash.endswith('-1') else 'after'
                        # Remove the exact '-1' suffix; str.rstrip('-1') would also strip any
                        # trailing '1' or '-' characters that belong to the hash itself.
                        if occurrence == 'before':
                            commit_hash = commit_hash[:-2]
success, error_code = db.execute_query( '''
SELECT
(SELECT COUNT(*) > 0 FROM PATCHES WHERE R_ID = %(R_ID)s AND P_COMMIT = %(P_COMMIT)s) AS PATCH_EXISTS,
(
SELECT COUNT(*) > 0 FROM ALERT AS A
INNER JOIN RULE AS R ON A.RULE_ID = R.RULE_ID
WHERE R.SAT_ID = %(SAT_ID)s AND A.R_ID = %(R_ID)s
AND A.P_COMMIT = %(P_COMMIT)s AND A.P_OCCURRENCE = %(P_OCCURRENCE)s
) AS SAT_ALERTS_ALREADY_EXIST_FOR_THIS_COMMIT
;
''',
params={'SAT_ID': sat_id, 'R_ID': project.database_id,
'P_COMMIT': commit_hash, 'P_OCCURRENCE': occurrence})
if not success:
log.error(f'Failed to query any existing patches with the commit {commit_hash} ({occurrence}, "{zip_filename}") in the project "{project}" with the error code {error_code}.')
continue
alert_row = db.cursor.fetchone()
if alert_row['PATCH_EXISTS'] == 0:
log.warning(f'Skipping the patch with the commit {commit_hash} ({occurrence}, "{zip_filename}") in the project "{project}" since it does not exist in the database.')
continue
if alert_row['SAT_ALERTS_ALREADY_EXIST_FOR_THIS_COMMIT'] == 1:
log.info(f'Skipping the alerts for the commit {commit_hash} ({occurrence}, "{zip_filename}") in the project "{project}" since they already exist.')
continue
log.info(f'Inserting the alerts {i+1} of {len(zip_file_list)} from "{zip_file_path}" ({occurrence}).')
with ZipFile(zip_file_path, 'r') as zip_file: # type: ignore[assignment]
filenames_in_zip = zip_file.namelist() # type: ignore[attr-defined]
zip_file.extractall(temporary_directory_path) # type: ignore[attr-defined]
csv_file_path = os.path.join(temporary_directory_path, filenames_in_zip[0])
cached_file_ids: dict = {}
##################################################
def insert_rule_and_cwe_info(alert_params: dict, cwe_list: list) -> Tuple[bool, Optional[int]]:
""" Inserts a security alert's rule and CWEs in the database. This function is successful and returns the rule's primary
key if a new row was inserted or if the rule already exists. """
total_success = True
rule_id = None
alert_params['SAT_ID'] = sat_id
success, error_code = db.execute_query( '''
INSERT IGNORE INTO RULE (RULE_NAME, RULE_CATEGORY, SAT_ID)
VALUES (%(RULE_NAME)s, %(RULE_CATEGORY)s, %(SAT_ID)s);
''', params=alert_params)
if not success:
total_success = False
log.error(f'Failed to insert the rule with the error code {error_code} and the parameters: {alert_params}.')
for cwe in cwe_list:
success, error_code = db.execute_query('INSERT IGNORE INTO CWE_INFO (V_CWE) VALUES (%(V_CWE)s);', params={'V_CWE': cwe})
if not success:
total_success = False
log.error(f'Failed to insert the info for the CWE {cwe} with the error code {error_code} and the parameters: {alert_params}.')
rule_key = (sat_id, alert_params['RULE_NAME'])
rule_id = cached_rule_ids.get(rule_key, -1)
if rule_id == -1:
success, error_code = db.execute_query( 'SELECT RULE_ID FROM RULE WHERE RULE_NAME = %(RULE_NAME)s AND SAT_ID = %(SAT_ID)s;',
params=alert_params)
if success and db.cursor.rowcount > 0:
rule_row = db.cursor.fetchone()
rule_id = rule_row['RULE_ID']
else:
rule_id = None
total_success = False
                                    log.error(f'Failed to query the rule ID for {rule_key} with the error code {error_code} and the parameters: {alert_params}.')
cached_rule_ids[rule_key] = rule_id
if rule_id is None:
total_success = False
else:
for cwe in cwe_list:
success, error_code = db.execute_query( '''
INSERT IGNORE INTO RULE_CWE_INFO (RULE_ID, V_CWE)
VALUES
(
%(RULE_ID)s,
%(V_CWE)s
);
''', params={'RULE_ID': rule_id, 'V_CWE': cwe})
if not success:
total_success = False
log.error(f'Failed to insert the key ({rule_id}, {cwe}) in RULE_CWE_INFO with the error code {error_code} and the parameters: {alert_params}.')
return total_success, rule_id
def insert_alert(alert_params: dict, cwe_list: list) -> None:
""" Inserts a security alert in the database given its parameters: RULE_NAME, RULE_CATEGORY, ALERT_SEVERITY_LEVEL,
ALERT_LINE, ALERT_MESSAGE, FILE_PATH, and a list of CWEs. """
alert_params['R_ID'] = project.database_id
alert_params['P_COMMIT'] = commit_hash
alert_params['P_OCCURRENCE'] = occurrence
success, rule_id = insert_rule_and_cwe_info(alert_params, cwe_list)
if success:
alert_params['RULE_ID'] = rule_id
file_path = alert_params['FILE_PATH']
file_id = cached_file_ids.get(file_path, -1)
if file_id == -1:
success, error_code = db.execute_query(SELECT_FILE_ID_QUERY, params=alert_params)
if success and db.cursor.rowcount > 0:
file_id_row = db.cursor.fetchone()
file_id = file_id_row['ID_File']
else:
file_id = None
cached_file_ids[file_path] = file_id
if file_id is not None:
alert_params['ID_File'] = file_id
success, error_code = db.execute_query( '''
INSERT INTO ALERT
(
ALERT_SEVERITY_LEVEL, ALERT_LINE, ALERT_MESSAGE,
R_ID, P_COMMIT, P_OCCURRENCE,
RULE_ID, ID_File
)
VALUES
(
%(ALERT_SEVERITY_LEVEL)s, %(ALERT_LINE)s, %(ALERT_MESSAGE)s,
%(R_ID)s, %(P_COMMIT)s, %(P_OCCURRENCE)s,
%(RULE_ID)s, %(ID_File)s
);
''', params=alert_params)
if success:
alert_params['ALERT_ID'] = db.cursor.lastrowid
success, error_code = db.execute_query(f'''
INSERT IGNORE INTO ALERT_FUNCTION (ALERT_ID, ID_Function)
SELECT A.ALERT_ID, F.ID_Function FROM {function_metrics_table} AS F
INNER JOIN ALERT AS A ON F.ID_File = A.ID_File
WHERE A.ALERT_ID = %(ALERT_ID)s AND A.ALERT_LINE BETWEEN F.BeginLine AND F.EndLine;
''', params=alert_params)
if not success:
log.error(f'Failed to insert the functions IDs where the alert appears with the error code {error_code} and the parameters: {alert_params}.')
success, error_code = db.execute_query(f'''
INSERT IGNORE INTO ALERT_CLASS (ALERT_ID, ID_Class)
SELECT A.ALERT_ID, C.ID_Class FROM {class_metrics_table} AS C
INNER JOIN ALERT AS A ON C.ID_File = A.ID_File
WHERE A.ALERT_ID = %(ALERT_ID)s AND A.ALERT_LINE BETWEEN C.BeginLine AND C.EndLine;
''', params=alert_params)
if not success:
log.error(f'Failed to insert the class IDs where the alert appears with the error code {error_code} and the parameters: {alert_params}.')
else:
log.error(f'Failed to insert the alert with the error code {error_code} and the parameters: {alert_params}.')
##################################################
if sat.database_name == 'Cppcheck':
alerts = cppcheck.read_and_convert_output_csv_in_default_format(csv_file_path)
for row in alerts.itertuples():
alert_params = {}
cwe_list = [row.CWE] if row.CWE is not None else []
alert_params['RULE_NAME'] = row.Rule
alert_params['RULE_CATEGORY'] = row.Severity
alert_params['ALERT_SEVERITY_LEVEL'] = None
alert_params['ALERT_LINE'] = row.Line
alert_params['ALERT_MESSAGE'] = row.Message
alert_params['FILE_PATH'] = row.File
insert_alert(alert_params, cwe_list)
elif sat.database_name == 'Flawfinder':
alerts = flawfinder.read_and_convert_output_csv_in_default_format(csv_file_path)
for row in alerts.itertuples():
alert_params = {}
# Get a list of CWEs. The following values may appear:
# - ''
# - 'CWE-676, CWE-120, CWE-20'
# - 'CWE-362/CWE-367!'
# - 'CWE-119!/CWE-120'
cwe_list = cast(list, extract_numeric(row.CWEs, all=True)) if row.CWEs is not None else []
alert_params['RULE_NAME'] = row.Name
alert_params['RULE_CATEGORY'] = row.Category
alert_params['ALERT_SEVERITY_LEVEL'] = row.Level
alert_params['ALERT_LINE'] = row.Line
alert_params['ALERT_MESSAGE'] = row.Warning
alert_params['FILE_PATH'] = row.File
insert_alert(alert_params, cwe_list)
else:
log.critical(f'Cannot insert the alerts from "{zip_file_path}" since the SAT "{sat.database_name}" is not recognized.')
##################################################
db.commit()
delete_file(csv_file_path)
##################################################
insert_alerts_in_database()
log.info('Finished running.')
print('Finished running.')
```
#### File: software-vulnerability-collection-scripts/Scripts/insert_metrics_in_database.py
```python
import os
from collections import namedtuple
from typing import cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from modules.common import log, GLOBAL_CONFIG, deserialize_json_container, extract_numeric, get_list_index_or_default
from modules.database import Database
from modules.project import Project
####################################################################################################
with Database(buffered=True) as db:
CodeUnit = namedtuple('CodeUnit', ['Kind', 'ExtraTimeTable', 'MetricsTablePrefix', 'MetricsTablePrimaryKey'])
FILE_UNIT_INFO = CodeUnit('file', 'EXTRA_TIME_FILES', 'FILES_', 'ID_File')
FUNCTION_UNIT_INFO = CodeUnit('function', 'EXTRA_TIME_FUNCTIONS', 'FUNCTIONS_', 'ID_Function')
CLASS_UNIT_INFO = CodeUnit('class', 'EXTRA_TIME_CLASS', 'CLASSES_', 'ID_Class')
UNIT_INFO_LIST = [FILE_UNIT_INFO, FUNCTION_UNIT_INFO, CLASS_UNIT_INFO]
CSV_TO_DATABASE_COLUMN = {
# For file tables:
'SumCountPath': 'CountPath',
'SumCountInput': 'FanIn',
'SumCountOutput' : 'FanOut',
'AvgCountInput': 'AvgFanIn',
'AvgCountOutput': 'AvgFanOut',
'MaxCountInput': 'MaxFanIn',
'MaxCountOutput': 'MaxFanOut',
'HenryKafura': 'HK',
}
project_list = Project.get_project_list_from_config()
for project in project_list:
file_metrics_table = FILE_UNIT_INFO.MetricsTablePrefix + str(project.database_id)
SELECT_FILE_ID_QUERY = f'''
SELECT F.ID_File FROM {file_metrics_table} AS F
INNER JOIN EXTRA_TIME_FILES AS E ON F.ID_File = E.ID_File
INNER JOIN PATCHES AS P ON E.P_ID = P.P_ID
WHERE P.R_ID = %(R_ID)s AND P.P_COMMIT = %(P_COMMIT)s
AND F.FilePath = %(FilePath)s AND F.Occurrence = %(Occurrence)s;
'''
# @Hack: Doesn't handle multiple versions that were scraped at different times, though that's not really necessary for now.
commits_csv_path = project.find_output_csv_files('affected-files')[0]
commits = pd.read_csv(commits_csv_path, usecols=['Topological Index', 'Vulnerable Commit Hash', 'Neutral Commit Hash'], dtype=str)
# Only neutral commits are stored in the PATCHES table. We need to convert from vulnerable to neutral commits so we can find the correct P_IDs.
vulnerable_to_neutral_commit = {row['Vulnerable Commit Hash']: row['Neutral Commit Hash'] for _, row in commits.iterrows()}
del commits_csv_path, commits
for unit_info in UNIT_INFO_LIST:
if not GLOBAL_CONFIG['allowed_code_units'].get(unit_info.Kind):
log.info(f'Skipping the {unit_info.Kind} metrics for the project "{project}" at the user\'s request')
continue
is_function = (unit_info.Kind == 'function')
is_class = (unit_info.Kind == 'class')
unit_metrics_table = unit_info.MetricsTablePrefix + str(project.database_id)
EXTRA_TIME_INSERT_QUERY = f'''
INSERT INTO {unit_info.ExtraTimeTable}
(
P_ID, {unit_info.MetricsTablePrimaryKey}
)
VALUES
(
%(P_ID)s, %({unit_info.MetricsTablePrimaryKey})s
);
'''
success, error_code = db.execute_query(f'SELECT MAX({unit_info.MetricsTablePrimaryKey} DIV 100) + 1 AS NEXT_ID FROM {unit_metrics_table};')
assert db.cursor.rowcount != -1, 'The database cursor must be buffered.'
next_id = -1
if success and db.cursor.rowcount > 0:
row = db.cursor.fetchone()
next_id = int(row['NEXT_ID'])
log.info(f'Found the next {unit_info.Kind} metrics ID {next_id} for the project "{project}".')
else:
log.error(f'Failed to find the next {unit_info.Kind} metrics ID for the project "{project}" with the error code {error_code}.')
continue
def get_next_unit_metrics_table_id() -> int:
""" Retrieves the next primary key value of the ID_File, ID_Function, or ID_Class column for the current project and code unit table. """
global next_id
result = next_id * 100 + project.database_id
next_id += 1
return result
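            # e.g. with next_id == 7 for a project whose database_id is 3, this yields 703:
            # the last two digits encode the project (for project IDs below 100) and the
            # rest is a per-project running counter.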
cached_insert_queries: dict = {}
output_csv_prefix = f'{unit_info.Kind}-metrics'
output_csv_subdirectory = f'{unit_info.Kind}_metrics'
def output_csv_sort_key(csv_path: str) -> int:
""" Called when sorting the CSV list by each file path. We want units that weren't affected by a vulnerability (affected = 0, vulnerable = 0)
to be sorted before the vulnerable and neutral ones (affected = 1, vulnerable = 0 or 1). This is because the query that checks if the metrics
were already inserted only works if the units whose P_ID columns will be NULL are inserted first. """
commit_params = cast(list, extract_numeric(os.path.basename(csv_path), convert=True, all=True))
topological_index, affected, vulnerable, *_ = commit_params # (0 to N, 0 or 1, 0 or 1)
return topological_index * 100 + affected * 10 + vulnerable
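            # e.g. a CSV whose basename encodes topological index 42, affected 1, vulnerable 0
            # sorts with key 42 * 100 + 1 * 10 + 0 = 4210 (assuming extract_numeric pulls those
            # three values from the basename in that order, per the unpacking above).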
for input_csv_path in project.find_output_csv_files(output_csv_prefix, subdirectory=output_csv_subdirectory, sort_key=output_csv_sort_key):
log.info(f'Inserting the {unit_info.Kind} metrics using the information in "{input_csv_path}".')
# The CSV may be empty if a given code unit type didn't exist in a file (e.g. classes) when we split them.
try:
metrics = pd.read_csv(input_csv_path, dtype=str)
except pd.errors.EmptyDataError:
log.info(f'Skipping the empty {unit_info.Kind} metrics file "{input_csv_path}".')
continue
metrics = metrics.replace({np.nan: None})
topological_index = metrics['Topological Index'].iloc[0]
commit_hash = metrics['Commit Hash'].iloc[0]
affected_commit = metrics['Affected Commit'].iloc[0] == 'Yes'
vulnerable_commit = metrics['Vulnerable Commit'].iloc[0] == 'Yes'
# The first commit (neutral) in the project is not in the PATCHES table, so we'll skip its metrics for now.
if topological_index == '0':
log.info(f'Skipping the first commit {commit_hash}.')
continue
commit_hash = vulnerable_to_neutral_commit.get(commit_hash, commit_hash)
occurrence = 'before' if vulnerable_commit else 'after'
# Since only neutral commits are stored in the PATCHES table, we need to use the occurrence (before and after a patch)
# to determine if a commit's metrics (vulnerable or neutral) already exist in the database.
success, error_code = db.execute_query(f'''
SELECT
P_ID,
(
SELECT COUNT(*) > 0 FROM {unit_metrics_table} AS U
WHERE U.P_ID LIKE CONCAT('%', P.P_ID, '%') AND Occurrence = %(Occurrence)s
) AS COMMIT_METRICS_ALREADY_EXIST
FROM PATCHES AS P
WHERE R_ID = %(R_ID)s AND P_COMMIT = %(P_COMMIT)s;
''',
params={'R_ID': project.database_id, 'P_COMMIT': commit_hash, 'Occurrence': occurrence})
if not success:
log.error(f'Failed to query any existing {unit_info.Kind} metrics for the commit {commit_hash} ({topological_index}, {affected_commit}, {vulnerable_commit}) in the project "{project}" with the error code {error_code}.')
continue
patch_row_list = [row for row in db.cursor]
if not patch_row_list:
log.error(f'Could not find any patch with the commit {commit_hash} ({topological_index}, {affected_commit}, {vulnerable_commit}) in the project "{project}".')
continue
if any(patch_row['COMMIT_METRICS_ALREADY_EXIST'] == 1 for patch_row in patch_row_list):
log.info(f'Skipping the {unit_info.Kind} metrics for the patches "{patch_row_list}" with the commit {commit_hash} ({topological_index}, {affected_commit}, {vulnerable_commit}) in the project "{project}" since they already exist.')
if any(patch_row['COMMIT_METRICS_ALREADY_EXIST'] == 0 for patch_row in patch_row_list):
log.warning(f'The patches "{patch_row_list}" to be skipped have one or more {unit_info.Kind} metric values that were not previously inserted.')
continue
patch_list = [patch_row['P_ID'] for patch_row in patch_row_list]
patch_list_string = '[' + ', '.join(patch_list) + ']'
cached_file_ids: dict = {}
##################################################
# Remove column name spaces for itertuples().
metrics.columns = metrics.columns.str.replace(' ', '')
csv_metric_names = metrics.columns.values.tolist()
first_metric_index = get_list_index_or_default(csv_metric_names, 'File') or get_list_index_or_default(csv_metric_names, 'Name') or get_list_index_or_default(csv_metric_names, 'Kind')
first_metric_index += 1
csv_metric_names = csv_metric_names[first_metric_index:]
# E.g. "SumCyclomatic" -> "SumCyclomatic"" or "HenryKafura" -> "HK".
database_metric_names = [CSV_TO_DATABASE_COLUMN.get(name, name) for name in csv_metric_names]
# Due to the way metrics are divided by code units, the CVEs and CodeUnitLines columns may only exist
# in CSV related to affected commits (vulnerable or neutral).
has_code_unit_lines = 'CodeUnitLines' in metrics.columns
has_file = 'File' in metrics.columns
def convert_string_status_to_number(status: str) -> int:
""" Converts a Yes/No/Unknown status from a CSV file into a numeric value used by the database. """
if status == 'Yes':
return 1
elif status == 'No':
return 0
else:
return 2
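                # e.g. 'Yes' -> 1, 'No' -> 0, anything else (such as 'Unknown') -> 2.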
##################################################
for row in metrics.itertuples():
file_path = row.File if has_file else None
if file_path is None:
continue
affected_status = convert_string_status_to_number(row.VulnerableCodeUnit)
# Possible cases for the P_ID columns in the FILES_*, FUNCTIONS_*, and CLASSES_* tables (Affected Vulnerable):
# - (No, No) -> Set to NULL.
# - (Yes, Yes) -> Set to the string "[P_ID_1, P_ID_2, ..., P_ID_N]", for N patches.
# - (Yes, No) -> Set to the current P_ID, for N patches.
p_id_list: list
if not affected_commit:
p_id_list = [None]
elif affected_status != 0:
p_id_list = [patch_list_string]
else:
p_id_list = patch_list
for p_id in p_id_list:
# Columns:
# - File: ID_File, R_ID, P_ID, FilePath, Patched, Occurrence, Affected, [METRICS]
# - Function: ID_Function, R_ID, P_ID, ID_Class, ID_File, Visibility, Complement, NameMethod, FilePath, Patched, Occurrence, Affected, [METRICS]
# - Class: ID_Class, R_ID, P_ID, ID_File, Visibility, Complement, NameClass, FilePath, Patched, Occurrence, Affected, [METRICS]
unit_id = get_next_unit_metrics_table_id()
# Columns in common:
query_params = {
unit_info.MetricsTablePrimaryKey: unit_id,
'R_ID': project.database_id,
'P_ID': p_id,
'FilePath': file_path,
'Patched': convert_string_status_to_number(row.PatchedCodeUnit), # If the code unit was changed.
'Occurrence': occurrence, # Whether or not this code unit exists before (vulnerable) or after (neutral) the patch.
'Affected': affected_status, # If the code unit is vulnerable or not.
}
if is_function or is_class:
query_params['Visibility'] = row.Visibility
query_params['Complement'] = row.Complement
if has_code_unit_lines:
lines = cast(list, deserialize_json_container(row.CodeUnitLines, [None, None]))
query_params['BeginLine'] = lines[0]
query_params['EndLine'] = lines[1]
file_id = cached_file_ids.get(file_path, -1)
if file_id == -1:
success, error_code = db.execute_query(SELECT_FILE_ID_QUERY, params={
'R_ID': query_params['R_ID'],
'P_COMMIT': commit_hash,
'FilePath': query_params['FilePath'],
'Occurrence': query_params['Occurrence']
})
if success and db.cursor.rowcount > 0:
file_id_row = db.cursor.fetchone()
file_id = file_id_row[FILE_UNIT_INFO.MetricsTablePrimaryKey]
else:
file_id = None
cached_file_ids[file_path] = file_id
query_params[FILE_UNIT_INFO.MetricsTablePrimaryKey] = file_id
if is_function:
query_params['NameMethod'] = row.Name
query_params[CLASS_UNIT_INFO.MetricsTablePrimaryKey] = -1
elif is_class:
query_params['NameClass'] = row.Name
for database_name, csv_name in zip(database_metric_names, csv_metric_names):
query_params[database_name] = getattr(row, csv_name)
query_params_key = tuple(query_params.keys())
query = cached_insert_queries.get(query_params_key)
if query is None:
query = f'INSERT INTO {unit_metrics_table} ('
for name in query_params:
query += f'{name},'
query = query.rstrip(',')
query += ') VALUES ('
for name in query_params:
query += f'%({name})s,'
query = query.rstrip(',')
query += ');'
cached_insert_queries[query_params_key] = query
success, error_code = db.execute_query(query, params=query_params)
def insert_patch_and_unit_ids_in_extra_time_table(patch_id: str) -> None:
""" Inserts a row for the current code unit into the appropriate EXTRA_TIME_* table. """
success, error_code = db.execute_query(EXTRA_TIME_INSERT_QUERY,
params={
'P_ID': patch_id,
unit_info.MetricsTablePrimaryKey: unit_id,
}
)
if not success:
log.error(f'Failed to insert the {unit_info.Kind} metrics ID in the {unit_info.ExtraTimeTable} table for the unit "{row.Name}" ({unit_id}) in the file "{file_path}" and commit {commit_hash} ({topological_index}, {patch_id}, {affected_commit}, {vulnerable_commit}) with the error code {error_code}.')
if success:
if affected_commit and affected_status == 0:
insert_patch_and_unit_ids_in_extra_time_table(p_id)
else:
for p_id in patch_list:
insert_patch_and_unit_ids_in_extra_time_table(p_id)
else:
log.error(f'Failed to insert the {unit_info.Kind} metrics for the unit "{row.Name}" ({unit_id}) in the file "{file_path}" and commit {commit_hash} ({topological_index}, {p_id}, {affected_commit}, {vulnerable_commit}) with the error code {error_code}.')
##################################################
db.commit()
log.info('Finished running.')
print('Finished running.')
```
#### File: Scripts/modules/cve.py
```python
import re
from typing import TYPE_CHECKING, Callable, Optional
from urllib.parse import urlsplit, parse_qsl
if TYPE_CHECKING:
from .project import Project
import bs4 # type: ignore
from .common import log, remove_list_duplicates, serialize_json_container
from .scraping import ScrapingManager, ScrapingRegex
####################################################################################################
class Cve:
""" Represents a vulnerability (CVE) scraped from the CVE Details website. """
CVE_DETAILS_SCRAPING_MANAGER: ScrapingManager = ScrapingManager('https://www.cvedetails.com')
id: str
url: str
project: 'Project'
publish_date: Optional[str]
last_update_date: Optional[str]
cvss_score: Optional[str]
confidentiality_impact: Optional[str]
integrity_impact: Optional[str]
availability_impact: Optional[str]
access_complexity: Optional[str]
authentication: Optional[str]
gained_access: Optional[str]
vulnerability_types: Optional[list]
cwe: Optional[str]
affected_products: dict
bugzilla_urls: list
bugzilla_ids: list
advisory_urls: list
advisory_ids: list
advisory_info: dict
git_urls: list
git_commit_hashes: list
svn_urls: list
svn_revision_numbers: list
def __init__(self, id: str, project: 'Project'):
self.id = id
self.url = f'https://www.cvedetails.com/cve/{self.id}'
self.project = project
self.cve_details_soup = None
self.publish_date = None
self.last_update_date = None
self.cvss_score = None
self.confidentiality_impact = None
self.integrity_impact = None
self.availability_impact = None
self.access_complexity = None
self.authentication = None
self.gained_access = None
self.vulnerability_types = None
self.cwe = None
self.affected_products = {}
self.bugzilla_urls = []
self.bugzilla_ids = []
self.advisory_urls = []
self.advisory_ids = []
self.advisory_info = {}
self.git_urls = []
self.git_commit_hashes = []
self.svn_urls = []
self.svn_revision_numbers = []
def __str__(self):
return self.id
def download_cve_details_page(self) -> bool:
""" Downloads the CVE's page from the CVE Details website. """
response = Cve.CVE_DETAILS_SCRAPING_MANAGER.download_page(self.url)
if response is not None:
self.cve_details_soup = bs4.BeautifulSoup(response.text, 'html.parser')
return response is not None
def scrape_dates_from_page(self):
""" Scrapes any date values from the CVE's page. """
"""
<div class="cvedetailssummary">
Memory safety bugs were reported in Firefox 57 and Firefox ESR 52.5. Some of these bugs showed evidence of memory corruption and we presume that with enough effort that some of these could be exploited to run arbitrary code. This vulnerability affects Thunderbird < 52.6, Firefox ESR < 52.6, and Firefox < 58. <br>
<span class="datenote">Publish Date : 2018-06-11 Last Update Date : 2018-08-03</span>
</div>
"""
dates_span = self.cve_details_soup.find('span', class_='datenote')
        if dates_span is None:
            log.warning(f'--> No dates span found for {self}.')
            return
        dates_text = dates_span.get_text(strip=True)
cve_dates = {}
for date in re.split(r'\t+', dates_text):
key, value = date.split(' : ')
cve_dates[key] = value
self.publish_date = cve_dates.get('Publish Date')
self.last_update_date = cve_dates.get('Last Update Date')
def scrape_basic_attributes_from_page(self):
""" Scrapes any basic attributes from the CVE's page. """
"""
<table id="cvssscorestable" class="details">
<tbody>
<tr>
<th>CVSS Score</th>
<td><div class="cvssbox" style="background-color:#ff9c20">7.5</div></td>
</tr>
<tr>
<th>Confidentiality Impact</th>
<td><span style="color:orange">Partial</span>
<span class="cvssdesc">(There is considerable informational disclosure.)</span></td>
</tr>
<tr>
<th>Access Complexity</th>
<td><span style="color:red">Low</span>
<span class="cvssdesc">(Specialized access conditions or extenuating circumstances do not exist. Very little knowledge or skill is required to exploit. )</span></td>
</tr>
<tr>
<th>Authentication</th>
<td><span style="color:red">Not required</span>
<span class="cvssdesc">(Authentication is not required to exploit the vulnerability.)</span></td>
</tr>
<tr>
<th>Gained Access</th>
<td><span style="color:green;">None</span></td>
</tr>
<tr>
<th>Vulnerability Type(s)</th>
<td><span class="vt_overflow">Overflow</span><span class="vt_memc">Memory corruption</span></td>
</tr>
<tr>
<th>CWE ID</th>
<td><a href="//www.cvedetails.com/cwe-details/119/cwe.html" title="CWE-119 - CWE definition">119</a></td>
</tr>
</tbody>
</table>
"""
scores_table = self.cve_details_soup.find('table', id='cvssscorestable')
if scores_table is None:
log.warning(f'--> No scores table found for {self}.')
return
scores_th_list = scores_table.find_all('th')
scores_td_list = scores_table.find_all('td')
cve_attributes = {}
for th, td in zip(scores_th_list, scores_td_list):
key = th.get_text(strip=True)
value = None
if key == 'Vulnerability Type(s)':
value = [span.get_text(strip=True) for span in td.find_all('span')]
else:
span = td.find('span')
if span is not None:
value = span.get_text(strip=True)
else:
value = td.get_text(strip=True)
cve_attributes[key] = value
self.cvss_score = cve_attributes.get('CVSS Score')
self.confidentiality_impact = cve_attributes.get('Confidentiality Impact')
self.integrity_impact = cve_attributes.get('Integrity Impact')
self.availability_impact = cve_attributes.get('Availability Impact')
self.access_complexity = cve_attributes.get('Access Complexity')
self.authentication = cve_attributes.get('Authentication')
self.gained_access = cve_attributes.get('Gained Access')
self.vulnerability_types = cve_attributes.get('Vulnerability Type(s)')
cwe = cve_attributes.get('CWE ID')
if cwe is not None and not cwe.isnumeric():
cwe = None
self.cwe = cwe
def scrape_affected_product_versions_from_page(self):
""" Scrapes any affected products and their versions from the CVE's page. """
"""
<table class="listtable" id="vulnprodstable">
<tbody>
<tr>
<th class="num">#</th>
<th>Product Type</th>
<th>Vendor</th>
<th>Product</th>
<th>Version</th>
<th>Update</th>
<th>Edition</th>
<th>Language</th>
<th></th>
</tr>
<tr>
<td class="num">1</td>
<td>Application </td>
<td><a href="//www.cvedetails.com/vendor/452/Mozilla.html" title="Details for Mozilla">Mozilla</a></td>
<td><a href="//www.cvedetails.com/product/3264/Mozilla-Firefox.html?vendor_id=452" title="Product Details Mozilla Firefox">Firefox</a></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td><a href="/version/12613/Mozilla-Firefox-.html" title="Mozilla Firefox ">Version Details</a> <a href="/vulnerability-list/vendor_id-452/product_id-3264/version_id-12613/Mozilla-Firefox-.html" title="Vulnerabilities of Mozilla Firefox ">Vulnerabilities</a></td>
</tr>
<tr>
<td class="num">2 </td>
<td>Application </td>
<td><a href="//www.cvedetails.com/vendor/44/Netscape.html" title="Details for Netscape">Netscape</a></td>
<td><a href="//www.cvedetails.com/product/64/Netscape-Navigator.html?vendor_id=44" title="Product Details Netscape Navigator">Navigator</a></td>
<td>7.0.2 </td>
<td></td>
<td></td>
<td></td>
<td><a href="/version/11359/Netscape-Navigator-7.0.2.html" title="Netscape Navigator 7.0.2">Version Details</a> <a href="/vulnerability-list/vendor_id-44/product_id-64/version_id-11359/Netscape-Navigator-7.0.2.html" title="Vulnerabilities of Netscape Navigator 7.0.2">Vulnerabilities</a></td>
</tr>
</tbody>
</table>
"""
products_table = self.cve_details_soup.find('table', id='vulnprodstable')
if products_table is None:
log.warning(f'--> No products table found for {self}.')
return
# Parse each row in the product table.
th_list = products_table.find_all('th')
th_list = [th.get_text(strip=True) for th in th_list]
column_indexes = { 'vendor': th_list.index('Vendor'),
'product': th_list.index('Product'),
'version': th_list.index('Version')}
tr_list = products_table.find_all('tr')
for tr in tr_list:
# Skip the header row.
if tr.find('th'):
continue
td_list = tr.find_all('td')
def get_column_value_and_url(name):
""" Gets a specific cell value and any URL it references from the current row given its column name.. """
idx = column_indexes[name]
td = td_list[idx]
value = td.get_text(strip=True)
url = td.find('a', href=True)
if value in ['', '-']:
value = None
if url is not None:
url = url['href']
return value, url
_, vendor_url = get_column_value_and_url('vendor')
product, product_url = get_column_value_and_url('product')
version, _ = get_column_value_and_url('version')
vendor_pattern = f'/{self.project.vendor_id}/'
product_pattern = f'/{self.project.product_id}/' if self.project.product_id is not None else ''
# Check if the vendor and product belong to the current project.
if vendor_pattern in vendor_url and product_pattern in product_url:
if product not in self.affected_products:
self.affected_products[product] = []
if version is not None and version not in self.affected_products[product]:
self.affected_products[product].append(version)
def scrape_references_from_page(self):
""" Scrapes any references and links from the CVE's page. """
"""
<table class="listtable" id="vulnrefstable">
<tbody>
<tr>
<td class="r_average">
<a href="https://github.com/torvalds/linux/commit/09ccfd238e5a0e670d8178cf50180ea81ae09ae1" target="_blank" title="External url">https://github.com/torvalds/linux/commit/09ccfd238e5a0e670d8178cf50180ea81ae09ae1</a>
CONFIRM
<br>
</td>
</tr>
<tr>
<td class="r_average">
<a href="https://bugzilla.redhat.com/show_bug.cgi?id=1292045" target="_blank" title="External url">https://bugzilla.redhat.com/show_bug.cgi?id=1292045</a>
CONFIRM
<br>
</td>
</tr>
</tbody>
</table>
"""
references_table = self.cve_details_soup.find('table', id='vulnrefstable')
if references_table is None:
log.warning(f'--> No references table found for {self}.')
return
def list_all_urls(url_regex: str, url_handler: Callable = None):
""" Creates a list of URL that match a regex (or a list of regexes). If a handler method is passed as the second argument, then it
will be called for each URL in order to create and return a secondary list. This may be used to extract specific parts of the URL."""
a_list = references_table.find_all('a', href=url_regex)
url_list = []
for a in a_list:
url = a['href']
if re.search(self.project.url_pattern, url, re.IGNORECASE):
url_list.append(url)
secondary_list = []
if url_handler is not None:
for url in url_list:
secondary_value = url_handler(url)
if secondary_value is not None:
secondary_list.append(secondary_value)
return url_list, secondary_list
def get_query_param(url: str, query_key_list: list) -> Optional[str]:
""" Gets the value of the first parameter in a URL's query segment given a list of keys to check. """
split_url = urlsplit(url)
params = dict(parse_qsl(split_url.query))
result = None
for query_key in query_key_list:
result = params.get(query_key)
if result is not None:
break
return result
"""
Various helper methods to handle specific URLs from different sources.
"""
def handle_bugzilla_urls(url: str) -> Optional[str]:
id = get_query_param(url, ['id', 'bug_id'])
if id is None:
log.error(f'--> Could not find a valid Bugzilla ID in "{url}".')
return id
def handle_advisory_urls(url: str) -> Optional[str]:
split_url = urlsplit(url)
id = None
for regex in [ScrapingRegex.MFSA_ID, ScrapingRegex.XSA_ID, ScrapingRegex.APACHE_SECURITY_ID]:
match = regex.search(split_url.path)
if match is not None:
id = match.group(1)
if regex is ScrapingRegex.MFSA_ID:
id = id.upper()
id = id.replace('MFSA', 'MFSA-')
elif regex is ScrapingRegex.XSA_ID:
id = 'XSA-' + id
elif regex is ScrapingRegex.APACHE_SECURITY_ID:
id = 'APACHE-' + id[0] + '.' + id[1:]
break
if id is None:
log.error(f'--> Could not find a valid advisory ID in "{url}".')
return id
def handle_git_urls(url: str) -> Optional[str]:
commit_hash = get_query_param(url, ['id', 'h'])
if commit_hash is None:
split_url = urlsplit(url)
path_components = split_url.path.rsplit('/')
commit_hash = path_components[-1]
# If the hash length is less than 40, we need to refer to the repository
# to get the full hash.
if commit_hash is not None and len(commit_hash) < ScrapingRegex.GIT_COMMIT_HASH_LENGTH:
commit_hash = self.project.find_full_git_commit_hash(commit_hash)
if commit_hash is not None and not ScrapingRegex.GIT_COMMIT_HASH.match(commit_hash):
commit_hash = None
if commit_hash is None:
log.error(f'--> Could not find a valid commit hash in "{url}".')
return commit_hash
def handle_svn_urls(url: str) -> Optional[str]:
revision_number = get_query_param(url, ['rev', 'revision', 'pathrev'])
if revision_number is not None:
# In some rare cases, the revision number can be prefixed with 'r'.
# As such, we'll only extract the numeric part of this value.
match = ScrapingRegex.SVN_REVISION_NUMBER.search(revision_number)
if match is not None:
# For most cases, this is the same value.
revision_number = match.group(1)
else:
# For cases where the query parameter was not a valid number.
revision_number = None
if revision_number is None:
log.error(f'--> Could not find a valid revision number in "{url}".')
return revision_number
self.bugzilla_urls, self.bugzilla_ids = list_all_urls(ScrapingRegex.BUGZILLA_URL, handle_bugzilla_urls)
self.advisory_urls, self.advisory_ids = list_all_urls([ScrapingRegex.MFSA_URL, ScrapingRegex.XSA_URL, ScrapingRegex.APACHE_SECURITY_URL], handle_advisory_urls)
self.git_urls, self.git_commit_hashes = list_all_urls([ScrapingRegex.GIT_URL, ScrapingRegex.GITHUB_URL], handle_git_urls)
self.svn_urls, self.svn_revision_numbers = list_all_urls(ScrapingRegex.SVN_URL, handle_svn_urls)
def remove_duplicated_values(self):
""" Removes any duplicated values from specific CVE attributes that contain lists. """
self.vulnerability_types = remove_list_duplicates(self.vulnerability_types)
self.bugzilla_urls = remove_list_duplicates(self.bugzilla_urls)
self.bugzilla_ids = remove_list_duplicates(self.bugzilla_ids)
self.advisory_urls = remove_list_duplicates(self.advisory_urls)
self.advisory_ids = remove_list_duplicates(self.advisory_ids)
self.git_urls = remove_list_duplicates(self.git_urls)
self.git_commit_hashes = remove_list_duplicates(self.git_commit_hashes)
self.svn_urls = remove_list_duplicates(self.svn_urls)
self.svn_revision_numbers = remove_list_duplicates(self.svn_revision_numbers)
def serialize_containers(self):
""" Serializes specific CVE attributes that contain lists or dictionaries using JSON. """
self.vulnerability_types = serialize_json_container(self.vulnerability_types)
self.affected_products = serialize_json_container(self.affected_products)
self.bugzilla_urls = serialize_json_container(self.bugzilla_urls)
self.bugzilla_ids = serialize_json_container(self.bugzilla_ids)
self.advisory_urls = serialize_json_container(self.advisory_urls)
self.advisory_ids = serialize_json_container(self.advisory_ids)
self.advisory_info = serialize_json_container(self.advisory_info)
self.git_urls = serialize_json_container(self.git_urls)
self.git_commit_hashes = serialize_json_container(self.git_commit_hashes)
self.svn_urls = serialize_json_container(self.svn_urls)
self.svn_revision_numbers = serialize_json_container(self.svn_revision_numbers)
if __name__ == '__main__':
pass
```
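Since the products table is parsed by header position rather than by fixed column numbers, the pattern is easy to test in isolation. Below is a minimal, self-contained sketch of that header-driven lookup, run against an inline HTML snippet instead of a live CVE Details page; the snippet and printed values are illustrative only.
```python
# A minimal sketch of the header-driven table parsing used above.
import bs4

HTML = '''
<table id="vulnprodstable">
    <tr><th class="num">#</th><th>Vendor</th><th>Product</th><th>Version</th></tr>
    <tr><td class="num">1</td><td>Mozilla</td><td>Firefox</td><td>-</td></tr>
    <tr><td class="num">2</td><td>Netscape</td><td>Navigator</td><td>7.0.2</td></tr>
</table>
'''

soup = bs4.BeautifulSoup(HTML, 'html.parser')
table = soup.find('table', id='vulnprodstable')

# Map each column name to its index so the parser survives column reordering.
header = [th.get_text(strip=True) for th in table.find_all('th')]
column_indexes = {name: header.index(name) for name in ('Vendor', 'Product', 'Version')}

for tr in table.find_all('tr'):
    # Skip the header row, exactly like the scraper above.
    if tr.find('th'):
        continue
    td_list = tr.find_all('td')
    version = td_list[column_indexes['Version']].get_text(strip=True)
    version = None if version in ('', '-') else version
    print(td_list[column_indexes['Product']].get_text(strip=True), version)
# Prints: "Firefox None" and "Navigator 7.0.2".
```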
#### File: Scripts/modules/database.py
```python
import os
import subprocess
import sys
from typing import Iterator, Optional, Tuple, Union
from mysql.connector import MySQLConnection, Error as MySQLError # type: ignore
from mysql.connector.cursor import MySQLCursor # type: ignore
from .common import log, GLOBAL_CONFIG, DATABASE_CONFIG
class Database:
""" Represents a connection to the software vulnerability MySQL database. """
host: str
port: str
user: str
password: str
database: str
connection: MySQLConnection
cursor: MySQLCursor
input_directory_path: str
def __init__(self, config: dict = DATABASE_CONFIG, **kwargs):
try:
log.info(f'Connecting to the database with the following configurations: {config}')
for key, value in config.items():
setattr(self, key, value)
self.connection = MySQLConnection(**config)
self.cursor = self.connection.cursor(dictionary=True, **kwargs)
log.info(f'Autocommit is {self.connection.autocommit}.')
self.input_directory_path = os.path.abspath(GLOBAL_CONFIG['output_directory_path'])
except MySQLError as error:
log.error(f'Failed to connect to the database with the error: {repr(error)}')
sys.exit(1)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
try:
self.cursor.close()
self.connection.close()
except MySQLError as error:
log.error(f'Failed to close the connection to the database with the error: {repr(error)}')
def execute_query(self, query: str, commit: bool = False, **kwargs) -> Tuple[bool, Optional[int]]:
""" Executes a given SQL query and optionally commits the results. """
try:
self.cursor.execute(query, **kwargs)
if commit:
self.connection.commit()
success = True
error_code = None
except MySQLError as error:
success = False
error_code = error.errno
log.warning(f'Failed to execute the query "{query}" with the error: {repr(error)}')
return (success, error_code)
def commit(self) -> bool:
""" Commits the current transaction. """
try:
self.connection.commit()
success = True
except MySQLError as error:
success = False
log.error(f'Failed to perform the commit with the error: {repr(error)}')
return success
def rollback(self) -> bool:
""" Rolls back the current transaction. """
try:
self.connection.rollback()
success = True
except MySQLError as error:
success = False
log.error(f'Failed to perform the rollback with the error: {repr(error)}')
return success
def execute_script(self, script_path: str) -> Tuple[bool, str]:
""" Executes one or more SQL queries inside a file and returns the output of the MySQL command. """
arguments = ['mysql',
f'--host={self.host}', f'--port={self.port}', f'--user={self.user}', f'--password={self.password}',
'--default-character-set=utf8', '--comments', self.database]
try:
script_file = open(script_path)
result = subprocess.run(arguments, stdin=script_file, capture_output=True, text=True)
success = result.returncode == 0
output = result.stdout
if not success:
command_line_arguments = ' '.join(arguments)
error_message = result.stderr or result.stdout
log.error(f'Failed to run the command "{command_line_arguments}" with the error code {result.returncode} and the error message "{error_message}".')
except Exception as error:
success = False
output = ''
log.error(f'Failed to execute the script "{script_path}" with the error: {repr(error)}')
return (success, output)
def call_procedure(self, name: str, *args) -> Tuple[bool, tuple]:
""" Calls a previously created stored procedure. """
try:
output = self.cursor.callproc(name, args)
success = True
except Exception as error:
success = False
output = ()
log.error(f'Failed to call the procedure "{name}" with the error: {repr(error)}')
return (success, output)
```
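For context, a short usage sketch of the `Database` wrapper follows. It assumes a reachable MySQL server, the credentials in `DATABASE_CONFIG`, and the `modules.database` import path implied by the file layout; the query is illustrative.
```python
# A usage sketch for the Database context manager defined above.
from modules.database import Database

with Database() as db:
    success, error_code = db.execute_query('SELECT 1;')
    if success:
        for row in db.cursor:
            print(row)  # Each row is a dictionary because of cursor(dictionary=True).
    else:
        print(f'Query failed with MySQL error code {error_code}.')
```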
#### File: Scripts/modules/sats.py
```python
import os
import subprocess
import tempfile
from collections import namedtuple
from typing import cast, Optional, Tuple, Union
import bs4 # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
from .common import log, GLOBAL_CONFIG, delete_directory, delete_file, extract_numeric, get_path_in_data_directory
from .project import Project
####################################################################################################
class Sat():
""" Represents a third-party static analysis tool (SAT) and allows the execution of its commands. """
config: dict
name: str
executable_path: str
version: Optional[str]
project: Project
def __init__(self, name: str, project: Project):
self.config = GLOBAL_CONFIG['sats'][name]
self.name = name
self.executable_path = self.config['executable_path']
self.version = None
self.project = project
def __str__(self):
return self.name
def get_version(self) -> str:
""" Gets the tool's version number. """
return self.version or 'Unknown'
def run(self, *args) -> Tuple[bool, str]:
""" Runs the tool with a series of command line arguments. """
if self.executable_path is None:
return (False, '')
arguments = [self.executable_path] + [arg for arg in args]
result = subprocess.run(arguments, capture_output=True, text=True)
success = (result.returncode == 0)
if not success:
command_line_arguments = ' '.join(arguments)
error_message = result.stderr or result.stdout
log.error(f'Failed to run the command "{command_line_arguments}" with the error code {result.returncode} and the error message "{error_message}".')
return (success, result.stdout)
@staticmethod
def write_list_to_temporary_file(value_list: list) -> Optional[str]:
""" Writes a list to a temporary file, where each item appears in its own line. If this file cannot be created, this function returns None.
This file is closed before returning so it can be opened by other processes. For example, passing a list of file paths to a SAT. """
result = None
try:
_, temporary_file_path = tempfile.mkstemp()
with open(temporary_file_path, 'w') as temporary_file:
for value in value_list:
temporary_file.write(value + '\n')
result = temporary_file_path
except Exception as error:
log.error(f'Failed to write the list to a temporary file with the error: {repr(error)}')
return result
@staticmethod
def get_sat_info_from_config() -> list:
""" Creates a list of SAT information given the current configuration. """
template_list = list(GLOBAL_CONFIG['sats'].values())
SatInfo = namedtuple('SatInfo', template_list[0]) # type: ignore[misc]
sat_list = []
for name, items in GLOBAL_CONFIG['sats'].items():
sat_database_name = items['database_name']
if sat_database_name is not None:
should_be_allowed = GLOBAL_CONFIG['allowed_sats'].get(sat_database_name)
if should_be_allowed:
info = SatInfo(**items) # type: ignore[call-arg]
sat_list.append(info)
else:
log.info(f'Ignoring the SAT "{sat_database_name}".')
return sat_list
####################################################################################################
class UnderstandSat(Sat):
""" Represents the Understand tool, which is used to generate software metrics given a project's source files. """
use_new_database_format: bool
database_extension: str
def __init__(self, project: Project):
super().__init__('Understand', project)
version_success, build_number = self.run('version')
if version_success:
build_number = cast(str, extract_numeric(build_number))
self.version = build_number
self.use_new_database_format = int(build_number) >= 1039 # Understand 6.0 or later.
self.database_extension = '.und' if self.use_new_database_format else '.udb'
log.info(f'Loaded {self} version {self.version}.')
def generate_project_metrics(self, file_path_list: Union[list, bool], output_csv_path: str) -> bool:
""" Generates the project's metrics using the files and any other options defined in the database directory. """
"""
Understand Metrics Settings:
- WriteColumnTitles on/off (default on)
- ShowFunctionParameterTypes on/off (default off)
- ShowDeclaredInFile on/off (default off)
- FileNameDisplayMode NoPath/FullPath/RelativePath (default NoPath)
- DeclaredInFileDisplayMode NoPath/FullPath/RelativePath (default NoPath)
- OutputFile <CSV File Path> (default "<Database Name>.csv")
These were listed using the command: und list -all settings <Database Name>
"""
success = False
database_path = os.path.join(self.project.output_directory_path, self.project.short_name + self.database_extension)
if isinstance(file_path_list, bool):
file_path_list = [self.project.repository_path]
# Understand fails if one of the files doesn't exist on disk so we'll filter the paths before running it.
filtered_file_path_list = []
for file_path in file_path_list:
if os.path.isfile(file_path):
filtered_file_path_list.append(file_path)
else:
log.warning(f'Skipping the file path "{file_path}" since it does not exist on disk.')
file_path_list = filtered_file_path_list
del filtered_file_path_list
temporary_file_path = Sat.write_list_to_temporary_file(file_path_list)
if temporary_file_path:
success, _ = self.run (
'-quiet', '-db', database_path,
'create', '-languages', 'c++', # This value cannot be self.project.language since only "c++" is accepted.
'settings', '-metrics', 'all',
'-metricsWriteColumnTitles', 'on',
'-metricsShowFunctionParameterTypes', 'on',
'-metricsShowDeclaredInFile', 'on',
'-metricsFileNameDisplayMode', 'NoPath',
'-metricsDeclaredInFileDisplayMode', 'FullPath', # See below.
'-metricsOutputFile', output_csv_path,
'add', f'@{temporary_file_path}',
'analyze',
'metrics'
)
delete_file(temporary_file_path)
# Safeguard against the tool executing successfully without having created the CSV file.
success = success and os.path.isfile(output_csv_path)
if success:
try:
metrics = pd.read_csv(output_csv_path, dtype=str)
except pd.errors.ParserError as error:
log.warning(f'Could not parse the metrics in "{output_csv_path}" with the error: {repr(error)}')
metrics = pd.read_csv(output_csv_path, dtype=str, error_bad_lines=False, warn_bad_lines=True)
# Ideally, we'd just set the "DeclaredInFileDisplayMode" option to "RelativePath" and skip this step. However, doing that would
# lead to a few cases where the relative path to the file in the repository was incorrect.
metrics['File'] = metrics['File'].map(lambda x: self.project.get_relative_path_in_repository(x) if pd.notna(x) else x)
metrics.to_csv(output_csv_path, index=False)
if self.use_new_database_format:
delete_directory(database_path)
else:
delete_file(database_path)
return success
####################################################################################################
class CppcheckSat(Sat):
""" Represents the Cppcheck tool, which is used to generate security alerts given a project's source files. """
RULE_TO_CWE: dict = {}
mapped_rules_to_cwes: bool = False
def __init__(self, project: Project):
super().__init__('Cppcheck', project)
version_success, version_number = self.run('--version')
if version_success:
self.version = cast(Optional[str], extract_numeric(version_number, r'\d+\.\d+'))
log.info(f'Loaded {self} version {self.version}.')
if not CppcheckSat.mapped_rules_to_cwes:
CppcheckSat.mapped_rules_to_cwes = True
error_list_file_path = get_path_in_data_directory('cppcheck_error_list.xml')
with open(error_list_file_path) as xml_file:
error_soup = bs4.BeautifulSoup(xml_file, 'xml')
if error_soup is not None:
error_list = error_soup.find_all('error', id=True, cwe=True)
CppcheckSat.RULE_TO_CWE = {error['id']: error['cwe'] for error in error_list}
else:
log.error(f'Failed to map a list of SAT rules in "{error_list_file_path}" to their CWE values.')
def generate_project_alerts(self, file_path_list: Union[list, bool], output_csv_path: str) -> bool:
""" Generates the project's alerts given list of files. """
success = False
if self.project.include_directory_path is not None:
include_arguments = ['-I', self.project.include_directory_path]
else:
include_arguments = ['--suppress=missingInclude']
if isinstance(file_path_list, bool):
file_path_list = [self.project.repository_path]
temporary_file_path = Sat.write_list_to_temporary_file(file_path_list)
if temporary_file_path:
# The argument "--enable=error" is not necessary since it's enabled by default.
# @Future: Should "--force" be used? If so, remove "--suppress=toomanyconfigs".
success, _ = self.run (
'--quiet',
'--enable=warning,portability', '--inconclusive',
f'--language={self.project.language}', *include_arguments,
'--suppress=toomanyconfigs', '--suppress=unknownMacro', '--suppress=unmatchedSuppression',
'--template="{file}","{line}","{column}","{severity}","{id}","{cwe}","{message}"',
f'--output-file={output_csv_path}',
f'--file-list={temporary_file_path}'
)
delete_file(temporary_file_path)
# Safeguard against the tool executing successfully without having created the CSV file.
success = success and os.path.isfile(output_csv_path)
if success:
alerts = pd.read_csv(output_csv_path, header=None, names=['File', 'Line', 'Column', 'Severity', 'Rule', 'CWE', 'Message'], dtype=str)
alerts['File'] = alerts['File'].map(lambda x: None if x == 'nofile' else self.project.get_relative_path_in_repository(x))
alerts['Line'] = alerts['Line'].replace({'0': None})
alerts['Column'] = alerts['Column'].replace({'0': None})
alerts['CWE'] = alerts['CWE'].replace({'0': None})
alerts.to_csv(output_csv_path, index=False)
return success
def read_and_convert_output_csv_in_default_format(self, csv_file_path: str) -> pd.DataFrame:
""" Reads a CSV file generated using Cppcheck's default output parameters and converts it to a more convenient format. """
# The default CSV files generated by Cppcheck don't quote values with commas correctly.
# This means that pd.read_csv() would fail because some lines have more columns than others.
# We'll read each line ourselves and interpret anything after the fourth column as being part
# of the "Message" column. Format: "[FILE]:[LINE],[SEVERITY],[RULE],[MESSAGE]"
dictionary_list = []
with open(csv_file_path, 'r') as csv_file:
for line in csv_file:
# Some rare cases showed only "Segmentation fault (core dumped)" in the line.
if ':' not in line:
continue
# We'll assume that a source file's path never has a colon so we don't accidentally
# break paths with commas. In some rare cases the following can appear as the first
# value: ":,[ETC]". Since there's no file path or line number, we'll discard it below.
file_path = line_number = severity = rule = message = None
file_path, remaining_line = line.split(':', 1)
if remaining_line:
line_number, severity, rule, message = remaining_line.split(',', 3)
message = message.rstrip()
dictionary_list.append({'File': file_path, 'Line': line_number, 'Severity': severity, 'Rule': rule, 'Message': message})
alerts = pd.DataFrame.from_dict(dictionary_list, dtype=str)
alerts = alerts.replace({np.nan: None, '': None})
alerts.dropna(subset=['File', 'Line'], inplace=True)
alerts['File'] = alerts['File'].map(lambda x: None if x == 'nofile' else self.project.get_relative_path_in_repository(x))
alerts['CWE'] = alerts['Rule'].map(lambda x: CppcheckSat.RULE_TO_CWE.get(x, None))
return alerts
####################################################################################################
class FlawfinderSat(Sat):
""" Represents the Flawfinder tool, which is used to generate security alerts given a project's source files. """
def __init__(self, project: Project):
super().__init__('Flawfinder', project)
version_success, version_number = self.run('--version')
if version_success:
self.version = version_number.strip()
log.info(f'Loaded {self} version {self.version}.')
def generate_project_alerts(self, file_path_list: Union[list, bool], output_csv_path: str) -> bool:
""" Generates the project's alerts given list of files. """
raise NotImplementedError('Cannot yet generate alerts using Flawfinder.')
def read_and_convert_output_csv_in_default_format(self, csv_file_path: str) -> pd.DataFrame:
""" Reads a CSV file generated using Flawfinder's default output parameters and converts it to a more convenient format. """
alerts = pd.read_csv(csv_file_path, dtype=str)
alerts.dropna(subset=['File', 'Line', 'Level', 'Category', 'Name'], inplace=True)
alerts = alerts.replace({np.nan: None})
alerts['File'] = alerts['File'].map(lambda x: self.project.get_relative_path_in_repository(x))
return alerts
if __name__ == '__main__':
pass
```
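The `@file` argument passed to Understand and the `--file-list` option passed to Cppcheck above both come from `Sat.write_list_to_temporary_file`. A small demonstration of that helper follows; it assumes the `modules.sats` import path and an importable configuration, and the file paths are hypothetical.
```python
# A demonstration of the temporary file helper used to pass long file lists to SATs.
import os
from modules.sats import Sat  # Assumed import path, mirroring the module layout above.

file_paths = ['src/main.c', 'src/util.c', 'src/parser.c']  # Hypothetical paths.
temporary_file_path = Sat.write_list_to_temporary_file(file_paths)

if temporary_file_path is not None:
    with open(temporary_file_path) as file:
        print(file.read())  # One path per line, ready for e.g. "--file-list=<path>".
    os.remove(temporary_file_path)
```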
#### File: instances/vulnerability_prediction_final/InstanceBatchExecution.py
```python
import glob
import os
from copy import deepcopy
import propheticus.core.BatchExecution
from InstanceConfig import InstanceConfig
from modules.common import log, remove_list_duplicates
class InstanceBatchExecution(propheticus.core.BatchExecution):
def __init__(self, context):
super(InstanceBatchExecution, self).__init__(context)
# self.display_visuals = False
"""
The GUI methods are specified in 'propheticus/core/GUI.py'.
The data analysis techniques are specified in 'propheticus/core/DataAnalysis.py'.
GUI Methods:
def DataAnalysis(self, method)
def parseCurrentConfigurationAlgorithms(self, choice=None, skip_validation=False)
[there are others]
DataAnalysis Methods:
descriptiveAnalysis
boxPlots
barPlot
scatterPlotMatrix
correlationMatrixPlot
timeSeriesStd
timeSeries
lineGraphsStd
lineGraphs
parallelCoordinates
"""
self.Processes = [
{'method': 'DataAnalysis', 'arguments': ['lineGraphs']},
{'method': 'DataAnalysis', 'arguments': ['boxPlots']},
{'method': 'DataAnalysis', 'arguments': ['correlationMatrixPlot']},
{'method': 'parseCurrentConfigurationAlgorithms', 'arguments': [None, True]},
]
"""
The dimensionality reduction techniques are specified in 'propheticus/configs/DimensionalityReduction.py'.
The sampling techniques are specified in 'propheticus/configs/Sampling.py'.
The algorithms and their parameters are specified in 'propheticus/configs/Classification.py'.
Algorithm Parameters:
# Random Forests:
'n_estimators': {'type': 'int', 'default': 100},
'criterion': {'type': 'str', 'values': ['gini', 'entropy']},
'max_depth': {'type': ['int', 'None']},
'min_samples_split': {'type': ['int', 'float']},
'min_samples_leaf': {'type': ['int', 'float']},
'min_weight_fraction_leaf': {'type': 'float'},
'max_features': {'type': ['int', 'float', 'str', 'None']},
'max_leaf_nodes': {'type': ['int', 'None']},
'min_impurity_decrease': {'type': 'float'},
'min_impurity_split': {'type': 'float'},
'bootstrap': {'type': 'bool', 'values': [True, False]},
'oob_score': {'type': 'bool', 'values': [True, False]},
'n_jobs': {'type': ['int', 'None'], 'default': -1},
'random_state': {'hide': True, 'type': ['int', 'None']},
'verbose': {'type': 'int'},
'warm_start': {'type': 'bool', 'values': [True, False]},
'class_weight': {'type': ['dict', 'list-dicts', 'balanced', 'None']}
# Bagging:
'base_estimator': {'type': ''},
'n_estimators': {'type': '', 'default': 100},
'max_samples': {'type': ''},
'max_features': {'type': ''},
'bootstrap': {'type': ''},
'bootstrap_features': {'type': ''},
'oob_score': {'type': ''},
'warm_start': {'type': ''},
'n_jobs': {'type': ''},
'random_state': {'hide': True, 'type': ''},
'verbose': {'type': ''}
# XGBoost:
'n_estimators': {'type': 'int'},
'learning_rate': {'type': 'float'},
'max_depth': {'type': 'int'},
'subsample': {'type': 'float'},
'objective': {'type': 'str'},
'gamma': {'type': 'float'},
'alpha': {'type': 'float'},
'lambda': {'type': 'float'},
'random_state': {'hide': True, 'type': ''},
"""
base_configuration = {}
base_configuration['config_seed_count'] = InstanceConfig.PROPHETICUS['seed_count']
# Deprecated (see the config): base_configuration['config_cv_fold'] = 5
base_configuration['config_data_split_parameters'] = InstanceConfig.PROPHETICUS['data_split']
base_configuration['config_grid_search'] = False
base_configuration['config_binary_classification'] = False
# This is only used when 'config_binary_classification' is false.
prediction_classes = list(InstanceConfig.ClassesDescription.values())
base_configuration['datasets_positive_classes'] = prediction_classes[1:]
for code_unit in InstanceConfig.CODE_UNIT_LIST:
dataset_path = os.path.join(InstanceConfig.framework_instance_data_path, fr'propheticus-dataset-{code_unit}-*')
dataset_list = glob.glob(dataset_path)
if not dataset_list:
log.warning(f'Could not find any {code_unit} datasets.')
continue
dataset_list = [os.path.basename(path).split('.')[0] for path in dataset_list]
dataset_list = remove_list_duplicates(dataset_list)
log.info(f'Found the following {code_unit} datasets: {dataset_list}')
for target_label in InstanceConfig.PROPHETICUS['labels']:
excluded_label_list = [label for label in InstanceConfig.TARGET_LABEL_LIST if label != target_label]
for dimensionality_reduction in InstanceConfig.PROPHETICUS['dimensionality_reduction']:
for data_balancing in InstanceConfig.PROPHETICUS['data_balancing']:
# From the original demo:
# TODO: improve the following logic to allow passing more than one algorithm to improve performance
# TODO: use something similar to itertools but be aware of different lengths in configs; itertools.product stops at the minimum length
for classification_algorithm, raw_algorithm_parameters_list in InstanceConfig.PROPHETICUS['classification_algorithms'].items():
algorithm_parameters_list = []
for raw_algorithm_parameters in raw_algorithm_parameters_list:
if isinstance(raw_algorithm_parameters, dict):
dict_list = propheticus.shared.Utils.cartesianProductDictionaryLists(**raw_algorithm_parameters)
algorithm_parameters_list.extend(dict_list)
else:
algorithm_parameters_list.append(raw_algorithm_parameters)
for algorithm_parameters in algorithm_parameters_list:
configuration = deepcopy(base_configuration)
configuration['datasets'] = dataset_list
configuration['pre_target'] = target_label
configuration['pre_excluded_features'] = excluded_label_list
configuration['proc_reduce_dimensionality'] = dimensionality_reduction
configuration['proc_balance_data'] = data_balancing
configuration['proc_classification'] = [classification_algorithm]
configuration['proc_classification_algorithms_parameters'] = {}
if algorithm_parameters is not None:
configuration['proc_classification_algorithms_parameters'] = {classification_algorithm: algorithm_parameters}
log.info(f'Adding the batch configuration: {dataset_list}, {target_label}, {dimensionality_reduction}, {data_balancing}, {classification_algorithm}, {algorithm_parameters_list}')
self.Configurations.append(configuration)
log.info(f'Added a total of {len(self.Configurations)} batch configurations.')
```
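The inner loop above relies on `propheticus.shared.Utils.cartesianProductDictionaryLists` to expand a dictionary of parameter lists into one dictionary per combination. A self-contained sketch of that presumed behavior, using only `itertools`, follows.
```python
# A sketch of the parameter expansion consumed by the batch configuration loop:
# a dictionary of lists becomes one dictionary per combination of values.
import itertools

def cartesian_product_of_dict_lists(**kwargs) -> list:
    keys = list(kwargs)
    return [dict(zip(keys, values)) for values in itertools.product(*kwargs.values())]

raw_algorithm_parameters = {'n_estimators': [100, 200], 'criterion': ['gini', 'entropy']}
for parameters in cartesian_product_of_dict_lists(**raw_algorithm_parameters):
    print(parameters)
# Prints four combinations, e.g. {'n_estimators': 100, 'criterion': 'gini'}.
```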
{
"source": "joaohenggeler/twitch-chat-highlights",
"score": 3
}
#### File: twitch-chat-highlights/Source/common.py
```python
import json
import sqlite3
from datetime import datetime
from typing import Tuple, Union
####################################################################################################
class CommonConfig():
# From the config file.
json_config: dict
client_id: str
access_token: str
database_filename: str
def __init__(self):
with open('config.json') as file:
self.json_config = json.load(file)
self.__dict__.update(self.json_config['common'])
def connect_to_database(self) -> sqlite3.Connection:
db = sqlite3.connect(self.database_filename, isolation_level=None)
db.row_factory = sqlite3.Row
db.execute('''PRAGMA journal_mode = WAL;''')
db.execute('''PRAGMA synchronous = NORMAL;''')
db.execute('''PRAGMA temp_store = MEMORY;''')
db.execute('''
CREATE TABLE IF NOT EXISTS 'Channel'
(
'Id' INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
'Name' VARCHAR(50) NOT NULL UNIQUE
);
''')
db.execute('''
CREATE TABLE IF NOT EXISTS 'Video'
(
'Id' INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
'ChannelId' INTEGER NOT NULL,
'TwitchId' VARCHAR(50) NOT NULL UNIQUE,
'Title' TEXT NOT NULL,
'CreationTime' TIMESTAMP NOT NULL,
'Duration' TIME NOT NULL,
'YouTubeId' VARCHAR(50) UNIQUE,
'Notes' TEXT,
FOREIGN KEY (ChannelId) REFERENCES Channel (Id)
);
''')
# VideoId can be NULL when we're storing messages from a live stream, meaning there's no VOD yet.
db.execute('''
CREATE TABLE IF NOT EXISTS 'Chat'
(
'Id' INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
'ChannelId' INTEGER NOT NULL,
'VideoId' INTEGER,
'Timestamp' TIMESTAMP NOT NULL,
'Message' TEXT NOT NULL,
FOREIGN KEY (ChannelId) REFERENCES Channel (Id),
FOREIGN KEY (VideoId) REFERENCES Video (Id)
);
''')
return db
####################################################################################################
def split_twitch_duration(duration: str) -> Tuple[int, int, int, int]:
# Duration format: 00h00m00s or 00m00s
duration = duration.replace('h', ':').replace('m', ':').replace('s', '')
tokens = duration.split(':', 2)
hours = int(tokens[-3]) if len(tokens) >= 3 else 0
minutes = int(tokens[-2]) if len(tokens) >= 2 else 0
seconds = int(tokens[-1]) if len(tokens) >= 1 else 0
total_seconds = hours * 3600 + minutes * 60 + seconds
return hours, minutes, seconds, total_seconds
def convert_twitch_timestamp_to_datetime(timestamp: str) -> datetime:
# Datetime format: YYYY-MM-DDThh:mm:ss.sssZ
# Where the following precisions were observed:
# - YYYY-MM-DDThh:mm:ss.sssssssssZ
# - YYYY-MM-DDThh:mm:ss.ssZ
# - YYYY-MM-DDThh:mm:ss.sZ
# - YYYY-MM-DDThh:mm:ssZ
# Truncate anything past the microsecond precision.
if '.' in timestamp:
microseconds: Union[str, int]
beginning, microseconds = timestamp.rsplit('.', 1)
microseconds, _ = microseconds.rsplit('Z', 1)
timestamp = beginning + '.' + microseconds[:6].ljust(6, '0') + 'Z'
timestamp = timestamp.replace('Z', '+00:00')
return datetime.fromisoformat(timestamp)
```
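A quick demonstration of the two parsing helpers above, using duration and timestamp formats returned by the Twitch API. It assumes the script is run from the `Source` directory so that `common` is importable.
```python
# Demonstrates the Twitch duration and timestamp parsing helpers.
from common import split_twitch_duration, convert_twitch_timestamp_to_datetime

hours, minutes, seconds, total_seconds = split_twitch_duration('1h02m03s')
print(hours, minutes, seconds, total_seconds)  # 1 2 3 3723

creation_time = convert_twitch_timestamp_to_datetime('2021-06-01T12:34:56.789Z')
print(creation_time)  # 2021-06-01 12:34:56.789000+00:00
```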
{
"source": "JoaoHenrique12/uri_compiler",
"score": 3
}
#### File: tools/language_support/Cpp.py
```python
class Cpp11():
def create_file(self):
with open("main.cpp",'w') as f:
st = '''
#include<bits/stdc++.h>
using namespace std;
int main()
{
return 0;
}
'''
f.write(st)
class Cpp17():
def create_file(self):
with open("main.cpp",'w') as f:
st = '''
#include<bits/stdc++.h>
using namespace std;
int main()
{
return 0;
}
'''
f.write(st)
```
#### File: tools/language_support/C.py
```python
class C():
def create_file(self):
with open("main.c",'w') as f:
st = '''
#include<stdio.h>
#include<stdlib.h>
int main()
{
return 0;
}
'''
f.write(st)
```
#### File: tools/language_support/LanguageFactory.py
```python
from tools.language_support.Cpp import Cpp11, Cpp17
from tools.language_support.Paitom import Paitom
from tools.language_support.C import C
class LanguageFactory:
def __init__(self, st: str):
self.st = st.lower()
def create(self):
if self.st == "cpp11":
return Cpp11()
elif self.st == "cpp17":
return Cpp17()
elif self.st == "py":
return Paitom()
elif self.st == "c":
return C()
else:
return None
```
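A usage sketch for `LanguageFactory`: map a language tag to its template class and write the starter file into the current directory. The import path mirrors the file layout above.
```python
# Create the starter file for a chosen language via the factory.
from tools.language_support.LanguageFactory import LanguageFactory

language = LanguageFactory('cpp17').create()
if language is not None:
    language.create_file()  # Writes main.cpp with the C++ template.
else:
    print('Unsupported language.')
```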
{
"source": "joaohenriques/dungeon_generator",
"score": 3
}
#### File: dungeon_generator/renderers/__init__.py
```python
__author__ = 'jpsh'
from abc import ABCMeta, abstractmethod
class MapRenderer(object):
__metaclass__ = ABCMeta
@abstractmethod
def render(self, cave):
pass
@abstractmethod
def render_cell(self, cave):
pass
```
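A minimal concrete renderer, sketched to show how the abstract interface above is meant to be implemented. The cell representation ('#' for wall, '.' for floor) and the nested-list cave format are illustrative assumptions, not part of the original project.
```python
# A sketch of a concrete MapRenderer; the cave format is an assumption.
from renderers import MapRenderer

class TextRenderer(MapRenderer):
    """Renders a cave (a nested list of booleans) as lines of '#' and '.' characters."""

    def render(self, cave):
        return '\n'.join(''.join(self.render_cell(cell) for cell in row) for row in cave)

    def render_cell(self, cell):
        return '.' if cell else '#'

print(TextRenderer().render([[True, False], [False, True]]))
```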
{
"source": "joaohenry23/metlib",
"score": 3
}
#### File: metlib/metlib/functions.py
```python
import numpy as np
import xarray as xr
from . import flib
#-----------------------------------------------------------------------------------------------------------------------------------
# centered finite differences
def cdiff(Field, Dim):
"""
Calculates a centered finite difference of a Numpy array or Xarray.DataArray.
The function uses a Fortran subroutine to perform a quick calculation.
Parameters
----------
Field: Numpy array or Xarray.DataArray
Their structure can be:
- 2D [y,x]
- 3D [z,y,x]
- 4D [t,z,y,x]
Dim: String (str)
Defines axis of derivative and can be 'X', 'Y', 'Z', 'T'.
Returns
-------
CDIFF: Numpy array or Xarray.DataArray
Centered finite difference of Field in Dim. The shape is the same as the input Field.
"""
try:
assert type(Field) == np.ndarray or type(Field) == xr.DataArray
except AssertionError:
print('\nThe Field must be a Numpy array or Xarray.DataArray\n')
return
try:
assert Dim=='X' or Dim=='x' or Dim=='Y' or Dim=='y' or Dim=='Z' or Dim=='z' or Dim=='T' or Dim=='t'
except AssertionError:
print('\nYou need to specify the dimension X, Y, Z or T\n')
return
try:
assert Field.ndim >= 2 and Field.ndim <= 4
except AssertionError:
print('\nThe Field must have 2, 3 or 4 dimensions\n')
return
if type(Field) == np.ndarray:
FieldType = np.ndarray
elif type(Field) == xr.DataArray:
CoordsData = Field.coords
DimsData = Field.dims
try:
FieldUnits = Field.units
except:
FieldUnits = 'Field_units'
try:
FieldLongName = Field.long_name
except:
FieldLongName = 'Field_Name'
FieldType = xr.DataArray
Field = Field.values
# replaces np.nan before pass to fortran
Field = np.where(np.isnan(Field)==True,-999.99,Field)
if Field.ndim==2:
if Dim=='X' or Dim=='x':
axis = 1
elif Dim=='Y' or Dim=='y':
axis = 0
CDIFF = flib.cdiff2d(Field, axis)
elif Field.ndim==3:
if Dim=='X' or Dim=='x':
axis = 2
elif Dim=='Y' or Dim=='y':
axis = 1
elif Dim=='Z' or Dim=='z' or Dim=='T' or Dim=='t':
axis = 0
CDIFF = flib.cdiff3d(Field, axis)
elif Field.ndim==4:
if Dim=='X' or Dim=='x':
axis = 3
elif Dim=='Y' or Dim=='y':
axis = 2
elif Dim=='Z' or Dim=='z':
axis = 1
elif Dim=='T' or Dim=='t':
axis = 0
CDIFF = flib.cdiff4d(Field, axis)
CDIFF = np.where(CDIFF<-990.0, np.nan, CDIFF)
if FieldType == xr.DataArray:
CDIFF = xr.DataArray(CDIFF, coords=CoordsData, dims=DimsData)
CDIFF.name = 'cdiff'
CDIFF.attrs['units'] = FieldUnits
CDIFF.attrs['long_name'] = 'CDIFF_'+FieldLongName+'_in_'+Dim
CDIFF.attrs['standard_name'] = 'Centered_finite_difference_of_'+FieldLongName+'_in_'+Dim
return CDIFF;
#-----------------------------------------------------------------------------------------------------------------------------------
# dynamic calcs
def relative_vorticity(UComp, VComp, Lon=None, Lat=None):
'''
Calculates the relative vorticity of horizontal wind.
Parameters
----------
UComp: Numpy array or Xarray.DataArray
Zonal component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
VComp: Numpy array or Xarray.DataArray
Meridional component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
Lon: Numpy array
2D array with the longitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Lat: Numpy array
2D array with the latitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Returns
-------
vor: Numpy array or Xarray.DataArray
Relative vorticity of Ucomp and Vcomp [s**-1]
'''
if type(UComp) == type(VComp) == np.ndarray:
try:
assert type(Lon) == type(Lat) == np.ndarray
except AssertionError:
print('\nThe data input (UComp, VComp) are Numpy arrays, so you need to pass 2D arrays of Lon and Lat, e.g.:')
print('relative_vorticity(UComp, VComp, 2DLon, 2DLat)\n')
return
else:
dvdx = cdiff(VComp,'X')
dudy = cdiff(UComp*np.cos(Lat*np.pi/180.0),'Y')
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
vor = (dvdx/dx-dudy/dy)/(6.37e6*np.cos(Lat*np.pi/180.0))
elif type(UComp) == type(VComp) == xr.DataArray:
try:
assert UComp.dims == VComp.dims
except AssertionError:
print('\nThe data input (UComp, VComp) is Xarray.DataArray but they do not have the same dimensions\n')
return
else:
try:
assert True in [ True if word in (UComp.dims)[-1] else False for word in ['lon','LON','Lon'] ] and True in [ True if word in (UComp.dims)[-2] else False for word in ['lat','LAT','Lat'] ]
except AssertionError:
print('\nThe data input (UComp, VComp) is Xarray.DataArray and must have at least two dimensions [latitude, longitude]')
print('If the data input has three dimensions, its structure must be [level, latitude, longitude] or [time, latitude, longitude]')
print('If the data input has four dimensions, its structure must be [time, level, latitude, longitude] or [level, time, latitude, longitude]\n')
return
else:
CoordsData = UComp.coords
DimsData = UComp.dims
Lon = UComp.coords[(UComp.dims)[-1]].values
Lat = UComp.coords[(UComp.dims)[-2]].values
Lon, Lat = np.meshgrid(Lon, Lat)
dvdx = cdiff(VComp,'X').values
dudy = cdiff(UComp*np.cos(Lat*np.pi/180.0),'Y').values
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
vor = (dvdx/dx-dudy/dy)/(6.37e6*np.cos(Lat*np.pi/180.0))
vor = xr.DataArray(vor, coords=CoordsData, dims=DimsData)
vor.name = 'vor'
vor.attrs['units'] = 's**-1'
vor.attrs['long_name'] = 'Vorticity'
vor.attrs['standard_name'] = 'Relative_vorticity_of_wind'
return vor;
#-----------------------------------------------------------------------------------------------------------------------------------
# dynamic calcs
def absolute_vorticity(UComp, VComp, Lon=None, Lat=None):
'''
Calculates the absolute vorticity of horizontal wind.
Parameters
----------
UComp: Numpy array or Xarray.DataArray
Zonal component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
VComp: Numpy array or Xarray.DataArray
Meridional component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
Lon: Numpy array
2D array with the longitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Lat: Numpy array
2D array with the latitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Returns
-------
avor: Numpy array or Xarray.DataArray
Absolute vorticity of UComp and VComp [s**-1]
'''
if type(UComp) == type(VComp) == np.ndarray:
try:
assert type(Lon) == type(Lat) == np.ndarray
except AssertionError:
print('\nThe data input (UComp, VComp) are Numpy arrays, so you need to pass 2D arrays of Lon and Lat, e.g.:')
print('absolute_vorticity(UComp, VComp, 2DLon, 2DLat)\n')
return
else:
dvdx = cdiff(VComp,'X')
dudy = cdiff(UComp*np.cos(Lat*np.pi/180.0),'Y')
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
omega = 2.0*np.pi/86400.0
fc = 2*omega*np.sin(Lat*np.pi/180.0) # Coriolis parameter: f = 2*omega*sin(latitude).
avor = (dvdx/dx-dudy/dy)/(6.37e6*np.cos(Lat*np.pi/180.0)) + fc
elif type(UComp) == type(VComp) == xr.DataArray:
try:
assert UComp.dims == VComp.dims
except AssertionError:
print('\nThe data input (UComp, VComp) is Xarray.DataArray but they do not have the same dimensions\n')
return
else:
try:
assert True in [ True if word in (UComp.dims)[-1] else False for word in ['lon','LON','Lon'] ] and True in [ True if word in (UComp.dims)[-2] else False for word in ['lat','LAT','Lat'] ]
except AssertionError:
print('\nThe data input (UComp, VComp) is Xarray.DataArray and must have at least two dimensions [latitude, longitude]')
print('If the data input has three dimensions, its structure must be [level, latitude, longitude] or [time, latitude, longitude]')
print('If the data input has four dimensions, its structure must be [time, level, latitude, longitude] or [level, time, latitude, longitude]\n')
return
else:
CoordsData = UComp.coords
DimsData = UComp.dims
Lon = UComp.coords[(UComp.dims)[-1]].values
Lat = UComp.coords[(UComp.dims)[-2]].values
Lon, Lat = np.meshgrid(Lon, Lat)
dvdx = cdiff(VComp,'X').values
dudy = cdiff(UComp*np.cos(Lat*np.pi/180.0),'Y').values
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
omega = 2.0*np.pi/86400.0
fc = 2*omega*np.sin(Lat*np.pi/180.0) # Coriolis parameter: f = 2*omega*sin(latitude).
avor = (dvdx/dx-dudy/dy)/(6.37e6*np.cos(Lat*np.pi/180.0)) + fc
avor = xr.DataArray(avor, coords=CoordsData, dims=DimsData)
avor.name = 'avor'
avor.attrs['units'] = 's**-1'
avor.attrs['long_name'] = 'Absolute_vorticity'
avor.attrs['standard_name'] = 'Absolute_relative_vorticity_of_wind'
return avor;
#-----------------------------------------------------------------------------------------------------------------------------------
def divergence(UComp, VComp, Lon=None, Lat=None):
'''
Calculates the divergence of horizontal wind or some vector field.
Parameters
----------
UComp: Numpy array or Xarray.DataArray
Zonal component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
VComp: Numpy array or Xarray.DataArray
Meridional component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
Lon: Numpy array
2D array with the longitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Lat: Numpy array
2D array with the latitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Returns
-------
div: Numpy array or Xarray.DataArray
Horizontal divergence of Ucomp and Vcomp [1/s]
Negative divergence is also known as convergence.
'''
if type(UComp) == type(VComp) == np.ndarray:
try:
assert type(Lon) == type(Lat) == np.ndarray
except AssertionError:
print('\nThe data input (UComp, VComp) are Numpy arrays, so you need to pass 2D arrays of Lon and Lat, e.g.:')
print('divergence(UComp, VComp, 2DLon, 2DLat)\n')
return
else:
dudx = cdiff(UComp,'X')
dvdy = cdiff(VComp*np.cos(Lat*np.pi/180.0),'Y')
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
div = (dudx/dx+dvdy/dy)/(6.37e6*np.cos(Lat*np.pi/180.0))
elif type(UComp) == type(VComp) == xr.DataArray:
try:
assert UComp.dims == VComp.dims
except AssertionError:
print('\nThe data input (UComp, VComp) is Xarray.DataArray but they do not have the same dimensions\n')
return
else:
try:
assert True in [ True if word in (UComp.dims)[-1] else False for word in ['lon','LON','Lon'] ] and True in [ True if word in (UComp.dims)[-2] else False for word in ['lat','LAT','Lat'] ]
except AssertionError:
print('\nThe data input (UComp, VComp) is Xarray.DataArray and must have at least two dimensions [latitude, longitude]')
print('If the data input has three dimensions, its structure must be [level, latitude, longitude] or [time, latitude, longitude]')
print('If the data input has four dimensions, its structure must be [time, level, latitude, longitude] or [level, time, latitude, longitude]\n')
return
else:
CoordsData = UComp.coords
DimsData = UComp.dims
Lon = UComp.coords[(UComp.dims)[-1]].values
Lat = UComp.coords[(UComp.dims)[-2]].values
Lon, Lat = np.meshgrid(Lon, Lat)
dudx = cdiff(UComp,'X').values
dvdy = cdiff(VComp*np.cos(Lat*np.pi/180.0),'Y').values
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
div = (dudx/dx+dvdy/dy)/(6.37e6*np.cos(Lat*np.pi/180.0))
div = xr.DataArray(div, coords=CoordsData, dims=DimsData)
div.name = 'div'
div.attrs['units'] = 's**-1'
div.attrs['long_name'] = 'Divergence'
div.attrs['standard_name'] = 'Horizontal_divergence_of_wind'
return div;
#-----------------------------------------------------------------------------------------------------------------------------------
def advection(Field, UComp, VComp, Lon=None, Lat=None):
'''
Calculates the horizontal advection of Field.
Parameters
----------
Field: Numpy array or Xarray.DataArray
Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
UComp: Numpy array or Xarray.DataArray
Zonal component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
VComp: Numpy array or Xarray.DataArray
Meridional component of wind. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
Lon: Numpy array
2D array with the longitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Lat: Numpy array
2D array with the latitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Returns
-------
adv: Numpy array or Xarray.DataArray
Horizontal advection of Field [Field_units/s]
'''
if type(Field) == type(UComp) == type(VComp) == np.ndarray:
try:
assert type(Lon) == type(Lat) == np.ndarray
except AssertionError:
print('\nThe data input (Field, UComp, VComp) are Numpy arrays, so you need to pass 2D arrays of Lon and Lat, e.g.:')
print('advection(Field, UComp, VComp, 2DLon, 2DLat)\n')
return
else:
dfdx = cdiff(Field,'X')
dfdy = cdiff(Field,'Y')
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
adv = -1.0*( ((UComp*dfdx)/(np.cos(Lat*np.pi/180.0)*dx)) + ((VComp*dfdy)/(dy)) )/6.37e6
elif type(Field) == type(UComp) == type(VComp) == xr.DataArray:
try:
assert Field.dims == UComp.dims == VComp.dims
except AssertionError:
print('\nThe data input (Field, UComp, VComp) are Xarray.DataArray but they do not have the same dimensions\n')
return
else:
try:
assert True in [ True if word in (Field.dims)[-1] else False for word in ['lon','LON','Lon'] ] and True in [ True if word in (Field.dims)[-2] else False for word in ['lat','LAT','Lat'] ]
except AssertionError:
print('\nThe data input (Field, UComp, VComp) is Xarray.DataArray and must have at least two dimensions [latitude, longitude]')
print('If the data input has three dimensions, its structure must be [level, latitude, longitude] or [time, latitude, longitude]')
print('If the data input has four dimensions, its structure must be [time, level, latitude, longitude] or [level, time, latitude, longitude]\n')
return
else:
CoordsData = Field.coords
DimsData = Field.dims
try:
UnitsData = Field.units
except:
UnitsData = 'Field_units'
try:
LongNameData = Field.long_name
except:
LongNameData = 'Field_Name'
Lon = Field.coords[(Field.dims)[-1]].values
Lat = Field.coords[(Field.dims)[-2]].values
Lon, Lat = np.meshgrid(Lon, Lat)
dfdx = cdiff(Field,'X').values
dfdy = cdiff(Field,'Y').values
dx = cdiff(Lon,'X') * np.pi/180.0
dy = cdiff(Lat,'Y') * np.pi/180.0
adv = -1.0*( ((UComp*dfdx)/(np.cos(Lat*np.pi/180.0)*dx)) + ((VComp*dfdy)/(dy)) )/6.37e6
adv = xr.DataArray(adv, coords=CoordsData, dims=DimsData)
adv.name = 'adv'
adv.attrs['units'] = UnitsData+'/s'
adv.attrs['long_name'] = LongNameData+'_advection'
adv.attrs['standard_name'] = 'Horizontal_advection_of_'+LongNameData
return adv;
#-----------------------------------------------------------------------------------------------------------------------------------
def potential_temperature(Temperature, Levels=None):
'''
Calculates the potential temperature.
Parameters
----------
Temperature: Numpy array or Xarray.DataArray
Temperature field in Kelvin. Their structure can be:
- 2D [y,x]
- 3D [z,y,x] or [t,y,x]
- 4D [t,z,y,x]
Levels: Numpy array
1D array with pressure levels of Temperature.
Returns
-------
PTemp: Numpy array or Xarray.DataArray
Potential temperature [K].
'''
if type(Temperature) == np.ndarray:
try:
#assert Levels is not None
assert type(Levels) == np.ndarray #type(Levels) == list or
except AssertionError:
print('\nYou need to pass a 1D array of Levels, e.g.:')
print('potential_temperature(Temperature, Levels)\n')
return
else:
#if isinstance(Levels,list)==True:
# Levels = np.array(Levels,dtype=np.float32)
if Temperature.ndim == 2:
pass
elif Temperature.ndim == 3:
Levels = Levels[:,None,None]
elif Temperature.ndim == 4:
Levels = Levels[None,:,None,None]
PTemp = Temperature*np.power(1000.0/Levels,0.286)
elif type(Temperature) == xr.DataArray:
try:
assert True in [ True if word in (Temperature.dims)[-1] else False for word in ['lon','LON','Lon'] ] and True in [ True if word in (Temperature.dims)[-2] else False for word in ['lat','LAT','Lat'] ]
except AssertionError:
print('\nThe data input (Temperature) is Xarray.DataArray and must have at least two dimensions [latitude, longitude]')
print('If the data input has three dimensions, its structure must be [level, latitude, longitude]')
print('If the data input has four dimensions, its structure must be [time, level, latitude, longitude]\n')
return
else:
CoordsData = Temperature.coords
DimsData = Temperature.dims
Levels = Temperature.coords[(Temperature.dims)[-3]].values
if Temperature.ndim == 2:
pass
elif Temperature.ndim == 3:
Levels = Levels[:,None,None]
elif Temperature.ndim == 4:
Levels = Levels[None,:,None,None]
PTemp = Temperature*np.power(1000.0/Levels,0.286)
PTemp = xr.DataArray(PTemp, coords=CoordsData, dims=DimsData)
PTemp.name = 'PTemp'
PTemp.attrs['units'] = 'K'
PTemp.attrs['long_name'] = 'Potential_temperature'
PTemp.attrs['standard_name'] = 'Potential_temperature'
return PTemp;
#-----------------------------------------------------------------------------------------------------------------------------------
def potential_vorticity(Temperature, UComp, VComp, Lon=None, Lat=None, Levels=None):
'''
Calculates the baroclinic potential vorticity.
Parameters
----------
Temperature: Numpy array or Xarray.DataArray
Temperature field in Kelvin. Their structure can be:
- 3D [z,y,x]
- 4D [t,z,y,x]
UComp: Numpy array or Xarray.DataArray
Zonal component of wind. Their structure can be:
- 3D [z,y,x]
- 4D [t,z,y,x]
VComp: Numpy array or Xarray.DataArray
Meridional component of wind. Their structure can be:
- 3D [z,y,x]
- 4D [t,z,y,x]
Lon: Numpy array
2D array with the longitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Lat: Numpy array
2D array with the latitudes of UComp and VComp.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Levels: Numpy array
1D array with pressure levels of Temperature.
If UComp and VComp are xarray.DataArray, it is not necessary to define this parameter.
Returns
-------
PVor: Numpy array or Xarray.DataArray
Baroclinic potential vorticity [K m**2 kg**-1 s**-1].
'''
if type(Temperature) == type(UComp) == type(VComp) == np.ndarray:
try:
assert type(Lon) == type(Lat) == type(Levels) == np.ndarray
except AssertionError:
print('\nThe data input (Temperature, UComp, VComp) are Numpy arrays, so you need to pass 2D arrays of Lon and Lat,')
print('and a 1D array of Levels, e.g.:')
print('potential_vorticity(Temperature, UComp, VComp, 2DLon, 2DLat, 1DLevels)\n')
return
else:
if Temperature.ndim == 3:
Levels2 = Levels[:,None,None]*100.0
elif Temperature.ndim == 4:
Levels2 = Levels[None,:,None,None]*100.0
AVor = absolute_vorticity(UComp, VComp, Lon=Lon, Lat=Lat)
PTemp = potential_temperature(Temperature, Levels=Levels)
dx = 6.37e6 * cdiff(Lon,'X') * np.pi/180.0 * np.cos(Lat*np.pi/180.0)
dy = 6.37e6 * cdiff(Lat,'Y') * np.pi/180.0
dp = cdiff(Levels2,'Z')
dPTempdp = cdiff(PTemp,'Z')/dp
dUCompdp = cdiff(UComp,'Z')/dp
dVCompdp = cdiff(VComp,'Z')/dp
dPTempdx = cdiff(PTemp,'X')/dx
dPTempdy = cdiff(PTemp,'Y')/dy
PVor = -9.8*(AVor*dPTempdp - dVCompdp*dPTempdx + dUCompdp*dPTempdy)
elif type(Temperature) == type(UComp) == type(VComp) == xr.DataArray:
try:
assert True in [ True if word in (Temperature.dims)[-1] else False for word in ['lon','LON','Lon'] ] and True in [ True if word in (Temperature.dims)[-2] else False for word in ['lat','LAT','Lat'] ]
except AssertionError:
print('\nThe data input (Temperature, UComp, VComp) is Xarray.DataArray and must have at least three dimensions [levels, latitude, longitude]')
print('If the data input has three dimensions, its structure must be [levels, latitude, longitude]')
print('If the data input has four dimensions, its structure must be [times, level, latitude, longitude]\n')
return
else:
CoordsData = Temperature.coords
DimsData = Temperature.dims
Lon = Temperature.coords[(Temperature.dims)[-1]].values
Lat = Temperature.coords[(Temperature.dims)[-2]].values
Levels = Temperature.coords[(Temperature.dims)[-3]].values
Lon, Lat = np.meshgrid(Lon, Lat)
if Temperature.ndim == 3:
Levels = Levels[:,None,None]*100.0
elif Temperature.ndim == 4:
Levels = Levels[None,:,None,None]*100.0
AVor = absolute_vorticity(UComp, VComp).values
PTemp = potential_temperature(Temperature).values
dx = 6.37e6 * cdiff(Lon,'X') * np.pi/180.0 * np.cos(Lat*np.pi/180.0)
dy = 6.37e6 * cdiff(Lat,'Y') * np.pi/180.0
dp = cdiff(Levels,'Z')
dPTempdp = cdiff(PTemp,'Z')/dp
dUCompdp = cdiff(UComp.values,'Z')/dp
dVCompdp = cdiff(VComp.values,'Z')/dp
dPTempdx = cdiff(PTemp,'X')/dx
dPTempdy = cdiff(PTemp,'Y')/dy
PVor = -9.8*(AVor*dPTempdp - dVCompdp*dPTempdx + dUCompdp*dPTempdy)
PVor = xr.DataArray(PVor, coords=CoordsData, dims=DimsData)
PVor.name = 'PVor'
PVor.attrs['units'] = 'K m**2 kg**-1 s**-1'
PVor.attrs['long_name'] = 'Potential_vorticity'
PVor.attrs['standard_name'] = 'Potential_vorticity'
return PVor;
#-----------------------------------------------------------------------------------------------------------------------------------
```
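A usage sketch for the functions above on a synthetic grid. It assumes metlib and its compiled Fortran extension (`flib`) are installed and that `divergence` is exported at package level; otherwise import it from `metlib.functions`.
```python
# Compute the divergence of a simple synthetic wind field with metlib.
import numpy as np
from metlib import divergence

lat = np.linspace(-30.0, 30.0, 25)
lon = np.linspace(-80.0, -20.0, 25)
lon2d, lat2d = np.meshgrid(lon, lat)

u = 10.0 * np.cos(np.deg2rad(lat2d))  # Simple zonal wind field [m/s].
v = np.zeros_like(u)                  # No meridional wind.

div = divergence(u, v, Lon=lon2d, Lat=lat2d)
print(div.shape)  # Same shape as the input fields: (25, 25).
```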
{
"source": "joaoheron/data_mining",
"score": 3
}
#### File: data_mining/data_mining/cli.py
```python
import sys
import os
import click
from data_mining import data_mining
from data_mining.vars import adult_data_test
DB_URI = os.environ.get('DB_URI', None)
# Message template used when a required environment variable is missing.
MESSAGE_ENV_VAR_NOT_SET = 'The environment variable %s is not set.'
def verify_environment_variables():
if DB_URI is None:
raise Exception(MESSAGE_ENV_VAR_NOT_SET % ('DB_URI'))
@click.command()
def extract():
"""
Extract data from website.
"""
try:
data_mining.get_data()
except Exception as e:
raise e
@click.command()
def integrate():
"""
Integrate data from multiple sources into a single file.
"""
try:
data_mining.integrate_data()
except Exception as e:
raise e
@click.command()
def clean():
"""
Clean data removing invalid lines.
"""
try:
data_mining.clean_data(adult_data_test)
except Exception as e:
raise e
@click.command()
def build_data():
"""
Build new information from current data.
"""
try:
data_mining.build_data(adult_data_test)
except Exception as e:
raise e
@click.command()
def format_data():
"""
Format builded data.
"""
try:
data_mining.format_data(adult_data_test)
except Exception as e:
raise e
@click.command()
@click.option('--columns', '-c', help='Columns to be considered on the model.', multiple=True, required=True)
@click.option('--criterion', '-cr', default='entropy', help='The function to measure the quality of a split. Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain.')
@click.option('--splitter', '-s', default='best', help='The strategy used to choose the split at each node. Supported strategies are “best” to choose the best split and “random” to choose the best random split.')
@click.option('--max-depth', '-d', default=None, help='The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.', type=int)
@click.option('--min-samples-split', '-m', default=2, help='The minimum number of samples required to split an internal node.', type=int)
@click.option('--test-size', '-t', default=0.3, help='Percentage size of data test classifier.', type=float)
def build_tree(columns, criterion, splitter, max_depth, test_size, min_samples_split):
"""
Build decision tree.
"""
try:
data_mining.build_tree(columns, criterion, splitter, max_depth, test_size, min_samples_split)
except Exception as e:
raise e
@click.command()
def build_final_tree():
"""
Build decision tree.
"""
try:
data_mining.build_final_tree(adult_data_test)
except Exception as e:
raise e
@click.group()
def entry_point():
pass
entry_point.add_command(extract)
entry_point.add_command(integrate)
entry_point.add_command(clean)
entry_point.add_command(build_data)
entry_point.add_command(format_data)
entry_point.add_command(build_tree)
entry_point.add_command(build_final_tree)
if __name__ == "__main__":
sys.exit(entry_point()) # pragma: no cover
```
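A quick way to exercise these commands in-process is click's test runner. The sketch below is an illustration, not part of the repository; it assumes the `data_mining` package is importable and its input files are in place, and the column names are assumptions.

```python
from click.testing import CliRunner
from data_mining.cli import build_tree

runner = CliRunner()
result = runner.invoke(build_tree, [
    '-c', 'age', '-c', 'education_num',
    '--criterion', 'gini',
    '--test-size', '0.25',
])
print(result.output)
```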
#### File: data_mining/data_mining/crawler.py
```python
import time
import os
from data_mining import vars
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def enable_download_headless(browser, download_dir):
browser.command_executor._commands["send_command"] = (
"POST",
'/session/$sessionId/chromium/send_command')
params = {
'cmd': 'Page.setDownloadBehavior',
'params': {'behavior': 'allow','downloadPath': download_dir}
}
browser.execute("send_command", params)
def build_chrome_options():
chrome_options = Options()
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-notifications")
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--verbose')
chrome_options.add_experimental_option("prefs", {
"download.default_directory": vars.download,
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing_for_trusted_sources_enabled": False,
"safebrowsing.enabled": False
})
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-software-rasterizer')
chrome_options.add_argument('--headless=True')
return chrome_options
def extract_data():
# remove old files from /downloads folder
if os.path.exists(vars.adult_data):
os.remove(vars.adult_data)
if os.path.exists(vars.adult_test):
os.remove(vars.adult_test)
if os.path.exists(vars.adult_data_test):
os.remove(vars.adult_data_test)
# build options
chrome_options = build_chrome_options()
# initialize webdriver
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=vars.chromedriver_path)
# set download folder
enable_download_headless(driver, vars.download)
# navigate to url and downloads files
driver.get(vars.url_download_data)
driver.get(vars.url_download_test)
time.sleep(vars.TIMEOUT)
    driver.close()
    driver.quit()
```
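The fixed `time.sleep(vars.TIMEOUT)` above can miss slow downloads. A more robust alternative is to poll the download directory until the file lands; this is a hedged sketch, not repository code.

```python
# Poll until `path` exists and no partial Chrome .crdownload files remain,
# or until `timeout` seconds elapse.
import os
import time

def wait_for_download(path, timeout=60, poll=1):
    deadline = time.time() + timeout
    folder = os.path.dirname(path) or '.'
    while time.time() < deadline:
        partials = [f for f in os.listdir(folder) if f.endswith('.crdownload')]
        if os.path.exists(path) and not partials:
            return True
        time.sleep(poll)
    return False
```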
#### File: data_mining/data_mining/data_mining.py
```python
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from data_mining.vars import download, adult_data, adult_test, adult_data_test
from data_mining import crawler
from data_mining.utils import (
append_files,
create_continent_column,
delete_lines,
replace_characters,
build_final_decision_tree,
build_decision_tree,
create_index
)
models = []
def get_data():
print('Extracting data from website...')
crawler.extract_data()
print('Data has been extracted.')
def integrate_data():
print(f'Appending {adult_data_test}...')
append_files(
output_file=adult_data_test,
input_filenames=[adult_data, adult_test],
basepath=download
)
print(f'Data from {adult_data} and {adult_test} has been appended to {adult_data_test}.')
def clean_data(filename):
print('Deleting invalid lines...')
delete_lines(bad_word="?", basepath=download, filename=filename)
delete_lines(bad_word="|1x3", basepath=download, filename=filename)
delete_lines(bad_word="South", basepath=download, filename=filename)
replace_characters(', ', ',', filename=filename)
print('Invalid lines deleted.')
def build_data(filename):
print('Building data...')
create_index()
create_continent_column(basepath=download, filename=filename)
print('Data has been builded.')
def format_data(filename):
print('Formatting data...')
# Salary
replace_characters('>50K.', '>50K', filename=filename)
replace_characters('<=50K.', '<=50K', filename=filename)
# Country
replace_characters('Columbia', 'Colombia', filename=filename)
replace_characters('Hong', 'Hong Kong', filename=filename)
replace_characters('Trinadad&Tobago', 'Trinidad and Tobago', filename=filename)
replace_characters('United-States', 'United States', filename=filename)
replace_characters('Puerto-Rico', 'Puerto Rico', filename=filename)
replace_characters('Dominican-Republic', 'Dominican Republic', filename=filename)
replace_characters('El-Salvador', 'El Salvador', filename=filename)
replace_characters('Holand-Netherlands', 'Netherlands', filename=filename)
print('Data has been formatted.')
def build_final_tree(filename):
print('Building decision tree...')
build_final_decision_tree(filename)
print('Decision tree has been built')
def clear_decision_model():
print('cleared')
def build_tree(columns, criterion, splitter, max_depth, min_samples_split, test_size):
print('Building decision tree...')
build_decision_tree(columns, criterion, splitter, max_depth, min_samples_split, test_size)
print('Decision tree has been built')
```
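For reference, one plausible end-to-end run of the steps above, called directly rather than through the CLI (an illustration, not repository code):

```python
from data_mining import data_mining
from data_mining.vars import adult_data_test

data_mining.get_data()        # crawl and download the raw files
data_mining.integrate_data()  # append adult.data and adult.test
data_mining.clean_data(adult_data_test)
data_mining.build_data(adult_data_test)
data_mining.format_data(adult_data_test)
data_mining.build_final_tree(adult_data_test)
```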
#### File: data_mining/model/adult.py
```python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
Base = declarative_base()
class Adult(Base):
    __tablename__ = 'adult'
    age = Column("age", Integer, primary_key=True)
    workclass = Column("workclass", String)
    fnlwgt = Column("fnlwgt", String)
    education = Column("education", String)
    education_num = Column("education_num", String)
    marital_status = Column("marital_status", String)
    occupation = Column("occupation", String)
    relationship = Column("relationship", String)
    race = Column("race", String)
    sex = Column("sex", String)
    capital_gain = Column("capital_gain", String)
    capital_loss = Column("capital_loss", String)
    hours_per_week = Column("hours_per_week", String)
    native_country = Column("native_country", String)
    def __repr__(self):
        return ("<Adult(age='%s', workclass='%s', fnlwgt='%s', education='%s', "
                "education_num='%s', marital_status='%s', occupation='%s', "
                "relationship='%s', race='%s', sex='%s', capital_gain='%s', "
                "capital_loss='%s', hours_per_week='%s', native_country='%s')>" % (
                    self.age, self.workclass, self.fnlwgt, self.education,
                    self.education_num, self.marital_status, self.occupation,
                    self.relationship, self.race, self.sex, self.capital_gain,
                    self.capital_loss, self.hours_per_week, self.native_country))
``` |
{
"source": "joaoheron/eddy_bot",
"score": 3
} |
#### File: eddy_bot/eddy_bot/utils.py
```python
from random import choice
from yaml import load, FullLoader
def get_credentials(path):
with open(path, 'r') as f:
tagsl = [line.strip() for line in f]
return tagsl[0], tagsl[1]
def get_resource(path):
with open(path, 'r') as f:
resources = [line.strip() for line in f]
return resources
def get_yaml(path):
with open(path) as yaml_file:
obj = load(yaml_file, Loader=FullLoader)
return obj
def pick_random_resource(resources):
res = choice(resources)
return res
def get_comma_sepparated_values(values):
return [x.strip() for x in values.split(',')]
``` |
{
"source": "joaoheron/numba_pipelines_performance",
"score": 2
} |
#### File: numba_pipelines_performance/dags/numba_performance_dag.py
```python
from datetime import timedelta
from airflow.models import DAG
from airflow.utils import timezone
from airflow.operators.python_operator import PythonOperator
from utils.vars import email_list
from utils.numba_pipelines_performance import (
calc_elapsed_time_numpy_jit_c,
calc_elapsed_time_numpy_jit_python,
calc_elapsed_time_loop_jit_c,
calc_elapsed_time_loop_jit_python
)
start_date = timezone.utcnow().replace(
minute=0,
second=0,
microsecond=0
) - timedelta(hours=1)
default_args = {
'email': email_list,
'email_on_failure': True,
'email_on_retry': False,
'owner': 'airflow',
'start_date': start_date,
'concurrency': 1,
'retries': 1
}
dag = DAG(
dag_id='numba_performance',
default_args=default_args,
schedule_interval=timedelta(minutes=1440),
dagrun_timeout=timedelta(minutes=45),
)
"""
Executes python script with numba framework
"""
def build_numba_loop_python(dag):
return PythonOperator(
task_id='numba_performance_loop_python',
python_callable=calc_elapsed_time_loop_jit_python,
dag=dag
)
def build_numba_loop_c(dag):
return PythonOperator(
task_id='numba_performance_loop_c',
python_callable=calc_elapsed_time_loop_jit_c,
dag=dag
)
def build_numba_numpy_python(dag):
return PythonOperator(
task_id='numba_performance_numpy_python',
python_callable=calc_elapsed_time_numpy_jit_python,
dag=dag
)
def build_numba_numpy_c(dag):
return PythonOperator(
task_id='numba_performance_numpy_c',
python_callable=calc_elapsed_time_numpy_jit_c,
dag=dag
)
numba_numpy_python = build_numba_numpy_python(dag)
numba_numpy_c = build_numba_numpy_c(dag)
numba_loop_python = build_numba_loop_python(dag)
numba_loop_c = build_numba_loop_c(dag)
numba_loop_python >> numba_numpy_python
numba_loop_c >> numba_numpy_c
``` |
{
"source": "JoaoHFerreira/FileOrganizer",
"score": 3
} |
#### File: JoaoHFerreira/FileOrganizer/file_organizer.py
```python
import os
import pathlib
import yaml
class OrganizeFolderFiles:
    def __init__(self, yaml_origin):
        self.yaml_origin = yaml_origin
        self.configs = self._load_configs()
        self.root_path = self.configs.get("path")
        self.default_folders = self.configs.get("default_folders")
        self.destiny_root = self.configs.get("destiny_root")
    def _load_configs(self):
        with open(self.yaml_origin) as conf_file:
            configs = yaml.safe_load(conf_file)
        return configs
def execute(self):
file_dict_list, file_types = self._get_files(os.listdir(path=self.root_path))
self._to_correct_folder(file_dict_list, file_types)
def _get_files(self, files):
files = self._remove_folders(files)
file_types = [file.split(".")[-1] if len(file.split(".")) > 1 else "script" for file in files]
unique_types = self._get_single_file_types(file_types)
return self._build_files_dict(files, file_types), unique_types
def _remove_folders(self, files):
return [file for file in files if self._not_folder(file)]
def _not_folder(self, file):
return not os.path.isdir(os.path.join(self.root_path, file))
def _build_files_dict(self, files, file_types):
return [{file_type:file} for (file_type,file) in zip(file_types, files)]
@staticmethod
def _get_single_file_types(file_types):
return list(set(file_types))
def _to_correct_folder(self, file_dict_list, file_types):
for file_type in file_types:
self._move_file(file_type, file_dict_list)
def _move_file(self, file_type, file_dict_list):
for file_dict in file_dict_list:
if file_dict.get(file_type):
self._change_folder(file_type, file_dict[file_type])
continue
def _change_folder(self, folder_name, item_name):
folder_name = self._define_right_path(folder_name)
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
current = os.path.join(self.root_path, item_name)
destination = "/".join([folder_name, item_name])
os.replace(current, destination)
def _define_right_path(self, folder_name):
for key in self.default_folders:
if not self.default_folders[key]:
continue
path_name = self.default_folders[key].replace(" ","").split(",")
if folder_name in path_name:
return os.path.join(self.destiny_root, key, folder_name.capitalize())
return os.path.join(self.root_path, folder_name.capitalize())
if __name__ == "__main__":
    yaml_origin = pathlib.Path(__file__).parent.resolve() / "configs.yml"
    OrganizeFolderFiles(yaml_origin).execute()
``` |
{
"source": "Joaohigor/JCUsinagem",
"score": 2
} |
#### File: test/pecas_teste_python/peca_home_teste.py
```python
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from mommygae import mommy
from pecas_app.pecas_model import Peca
from routes import pecas
class HomeTests(GAETestCase):
def test_index(self):
mommy.save_one(Peca)
resposta = pecas.index()
self.assert_can_render(resposta)
```
#### File: test/pecas_testes/modelo_testes.py
```python
from __future__ import absolute_import, unicode_literals
import unittest
from base import GAETestCase
from pecas_app.pecas_model import Peca
from mommygae import mommy
class PecasTestes(GAETestCase):
def teste_salvar_peca(self):
        peca = mommy.make_one(Peca, title='Testando Teste')
        peca.put()
        pecas_em_bd = peca.query_by_creation().fetch()
        self.assertListEqual([peca], pecas_em_bd)
self.assertEqual('Testando Teste', pecas_em_bd[0].title)
``` |
{
"source": "Joao-Inacio/Curso-de-Python3",
"score": 4
} |
#### File: 01_Python_Basico_Intermediario/Aula030/aula30.py
```python
def func(*args, **kwargs):
print(args, kwargs)
lista = [1, 2, 3, 4, 5]
func(*lista, nome='João')
```
#### File: vendas/formata/preco.py
```python
def real(n):
return f'R${n:.2f}'.replace('.', ',')
```
#### File: 01_Python_Basico_Intermediario/Aula061/cnpj.py
```python
import re
def remover_caract(cnpj):
return re.sub(r'[^0-9]', '', cnpj)
```
#### File: 02_Python_POO/Aula_066/aula66.py
```python
class BaseDeDados:
def __init__(self):
self.__dados = {}
def inserir_clientes(self, id, nome):
if 'clientes' not in self.__dados:
self.__dados['clientes'] = {id: nome}
else:
self.__dados['clientes'].update({id: nome})
def lista_clientes(self):
for id, nome in self.__dados['clientes'].items():
print(id, nome)
def apaga_clientes(self, id):
del self.__dados['clientes'][id]
bd = BaseDeDados()
bd.inserir_clientes(1, 'João')
bd.inserir_clientes(2, 'Miranda')
bd.inserir_clientes(3, 'Rose')
bd.apaga_clientes(2)
bd.lista_clientes()
```
#### File: 02_Python_POO/Aula_069/classes.py
```python
class CarrinhoDeCompras:
def __init__(self):
self.produtos = []
def inserir_produtos(self, produto):
self.produtos.append(produto)
def lista_produtos(self):
for produto in self.produtos:
print(produto.nome, produto.valor)
def soma_total(self):
total = 0
for produto in self.produtos:
total += produto.valor
return total
class Produto:
def __init__(self, nome, valor):
self.nome = nome
self.valor = valor
```
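A minimal usage sketch for the two classes above (not part of the lesson file):

```python
carrinho = CarrinhoDeCompras()
carrinho.inserir_produtos(Produto('Caneta', 1.50))
carrinho.inserir_produtos(Produto('Caderno', 12.90))
carrinho.lista_produtos()
print(carrinho.soma_total())  # 14.4
```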
#### File: 02_Python_POO/Aula_077/main.py
```python
class Retangulo:
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return f"<class 'Retangulo({self.x}, {self.y})'>"
def __add__(self, other):
novo_x = self.x + other.x
novo_y = self.y + other.y
return Retangulo(novo_x, novo_y)
r1 = Retangulo(10, 20)
r2 = Retangulo(10, 20)
print(r1 + r2)
```
#### File: 02_Python_POO/Aula_082/main.py
```python
from dataclasses import dataclass
from dataclasses import field
@dataclass(eq=True, repr=True, order=True, frozen=False, init=True)
class Pessoa:
nome: str
sobrenome: str = field(repr=False)
def __post_init__(self):
if not isinstance(self.nome, str):
raise TypeError(
f'Tipo Inválido {type(self.nome).__name__} != str em {self}'
)
@property
def nome_completo(self):
return f'{self.nome} {self.sobrenome}'
p1 = Pessoa('João', 'Inácio')
print(p1.nome, p1.sobrenome)
```
#### File: Desafios/POO/banco.py
```python
class Banco:
def __init__(self):
self.agencia = [1111, 2222, 3333]
self.clientes = []
self.contas = []
def inserir_cliente(self, cliente):
self.clientes.append(cliente)
def inserir_conta(self, conta):
self.contas.append(conta)
def autenticar(self, cliente):
if cliente not in self.clientes:
return False
if cliente.conta not in self.contas:
return False
if cliente.conta.agencia not in self.agencia:
return False
return True
```
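`autenticar` expects client objects that carry a `conta` with an `agencia`; those classes live elsewhere in the challenge. A self-contained sketch with stand-in classes (assumptions, not the challenge's own definitions):

```python
class Conta:
    def __init__(self, agencia):
        self.agencia = agencia
class Cliente:
    def __init__(self, conta):
        self.conta = conta

banco = Banco()
cliente = Cliente(Conta(1111))
banco.inserir_cliente(cliente)
banco.inserir_conta(cliente.conta)
print(banco.autenticar(cliente))  # True: known client, account and agency
```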
#### File: Curso-de-Python3/Pratica/exe11.py
```python
def func1():
print(func2())
def func2():
return 'eai'
func1()
# Solution
def ola_mundo():
    return 'olá mundo!'
def mestre(funcao):
    return funcao()
resultado = mestre(ola_mundo)
print(resultado)
```
#### File: Curso-de-Python3/Pratica/exe9.py
```python
def funcao(n1, n2):
soma = (n1 * n2) / 100
total = n1 + soma
return total
print(funcao(500, 35))
```
#### File: Projeto_Ecommerce/pedido/views.py
```python
from django.shortcuts import render, redirect
from django.views.generic.list import ListView
from django.views import View
from django.http import HttpResponse
from django.contrib import messages
from produto.models import Variacao
from utils import utils
from .models import Pedido, ItemPedido
class Pagar(View):
template_name = 'pedido/pagar.html'
def get(self, *args, **kwargs):
if not self.request.user.is_authenticated:
messages.error(
self.request,
'Você Precisa fazer login.'
)
return redirect('perfil:criar')
if not self.request.session.get('carrinho'):
messages.error(
self.request,
'Carrinho vazio.'
)
return redirect('produtos:lista')
carrinho = self.request.session.get('carrinho')
carrinho_variacao_ids = [v for v in carrinho]
bd_variacoes = list(
Variacao.objects.select_related('produto').filter(
id__in=carrinho_variacao_ids)
)
for variacao in bd_variacoes:
vid = str(variacao.id)
estoque = variacao.estoque
qtd_carrinho = carrinho[vid]['quantidade']
preco_unt = carrinho[vid]['preco_unitario']
preco_unt_prom = carrinho[vid]['preco_unitario_promocional']
error_msg_estoque = ''
if estoque < qtd_carrinho:
carrinho[vid]['quantidade'] = estoque
carrinho[vid]['preco_quantitativo'] = estoque * preco_unt
carrinho[vid]['preco_quantitativo_promocional'] = estoque * \
preco_unt_prom
error_msg_estoque = 'Estoque insuficiente para alguns'\
' produtos do seu carrinho'
if error_msg_estoque:
messages.error(
self.request,
error_msg_estoque
)
self.request.session.save()
return redirect('produto:carrinho')
qtd_total_carrinho = utils.qtd_total_carr(carrinho)
valor_total_carrinho = utils.cart_totals(carrinho)
pedido = Pedido(
usuario=self.request.user,
total=valor_total_carrinho,
qtd_total=qtd_total_carrinho,
status='C',
)
pedido.save()
ItemPedido.objects.bulk_create(
[
ItemPedido(
pedido=pedido,
produto=v['produto_nome'],
produto_id=v['produto_id'],
variacao=v['variacao_nome'],
variacao_id=v['variacao_id'],
preco=v['preco_quantitativo'],
preco_promocional=v['preco_quantitativo_promocional'],
quantidade=v['quantidade'],
imagem=v['imagem'],
) for v in carrinho.values()
]
)
del self.request.session['carrinho']
return redirect('pedido:lista')
class SalvaPedido(View):
pass
class Detalhe(View):
pass
class Lista(View):
def get(self, request, *args, **kwargs):
return HttpResponse('Lista')
``` |
{
"source": "JoaoJanini/data_pipeline",
"score": 3
} |
#### File: JoaoJanini/data_pipeline/stepOne.py
```python
import pandas as pd
import luigi
import pandas.io.sql as psqlio
from datetime import datetime
import PSQLConn as psql
import helper
class ExtractFromDataBase(luigi.Task):
""" Task that Extracts the tables from the database given by the challenge to the local files.
Its output is a dic containing the path for all the tables.
You can use the parameter Date to define the date you want to run the task on.
The default date is the current date. """
# Sets the parameter date.
date = luigi.DateParameter(default=datetime.today())
# All the credentials to connect to the postgres database.
database="northwind"
user="northwind_user"
password="<PASSWORD>"
host="127.0.0.1"
port="5432"
# Assign credentials here.
cred = psql.PSQLConn(database, user, password, host, port)
conn = cred.connect()
def output(self):
return self.create_dic_paths()
def run(self):
# Gets the name of all the database tables.
list_tables = self.tables_names_list(self.conn)
for table in list_tables:
table_name = table
            # Fetch each table with a SELECT statement by its name and store the result as a dataframe.
dataframe = psqlio.read_sql(f'SELECT * FROM {table_name}', self.conn)
# Name of the csv containing the table.
outname = f'{table_name}.csv'
outdir = f"./data/postgres/{table_name}/{self.date}"
path = helper.createDir(outdir, outname)
dataframe.to_csv(path, index = False)
def create_dic_paths(self):
""" Function which creates a dictionary containing all the database tables names and their respective paths. """
tables = self.tables_names_list(self.conn)
dic_paths = {}
for table in tables:
dic_paths[table] = luigi.LocalTarget(f"./data/postgres/{table}/{self.date}/{table}.csv")
return dic_paths
def tables_names_list(self, conn):
""" Function which returns a list of all the tables in the database. """
s = ""
s += "SELECT"
s += " table_name"
s += " FROM information_schema.tables"
s += " WHERE"
s += " ("
s += " table_schema = 'public'"
s += " )"
s += " ORDER BY table_name;"
# db_cursor to lookup the tables' names
db_cursor = conn.cursor()
db_cursor.execute(s)
list_tables = db_cursor.fetchall()
tables_names = []
for table in list_tables:
tables_names.append(table[0])
return tables_names
class ExtractFromCSV(luigi.Task):
""" Task that Extracts the csv file containing the orders given by the challenge to the local files.
Its output is a dic containing the path for all the tables.
You can use the parameter Date to define the date you want to run the task on.
The default date is the current date. """
date = luigi.DateParameter(default=datetime.today())
def output(self):
return luigi.LocalTarget(f"./data/csv/{self.date}/order_details.csv")
def run(self):
order_details_df = pd.read_csv('data/order_details.csv')
# Name of the csv containing the orders.
outname = 'order_details.csv'
# Name of the csv where the directory will be saved.
outdir = f"./data/csv/{self.date}"
path = helper.createDir(outdir, outname)
order_details_df.to_csv(path, index = False)
```
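A hedged sketch of how these tasks might be launched locally with luigi's Python API, assuming the databases behind the hard-coded credentials are reachable:

```python
import luigi
from stepOne import ExtractFromDataBase, ExtractFromCSV

if __name__ == '__main__':
    luigi.build([ExtractFromDataBase(), ExtractFromCSV()], local_scheduler=True)
```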
#### File: JoaoJanini/data_pipeline/stepTwo.py
```python
import pandas as pd
import luigi
from datetime import datetime
import PSQLConn as psql
from stepOne import ExtractFromCSV, ExtractFromDataBase
import sqlalchemy
import helper
class ExtractLocal(luigi.Task):
# Task that extracts the order_details.csv and the orders.csv from the Local file
# to the Final postgres database.
# The task also returns the result of the query to the final database containing the order and its details.
# You can use the parameter Date to define the date you want to run the task on.
# The default date is the current date.
date = luigi.DateParameter(default=datetime.today())
# Parameters to connect to the final database.
database="db_final"
user="postgresUser"
password="<PASSWORD>"
host="127.0.0.1"
port="5433"
# Assign credentials here
cred = psql.PSQLConn(database, user, password, host, port)
conn = cred.connect()
# Alternative way of connecting to the database using the package sqlalchemy. It is the only method
# which works of the pandas package sql related functions.
engine = sqlalchemy.create_engine(f'postgresql://{user}:{password}@{host}:{port}/{database}')
def requires(self):
# Defines the dependency of the task. In this case the dependency are the completion of both
# the ExtractFromDataBase task and ExtractFromCSV task.
return { "data_base_tables": ExtractFromDataBase(self.date),
"orders_csv": ExtractFromCSV(self.date)}
def output(self):
# The task returns the csv resulted from the query.
return luigi.LocalTarget(f"./data/query/{self.date}/order_with_details.csv")
def run(self):
# Read both csv's from local file and put their content into dataframes.
df_orders_details = pd.read_csv(f"./data/csv/{self.date}/order_details.csv")
df_orders_table = pd.read_csv(f"./data/postgres/orders/{self.date}/orders.csv")
# Append the content of the dataframes to their respective tables on the database.
df_orders_details.to_sql(f'orders_details', con = self.engine, if_exists = 'append')
df_orders_table.to_sql(f'orders_table', con = self.engine, if_exists = 'append')
# Stores the result of the challenge query.
s = self.join_query()
# Makes the query and saves the content to a dataframe
dataframe_sql_query = pd.read_sql_query(s, con = self.engine )
# Name of the csv containing the csv.
outname = 'order_with_details.csv'
        # Name of the directory where the csv will be saved.
outdir = f"./data/query/{self.date}"
path_csv = helper.createDir(outdir, outname)
# Store the dataframe as a csv.
dataframe_sql_query.to_csv(path_csv, index = False)
def join_query(self):
# Specifies the content of the join query for the challenge.
s = ""
s += "SELECT"
s += " orders_table.order_id, customer_id,employee_id,order_date,required_date,shipped_date,ship_via,freight,ship_name,ship_address,ship_city,ship_region,ship_postal_code,ship_country,"
s += "product_id,unit_price,quantity,discount"
s += " FROM orders_table"
s += " RIGHT JOIN orders_details"
s += " ON orders_table.order_id = orders_details.order_id"
return s
``` |
{
"source": "JoaoJanini/Faixa_CEP",
"score": 3
} |
#### File: Faixa_CEP/faixa_cep/helpers.py
```python
from lxml.html import fromstring
import requests
import time
from itertools import cycle
import random
from bs4 import BeautifulSoup
import pandas as pd
import json
my_time_out = 20
def get_proxy_list():
"""Retorna uma lista de proxies supostamente válidos"""
url = 'https://free-proxy-list.net/'
response = requests.get(url)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr')[:10]:
if i.xpath('.//td[7][contains(text(),"yes")]'):
#Grabbing IP and corresponding PORT
proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])
proxies.add(proxy)
return proxies
#Learn how to save the json to a buffer instead of the file system
def df_to_jsonl(data_frame: pd.DataFrame, UF: str):
"""Recebe um DataFrame, retorna um jsonl"""
data_frame.to_json("tabela.json", orient = "records")
with open("./tabela.json") as f:
data = json.load(f)
#Turn DataFrame into Jsonl
with open(f'./{UF}.jsonl', 'w') as outfile:
for entry in data:
json.dump(entry, outfile)
outfile.write('\n')
def clean_data(df: pd.DataFrame):
"""Recebe um DataFrame, retorna um DataFrame tratado da maneira adequada """
df = df.drop_duplicates()
df = df.dropna()
return df
def make_post_request(post_fields, proxy):
"""Recebe dicionário com os post fields e um proxy válido, retora uma request"""
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "http://www.buscacep.correios.com.br",
"Referer": "http://www.buscacep.correios.com.br/sistemas/buscacep/buscaFaixaCep.cfm",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
}
url = "http://www.buscacep.correios.com.br/sistemas/buscacep/resultadoBuscaFaixaCEP.cfm"
print("")
print("Trying to Make the post request.")
request = requests.post(url = url, data = post_fields, headers = headers, proxies = {"http": proxy, "https": proxy}, timeout = my_time_out)
return request
def request_text_to_table(request, page_content_index):
"""Recebe request, retorna uma tabela de faixas de CEP"""
result = request.text
soup = BeautifulSoup(result, 'html.parser')
page_content = soup.find_all(name= 'table', class_ ='tmptabela')
tabela_UF = page_content[page_content_index]
return tabela_UF
def table_to_df(tabela_UF):
"""Recebe tabela, Retorna DataFrame"""
logradouros = pd.read_html(str(tabela_UF),flavor= "bs4")
page_table_data_frame = logradouros[0]
return page_table_data_frame
def proxy_list_to_cycle():
print("Tentando conseguir a proxy list")
proxy_list_attempts = 0
proxies = get_proxy_list()
while len(proxies) == 0:
proxies = get_proxy_list()
print("oi")
if proxy_list_attempts == 5:
print("There was a problem retrieving the proxies List. Try again later.")
raise Exception()
print(f"Tentativa: {proxy_list_attempts}")
proxy_list_attempts = proxy_list_attempts + 1
print(f"Proxy left: {proxies}")
#Turn the proxy list into a cycle
proxy_pool = cycle(proxies)
return proxy_pool
def request_to_dataframe(UF):
"""Recebe string do estado, retona DataFrame com faixa de CEP do estado"""
#Try to load the proxy list. If after several attempts it still doesn't work, raise an exception and quit.
proxy_pool = proxy_list_to_cycle()
#Set initial values for post request's parameters.
pagini = 1
pagfim = 50
count = 1
while True:
#random sleep times to decrease the chances of being blocked.
num1 = random.randint(2,5)
time.sleep(num1)
try:
#select_proxy from proxy pool.
proxy = next(proxy_pool)
print(f"Proxy atual: {proxy}")
#Define o post Field de acordo com a página Atual. Para a primeira página os campos "Bairro", "qtdrow", "pagini", "pagfim" não são considerados.
if count == 1:
post_fields = {"UF":UF, "Localidade":""}
full_dataframe = pd.DataFrame()
else:
post_fields = {"UF": UF, "Localidade":"**", "Bairro":"", "qtdrow":"50", "pagini":str(pagini),"pagfim": str(pagfim)}
#Makes the post request
request = make_post_request(post_fields, proxy)
            # Extracts the table with the CEP ranges from the HTML. On the first page the content is at index 1 of page_content; otherwise it is at index 0.
if count == 1:
UF_table = request_text_to_table(request = request, page_content_index = 1)
else:
UF_table = request_text_to_table(request = request, page_content_index = 0)
except requests.exceptions.ProxyError:
print("")
print(f"Error with the proxy: {proxy}")
print(f"Proxies left: {proxy_pool}")
print("Tentando novamente")
print("")
continue
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as err:
print("")
            print('Server taking too long to respond')
            print("Trying again")
print("")
continue
except Exception as e:
print("")
print(e)
proxy_pool = proxy_list_to_cycle()
continue
#Turning the table into a dataframe.
current_page_df = table_to_df(UF_table)
#Concat DataFrames for each page into one DataFrame
full_dataframe = pd.concat([full_dataframe, current_page_df])
print(f"Total de dados coletados sobre o Estado {UF}: {full_dataframe.shape[0]} ")
#Sair do loop de post requests para o estado atual se chegamos na última página.
if current_page_df.shape[0] < 49:
print(f"Última página do estado:{UF}")
break
#Incrementa o número da página e o contador de página.
pagini += 50
pagfim += 50
count = count + 1
return full_dataframe
``` |
{
"source": "joaojunior/crawler_tribunais",
"score": 3
} |
#### File: crawler_tribunais/scripts/generate_process_number.py
```python
def generate(year: str, j: str, tr: str, final: str, qty=10) -> str:
for i in range(qty):
n = int(f'{i}{year}{j}{tr}{final}00')
digit = 98 - (n % 97)
yield f'{i}-{digit}.{year}.{j}.{tr}.{final}'.rjust(25, '0')
def main():
process_tjal = generate('2018', '8', '02', '0001')
process_tjms = generate('2018', '8', '12', '0001')
for process in [process_tjal, process_tjms]:
for process_number in process:
print(process_number)
if __name__ == '__main__':
main()
```
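The `digit = 98 - (n % 97)` line implements the verifier-digit rule of the CNJ unified numbering scheme: the sequential number is concatenated with year, segment, court and origin, '00' is appended where the digits will go, and the remainder mod 97 is subtracted from 98. A standalone check of the first TJAL number (an illustration mirroring `generate()`):

```python
# Recompute the check digit for i=0, year=2018, segment 8, court 02, origin 0001.
n = int('0' + '2018' + '8' + '02' + '0001' + '00')
digit = 98 - (n % 97)
print(f'0-{digit}.2018.8.02.0001'.rjust(25, '0'))  # matches generate()'s first yield
```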
#### File: src/api/resources.py
```python
from flask_restful import Resource
from models import db, Process
from schemas import ProcessSchema
from tasks import crawler_task
class ProcessResource(Resource):
def get(self, process_number):
if self._process_number_is_valid(process_number) is False:
return {
'msg': f'Número do processo {process_number} inválido'}, 422
process = Process.query.get(process_number)
if process is None:
process = Process(process_number=process_number)
db.session.add(process)
db.session.commit()
crawler_task.delay(process_number, 1)
crawler_task.delay(process_number, 2)
process_schema = ProcessSchema()
return process_schema.jsonify(process)
@staticmethod
def _process_number_is_valid(process_number: str) -> bool:
if len(process_number) != 25:
return False
try:
final = process_number.find('.')
digits = int((process_number[8:final]).replace('-', ''))
calculated_process_number = (f'{process_number[:8]}'
f'{process_number[final:]}00')
calculated_process_number = int(calculated_process_number.replace(
'.', '').replace('-', ''))
digits_calculated = 98 - (calculated_process_number % 97)
except Exception:
return False
return digits == digits_calculated
```
#### File: src/crawler/crawlers.py
```python
from requests_html import HTMLSession
def get_page_from_first_instance_TJAL(process_number: str) -> str:
url = 'https://www2.tjal.jus.br/cpopg/search.do'
params = {
'cbPesquisa': 'NUMPROC',
'dadosConsulta.tipoNuProcesso': 'UNIFICADO',
'numeroDigitoAnoUnificado': process_number[:15],
'foroNumeroUnificado': process_number[-4:],
'dadosConsulta.valorConsultaNuUnificado': process_number,
}
return get_page(url, params)
def get_page_from_second_instance_TJAL(process_number: str) -> str:
url = 'https://www2.tjal.jus.br/cposg5/search.do?'
params = {
'cbPesquisa': 'NUMPROC',
'tipoNuProcesso': 'UNIFICADO',
'numeroDigitoAnoUnificado': process_number[:15],
'foroNumeroUnificado': process_number[-4:],
'dePesquisaNuUnificado': process_number,
'pbEnviar': 'Pesquisar'
}
return get_page(url, params)
def get_page_from_first_instance_TJMS(process_number: str) -> str:
url = 'https://esaj.tjms.jus.br/cpopg5/search.do'
params = {
'cbPesquisa': 'NUMPROC',
'dadosConsulta.tipoNuProcesso': 'UNIFICADO',
'numeroDigitoAnoUnificado': process_number[:15],
'foroNumeroUnificado': process_number[-4:],
'dadosConsulta.valorConsultaNuUnificado': process_number,
}
return get_page(url, params)
def get_page_from_second_instance_TJMS(process_number: str) -> str:
url = 'https://esaj.tjms.jus.br/cposg5/search.do'
params = {
'cbPesquisa': 'NUMPROC',
'tipoNuProcesso': 'UNIFICADO',
'numeroDigitoAnoUnificado': process_number[:15],
'foroNumeroUnificado': process_number[-4:],
'dePesquisaNuUnificado': process_number
}
return get_page(url, params)
def get_page(url: str, params: dict, timeout=30) -> str:
session = HTMLSession()
r = session.get(url, params=params, timeout=timeout)
return r.html.html
```
#### File: tests/crawler/test_parsers.py
```python
from crawler import parsers
class TestFirstGradeProcess:
def test_parser_movements_return_correct_list(self, movements):
expected = [
{'data': '25/11/2018',
'movimento': ('Informação do Sistema\nPJMS - Certidão de '
'realização de consulta de repetiçaõ de ação')},
{'data': '25/11/2018',
'movimento': ('Realizada pesquisa de suspeita de repetição '
'de ação\nNenhum processo localizado')},
{'data': '22/10/2018',
'movimento': ('Em Cartório-p/ Escrivão/Diretor preparar '
'Conclusão')},
{'data': '10/10/2018',
'movimento': ('Juntada de Petição Intermediária '
'Realizada\nNº Protocolo: WCGR.18.08405509-7 '
'Tipo da Petição: Manifestação do Autor '
'Data: 09/10/2018 14:59')},
{'data': '05/10/2018',
'movimento': ('Publicado ato publicado em data da '
'publicação.\nRelação :0273/2018 Data da '
'Publicação: 08/10/2018 Número do Diário: 4126')
}]
assert expected == parsers.movements(movements)
def test_parser_process_parts_return_correct_list(self, parts):
expected = [
[{'Autora': '<NAME>'},
{'Advogada': '<NAME>'},
{'Advogada': 'Ana Silvia Pessoa Salgado de Moura'}],
[{'Autora': '<NAME>'},
{'Advogada': '<NAME>'},
{'Advogada': 'Ana Silvia Pessoa Salgado de Moura'}],
[{'Autora': '<NAME>'},
{'Advogada': '<NAME>'},
{'Advogada': 'Ana Silvia Pessoa Salgado de Moura'}],
[{'Réu': 'Estado de Mato Grosso do Sul'},
{'RepreLeg': 'Procuradoria Geral do Estado de Mato Grosso do Sul'}
]
]
assert expected == parsers.parts(parts)
def test_parser_general_data_return_correct_dict(self, general_data):
expected = {'Classe': 'Procedimento Comum Cível', 'Área': 'Cível',
'Assunto': 'Enquadramento',
'Distribuição': '30/07/2018 às 12:39 - Automática',
'Juiz': 'Zidiel Infantino Coutinho',
'Valor da ação': 'R$ 10.000,00'}
assert expected == parsers.general_data(general_data)
def test_parser_return_correct_keys(self, process):
expected_keys = ['Dados do processo', 'Partes do processo',
'Movimentações']
actual = parsers.process(process.html)
assert expected_keys == list(actual.keys())
def test_parser_process_not_found_and_return_empty_values(
self, process_not_found
):
expected = {'Dados do processo': {}, 'Partes do processo': [],
'Movimentações': []}
actual = parsers.process(process_not_found.html)
assert expected == actual
class TestSecondGradeProcess:
def test_parser_movements_return_correct_list(self, second_movements):
expected = [
{'data': '28/05/2019',
'movimento': ('Certidão Emitida\nCERTIDÃO Certifico que foi '
'interposto Agravo em Recurso Especial da decisão '
'de fls. 433-437. Certifico, ainda, que esse '
'Agravo, foi recebido, no carimbo, em 27/05/2019, '
'devido a problemas no saj/protocolo. Maceió, 28 '
'de maio de 2019 <NAME> '
'Diretora Adjunta Especial de Assuntos Judiciários '
'Fernanda Luiza de Albuquerque Brasil Lins Técnica '
'Judiciária')},
{'data': '28/05/2019',
'movimento': 'Juntada de Petição de\nAgravo'},
{'data': '28/05/2019',
'movimento': 'Incidente Cadastrado\nSeq.: 50 - Agravo'},
{'data': '03/05/2019',
'movimento': ('Certidão Emitida\nCERTIFICO que foi '
'disponibilizada no Diário da Justiça Eletrônico '
'do Tribunal de Justiça de Alagoas em 03/05/2019 '
'a decisão de fls. 433-437 e considerada publicada '
'em 06/05/2019, nos termos do Artigo 4º, § 3º, da '
'Lei nº 11.419/2006. Maceió, 03 de maio de 2019 '
'<NAME> Diretora '
'Adjunta Especial de Assuntos Judiciários Fernanda '
'Luiza de Albuquerque Brasil Lins Técnica '
'Judiciária')},
{'data': '03/05/2019',
'movimento': 'Publicado\nDisponibilizado no DJE de 03/05/2019.'}]
assert expected == parsers.movements(second_movements)
def test_parser_process_parts_return_correct_list(self, second_parts):
expected = [
[{'Apelante': 'Importadora Auto Peças Ltda'},
{'Advogada': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogada': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogada': 'Flávia Nobre de Melo'},
{'Advogada': '<NAME>'},
{'Advogado': '<NAME>'}],
[{'Apelado': '<NAME>'},
{'Advogado': '<NAME>'},
{'Advogada': 'Priscila Araújo Guedes'},
{'Advogado': '<NAME>'}]
]
assert expected == parsers.parts(second_parts)
def test_parser_general_data_return_correct_dict(self,
second_general_data):
expected = {'Classe': 'Apelação', 'Área ': 'Cível',
'Assunto': 'Perdas e Danos',
'Distribuição': 'Vice-Presidência',
'Relator': 'DES. SEBASTIÃO COSTA FILHO',
'Valor da ação': '380,00'}
assert expected == parsers.general_data(second_general_data)
``` |
{
"source": "joaojunior/data_structure",
"score": 4
} |
#### File: binary_search_tree/graph/bfs.py
```python
class BFS():
def bfs(self, source, graph):
visited = {}
result = []
for node in graph.nodes:
visited[node] = False
nodes = [source]
while nodes:
node = nodes.pop(0)
if visited[node] is False:
visited[node] = True
result.append(node)
for adjacent in graph.adjacents(node):
nodes.append(adjacent)
return result
```
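A usage sketch mirroring the imports of the DFS test below; graph.py itself is not included in this snippet, so the Graph/Edge API is assumed from that test:

```python
from bfs import BFS
from graph import Edge, Graph
g = Graph()
g.insert_edge(Edge(0, 1, 10))
g.insert_edge(Edge(0, 2, 20))
g.insert_edge(Edge(1, 3, 10))
print(BFS().bfs(0, g))  # expected: [0, 1, 2, 3]
```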
#### File: binary_search_tree/graph/dfs_tests.py
```python
import unittest
from dfs import DFS
from graph import Edge, Graph
def create_edge(source, dest, size):
return Edge(source, dest, size)
class TestDFS(unittest.TestCase):
def setUp(self):
self.dfs = DFS()
self.graph = Graph()
self.graph.insert_edge(create_edge(0, 1, 10))
self.graph.insert_edge(create_edge(1, 3, 10))
self.graph.insert_edge(create_edge(0, 2, 20))
self.graph.insert_edge(create_edge(0, 3, 30))
self.graph.insert_edge(create_edge(2, 3, 60))
self.graph.insert_edge(create_edge(3, 4, 120))
def test_dfs(self):
result = self.dfs.dfs(0, self.graph)
expected = [0, 1, 3, 2, 4]
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
```
#### File: data_structure/double_linked_list/double_linked_list.py
```python
class Item():
def __init__(self, id_, value):
self.id_ = id_
self.value = value
self.next = None
self.before = None
class DoubleLinkedList():
def __init__(self):
self.size = 0
self.root = None
def insert(self, item):
if self.root is None:
self.root = item
else:
root = self.root
while root.next is not None:
root = root.next
item.before = root
root.next = item
self.size += 1
def search(self, id_):
founded = None
root = self.root
while founded is None and root is not None:
if id_ == root.id_:
founded = root
else:
root = root.next
return founded
    def delete(self, id_):
        item = None
        if self.root is not None and self.root.id_ == id_:
            item = self.root
            self.root = self.root.next
            if self.root is not None:
                self.root.before = None
else:
root = self.root
while root.next is not None and item is None:
if root.next.id_ == id_:
item = root.next
root.next = root.next.next
if root.next is not None:
root.next.before = root
else:
root = root.next
if item is not None:
self.size -= 1
return item
```
#### File: data_structure/double_linked_list/double_linked_list_tests.py
```python
import unittest
from double_linked_list import DoubleLinkedList, Item
def create_item(id_, value):
return Item(id_, value)
class TestDoubleLinkedList(unittest.TestCase):
def setUp(self):
self.double_list = DoubleLinkedList()
def test_insert_one_item(self):
self.assertEqual(0, self.double_list.size)
self.assertEqual(None, self.double_list.root)
item1 = create_item(1, 1)
self.double_list.insert(item1)
self.assertEqual(1, self.double_list.size)
self.assertEqual(item1, self.double_list.root)
self.assertEqual(None, self.double_list.root.next)
self.assertEqual(None, self.double_list.root.before)
def test_insert_two_items(self):
self.assertEqual(0, self.double_list.size)
self.assertEqual(None, self.double_list.root)
item1 = create_item(1, 1)
item2 = create_item(2, 2)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.assertEqual(2, self.double_list.size)
self.assertEqual(item1, self.double_list.root)
self.assertEqual(item2, self.double_list.root.next)
self.assertEqual(None, self.double_list.root.next.next)
self.assertEqual(item1, self.double_list.root.next.before)
def test_insert_three_items(self):
self.assertEqual(0, self.double_list.size)
self.assertEqual(None, self.double_list.root)
item1 = create_item(1, 1)
item2 = create_item(2, 2)
item3 = create_item(3, 3)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.double_list.insert(item3)
self.assertEqual(3, self.double_list.size)
self.assertEqual(item1, self.double_list.root)
self.assertEqual(item2, self.double_list.root.next)
self.assertEqual(item1, self.double_list.root.next.before)
self.assertEqual(item3, self.double_list.root.next.next)
self.assertEqual(item2, self.double_list.root.next.next.before)
self.assertEqual(item1, self.double_list.root.next.next.before.before)
def test_search_item_exist(self):
item1 = create_item(1, 1)
item2 = create_item(2, 2)
item3 = create_item(3, 3)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.double_list.insert(item3)
self.assertEqual(item3, self.double_list.search(item3.id_))
def test_search_item_not_exist(self):
item1 = create_item(1, 1)
item2 = create_item(2, 2)
item3 = create_item(3, 3)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.double_list.insert(item3)
self.assertEqual(None, self.double_list.search(4))
def test_search_in_list_empty(self):
self.assertEqual(None, self.double_list.search(1))
def test_remove_last_item(self):
item1 = create_item(1, 1)
item2 = create_item(2, 2)
item3 = create_item(3, 3)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.double_list.insert(item3)
item_removed = self.double_list.delete(item3.id_)
self.assertEqual(item3, item_removed)
self.assertEqual(2, self.double_list.size)
self.assertEqual(item1, self.double_list.root)
self.assertEqual(item2, self.double_list.root.next)
def test_remove_first_item(self):
item1 = create_item(1, 1)
item2 = create_item(2, 2)
item3 = create_item(3, 3)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.double_list.insert(item3)
item_removed = self.double_list.delete(item1.id_)
self.assertEqual(item1, item_removed)
self.assertEqual(2, self.double_list.size)
self.assertEqual(item2, self.double_list.root)
self.assertEqual(item3, self.double_list.root.next)
def test_remove_middle_item(self):
item1 = create_item(1, 1)
item2 = create_item(2, 2)
item3 = create_item(3, 3)
self.double_list.insert(item1)
self.double_list.insert(item2)
self.double_list.insert(item3)
item_removed = self.double_list.delete(item2.id_)
self.assertEqual(item2, item_removed)
self.assertEqual(2, self.double_list.size)
self.assertEqual(item1, self.double_list.root)
self.assertEqual(item3, self.double_list.root.next)
if __name__ == '__main__':
unittest.main()
```
#### File: data_structure/linked_list/linked_list.py
```python
class Item():
def __init__(self, id_, value):
self.id_ = id_
self.value = value
self.next = None
class LinkedList():
def __init__(self):
self.root = None
self.size = 0
    def insert(self, item):
        self.size += 1
        if self.root is None:
            self.root = item
        else:
            root = self.root
            while root.next is not None:
                root = root.next
            root.next = item
def search(self, id_):
root = self.root
item_found = None
while item_found is None and root is not None:
if id_ == root.id_:
item_found = root
else:
root = root.next
return item_found
def delete(self, id_):
root = self.root
deleted = None
if root is not None:
if id_ == root.id_:
deleted = root
self.root = root.next
while deleted is None and root.next is not None:
if id_ == root.next.id_:
deleted = root.next
root.next = root.next.next
else:
root = root.next
if deleted is not None:
self.size -= 1
return deleted
``` |
{
"source": "joaojunior/data_structures_and_algorithms",
"score": 4
} |
#### File: algorithms/sorting/shell_sort.py
```python
from typing import List
class ShellSort:
def sort(self, items: List) -> None:
size = len(items)
h = 1
while h < size / 3:
h = 3 * h + 1
while h >= 1:
for i in range(h, size):
j = i
while j >= h and items[j] < items[j-h]:
items[j-h], items[j] = items[j], items[j-h]
j -= h
h = h // 3
```
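Usage sketch: `sort` works in place on a mutable list.

```python
items = [5, 3, 8, 1, 9, 2]
ShellSort().sort(items)
print(items)  # [1, 2, 3, 5, 8, 9]
```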
#### File: data_structures_and_algorithms/python_implementations/draw.py
```python
from datetime import datetime
import matplotlib.pyplot as plt
def draw_bars(items, red_elements=None):
size = len(items)
bar = plt.bar(list(range(size)), items, color='b')
if red_elements:
for i in red_elements:
bar[i].set_color('r')
output = str(datetime.now().timestamp()).replace('.', '')
plt.savefig(output, dpi=300)
plt.close()
# ffmpeg -framerate 5 -pattern_type glob -i '*.png' -c:v libx264 -r 30 -pix_fmt yuv420p out.mp4 # noqa
# ffmpeg -i out.mp4 -filter_complex "[0:v] scale=480:-1" -f gif out.gif
```
#### File: python_implementations/ds/linked_list.py
```python
from typing import Any
class Node:
def __init__(self, item: Any) -> None:
self.item = item
self.next = None
class LinkedListIterator:
def __init__(self, head: Node) -> None:
self.__node = head
def __next__(self) -> Any:
if self.__node is not None:
item = self.__node.item
self.__node = self.__node.next
return item
else:
raise StopIteration
class LinkedList:
def __init__(self):
self._size = 0
self.head = None
def is_empty(self) -> bool:
return self._size == 0
def insert(self, item: Any) -> None:
new_node = Node(item)
if self.head is None:
self.head = new_node
else:
new_node.next = self.head
self.head = new_node
self._size += 1
def delete(self, item: Any) -> Any:
previous = None
current = self.head
while current is not None and current.item != item:
previous = current
current = current.next
if current is not None and current.item == item:
self._size -= 1
if current == self.head:
self.head = self.head.next
else:
previous.next = current.next
else:
raise ValueError(f'Item {item} is not in the LinkedList')
return item
def __iter__(self) -> LinkedListIterator:
return LinkedListIterator(self.head)
```
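Usage sketch: insertion happens at the head, so iteration yields items in reverse insertion order.

```python
ll = LinkedList()
for x in (1, 2, 3):
    ll.insert(x)
print(list(ll))  # [3, 2, 1]
ll.delete(2)
print(list(ll))  # [3, 1]
```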
#### File: python_implementations/ds/max_heap.py
```python
from typing import List
class MaxHeap:
@classmethod
def build_max_heap(cls: 'MaxHeap', items: List):
for i in range((len(items) - 1) // 2, -1, -1):
cls.max_heapify(items, i)
@classmethod
def max_heapify(cls: 'MaxHeap', items: List, i: int):
left = i * 2 + 1
right = left + 1
size = len(items)
if left < size:
j = i
if items[j] < items[left]:
j = left
if right < size and items[j] < items[right]:
j = right
if j != i:
items[i], items[j] = items[j], items[i]
cls.max_heapify(items, j)
```
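Usage sketch: after `build_max_heap`, the maximum sits at index 0 and every parent dominates its children.

```python
items = [1, 5, 3, 7, 2]
MaxHeap.build_max_heap(items)
print(items[0])  # 7
```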
#### File: python_implementations/ds/min_heap.py
```python
from typing import List
class MinHeap():
@classmethod
def build_min_heap(cls: 'MinHeap', items: List):
for i in range((len(items) - 1) // 2, -1, -1):
cls.min_heapify(items, i)
@classmethod
def min_heapify(cls: 'MinHeap', items: List, i: int):
left = 2 * i + 1
right = left + 1
size = len(items)
j = i
if left < size:
if items[j] > items[left]:
j = left
if right < size and items[j] > items[right]:
j = right
if j != i:
items[i], items[j] = items[j], items[i]
cls.min_heapify(items, j)
```
#### File: python_implementations/ds/queue.py
```python
from typing import Any
from ds.linked_list import LinkedList, LinkedListIterator, Node
class LinkedListInsertInLast(LinkedList):
def __init__(self):
super().__init__()
self.last = None
def insert(self, item: Any) -> None:
self._size += 1
new_node = Node(item)
if self.last is None:
self.head = new_node
else:
self.last.next = new_node
self.last = new_node
def delete_first_element(self) -> Any:
if self.head is not None:
self._size -= 1
item = self.head.item
self.head = self.head.next
if super().is_empty():
self.last = None
return item
else:
raise ValueError('Queue is empty')
class Queue:
def __init__(self) -> None:
self.__linked_list = LinkedListInsertInLast()
def is_empty(self) -> bool:
return self.__linked_list.is_empty()
def enqueue(self, item: Any) -> None:
self.__linked_list.insert(item)
def dequeue(self) -> Any:
return self.__linked_list.delete_first_element()
def __iter__(self) -> LinkedListIterator:
return LinkedListIterator(self.__linked_list.head)
```
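Usage sketch: the tail pointer makes `enqueue` O(1), and `dequeue` pops from the head, giving FIFO order.

```python
q = Queue()
q.enqueue('a')
q.enqueue('b')
print(q.dequeue())  # 'a'
print(list(q))      # ['b']
```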
#### File: algorithms/tree_traversals/test_in_order.py
```python
import pytest
from algorithms.tree_traversals.in_order import in_order
from ds.binary_search_tree import BST
@pytest.fixture
def bst():
_bst = BST()
assert _bst.is_empty()
return _bst
def test_in_order_sanity(bst):
values = [1, 2, 3, 4, 5]
for value in values:
bst.insert(value)
assert values == in_order(bst.root)
```
#### File: algorithms/tree_traversals/test_post_order.py
```python
import pytest
from algorithms.tree_traversals.post_order import post_order
from ds.binary_search_tree import BST
@pytest.fixture
def bst():
_bst = BST()
assert _bst.is_empty()
return _bst
def test_post_order_sanity(bst):
values = [3, 1, 5, 2, 4]
expected = [2, 1, 4, 5, 3]
for value in values:
bst.insert(value)
assert expected == post_order(bst.root)
```
#### File: tests/ds/test_linked_list.py
```python
import pytest
from ds.linked_list import LinkedList
@pytest.fixture
def linked_list():
return LinkedList()
def test_ensure_empty_list_when_create_an_instance(linked_list):
assert linked_list.is_empty()
assert [] == list(linked_list)
def test_insert_one_item_in_an_empty_linked_list(linked_list):
linked_list.insert(1)
assert [1] == list(linked_list)
def test_insert_n_items(linked_list):
items_to_insert = [1, 2, 3, 4, 5]
for item in items_to_insert:
linked_list.insert(item)
items_to_insert.reverse()
assert items_to_insert == list(linked_list)
def test_remove_unique_item(linked_list):
linked_list.insert(1)
assert 1 == linked_list.delete(1)
assert linked_list.is_empty()
assert [] == list(linked_list)
def test_remove_all_items(linked_list):
items_to_delete = [1, 2, 3, 4, 5]
for item in items_to_delete:
linked_list.insert(item)
for item in items_to_delete:
linked_list.delete(item)
assert linked_list.is_empty()
assert [] == list(linked_list)
def test_remove_last_item(linked_list):
items_to_insert = [1, 2, 3, 4]
item_to_delete = 5
for item in items_to_insert:
linked_list.insert(item)
linked_list.insert(item_to_delete)
assert item_to_delete == linked_list.delete(item_to_delete)
items_to_insert.reverse()
assert items_to_insert == list(linked_list)
def test_remove_first_item(linked_list):
items = [1, 2, 3, 4, 5]
for item in items:
linked_list.insert(item)
assert 1 == linked_list.delete(items.pop(0))
items.reverse()
assert items == list(linked_list)
def test_try_to_remove_in_empty_linked_list_raises_exception(linked_list):
with pytest.raises(ValueError, match="Item 1 is not in the LinkedList"):
linked_list.delete(1)
def test_try_to_remove_item_not_in_the_linked_list_raises_exception(
linked_list):
items = [1, 2, 3, 4, 5]
for item in items:
linked_list.insert(item)
with pytest.raises(ValueError, match="Item 6 is not in the LinkedList"):
linked_list.delete(6)
```
#### File: tests/ds/test_min_heap.py
```python
import heapq
import random
from ds.min_heap import MinHeap
def test_ensure_create_min_heap_with_empty_array():
items = []
MinHeap.build_min_heap(items)
assert [] == items
def test_create_min_heap_with_only_one_element():
items = [1]
MinHeap.build_min_heap(items)
assert [1] == items
def test_create_min_heap_with_two_elements():
items = [2, 1]
MinHeap.build_min_heap(items)
assert [1, 2] == items
def test_create_min_heap_with_n_elements_that_is_already_a_min_heap():
n = 16
items = list(range(n))
MinHeap.build_min_heap(items)
expected = list(range(n))
assert expected == items
def test_create_min_heap_with_n_elements_that_is_not_a_min_heap():
n = 16
items = list(range(n))
items.reverse()
MinHeap.build_min_heap(items)
expected = list(range(n))
expected.reverse()
heapq.heapify(expected)
assert expected == items
def test_create_min_heap_from_an_random_array():
n = 1000
items = list(range(n))
random.shuffle(items)
expected = items[:]
MinHeap.build_min_heap(items)
assert expected != items
heapq.heapify(expected)
assert items == expected
```
#### File: tests/ds/test_stack.py
```python
import pytest
from ds.stack import Stack
@pytest.fixture()
def stack():
return Stack()
def test_ensure_empty_stack_when_create_an_instance(stack):
assert stack.is_empty()
assert [] == list(stack)
def test_insert_one_item_in_an_empty_stack(stack):
stack.push(1)
assert [1] == list(stack)
def test_insert_n_items(stack):
items_to_insert = [1, 2, 3, 4, 5]
for item in items_to_insert:
stack.push(item)
items_to_insert.reverse()
assert items_to_insert == list(stack)
def test_remove_unique_item(stack):
stack.push(1)
assert 1 == stack.pop()
assert stack.is_empty()
assert [] == list(stack)
def test_remove_all_items(stack):
items_to_delete = [1, 2, 3, 4, 5]
for item in items_to_delete:
stack.push(item)
items_to_delete.reverse()
for item in items_to_delete:
assert item == stack.pop()
assert stack.is_empty()
assert [] == list(stack)
def test_try_to_remove_in_empty_stack_raises_exception(stack):
with pytest.raises(ValueError, match="Stack is empty"):
stack.pop()
``` |
{
"source": "joaojunior/example_load_balancer",
"score": 3
} |
#### File: joaojunior/example_load_balancer/server.py
```python
from http.server import BaseHTTPRequestHandler, HTTPServer
import os
from sys import argv
class Server(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text')
self.end_headers()
def do_GET(self):
self._set_headers()
host_name = os.environ['HOSTNAME']
self.wfile.write(bytes(f'server:{host_name}', "utf-8"))
def run(port, server_class=HTTPServer, handler_class=Server):
server_address = ("0.0.0.0", port)
httpd = server_class(server_address, handler_class)
print('Server started')
httpd.serve_forever()
if __name__ == "__main__":
run(port=int(argv[1]))
``` |
{
"source": "joaojunior/hackerrank",
"score": 4
} |
#### File: hackerrank/3sum-closest/three_sum_closest.py
```python
from typing import List
class Solution:
def three_sum_closest(self, nums: List[int], target: int) -> int:
nums.sort()
size = len(nums)
max_distance = 1000
result = 0
for i in range(size - 2):
left = i + 1
r = size - 1
while left < r:
sum_ = nums[i] + nums[left] + nums[r]
distance = abs(sum_ - target)
if distance < max_distance:
max_distance = distance
result = sum_
if sum_ > target:
r -= 1
elif sum_ < target:
left += 1
else:
return result
return result
```
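Usage sketch with the classic example: the triple -1 + 2 + 1 sums to 2, the closest achievable value to the target 1.

```python
print(Solution().three_sum_closest([-1, 2, 1, -4], 1))  # 2
```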
#### File: hackerrank/arithmetic_expression/main.py
```python
def arithmetic_expression(array):
size = len(array)
queue = [(array[0], str(array[0]))]
start = 0
i = 1
cache = {array[0]: True}
while i < size:
end = len(queue)
j = start
while j < end:
sum_, response = queue[j]
sum_plus = (sum_ + array[i]) % 101
sum_times = (sum_ * array[i]) % 101
sum_minus = (sum_ - array[i]) % 101
if sum_ % 101 == 0:
queue = [[sum_times, response + '*' + str(array[i])]]
j = 0
end = 0
else:
all_in_cache = (
sum_plus in cache and sum_times in cache and
sum_minus in cache
)
if all_in_cache:
queue.append(
[sum_plus, response + '+' + str(array[i])]
)
else:
if sum_plus not in cache:
queue.append(
[sum_plus, response + '+' + str(array[i])]
)
cache[sum_plus] = True
if sum_times not in cache:
queue.append(
[sum_times, response + '*' + str(array[i])]
)
cache[sum_times] = True
if sum_minus not in cache:
queue.append(
[sum_minus, response + '-' + str(array[i])]
)
cache[sum_minus] = True
j += 1
start = end
i += 1
for sum_, response in queue:
if sum_ % 101 == 0:
return response
if __name__ == '__main__':
n = int(input())
numbers = list(map(int, input().rstrip().split()))
print(arithmetic_expression(numbers))
```
#### File: hackerrank/combination-sum-iii/test_combination_sum_iii.py
```python
from combination_sum_iii import Solution
def test_example1():
n = 7
k = 3
expected = [[1, 2, 4]]
assert expected == Solution().combination_sum_3(k, n)
def test_example2():
n = 9
k = 3
expected = [[1, 2, 6], [1, 3, 5], [2, 3, 4]]
assert expected == Solution().combination_sum_3(k, n)
def test_example3():
n = 1
k = 4
expected = []
assert expected == Solution().combination_sum_3(k, n)
def test_example4():
n = 2
k = 3
expected = []
assert expected == Solution().combination_sum_3(k, n)
def test_example5():
n = 45
k = 9
expected = [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
assert expected == Solution().combination_sum_3(k, n)
```
#### File: hackerrank/container-with-most-water/container_with_most_water.py
```python
from typing import List
class Solution:
def max_area(self, height: List[int]) -> int:
i = 0
j = len(height) - 1
result = 0
while i < j:
if height[i] < height[j]:
area = (j - i) * height[i]
i += 1
else:
area = (j - i) * height[j]
j -= 1
result = max(result, area)
return result
```
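Usage sketch with the well-known example: the heights 8 and 7 at indices 1 and 8 bound the largest container, area 7 * 7 = 49.

```python
print(Solution().max_area([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # 49
```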
#### File: hackerrank/count-good-numbers/count_good_numbers.py
```python
class Solution:
def count_good_numbers(self, n: int) -> int:
result = 1
even_digits = 5
prime_numbers = 4
large = 1000000007
q, r = divmod(n, 2)
result = pow(even_digits, q + r, large) * pow(prime_numbers, q, large)
return result % large
```
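The counting logic: of the n digit positions (0-indexed), the even positions each take one of 5 even digits (0, 2, 4, 6, 8) and the odd positions one of the 4 prime digits (2, 3, 5, 7), so the answer is 5^(q+r) * 4^q with q, r = divmod(n, 2); the three-argument `pow` keeps the modular exponentiation fast. Usage sketch:

```python
s = Solution()
print(s.count_good_numbers(1))   # 5
print(s.count_good_numbers(4))   # 400
print(s.count_good_numbers(50))  # 564908303
```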
#### File: hackerrank/ctci-array-left-rotation/main.py
```python
def rot_left(array, d):
copy_array = array[:]
for i in range(len(array)):
array[i-d] = copy_array[i]
return array
if __name__ == '__main__':
nd = input().split()
n = int(nd[0])
d = int(nd[1])
array = list(map(int, input().rstrip().split()))
result = rot_left(array, d)
print(' '.join(map(str, result)))
```
#### File: hackerrank/even_tree/main.py
```python
from collections import defaultdict
class Graph():
def __init__(self, number_of_nodes):
self.number_of_nodes = number_of_nodes
self._edges = {}
self._adjacents = defaultdict(list)
def nodes(self):
return list(range(1, self.number_of_nodes + 1))
def edges(self):
return self._edges.keys()
def add_edge(self, source, dest):
self._edges[(source, dest)] = True
self._adjacents[source].append(dest)
self._adjacents[dest].append(source)
def remove_edge(self, edge):
self._edges[edge] = False
def return_edge(self, edge):
self._edges[edge] = True
def adjacents(self, node):
result = []
for dest in self._adjacents[node]:
edge_exist = self._edges.get((node, dest), False)
if edge_exist or self._edges.get((dest, node), False):
result.append(dest)
return result
def edge_not_connected_leaf(self, edge):
adjacents = self.adjacents(edge[0])
if len(adjacents) > 1:
adjacents = self.adjacents(edge[1])
if len(adjacents) > 1:
return True
return False
def bfs(graph, node):
visited = {}
queue = [node]
for node in graph.nodes():
visited[node] = False
while queue:
node = queue.pop(0)
if visited[node] is False:
visited[node] = True
for adjacent in graph.adjacents(node):
if visited[adjacent] is False:
queue.append(adjacent)
component = []
for key, value in visited.items():
if value is True:
component.append(key)
return component
def even_tree(graph):
number_removed_edges = 0
for edge in graph.edges():
if graph.edge_not_connected_leaf(edge):
graph.remove_edge(edge)
removed = True
nodes = graph.nodes()
while nodes and removed:
node = nodes.pop(0)
component = bfs(graph, node)
if len(component) % 2 == 0:
for node in component:
if node in nodes:
nodes.remove(node)
else:
removed = False
if removed is True:
number_removed_edges += 1
else:
graph.return_edge(edge)
return number_removed_edges
if __name__ == '__main__':
number_of_nodes, number_of_edges = input().split()
number_of_nodes = int(number_of_nodes)
number_of_edges = int(number_of_edges)
graph = Graph(number_of_nodes)
for edge in range(number_of_edges):
source, dest = input().split()
graph.add_edge(int(source), int(dest))
print(even_tree(graph))
```
#### File: hackerrank/find-peak-element/test_find_peak_element.py
```python
from find_peak_element import Solution
def test_example1():
nums = [1, 2, 3, 1]
expected = 2
assert expected == Solution().find_peak_element(nums)
def test_example2():
nums = [1, 2, 1, 3, 5, 6, 4]
expected = 1
assert expected == Solution().find_peak_element(nums)
```
#### File: hackerrank/find-the-winner-of-the-circular-game/test_find_the_winner_of_the_circular_game.py
```python
from find_the_winner_of_the_circular_game import Solution
def test_example_1():
n = 5
k = 2
expected = 3
assert expected == Solution().find_the_winner(n, k)
def test_example_2():
n = 6
k = 5
expected = 1
assert expected == Solution().find_the_winner(n, k)
```
#### File: hackerrank/jim_and_the_orders/main.py
```python
def jimOrders(orders):
times = []
for key, order in enumerate(orders):
times.append((key+1, order[0] + order[1]))
times = sorted(times, key=lambda x: (x[1], x[0]))
return times
if __name__ == "__main__":
n = int(input().strip())
orders = []
for orders_i in range(n):
o_t = [int(orders_temp) for orders_temp in input().strip().split(' ')]
orders.append(o_t)
result = jimOrders(orders)
for key, value in result:
print(key, end=' ')
```
#### File: leetcode/3sum/main.py
```python
class Solution:
def three_sum(self, nums):
self.nums = sorted(nums)
self.size = len(self.nums)
self.result = []
for i in range(self.size):
self.find_sum(i)
return self.result
def find_sum(self, i):
value = 0 - self.nums[i]
j = i + 1
k = self.size - 1
while j < k:
if self.nums[j] + self.nums[k] == value:
r = [self.nums[i], self.nums[j], self.nums[k]]
if r not in self.result:
self.result.append(r)
j += 1
k -= 1
elif self.nums[j] + self.nums[k] > value:
k -= 1
else:
j += 1
if __name__ == '__main__':
nums = [-1, 0, 1, 2, -1, -4]
solution = Solution()
print(solution.three_sum(nums))
print(solution.three_sum([-2, 0, 1, 1, 2]))
```
#### File: leetcode/add-and-search-word-data-structure-design/main.py
```python
class WordDictionary:
def __init__(self):
self.letters = {}
self.is_end = False
def add_word(self, word: str):
trie = self
for c in word:
if c in trie.letters:
trie = trie.letters[c]
else:
trie.letters[c] = WordDictionary()
trie = trie.letters[c]
trie.is_end = True
def search(self, word: str, trie=None) -> bool:
if trie is None:
trie = self
for i in range(len(word)):
c = word[i]
if c == '.':
for t in trie.letters.values():
result = self.search(word[i+1:], t)
if result is True:
return True
return False
else:
if c in trie.letters:
trie = trie.letters[c]
else:
return False
return trie.is_end
```
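A usage sketch mirroring the standard example for this data structure, assuming `WordDictionary` above is in scope:
```python
d = WordDictionary()
for word in ('bad', 'dad', 'mad'):
    d.add_word(word)
assert d.search('pad') is False
assert d.search('bad') is True
assert d.search('.ad') is True   # '.' matches any single letter
assert d.search('b..') is True
```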
#### File: leetcode/binary-tree-pruning/main.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def prune_tree(self, root: TreeNode) -> TreeNode:
self.post_order(root)
return root
def post_order(self, root) -> bool:
if root is not None:
left = self.post_order(root.left)
right = self.post_order(root.right)
if left is False:
root.left = None
if right is False:
root.right = None
if left is False and right is False:
return root.val == 1
else:
return True
else:
return False
```
#### File: leetcode/clone-graph/main.py
```python
class UndirectedGraphNode:
def __init__(self, x):
self.label = x
self.neighbors = []
class Solution:
def clone_graph(self, node: UndirectedGraphNode) -> UndirectedGraphNode:
new = None
map_ = {}
if node is not None:
queue = [node]
while queue:
node = queue.pop(0)
if new is None:
new = UndirectedGraphNode(node.label)
root = new
else:
if id(node) not in map_:
root = UndirectedGraphNode(node.label)
else:
root = map_[id(node)]
for n in node.neighbors:
if id(n) not in map_:
n1 = UndirectedGraphNode(n.label)
root.neighbors.append(n1)
queue.append(n)
map_[id(n)] = n1
else:
root.neighbors.append(map_[id(n)])
return new
```
#### File: leetcode/construct-binary-search-tree-from-preorder-traversal/main.py
```python
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def bst_from_preorder(self, preorder: List[int]) -> TreeNode:
val = preorder.pop(0)
root = TreeNode(val)
i = self.position_val_is_greater_than(val, preorder)
if i is not None:
left_preorder = preorder[:i]
right_preorder = preorder[i:]
else:
left_preorder = preorder
right_preorder = []
if left_preorder:
root.left = self.bst_from_preorder(left_preorder)
if right_preorder:
root.right = self.bst_from_preorder(right_preorder)
return root
def position_val_is_greater_than(self, val: int, data: List[int]) -> int:
for i, v in enumerate(data):
if v > val:
return i
```
#### File: leetcode/copy-list-with-random-pointer/main.py
```python
class RandomListNode():
def __init__(self, x: int):
self.label = x
self.next = None
self.random = None
class Solution():
def copy_random_list(self, root: RandomListNode) -> RandomListNode:
head = None
if root is not None:
pointers = {}
new_root = RandomListNode(root.label)
pointers[id(root)] = new_root
head = new_root
while (root is not None and
(root.next is not None or root.random is not None)):
if root.next is not None:
if id(root.next) not in pointers:
new_root.next = RandomListNode(root.next.label)
pointers[id(root.next)] = new_root.next
else:
new_root.next = pointers[id(root.next)]
if root.random is not None:
if id(root.random) not in pointers:
new_root.random = RandomListNode(root.random.label)
pointers[id(root.random)] = new_root.random
else:
new_root.random = pointers[id(root.random)]
root = root.next
new_root = new_root.next
return head
```
#### File: leetcode/course_schedule_II/main.py
```python
from typing import List
class Solution():
ZERO = 0
ONE = 1
TWO = 2
def find_order(self, num_courses: int,
prerequisites: List[List[int]]) -> int:
self.graph = {}
for dest, source in prerequisites:
if source in self.graph:
self.graph[source].append(dest)
else:
self.graph[source] = [dest]
self.colors = {}
for node in range(num_courses):
self.colors[node] = self.ZERO
self.result = []
for node in range(num_courses):
if self.visit(node) is False:
return []
return self.result
def visit(self, node):
if self.colors[node] == self.TWO:
return True
if self.colors[node] == self.ONE:
return False
self.colors[node] = self.ONE
for m in self.graph.get(node, []):
result = self.visit(m)
if result is False:
return False
self.colors[node] = self.TWO
self.result.insert(0, node)
return True
if __name__ == '__main__':
solution = Solution()
num_courses = 4
prerequisites = [[1, 0], [2, 0], [3, 1], [3, 2]]
print(solution.find_order(num_courses, prerequisites))
```
#### File: leetcode/delete-node-in-a-bst/main.py
```python
from typing import Tuple
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def delete_node(self, root: TreeNode, key: int) -> TreeNode:
self.root = root
parent, node_to_remove = self.binary_search(None, root, key)
if node_to_remove is None:
return root
if node_to_remove.left is None:
self.transplant(node_to_remove, node_to_remove.right)
elif node_to_remove.right is None:
self.transplant(node_to_remove, node_to_remove.left)
else:
y = self.minimum(node_to_remove.right)
if y != node_to_remove.right:
self.transplant(y, y.right)
y.right = node_to_remove.right
self.transplant(node_to_remove, y)
y.left = node_to_remove.left
return self.root
def binary_search(self, parent: TreeNode,
root: TreeNode, key: int) -> Tuple[TreeNode, TreeNode]:
if root is not None:
if root.val == key:
return parent, root
elif key < root.val:
return self.binary_search(root, root.left, key)
else:
return self.binary_search(root, root.right, key)
else:
return parent, None
def minimum(self, root: TreeNode) -> TreeNode:
while root.left is not None:
root = root.left
return root
def transplant(self, x: TreeNode, y: TreeNode):
parent, _ = self.binary_search(None, self.root, x.val)
if parent is None:
self.root = y
elif x == parent.left:
parent.left = y
else:
parent.right = y
```
#### File: leetcode/h-index-ii/main.py
```python
from typing import List
class Solution:
def h_index(self, citations: List[int]) -> int:
result = 0
size = len(citations)
i = 0
while i < size:
h = size - i
if len(citations[i:]) >= h:
if i != 0:
if citations[i-1] <= h and citations[i] >= h:
return h
else:
if citations[i] >= h:
return h
i += 1
return result
if __name__ == '__main__':
solution = Solution()
citations = [0, 1, 3, 5, 6]
print(solution.h_index(citations))
```
#### File: leetcode/increasing-triplet-subsequence/main.py
```python
from typing import List
class Solution:
def increasing_triplet(self, nums: List[int]) -> bool:
for i, value_i in enumerate(nums):
for j, value_j in enumerate(nums[i+1:]):
if value_i < value_j:
for k, value_k in enumerate(nums[i+j+1:]):
if value_j < value_k:
return True
return False
if __name__ == '__main__':
solution = Solution()
nums = [2, 1, 5, 0, 3]
print(solution.increasing_triplet(nums))
```
#### File: leetcode/maximum-difference-between-node-and-ancestor/main.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def max_ancestor_diff(self, root: TreeNode) -> int:
return self.in_order(root, root.val, root.val)
def in_order(self, root: TreeNode, min_: int, max_: int) -> int:
if root is not None:
new_min = min(min_, root.val)
new_max = max(max_, root.val)
return max(
abs(root.val - min_),
abs(root.val - max_),
self.in_order(root.left, new_min, new_max),
self.in_order(root.right, new_min, new_max)
)
return 0
```
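A small hand-built tree to exercise the traversal, assuming `TreeNode` and `Solution` above are in scope:
```python
root = TreeNode(8)
root.left = TreeNode(3)
root.right = TreeNode(10)
root.left.left = TreeNode(1)
root.left.right = TreeNode(6)
assert Solution().max_ancestor_diff(root) == 7  # |8 - 1| along the 8 -> 3 -> 1 path
```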
#### File: leetcode/minimum-window-substring/main.py
```python
from typing import Dict
class Solution:
def min_window(self, s: str, t: str) -> str:
frequency_s = {}
frequency_t = {}
for c in s:
frequency_s[c] = frequency_s.get(c, 0) + 1
for c in t:
frequency_t[c] = frequency_t.get(c, 0) + 1
start = 0
end = len(s) - 1
result = ''
while end >= start and start >= 0:
if self.verify(frequency_s, frequency_t):
result = s[start:end+1]
frequency_s[s[start]] -= 1
start += 1
else:
frequency_s[s[start-1]] += 1
frequency_s[s[end]] -= 1
start -= 1
end -= 1
return result
def verify(self, frequency: Dict, frequency_t: Dict) -> bool:
for c, qty in frequency_t.items():
if frequency.get(c, 0) < qty:
return False
return True
if __name__ == '__main__':
solution = Solution()
s = "ADOBECODEBANC"
t = "ABC"
print(solution.min_window(s, t))
```
#### File: leetcode/product-of-array-except-self/main.py
```python
from typing import List
class Solution:
def product_except_self(self, nums: List[int]) -> List[int]:
result = []
memoize = {}
for i, num1 in enumerate(nums):
multi = 1
if num1 not in memoize:
for j, num2 in enumerate(nums):
if i != j:
multi *= num2
memoize[num1] = multi
result.append(memoize[num1])
return result
```
#### File: leetcode/search-in-rotated-sorted-array/main.py
```python
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
self.nums = nums
return self.binary_search(0, len(nums) - 1, target)
def binary_search(self, l: int, r: int, value: int) -> int: # noqa
if l > r:
return -1
else:
mid = (l + r) // 2
if self.nums[mid] == value:
return mid
elif value < self.nums[mid]:
result = self.binary_search(l, mid-1, value)
if result == -1:
return self.binary_search(mid+1, r, value)
else:
return result
else:
result = self.binary_search(mid+1, r, value)
if result == -1:
return self.binary_search(l, mid-1, value)
else:
return result
```
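Because a miss in one half falls back to searching the other half, this variant answers correctly on rotated input but can degrade to O(n) probes rather than the canonical O(log n). A quick check, assuming `Solution` above is in scope:
```python
nums = [4, 5, 6, 7, 0, 1, 2]
assert Solution().search(nums, 0) == 4
assert Solution().search(nums, 3) == -1
```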
#### File: leetcode/serialize-and-deserialize-binary-tree/main.py
```python
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def serialize(self, root: TreeNode) -> str:
if root is None:
return ''
result = []
queue = [[root]]
while queue:
new_nodes = []
nodes = queue.pop(0)
for node in nodes:
if node is not None:
result.append(str(node.val))
new_nodes.append(node.left)
new_nodes.append(node.right)
else:
result.append('null')
if new_nodes:
queue.append(new_nodes)
return ','.join(result)
    def deserialize(self, data: str) -> TreeNode:
        if data == '':
            return None
        values = data.split(',')
        size = len(values)
        base_root = TreeNode(values[0])
        queue = [base_root]
        i = 1
        # each dequeued node consumes the next two tokens as its children;
        # 'null' tokens are skipped instead of becoming real nodes, since the
        # serializer only emits children for nodes that actually exist
        while queue and i < size:
            root = queue.pop(0)
            if values[i] != 'null':
                root.left = TreeNode(values[i])
                queue.append(root.left)
            i += 1
            if i < size and values[i] != 'null':
                root.right = TreeNode(values[i])
                queue.append(root.right)
            i += 1
        return base_root
```
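A round-trip check for the codec, assuming `TreeNode` and `Codec` above are in scope (deserialized values come back as strings, which a second serialize tolerates):
```python
codec = Codec()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.right.left = TreeNode(4)
root.right.right = TreeNode(5)
data = codec.serialize(root)
assert codec.serialize(codec.deserialize(data)) == data  # stable round trip
```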
#### File: leetcode/smallest-string-starting-from-leaf/main.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def smallestFromLeaf(self, root: TreeNode) -> str:
self.result = ''
self.pre_order(root, '')
return self.result
def pre_order(self, root: TreeNode, word: str):
if root is not None:
word = chr(97 + root.val) + word
self.pre_order(root.left, word)
self.pre_order(root.right, word)
if root.left is None and root.right is None:
if word < self.result or self.result == '':
self.result = word
```
#### File: leetcode/top-k-frequent-words/main.py
```python
import heapq
from typing import List
class Solution:
def top_k_frequent(self, words: List[str], k: int) -> List[str]:
frequency = {}
for word in words:
frequency[word] = frequency.get(word, 0) - 1
result = []
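        # counts were stored negated above so the min-heap pops the most
        # frequent word first; ties fall back to ascending alphabetical order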
items = [(item[1], item[0]) for item in frequency.items()]
heapq.heapify(items)
for i in range(k):
result.append(heapq.heappop(items)[1])
return result
```
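A check against the standard example, assuming `Solution` above is in scope:
```python
words = ['i', 'love', 'leetcode', 'i', 'love', 'coding']
assert Solution().top_k_frequent(words, 2) == ['i', 'love']
```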
#### File: leetcode/word-break/main.py
```python
from typing import List
class Solution():
def word_break(self, s: str, words: List[str]) -> bool:
self.words = words
self.memoization = {}
return self.find(s)
def find(self, s: str) -> bool:
if s in self.memoization:
return self.memoization[s]
if s == '':
return True
else:
result = False
i = 0
while i < len(self.words) and result is False:
word = self.words[i]
if s.startswith(word):
result = self.find(s[len(word):])
self.memoization[s[len(word):]] = result
i += 1
return result
if __name__ == '__main__':
solution = Solution()
s = "catsandog"
words = ["cats", "dog", "sand", "and", "cat"]
print(solution.word_break(s, words))
```
#### File: hackerrank/longest-increasing-path-in-a-matrix/longest_increasing_path_in_a_matrix.py
```python
from typing import List
class Solution:
def longest_increasing_path(self, matrix: List[List[int]]) -> int:
self.matrix = matrix
self.m = len(matrix)
self.n = len(matrix[0])
self.result = 0
self.memoization = {}
for i in range(self.m):
for j in range(self.n):
result = self.find_longest_from(i, j)
if result > self.result:
self.result = result
return self.result
def find_longest_from(self, i: int, j: int) -> int:
if (i, j) in self.memoization:
return self.memoization[(i, j)]
elif i < 0 or i >= self.m or j < 0 or j >= self.n:
return 0
else:
result = 1
if i - 1 >= 0 and self.matrix[i-1][j] > self.matrix[i][j]:
result = max(result, 1 + self.find_longest_from(i-1, j))
if i + 1 < self.m and self.matrix[i+1][j] > self.matrix[i][j]:
result = max(result, 1 + self.find_longest_from(i+1, j))
if j - 1 >= 0 and self.matrix[i][j-1] > self.matrix[i][j]:
result = max(result, 1 + self.find_longest_from(i, j-1))
if j + 1 < self.n and self.matrix[i][j+1] > self.matrix[i][j]:
result = max(result, 1 + self.find_longest_from(i, j+1))
self.memoization[(i, j)] = result
return result
```
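A quick check on the classic grid, assuming `Solution` above is in scope:
```python
matrix = [[9, 9, 4],
          [6, 6, 8],
          [2, 1, 1]]
assert Solution().longest_increasing_path(matrix) == 4  # 1 -> 2 -> 6 -> 9
```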
#### File: hackerrank/luck-balance/main.py
```python
def luck_balance(k, contests):
contests.sort(key=lambda item: item[0])
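    # ascending by luck value: the important contests we are forced to win
    # (importances - k of them) should be the cheapest ones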
importances = 0
for value, importance in contests:
if importance == 1:
importances += 1
result = 0
qty = 0
for value, importance in contests:
if importance == 0:
result += value
elif qty < importances - k:
qty += 1
result -= value
else:
result += value
return result
```
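The HackerRank sample, hand-checked against the function above (at most k = 3 important contests may be lost):
```python
contests = [[5, 1], [2, 1], [1, 1], [8, 1], [10, 0], [5, 0]]
assert luck_balance(3, contests) == 29
```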
#### File: joaojunior/hackerrank/main.py
```python
def k_factorization(numbers, n):
numbers = sorted(numbers, reverse=True)
n_bck = n
divisors = []
for divisor in numbers:
if n % divisor == 0:
divisors.append(divisor)
result = []
while divisors:
divisor = divisors.pop(0)
while n % divisor == 0:
result.append(divisor)
            n = n // divisor  # integer division; "/" would turn n into a float and risk precision errors
result.append(1)
for i in range(len(result) - 2, -1, -1):
result[i] = result[i] * result[i+1]
result.reverse()
if result and result[-1] == n_bck:
return result
else:
return [-1]
if __name__ == "__main__":
n, size = input().split()
n = int(n)
size = int(size)
numbers = input().split()
for i in range(size):
numbers[i] = int(numbers[i])
result = k_factorization(numbers, n)
for number in result:
print(number, end=" ")
```
#### File: hackerrank/majority-element-ii/majority_element_ii.py
```python
from typing import List
class Solution:
def majority_element(self, nums: List[int]) -> List[int]:
size = len(nums)
times = size // 3
counter = {}
result = []
for num in nums:
counter[num] = counter.get(num, 0) + 1
for num, qty in counter.items():
if qty > times:
result.append(num)
return result
```
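Two small checks, assuming `Solution` above is in scope (any element appearing more than ⌊n/3⌋ times qualifies):
```python
assert Solution().majority_element([3, 2, 3]) == [3]
assert sorted(Solution().majority_element([1, 2])) == [1, 2]
```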
#### File: hackerrank/max-area-of-island/max_area_of_island.py
```python
from typing import List
class Solution:
def max_area_of_island(self, grid: List[List[int]]) -> int:
self.grid = grid
self.m = len(self.grid)
self.n = len(self.grid[0])
max_ = 0
for i in range(self.m):
for j in range(self.n):
if self.grid[i][j] == 1:
count = self.count_island(i, j)
if count >= max_:
max_ = count
return max_
def count_island(self, row: int, col: int) -> int:
if row < 0 or row >= self.m or col < 0 or col >= self.n:
return 0
if self.grid[row][col] == 0:
return 0
else:
self.grid[row][col] = 0
return 1 + (
self.count_island(row-1, col) +
self.count_island(row+1, col) +
self.count_island(row, col-1) +
self.count_island(row, col+1)
)
```
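A small grid check, assuming `Solution` above is in scope; note the method zeroes visited cells, so the input grid is consumed:
```python
grid = [[0, 0, 1, 0],
        [1, 1, 1, 0],
        [0, 1, 0, 0]]
assert Solution().max_area_of_island(grid) == 5
```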
#### File: hackerrank/maximum-number-of-eaten-apples/maximum_number_of_eaten_apples.py
```python
from typing import List, Tuple
class IndexMinHeap:
def __init__(self):
self.items = []
self.indexes = {}
def is_empty(self) -> bool:
return len(self.items) == 0
def insert(self, key: int, value: int):
i = self.indexes.get(key, None)
if i is not None:
_, old_value = self.items[i]
self.items[i] = (key, value + old_value)
self.heapify_down(i)
else:
i = len(self.items)
self.items.append((key, value))
self.indexes[key] = i
self.heapify(i)
    def delete_min(self) -> Tuple[int, int]:
key, value = self.items[0]
self.indexes.pop(key)
if len(self.items) > 1:
self.items[0] = self.items[-1]
self.indexes[self.items[0][0]] = 0
self.items.pop()
self.heapify_down(0)
return (key, value)
def heapify_down(self, i: int):
left = 2 * i + 1
right = left + 1
size = len(self.items)
if left < size:
j = i
if self.items[j][0] > self.items[left][0]:
j = left
if right < size and self.items[j][0] > self.items[right][0]:
j = right
if i != j:
self.items[i], self.items[j] = self.items[j], self.items[i]
self.indexes[self.items[i][0]] = i
self.indexes[self.items[j][0]] = j
self.heapify_down(j)
def heapify(self, i: int):
if i > 0:
parent = (i - 1) // 2
if self.items[i][0] < self.items[parent][0]:
self.items[i], self.items[parent] = (self.items[parent],
self.items[i])
self.indexes[self.items[i][0]] = i
self.indexes[self.items[parent][0]] = parent
self.heapify(parent)
class Solution:
def eaten_apples(self, apples: List[int], days: List[int]) -> int:
qty = 0
i = 0
size = len(apples)
index_min_heap = IndexMinHeap()
while i < size or not index_min_heap.is_empty():
if i < size:
apple = apples[i]
day = days[i]
if apple > 0:
index_min_heap.insert(i + day, apple)
deadline = 0
while deadline <= i and not index_min_heap.is_empty():
deadline, apple = index_min_heap.delete_min()
if deadline > i:
qty += 1
if apple - 1 > 0:
index_min_heap.insert(deadline, apple - 1)
i += 1
return qty
```
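The standard example for this problem, assuming `Solution` above is in scope:
```python
apples = [1, 2, 3, 5, 2]
days = [3, 2, 1, 4, 2]
assert Solution().eaten_apples(apples, days) == 7
```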
#### File: hackerrank/minesweeper/test_minesweeper.py
```python
from minesweeper import Solution
def test_example_1():
board = [["E", "E", "E", "E", "E"],
["E", "E", "M", "E", "E"],
["E", "E", "E", "E", "E"],
["E", "E", "E", "E", "E"]]
click = [3, 0]
expected = [["B", "1", "E", "1", "B"],
["B", "1", "M", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]]
assert expected == Solution().update_board(board, click)
def test_example_2():
board = [["B", "1", "E", "1", "B"],
["B", "1", "M", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]]
click = [1, 2]
expected = [["B", "1", "E", "1", "B"],
["B", "1", "X", "1", "B"],
["B", "1", "1", "1", "B"],
["B", "B", "B", "B", "B"]]
assert expected == Solution().update_board(board, click)
```
#### File: hackerrank/number-of-operations-to-make-network-connected/number_of_operations_to_make_network_connected.py
```python
from typing import List
class Solution:
def make_connected(self, n: int, connections: List[List[int]]) -> int:
if n - 1 > len(connections):
return -1
self.adjacents = {}
self.visited = {}
for i in range(n):
self.adjacents[i] = []
self.visited[i] = False
for i, j in connections:
self.adjacents[i].append(j)
self.adjacents[j].append(i)
components = 0
for i in range(n):
if self.visited[i] is False:
self.dfs(i)
components += 1
return components - 1
def dfs(self, i):
if self.visited[i] is False:
self.visited[i] = True
for j in self.adjacents[i]:
self.dfs(j)
```
#### File: hackerrank/number-of-operations-to-make-network-connected/test_number_of_operations_to_make_network_connected.py
```python
from number_of_operations_to_make_network_connected import Solution
def test_example_1():
n = 4
connections = [[0, 1], [0, 2], [1, 2]]
expected = 1
assert expected == Solution().make_connected(n, connections)
def test_example_2():
n = 6
connections = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3]]
expected = 2
assert expected == Solution().make_connected(n, connections)
def test_example_3():
n = 6
connections = [[0, 1], [0, 2], [0, 3], [1, 2]]
expected = -1
assert expected == Solution().make_connected(n, connections)
def test_example_4():
n = 5
connections = [[0, 1], [0, 2], [3, 4], [2, 3]]
expected = 0
assert expected == Solution().make_connected(n, connections)
```
#### File: hackerrank/number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py
```python
import heapq
from typing import List
class Solution:
def get_number_of_backlog_orders(self, orders: List[List[int]]) -> int:
sell_backlog = []
buy_backlog = []
for price, amount, order_type in orders:
if order_type == 0:
while amount > 0:
if sell_backlog and sell_backlog[0][0] <= price:
sell_price, sell_amount = heapq.heappop(sell_backlog)
if sell_amount > amount:
heapq.heappush(sell_backlog,
(sell_price, sell_amount - amount))
amount = 0
else:
amount -= sell_amount
else:
heapq.heappush(buy_backlog, (-price, amount))
amount = 0
else:
while amount > 0:
if buy_backlog and -buy_backlog[0][0] >= price:
buy_price, buy_amount = heapq.heappop(buy_backlog)
if buy_amount > amount:
heapq.heappush(buy_backlog,
(buy_price, buy_amount - amount))
amount = 0
else:
amount -= buy_amount
else:
heapq.heappush(sell_backlog, (price, amount))
amount = 0
result = 0
for _, amount in sell_backlog:
result += amount
for _, amount in buy_backlog:
result += amount
return result % (10**9 + 7)
```
#### File: hackerrank/palindrome-partitioning/palindrome_partitioning.py
```python
from typing import List
class Solution:
def partition(self, s: str) -> List[List[str]]:
self.partitions = []
self.generate_partitions(s, [])
return self.partitions
def generate_partitions(self, s, partition, start=0, end=1):
if end == len(s):
if self.is_palindrome(s[start:end]):
partition.append(s[start:end])
self.partitions.append(partition)
else:
if self.is_palindrome(s[start:end]):
self.generate_partitions(s, partition[:] + [s[start:end]],
end, end + 1)
self.generate_partitions(s, partition[:], start, end + 1)
def is_palindrome(self, s: str) -> bool:
i = 0
j = len(s) - 1
while i <= j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
```
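A check against the classic input, assuming `Solution` above is in scope (the recursion emits single-character cuts before longer ones, which fixes the order below):
```python
assert Solution().partition('aab') == [['a', 'a', 'b'], ['aa', 'b']]
```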
#### File: hackerrank/restore-ip-addresses/restore_ip_addresses.py
```python
from typing import List
class Solution:
def restore_ip_addresses(self, s: str) -> List[str]:
if s == "":
return []
self.s = s
self.size = len(s)
self.result = []
self.partitions(0, 1, [])
return self.result
def partitions(self, start: int = 0, end: int = 1, partition: List = None):
if end == self.size:
if self.is_valid(self.s[start:end]):
partition.append(self.s[start:end])
if len(partition) == 4:
self.result.append('.'.join(partition))
else:
self.partitions(start, end+1, partition[:])
if (len(partition) + 1 <= 4) and self.is_valid(self.s[start:end]):
self.partitions(end, end+1, partition[:] + [self.s[start:end]])
def is_valid(self, s: str) -> bool:
i = 0
while i < len(s) and s[i] == '0':
i += 1
if i > 0 and len(s) > 1:
return False
return len(s) <= 3 and int(s) <= 255
```
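A check on the classic input, assuming `Solution` above is in scope (sorted to avoid depending on emission order):
```python
result = Solution().restore_ip_addresses('25525511135')
assert sorted(result) == ['255.255.11.135', '255.255.111.35']
```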
#### File: hackerrank/sherlock-and-cost/main.py
```python
class SherlockCost():
def cost(self, b):
self.b = b
last = len(b) - 1
self.memoize = {}
return max(self._cost(1, last-1), self._cost(b[last], last-1))
def _cost(self, previous, i):
if (previous, i) in self.memoize:
return self.memoize[(previous, i)]
elif i == 0:
self.memoize[(previous, i)] = max(abs(1 - previous),
abs(self.b[0] - previous))
return self.memoize[(previous, i)]
else:
result = max(self._cost(1, i - 1) + abs(1 - previous),
self._cost(self.b[i], i - 1) + abs(self.b[i]-previous)
)
self.memoize[(previous, i)] = result
return result
if __name__ == '__main__':
sc = SherlockCost()
b = [10, 1, 10, 1, 10]
b = [4, 7, 9]
print(sc.cost(b))
```
#### File: hackerrank/sherlock-and-valid-string/tests.py
```python
import unittest
from main import is_valid
class IsValidTest(unittest.TestCase):
def test_sample_0_return_no(self):
s = 'aabbcd'
expected = 'NO'
self.assertEqual(expected, is_valid(s))
def test_sample_1_return_no(self):
s = 'aabbccddeefghi'
expected = 'NO'
self.assertEqual(expected, is_valid(s))
def test_sample_2_return_yes(self):
s = 'abcdefghhgfedecba'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
def test_sample_3_return_yes(self):
s = 'a'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
def test_sample_4_return_yes(self):
s = 'abcdd'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
def test_sample_5_return_yes(self):
s = 'abcd'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
def test_sample_6_return_no(self):
s = 'aaaabb'
expected = 'NO'
self.assertEqual(expected, is_valid(s))
def test_sample_7_return_yes(self):
s = 'abbac'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
def test_sample_8_return_no(self):
s = 'abbacd'
expected = 'NO'
self.assertEqual(expected, is_valid(s))
def test_sample_9_return_yes(self):
s = 'ab'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
def test_sample_10_return_yes(self):
s = 'aaabbbc'
expected = 'YES'
self.assertEqual(expected, is_valid(s))
if __name__ == '__main__':
unittest.main()
```
#### File: hackerrank/single_threaded_cpu/single_threaded_cpu.py
```python
import heapq
from typing import List
class Solution:
def get_order(self, tasks: List[List[int]]) -> List[int]:
time = 1
q = []
q_tasks = []
result = []
for i in range(len(tasks)):
q_tasks.append((tasks[i][0], tasks[i][1], i))
heapq.heapify(q_tasks)
while q_tasks or q:
while q_tasks and q_tasks[0][0] <= time:
enqueue_time, processing_time, i = heapq.heappop(q_tasks)
heapq.heappush(q, (processing_time, i))
if q:
processing_time, i = heapq.heappop(q)
time += processing_time
result.append(i)
else:
time = q_tasks[0][0]
return result
```
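The standard example for this scheduler, assuming `Solution` above is in scope:
```python
tasks = [[1, 2], [2, 4], [3, 2], [4, 1]]
assert Solution().get_order(tasks) == [0, 2, 3, 1]
```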
#### File: hackerrank/some_division/main.py
```python
import sys
sys.setrecursionlimit(10**4)
class Backtrack():
def backtrack(self, n, divisors):
self.divisors = sorted(divisors)
self.memoize = {}
return self._backtrack(n)
def _backtrack(self, n):
if n in self.memoize:
return self.memoize[n]
temp_max = 0
for divisor in self.divisors:
if divisor < n and n % divisor == 0:
temp_max = max(1 + (n//divisor) *
self._backtrack(divisor), temp_max)
self.memoize[n] = temp_max
return self.memoize[n]
if __name__ == '__main__':
q = int(input())
for i in range(q):
n, m = input().split()
n = int(n)
m = int(m)
_divisors = input().split()
divisors = []
for divisor in _divisors:
divisors.append(int(divisor))
backtrack = Backtrack()
print(backtrack.backtrack(n, divisors))
```
#### File: hackerrank/string-to-integer-atoi/test_string_to_integer_atoi.py
```python
from string_to_integer_atoi import Solution
def test_example_1():
s = '42'
expected = 42
assert expected == Solution().my_atoi(s)
def test_example_2():
s = ' -42'
expected = -42
assert expected == Solution().my_atoi(s)
def test_example_3():
s = '4193 with words'
expected = 4193
assert expected == Solution().my_atoi(s)
def test_example_4():
s = 'words and 987'
expected = 0
assert expected == Solution().my_atoi(s)
def test_example_5():
s = '-91283472332'
expected = -2147483648
assert expected == Solution().my_atoi(s)
```
#### File: hackerrank/truck-tour/main.py
```python
def truck_tour(petrolpumps):
size = len(petrolpumps)
initial = 0
    found = False
    while found is False and initial < size:
total = 0
for i in range(size):
position = (i + initial) % size
total += petrolpumps[position][0] - petrolpumps[position][1]
if total < 0:
break
if total >= 0:
            found = True
else:
initial += 1
return initial
if __name__ == '__main__':
petrolpumps = [[1, 5], [10, 3], [3, 4]]
print(truck_tour(petrolpumps))
``` |
{
"source": "joaojunior/identify_paragraphs_in_image",
"score": 3
} |
#### File: joaojunior/identify_paragraphs_in_image/tests.py
```python
import unittest
import numpy as np
from images import (get_lines_with_color, identify_paragraphs_in_image,
remove_boundary)
class TestImage(unittest.TestCase):
def setUp(self):
self.image_input = np.ones((3, 5), np.uint8)
self.image_input = self.image_input * 255
self.image_input[1, 2] = 0
def test_remove_boundary(self):
expected = np.zeros((1, 1), np.uint8)
self.assertEqual(expected, remove_boundary(self.image_input))
def test_lines_0_and_2_have_color_white(self):
expected = [0, 2]
self.assertEqual(expected, get_lines_with_color(self.image_input))
def test_have_one_paragraph(self):
paragraphs, image = identify_paragraphs_in_image(self.image_input)
self.assertEqual(1, paragraphs)
def test_have_two_paragraphs(self):
image = np.ones((3, 5), np.uint8)
image = image * 255
image[0, 2] = 0
image[2, 3] = 0
paragraphs, image = identify_paragraphs_in_image(image)
self.assertEqual(2, paragraphs)
def test_have_three_paragraphs(self):
image = np.ones((5, 5), np.uint8)
image = image * 255
image[0, 2] = 0
image[2, 3] = 0
image[4, 4] = 0
paragraphs, image = identify_paragraphs_in_image(image)
self.assertEqual(3, paragraphs)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joaojunior/talk_creating_faster_crawler",
"score": 3
} |
#### File: talk_creating_faster_crawler/crawler_app_flask/3_crawler_with_green_threads.py
```python
import requests
import gevent.monkey
from gevent import Greenlet
from constants import URL_FASTER, URL_SLOWLY
gevent.monkey.patch_socket()
def crawler(url):
response = requests.get(url)
if response.status_code != 200:
print('error')
return response.status_code
if __name__ == '__main__':
gthreads = [Greenlet(crawler, URL_SLOWLY)]
for i in range(20):
gthreads.append(Greenlet(crawler, URL_FASTER))
for gthread in gthreads:
gthread.start()
gevent.joinall(gthreads)
```
#### File: talk_creating_faster_crawler/crawler_app_flask_many_requests/1_crawler_with_threads.py
```python
from threading import Thread
import requests
from constants import BATCH_SIZE, NUMBER_REQUESTS, URL_FASTER
def crawler(url):
response = requests.get(url)
if response.status_code != 200:
print('error')
return response.status_code
if __name__ == '__main__':
size = int(NUMBER_REQUESTS/BATCH_SIZE)
for quantity in range(size):
threads = []
for i in range(BATCH_SIZE):
t = Thread(target=crawler, args=(URL_FASTER,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
``` |
{
"source": "JoaoLages/ecco",
"score": 3
} |
#### File: ecco/tests/analysis_tests.py
```python
from ecco import analysis
import pytest
import numpy as np
shape = (100, 1000)
np.random.seed(seed=1)
@pytest.fixture
def acts():
acts1 = np.random.randn(*shape)
acts2 = np.random.randn(*shape)
yield acts1, acts2
class TestAnalysis:
def test_cca_smoke(self, acts):
actual = analysis.cca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_svcca_smoke(self, acts):
actual = analysis.svcca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_pwcca_smoke(self, acts):
actual = analysis.pwcca(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_cka_smoke(self, acts):
actual = analysis.cka(acts[0], acts[1])
assert isinstance(actual, float)
assert actual >= 0
assert actual <= 1
def test_linear_transformation(self, acts):
acts_1 = acts[0]
acts_2 = acts_1 * 10
        assert analysis.cca(acts_1, acts_2) == pytest.approx(1.0), "CCA of linear transformation is approx 1.0"
        assert analysis.svcca(acts_1, acts_2) == pytest.approx(1.0), "SVCCA of linear transformation is approx 1.0"
        assert analysis.pwcca(acts_1, acts_2) == pytest.approx(1.0), "PWCCA of linear transformation is approx 1.0"
        assert analysis.cka(acts_1, acts_2) == pytest.approx(1.0), "CKA of linear transformation is approx 1.0"
```
#### File: ecco/tests/lm_test.py
```python
from ecco.lm import LM, _one_hot, sample_output_token, activations_dict_to_array
import ecco
import torch
import numpy as np
from transformers import PreTrainedModel
class TestLM:
def test_one_hot(self):
expected = torch.tensor([[1., 0., 0.], [0., 1., 0.]])
actual = _one_hot(torch.tensor([0, 1]), 3)
assert torch.all(torch.eq(expected, actual))
def test_select_output_token_argmax(self):
result = sample_output_token(torch.tensor([0., 1.]), False, 0, 0, 0)
assert result == torch.tensor(1)
def test_select_output_token_sample(self):
result = sample_output_token(torch.tensor([[0., 0.5, 1.]]), True, 1, 1, 1.0)
assert result == torch.tensor(2)
def test_activations_dict_to_array(self):
batch, position, neurons = 1, 3, 4
actual_dict = {0: np.zeros((batch, position, neurons)),
1: np.zeros((batch, position, neurons))}
activations = activations_dict_to_array(actual_dict)
assert activations.shape == (batch, 2, neurons, position)
def test_init(self):
lm = ecco.from_pretrained('sshleifer/tiny-gpt2', activations=True)
assert isinstance(lm.model, PreTrainedModel), "Model downloaded and LM was initialized successfully."
def test_generate(self):
lm = ecco.from_pretrained('sshleifer/tiny-gpt2',
activations=True,
verbose=False)
output = lm.generate('test', generate=1, attribution=['grad_x_input'])
assert output.token_ids.shape == (1, 2), "Generated one token successfully"
assert output.attribution['grad_x_input'][0] == 1, "Successfully got an attribution value"
# Confirm activations is dimensions:
# (batch 1, layer 2, h_dimension 8, position 1)
assert output.activations['decoder'].shape == (1, 2, 8, 1)
def test_call_dummy_bert(self):
lm = ecco.from_pretrained('julien-c/bert-xsmall-dummy',
activations=True,
verbose=False)
inputs = lm.to(lm.tokenizer(['test', 'hi'],
padding=True,
truncation=True,
return_tensors="pt",
max_length=512))
output = lm(inputs)
# Confirm it's (batch 2, layer 1, h_dimension 40, position 3)
# position is 3 because of [CLS] and [SEP]
        # If we do require padding, CUDA complains with this model for some reason.
assert output.activations['encoder'].shape == (2, 1, 40, 3)
# TODO: Test LM Generate with Activation. Tweak to support batch dimension.
# def test_generate_token_no_attribution(self, mocker):
# pass
#
# def test_generate_token_with_attribution(self, mocker):
# pass
``` |
{
"source": "joaolcaas/distiller",
"score": 2
} |
#### File: distiller/tests/test_post_train_quant.py
```python
import os
import sys
import pytest
import torch
import torch.testing
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from distiller.quantization import RangeLinearQuantParamLayerWrapper, LinearQuantMode
@pytest.fixture()
def conv_input():
return torch.cat((torch.tensor([[[[-7, 5], [2, -3]]]], dtype=torch.float32),
torch.tensor([[[[-15, 10], [-1, 5]]]], dtype=torch.float32)), 0)
@pytest.fixture()
def conv_weights():
return torch.tensor([[[[-1, -0.5, 0], [0.5, 1, 1.5], [2, 2.5, 3]]],
[[[-0.3, -0.25, -0.2], [-0.15, -0.1, -0.05], [0, 0.05, 0.1]]]], dtype=torch.float32)
@pytest.mark.parametrize(
"mode, clip_acts, per_channel_wts, expected_output",
[
(LinearQuantMode.ASYMMETRIC_UNSIGNED, False, False,
torch.cat((torch.tensor([[[[-3.648135333, -2.14596196], [0.858384784, 2.432090222]],
[[0.214596196, 0.500724457], [0.715320653, 0.786852719]]]], dtype=torch.float32),
torch.tensor([[[[12.51811144, 13.01883589], [14.0918168, 14.59254133]],
[[1.359109242, 1.645237503], [1.573705438, 1.645237503]]]], dtype=torch.float32)),
dim=0)
),
(LinearQuantMode.ASYMMETRIC_UNSIGNED, True, False,
torch.cat((torch.tensor([[[[-1.089218234, -1.089218234], [1.055180164, 2.518817167]],
[[0.238266489, 0.476532978], [0.680761396, 0.782875606]]]], dtype=torch.float32),
torch.tensor([[[[7.59048957, 7.59048957], [7.59048957, 7.59048957]],
[[1.123256304, 1.259408583], [1.089218234, 1.089218234]]]], dtype=torch.float32)),
dim=0)
),
(LinearQuantMode.ASYMMETRIC_UNSIGNED, False, True,
torch.cat((torch.tensor([[[[-3.648135333, -2.14596196], [0.858384784, 2.432090222]],
[[0.214596196, 0.429192392], [0.715320653, 0.858384784]]]], dtype=torch.float32),
torch.tensor([[[[12.51811144, 13.01883589], [14.09181687, 14.59254133]],
[[1.430641307, 1.502173372], [1.573705438, 1.645237503]]]], dtype=torch.float32)),
dim=0)
),
(LinearQuantMode.ASYMMETRIC_UNSIGNED, True, True,
torch.cat((torch.tensor([[[[-1.089768056, -1.089768056], [1.055712804, 2.52008863]],
[[0.238386762, 0.408663021], [0.681105035, 0.817326042]]]], dtype=torch.float32),
torch.tensor([[[[7.59432114, 7.59432114], [7.59432114, 7.59432114]],
[[1.191933811, 1.15787856], [1.123823308, 1.089768056]]]], dtype=torch.float32)),
dim=0)
)
]
)
def test_conv_layer_wrapper(conv_input, conv_weights, mode, clip_acts, per_channel_wts, expected_output):
layer = torch.nn.Conv2d(conv_input.shape[1], expected_output.shape[1], conv_weights.shape[-1],
padding=1, bias=False)
layer.weight.data = conv_weights
model = RangeLinearQuantParamLayerWrapper(layer, 8, 8, mode=mode, clip_acts=clip_acts,
per_channel_wts=per_channel_wts)
with pytest.raises(RuntimeError):
model(conv_input)
model.eval()
output = model(conv_input)
torch.testing.assert_allclose(output, expected_output)
@pytest.fixture()
def linear_input():
return torch.tensor([[-7, 5, 2, -3]], dtype=torch.float32)
@pytest.fixture()
def linear_weights():
return torch.tensor([[-1, 0.5, 0, 0.5],
[-0.05, 0, 0.05, 0.1],
[0.3, 0.6, -0.1, -0.2]], dtype=torch.float32)
@pytest.fixture()
def linear_bias():
return torch.tensor([-0.3, 0.1, -0.5], dtype=torch.float32)
@pytest.mark.parametrize(
"mode, clip_acts, per_channel_wts, expected_output",
[
(LinearQuantMode.ASYMMETRIC_UNSIGNED, False, False,
torch.tensor([[7.686200692, 0.241135708, 0.783691051]], dtype=torch.float32)),
(LinearQuantMode.ASYMMETRIC_UNSIGNED, False, True,
torch.tensor([[7.698823529, 0.241531719, 0.784978085]], dtype=torch.float32))
]
)
def test_linear_layer_wrapper(linear_input, linear_weights, linear_bias,
mode, clip_acts, per_channel_wts, expected_output):
layer = torch.nn.Linear(linear_input.shape[1], expected_output.shape[1], bias=True)
layer.weight.data = linear_weights
layer.bias.data = linear_bias
model = RangeLinearQuantParamLayerWrapper(layer, 8, 8, mode=mode, clip_acts=clip_acts,
per_channel_wts=per_channel_wts)
with pytest.raises(RuntimeError):
model(linear_input)
model.eval()
output = model(linear_input)
torch.testing.assert_allclose(output, expected_output)
``` |
{
"source": "joaolcaas/PySyft",
"score": 3
} |
#### File: syft/grid/__init__.py
```python
from .network import Network
DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
def register(node_id: str, **kwargs):
""" Add this process as a new peer registering it in the grid network.
Args:
node_id: Id used to identify this node.
Returns:
peer: Peer Network instance.
"""
if not kwargs:
        args = {"max_size": None, "timeout": 444, "url": DEFAULT_NETWORK_URL}
else:
args = kwargs
peer = Network(node_id, **args)
peer.start()
return peer
``` |
{
"source": "JoaoLeal92/crawler_ps5",
"score": 3
} |
#### File: crawlerPS5/crawlerPS5/pipelines.py
```python
import telegram_send
from datetime import datetime
class Crawlerps5Pipeline:
def process_item(self, item, spider):
if item['price']:
telegram_send.send(messages=[f"Produto {item['name']} encontrado por {item['price']} na url abaixo: \n\n {item['url']}"])
else:
current_date = datetime.now()
current_hour = current_date.hour
current_minute = current_date.minute
            # Daily heartbeat: once a day (at 9 am) confirm the bot is still running
if current_hour == 9 and current_minute <= 1:
telegram_send.send(messages=["Bot ativo por 24h, produto ainda não encontrado"])
if not item['name']:
telegram_send.send(messages=["Ocorreu um erro ao buscar o produto, verificar no site"])
```
#### File: crawlerPS5/spiders/amazon.py
```python
import scrapy
from crawlerPS5.items import Crawlerps5Item
class AmazonSpider(scrapy.Spider):
name = 'amazon'
allowed_domains = ['amazon.com.br']
user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
start_urls = [
'https://www.amazon.com.br/PlayStation-Console-PlayStation%C2%AE5/dp/B088GNRX3J/ref=sr_1_1?__mk_pt_BR=%C3%85M%C3%85%C5%BD%C3%95%C3%91&dchild=1&keywords=ps5&qid=1613470308&sr=8-1',
        # URL below used for testing
# 'https://www.amazon.com.br/God-War-Hits-PlayStation-4/dp/B07YT1GLV9/?_encoding=UTF8&pd_rd_w=yRrM3&pf_rd_p=d2ea4cd9-b3fa-4bdb-ab83-24ca9c54ecbe&pf_rd_r=3JF03Z0NMXW0PXVM86Z1&pd_rd_r=b592df2f-51e0-4fe5-8ccd-e6ff9930134e&pd_rd_wg=CLvfl&ref_=pd_gw_ci_mcx_mr_hp_d'
]
def parse(self, response):
product_title = response.xpath('//span[@id="productTitle"]/text()').get()
if product_title:
product_title = product_title.strip()
product_price = response.xpath('//span[@id="priceblock_ourprice"]/text()').get()
ps5 = Crawlerps5Item(name=product_title, price=product_price, url=response.url)
yield ps5
``` |
{
"source": "JoaoLeal92/crawler_ps5_selenium",
"score": 3
} |
#### File: crawler_ps5_selenium/scripts/amazon_product.py
```python
from selenium.common.exceptions import NoSuchElementException
from selenium import webdriver
class Product:
def __init__(self, product_url):
self.url = product_url
self.name = None
self.price = None
def get_product_feature(self, driver, tag, tag_attr, tag_attr_value):
try:
product_feat_element = driver.find_element_by_xpath(
f'//{tag}[@{tag_attr}="{tag_attr_value}"]')
product_feat = product_feat_element.text
return product_feat
except NoSuchElementException:
return None
except Exception as e:
            # Send a Telegram alert message describing the error
print(f'Ocorreu o seguinte erro: {e}')
``` |
{
"source": "joaoleveiga/feedzai-openml-python",
"score": 3
} |
#### File: resources/dummy_model/classifier.py
```python
class Classifier(object):
def __init__(self, target_value):
self.target_value = target_value
self.multiplier = [1, 0, 0]
def classify(self, instances):
return self.target_value
def getClassDistribution(self, instances):
return [self.multiplier] * len(instances)
``` |
{
"source": "joaolevi/payroll_v2",
"score": 3
} |
#### File: Company/Departments/HumanResourcers.py
```python
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from RegisterTools.EmployeesRegister import EmployeesRegister
from RegisterTools.TimeRegister import TimeRegister
REGISTER = EmployeesRegister()
class HumanResourcers():
def __init__(self):
self.employeeTimeRegister = []
    # Accessor methods
def get_employeeTimeRegister(self):
return self.employeeTimeRegister
def set_employeeTimeRegister(self, emp_id, date, workedHours):
if REGISTER.employee_finder(emp_id):
tr = TimeRegister(emp_id, date, workedHours)
self.employeeTimeRegister.append(tr)
print(tr)
else: print("\n\nID nao encontrado ou nao existe\n\n")
    # Specific functions
def show_employees(self):
print(REGISTER.get_employees_list())
def add_employee(self, emp_type, name, rg, adress, hour_value=None, wage=None):
return REGISTER.add_employee(emp_type, name, rg, adress, hour_value, wage)
def remove_employee_hr(self, emp_id):
REGISTER.remove_employee(emp_id)
def change_employee_details(self, emp_id, emp_t=None, name=None, rg=None, adress=None, hour_value=None, wage=None):
emp = REGISTER.change_employee_details(emp_id, emp_t, name, rg, adress, hour_value, wage)
print("\n\nAlteracao bem sucedida!")
self.show_full_employee_details(emp_id=emp.id)
def show_full_employee_details(self, emp_id):
i = REGISTER.employee_finder(emp_id)
if i:
emp = REGISTER.employees_list[i]
print("\n\nID:",emp.id,"\nTipo de Empregado:",emp.__class__.__name__, "\nNome:", emp.name,"\nRG:", emp.rg,"\nEndereco", emp.adress, "\n\n")
else: print("\n\nEmpregado não encontrado!!\n\n")
```
#### File: Company/RegisterTools/Sales.py
```python
class Sales:
def __init__(self, date, value, emp_id, comission):
self.__date = date
self.__value = value
self.__seller = emp_id
self.__comission = comission
def get_date(self):
return self.__date
def set_date(self, new_date):
self.__date = new_date
def get_value(self):
return self.__value
def set_value(self, new_value):
self.__value = new_value
def get_seller(self):
return self.__seller
def set_seller(self, new_seller):
self.__seller = new_seller
def get_comission(self):
return self.__comission
def set_comission(self, new_comission):
self.__comission = new_comission
def __repr__(self):
return "Data: %s ID: %s Valor: %.2f Comissao: %.2f\n" %(self.__date, self.__seller, self.__value, self.__comission)
```
#### File: Company/RegisterTools/TimeRegister.py
```python
class TimeRegister:
    def __init__(self, emp_id, date, hours):
        self.emp_id = emp_id
        self.date = date
        self.hours = hours
        # backing store for get_empTimeRegisters/set_to_clear_registers below
        self.employeesTimeRegisters = []
def get_emp_id(self):
return self.emp_id
def set_emp_id(self, new_id):
self.emp_id = new_id
    def get_date(self):
        # __init__ stores plain attributes (self.date, self.hours), so the
        # accessors must use the same names; the name-mangled self.__date
        # form never exists on an instance
        return self.date
    def set_date(self, new_date):
        self.date = new_date
    def get_hours(self):
        return self.hours
    def set_hours(self, new_hours):
        self.hours = new_hours
def get_empTimeRegisters(self):
return self.employeesTimeRegisters
def set_to_clear_registers(self):
self.employeesTimeRegisters = []
def __repr__(self):
return "\nid: %i, data: %s -- %s horas\n\n" %(self.emp_id, self.date, self.hours)
```
#### File: src/PayMethods/PayMethod.py
```python
class PayMethod():
def __init__(self, value, date):
self.__value = value
self.__date = date
def get_value(self):
return self.__value
def set_value(self, new_value):
self.__value = new_value
def get_date(self):
return self.__date
def set_date(self, new_date):
self.__date = new_date
```
#### File: UI/DepartamentsUI/HumanResourcersUI.py
```python
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from MenuOfChoices import MenuOfChoices
MENU_OF_CHOICES = MenuOfChoices()
class HumanResourcersUI():
def __init__(self):
pass
def add_employee_ui(HumanResourcers, Finances, Administration):
wage, hour_value = None, None
name = input("Nome: ")
rg = input("Rg: ")
adress = input("Endereço: ")
emp_type = MENU_OF_CHOICES.menu_employee_types()
if emp_type == "Hourly":
hour_value = float(input("Valor da hora de trabalho: "))
else:
wage = float(input("Salario: "))
bankID, agency, account = MENU_OF_CHOICES.fill_in_bank_data()
paymentType = MENU_OF_CHOICES.menu_payment_types()
emp_id = HumanResourcers.add_employee(emp_type, name, rg, adress, hour_value, wage)
Administration.add_employeePayDate(emp_id, emp_type)
Finances.add_employee_finances(emp_id, 0, bankID, agency, account, paymentType)
HumanResourcers.show_full_employee_details(emp_id)
return HumanResourcers, Finances, Administration
def employee_remove_ui(HumanResourcers, Finances, Administration):
emp_id = int(input("ID do empregado: "))
HumanResourcers.remove_employee_hr(emp_id)
Finances.remove_employee_fin(emp_id)
Administration.remove_employee_adm(emp_id)
return HumanResourcers, Finances, Administration
    def change_employee_details_ui(HumanResourcers, Finances):
escolha = -1
emp_type, name, adress, rg, bankID, agency, account, paymentMethod, new_wage, hour_value = None, None, None, None, None, None, None, None, None, None
emp_id = int(input("ID do empregado: "))
while (escolha != 10):
escolha = MENU_OF_CHOICES.menu_change_emp_details()
if escolha == 1:
emp_type = MENU_OF_CHOICES.menu_employee_types()
elif escolha == 2:
name = str(input("Nome: "))
elif escolha == 3:
rg = input("RG: ")
elif escolha == 4:
adress = input("Endereco: ")
elif escolha == 5:
bankID, agency, account = MENU_OF_CHOICES.fill_in_bank_data()
elif escolha == 6:
paymentMethod = MENU_OF_CHOICES.menu_payment_types()
elif escolha == 7:
new_wage = float(input("Novo salario: "))
elif escolha == 8:
hour_value = float(input("Valor da Hora de trabalho: "))
elif escolha == 9:
HumanResourcers.change_employee_details(emp_id, emp_type, name, rg, adress, hour_value, new_wage)
Finances.change_emp_fin_data(emp_id, bankID, agency, account, paymentMethod)
return HumanResourcers, Finances
else: return
def time_register(HumanResourcers):
emp_id = int(input("ID do empregado: "))
date = MENU_OF_CHOICES.fill_in_date_format()
worked_hours = float(input("Horas trabalhadas: "))
HumanResourcers.set_employeeTimeRegister(emp_id, date, worked_hours)
return HumanResourcers
``` |
{
"source": "joaolevi/Projeto_de_software_2020.1",
"score": 3
} |
#### File: src/Tax/Tax.py
```python
class Tax:
def __init__(self, value):
self.__value = value
def get_value(self):
return self.__value
def set_value(self, new_value):
self.__value = new_value
``` |
{
"source": "joaolima16/Projeto-InterMySql",
"score": 3
} |
#### File: Projeto-InterMySql/Trabalho interpolador banco de dados/getExcel.py
```python
from openpyxl import Workbook, load_workbook
from openpyxl.utils import get_column_letter
import json
wb = load_workbook('tabelaCarros.xlsx')
wb = wb.active
# Ask for the number of columns and rows in the table
qtdCol = int(input("Insira a quantidade de colunas da tabela: "))
qtdLinhas = int(input("Insira a quantidade de linhas da tabela: "))
dados = {}
for col in range(0,qtdCol):
letra = 65 + col
letra = chr(letra)
celula = f"{letra}1"
informacoesCelula = []
for lin in range(2,qtdLinhas+1):
valorCol = f"{letra}{lin}"
informacoesCelula.append(wb[valorCol].value)
dados[wb[celula].value] = informacoesCelula
dadosTratados = []
def formataJson():
i = 0
for linhas in range(1,qtdLinhas):
arrayInfosTemp = []
arrayIndex = []
objetoLinha = {}
for colunas in range(0,qtdCol):
letra = 65 + colunas
letra = chr(letra)
celulaCampo = f"{letra}1"
arrayIndex.append(wb[celulaCampo].value)
arrayInfosTemp.append(dados[wb[celulaCampo].value][i])
for infos in range(0,len(arrayIndex)):
objetoLinha[arrayIndex[infos]] = arrayInfosTemp[infos]
dadosTratados.append(objetoLinha)
i = i + 1
formataJson()
arquivo = open("InfosExcel.json",'w')
json.dump(dadosTratados,arquivo,indent=4)
``` |
{
"source": "joaolima-code/thunder-hawks-arpia",
"score": 4
} |
#### File: Atividade 11/1.3/tarefa.py
```python
def eh_primo(num):
if (num <= 0):
return False
if (num == 2):
return True
if (num%2 == 0):
return False
raiz = int(num**0.5)
for i in range(3, (raiz+1), 2):
if (num%i == 0):
return False
return True
num = int(input("Digite o numero: "))
print("Numero digitado: {}".format(num))
print("E primo? {}".format(eh_primo(num)))
``` |
{
"source": "joaolisboa/PiCaApp",
"score": 2
} |
#### File: PiCaApp/actions/Capture.py
```python
class Capture:
def __init__(self, camera):
self.camera = camera
def run(self):
return self.camera.capture()
```
#### File: PiCaApp/api/routes.py
```python
from __main__ import app
from flask import request
@app.route('/start', methods=['POST'])
def start():
camera = app.config['SHARED'].camera
requestData = request.get_json()
camera.start(requestData, warmup=True)
return ('', 200)
@app.route('/stop', methods=['POST'])
def stop():
camera = app.config['SHARED'].camera
camera.stop()
return ('', 200)
@app.route('/capture-photo', methods=['POST'])
def capturePhoto():
camera = app.config['SHARED'].camera
filename = camera.action('capture').run()
return (filename, 200)
@app.route('/record-video', methods=['POST'])
def recordVideo():
camera = app.config['SHARED'].camera
filename = camera.action('record').run()
return (filename, 200)
@app.route('/set-option', methods=['POST'])
def setOption():
requestData = request.get_json()
if 'option' not in requestData:
return ('Missing option parameter', 400)
if 'value' not in requestData:
return ('Missing value parameter', 400)
camera = app.config['SHARED'].camera
result = camera.option(requestData['option'], requestData['value']).run()
return (result, 200)
```
#### File: PiCaApp/configs/CameraConfig.py
```python
from configs.Config import Config
class CameraConfig(Config):
config = []
fullscreen = False
previewWidth = 480
previewHeight = 320
x = 0
y = 0
def __init__(self):
Config.__init__(self, '/camera_config.json')
def previewWindow(self):
return (self.x, self.y, self.previewWidth, self.previewHeight)
def getPreviewConfig(self, config):
if config:
if 'fullscreen' in config:
self.fullscreen = config['fullscreen']
            if 'window' in config:  # window keys are only read when a 'window' entry exists
# preferably use max width instead of width and height
# a max width will allow to properly determine the UI dimensions
if 'max_width' in config['window']:
self.previewWidth = config['window']['max_width']
self.previewHeight = round(self.previewWidth / (4/3))
else:
self.previewWidth = config['window']['width']
self.previewHeight = config['window']['height']
if 'x' in config['window']:
self.x = config['window']['x']
if 'y' in config['window']:
self.y = config['window']['y']
return self.previewWindow()
cameraConfig = CameraConfig()
```
#### File: joaolisboa/PiCaApp/Utils.py
```python
import os
def wakeDisplay():
os.system('DISPLAY=:0 xset s reset')
``` |