metadata (dict) | text (stringlengths 60 to 3.49M)
---|---
{
"source": "JonasDauster/mistgabel",
"score": 3
} |
#### File: JonasDauster/mistgabel/TestDataPrep.py
```python
import glob
from Bio import SeqIO
import random
import pandas as pd
def slidingWindow(sequence, winSize, step=1):
"""Returns a generator that will iterate through
the defined chunks of input sequence. Input sequence
must be iterable."""
# Verify the inputs
try:
it = iter(sequence)
except TypeError:
raise Exception("**ERROR** sequence must be iterable.")
if not ((type(winSize) == type(0)) and (type(step) == type(0))):
raise Exception("**ERROR** type(winSize) and type(step) must be int.")
if step > winSize:
raise Exception("**ERROR** step must not be larger than winSize.")
if winSize > len(sequence):
raise Exception("**ERROR** winSize must not be larger than sequence length.")
# Pre-compute number of chunks to emit
numOfChunks = ((len(sequence) - winSize) / step) + 1
# Do the work
for i in range(0, int(numOfChunks) * step, step):
yield sequence[i:i + winSize]
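# Usage sketch (illustrative values, not from the original repo): with an 8-base read,
# a window of 4 and a step of 2, the generator yields three windows:
#   list(slidingWindow("ACGTACGT", 4, 2))  ->  ['ACGT', 'GTAC', 'ACGT']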
def positive(filepath):
rlist1 = []
fastq_sequences = SeqIO.parse(open(filepath), 'fastq')
for fastq in fastq_sequences:
sequence = str(fastq.seq)
rlist1.append(sequence)
return rlist1
sequence=[]
label=[]
# insert your fastq to test here
positive_list = positive("NCFB_D_MG000106_2020_R1.fastq")
for x in range(len(positive_list)):
to_insert = positive_list[x]
    #to_insert = random.choice(positive_list)
    # to draw random reads from the file and test them instead, replace to_insert with the commented line above
sequence.append(to_insert)
if x == 1:
label.append("insert")
else:
label.append("no_insert")
insert_df = pd.DataFrame(sequence,label,columns=["sequence"])
print(insert_df)
# data ready to be classified via DataLoad.py, a reference is found in example data
insert_df.to_csv("real_data.csv")
``` |
{
"source": "Jonasdedeus/Genetic-Algorithm",
"score": 4
} |
#### File: Jonasdedeus/Genetic-Algorithm/IF42INT_1301183615.py
```python
import pandas as pd # this library is using for finding a max and get index
import math # this library is using for cos and sin
import random # this library is to get the random numbers
import time # this library is to run the output in the tuple based on the second
from time import sleep # this library is to support the time module
def getPop(size_pop,size_chrom): #this function is to get the population from the chromosome
arpop = [] # initialize a list of population with variable arpop
for i in range(size_pop): # to give a condition for inserting the population until the list full
archrom = [] # initialize a list of chromosome with variable archrom
for j in range (size_chrom): # to give a condition for inserting the chromosome until the list full
get = random.randint(0,9) # store the random number in variable get
archrom.append(get) # add the store random number to list of chromosome
arpop.append(archrom) # add the list of chromosome to the list of population
return arpop # return the all list of population
def encodingGenotype(archrom,size_chrom): # this function is to get the phenotype
rmax_x1,rmin_x1 = 2,-1 # initialize the limit of x1 that given in the assignment
rmax_x2,rmin_x2 = 1,-1 # initialize the limit of x2 that given in the assignment
valright,valright1,valdiv=0,0,0 # define new variable for storing
    for i in range(size_chrom): # to give a condition until every gene of the chromosome is used
        b = i+1 # initialize b as the power value
if i<=3: # give a condition until the end of first half calculation
vd = 10**-b # variable vd is for storing the power of base 10
vr = archrom[i]*vd # variable vr is to calculate the each chromosome times vd
valright+=vr # variable valright is to store every addition of vr
valdiv+=vd # variable valdiv is to store every addition of vd for division
else:# else condition for the last half calculation
b = i-3 # to make the power value remain same as before
vd = 10**-b # variable vd is for storing the power of base 10
vr = archrom[i]*vd # variable vr is to calculate the each chromosome times vd
valright1+=vr # variable valright1 is to store every addition of vr
x1 = rmin_x1+((rmax_x1 - rmin_x1) / (9 * valdiv)) * valright #initialize the variable x1 to store the value of x1
    x2 = rmin_x2+((rmax_x2 - rmin_x2) / (9 * valdiv)) * valright1 # initialize the variable x2 to store the value of x2
    return [x1,x2] # return the phenotype (x1,x2)
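# Decoding sketch (illustrative chromosome, not from the original assignment): an all-9 first
# half maps x1 to its upper bound and an all-0 second half maps x2 to its lower bound, e.g.
#   encodingGenotype([9,9,9,9,0,0,0,0], 8)  ->  approximately [2.0, -1.0]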
def function_h(archrom,size_chrom): # this function is to calculate the function of h using -h for minimization
decode = encodingGenotype(archrom,size_chrom) # initialize decode variable to assign the function of encoding
x1_val = decode[0] # initialize x1_val for the first index as x1
x2_val = decode[1] # initialize x2_val for the first index as x2
    h = math.cos(x1_val)*math.sin(x2_val) - x1_val/(x2_val**2 + 1) # variable h stores h(x1,x2) = cos(x1)sin(x2) - x1/(x2^2 + 1)
return -h # return the h * -1
def fitness(arpop,size_chrom): #this function to find the maxfitness by collect it in the array
Total_fit = [] # initialize a list of all fitness with variable Total_fit
    for i in range(len(arpop)): # to give a condition until the length of the population
Total_fit.append(function_h(arpop[i],size_chrom)) # add the result of calculation to the total fit
return Total_fit # return the all of the fitness of population
def Findbestfitness(find): #this function is to get the best fitness from the calculation of function h
    get_index = pd.Series(find).idxmax() # this variable get_index is to store the index of the max fitness
return get_index # return the index that store in get_index
def parentSelection(arpop,size_chrom): # parent selection with the tournament selection method
best = [] # initialize a list of best fitness with variable best
for i in range(0,4): # to give the condition until the range of tour which is 4
ind_chrom = arpop[random.randint(0,len(arpop)-1)] # initialize the ind_chrome to save the random index from list of population
        if (best == [] or function_h(ind_chrom,size_chrom) > function_h(best,size_chrom)): # the condition checks whether the list of best is empty
        # or the fitness (-h) of ind_chrom is greater than the fitness (-h) of the current best.
            best = ind_chrom # swap the value if the condition is met.
return best # return the best array.
def crossover(parent1, parent2): # crossover function
probability = random.random() # initialize variable probability to get the random percentage
    if (probability < 0.71): # apply crossover only when the probability is less than 0.71
        point_bounder1 = random.randint(0,1) # to make a first point randomly
        point_bounder2 = random.randint(4,7) # to make a second point randomly
        child1 = parent1[:point_bounder1] + parent2[point_bounder1:point_bounder2] + parent1[point_bounder2:] # exchange the genes between the two points for parent 1
        child2 = parent2[:point_bounder1] + parent1[point_bounder1:point_bounder2] + parent2[point_bounder2:] # exchange the genes between the two points, using the original parent 1, for parent 2
        parent1, parent2 = child1, child2 # keep the two children as the result
    return [parent1, parent2] # return the result in a list of parent 1 and parent 2
def mutation(child1, child2): # mutation function
probability = random.random() # initialize variable probability to get the random percentage
    if (probability <= 0.01): # to give a condition if probability is less than or equal to 0.01
child1[random.randint(0,7)] = random.randint(0,9) # to assign the random index with random value of child 1
child2[random.randint(0,7)] = random.randint(0,9) # to assign the random index with random value of child 2
return child1, child2 # return child 1 and child 2
def gen_replacement(arpop,bf_chrome,size_chrom): # generational replacement function is to process and generate the new generation
new_gen = [] # initialize a list of new generation with variable new_gen
    for i in range(0,2): # to give a condition for adding the two elites taken from the best chromosome
        new_gen.append(arpop[bf_chrome]) # add the best chromosome twice to the new generation
i = 0 #initialize i equal to zero
    while i <(len(arpop)-2): # give a condition i less than the population size minus 2 (the two elites), to keep the population size stable
check = True # assign the check as a true
parent1,parent2 = parentSelection(arpop,size_chrom),parentSelection(arpop,size_chrom) # assign parent 1 and parent 2 to the function of selection parent
while check: # give a condition if check is true
            if (parent1 == parent2): # check condition if the result of parent 1 and parent 2 is the same
                parent2 = parentSelection(arpop,size_chrom) # assign again parent 2 to the function of selection parent
            else: # else condition is the result of parent 2 is different from parent 1
check = False # assign the check as false to finish the while
offspring = crossover(parent1,parent2) # initialize off spring as a crossover result
        offspring = mutation(offspring[0],offspring[1]) # mutate the offspring of child 1 and child 2 and store it again in offspring as updated
new_gen+=offspring # add the last result of offspring to the new generation
        i+=2 # increase the i variable by adding two to make the looping match the population size.
return new_gen # return the new generation list
pop = int(input("Enter the number of population: ")) # initialize pop for inputting the size of population
generation = int(input("Enter the number of generation: ")) # initialize generation for inputting the stopping generation number
size_chrom = 8 # assign new variable of size chromosome as 8
population = getPop(pop,size_chrom) # assign new variable population to the function get population
for i in range(generation): # to give a condition until all generation is completed calculating.
best = fitness(population,size_chrom) # assign new variable best to function fitness
bf_chrom = Findbestfitness(best) # assign new variable bf_chrom to the function findbestfitness
best_fitness = function_h(population[bf_chrom],size_chrom) # assign new variable best_fitness to the function_h
    # Save the output in one tuple, then print it using the join function.
    one_tuple = ('{Generation -', str(i+1), '|', 'Best Chromosome: ', str(population[bf_chrom]), '|', 'Best Fitness: ', str(best_fitness),'|','Decode Chromosome (x1,x2):',str(encodingGenotype(population[bf_chrom],size_chrom)),'}')
print('\r', ' '.join(one_tuple), end='')
    # Update the printed line every 0.05 seconds
time.sleep(0.05)
    population = gen_replacement(population,bf_chrom,size_chrom) # assign the population to the result of generational replacement to produce the next generation
print("\n\nThe last Generation") # print the string the last generation
print("************************") # print the star
print("Best Chromossome:",population[bf_chrom])# print the best chromosome from population
print("Fitness: ",best_fitness) # print the best fitness
print("X1,x2: ",encodingGenotype(population[bf_chrom],size_chrom)) #print the x1 and x2 of encoding genotype function.
``` |
{
"source": "jonasdegrave/peptideClassifier",
"score": 3
} |
#### File: peptideClassifier/src/bulkDownload.py
```python
import os
import tqdm
import requests
import multiprocessing
# Import project files
from config import *
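# Note (inferred from usage below, not defined in this file): config is expected to provide
# DEBUG, VERBOSE, DOWNLOAD_AGAIN, TEMP_FILES_FOLDER, INPUT_FILES_FOLDER, OUTPUT_FILES_FOLDER,
# SOURCE_URL, FILE_EXTENSION, MAXIMUM_THREADS and DOWNLOAD_INDEX.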
# Verifies if a file exists;
def fileExists(fileName):
return os.path.isfile(fileName)
# Verifies if a folder exists, if not, creates the folder;
def createFolder(folderPath):
if not os.path.isdir(folderPath):
if DEBUG:
print("[Debug] Folder {} does not exist. Creating folder.".format(folderPath))
os.mkdir(folderPath)
else:
if DEBUG:
print("[Debug] Folder {} already exists. Moving on.".format(folderPath))
def download(url):
try:
fileName = TEMP_FILES_FOLDER + url[url.rfind("/")+1:]
if not DOWNLOAD_AGAIN and fileExists(fileName): return
response = requests.get(url, stream=True)
if response.status_code == requests.codes.ok:
with open(fileName, "wb") as file:
for data in response:
file.write(data)
else:
print("[Error] Could not download {}. Error: {}".format(fileName, response.status_code))
except Exception as e:
print("[Error] {}".format(e))
def main(inputFileName):
if VERBOSE:
print()
# Create working folders
createFolder(TEMP_FILES_FOLDER)
createFolder(INPUT_FILES_FOLDER)
createFolder(OUTPUT_FILES_FOLDER)
# Parse input files
inputFile = open(inputFileName, "r")
inputValues = inputFile.read()
inputFile.close()
fileList = ["{}{}{}".format(SOURCE_URL,
inputValue,
FILE_EXTENSION) for inputValue in inputValues.split("\n")]
# Create multiprocessing work pool
N_CPUS = min(multiprocessing.cpu_count(), MAXIMUM_THREADS) * 8 # Multiply by 8 is a test
if VERBOSE:
print("[Info] System has {} CPUs. Using {} threads for parallel work.".format(multiprocessing.cpu_count(), N_CPUS))
if VERBOSE:
print("[Info] Initializing bulk file download. Total of {} files in queue. This may take a while.".format(len(fileList)))
with multiprocessing.Pool(N_CPUS) as processPool:
result = list(tqdm.tqdm(processPool.imap(download, fileList), total=len(fileList)))
if VERBOSE:
print("[Info] File downloading is complete!")
if __name__ == "__main__":
main(INPUT_FILES_FOLDER + DOWNLOAD_INDEX)
``` |
{
"source": "JonasDeichelmann/uMusic",
"score": 4
} |
#### File: JonasDeichelmann/uMusic/functions.py
```python
import random
import math
#This function adds all the Steps between the main notes
def makeMelody(randInput):
melody = []
for i in range(len(randInput)-1):
        #Checks if the distance between the two notes is bigger than 2, if so then generate the numbers between
if abs(int(randInput[i])-int(randInput[i+1])) > 2:
k=0
#As many steps as the two notes are away from each other, divided by 2
for j in range(int(abs(int(randInput[i])-int(randInput[i+1]))/2)):
                #check which way it has to go
if randInput[i]> randInput[i+1]:
melody.append(randInput[i]-k)
else:
melody.append(randInput[i]+k)
k += 2
else:
melody.append(randInput[i])
return melody
#This function creates a random number between 20 and 100 from the user input and then returns the randomNumber
def createRondom(myInput):
#create a randomNumber between 50 and 90
randomNumber = int(random.uniform(50,90))
    #Divide the randomNumber by the ASCII input
randomNumber1 = randomNumber/myInput
#Multiply the randomNumber with the new divided randomNumber
randomNumber = randomNumber*randomNumber1
    #if the randomNumber is too big, then create a new one
    if int(randomNumber) > 100:
        return createRondom(int(randomNumber))
    #if the randomNumber is too small, then create a new one
    if int(randomNumber) < 20:
        return createRondom(int(randomNumber))
# return the randomNumber, since it is okay
return int(randomNumber)
def handleInput(myInput):
out = []
inp = myInput
#Converting each character from the input into an ASCII Number and add these to the list
for i in inp:
out.append(ord(i))
for j in range(len(out)):
#Call the random function with the ASCII Letter
temp = createRondom(int(out[j]))
out[j]=temp
#Create the steps between the numbers
out = makeMelody(out)
return(out)
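# Flow sketch (hypothetical input): handleInput("Hi") converts 'H'->72 and 'i'->105,
# replaces each ASCII value with a number in the 20..100 range via createRondom,
# and finally fills in the intermediate steps with makeMelody.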
```
#### File: JonasDeichelmann/uMusic/generate_music.py
```python
import os
import glob
import pitch
def GenerateMusic(notes_per_second=3, input_melody=[60,-1,62], input_chord=[60, 64, 67], pitch_class="[2, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1]"):
os.system("del /q static\\music\\*")
performance = "multiconditioned_performance_with_dynamics"
first = "performance_rnn_generate --config=" + performance + " --bundle_file=" + performance + ".mag --output_dir=static/music/ --num_outputs=1"
# A step is 10 ms. Hence, 3000 steps will generate 30 seconds of music
steps = "--num_steps=3000"
notes_per_second = "--notes_per_second=" + str(notes_per_second)
# melody is the starting sequence of notes given to the network
primer = "--primer_melody=\"" + str(input_melody) + "\""
chord = "--primer_pitches=\"" + str(input_chord) + "\""
pitch = "--pitch_class_histogram=\"" + pitch_class + "\""
command = first + " " + pitch + " " + steps + " " + notes_per_second + " " + primer + " " + chord
    # Run the performance_rnn command to generate a midi file in static/music/
os.system(command)
files = glob.glob("static/music/*.mid")
# Converting the midi file to a wav file using pySynth, the .wav is placed in the current directory
os.system("python .\PySynth-2.3\\readmidi.py " + files[0])
os.replace("midi.wav", "static/music/midi.wav")
# Remove the midi file
#files = glob.glob("\static\music\*.mid")
#print(files[0])
```
#### File: JonasDeichelmann/uMusic/page.py
```python
from generate_music import GenerateMusic
import functions
import pitch
from flask import Flask, render_template, request, redirect, url_for
from flask_bootstrap import Bootstrap
from pprint import pprint
app = Flask(__name__)
Bootstrap(app)
pitch_dict = pitch.mood
pitch_classes = pitch.pitch_classes
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
if request.method == "POST":
mood = request.form['mood']
sentence = request.form['sentence']
speed = request.form['speed']
per_second = 5
pitch = pitch_classes[pitch_dict[mood]]
if (speed == "Fast"):
per_second = 7
elif (speed == "Slow"):
per_second = 3
note_list = functions.handleInput(sentence)
GenerateMusic(notes_per_second=per_second, input_melody=note_list, pitch_class=pitch)
return render_template('sound.html')
return render_template('index.html')
@app.route('/test')
def test():
return render_template('test.html')
``` |
{
"source": "JonasDHomburg/LAMARCK",
"score": 2
} |
#### File: LAMARCK/examples/basics1.py
```python
from LAMARCK_ML.models import GenerationalModel
from LAMARCK_ML.metrics import CartesianFitness
from LAMARCK_ML.selection import TournamentSelection, ExponentialRankingSelection, MaxDiversitySelection, LinearRankingSelection
from LAMARCK_ML.reproduction import Mutation, Recombination, RandomStep
from LAMARCK_ML.replacement import NElitism
from LAMARCK_ML.utils.stopGenerational import StopByNoProgress, StopByGenerationIndex
from LAMARCK_ML.utils.dataSaver import DSSqlite3
from LAMARCK_ML.utils.modelStateSaverLoader import ModelStateSaverLoader
from LAMARCK_ML.utils.SlowDown import SlowDown
from LAMARCK_ML.models.initialization import RandomInitializer
from LAMARCK_ML.utils.evaluation import BaseEH
from LAMARCK_ML.individuals import CartesianIndividual
from LAMARCK_ML.utils.benchmark import Benchmark
def run_model():
model = GenerationalModel()
model.add([
# Initializing generation
RandomInitializer(**{
RandomInitializer.arg_CLASS: CartesianIndividual,
RandomInitializer.arg_GEN_SIZE: 36,
# RandomInitializer.arg_GEN_SIZE: 10,
RandomInitializer.arg_PARAM: {CartesianIndividual.arg_Dimensions: 2},
}),
# Metric
CartesianFitness(),
# Selection strategy
# ExponentialRankingSelection(**{ExponentialRankingSelection.arg_LIMIT: 6,}),
# LinearRankingSelection(**{LinearRankingSelection.arg_LIMIT: 10}),
MaxDiversitySelection(**{
MaxDiversitySelection.arg_LIMIT: 20,
# MaxDiversitySelection.arg_LIMIT: 4,
MaxDiversitySelection.arg_DIVERSITY: .8,
}),
# Reproduction
Recombination(**{
Recombination.arg_LIMIT: 36,
# Recombination.arg_LIMIT: 10,
Recombination.arg_DESCENDANTS: 2,
}),
# Mutation(**{
# Mutation.arg_P: 4,
# Mutation.arg_DESCENDANTS: 1,
# Mutation.arg_LIMIT: 36,
# }),
RandomStep(**{
RandomStep.arg_P: .7,
RandomStep.arg_STEP_SIZE: 3,
RandomStep.arg_DESCENDANTS: 1,
RandomStep.arg_LIMIT: 36,
# RandomStep.arg_LIMIT: 10,
}),
# Replacement method
NElitism(),
# Stopping
# StopByNoProgress(**{StopByNoProgress.arg_PATIENCE: 25,}),
StopByGenerationIndex(**{StopByGenerationIndex.arg_GENERATIONS: 25}),
# Saving states
# ModelStateSaverLoader(**{
# ModelStateSaverLoader.arg_REPRODUCTION: True,
# # ModelStateSaverLoader.arg_SELECTION: True,
# ModelStateSaverLoader.arg_REPLACEMENT: True,
# }),
DSSqlite3(**{DSSqlite3.arg_FILE: '/media/data/LAMARCK_DATA/shit3/history.db3'}),
# Slowing down the process
# SlowDown(**{SlowDown.arg_SLEEP_TIME: 10}),
# evaluation
BaseEH(),
# Benchmark
# Benchmark(),
])
if model.reset():
model.run()
print([ind.fitness for ind in model.generation])
else:
print('Model failed!')
if __name__ == '__main__':
run_model()
```
#### File: LAMARCK_ML/architectures/dataFlow.py
```python
from typing import Dict, Tuple, List
from LAMARCK_ML.data_util import TypeShape
from LAMARCK_ML.data_util import ProtoSerializable
class DataFlow(ProtoSerializable):
def __init__(self, *args, **kwargs):
super(DataFlow, self).__init__(**kwargs)
@property
def outputs(self) -> Dict[str, TypeShape]:
"""
    Named output shapes and types of the DataFlow object.
    :return: Dict mapping output label to TypeShape
"""
raise NotImplementedError()
@property
def inputLabels(self) -> List[str]:
"""
Labels for one or more inputs.
:return: List of labels for inputs
"""
raise NotImplementedError()
@property
def inputs(self) -> Dict[str, Tuple[str, str]]:
"""
DataFlow connections: Dict[obj_inputLabel, Tuple[other_outputLabel, DataFlow object/id_name]]
:return: Dict[IOLabel, Tuple[IOLabel, DataFlow(str)]]
"""
raise NotImplementedError()
# def connect(self, dataFlow_obj: 'DataFlow', connection: Dict[IOLabel, IOLabel]):
# """
# Add a DataFlow object as input to another DataFlow object.
# :param dataFlow_obj: data providing DataFlow object
# :param connection: inputLabel -> outputLabel
# """
# raise NotImplementedError()
@property
def id_name(self) -> str:
"""
:return: unique object name, typically composition of class name and class wide unique identifier
"""
raise NotImplementedError()
pass
```
#### File: functions/implementations/Conv2D.py
```python
import math
from enum import Enum
from random import sample
from typing import Tuple, List, Dict, Set
from random import choice, random
from LAMARCK_ML.architectures.functions.interface import Function
from LAMARCK_ML.architectures.variables import Variable
from LAMARCK_ML.architectures.variables.initializer import *
from LAMARCK_ML.architectures.variables.regularisation import *
from LAMARCK_ML.data_util import DimNames, TypeShape, IOLabel
from LAMARCK_ML.data_util.dataType import \
DHalf, \
DFloat, \
DDouble, \
DInt64, \
DInt32, \
DInt16, \
DInt8
from LAMARCK_ML.data_util.shape import Shape
from LAMARCK_ML.reproduction.methods import Mutation
from LAMARCK_ML.metrics.implementations import FlOps, Parameters
class Conv2D(Function,
Mutation.Interface,
FlOps.Interface,
Parameters.Interface):
# IOLabel.CONV2D_IN = 'CONV2D_IN'
IOLabel.CONV2D_IN = 'DATA_IN'
# IOLabel.CONV2D_OUT = 'CONV2D_OUT'
IOLabel.CONV2D_OUT = 'DATA_OUT'
class Padding(Enum):
SAME = 'SAME'
VALID = 'VALID'
allowedTypes = [DFloat, DDouble, DHalf, DInt8, DInt16, DInt32, DInt64]
_DF_INPUTS = [IOLabel.CONV2D_IN]
arg_OUT_NAMED_TYPE_SHAPES = 'outTypeShape'
arg_KERNEL_WIDTH = 'kernel_width'
arg_KERNEL_HEIGHT = 'kernel_height'
arg_STRIDE_WIDTH = 'stride_width'
arg_STRIDE_HEIGHT = 'stride_height'
arg_PADDING = 'padding'
arg_FILTER = 'channel'
arg_IN_WIDTH = 'in_width'
arg_IN_HEIGHT = 'in_height'
arg_IN_CHANNEL = 'in_channel'
__min_f_hw = .5
__max_f_hw = 1
__min_f_c = .5
__max_f_c = 1.5
@classmethod
def possible_output_shapes(cls,
input_ntss: Dict[str, TypeShape],
target_output: TypeShape,
is_reachable,
max_possibilities: int = 10,
**kwargs) -> \
List[Tuple[Dict[str, TypeShape], Dict[str, TypeShape], Dict[str, str]]]:
target_shape = target_output.shape
for label, nts in input_ntss.items():
if nts.dtype not in cls.allowedTypes:
continue
possible_sizes = []
names = []
invalid_dim = False
for _dim in nts.shape.dim:
target_size = target_shape[_dim.name]
if _dim.name == DimNames.WIDTH or \
_dim.name == DimNames.HEIGHT:
lower_border = max(math.floor(_dim.size * cls.__min_f_hw), (min(2, target_size)
if target_size is not None else 2))
upper_border = math.ceil(_dim.size * cls.__max_f_hw)
pool = list(range(lower_border + 1, upper_border))
border_pool = list({lower_border, upper_border})
if target_size is None or not (lower_border < target_size < upper_border):
pool = sample(pool, k=min(max(max_possibilities - len(border_pool), 0), len(pool)))
else:
pool.remove(target_size)
pool = sample(pool, k=min(max(max_possibilities - len(border_pool) - 1, 0), len(pool))) + [target_size]
pool = pool + border_pool
elif _dim.name == DimNames.CHANNEL:
lower_border = max(math.floor(_dim.size * cls.__min_f_c), (min(2, target_size)
if target_size is not None else 2))
upper_border = math.ceil(_dim.size * cls.__max_f_c)
pool = list(range(lower_border + 1, upper_border))
border_pool = list({lower_border, upper_border})
if target_size is None or not (lower_border < target_size < upper_border):
pool = sample(pool, k=min(max(max_possibilities - len(border_pool), 0), len(pool)))
else:
pool.remove(target_size)
pool = sample(pool, k=min(max(max_possibilities - len(border_pool) - 1, 0), len(pool))) + [target_size]
pool = pool + border_pool
elif _dim.name == DimNames.BATCH:
pool = [_dim.size]
else:
invalid_dim = True
break
possible_sizes.append(pool)
names.append(_dim.name)
if invalid_dim:
continue
for comb in Shape.random_dimension_product(possible_sizes):
out_nts = TypeShape(nts.dtype, Shape(*zip(names, comb)))
if is_reachable(out_nts, target_output):
yield ({},
{IOLabel.CONV2D_OUT: out_nts},
{IOLabel.CONV2D_IN: label})
@classmethod
def configurations(cls, h_i, h_o, w_i, w_o, c):
def stride_range(in_, out_):
if out_ == 1:
return [in_]
else:
lower_limit = math.ceil(in_ / out_)
upper_limit = math.ceil(in_ / (out_ - 1)) - 1
if lower_limit == 0 or \
math.ceil(in_ / lower_limit) < out_ or \
upper_limit == 0 or \
math.ceil(in_ / upper_limit) > out_:
return []
return list(range(lower_limit, upper_limit + 1))
def filter_range(in_, out_):
lower_limit = 1
upper_limit = in_ - out_ + 1
return list(range(lower_limit, upper_limit + 1))
configurations = list()
for s_h in stride_range(h_i, h_o):
for s_w in stride_range(w_i, w_o):
for k_h in range(1, 10):
for k_w in range(1, 10):
configurations.append({
cls.arg_KERNEL_HEIGHT: k_h,
cls.arg_KERNEL_WIDTH: k_w,
cls.arg_STRIDE_HEIGHT: s_h,
cls.arg_STRIDE_WIDTH: s_w,
cls.arg_PADDING: Conv2D.Padding.SAME.value,
cls.arg_FILTER: c
})
for k_w in filter_range(w_i, w_o):
for k_h in filter_range(h_i, h_o):
for s_h in stride_range(h_i - k_h + 1, h_o):
for s_w in stride_range(w_i - k_w + 1, w_o):
configurations.append({
cls.arg_KERNEL_HEIGHT: k_h,
cls.arg_KERNEL_WIDTH: k_w,
cls.arg_STRIDE_HEIGHT: s_h,
cls.arg_STRIDE_WIDTH: s_w,
cls.arg_PADDING: Conv2D.Padding.VALID.value,
cls.arg_FILTER: c
})
return configurations
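  # Sanity-check sketch (illustrative numbers, not part of the library's tests): for
  # h_i=h_o=w_i=w_o=4 the stride range collapses to [1], so the method yields the 81
  # SAME-padding configurations with kernel sizes 1..9 plus a single 1x1 VALID configuration.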
@classmethod
def generateParameters(cls,
input_dict: Dict[str, Tuple[str, Dict[str, TypeShape], str]],
expected_outputs: Dict[str, TypeShape],
variable_pool: dict = None) -> \
Tuple[List[Dict[str, object]], List[float]]:
if len(input_dict) != 1 or \
len(expected_outputs) != 1:
return [], []
input_nts_id, input_outputs, input_id = input_dict[IOLabel.CONV2D_IN]
in_nts = input_outputs[input_nts_id]
out_label, out_nts = next(iter(expected_outputs.items()))
if in_nts.dtype != out_nts.dtype:
return [], []
allowed_dimensions = [DimNames.BATCH, DimNames.CHANNEL, DimNames.WIDTH, DimNames.HEIGHT]
for _dim in in_nts.shape.dim:
if _dim.name not in allowed_dimensions:
return [], []
for _dim in out_nts.shape.dim:
if _dim.name not in allowed_dimensions:
return [], []
h_i = in_nts.shape[DimNames.HEIGHT]
h_o = out_nts.shape[DimNames.HEIGHT]
w_i = in_nts.shape[DimNames.WIDTH]
w_o = out_nts.shape[DimNames.WIDTH]
c_i = in_nts.shape[DimNames.CHANNEL]
configurations = cls.configurations(
h_i=h_i,
h_o=h_o,
w_i=w_i,
w_o=w_o,
c=out_nts.shape[DimNames.CHANNEL])
result_with_var = list()
result_without_var = list()
for config in configurations:
result_without_var.append({cls.arg_ATTRIBUTES: {**config,
**{cls.arg_OUT_NAMED_TYPE_SHAPES: {out_label: out_nts},
cls.arg_IN_WIDTH: w_i,
cls.arg_IN_HEIGHT: h_i,
cls.arg_IN_CHANNEL: c_i}},
cls.arg_INPUT_MAPPING: dict(
[(l_in, (l_out, id_name)) for l_in, (l_out, _, id_name) in input_dict.items()]),
cls.arg_VARIABLES: [
Variable(**{
Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|kernel',
Variable.arg_SHAPE: (
config.get(cls.arg_KERNEL_HEIGHT),
config.get(cls.arg_KERNEL_WIDTH),
in_nts.shape[DimNames.CHANNEL],
out_nts.shape[DimNames.CHANNEL]),
Variable.arg_INITIALIZER: GlorotUniform(),
Variable.arg_REGULARISATION: NoRegularisation()
}),
Variable(**{
Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|bias',
Variable.arg_SHAPE: (
out_nts.shape[DimNames.CHANNEL],),
Variable.arg_INITIALIZER: GlorotUniform(),
Variable.arg_REGULARISATION: NoRegularisation()
})
]})
possibleKernels = [v for v in variable_pool.get(cls.__name__ + '|kernel', [])
if v.shape == (config.get(cls.arg_KERNEL_HEIGHT),
config.get(cls.arg_KERNEL_WIDTH),
c_i,
config.get(cls.arg_FILTER))]
for kernel in possibleKernels:
result_with_var.append({cls.arg_ATTRIBUTES: {**config,
**{cls.arg_OUT_NAMED_TYPE_SHAPES: {out_label: out_nts},
cls.arg_IN_WIDTH: w_i,
cls.arg_IN_HEIGHT: h_i,
cls.arg_IN_CHANNEL: c_i}},
cls.arg_INPUT_MAPPING: dict(
[(l_in, (l_out, id_name)) for l_in, (l_out, _, id_name) in input_dict.items()]),
cls.arg_VARIABLES: [kernel,
Variable(**{
Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|bias',
Variable.arg_SHAPE: (
out_nts.shape[DimNames.CHANNEL],),
Variable.arg_INITIALIZER: GlorotUniform(),
Variable.arg_REGULARISATION: NoRegularisation()
})
]})
possibleBias = [v for v in variable_pool.get(cls.__name__ + '|bias', [])
if v.shape == (out_nts.shape[DimNames.CHANNEL],)]
for bias in possibleBias:
result_with_var.append({cls.arg_ATTRIBUTES: {**config,
**{cls.arg_OUT_NAMED_TYPE_SHAPES: {out_label: out_nts},
cls.arg_IN_WIDTH: w_i,
cls.arg_IN_HEIGHT: h_i,
cls.arg_IN_CHANNEL: c_i}},
cls.arg_INPUT_MAPPING: dict(
[(l_in, (l_out, id_name)) for l_in, (l_out, _, id_name) in input_dict.items()]),
cls.arg_VARIABLES: [
Variable(**{
Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|kernel',
Variable.arg_SHAPE: (
config.get(cls.arg_KERNEL_HEIGHT),
config.get(cls.arg_KERNEL_WIDTH),
c_i,
out_nts.shape[DimNames.CHANNEL]),
Variable.arg_INITIALIZER: GlorotUniform(),
Variable.arg_REGULARISATION: NoRegularisation()
}),
bias
]})
result_params = list()
result_prob = list()
amount_var = len(result_with_var)
if amount_var > 0:
prob = 1 / 2 / amount_var
result_params.extend(result_with_var)
result_prob.extend([prob for _ in range(amount_var)])
amount_without_var = len(result_without_var)
prob = 1 / amount_without_var
if amount_var > 0:
prob /= 2
result_params.extend(result_without_var)
result_prob.extend([prob for _ in range(amount_without_var)])
return result_params, result_prob
def __init__(self, **kwargs):
super(Conv2D, self).__init__(**kwargs)
if not (isinstance(self.attr[self.arg_OUT_NAMED_TYPE_SHAPES], dict) and
all([isinstance(nts, TypeShape) and isinstance(label, str) for label, nts in
self.attr[self.arg_OUT_NAMED_TYPE_SHAPES].items()])):
raise Exception('Wrong output TypeShapes!')
@property
  def outputs(self) -> Dict[str, TypeShape]:
return self.attr[self.arg_OUT_NAMED_TYPE_SHAPES]
def mutate(self, prob, variable_pool=None):
def resetVariable(v):
v.value = None
v.trainable = True
return v
def keepTraining(v):
v.trainable = True
return v
def replaceVariable(v):
variable = choice(
        [_v for _v in variable_pool.get(self.__class__.__name__ + '|kernel', []) if v.shape == _v.shape])
return variable
result = Conv2D.__new__(Conv2D)
result.__setstate__(self.get_pb())
if random() < .8:
functions = [resetVariable, keepTraining]
if variable_pool is not None:
functions.append(replaceVariable)
new_variables = list()
changed = False
for _v in result.variables:
if random() < prob:
new_variable = choice(functions)(_v)
changed = True
else:
new_variable = _v
new_variables.append(new_variable)
result.variables = new_variables
if changed:
result._id_name = Conv2D.getNewName()
else:
if random() < prob:
result._id_name = Conv2D.getNewName()
out_nts = self.attr[self.arg_OUT_NAMED_TYPE_SHAPES][IOLabel.CONV2D_OUT]
h_o = out_nts.shape[DimNames.HEIGHT]
w_o = out_nts.shape[DimNames.WIDTH]
c = out_nts.shape[DimNames.CHANNEL]
config = choice(Conv2D.configurations(h_i=self.attr[self.arg_IN_HEIGHT],
h_o=h_o,
w_i=self.attr[self.arg_IN_WIDTH],
w_o=w_o,
c=c))
result.attr = {**result.attr, **config}
result.variables = [Variable(**{Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: self.__class__.__name__ + '|kernel',
Variable.arg_SHAPE: (
config.get(self.arg_KERNEL_HEIGHT),
config.get(self.arg_KERNEL_WIDTH),
self.attr[self.arg_IN_CHANNEL],
out_nts.shape[DimNames.CHANNEL]),
Variable.arg_INITIALIZER: GlorotUniform(),
Variable.arg_REGULARISATION: NoRegularisation()
}),
Variable(**{
Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: self.__class__.__name__ + '|bias',
Variable.arg_SHAPE: (
out_nts.shape[DimNames.HEIGHT],
out_nts.shape[DimNames.WIDTH],
out_nts.shape[DimNames.CHANNEL]),
Variable.arg_INITIALIZER: GlorotUniform(),
Variable.arg_REGULARISATION: NoRegularisation()
})]
return result
@classmethod
def min_transform(cls, nts):
if nts.dtype not in cls.allowedTypes:
return None
s = Shape()
result = TypeShape(nts.dtype, s)
for _dim in nts.shape.dim:
if _dim.name == DimNames.BATCH:
s.dim.append(Shape.Dim(_dim.name, _dim.size))
elif _dim.name == DimNames.CHANNEL:
s.dim.append(Shape.Dim(_dim.name, int(math.floor(_dim.size * cls.__min_f_c))))
elif _dim.name == DimNames.WIDTH or \
_dim.name == DimNames.HEIGHT:
s.dim.append(Shape.Dim(_dim.name, int(math.floor(_dim.size * cls.__min_f_hw))))
else:
return None
return result
@classmethod
def max_transform(cls, nts):
if nts.dtype not in cls.allowedTypes:
return None
s = Shape()
result = TypeShape(nts.dtype, s)
for _dim in nts.shape.dim:
if _dim.name == DimNames.BATCH:
s.dim.append(Shape.Dim(_dim.name, _dim.size))
elif _dim.name == DimNames.CHANNEL:
s.dim.append(Shape.Dim(_dim.name, int(math.ceil(_dim.size * cls.__max_f_c))))
elif _dim.name == DimNames.WIDTH or \
_dim.name == DimNames.HEIGHT:
s.dim.append(Shape.Dim(_dim.name, int(math.ceil(_dim.size * cls.__max_f_hw))))
else:
return None
return result
def flops_per_sample(self):
out_shape = self.attr[self.arg_OUT_NAMED_TYPE_SHAPES][IOLabel.CONV2D_OUT].shape
return self.attr[self.arg_KERNEL_HEIGHT] * self.attr[self.arg_KERNEL_WIDTH] * self.attr[self.arg_IN_CHANNEL] * \
out_shape[DimNames.HEIGHT] * out_shape[DimNames.WIDTH] * out_shape[DimNames.CHANNEL] \
+ out_shape[DimNames.HEIGHT] * out_shape[DimNames.WIDTH] * out_shape[DimNames.CHANNEL] # ReLU
def parameters(self):
return self.attr[self.arg_KERNEL_WIDTH] * self.attr[self.arg_KERNEL_HEIGHT] * \
self.attr[self.arg_IN_CHANNEL] * self.attr[self.arg_FILTER]
@property
def inputLabels(self) -> List[str]:
return self._DF_INPUTS
```
#### File: functions/implementations/Dense.py
```python
import math
from random import random, choice
from random import sample
from typing import Tuple, List, Dict, Set
from LAMARCK_ML.architectures.functions.interface import Function
from LAMARCK_ML.architectures.variables import Variable
from LAMARCK_ML.architectures.variables.initializer import *
from LAMARCK_ML.architectures.variables.regularisation import *
from LAMARCK_ML.data_util import DimNames, TypeShape, IOLabel
from LAMARCK_ML.data_util.dataType import \
DHalf, \
DFloat, \
DDouble, \
DInt64, \
DInt32, \
DInt16, \
DInt8
from LAMARCK_ML.data_util.shape import Shape
from LAMARCK_ML.reproduction.methods import Mutation
from LAMARCK_ML.metrics.implementations import FlOps, Parameters, Nodes
class Dense(Function,
Mutation.Interface,
FlOps.Interface,
Parameters.Interface,
Nodes.Interface,
):
# IOLabel.DENSE_OUT = 'DENSE_OUT'
IOLabel.DENSE_OUT = 'DATA_OUT'
# IOLabel.DENSE_IN = 'DENSE_IN'
IOLabel.DENSE_IN = 'DATA_IN'
allowedTypes = [DFloat, DDouble, DHalf, DInt8, DInt16, DInt32, DInt64]
_DF_INPUTS = [IOLabel.DENSE_IN]
arg_UNITS = 'units'
arg_IN_UNITS = 'in_units'
arg_OUT_NAMED_TYPE_SHAPES = 'outTypeShape'
__min_f = .5
__max_f = 1.5
@classmethod
def possible_output_shapes(cls,
input_ntss: Dict[str, TypeShape],
target_output: TypeShape,
is_reachable,
max_possibilities: int = 10,
**kwargs) -> \
List[Tuple[Dict[str, TypeShape], Dict[str, TypeShape], Dict[str, str]]]:
target_shape = target_output.shape
for label, nts in input_ntss.items():
if nts.dtype not in cls.allowedTypes:
continue
possible_sizes = []
names = []
invalid_dim = False
for _dim in nts.shape.dim:
target_size = target_shape[_dim.name]
# if _dim.name == DimNames.WIDTH or \
# _dim.name == DimNames.HEIGHT or \
# _dim.name == DimNames.CHANNEL or \
if _dim.name == DimNames.UNITS:
lower_border = max(math.floor(_dim.size * cls.__min_f), (min(2, target_size)
if target_size is not None else 2))
upper_border = math.ceil(_dim.size * cls.__max_f)
pool = list(range(lower_border + 1, upper_border))
border_pool = list({lower_border, upper_border})
if target_size is None or not (lower_border < target_size < upper_border):
pool = sample(pool, k=min(max(max_possibilities - len(border_pool), 0), len(pool)))
else:
pool.remove(target_size)
pool = sample(pool, k=min(max(max_possibilities - len(border_pool) - 1, 0), len(pool))) + [target_size]
pool = pool + border_pool
elif _dim.name == DimNames.BATCH: # or \
# _dim.name == DimNames.TIME:
pool = [_dim.size]
else:
invalid_dim = True
break
possible_sizes.append(pool)
names.append(_dim.name)
if invalid_dim:
continue
for dim_combination in Shape.random_dimension_product(possible_sizes):
out_nts = TypeShape(nts.dtype, Shape(*zip(names, dim_combination)))
if is_reachable(out_nts, target_output):
yield ({},
{IOLabel.DENSE_OUT: out_nts},
{IOLabel.DENSE_IN: label})
@classmethod
def generateParameters(cls,
input_dict: Dict[str, Tuple[str, Dict[str, TypeShape], str]],
expected_outputs: Dict[str, TypeShape],
variable_pool: dict = None) -> \
Tuple[List[Dict[str, object]], List[float]]:
if len(input_dict) != 1 or \
len(expected_outputs) != 1:
return [], []
input_nts_id, inputs_outputs, _ = input_dict[IOLabel.DENSE_IN]
in_nts = inputs_outputs[input_nts_id]
out_label, out_nts = next(iter(expected_outputs.items()))
if in_nts.dtype != out_nts.dtype:
return [], []
inUnits = in_nts.shape.units
outUnits = out_nts.shape.units
possibleKernels = []
possibleBias = []
if variable_pool is not None:
possibleKernels = [v for v in variable_pool.get(cls.__name__ + '|kernel', []) if v.shape == (inUnits, outUnits)]
possibleBias = [v for v in variable_pool.get(cls.__name__ + '|bias', []) if v.shape == (outUnits,)]
_dict = {cls.arg_ATTRIBUTES: {cls.arg_UNITS: outUnits,
cls.arg_OUT_NAMED_TYPE_SHAPES: {out_label: out_nts},
cls.arg_IN_UNITS: inUnits},
cls.arg_INPUT_MAPPING: dict(
[(l_in, (l_out, id_name)) for l_in, (l_out, _, id_name) in input_dict.items()]),
}
amount_ = len(possibleBias) + len(possibleKernels)
init_ = Constant()
reg_ = NoRegularisation()
prob_ = 0
if amount_ > 0:
prob_ = 1 / 2 / amount_
_init = [GlorotUniform()]
_reg = [NoRegularisation()]
_amount = len(_init) * len(_reg)
_prob = 1 / 2 / _amount if amount_ > 0 else 1 / _amount
return ([{**_dict, **{cls.arg_VARIABLES: [k, Variable(**{Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|bias',
Variable.arg_SHAPE: (outUnits,),
Variable.arg_INITIALIZER: init_,
Variable.arg_REGULARISATION: reg_
})]}} for k in possibleKernels] +
[{**_dict, **{cls.arg_VARIABLES: [b, Variable(**{Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|kernel',
Variable.arg_SHAPE: (inUnits, outUnits),
Variable.arg_INITIALIZER: init_,
Variable.arg_REGULARISATION: reg_,
})]}} for b in possibleBias] +
[{**_dict, **{cls.arg_VARIABLES: [Variable(**{Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|kernel',
Variable.arg_SHAPE: (inUnits, outUnits),
Variable.arg_INITIALIZER: _init_,
Variable.arg_REGULARISATION: _reg_}),
Variable(**{Variable.arg_DTYPE: out_nts.dtype,
Variable.arg_TRAINABLE: True,
Variable.arg_NAME: cls.__name__ + '|bias',
Variable.arg_SHAPE: (outUnits,),
Variable.arg_INITIALIZER: _init_,
Variable.arg_REGULARISATION: _reg_})
]}} for _init_ in _init for _reg_ in _reg]), \
[prob_ for _ in range(amount_)] + \
[_prob for _ in range(_amount)]
def __init__(self, **kwargs):
super(Dense, self).__init__(**kwargs)
if not (isinstance(self.attr[self.arg_OUT_NAMED_TYPE_SHAPES], dict) and
all([isinstance(nts, TypeShape) and isinstance(label, str) for label, nts in
self.attr[self.arg_OUT_NAMED_TYPE_SHAPES].items()])):
raise Exception('Wrong output TypeShapes!')
@property
  def outputs(self) -> Dict[str, TypeShape]:
return self.attr[self.arg_OUT_NAMED_TYPE_SHAPES]
def mutate(self, prob, variable_pool=None):
def resetVariable(v):
v.value = None
v.trainable = True
return v
def keepTraining(v):
v.trainable = True
return v
def replaceVariable(v):
variable = choice(
        [_v for _v in variable_pool.get(self.__class__.__name__ + '|kernel', []) if v.shape == _v.shape])
return variable
functions = [resetVariable, keepTraining]
if variable_pool is not None:
functions.append(replaceVariable)
result = Dense.__new__(Dense)
result.__setstate__(self.get_pb())
new_variables = list()
changed = False
for _v in result.variables:
if random() < prob:
new_variable = choice(functions)(_v)
changed = True
else:
new_variable = _v
new_variables.append(new_variable)
result.variables = new_variables
if changed:
result._id_name = Dense.getNewName()
return result
@classmethod
def min_transform(cls, nts):
if nts.dtype not in cls.allowedTypes:
return None
s = Shape()
result = TypeShape(nts.dtype, s)
for _dim in nts.shape.dim:
if _dim.name == DimNames.BATCH:
s.dim.append(Shape.Dim(_dim.name, _dim.size))
elif _dim.name == DimNames.UNITS:
s.dim.append(Shape.Dim(_dim.name, int(math.floor(_dim.size * cls.__min_f))))
else:
return None
return result
@classmethod
def max_transform(cls, nts):
if nts.dtype not in cls.allowedTypes:
return None
s = Shape()
result = TypeShape(nts.dtype, s)
for _dim in nts.shape.dim:
if _dim.name == DimNames.BATCH:
s.dim.append(Shape.Dim(_dim.name, _dim.size))
elif _dim.name == DimNames.UNITS:
s.dim.append(Shape.Dim(_dim.name, int(math.ceil(_dim.size * cls.__max_f))))
else:
return None
return result
def flops_per_sample(self):
return self.attr[self.arg_IN_UNITS] * self.attr[self.arg_UNITS] \
+ self.attr[self.arg_UNITS] # ReLU
def parameters(self):
return self.attr[self.arg_IN_UNITS] * self.attr[self.arg_UNITS] + self.attr[self.arg_UNITS]
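  # Example (illustrative numbers): with in_units=20 and units=10 this counts
  # 20*10 weights + 10 biases = 210 trainable parameters.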
def nodes(self):
return self.attr[self.arg_UNITS]
@property
def inputLabels(self) -> List[str]:
return self._DF_INPUTS
```
#### File: functions/implementations/Softmax.py
```python
from typing import List, Dict, Tuple
from LAMARCK_ML.architectures.functions.interface import Function
from LAMARCK_ML.data_util import IOLabel, TypeShape
from LAMARCK_ML.data_util.dataType import \
DHalf, \
DFloat, \
DDouble, \
DInt64, \
DInt32, \
DInt16, \
DInt8
class Softmax(Function,
):
allowedTypes = {DFloat, DDouble, DHalf, DInt8, DInt16, DInt32, DInt64}
IOLabel.SOFTMAX_OUT = 'SOFTMAX_OUT'
IOLabel.SOFTMAX_IN = 'SOFTMAX_IN'
arg_OUT_TYPE_SHAPE = 'outTypeShape'
@classmethod
def possible_output_shapes(cls, input_ntss: Dict[str, TypeShape], target_output: TypeShape, is_reachable,
max_possibilities: int = 10, **kwargs) -> \
List[Tuple[Dict[str, TypeShape], Dict[str, TypeShape], Dict[str, str]]]:
for label, nts in input_ntss.items():
if nts.dtype not in cls.allowedTypes:
continue
yield ({},
{IOLabel.SOFTMAX_OUT: nts},
{IOLabel.SOFTMAX_IN: label})
@classmethod
def generateParameters(cls, input_dict: Dict[str, Tuple[str, Dict[str, TypeShape], str]],
expected_outputs: Dict[str, TypeShape], variable_pool: dict = None) -> \
Tuple[List[Dict[str, object]], List[float]]:
if len(input_dict) != 1 or \
len(expected_outputs) != 1:
print(len(input_dict))
print(len(expected_outputs))
return [], []
return [{cls.arg_ATTRIBUTES: {cls.arg_OUT_TYPE_SHAPE: expected_outputs, },
cls.arg_VARIABLES: [],
cls.arg_INPUT_MAPPING: {l_in: (l_out, id_name) for l_in, (l_out, _, id_name) in input_dict.items()}
}], [1]
@classmethod
def min_transform(cls, nts: TypeShape):
if nts.dtype not in cls.allowedTypes:
return None
return nts
@classmethod
def max_transform(cls, nts: TypeShape):
if nts.dtype not in cls.allowedTypes:
return None
return nts
def __init__(self, **kwargs):
super(Softmax, self).__init__(**kwargs)
if not (isinstance(self.attr[self.arg_OUT_TYPE_SHAPE], dict) and
all([isinstance(nts, TypeShape) and isinstance(label, str) for label, nts in
self.attr[self.arg_OUT_TYPE_SHAPE].items()])):
raise Exception('Wrong output TypeShapes!')
@property
def outputs(self) -> Dict[str, TypeShape]:
return self.attr[self.arg_OUT_TYPE_SHAPE]
@property
def inputLabels(self) -> List[str]:
return list(self.input_mapping.keys())
```
#### File: LAMARCK_ML/architectures/neuralNetwork_test.py
```python
import unittest
import os
from LAMARCK_ML.architectures.functions import *
from LAMARCK_ML.architectures.neuralNetwork import NeuralNetwork
from LAMARCK_ML.data_util import DimNames, Shape, \
DFloat, TypeShape, IOLabel
import networkx as nx
import time
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
class TestNeuralNetwork(unittest.TestCase):
def test_instantiation_USD_outputTypeShapes(self):
batch = 3
_data = TypeShape(DFloat, Shape((DimNames.BATCH, batch),
(DimNames.CHANNEL, 3), (DimNames.HEIGHT, 4),
(DimNames.WIDTH, 5)))
outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 60))
self.assertRaises(InvalidFunctionType, NeuralNetwork, **{
NeuralNetwork.arg_INPUTS: {'data_in', (_data, 'Dataset')},
NeuralNetwork.arg_OUTPUT_TARGETS: {'out0': TypeShape(DFloat, outShape)}})
def test_instantiation_USD_ONTS_Dense_Merge(self):
for i in range(10):
batch = 1
_data = TypeShape(DFloat, Shape((DimNames.BATCH, batch), (DimNames.UNITS, 20)))
IOLabel.DS1 = 'DS1'
IOLabel.DS2 = 'DS2'
inputs = {IOLabel.DS1: (IOLabel.DATA, _data, 'Dataset'),
IOLabel.DS2: (IOLabel.DATA, _data, 'Dataset')}
outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
outputs = {'out0': TypeShape(DFloat, outShape), 'out1': TypeShape(DFloat, outShape1)}
functions = [Merge, Dense]
NN = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: inputs,
NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
NeuralNetwork.arg_FUNCTIONS: functions,
NeuralNetwork.arg_MAX_BRANCH: 2})
self.assertIsNotNone(NN)
pb = NN.get_pb()
state = NN.__getstate__()
NN_pb = NeuralNetwork.__new__(NeuralNetwork)
NN_pb.__setstate__(pb)
self.assertIsNot(NN, NN_pb)
NN_state = NeuralNetwork.__new__(NeuralNetwork)
NN_state.__setstate__(state)
self.assertIsNot(NN, NN_state)
NN_mut = NN.mutate(1)[0]
self.assertEqual(pb, NN.get_pb())
self.assertIsNot(NN, NN_mut)
self.assertNotEqual(NN, NN_mut)
f_ids = dict([(_id, None) for _, _id in NN_mut.inputs.values()])
for _f in NN_mut.functions:
f_ids[_f.id_name] = _f
for _f in NN_mut.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.assertTrue(False)
stack = [f_id for _, f_id in NN_mut.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
NN_mut = NN.mutate(1)[0]
self.assertEqual(pb, NN.get_pb())
self.assertIsNot(NN, NN_mut)
self.assertNotEqual(NN, NN_mut)
f_ids = dict([(_id, None) for _, _id in NN_mut.inputs.values()])
for _f in NN_mut.functions:
f_ids[_f.id_name] = _f
for _f in NN_mut.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.assertTrue(False)
stack = [f_id for _, f_id in NN_mut.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
NN_mut = NN.mutate(0)[0]
NN_mut._id_name = NN._id_name
self.assertNotEqual(NN, NN_mut)
def test_instantiation_Conv2D_Pool2D_Flatten(self):
for i in range(10):
batch = 1
_data = TypeShape(DFloat, Shape((DimNames.BATCH, batch),
(DimNames.HEIGHT, 64),
(DimNames.WIDTH, 64),
(DimNames.CHANNEL, 3)))
_target = TypeShape(DFloat, Shape((DimNames.BATCH, batch),
(DimNames.UNITS, 100),
))
outputs = {'out0': _target}
IOLabel.DS = 'DS'
inputs = {IOLabel.DS: (IOLabel.DATA, _data, 'Dataset')}
functions = [QConv2D, QPooling2D, Flatten]
NN1 = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: inputs,
NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
NeuralNetwork.arg_FUNCTIONS: functions})
self.assertIsNotNone(NN1)
# print(i)
pb = NN1.get_pb()
state = NN1.__getstate__()
NN_pb = NeuralNetwork.__new__(NeuralNetwork)
NN_pb.__setstate__(pb)
self.assertIsNot(NN1, NN_pb)
NN_state = NeuralNetwork.__new__(NeuralNetwork)
NN_state.__setstate__(state)
self.assertIsNot(NN1, NN_state)
NN_mut = NN1.mutate(100)
self.assertIsNot(NN1, NN_mut)
self.assertNotEqual(NN1, NN_mut)
NN_mut = NN1.mutate(0)
self.assertIsNot(NN1, NN_mut)
self.assertNotEqual(NN1, NN_mut)
NN2 = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: inputs,
NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
NeuralNetwork.arg_FUNCTIONS: functions})
NN_rec = NN1.recombine(NN2)[0]
self.assertIsNotNone(NN_rec)
f_ids = dict([(_id, None) for _, _id in NN_rec.inputs.values()])
for _f in NN_rec.functions:
f_ids[_f.id_name] = _f
for _f in NN_rec.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.assertTrue(False)
stack = [f_id for _, f_id in NN_rec.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
def test_instantiation_Conv2D_Pool2D_Flatten_Dense(self):
for i in range(10):
batch = 1
_data = TypeShape(DFloat, Shape((DimNames.BATCH, batch),
(DimNames.HEIGHT, 32),
(DimNames.WIDTH, 32),
(DimNames.CHANNEL, 3)))
_target = TypeShape(DFloat, Shape((DimNames.BATCH, batch),
(DimNames.UNITS, 10),
))
outputs = {'out0': _target}
IOLabel.DS = 'DS'
inputs = {IOLabel.DS: (IOLabel.DATA, _data, 'Dataset')}
functions = [Conv2D, Pooling2D, Flatten, Dense]
NN = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: inputs,
NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
NeuralNetwork.arg_FUNCTIONS: functions})
self.assertIsNotNone(NN)
pb = NN.get_pb()
state = NN.__getstate__()
f_ids = dict([(_id, None) for _, _id in NN.inputs.values()])
for _f in NN.functions:
f_ids[_f.id_name] = _f
for _f in NN.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.assertTrue(False)
stack = [f_id for _, f_id in NN.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
NN_pb = NeuralNetwork.__new__(NeuralNetwork)
NN_pb.__setstate__(pb)
self.assertIsNot(NN, NN_pb)
NN_state = NeuralNetwork.__new__(NeuralNetwork)
NN_state.__setstate__(state)
self.assertIsNot(NN, NN_state)
NN_mut = NN.mutate(100)
self.assertIsNot(NN, NN_mut)
self.assertNotEqual(NN, NN_mut)
NN_mut = NN.mutate(0)
self.assertIsNot(NN, NN_mut)
self.assertNotEqual(NN, NN_mut)
def test_recombination_Dense_Merge(self):
for i in range(100):
batch = 1
_data = TypeShape(DFloat, Shape((DimNames.BATCH, batch), (DimNames.UNITS, 20)))
IOLabel.DS1 = 'DS1'
IOLabel.DS2 = 'DS2'
inputs = {IOLabel.DS1: (IOLabel.DATA, _data, 'Dataset'),
IOLabel.DS2: (IOLabel.DATA, _data, 'Dataset')}
outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
outputs = {'out0': TypeShape(DFloat, outShape),
'out1': TypeShape(DFloat, outShape1)}
functions = [Merge, Dense]
NN1 = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: inputs,
NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
NeuralNetwork.arg_FUNCTIONS: functions,
NeuralNetwork.arg_RECOMBINATION_PROBABILITY: 1.0})
self.assertIsNotNone(NN1)
NN2 = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: inputs,
NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
NeuralNetwork.arg_FUNCTIONS: functions,
NeuralNetwork.arg_RECOMBINATION_PROBABILITY: 1.0})
self.assertIsNotNone(NN2)
NN_rec = NN1.recombine(NN2)[0]
f_ids = dict([(_id, None) for _, _id in NN_rec.inputs.values()])
for _f in NN_rec.functions:
f_ids[_f.id_name] = _f
for _f in NN_rec.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.assertTrue(False)
stack = [f_id for _, f_id in NN_rec.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
for f in NN_rec.functions:
if f.__class__ != Dense:
continue
kernel = [v for v in f.variables if v.name.endswith('|kernel')][0]
label, f_id = f.inputs['DATA_IN']
_f = f_ids[f_id]
if _f is not None:
self.assertEqual(kernel.shape,
(_f.outputs[label].shape[DimNames.UNITS],
f.attr[f.arg_OUT_NAMED_TYPE_SHAPES]['DATA_OUT'].shape[DimNames.UNITS]))
@unittest.skip('debug')
def test_reachable(self):
# target = TypeShape(DFloat, Shape((DimNames.BATCH, 1),
# (DimNames.UNITS, 20)))
input_shape = TypeShape(DFloat, Shape((DimNames.BATCH, 1),
(DimNames.HEIGHT, 32),
(DimNames.WIDTH, 32),
(DimNames.CHANNEL, 3)
))
depth = 8
for i in range(1, 100):
# input_shape = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, i)))
target = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, i * 10)))
print()
print(input_shape)
print(target)
print(NeuralNetwork.reachable(input_nts=input_shape,
target_nts=target,
max_depth=depth,
function_pool={Conv2D, Flatten}))
print(list(Dense.possible_output_shapes(input_ntss={IOLabel.DEFAULT: input_shape},
target_output=target,
is_reachable=
lambda x, y: NeuralNetwork.reachable(x, y, depth - 1, {Dense, Merge}),
)
))
pass
@unittest.skip('debugging')
def test_simple_path(self):
ntss = {IOLabel.DEFAULT: TypeShape(DFloat, Shape((DimNames.BATCH, 1),
(DimNames.UNITS, 23)))}
target = TypeShape(DFloat, Shape((DimNames.BATCH, 1),
(DimNames.UNITS, 154)))
depth = 5
debug_node = 'debug'
before = time.time()
for _ in range(1):
NeuralNetwork.reachable(next(iter(ntss)), target, depth, {Dense, Merge})
print('Time', time.time() - before)
print(NeuralNetwork.reachable(next(iter(ntss)), target, depth, {Dense, Merge}))
print(ntss)
runs = 10000
fails = 0
for i in range(runs):
blueprint = nx.DiGraph()
blueprint.add_node(debug_node,
ntss=ntss,
DataFlowObj=None)
out_node, nts_id, nodes = next(NeuralNetwork.simple_path(input_node=debug_node,
input_ntss=ntss,
output_shape=target,
output_label=IOLabel.DEFAULT,
blueprint=blueprint,
min_depth=0,
max_depth=depth,
function_pool={Dense, Merge},
), (None, None, None))
if out_node is None:
# print(i, 'Error')
fails += 1
# else:
# print(i, 'Success')
print('percentage failed:', fails / runs)
pass
@unittest.skip('debugging')
def test_func_children(self):
ntss = {IOLabel.DEFAULT: TypeShape(DFloat, Shape((DimNames.BATCH, 1),
(DimNames.HEIGHT, 10),
(DimNames.WIDTH, 10),
(DimNames.CHANNEL, 2)))}
target = TypeShape(DFloat, Shape((DimNames.BATCH, 1),
(DimNames.UNITS, 200)
# (DimNames.HEIGHT, 32),
# (DimNames.WIDTH, 32),
# (DimNames.CHANNEL, 3)
))
_f = Flatten
for _, out_nts, _ in _f.possible_output_shapes(
ntss, target, lambda x, y: NeuralNetwork.reachable(x, y, 0, {Flatten}), 10):
print(next(iter(out_nts.values())))
pass
```
#### File: LAMARCK_ML/datasets/interface.py
```python
from LAMARCK_ML.architectures import DataFlow
from LAMARCK_ML.data_util.attribute import pb2attr
from LAMARCK_ML.datasets.Dataset_pb2 import DatasetProto
class ResetState(Exception):
pass
class InvalidBatchSize(Exception):
pass
class DatasetInterface(DataFlow):
arg_NAME = 'name'
arg_CLSNAME = 'cls_name'
def __init__(self, **kwargs):
super(DatasetInterface, self).__init__(**kwargs)
self._id_name = kwargs.get(self.arg_NAME, 'None')
def get_pb(self, result=None):
if not isinstance(result, DatasetProto):
result = DatasetProto()
result.name_val = self._id_name
result.cls_name = self.__class__.__name__
return result
def restore_attributes(self, attr: dict):
raise NotImplementedError()
def __getstate__(self):
    return self.get_pb().SerializeToString()
@staticmethod
def getClassByName(cls_name: str):
stack = [DatasetInterface]
while stack:
cls = stack.pop(0)
if cls.__name__ == cls_name:
return cls
stack.extend(cls.__subclasses__())
raise Exception("Couldn't find class with name: " + cls_name)
def __setstate__(self, state):
if isinstance(state, str) or isinstance(state, bytes):
_dataset = DatasetProto()
_dataset.ParseFromString(state)
elif isinstance(state, DatasetProto):
_dataset = state
else:
return
cls_name = _dataset.cls_name
try:
self.__class__ = DatasetInterface.getClassByName(cls_name)
except:
pass
self._id_name = _dataset.name_val
attr_d = dict([pb2attr(attr) for attr in _dataset.attr_val])
self.restore_attributes(attr_d)
def __next__(self):
"""
:return: Dictionary (label, data)
"""
raise NotImplementedError()
def __iter__(self):
return self
def __eq__(self, other):
if isinstance(other, self.__class__) and \
self._id_name == other._id_name:
return True
return False
def __hash__(self):
return hash(int.from_bytes(self._id_name.encode('utf-8'), byteorder='big'))
@property
def id_name(self) -> str:
return self._id_name
@property
def inputs(self):
return {}
pass
```
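A quick usage sketch for the interface above. The `ToyDataset` subclass and its sample data are hypothetical, and the sketch assumes the `LAMARCK_ML` package is importable:

```python
from LAMARCK_ML.datasets.interface import DatasetInterface


class ToyDataset(DatasetInterface):
  # Hypothetical subclass: cycles over an in-memory list of (label, data) dicts.
  def __init__(self, **kwargs):
    super(ToyDataset, self).__init__(**kwargs)
    self._data = kwargs.get('data', [{'DATA': [0.0], 'TARGET': [1.0]}])
    self._idx = 0

  def restore_attributes(self, attr: dict):
    self._data = attr.get('data', [])

  def __next__(self):
    item = self._data[self._idx % len(self._data)]
    self._idx += 1
    return item


ds = ToyDataset(**{DatasetInterface.arg_NAME: 'toy'})
print(next(ds))
# Class lookup by name walks the subclass tree of DatasetInterface.
assert DatasetInterface.getClassByName('ToyDataset') is ToyDataset
```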
#### File: LAMARCK_ML/data_util/attribute.py
```python
from enum import Enum
import numpy as np
from LAMARCK_ML.data_util import Shape, BaseType, TypeShape
from LAMARCK_ML.data_util.Attribute_pb2 import AttributeProto
def value2pb(value, v=None):
if v is None:
v = AttributeProto.Value()
  # bool is checked before int because bool is a subclass of int in Python
  if isinstance(value, bool):
    v.bool_val = value
  elif isinstance(value, int):
    v.int_val = value
  elif isinstance(value, float):
    v.double_val = value
elif isinstance(value, str):
v.string_val = value
elif isinstance(value, bytes):
v.bytes_val = value
elif isinstance(value, Shape):
value.get_pb(v.shape_val)
elif isinstance(value, TypeShape):
value.get_pb(v.nts_val)
elif isinstance(value, set):
v.set_val.v.extend([value2pb(_v) for _v in value])
elif isinstance(value, list):
v.list_val.v.extend([value2pb(_v) for _v in value])
elif isinstance(value, tuple):
v.tuple_val.v.extend([value2pb(_v) for _v in value])
elif isinstance(value, dict):
# v.dict_val.vs.extend([attr2pb(name=_k, value=_v) for _k, _v in value.items()])
v.dict_val.v.extend([value2pb(kv) for kv in value.items()])
elif isinstance(value, Enum):
value2pb(value.value, v=v)
elif isinstance(value, np.ndarray):
value2pb(value.tolist(), v=v)
v.list_val.numpy = True
# elif inspect.isclass(value) and issubclass(value, BaseType):
elif isinstance(value, type) and issubclass(value, BaseType):
value.get_pb(v.type_val)
elif value is not None:
v.bytes_val = bytes(value)
return v
def attr2pb(name, value):
attr = AttributeProto()
attr.name = name
value2pb(value, attr.v)
return attr
def pb2val(pb):
whichone = pb.WhichOneof("v")
if whichone == 'shape_val':
shape_ = Shape.__new__(Shape)
shape_.__setstate__(getattr(pb, whichone))
return shape_
elif whichone == 'type_val':
return BaseType.pb2cls(getattr(pb, whichone))[0]
elif whichone == 'list_val':
_list = [pb2val(_pb) for _pb in pb.list_val.v]
return np.asarray(_list) if getattr(pb.list_val, 'numpy', False) else _list
elif whichone == 'set_val':
return set([pb2val(_pb) for _pb in pb.set_val.v])
elif whichone == 'tuple_val':
return tuple([pb2val(_pb) for _pb in pb.tuple_val.v])
elif whichone == 'nts_val':
return TypeShape.from_pb(pb.nts_val)
elif whichone == 'dict_val':
# return dict([(elem.name, pb2val(elem.v)) for elem in pb.dict_val.vs])
return dict([pb2val(elem) for elem in pb.dict_val.v])
else:
attr = str(whichone)
if attr != 'None':
return getattr(pb, attr)
return None
def pb2attr(attr):
return attr.name, pb2val(attr.v)
```
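A minimal round-trip sketch for the helpers above, assuming the generated protobuf modules are available; the attribute names are made up for illustration:

```python
from LAMARCK_ML.data_util.attribute import attr2pb, pb2attr

# Scalar round trip.
name, value = pb2attr(attr2pb('units', 128))
assert name == 'units' and value == 128

# Nested containers survive the round trip as well: dicts are stored as
# lists of key/value tuples and rebuilt by pb2val.
name, value = pb2attr(attr2pb('topology', {'depth': 3, 'widths': [16, 32, 64]}))
assert value == {'depth': 3, 'widths': [16, 32, 64]}
```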
#### File: LAMARCK_ML/data_util/dataType.py
```python
from LAMARCK_ML.data_util.DType_pb2 import \
DInvalid as DInvalidProto, \
DBinary as DBinaryProto, \
DUInt as DUIntProto, \
DInt as DIntProto, \
DFloat as DFloatProto, \
DComplex as DComplexProto, \
DBool as DBoolProto, \
DString as DStringProto, \
DTypeProto
class InvalidDatatype(Exception):
pass
class BaseType(type):
attr = 'bytes_val'
pb = DInvalidProto
bits = None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.__class__)
@staticmethod
def pb2cls(pb_type, _class=None):
if _class is None:
_class = BaseType
for _subclass in _class.__subclasses__(_class):
if _subclass.pb == pb_type.type_val and \
_subclass.bits == getattr(pb_type, 'bits_val', None):
return _subclass, True
else:
_sc, found = BaseType.pb2cls(pb_type, _subclass)
if found:
return _sc, found
return None, False
@staticmethod
def str2cls(str_type, _class=None):
if _class is None:
_class = BaseType
for _subclass in _class.__subclasses__(_class):
if _subclass.__name__ == str_type:
return _subclass, True
else:
_sc, found = BaseType.str2cls(str_type, _subclass)
if found:
return _sc, found
return None, False
@classmethod
def get_pb(cls, result=None):
if not isinstance(result, DTypeProto):
result = DTypeProto()
result.type_val = cls.pb
if cls.bits is not None:
result.bits_val = cls.bits
return result
@classmethod
def __str__(cls):
return cls.__name__ + ':' + str(cls.bits)
pass
class DHalf(BaseType):
attr = 'half_val'
pb = DFloatProto
bits = 16
class DFloat(BaseType):
attr = 'float_val'
pb = DFloatProto
bits = 32
class DDouble(BaseType):
attr = 'double_val'
pb = DFloatProto
bits = 64
class DInt8(BaseType):
attr = 'int_val'
pb = DIntProto
bits = 8
class DInt16(BaseType):
attr = 'int_val'
pb = DIntProto
bits = 16
class DInt32(BaseType):
attr = 'int_val'
pb = DIntProto
bits = 32
class DInt64(BaseType):
attr = 'int64_val'
pb = DIntProto
bits = 64
class DString(BaseType):
attr = 'string_val'
pb = DStringProto
class DBool(BaseType):
attr = 'bool_val'
pb = DBoolProto
bits = 1
class DComplex64(BaseType):
attr = 'scomplex_val'
pb = DComplexProto
bits = 64
class DComplex128(BaseType):
attr = 'dcomplex_val'
pb = DComplexProto
bits = 128
class DUInt8(BaseType):
attr = 'uint32_val'
pb = DUIntProto
bits = 8
class DUInt16(BaseType):
attr = 'uint32_val'
pb = DUIntProto
bits = 16
class DUInt32(BaseType):
attr = 'uint32_val'
pb = DUIntProto
bits = 32
class DUInt64(BaseType):
attr = 'uint64_val'
pb = DUIntProto
bits = 64
class DBinary(BaseType):
attr = 'bytes_val'
pb = DBinaryProto
bits = None
```
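The protobuf round trip for these type classes is symmetric; a small sketch, with the import path taken from the file header above:

```python
from LAMARCK_ML.data_util.dataType import BaseType, DFloat, DInt64

cls, found = BaseType.pb2cls(DFloat.get_pb())
assert found and cls is DFloat

cls, found = BaseType.str2cls('DInt64')
assert found and cls is DInt64
print(cls.attr, cls.bits)  # int64_val 64
```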
#### File: individuals/implementations/classifierIndividualACDG.py
```python
from LAMARCK_ML.architectures.functions import Dense
from LAMARCK_ML.architectures.losses import Reduce, LossInterface
from LAMARCK_ML.architectures.losses import SoftmaxCrossEntropyWithLogits, MeanSquaredError
from LAMARCK_ML.architectures.neuralNetwork import NeuralNetwork
from LAMARCK_ML.architectures.functions import Softmax
from LAMARCK_ML.data_util import IOLabel, DimNames
from LAMARCK_ML.individuals.implementations.networkIndividualInterface import NetworkIndividualInterface
from LAMARCK_ML.reproduction.methods import Mutation, Recombination
class ClassifierIndividualACDG(NetworkIndividualInterface, Mutation.Interface, Recombination.Interface):
arg_MAX_NN_DEPTH = 'max_depth'
arg_MIN_NN_DEPTH = 'min_depth'
arg_MAX_NN_BRANCH = 'max_branch'
arg_NN_FUNCTIONS = 'functions'
def __init__(self, **kwargs):
super(ClassifierIndividualACDG, self).__init__(**kwargs)
if len(self._networks) > 1:
raise Exception('Expected 1 or 0 networks got: ' + str(len(self._networks)))
elif len(self._networks) == 1:
self.network = self._networks[0]
else:
_input = (IOLabel.DATA, *self._data_nts[IOLabel.DATA])
_output = {IOLabel.TARGET: self._data_nts[IOLabel.TARGET][0]}
_input = {'NN_DATA': _input}
self.network = NeuralNetwork(**{
NeuralNetwork.arg_INPUTS: _input,
NeuralNetwork.arg_OUTPUT_TARGETS: _output,
NeuralNetwork.arg_FUNCTIONS: kwargs.get(self.arg_NN_FUNCTIONS, [Dense]),
NeuralNetwork.arg_MAX_DEPTH: kwargs.get(self.arg_MAX_NN_DEPTH, 7),
NeuralNetwork.arg_MIN_DEPTH: kwargs.get(self.arg_MIN_NN_DEPTH, 2),
NeuralNetwork.arg_MAX_BRANCH: kwargs.get(self.arg_MAX_NN_BRANCH, 1)
})
self._networks.append(self.network)
if len(self._losses) != 0:
raise Exception('Expected no loss!')
_output = self._data_nts[IOLabel.TARGET][0]
_output_units = _output.shape[DimNames.UNITS]
if _output_units == 1:
self.loss = MeanSquaredError(**{
LossInterface.arg_REDUCE: Reduce.MEAN,
})
else:
self.loss = SoftmaxCrossEntropyWithLogits(**{
LossInterface.arg_REDUCE: Reduce.MEAN
})
self._losses.append(self.loss)
def _cls_setstate(self, _individual):
super(ClassifierIndividualACDG, self)._cls_setstate(_individual)
if len(self._networks) != 1:
raise Exception('Restored individual has an invalid number of networks: ' + str(len(self._networks)))
self.network = self._networks[0]
if len(self._losses) != 1:
raise Exception('Restored individual has an invalid number of losses: ' + str(len(self._losses)))
self.loss = self._losses[0]
def __eq__(self, other):
if (super(ClassifierIndividualACDG, self).__eq__(other)
and self.loss == other.loss
and self.network == other.network
):
return True
return False
def mutate(self, prob):
result = ClassifierIndividualACDG.__new__(ClassifierIndividualACDG)
pb = self.get_pb()
result.__setstate__(pb)
result.network = self.network.mutate(prob=prob)[0]
result._networks = [result.network]
result._id_name = self.getNewName()
return [result]
def recombine(self, other):
result = ClassifierIndividualACDG.__new__(ClassifierIndividualACDG)
pb = self.get_pb()
result.__setstate__(pb)
result.network = self.network.recombine(other.network)[0]
result._networks = [result.network]
result._id_name = self.getNewName()
return [result]
def norm(self, other):
return self.network.norm(other.network)
def update_state(self, *args, **kwargs):
self.network.update_state(*args, **kwargs)
def build_instance(self, nn_framework):
nn_framework.init_model({IOLabel.DATA}, {IOLabel.TARGET})
f_id2obj = dict()
for f in self.network.functions:
nn_framework.add_function(f)
f_id2obj[f.id_name] = f
nn_framework.set_train_parameters(**{
nn_framework.arg_LOSS: self.loss.__class__,
})
softmax_out = list()
for label, f_id in self.network.output_mapping.values():
f_obj = f_id2obj[f_id]
softmax = Softmax(**Softmax.generateParameters(
input_dict={IOLabel.SOFTMAX_IN: (label, f_obj.outputs, f_id)},
expected_outputs={IOLabel.SOFTMAX_OUT: f_obj.outputs[label]},
)[0][0])
nn_framework.add_function(softmax)
softmax_out.append((IOLabel.SOFTMAX_OUT, softmax.id_name))
nn_framework.finalize_model(output_ids=softmax_out)
def train_instance(self, nn_framework):
return nn_framework.train()
```
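Construction follows the same pattern as the dataSaver test elsewhere in this repo. A sketch: the 'Dataset' id and the shapes are placeholders, and the import path for the individual class is assumed from the file location:

```python
from LAMARCK_ML.data_util import TypeShape, IOLabel, DFloat, Shape, DimNames
from LAMARCK_ML.individuals.implementations.classifierIndividualACDG import ClassifierIndividualACDG
from LAMARCK_ML.individuals.implementations.networkIndividualInterface import NetworkIndividualInterface

_data_nts = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 20)))
_target_nts = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 10)))
ind = ClassifierIndividualACDG(**{
  NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
                                            IOLabel.TARGET: (_target_nts, 'Dataset')},
})
mutants = ind.mutate(prob=0.1)        # one mutated copy with a fresh id_name
children = ind.recombine(mutants[0])  # one recombined offspring
```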
#### File: individuals/implementations/weightAgnosticIndividual.py
```python
from itertools import product
from LAMARCK_ML.data_util import IOLabel
from LAMARCK_ML.reproduction.methods import Mutation, Recombination, RandomStep
from LAMARCK_ML.architectures.losses import Reduce, LossInterface
from LAMARCK_ML.architectures.losses import SoftmaxCrossEntropyWithLogits, MeanSquaredError
from LAMARCK_ML.individuals.implementations.networkIndividualInterface import NetworkIndividualInterface
from LAMARCK_ML.individuals.implementations.NetworkIndividual_pb2 import NetworkIndividualProto
from LAMARCK_ML.architectures.weightAgnosticNN import WeightAgnosticNeuralNetwork
from LAMARCK_ML.data_util.attribute import attr2pb, pb2attr
from LAMARCK_ML.data_util import TypeShape, Shape, DimNames
from LAMARCK_ML.architectures.functions import Perceptron
from LAMARCK_ML.metrics import Accuracy
class WeightAgnosticIndividual(NetworkIndividualInterface,
Recombination.Interface,
Mutation.Interface,
RandomStep.Interface,
Accuracy.Interface,
):
arg_WEIGHTS = 'test_weights'
arg_NODES = 'nodes'
arg_INITIAL_DEPTH = 'initial_depth'
def __init__(self, **kwargs):
super(WeightAgnosticIndividual, self).__init__(**kwargs)
if len(self._networks) > 1:
raise Exception('Expected 1 or 0 networks got: ' + str(len(self._networks)))
elif len(self._networks) == 1:
self.network = self._networks[0]
else:
_input = self._data_nts[IOLabel.DATA]
_output = self._data_nts[IOLabel.TARGET]
in_name = _input[1]
shapes = list()
batch = _input[0].shape[DimNames.BATCH]
has_batch = False
dtype = _input[0].dtype
for dim in _input[0].shape.dim:
if dim.name != DimNames.BATCH:
shapes.append(list(range(dim.size)))
else:
has_batch = True
_input = dict()
for p in product(*shapes):
key = ':'.join([str(i) for i in p])
_input[key] = (IOLabel.DATA,
TypeShape(dtype, Shape((DimNames.UNITS, 1) if not has_batch else
(DimNames.BATCH, batch), (DimNames.UNITS, 1))),
in_name + '_' + key)
shapes = list()
batch = _output[0].shape[DimNames.BATCH]
has_batch = False
dtype = _output[0].dtype
for dim in _output[0].shape.dim:
if dim.name != DimNames.BATCH:
shapes.append(list(range(dim.size)))
else:
has_batch = True
_output = dict()
for p in product(*shapes):
_output[':'.join([str(i) for i in p])] = \
TypeShape(dtype, Shape((DimNames.UNITS, 1) if not has_batch else
(DimNames.BATCH, batch), (DimNames.UNITS, 1)))
self.network = WeightAgnosticNeuralNetwork(**{
WeightAgnosticNeuralNetwork.arg_INPUTS: _input,
WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: _output,
WeightAgnosticNeuralNetwork.arg_FUNCTIONS: kwargs.get(self.arg_WEIGHTS, [Perceptron]),
WeightAgnosticNeuralNetwork.arg_INITIAL_DEPTH: kwargs.get(self.arg_INITIAL_DEPTH, 1),
})
self._networks.append(self.network)
weights = kwargs.get(self.arg_WEIGHTS)
if weights is None or not (isinstance(weights, list) and all([isinstance(w, float) for w in weights])):
weights = [i - 2 for i in range(5)]
self.attr[self.arg_WEIGHTS] = weights
if len(self._losses) != 0:
raise Exception('Expected no loss!')
_output = self._data_nts[IOLabel.TARGET][0]
_output_units = _output.shape[DimNames.UNITS]
if _output_units == 1:
self.loss = MeanSquaredError(**{
LossInterface.arg_REDUCE: Reduce.MEAN,
})
else:
self.loss = SoftmaxCrossEntropyWithLogits(**{
LossInterface.arg_REDUCE: Reduce.MEAN
})
self._losses.append(self.loss)
def __sub__(self, other):
if not isinstance(other, self.__class__):
return -1
return self.network - other.network
def _cls_setstate(self, state):
if isinstance(state, str) or isinstance(state, bytes):
_individual = NetworkIndividualProto()
_individual.ParseFromString(state)
elif isinstance(state, NetworkIndividualProto):
_individual = state
else:
return
self._networks = list()
for network in _individual.networks:
_obj = WeightAgnosticNeuralNetwork.__new__(WeightAgnosticNeuralNetwork)
_obj.__setstate__(network)
self._networks.append(_obj)
self._data_nts = dict([(d.label, (TypeShape.from_pb(d.tsp), d.id_name)) for d in _individual.data_sources])
self._losses = list()
for loss in _individual.losses:
_obj = LossInterface.__new__(LossInterface)
_obj.__setstate__(loss)
self._losses.append(_obj)
super(NetworkIndividualInterface, self)._cls_setstate(_individual.baseIndividual)
if len(self._networks) != 1:
raise Exception('Restored individual has an invalid number of networks: ' + str(len(self._networks)))
self.network = self._networks[0]
if len(self._losses) != 1:
raise Exception('Restored individual has an invalid number of losses: ' + str(len(self._losses)))
self.loss = self._losses[0]
def __eq__(self, other):
if (super(WeightAgnosticIndividual, self).__eq__(other)
and self.loss == other.loss
and self.network == other.network
):
return True
return False
def norm(self, other):
if not isinstance(other, self.__class__):
return 0
return self.network.norm(other.network)
def update_state(self, *args, **kwargs):
self.network.update_state(*args, **kwargs)
def mutate(self, prob):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.mutate(prob=prob)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def step(self, step_size):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.step(step_size=step_size)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def recombine(self, other):
result = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
result.metrics = dict()
result.attr = dict([pb2attr(attr2pb(key, value)) for key, value in self.attr.items()])
result._data_nts = {label: (nts.__copy__(), id_name) for label, (nts, id_name) in self._data_nts.items()}
result._losses = list(self._losses)
result.loss = self.loss
result._networks = self.network.recombine(other.network)
result.network = result._networks[0]
result._id_name = self.getNewName()
return [result]
def build_instance(self, nn_framework):
nn_framework.init_model()
for f in self.network.functions:
nn_framework.add_function(f)
nn_framework.set_train_parameters(**{
nn_framework.arg_LOSS: self.loss.__class__,
})
nn_framework.finalize_model(output_ids=self.network.output_mapping.values())
# nn_framework.train() # This individual doesn't need to be trained
def train_instance(self, nn_framework):
return dict()
def accuracy(self, nn_framework):
acc = 0
weights = self.attr.get(self.arg_WEIGHTS, [])
for w in weights:
nn_framework.set_weights(**{
f.id_name: w for f in self.network.functions
})
acc += nn_framework.accuracy(self)
return acc / len(weights)
```
#### File: LAMARCK_ML/metrics/interface.py
```python
class MetricInterface(object):
ID = 'NONE'
def __init__(self, **kwargs):
pass
def evaluate(self, individual, framework):
    raise NotImplementedError("Function evaluate has to be implemented!")
@staticmethod
def getMetricByName(name='NONE'):
stack = [MetricInterface]
while stack:
cls = stack.pop(0)
if cls.__name__ == name:
return cls
stack.extend(cls.__subclasses__())
raise Exception("Couldn't find class with name: " + name)
```
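A hypothetical metric to illustrate the contract; the `PARAM_COUNT` id and the fallback value are assumptions, not part of the library:

```python
from LAMARCK_ML.metrics.interface import MetricInterface


class ParameterCount(MetricInterface):
  ID = 'PARAM_COUNT'

  def evaluate(self, individual, framework):
    # A framework implementing Parameters.Interface exposes parameters();
    # fall back to 0.0 when evaluated without a framework.
    return float(framework.parameters()) if framework is not None else 0.0


assert MetricInterface.getMetricByName('ParameterCount') is ParameterCount
```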
#### File: LAMARCK_ML/nn_framework/nvidia_tensorflow.py
```python
import os
from typing import Dict, List, Tuple, Set
from deprecated import deprecated
from LAMARCK_ML.architectures.functions import *
from LAMARCK_ML.architectures.losses import *
from LAMARCK_ML.architectures.variables.initializer import *
from LAMARCK_ML.architectures.variables.regularisation import *
from LAMARCK_ML.architectures.functions.activations import Activations
from LAMARCK_ML.data_util import DimNames, IOLabel, TypeShape
from LAMARCK_ML.data_util.dataType import *
from LAMARCK_ML.individuals import IndividualInterface
from LAMARCK_ML.metrics import Accuracy, TimeMetric, MemoryMetric, FlOps, Parameters
from LAMARCK_ML.nn_framework import NeuralNetworkFrameworkInterface
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.ops import nn_ops
from tensorflow.python.keras.utils import conv_utils
class NVIDIATensorFlow(NeuralNetworkFrameworkInterface,
Accuracy.Interface,
TimeMetric.Interface,
MemoryMetric.Interface,
FlOps.Interface,
Parameters.Interface):
arg_SESSION_CFG = 'session_cfg'
arg_BATCH_SIZE = 'batch_size'
arg_TMP_FILE = 'tmp_file'
arg_EPOCHS = 'epochs'
mapping_dtype = {
DHalf: tf.float16,
DFloat: tf.float32,
DDouble: tf.float64,
DInt8: tf.int8,
DInt16: tf.int16,
DInt32: tf.int32,
DInt64: tf.int64,
DUInt8: tf.uint8,
DUInt16: tf.uint16,
DUInt32: tf.uint32,
DUInt64: tf.uint64,
}
mapping_initializer = {
GlorotUniform: tf.keras.initializers.glorot_uniform,
GlorotNormal: tf.keras.initializers.glorot_normal,
Constant: tf.keras.initializers.constant,
Zeros: tf.keras.initializers.zeros,
Ones: tf.keras.initializers.ones,
}
mapping_regularizer = {
NoRegularisation: None,
L1: tf.keras.regularizers.l1,
L2: tf.keras.regularizers.l2,
L1L2: tf.keras.regularizers.l1_l2,
}
mapping_loss = {
SoftmaxCrossEntropyWithLogits: tf.keras.losses.CategoricalCrossentropy,
SparseSoftmaxCrossEntropyWithLogits: tf.keras.losses.SparseCategoricalCrossentropy,
BinaryCrossentropy: tf.keras.losses.BinaryCrossentropy,
MeanSquaredError: tf.keras.losses.MeanSquaredError,
MeanAbsoluteError: tf.keras.losses.MeanAbsoluteError,
}
nativ_activations = {
Activations.sigmoid: tf.keras.activations.sigmoid,
Activations.tanh: tf.keras.activations.tanh,
Activations.linear: tf.keras.activations.linear,
Activations.relu: tf.keras.activations.relu,
Activations.selu: tf.keras.activations.selu,
Activations.elu: tf.keras.activations.elu,
Activations.exponential: tf.keras.activations.exponential,
Activations.hard_sigmoid: tf.keras.activations.hard_sigmoid,
Activations.softmax: tf.keras.activations.softmax,
Activations.softplus: tf.keras.activations.softplus,
Activations.softsign: tf.keras.activations.softsign,
}
def __init__(self, **kwargs):
super(NVIDIATensorFlow, self).__init__(**kwargs)
self._sess_cfg = kwargs.get(self.arg_SESSION_CFG)
self.batch_size = kwargs.get(self.arg_BATCH_SIZE, 32)
self.tmp_file = kwargs.get(self.arg_TMP_FILE, 'state.ckpt')
self.epochs = kwargs.get(self.arg_EPOCHS, 10)
self._memory = None
self._time = None
self._flops = None
self._parameters = None
self._id2tfTensor = dict()
self._id2tfObj = dict()
self._inputs = list()
self._outputs = list()
self._train_params = dict()
self._scheduled_functions = list()
self._model = None
self._sess = None
self.QConv2D__ = self.Conv2D__
self.QPooling2D__ = self.Pooling2D__
class C_Dense(tf.keras.layers.Dense):
def __init__(self, kernel_trainable=True, bias_trainable=True, **kwargs):
super(NVIDIATensorFlow.C_Dense, self).__init__(**kwargs)
self.kernel_trainable = kernel_trainable
self.bias_trainable = bias_trainable
def build(self, input_shape):
dtype = dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError('Unable to build `Dense` layer with non-floating point '
'dtype %s' % (dtype,))
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2,
axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=self.kernel_trainable)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units, ],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=self.bias_trainable)
else:
self.bias = None
self.built = True
@deprecated(version='0.2')
def setup_individual(self, individual: IndividualInterface):
self._memory = None
self._time = None
self._flops = None
self._parameters = None
self._sess = tf.compat.v1.Session(config=self._sess_cfg)
K.set_session(self._sess)
id2tfTensor = dict()
id2tfObj = dict()
self._inputs = list()
for ds in self.data_sets:
for output_id_name, output in ds.outputs.items():
if output_id_name != IOLabel.DATA:
continue
shape = tuple([dim.size for dim in output.shape.dim if dim.name != DimNames.BATCH])
batch_size = output.shape[DimNames.BATCH]
name = ds.id_name + '_' + output_id_name
dtype = self.mapping_dtype.get(output.dtype)
tfObj = tf.keras.Input(shape=shape, batch_size=batch_size, name=name, dtype=dtype)
id2tfTensor[ds.id_name] = {**id2tfTensor.get(ds.id_name, dict()), **{output_id_name: tfObj}}
self._inputs.append(tfObj)
functionStack = []
for network in individual._networks:
functionStack.extend(network.functions)
while functionStack:
_func = functionStack.pop(0)
all_found = True
func_inputs = dict()
for _input, out_mapping in _func.inputs.items():
out_dict = id2tfTensor.get(out_mapping[1])
if out_dict is None or out_dict.get(out_mapping[0]) is None:
all_found = False
break
func_inputs[_input] = out_dict.get(out_mapping[0])
if not all_found:
functionStack.append(_func)
continue
id2tfTensor[_func.id_name], id2tfObj[_func.id_name] = \
getattr(self, _func.__class__.__name__ + '__')(_func, **func_inputs)
self._outputs = list()
for label, id_name in individual.network.output_mapping.values():
out_dict = id2tfTensor.get(id_name)
if out_dict is not None and out_dict.get(label) is not None:
tfObj = out_dict.get(label)
tfObj = tf.keras.layers.Softmax()(tfObj)
self._outputs.append(tfObj)
self._model = tf.keras.Model(inputs=self._inputs, outputs=self._outputs)
self._model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=self.mapping_loss.get(individual.loss.__class__)(),
metrics=['accuracy']
)
self.data_sets[0]('train')
valid_exists = self.data_sets[0].valid_X is not None and self.data_sets[0].valid_Y is not None
self._model.fit(**{'x': self.data_sets[0].data_X,
'y': self.data_sets[0].data_Y,
'batch_size': self.batch_size,
'validation_data': (self.data_sets[0].valid_X,
self.data_sets[0].valid_Y)
if valid_exists else None,
'epochs': self.epochs,
'verbose': 0,
'callbacks': [tf.keras.callbacks.ModelCheckpoint(save_weights_only=True,
save_best_only=True,
filepath=self.tmp_file,
verbose=0),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
] if valid_exists else None,
})
if valid_exists:
self._model.load_weights(self.tmp_file)
functionStack = []
for network in individual._networks:
functionStack.extend(network.functions)
state = dict()
for f in functionStack:
tfObj, v_names = id2tfObj[f.id_name]
variable_names = tfObj.variables
state[f.id_name] = dict([(v_names[variable.name], value)
for variable, value in zip(variable_names, K.batch_get_value(variable_names))])
state[NeuralNetworkFrameworkInterface.arg_CMP] = self.cmp
return state
@deprecated(version='0.2')
def reset_framework(self):
self._memory = None
self._time = None
self._flops = None
self._parameters = None
del self._model
K.clear_session()
tf.compat.v1.reset_default_graph()
def init_model(self, dataset_input_data: Set[str], dataset_target_data: Set[str]):
self.reset()
self._sess = tf.compat.v1.Session(config=self._sess_cfg)
K.set_session(self._sess)
for ds in self.data_sets:
for output_id_name, output in ds.outputs.items():
if not output_id_name in dataset_input_data:
continue
shape = tuple([dim.size for dim in output.shape.dim if dim.name != DimNames.BATCH])
batch_size = output.shape[DimNames.BATCH]
name = ds.id_name + '_' + output_id_name
dtype = self.mapping_dtype.get(output.dtype)
tfObj = tf.keras.Input(shape=shape, batch_size=batch_size, name=name, dtype=dtype)
self._id2tfTensor[ds.id_name] = {**self._id2tfTensor.get(ds.id_name, dict()), **{output_id_name: tfObj}}
self._inputs.append(tfObj)
def finalize_model(self, output_ids: List[Tuple[str, str]]):
while self._scheduled_functions:
_func = self._scheduled_functions.pop(0)
all_found = True
func_inputs = dict()
for _input, (out_label, obj_id) in _func.inputs.items():
out_dict = self._id2tfTensor.get(obj_id)
if out_dict is None or out_dict.get(out_label) is None:
all_found = False
break
func_inputs[_input] = out_dict.get(out_label)
if not all_found:
self._scheduled_functions.append(_func)
continue
self._id2tfTensor[_func.id_name], self._id2tfObj[_func.id_name] = \
getattr(self, _func.__class__.__name__ + '__')(_func, **func_inputs)
for label, id_name in output_ids:
out_dict = self._id2tfTensor.get(id_name)
if out_dict is not None and out_dict.get(label) is not None:
t = out_dict.get(label)
self._outputs.append(out_dict.get(label))
self._model = tf.keras.Model(inputs=self._inputs, outputs=self._outputs)
self._model.compile(**self._train_params)
def set_weights(self, weights: Dict):
for id, value in weights.items():
try:
self._id2tfObj.get(id).set_weights(value)
except Exception as e:
print('Failed to set weights for ' + id + ': ' + str(e))
def set_train_parameters(self, **kwargs):
self._train_params = {
'optimizer': tf.keras.optimizers.Adam(),
'loss': self.mapping_loss.get(kwargs.get(self.arg_LOSS, tf.keras.losses.SparseCategoricalCrossentropy))(),
'metrics': ['accuracy'],
}
def add_function(self, function: Function):
self._scheduled_functions.append(function)
    for _func in list(self._scheduled_functions):  # iterate over a copy; resolved entries are removed below
all_found = True
func_inputs = dict()
for _input, (out_label, obj_id) in _func.inputs.items():
out_dict = self._id2tfTensor.get(obj_id)
if out_dict is None or out_dict.get(out_label) is None:
all_found = False
break
func_inputs[_input] = out_dict.get(out_label)
if not all_found:
continue
self._id2tfTensor[_func.id_name], self._id2tfObj[_func.id_name] = \
getattr(self, _func.__class__.__name__ + '__')(_func, **func_inputs)
self._scheduled_functions.remove(_func)
def train(self) -> Dict:
for data_set in self.data_sets:
data_set('train')
valid_exists = all(data_set.valid_X is not None and data_set.valid_Y is not None for data_set in self.data_sets)
self._model.fit(**{
'x': self.data_sets[0].data_X,
'y': self.data_sets[0].data_Y,
'batch_size': self.batch_size,
'validation_data': (self.data_sets[0].valid_X,
self.data_sets[0].valid_Y)
if valid_exists else None,
'epochs': self.epochs,
'verbose': 0,
'callbacks': [tf.keras.callbacks.ModelCheckpoint(save_weights_only=True,
save_best_only=True,
filepath=self.tmp_file,
verbose=0),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
] if valid_exists else None,
})
if valid_exists:
self._model.load_weights(self.tmp_file)
state = dict()
for f, (tfObj, vname_dict) in self._id2tfObj.items():
variables = tfObj.variables
state[f] = {vname_dict[variable.name]: value for variable, value in zip(variables, K.batch_get_value(variables))}
state[NeuralNetworkFrameworkInterface.arg_CMP] = self.cmp
return state
def reset(self):
self._memory = None
self._time = None
self._flops = None
self._parameters = None
self._sess = None
self._id2tfTensor = dict()
self._id2tfObj = dict()
self._inputs = list()
self._outputs = list()
self._train_params = dict()
self._scheduled_functions = list()
if hasattr(self, '_model'):
del self._model
tf.compat.v1.reset_default_graph()
K.clear_session()
def _build_activation(self, activation, x):
if activation is Activations.sign:
return tf.math.sign(x)
elif activation is Activations.sine:
return tf.math.sin(x)
elif activation is Activations.cosine:
return tf.math.cos(x)
elif activation is Activations.absolute:
return tf.math.abs(x)
elif activation is Activations.inverse:
return tf.math.reciprocal(x)
elif activation is Activations.gaussian:
print('Gaussian activation is currently not implemented: using linear')
return x
return x
def Dense__(self, func, **kwargs):
kernel = [var for var in func.variables if var.name.endswith('kernel')][0]
bias = [var for var in func.variables if var.name.endswith('bias')][0]
units = func.attr.get(func.arg_UNITS)
kernel_init = self.mapping_initializer.get(kernel.initializer.__class__)()
if kernel.value is not None:
kernel_init = tf.keras.initializers.Constant(value=kernel.value)
bias_init = self.mapping_initializer.get(bias.initializer.__class__)()
if bias.value is not None:
bias_init = tf.keras.initializers.Constant(value=bias.value)
kernel_reg = self.mapping_regularizer.get(kernel.regularisation.__class__)
bias_reg = self.mapping_regularizer.get(bias.regularisation.__class__)
f_activation = func.attr.get(func.arg_ACTIVATION)
if f_activation is None:
activation = tf.keras.activations.relu
else:
activation = self.nativ_activations.get(f_activation)
tfObj = NVIDIATensorFlow.C_Dense(
units=units,
# activation=tf.keras.activations.relu,
activation=activation,
use_bias=True,
kernel_initializer=kernel_init,
bias_initializer=bias_init,
kernel_regularizer=kernel_reg() if kernel_reg is not None else None,
bias_regularizer=bias_reg() if bias_reg is not None else None,
kernel_trainable=kernel.trainable,
bias_trainable=bias.trainable,
name=func.id_name,
)
outNTS = next(iter(func.outputs))
func_input = next(iter(kwargs.values()))
# TODO: only working with units not images
tmp = tfObj(func_input)
if activation is None and f_activation is not None:
tmp = self._build_activation(f_activation, tmp)
return {outNTS: tmp}, (tfObj, dict([(v.name, 'Dense|kernel' if 'kernel' in v.name else 'Dense|bias')
for v in tfObj.variables]))
def BiasLessDense__(self, func, **kwargs):
kernel = [var for var in func.variables if var.name.endswith('kernel')][0]
units = func.attr.get(func.arg_UNITS)
kernel_init = self.mapping_initializer.get(kernel.initializer.__class__)()
if kernel.value is not None:
kernel_init = tf.keras.initializers.Constant(value=kernel.value)
kernel_reg = self.mapping_regularizer.get(kernel.regularisation.__class__)
f_activation = func.attr.get(func.arg_ACTIVATION)
if f_activation is None:
activation = tf.keras.activations.relu
else:
activation = self.nativ_activations.get(f_activation)
tfObj = NVIDIATensorFlow.C_Dense(
units=units,
activation=activation,
use_bias=False,
kernel_initializer=kernel_init,
kernel_regularizer=kernel_reg(l=0.001) if kernel_reg is not None else tf.keras.regularizers.l2(l=0.001),
kernel_trainable=kernel.trainable,
name=func.id_name,
)
outNTS = next(iter(func.outputs))
func_input = next(iter(kwargs.values()))
tmp = tfObj(func_input)
if activation is None and f_activation is not None:
tmp = self._build_activation(f_activation, tmp)
return {outNTS: tmp}, (tfObj, {v.name: 'BiasLessDense|kernel' for v in tfObj.variables if 'kernel' in v.name})
def Merge__(self, func, **kwargs):
first = kwargs.get(func.inputLabels[0])
second = kwargs.get(func.inputLabels[1])
outNTS_id, outNTS = next(iter(func.outputs.items()))
axis = [i for i, d in enumerate(outNTS.shape.dim) if d.name == DimNames.UNITS or d.name == DimNames.CHANNEL][0]
tfObj = tf.keras.layers.Concatenate(axis=axis,
name=func.id_name.replace(':', '_'),
)
return {outNTS_id: tfObj([first, second])}, (tfObj, dict())
def Conv2D__(self, func, **kwargs):
class C_Conv2D(tf.keras.layers.Conv2D):
def __init__(self, kernel_trainable=True, bias_trainable=True, **kwargs):
super(C_Conv2D, self).__init__(**kwargs)
self.kernel_trainable = kernel_trainable
self.bias_trainable = bias_trainable
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=self.kernel_trainable,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=self.bias_trainable,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=op_padding,
data_format=conv_utils.convert_data_format(self.data_format,
self.rank + 2))
self.built = True
kernel = [var for var in func.variables if var.name.endswith('kernel')][0]
bias = [var for var in func.variables if var.name.endswith('bias')][0]
kernel_init = self.mapping_initializer.get(kernel.initializer.__class__)()
if kernel.value is not None:
kernel_init = tf.keras.initializers.Constant(value=kernel.value)
bias_init = self.mapping_initializer.get(bias.initializer.__class__)()
if bias.value is not None:
bias_init = tf.keras.initializers.Constant(value=bias.value)
kernel_reg = self.mapping_regularizer.get(kernel.regularisation.__class__)
bias_reg = self.mapping_regularizer.get(bias.regularisation.__class__)
f_activation = func.attr.get(func.arg_ACTIVATION)
if f_activation is None:
activation = tf.keras.activations.relu
else:
activation = self.nativ_activations.get(f_activation)
tfObj = C_Conv2D(filters=func.attr.get(func.arg_FILTER),
kernel_size=(func.attr.get(func.arg_KERNEL_HEIGHT),
func.attr.get(func.arg_KERNEL_WIDTH)),
strides=(func.attr.get(func.arg_STRIDE_HEIGHT),
func.attr.get(func.arg_STRIDE_WIDTH)),
padding=func.attr.get(func.arg_PADDING),
use_bias=True,
kernel_initializer=kernel_init,
bias_initializer=bias_init,
kernel_regularizer=kernel_reg,
bias_regularizer=bias_reg,
kernel_trainable=kernel.trainable,
bias_trainable=bias.trainable,
# activation=tf.keras.activations.relu,
activation=activation,
data_format='channels_last',
name=func.id_name,
)
outNTS = next(iter(func.outputs))
func_input = next(iter(kwargs.values()))
tmp = tfObj(func_input)
if activation is None and f_activation is not None:
tmp = self._build_activation(f_activation, tmp)
return {outNTS: tmp}, (tfObj, dict(
[(v.name, func.__class__.__name__ + '|kernel' if 'kernel' in v.name else func.__class__.__name__ + '|bias')
for v in tfObj.variables]))
def Pooling2D__(self, func, **kwargs):
class C_MinPooling2D(tf.keras.layers.MaxPooling2D):
def pooling_function(inputs, pool_size, strides, padding, data_format):
return -K.pool2d(-inputs, pool_size, strides, padding, data_format,
pool_mode='max')
type2tfObj = {
Pooling2D.PoolingType.MIN.value: C_MinPooling2D,
Pooling2D.PoolingType.MAX.value: tf.keras.layers.MaxPooling2D,
Pooling2D.PoolingType.MEAN.value: tf.keras.layers.AveragePooling2D,
}
tfObj = type2tfObj.get(func.attr.get(Pooling2D.arg_POOLING_TYPE))(
pool_size=(func.attr.get(Pooling2D.arg_POOLING_HEIGHT),
func.attr.get(Pooling2D.arg_POOLING_WIDTH)),
strides=(func.attr.get(Pooling2D.arg_STRIDE_HEIGHT),
func.attr.get(Pooling2D.arg_STRIDE_WIDTH)),
padding=func.attr.get(Pooling2D.arg_PADDING),
data_format='channels_last',
name=func.id_name,
)
outNTS = next(iter(func.outputs))
func_input = next(iter(kwargs.values()))
return {outNTS: tfObj(func_input)}, (tfObj, list())
def Flatten__(self, func, **kwargs):
tfObj = tf.keras.layers.Flatten()
outNTS = next(iter(func.outputs))
func_inputs = next(iter(kwargs.values()))
return {outNTS: tfObj(func_inputs)}, (tfObj, list())
def Softmax__(self, func, **kwargs):
tfObj = tf.keras.layers.Softmax()
func_input = next(iter(kwargs.values()))
outNTS = next(iter(func.outputs))
return {outNTS: tfObj(func_input)}, (tfObj, dict())
def _time_memory_flops_params(self):
self.data_sets[0].batch = 1
random_input = next(iter(self.data_sets[0])).get(IOLabel.DATA)
run_meta = tf.RunMetadata()
self._sess.run(self._outputs[0].name, feed_dict={self._inputs[0]: random_input},
options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
options = tf.profiler.ProfileOptionBuilder.time_and_memory(min_micros=0, min_bytes=0, )
options['output'] = 'none'
options['verbose'] = 0
time_mem = tf.profiler.profile(self._sess.graph, run_meta=run_meta, cmd='op', options=options, )
def catch_sum(line='', text=list(), _ret=False):
if _ret:
return text[0]
if line.startswith('Total params'):
text.append(float(line.split(': ')[1].replace(',', '')))
self._model.summary(print_fn=catch_sum)
self._parameters = catch_sum(_ret=True)
options = tf.profiler.ProfileOptionBuilder.float_operation()
options['output'] = 'none'
options['verbose'] = 0
self._flops = tf.profiler.profile(graph=self._sess.graph,
run_meta=tf.RunMetadata(), cmd='op',
options=options).total_float_ops
self._time = time_mem.total_exec_micros
self._memory = time_mem.total_peak_bytes
def time(self):
if self._time is None:
self._time_memory_flops_params()
return float(self._time)
def memory(self):
if self._memory is None:
self._time_memory_flops_params()
return float(self._memory)
def accuracy(self, _):
for ds in self.data_sets:
ds('test')
_, acc = self._model.evaluate(self.data_sets[0].data_X,
self.data_sets[0].data_Y,
batch_size=self.batch_size,
verbose=0)
return float(acc)
@deprecated(version='0.2', reason='Shifted to internal representation since this '
'might not be supported by future versions of TF.')
def flops_per_sample(self):
if self._flops is None:
self._time_memory_flops_params()
return float(self._flops)
@deprecated(version='0.2', reason='Shifted to internal representation since this '
'might not be supported by future versions of TF.')
def parameters(self):
if self._parameters is None:
self._time_memory_flops_params()
return float(self._parameters)
pass
```
#### File: LAMARCK_ML/replacement/interface.py
```python
class ReplacementSchemeInterface():
"""
Base Class For Replacement Schemes
"""
def __init__(self, **kwargs):
pass
def new_generation(self, prev_gen, descendants):
raise NotImplementedError()
pass
```
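A minimal custom scheme as a sketch; it mirrors the generational behaviour exercised by the tests below, where `descendants` is a list of offspring pools:

```python
from LAMARCK_ML.replacement.interface import ReplacementSchemeInterface


class KeepLastPool(ReplacementSchemeInterface):
  # Hypothetical scheme: the newest offspring pool simply becomes the next generation.
  def new_generation(self, prev_gen, descendants):
    return list(descendants[-1])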
#### File: LAMARCK_ML/replacement/replacement_test.py
```python
import random
import unittest
import os
from LAMARCK_ML.individuals import IndividualInterface
from LAMARCK_ML.replacement import \
GenerationalReplacement, \
NElitism, \
NWeakElitism, \
DeleteN, \
DeleteNLast
from LAMARCK_ML.reproduction.methods import Mutation
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
class TestReplacementSchemes(unittest.TestCase):
class OneMetricIndividual(IndividualInterface, Mutation.Interface):
def __init__(self):
self._fitness = random.random()
self._random_loc = random.random() * 1e2
self._id_name = self.getNewName()
self.metrics = dict()
self.attr = dict()
def __sub__(self, other):
return abs(self._random_loc - other._random_loc)
def mutate(self, prob):
return self
@staticmethod
def reverse_cmp(x, y):
return 0 if x == y else 1 if x < y else -1
def setUp(self) -> None:
self.prev_gen = [TestReplacementSchemes.OneMetricIndividual() for _ in range(int(1e2))]
self.descendants = [[TestReplacementSchemes.OneMetricIndividual() for _ in range(int(1e2))] for _ in range(10)]
def tearDown(self) -> None:
del self.prev_gen
del self.descendants
def test_GenerationalReplacement(self):
rep_obj = GenerationalReplacement()
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(self.descendants[-1]), len(new_gen))
self.assertListEqual(new_gen, self.descendants[-1])
pass
def test_NElitism(self):
n = random.randint(1, 10)
rep_obj = NElitism(**{
NElitism.arg_N: n
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(self.descendants[-1]) + n, len(new_gen))
self.assertEqual(len([ind for ind in new_gen if ind in self.prev_gen]), n)
rep_obj = NElitism(**{
NElitism.arg_N: n,
NElitism.arg_CMP: TestReplacementSchemes.reverse_cmp
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(self.descendants[-1]) + n, len(new_gen))
self.assertEqual(len([ind for ind in new_gen if ind in self.prev_gen]), n)
pass
def test_NWeakElitism(self):
n = random.randint(1, 10)
rep_obj = NWeakElitism(**{
NWeakElitism.arg_N: n
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(new_gen), len(self.prev_gen) + n)
self.assertEqual(len([ind for ind in new_gen if ind not in self.descendants[-1]]), n)
self.assertEqual(len([ind for ind in new_gen if ind in self.descendants[-1]]), len(self.descendants[-1]))
rep_obj = NWeakElitism(**{
NWeakElitism.arg_N: n,
NWeakElitism.arg_CMP: TestReplacementSchemes.reverse_cmp
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(new_gen), len(self.prev_gen) + n)
pass
def test_DeleteN(self):
n = random.randint(1, 10)
rep_obj = DeleteN(**{
DeleteN.arg_N: n
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(new_gen), len(self.prev_gen))
self.assertEqual(len([ind for ind in new_gen if ind in self.prev_gen]) + n, len(self.prev_gen))
pass
def test_DeleteNLast(self):
n = random.randint(1, 10)
    rep_obj = DeleteNLast(**{
DeleteNLast.arg_N: n
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(new_gen), len(self.prev_gen))
self.assertEqual(len([ind for ind in new_gen if ind in self.prev_gen]) + n, len(self.prev_gen))
    rep_obj = DeleteNLast(**{
DeleteNLast.arg_N: n,
DeleteNLast.arg_CMP: TestReplacementSchemes.reverse_cmp
})
new_gen = rep_obj.new_generation(self.prev_gen, self.descendants)
self.assertEqual(len(new_gen), len(self.prev_gen))
self.assertEqual(len([ind for ind in new_gen if ind in self.prev_gen]) + n, len(self.prev_gen))
pass
```
#### File: LAMARCK/LAMARCK_ML/Test_Example.py
```python
import unittest
@unittest.skip("showing class skipping")
class TestExample(unittest.TestCase):
def setUp(self):
# self.widget = Widget('The widget')
pass
def tearDown(self):
# self.widget.dispose()
pass
@unittest.expectedFailure
def test_return5(self):
ex = Example()
self.assertEqual(ex.return5(), 6)
@unittest.skip("demonstrating skipping")
def test_nothing(self):
self.fail("shouldn't happen")
@unittest.skipIf(mylib.__version__ < (1, 3),
"not supported in this library version")
def test_format(self):
# Tests that work for only a certain version of the library.
pass
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_windows_support(self):
# windows specific testing code
pass
def test_even(self):
"""
Test that numbers between 0 and 5 are all even.
"""
for i in range(0, 6):
with self.subTest(i=i):
self.assertEqual(i % 2, 0)
```
#### File: LAMARCK_ML/utils/benchmark.py
```python
from LAMARCK_ML.models.interface import ModellUtil
from datetime import datetime
class Benchmark(ModellUtil):
def __init__(self, **kwargs):
super(Benchmark, self).__init__(**kwargs)
self.last_time_stamp = datetime.now()
def print(self, func, func_name):
print('%s: %s' % (func_name, datetime.now() - self.last_time_stamp))
before = datetime.now()
func()
end = datetime.now()
print('Hooks: %s' % (end - before))
print('============')
self.last_time_stamp = datetime.now()
def end_prepare(self, func):
def wrapper(model):
print('%s: %s' % ('Prepare', datetime.now() - self.last_time_stamp))
before = datetime.now()
func()
end = datetime.now()
print('Hooks: %s' % (end - before))
print('============')
self.last_time_stamp = datetime.now()
return wrapper
def end_evaluate(self, func):
def wrapper(model):
self.print(func, 'Evaluate')
return wrapper
def end_select(self, func):
def wrapper(model):
self.print(func, 'Select')
return wrapper
def end_replace(self, func):
def wrapper(model):
self.print(func, 'Replace')
return wrapper
def end_reproduce(self, func):
def wrapper(model):
self.print(func, 'Reproduce')
return wrapper
def end_reproduce_step(self, func):
def wrapper(model):
self.print(func, 'Reproduce Step')
return wrapper
def new_done(self, func):
def wrapper(model):
self.print(func, 'Done')
return wrapper
```
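The decorators above wrap the model's phase hooks. A standalone sketch of what a wrapped hook does; the no-op hook and `model=None` are placeholders:

```python
from LAMARCK_ML.utils.benchmark import Benchmark

bench = Benchmark()
wrapped = bench.end_evaluate(lambda: None)
wrapped(model=None)  # prints the elapsed time since the last phase plus the hook runtime
```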
#### File: LAMARCK_ML/utils/compareClass_test.py
```python
import unittest
from LAMARCK_ML.utils.compareClass import CompareClass
class TestCompareClass(unittest.TestCase):
def test_comparefunction(self):
prim = 'a'
sec0 = 'b'
sec1 = 'c'
cmp = CompareClass(**{
CompareClass.arg_PRIMARY_ALPHA: -1,
CompareClass.arg_PRIMARY_OBJECTIVE: prim,
CompareClass.arg_PRIMARY_THRESHOLD: 0.5,
CompareClass.arg_SECONDARY_OBJECTIVES: {sec0: -10/0.1,
sec1: -20/0.1}
})
one = {prim: -0.7,
sec0: 10000,
sec1: 40}
other = {prim: -0.6,
sec0: 10000,
sec1: 30}
self.assertTrue(cmp.greaterThan(one, other))
self.assertFalse(cmp.greaterThan(other, one))
```
#### File: utils/dataSaver/dataSaver_test.py
```python
import os
import types
import unittest
from LAMARCK_ML.data_util import TypeShape, IOLabel, DFloat, Shape, DimNames
from LAMARCK_ML.individuals import ClassifierIndividualOPACDG, NetworkIndividualInterface
from LAMARCK_ML.models.models import GenerationalModel
from LAMARCK_ML.reproduction import Mutation, Recombination, AncestryEntity
from LAMARCK_ML.utils.dataSaver.dbSqlite3 import DSSqlite3
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
class TestDBSqlite3(unittest.TestCase):
class dummyModel(GenerationalModel):
def __init__(self, **kwargs):
super(TestDBSqlite3.dummyModel, self).__init__(**kwargs)
_data_nts = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 20)))
_target_nts = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 10)))
self.ci = ClassifierIndividualOPACDG(**{
NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
IOLabel.TARGET: (_target_nts, 'Dataset')},
})
self.anc1 = ClassifierIndividualOPACDG(**{
NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
IOLabel.TARGET: (_target_nts, 'Dataset')},
})
self.anc2 = ClassifierIndividualOPACDG(**{
NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
IOLabel.TARGET: (_target_nts, 'Dataset')},
})
self._GENERATION = [self.ci]
self._GENERATION_IDX = 1
def mut(self):
mut = Mutation()
self._REPRODUCTION = [(mut, [AncestryEntity(mut.ID, self.anc1.id_name, [self.ci.id_name]),
AncestryEntity(mut.ID, self.anc2.id_name, [self.ci.id_name])])]
def rec(self):
rec = Recombination()
self._REPRODUCTION = [(rec, [AncestryEntity(rec.ID, self.ci.id_name, [self.anc1.id_name, self.anc2.id_name])])]
def test_db_generation(self):
db_file = './test_db_gen.db3'
ds = DSSqlite3(**{
DSSqlite3.arg_FILE: db_file,
})
dummyM = TestDBSqlite3.dummyModel()
setattr(dummyM, '_end_evaluate', types.MethodType(ds.end_evaluate(
getattr(dummyM, '_end_evaluate')), dummyM))
dummyM._end_evaluate()
origin = dummyM.generation[0]
ind = ds.get_individual_by_name(origin.id_name)
self.assertEqual(origin, ind)
self.assertIsNot(origin, ind)
os.remove(db_file)
def test_db_ancestry_mut(self):
db_file = './test_db_anc_mut.db3'
ds = DSSqlite3(**{
DSSqlite3.arg_FILE: db_file,
})
dummyM = TestDBSqlite3.dummyModel()
setattr(dummyM, '_end_reproduce', types.MethodType(ds.end_reproduce(
getattr(dummyM, '_end_reproduce')), dummyM))
dummyM.mut()
dummyM._end_reproduce()
_, anc_ent = ds.get_ancestry_for_ind(dummyM.anc1.id_name)
self.assertEqual(anc_ent.method, Mutation.ID)
self.assertEqual(anc_ent.descendant, dummyM.anc1.id_name)
self.assertListEqual(anc_ent.ancestors, [dummyM.ci.id_name])
_, anc_ent = ds.get_ancestry_for_ind(dummyM.anc2.id_name)
self.assertEqual(anc_ent.method, Mutation.ID)
self.assertEqual(anc_ent.descendant, dummyM.anc2.id_name)
self.assertListEqual(anc_ent.ancestors, [dummyM.ci.id_name])
self.assertEqual(ds.get_ancestry_for_ind(dummyM.ci.id_name), (None, None))
os.remove(db_file)
def test_db_ancestry_rec(self):
db_file = './test_db_anc_rec.db3'
ds = DSSqlite3(**{
DSSqlite3.arg_FILE: db_file,
})
dummyM = TestDBSqlite3.dummyModel()
setattr(dummyM, '_end_reproduce', types.MethodType(ds.end_reproduce(
getattr(dummyM, '_end_reproduce')), dummyM))
dummyM.rec()
dummyM._end_reproduce()
self.assertEqual(ds.get_ancestry_for_ind(dummyM.anc1.id_name), (None, None))
self.assertEqual(ds.get_ancestry_for_ind(dummyM.anc2.id_name), (None, None))
_, anc_ent = ds.get_ancestry_for_ind(dummyM.ci.id_name)
self.assertIsNotNone(anc_ent)
self.assertEqual(anc_ent.method, Recombination.ID)
self.assertEqual(anc_ent.descendant, dummyM.ci.id_name)
self.assertListEqual(anc_ent.ancestors, [dummyM.anc1.id_name, dummyM.anc2.id_name])
os.remove(db_file)
```
#### File: utils/dataSaver/interface.py
```python
from LAMARCK_ML.models.interface import ModellUtil
class DataSaverInterface(ModellUtil):
def __init__(self, **kwargs):
super(DataSaverInterface, self).__init__(**kwargs)
def get_individual_by_name(self, name):
raise NotImplementedError()
def get_ancestry_for_ind(self, ind_name):
raise NotImplementedError()
def get_ancestries(self):
raise NotImplementedError()
def get_individual_names(self):
raise NotImplementedError()
```
#### File: utils/evaluation/evaluationHelper.py
```python
from LAMARCK_ML.nn_framework import NeuralNetworkFrameworkInterface
from LAMARCK_ML.utils.evaluation.interface import EvaluationHelperInterface
from joblib import Parallel, delayed
import threading
import queue
import time
class BaseEH(EvaluationHelperInterface):
def __init__(self, **kwargs):
super(BaseEH, self).__init__()
def evaluate(self, generation, metrics):
for individual in generation:
individual.metrics = dict([(m.ID, m.evaluate(individual=individual, framework=None)) for m in metrics])
class LocalEH(EvaluationHelperInterface):
arg_NN_FRAMEWORK = 'framework'
def __init__(self, **kwargs):
super(LocalEH, self).__init__()
self._framework = kwargs.get(self.arg_NN_FRAMEWORK)
# TODO: setup framework
if not isinstance(self._framework, NeuralNetworkFrameworkInterface):
raise Exception()
def evaluate(self, generation, metrics):
for individual in generation:
individual.build_instance(self._framework)
state = individual.train_instance(self._framework)
individual.metrics = {m.ID: m.evaluate(individual=individual, framework=self._framework) for m in metrics}
individual.update_state(**state)
self._framework.reset()
class LocalParallelEH_joblib(EvaluationHelperInterface):
arg_NN_FRAMEWORK_CLASS = 'framework_cls'
arg_NN_FRAMEWORK_KWARGS = 'framework_kwargs'
arg_PARALLEL = 'parallel'
arg_ADAPT_KWARGS = 'adapt_kwargs'
def __init__(self, **kwargs):
super(LocalParallelEH_joblib, self).__init__()
self._parallel = kwargs.get(self.arg_PARALLEL, 1)
self._framework_cls = kwargs.get(self.arg_NN_FRAMEWORK_CLASS)
self._framework_kwargs = kwargs.get(self.arg_NN_FRAMEWORK_KWARGS, dict())
adapt_kwargs = kwargs.get(self.arg_ADAPT_KWARGS, [])
self._framworks = list()
for i in range(self._parallel):
kwargs = {k: (v if k not in adapt_kwargs else v.format(i)) for k, v in self._framework_kwargs.items()}
self._framworks.append(self._framework_cls(**kwargs))
def evaluate(self, generation, metrics):
def eval(ind):
framework = self._framworks.pop(0)
ind.build_instance(framework)
state = ind.train_instance(framework)
ind.metrics = {m.ID: m.evaluate(individual=ind, framework=framework) for m in metrics}
ind.update_state(**state)
framework.reset()
self._framworks.append(framework)
pass
for _ in Parallel(n_jobs=self._parallel, require='sharedmem')(
delayed(eval)(ind) for ind in generation
):
pass
class LocalParallelEH_threading(EvaluationHelperInterface):
arg_NN_FRAMEWORK_CLASS = 'framework_cls'
arg_NN_FRAMEWORK_KWARGS = 'framework_kwargs'
arg_PARALLEL = 'parallel'
arg_ADAPT_KWARGS = 'adapt_kwargs'
class nn_thread(threading.Thread):
def __init__(self, q_in, q_out, framework, *args, **kwargs):
super(LocalParallelEH_threading.nn_thread, self).__init__(*args, **kwargs)
self.q_in = q_in
self.q_out = q_out
self.framework = framework
def run(self):
while True:
try:
ind, metrics = self.q_in.get()
ind.build_instance(self.framework)
ind_state = ind.train_instance(self.framework)
ind_metrics = {m.ID: m.evaluate(individual=ind, framework=self.framework) for m in metrics}
self.q_out.put((ind.id_name, ind_state, ind_metrics))
self.framework.reset()
except queue.Empty:
continue
def __init__(self, **kwargs):
super(LocalParallelEH_threading, self).__init__()
self._parallel = kwargs.get(self.arg_PARALLEL, 1)
self._framework_cls = kwargs.get(self.arg_NN_FRAMEWORK_CLASS)
self._framework_kwargs = kwargs.get(self.arg_NN_FRAMEWORK_KWARGS, dict())
adapt_kwargs = kwargs.get(self.arg_ADAPT_KWARGS, [])
self.q_eval = queue.Queue()
self.q_res = queue.Queue()
self._framworks = list()
for i in range(self._parallel):
kwargs = {k: (v if k not in adapt_kwargs else v.format(i)) for k, v in self._framework_kwargs.items()}
f = LocalParallelEH_threading.nn_thread(self.q_eval, self.q_res, self._framework_cls(**kwargs))
f.start()
self._framworks.append(f)
def evaluate(self, generation, metrics):
waiting = dict()
for ind in generation:
self.q_eval.put((ind, metrics))
waiting[ind.id_name] = ind
while waiting:
ind_id, ind_state, ind_metrics = self.q_res.get()
ind = waiting.pop(ind_id)
ind.metrics = ind_metrics
ind.update_state(**ind_state)
class GraphLayoutEH(EvaluationHelperInterface):
def __init__(self, **kwargs):
super(GraphLayoutEH, self).__init__(**kwargs)
def evaluate(self, generation, metrics):
for individual in generation:
individual.metrics = dict([(m.ID, m.evaluate(individual=individual, framework=None))
for m in metrics])
```
#### File: LAMARCK_ML/utils/SlowDown.py
```python
from LAMARCK_ML.models import ModellUtil
import time
class SlowDown(ModellUtil):
arg_SLEEP_TIME = 'sleep_time'
def __init__(self, **kwargs):
super(SlowDown, self).__init__(**kwargs)
self.sleep_time = kwargs.get(self.arg_SLEEP_TIME, 0)
def end_replace(self, func):
def wrapper(model):
func()
if self.sleep_time > 0:
time.sleep(self.sleep_time)
return wrapper
```
#### File: LAMARCK_ML/utils/sortingClass.py
```python
class SortingClass(object):
def __init__(self, obj, cmp=None):
self.obj = obj
self.cmp = cmp
if self.cmp is None:
self.cmp = lambda x, y: 1 if x > y else -1 if y > x else 0
def __gt__(self, other):
return self.cmp(self.obj, other.obj) > 0
```
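Usage sketch: `max()` only needs `__gt__`, so wrapping objects in `SortingClass` with a custom comparator is enough to pick a best element; the metric dicts here are made up:

```python
from LAMARCK_ML.utils.sortingClass import SortingClass

candidates = [{'ACC': 0.71}, {'ACC': 0.93}, {'ACC': 0.88}]
cmp = lambda x, y: 1 if x['ACC'] > y['ACC'] else -1 if y['ACC'] > x['ACC'] else 0
best = max(SortingClass(obj=c, cmp=cmp) for c in candidates).obj
print(best)  # {'ACC': 0.93}
```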
#### File: LAMARCK_ML/utils/stopGenerational.py
```python
from LAMARCK_ML.models import ModellUtil, NEADone
from LAMARCK_ML.utils.sortingClass import SortingClass
class StopByGenerationIndex(ModellUtil):
arg_GENERATIONS = 'index'
def __init__(self, **kwargs):
super(StopByGenerationIndex, self).__init__(**kwargs)
self.max_t = kwargs.get(self.arg_GENERATIONS, 100)
def end_select(self, func):
def wrapper(model):
func()
if model.abstract_timestamp >= self.max_t:
raise NEADone()
return wrapper
class StopByNoProgress(ModellUtil):
arg_PATIENCE = 'patience'
arg_CMP = 'cmp'
class MetricContainer():
pass
def __init__(self, **kwargs):
super(StopByNoProgress, self).__init__(**kwargs)
self.patience = kwargs.get(self.arg_PATIENCE, 5)
self.cmp = kwargs.get(self.arg_CMP)
self.best_ind_sc = None
self.best_ind_metrics = None
self.waiting = 0
def end_evaluate(self, func):
def wrapper(model):
# new_best_ind_sc = StopByNoProgress.MetricContainer()
new_best_ind_sc = max([SortingClass(obj=ind, cmp=self.cmp) for ind in model.generation])
new_best_metrics = dict(new_best_ind_sc.obj.metrics)
if (self.best_ind_sc is None or (self.cmp is not None and self.cmp(new_best_ind_sc.obj, self.best_ind_sc.obj)) or
new_best_ind_sc > self.best_ind_sc):
self.waiting = 0
self.best_ind_sc = new_best_ind_sc
self.best_ind_metrics = new_best_metrics
else:
self.waiting += 1
if self.waiting > self.patience:
raise NEADone()
func()
return wrapper
```
#### File: visualization/generational/main.py
```python
import colorsys
import math
import os
import random
import time
from shutil import copyfile
import dash
import dash_core_components as dcc
import dash_html_components as html
import numba
import plotly
import plotly.graph_objs as go
from dash.dependencies import Input, Output, ClientsideFunction, State
from dash.exceptions import PreventUpdate
from sklearn.manifold import TSNE
import umap
import numpy as np
from LAMARCK_ML.architectures.neuralNetwork import NeuralNetwork
from LAMARCK_ML.metrics import LayoutCrossingEdges, LayoutDistanceX, LayoutDistanceY
from LAMARCK_ML.models import GenerationalModel
from LAMARCK_ML.models.initialization import RandomGraphLayoutInitializer
from LAMARCK_ML.replacement import NElitism
from LAMARCK_ML.reproduction import Mutation, Recombination
from LAMARCK_ML.selection import ExponentialRankingSelection
from LAMARCK_ML.utils.dataSaver.dbSqlite3 import DSSqlite3
from LAMARCK_ML.utils.evaluation import GraphLayoutEH
from LAMARCK_ML.utils.stopGenerational import StopByGenerationIndex, StopByNoProgress
from LAMARCK_ML.reproduction import AncestryEntity
random.seed()
def test_save():
print('Debug save')
class dashVis():
"""
Visualizes the NEA parameters and its progress if connected to a database.
For Unix:
- expose server with 'server = obj.server'
- start server with 'gunicorn my_python_file:server'
"""
arg_DB_CONFIGS = 'db_configs'
arg_AS_SERVER = 'asServer'
blank_label = '#blank'
def __init__(self, **kwargs):
@numba.njit
def metric(a, b):
return a[0].norm(b[0])
db_config = kwargs.get(self.arg_DB_CONFIGS)
self.projection = []
projection_texts = dict()
projection_distances = dict()
individuals = dict()
# ancestry_inidividuals = set()
# ancestry_edges = dict()
metrics_individuals = set()
# metrics_values =
# train_samples = 1
# # train_X = [np.random.rand(12288).reshape((64, 64, 3)) for _ in range(train_samples)]
# train_X = [np.random.rand(20) for _ in range(train_samples)]
#
# # train_Y = [np.random.rand(1024) for _ in range(train_samples)]
# train_Y = [np.random.rand(10) for _ in range(train_samples)]
#
# _data = TypeShape(DFloat, Shape((DimNames.HEIGHT, 32),
# (DimNames.WIDTH, 32),
# (DimNames.CHANNEL, 3)))
# _data2 = TypeShape(DFloat, Shape((DimNames.HEIGHT, 32),
# (DimNames.WIDTH, 32),
# (DimNames.CHANNEL, 3)))
# # _data = TypeShape(DFloat, Shape((DimNames.UNITS, 20)))
# batch = 1
# dataset = UncorrelatedSupervised(train_X=train_X,
# train_Y=train_Y,
# batch=batch,
# typeShapes={IOLabel.DATA: _data,
# IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 10)))},
# name='Dataset')
# IOLabel.DATA2 = 'DATA2'
# dataset2 = UncorrelatedSupervised(train_X=train_X,
# train_Y=train_Y,
# batch=batch,
# typeShapes={IOLabel.DATA2: _data2,
# IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 10)))},
# name='Dataset2')
# datasets = [dataset]
# IOLabel.DS1 = 'DS1'
# IOLabel.DS2 = 'DS2'
# self.inputs = {IOLabel.DS1: (IOLabel.DATA, _data, dataset.id_name),
# IOLabel.DS2: (IOLabel.DATA2, _data, dataset2.id_name)}
# # self.inputs = {IOLabel.DS1: (IOLabel.DATA, _data, dataset.id_name)}
#
# # outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 512))
# outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
# outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
# self.outputs = {'out0': TypeShape(DFloat, outShape), 'out1':TypeShape(DFloat, outShape1)}
# # self.outputs = {'out': TypeShape(DFloat, outShape)}
# self.functions = [Merge, Conv2D, Flatten, Dense]
# # self.functions = [Dense, Merge]
# blueprints = dict()
# self.projection = None
# self.index = 0
# _nn = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: dict(self.inputs),
# NeuralNetwork.arg_OUTPUT_TARGETS: self.outputs,
# NeuralNetwork.arg_FUNCTIONS: self.functions,
# NeuralNetwork.arg_RECOMBINATION_PROBABILITY: 1})
# blueprints['Network_' + str(self.index)] = _nn
# self.index += 1
# for i in range(5):
# nn = NeuralNetwork(**{NeuralNetwork.arg_INPUTS: dict(self.inputs),
# NeuralNetwork.arg_OUTPUT_TARGETS: self.outputs,
# NeuralNetwork.arg_FUNCTIONS: self.functions,
# NeuralNetwork.arg_RECOMBINATION_PROBABILITY: 1})
# blueprints['Network_' + str(self.index)] = nn
# self.index += 1
# print(i)
#
# for i in range(5):
# nn = random.choice(list(blueprints.values())).mutate(1)[0]
# blueprints['Network_' + str(self.index)] = nn
# self.index += 1
# print(i)
#
# for i in range(5):
# nn = random.choice(list(blueprints.values())).recombine(random.choice(list(blueprints.values())))[0]
# blueprints['Network_' + str(self.index)] = nn
# self.index += 1
# print(i)
# self.manifold = MDS(n_components=2,
# max_iter=1,
# n_init=1,
# dissimilarity='precomputed'
# )
self.manifold = umap.UMAP(n_components=2,
n_neighbors=2,
min_dist=1,
random_state=0,
# metric='precomputed',
metric='euclidean',
# n_epochs=11,
# learning_rate=.1,
)
# self.manifold = TSNE()
self.metrics = [('metric_id0', 'metric label 0'),
('metric_id1', 'metric label 1')]
ss_info_ancestry_layout = {
'plot_bgcolor': '#333',
'paper_bgcolor': '#333',
'xaxis': {
'showgrid': False,
'zeroline': False,
'showticklabels': False
},
'yaxis': {
'showgrid': False,
'zeroline': False,
'showticklabels': False,
},
'margin': go.layout.Margin(
l=0, r=0, b=0, t=25, pad=0,
),
'showlegend': True,
'hovermode': 'closest',
'legend': {'font': {'color': '#ffffff'},
'xanchor': 'center',
'yanchor': 'top',
'x': 0.5,
'y': 0, },
# 'height': 800,
}
self.app = dash.Dash(__name__)
self.app.layout = html.Div(id='root', children=[
html.H1(children="LAMARCK_ML"),
dcc.Tabs(parent_className='custom-tabs', className='contentArea', id='plotArea', children=[
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab', label='NEA Parameters',
style={'display': 'none'},
id='neaParamA',
children=[html.H1('NEA Parameters'),
html.P([
'Web-UI update interval [s]: ',
dcc.Input(
id='update-interval',
type='number',
value='300'
)
]),
]),
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab', label='Search Space', id='SSViewA',
children=[html.H1('Search Space View'),
dcc.Interval(
id='auto-update-projection',
interval=1 * 1000,
# n_intervals=0
),
html.Div(className='graphArea', children=[
dcc.Graph(className='graph', id='searchSpaceProjection',
config={'modeBarButtonsToRemove': [
'select2d',
'lasso2d',
'hoverCompareCartesian',
'toggleSpikelines'
],
'displaylogo': False,
'displayModeBar': True,
}),
]),
html.Div(className='infoArea', children=[
dcc.Tabs(className='SideInfo', children=[
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab',
label='Structure',
children=[dcc.Graph(className='graph', id='network-structure',
config={'modeBarButtonsToRemove': [
# 'zoom2d',
# 'pan2d',
# 'zoomIn2d',
# 'zoomOut2d',
# 'autoScale2d',
# 'resetScale2d',
'select2d',
'lasso2d',
# 'hoverClosestCartesian',
'hoverCompareCartesian',
# 'toImage',
'toggleSpikelines'
],
'displaylogo': False,
'displayModeBar': True,
}),
]),
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab',
label='Ancestry',
children=[dcc.Graph(className='graph', id='searchSpaceAncestry',
config={'modeBarButtonsToRemove': ['select2d',
'lasso2d',
'hoverCompareCartesian',
'toggleSpikelines'],
'displaylogo': False,
'displayModeBar': True,
}),
]),
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab',
label='Download',
children=[
html.Div(className='infoArea H1', children='Graphic',
style={'marginTop': 10}),
dcc.RadioItems(id='SSDownloadGraphic',
options=[
{'label': 'Projection', 'value': 'Projection'},
{'label': 'Structure', 'value': 'Structure'},
{'label': 'Ancestry', 'value': 'Ancestry'}
], value='Projection',
labelStyle={'display': 'block'}),
html.Div(className='infoArea spacer'),
html.Div(className='infoArea H1', children='Format'),
dcc.RadioItems(id='SSDownloadFormat',
options=[
{'label': 'SVG', 'value': 'svg'},
{'label': 'PNG', 'value': 'png'},
{'label': 'JPEG', 'value': 'jpeg'},
{'label': 'WebP', 'value': 'webp'},
{'label': 'PDF', 'value': 'pdf'},
{'label': 'EPS', 'value': 'eps'},
], value='svg',
labelStyle={'display': 'block'}),
html.Div(className='infoArea spacer'),
html.Div(className='infoArea H1', children='Filename'),
dcc.Input(id='SSDownloadFileName',
style={'width': '98.5%',
'height': '12pt',
},
value='LAMARCK_plot'),
html.Button('Download',
id='SearchSpaceDownload',
className='downloadButton',
style={'width': '100%',
'height': '50px',
'margin-top': 10,
},
),
html.Div(children='test', style={'display': 'none'},
id='dummy')
])
]),
]),
]),
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab', label='Ancestry', id='AncestryViewA',
children=[html.H1('Ancestry View'),
dcc.Interval(
id='auto-update-ancestry',
interval=1 * 1000,
# n_intervals=0
),
html.Div(className='graphArea', children=[
dcc.Graph(className='graph', id='ancestry-vis',
config={'modeBarButtonsToRemove': [
'select2d',
'lasso2d',
'hoverCompareCartesian',
'toggleSpikelines'
],
'displaylogo': False,
'displayModeBar': True,
}),
]),
html.Div(className='infoArea', children=[
html.Div(className='infoArea H1',
children='Reproduction'),
dcc.RadioItems(options=[
{'label': 'Hover', 'value': 'hover'},
{'label': 'All', 'value': 'all'},
], value='hover', id='ancestry-rep-style'),
html.Div(className='infoArea spacer'),
html.Div(className='infoArea H1', children='Styling'),
dcc.Checklist(options=[
{'label': 'Repeat Individuals', 'value': 'repInd'}
], id='ancestry-ind-style', value=[]),
]),
]),
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab', label='Metrics', id='plotsA',
children=[html.H1('Metrics'),
dcc.Interval(
id='auto-update-metrics',
interval=1 * 1000,
# n_intervals=0
),
html.Div(className='metricArea', children=[
dcc.Tabs(id='metric-tabs',
children=[
dcc.Tab(className='custom-tab', selected_className='custom-selected-tab',
label=metric_label, children=[
dcc.Graph(className='graph', id=metric_id),
]) for metric_id, metric_label in self.metrics]),
]),
])
]),
html.Div(id="output-clientside"),
])
self.app.clientside_callback(
ClientsideFunction(namespace="clientside", function_name="resize"),
Output("output-clientside", "children"),
[Input('neaParamA', ''),
Input('SSViewA', ''),
Input('AncestryViewA', ''),
Input('plotsA', '')]
)
@self.app.callback(
output=[Output(component_id='dummy', component_property='children')],
inputs=[Input(component_id='SearchSpaceDownload', component_property='n_clicks')],
state=[State(component_id='SSDownloadFileName', component_property='value'),
State(component_id='SSDownloadGraphic', component_property='value'),
State(component_id='SSDownloadFormat', component_property='value'),
State(component_id='searchSpaceProjection', component_property='figure'),
State(component_id='network-structure', component_property='figure'),
State(component_id='searchSpaceAncestry', component_property='figure')
]
)
def searchSpaceDownload(n_clicks,
_f, graphic_label, format_label,
projection_figure, structure_figure, ancestry_figure):
if graphic_label == 'Projection':
fig = projection_figure
elif graphic_label == 'Structure':
fig = structure_figure
else:
fig = ancestry_figure
if fig is None:
raise PreventUpdate
fig = go.Figure(fig)
plotly.io.write_image(fig, file='/tmp/' + _f, format=format_label)
time.sleep(1)
path = os.path.expanduser('~/Downloads')
file_name = '{}/{}.{}'.format(path, _f, format_label)
idx = 0
while os.path.exists(file_name):
file_name = '{}/{}_{}.{}'.format(path, _f, str(idx), format_label)
idx += 1
copyfile('/tmp/' + _f,
file_name)
return ['Test']
def getStructureColor(name, colors=dict(), remaining_val=[10]):
if name == dashVis.blank_label:
return '#777'
color = colors.get(name)
if color is None:
r = remaining_val.pop(0)
color = '#' + ''.join(['%0.2X' % int(v * 255) for v in colorsys.hsv_to_rgb(r / 360, .75, .75)]), r
colors[name] = color
if len(remaining_val) == 0:
add = 360 / (len(colors) * 2)
remaining_val.extend([v[1] + add for v in colors.values()])
return color[0]
def getAncColor(name, colors=dict(), remaining_val=[10]):
return getStructureColor(name, colors=colors, remaining_val=remaining_val)
def NetworkLayout2(nn: NeuralNetwork, datasets=[]):
edges = set()
for f in nn.functions:
for _, o_id in f.inputs.values():
edges.add((o_id, f.id_name))
model = GenerationalModel()
model.add([
RandomGraphLayoutInitializer(**{
RandomGraphLayoutInitializer.arg_DISTANCE: 1,
RandomGraphLayoutInitializer.arg_GEN_SIZE: 30,
RandomGraphLayoutInitializer.arg_EDGES: edges,
RandomGraphLayoutInitializer.arg_METRIC_WEIGHTS:
{
# LayoutCrossingEdges.ID: .25,
# LayoutDistanceX.ID: .5,
# LayoutDistanceY.ID: .25,
}
}),
GraphLayoutEH(),
LayoutCrossingEdges(),
LayoutDistanceX(),
LayoutDistanceY(),
# MaxDiversitySelection(**{MaxDiversitySelection.arg_LIMIT: 20}),
ExponentialRankingSelection(**{ExponentialRankingSelection.arg_LIMIT: 6}),
Recombination(),
Mutation(**{Mutation.arg_P: .25,
Mutation.arg_DESCENDANTS: 1}),
NElitism(**{NElitism.arg_N: 2}),
StopByGenerationIndex(**{StopByGenerationIndex.arg_GENERATIONS: 750}),
StopByNoProgress(**{StopByNoProgress.arg_PATIENCE: 100})
])
model.reset()
model.run()
ind = max(model.generation)
# print(ind.metrics, ind.fitness, model.generation_idx)
# print(min(ind.node2X.values()), max(ind.node2X.values()))
del model
n2d = dict([(n, d) for d, nodes in ind.depth2nodes.items() for n in nodes])
nodes = dict()
edges = dict()
for _in in set([id_name for _, id_name in nn.inputs.values()]):
node_x, node_y, node_text = nodes.get(_in, ([], [], []))
node_x.append(ind.node2X[_in])
node_y.append(n2d[_in])
node_text.append(',<br />'.join(
['{' + nts_id_name + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) + '}'
for d in datasets if d.id_name == _in for nts_id_name, nts in d.outputs.items()]))
if _in not in nodes:
nodes[_in] = (node_x, node_y, node_text)
pseudo_edges = dict()
stack = list(ind.edges)
while stack:
e = stack.pop(0)
e0, e1 = e
if e0 in ind.real_nodes:
tmp = [e]
container = [e1]
while container[0] not in ind.real_nodes:
for e_ in stack:
v0, v1 = e_
if v0 == container[0]:
tmp.append(e_)
stack.remove(e_)
container[0] = v1
break
pseudo_edges[(e0, container[0])] = tmp
else:
stack.append(e)
for _f in nn.functions:
f_name = _f.__class__.__name__
node_x, node_y, node_text = nodes.get(f_name, ([], [], []))
node_x.append(ind.node2X[_f.id_name])
node_y.append(n2d[_f.id_name])
node_text.append(',<br />'.join(
['{' + nts_id_name + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) + '}' for nts_id_name, nts in
_f.outputs.items()]))
if f_name not in nodes:
nodes[f_name] = (node_x, node_y, node_text)
for label_to, (label_from, node_from) in _f.inputs.items():
to_x, to_y = ind.node2X[_f.id_name], n2d[_f.id_name]
from_x, from_y = ind.node2X[node_from], n2d[node_from]
intermediat_edges = pseudo_edges[(node_from, _f.id_name)]
if len(intermediat_edges) <= 1:
m_x, m_y = (to_x + from_x) / 2, (to_y + from_y) / 2
edge_x, edge_y = edges.get(label_to, ([], []))
edge_x.extend([m_x, to_x, None])
edge_y.extend([m_y, to_y, None])
if label_to not in edges:
edges[label_to] = (edge_x, edge_y)
edge_x, edge_y = edges.get(label_from, ([], []))
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if label_from not in edges:
edges[label_from] = (edge_x, edge_y)
else:
edge_x, edge_y = edges.get(label_from, ([], []))
m_id = intermediat_edges[0][1]
m_x, m_y = ind.node2X[m_id], n2d[m_id]
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if label_from not in edges:
edges[label_from] = (edge_x, edge_y)
print(intermediat_edges)
for i in range(1, len(intermediat_edges) - 1):
# print(intermediat_edges[i])
m_id = intermediat_edges[i][0]
from_x, from_y = ind.node2X[m_id], n2d[m_id]
m_id = intermediat_edges[i][1]
m_x, m_y = ind.node2X[m_id], n2d[m_id]
edge_x, edge_y = edges.get(dashVis.blank_label, ([], []))
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if dashVis.blank_label not in edges:
edges[dashVis.blank_label] = (edge_x, edge_y)
m_id = intermediat_edges[-1][0]
m_x, m_y = ind.node2X[m_id], n2d[m_id]
edge_x, edge_y = edges.get(label_to, ([], []))
edge_x.extend([m_x, to_x, None])
edge_y.extend([m_y, to_y, None])
if label_to not in edges:
edges[label_to] = (edge_x, edge_y)
return nodes, edges, \
(min(ind.depth2nodes.keys()) - 1, max(ind.depth2nodes.keys()) + 1), \
(min(ind.node2X.values()) - 1, max(ind.node2X.values()) + 1)
def NetworkLayout(nn: NeuralNetwork, datasets=[]):
stack = list(nn.functions)
df_name2obj = dict([(_f.id_name, _f) for _f in stack])
inputs = set([id_name for _, id_name in nn.inputs.values()])
pos_y = dict([(id_name, 0) for id_name in inputs])
y_pos = dict([(0, [id_name]) for id_name in inputs])
while stack:
_f = stack.pop()
y_coord = 0
all_found = True
for predecessor in [id_name for _, id_name in _f.inputs.values()]:
if (predecessor not in pos_y
and predecessor not in inputs):
predecessor = df_name2obj.get(predecessor)
stack.append(_f)
try:
stack.remove(predecessor)
stack.append(predecessor)
except ValueError:
pass
all_found = False
break
else:
y_coord = max(pos_y.get(predecessor) + 1, y_coord)
if all_found:
pos_y[_f.id_name] = y_coord
y_pos[y_coord] = y_pos.get(y_coord, []) + [_f]
pos_x = dict([(_id, x) for x, _id in enumerate(inputs)])
y_x = {0: len(inputs)}
for y in range(1, max(y_pos.keys()) + 1):
for _f in y_pos.get(y, []):
predecessors = list([id_name for _, id_name in _f.inputs.values()])
x_pos = set()
pred_x = 0
for pred in predecessors:
x = pos_x[pred]
if x in x_pos:
x += 1
for n in y_pos[pos_y[pred]]:
if isinstance(n, str):
_x = pos_x[n]
pos_x[n] = _x + 1 if _x >= x else _x
else:
_x = pos_x[n.id_name]
pos_x[n.id_name] = _x + 1 if _x >= x else _x
x_pos.add(x)
pred_x += x
_y_x = 0 if y_x.get(y) is None else y_x[y] + 1
y_x[y] = _y_x + pred_x
pred_x = max(pred_x * 1.0 / (len(predecessors) if len(predecessors) > 0 else 1), _y_x)
pos_x[_f.id_name] = pred_x
nodes = dict()
for _in in inputs:
node_x, node_y, node_text = nodes.get(_in, ([], [], []))
node_x.append(pos_x[_in])
node_y.append(pos_y[_in])
node_text.append(',<br />'.join(
['{' + nts_id_name + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) + '}'
for d in datasets if d.id_name == _in for nts_id_name, nts in d.outputs.items()]))
if _in not in nodes:
nodes[_in] = (node_x, node_y, node_text)
for _f in nn.functions:
f_name = _f.__class__.__name__
node_x, node_y, node_text = nodes.get(f_name, ([], [], []))
node_x.append(pos_x[_f.id_name])
node_y.append(pos_y[_f.id_name])
node_text.append(',<br />'.join(
['{' + nts_id_name + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) + '}' for nts_id_name, nts in
_f.outputs.items()]))
if f_name not in nodes:
nodes[f_name] = (node_x, node_y, node_text)
edges = dict()
for _f in nn.functions:
for label_to, (label_from, node_from) in _f.inputs.items():
to_x, to_y = pos_x[_f.id_name], pos_y[_f.id_name]
from_x, from_y = pos_x[node_from], pos_y[node_from]
m_x, m_y = (to_x + from_x) / 2, (to_y + from_y) / 2
edge_x, edge_y = edges.get(label_to, ([], []))
edge_x.extend([m_x, to_x, None])
edge_y.extend([m_y, to_y, None])
if label_to not in edges:
edges[label_to] = (edge_x, edge_y)
edge_x, edge_y = edges.get(label_from, ([], []))
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if label_from not in edges:
edges[label_from] = (edge_x, edge_y)
return nodes, edges, max(y_pos.keys()) + 1
@self.app.callback(
[Output(component_id='network-structure', component_property='figure'),
Output(component_id='searchSpaceAncestry', component_property='figure')],
[Input(component_id='searchSpaceProjection', component_property='clickData')]
)
def update_searchSpace_info(input_data):
if input_data is None:
return [{'layout': ss_info_ancestry_layout}, {}]
id_name = input_data['points'][0]['text']
dataSaver = DSSqlite3(**db_config)
individual = dataSaver.get_individual_by_name(id_name)
# TODO: got exception in next line
_, ancestry = dataSaver.get_ancestry_for_ind(id_name)
if ancestry is not None:
levelOneAnc = [dataSaver.get_ancestry_for_ind(ind)[1] for ind in ancestry.ancestors]
del dataSaver
nodes, edges, y_range, x_range = NetworkLayout2(individual.network)
adapted_layout = dict(ss_info_ancestry_layout)
adapted_layout['height'] = y_range[1] * 30 + 200
adapted_layout['yaxis'] = dict(adapted_layout['yaxis'])
adapted_layout['yaxis']['range'] = [y_range[0], y_range[1]]
adapted_layout['xaxis'] = dict(adapted_layout['xaxis'])
adapted_layout['xaxis']['range'] = [x_range[0], x_range[1]]
nodes = [{'x': node_x,
'y': node_y,
'text': node_text,
'mode': 'markers',
'marker': {'size': 10,
'symbol': 'circle',
'color': getStructureColor(name)},
'hoverinfo': 'text',
# 'textposition': 'center right',
'showlegend': True,
'name': name
} for name, (node_x, node_y, node_text) in nodes.items()]
edges = [{'x': edge_x,
'y': edge_y,
'mode': 'lines',
'hoverinfo': 'none',
'name': name,
'showlegend': name != dashVis.blank_label,
'line': {'color': getStructureColor(name)}
} for name, (edge_x, edge_y) in edges.items()]
structure_fig = {
'data': edges + nodes,
'layout': adapted_layout,
}
# ==================
nodes = list()
edges = dict()
nodes.append({
'x': [0],
'y': [0],
'mode': 'markers',
'hoverinfo': 'text',
'text': id_name,
'name': 'selected',
'showlegend': False,
'marker': {'size': 10,
'symbol': 'dot',
'color': '#a55'}
})
if ancestry is not None:
tmp = -(len(ancestry.ancestors) - 1) * .5
offsets = [tmp + i for i in range(len(ancestry.ancestors))]
xs, ys = edges.get(ancestry.method, ([], []))
xs.extend([x for offset in offsets for x in [0, offset, None]])
ys.extend([y for _ in offsets for y in [0, 1, None]])
edges[ancestry.method] = xs, ys
nodes.append({
'x': [offset for offset in offsets],
'y': [1 for _ in offsets],
'mode': 'markers',
'hoverinfo': 'text',
'text': ancestry.ancestors,
'name': 'ancestors 0',
'showlegend': False,
'marker': {'size': 10,
'symbol': 'dot',
'color': [getAncColor(c)
for c in ancestry.ancestors]}
})
# TODO: fix this
# for anc, mid in zip(levelOneAnc, offsets):
# if anc is not None:
# anc_l = len(anc.ancestors)
# tmp = -(anc_l - 1) / anc_l * .8 + mid
# _offsets = [tmp + i * .8 / (anc_l - 1) for i in range(anc_l)]
# xs, ys = edges.get(anc.method, ([], []))
# xs.extend([x for offset in _offsets for x in [mid, offset, None]])
# ys.extend([y for _ in _offsets for y in [1, 2, None]])
# edges[anc.method] = xs, ys
# nodes.append({
# 'x': [offset for offset in _offsets],
# 'y': [2 for _ in _offsets],
# 'mode': 'markers',
# 'hoverinfo': 'text',
# 'text': anc.ancestors,
# 'name': 'ancestors 1',
# 'showlegend': False,
# 'marker': {'size': 10,
# 'symbol': 'dot',
# 'color': [getAncColor(c)
# for c in anc.ancestors]}
# })
edges = [{
'x': xs,
'y': ys,
'mode': 'lines',
'hoverinfo': 'none',
'name': method,
'showlegend': method != dashVis.blank_label,
'line': {'color': getAncColor(method)}
} for method, (xs, ys) in edges.items()]
adapted_layout = dict(ss_info_ancestry_layout)
adapted_layout['yaxis'] = dict(adapted_layout['yaxis'])
adapted_layout['yaxis']['range'] = [-.5, 2.5]
ancestry_fig = {
'data': edges + nodes,
'layout': adapted_layout,
}
return [structure_fig, ancestry_fig]
@self.app.callback(
[Output(component_id='auto-update-projection', component_property='interval'),
Output(component_id='auto-update-ancestry', component_property='interval'),
Output(component_id='auto-update-metrics', component_property='interval')],
[Input(component_id='update-interval', component_property='value')]
)
def change_update_interval(input_data):
interval = int(input_data) * 1000
return interval, interval, interval
@self.app.callback(
Output(component_id='searchSpaceProjection', component_property='figure')
, [Input(component_id='auto-update-projection', component_property='n_intervals')]
)
def auto_update_projection(data):
print('=============== begin projection ===============')
dataSaver = DSSqlite3(**db_config)
abstract_time_stamps = sorted(dataSaver.get_abstract_time_stamps())
base_ind = [set(dataSaver.get_individual_functions(name)) for name in
dataSaver.get_individual_names_by_abstract_time_stamp(abstract_time_stamps[0])]
function_vectors = dict()
projection_texts = dict()
# for time_stamp in abstract_time_stamps[15:]:
for name in dataSaver.get_individual_names():
print(name)
ind_f = set(dataSaver.get_individual_functions(name))
v = [len(i.union(ind_f))-len(i.intersection(ind_f)) for i in base_ind]
function_vectors[name] = v
projection_texts[name] = dataSaver.get_individual_metrics(name).get('ACC', 0)
#next(iter(ind.metrics.values())) if ind.metrics else 0)
xs, ys = zip(*self.manifold.fit_transform(list(function_vectors.values())))
del dataSaver
# all_ind_names = set(dataSaver.get_individual_names())
# set_names = set(projection_texts.keys())
# new_ind_names = [n for n in all_ind_names if n not in set_names]
# all_ind_names = set_names.union(new_ind_names)
#
# idx_m = len(new_ind_names)
# print(new_ind_names)
# for i, ind0 in enumerate(new_ind_names):
# # ind0_ = dataSaver.get_individual_by_name(ind0)
# ind0_functions = set(dataSaver.get_individual_functions(ind0))
# # projection_texts[ind0] = (next(iter(ind0_.metrics.values())) if ind0_.metrics else 0)
# projection_texts[ind0] = dataSaver.get_individual_metrics(ind0).get('ACC', 0)
# # print(ind0, projection_texts[ind0])
# for ind1 in set_names:
# if (ind0, ind1) not in projection_distances:
# # dist = ind0_.norm(dataSaver.get_individual_by_name(ind1))
# ind1_functions = set(dataSaver.get_individual_functions(ind1))
# dist = len(ind0_functions.union(ind1_functions))-len(ind0_functions.intersection(ind1_functions))
# projection_distances[ind0, ind1] = dist
# projection_distances[ind1, ind0] = dist
# for j in range(i + 1, idx_m):
# ind1 = new_ind_names[j]
# # dist = ind0_.norm(dataSaver.get_individual_by_name(ind1))
# ind1_functions = set(dataSaver.get_individual_functions(ind1))
# dist = len(ind0_functions.union(ind1_functions)) - len(ind0_functions.intersection(ind1_functions))
# projection_distances[ind0, ind1] = dist
# projection_distances[ind1, ind0] = dist
# projection_distances[ind0, ind0] = 0
#
# del dataSaver
#
# if len(new_ind_names) > 0:
# distance = [projection_distances[ind0, ind1] for ind0 in all_ind_names for ind1 in all_ind_names]
# distance = np.asarray(distance)
# distance = distance.reshape((len(all_ind_names), -1))
# self.projection = self.manifold.fit_transform(distance)
# xs, ys = zip(*self.projection)
ssprojection = {
'data': [{
'x': list(xs),
'y': list(ys),
'text': list(projection_texts.keys()),
'name': 'text',
'mode': 'markers',
'marker': {'size': 7,
'symbol': ['cross-thin-open' if ev else 'dot' for ev in projection_texts.values()],
'color': list(projection_texts.values()),
'colorscale': 'Oranges',
'showscale': True,
},
'hoverinfo': 'text',
'showlegend': False,
}],
'layout': {
'plot_bgcolor': '#333',
'paper_bgcolor': '#333',
'xaxis': {
'showgrid': False,
'zeroline': False,
'showticklabels': False
},
'yaxis': {
'showgrid': False,
'zeroline': False,
'showticklabels': False,
},
'margin': go.layout.Margin(
l=0, r=0, b=0, t=25, pad=0,
),
'hovermode': 'closest',
}
}
print('update projection')
return ssprojection
def hash2binary(h_value, size, mem=dict(), values=dict()):
if h_value not in mem.get(size, dict()):
_values = values.get(size, set())
_size = 2 ** (size ** 2)
if len(_values) >= _size:
raise Exception('collision!')
v = h_value % _size
while v in _values:
v += 1
v = v % _size
_values.add(v)
values[size] = _values
binary = [v & (1 << i) != 0 for i in range(size ** 2)]
mem_size = mem.get(size, dict())
mem_size[h_value] = binary
mem[size] = mem_size
return mem[size][h_value]
def binary2coordinates(binary, center=(0, 0), size=1.0):
edge_l = int(math.sqrt(len(binary)))
c_x, c_y = center
coordinates = {True: [], False: []}
if edge_l % 2 == 0:
d = size / edge_l
idx = 0
for i in range(1, edge_l):
for j in range(i):
xs = [c_x + j * d * 2 - (i - 1) * d + k * d for k in range(2)] + \
[c_x + j * d * 2 - (i - 1) * d - k * d for k in range(2)] + \
[c_x + j * d * 2 - (i - 1) * d, None]
ys = [c_y + size - k * d - (i - 1) * d for k in range(3)] + \
[c_y + size - k * d - (i - 1) * d for k in range(1, -1, -1)] + [None]
coordinates[binary[idx]].append((xs, ys))
idx += 1
for i in range(edge_l, 0, -1):
for j in range(i):
xs = [c_x + j * d * 2 - (i - 1) * d + k * d for k in range(2)] + \
[c_x + j * d * 2 - (i - 1) * d - k * d for k in range(2)] + \
[c_x + j * d * 2 - (i - 1) * d, None]
ys = [c_y - size - k * d + (i + 1) * d for k in range(3)] + \
[c_y - size - k * d + (i + 1) * d for k in range(1, -1, -1)] + [None]
coordinates[binary[idx]].append((xs, ys))
idx += 1
else:
pass
return coordinates
@self.app.callback(
Output(component_id='ancestry-vis', component_property='figure'),
[Input(component_id='auto-update-ancestry', component_property='n_intervals'),
Input(component_id='ancestry-rep-style', component_property='value'),
Input(component_id='ancestry-ind-style', component_property='value')]
)
def auto_update_ancestry(int_v, rep_style, ind_style):
# return {}
print('rep_style', rep_style)
print('ind_style', ind_style)
marker_size = 1
marker_gapX = .5
marker_gapY = 1
def get_width_depth(individual, anc_dict, mem):
if individual not in mem:
if individual in anc_dict:
mem[individual] = (sum([get_width_depth(anc, anc_dict, mem)[0]
for anc in anc_dict[individual].ancestors]) + \
(len(anc_dict[individual].ancestors) - 1) * marker_gapX,
max([get_width_depth(anc, anc_dict, mem)[1]
for anc in anc_dict[individual].ancestors]) + marker_gapY + marker_size)
else:
mem[individual] = marker_size, marker_size
return mem[individual]
def get_x_y(individual, anc_dict, mem, edges, coords, x_offset, y_offset):
w, h = get_width_depth(individual, anc_dict, mem)
if individual in anc_dict:
x = 0
y = y_offset + marker_size / 2
edge_to = list()
_x_offset = x_offset
w -= (len(anc_dict[individual].ancestors) - 1) * marker_gapX
for anc in anc_dict[individual].ancestors:
dw, dh = get_width_depth(anc, anc_dict, mem)
dx, dy = get_x_y(anc, anc_dict, mem, edges, coords, _x_offset, y_offset + marker_gapY + marker_size)
edge_to.append((dx, dy))
x += dw / w * dx
_x_offset += dw + marker_gapX
for e in edge_to:
edges[anc_dict[individual].method] = edges.get(anc_dict[individual].method, []) + [(e, (x, y))]
else:
x, y = w / 2 + x_offset, y_offset + marker_size / 2
coords[(x, y)] = individual
return x, y
coordinates = {True: [], False: []}
ancestry_edges = dict()
dataSaver = DSSqlite3(**db_config)
abstract_time_ancestries = dict()
for abstract_time, ancestry in dataSaver.get_ancestries():
abstract_time_ancestries[abstract_time] = abstract_time_ancestries.get(abstract_time, []) + [ancestry]
y_offset = 0
for abstract_time in sorted(abstract_time_ancestries.keys())[:5]:
descendants = set()
ancestors = set()
anc_dict = dict()
for ancestry in abstract_time_ancestries[abstract_time]:
if isinstance(ancestry, AncestryEntity):
descendants.add(ancestry.descendant)
ancestors.update(set(ancestry.ancestors))
anc_dict[ancestry.descendant] = ancestry
else:
pass
mem = dict()
max_depth = max([get_width_depth(next_gen_ind, anc_dict, mem)[1]
for next_gen_ind in descendants.difference(ancestors)])
x_offset = 0
for next_gen_ind in descendants.difference(ancestors):
coords = dict()
edges = dict()
coords[get_x_y(next_gen_ind, anc_dict, mem, edges, coords, x_offset, y_offset)] = next_gen_ind
for (x, y), individual in coords.items():
c = binary2coordinates(hash2binary(hash(individual), 4), center=(x, y), size=.5)
coordinates[True].extend(c[True])
coordinates[False].extend(c[False])
x_offset += get_width_depth(next_gen_ind, anc_dict, mem)[0] + 2 * marker_gapX
for anc_method, edge_list in edges.items():
method_edges = ancestry_edges.get(anc_method, {'x': [], 'y': []})
for (f_x, f_y), (t_x, t_y) in edge_list:
method_edges['x'].extend([f_x, t_x, None])
method_edges['y'].extend([f_y, t_y, None])
ancestry_edges[anc_method] = method_edges
y_offset -= max_depth + marker_gapY
ancestry = {
'data': [{
'x': coordinates['x'],
'y': coordinates['y'],
'mode': 'lines',
'line': {
'color': getAncColor(method),
'width': 2.0,
},
'name': method,
'showlegend': True
} for method, coordinates in ancestry_edges.items()] + [{
'x': xs,
'y': ys,
'mode': 'none',
'fill': 'tozeroy',
'fillcolor': '#ffffffff',
'hoverinfo': 'none',
'line': {
'color': '#000f',
'width': .5,
},
'showlegend': False,
} for xs, ys in coordinates[True]] + [{
'x': xs,
'y': ys,
'mode': 'none',
'fill': 'tozeroy',
'fillcolor': '#000000ff',
'hoverinfo': 'none',
'line': {
'color': '#ffff',
'width': .5,
},
'showlegend': False,
} for xs, ys in coordinates[False]],
'layout': {
'plot_bgcolor': '#333',
'paper_bgcolor': '#333',
'xaxis': {
'showgrid': False,
'zeroline': False,
'showticklabels': False,
},
'yaxis': {
'showgrid': False,
'zeroline': False,
'showticklabels': True,
'scaleanchor': 'x',
'scaleratio': 1,
},
'margin': go.layout.Margin(
l=0, r=0, b=0, t=25, pad=0,
),
'hovermode': 'closest',
}
}
return ancestry
@self.app.callback(
[Output(component_id='metric-tabs', component_property='children')],
[Input(component_id='auto-update-metrics', component_property='n_intervals')]
)
def auto_update_metrics(data):
dataSaver = DSSqlite3(**db_config)
individual_names = dataSaver.get_individual_names()
data = dict()
count = 0
for ind in individual_names:
if ind in individuals:
continue
count += 1
if count > 10:
break
# individual = dataSaver.get_individual_by_name(ind)
metrics = dataSaver.get_individual_metrics(ind)
time_stamps = dataSaver.time_stamps_by_individual_name(ind)
individuals[ind] = metrics, time_stamps
# if len(time_stamps) > 1:
# print(metrics.items())
# print(time_stamps)
# print(ind)
for metrics, time_stamps in individuals.values():
for m, value in metrics.items():
data_m = data.get(m, dict())
for real, abstract in time_stamps:
data_m[abstract] = data_m.get(abstract, []) + [value]
data[m] = data_m
del dataSaver
# print('================')
tabs = list()
for m_label, m_data in data.items():
xs = list()
ys = list()
for t in sorted(m_data.keys()):
for v in m_data[t]:
xs.append(t)
ys.append(v)
tabs.append(dcc.Tab(className='custom-tab', selected_className='custom-selected-tab',
label=m_label, children=[
dcc.Graph(className='graph', id=m_label, figure={'data': [{'x': xs,
'y': ys,
'mode': 'markers'}],
'layout': {}})
]))
return [tabs]
if not kwargs.get(self.arg_AS_SERVER, False):
self.app.run_server(debug=False, port=8050)
@property
def server(self):
return self.app.server
if __name__ == '__main__':
dashVis_ = dashVis(**{dashVis.arg_AS_SERVER: False,
dashVis.arg_DB_CONFIGS: {
DSSqlite3.arg_FILE: '/path/to/database/file.db3'
}})
# else:
# dashVis_ = dashVis(asServer=True)
# server = dashVis_.server
#
# dcc.Dropdown(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# value='MTL'
# ),
# dcc.Dropdown(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# value=['MTL', 'SF'],
# multi=True
# ),
# dcc.RadioItems(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# value='MTL'
# ),
# dcc.Checklist(
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': u'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# value=['MTL', 'SF']
# ),
# dcc.Input(value='MTL', type='text'),
# dcc.Slider(
# min=0,
# max=9,
# marks={i: 'Label {}'.format(i) if i == 1 else str(i) for i in range(1, 6)},
# value=5,
# ),
# html.Button(id='submit-button', n_clicks=0, children='Submit'),
# [Input('submit-button', 'n_clicks')],
# dcc.Input(id='input-1-state', type='text', value='Montréal'),
# dcc.Input(id='input-2-state', type='text', value='Canada'),
# [State('input-1-state', 'value'),
# State('input-2-state', 'value')])
# dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
``` |
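Following the class docstring, a sketch of how this visualization might be exposed behind a WSGI server on Unix; the import location of `dashVis` and the database path are assumptions.
```python
# Hypothetical my_python_file.py -- run with: gunicorn my_python_file:server
# from <your_package>.visualization.generational.main import dashVis  # adjust to your layout
from LAMARCK_ML.utils.dataSaver.dbSqlite3 import DSSqlite3

dash_vis = dashVis(**{dashVis.arg_AS_SERVER: True,
                      dashVis.arg_DB_CONFIGS: {DSSqlite3.arg_FILE: '/path/to/database/file.db3'}})
server = dash_vis.server  # exposed for gunicorn
```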
{
"source": "jonas-eberle/pyEOM",
"score": 2
} |
#### File: predefined/MODIS/MCD45A1.py
```python
__author__ = 'we32zac'
from pyEOM.datasets import Dataset as DatasetAbs
class Dataset(DatasetAbs):
shortname = 'MCD45A1'
platform = 'Combined'
collection = '005'
rastertype = 'Tile'
timeInterval = 'P1M'
host = 'http://e4ftl01.cr.usgs.gov'
dir = '/MODIS_Composites/MOTA/MCD45A1.005'
sources = ['LPDAAC']
def getDownloadInfo(self):
return dict(shortname=self.shortname, platform=self.platform, collection=self.collection, rastertype=self.rastertype, host=self.host, directory=self.dir, sources=self.sources)
def getBands(self):
return self.bands
def getThematicBands(self):
return [self.bands['Burndate']]
def getQualityBands(self):
return []
bands = dict(Burndate={
'name': 'MOD_GRID_Monthly_500km_BA:burndate',
'nodata': 11111,
'scale': None,
'offset': None,
'imagetype': 'thematicClassification',
'identifier': 'MODIS_MCD45_A1_BurntArea_Date_Series',
'title': 'Monthly Burnt Area Date from MODIS Aqua and Terra',
'abstract': 'Time-series of monthly MODIS Aqua and Terra Burnt Area at 500 m spatial resolution. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOTA/).',
'keywords': 'Modis,Aqua,Terra,Siberia,Burnt Area,Fire,Global,Monthly,Series',
'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOTA/) and processed with GDAL 1.9.0.',
'datasetname': 'Burnt Area',
'datatype': 'RASTER',
'resolution': 500.0,
'layername': 'mcd45a1_burndate',
'templates': 'template_header_evi.html',
'wcs_description': 'MODIS Combined Burnt Area Date Monthly',
'wms_description': 'MODIS Combined Burnt Area Date Monthly',
'colormap': 'burntarea_colorbar.map',
'resolution_unit': 'm',
'unit': 'None'
}
)
```
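A small usage sketch (assumed; the exact import path of the predefined module follows the file header above):
```python
# Hypothetical usage of the predefined MCD45A1 dataset description.
from pyEOM.datasets.predefined.MODIS.MCD45A1 import Dataset

ds = Dataset()
print(ds.getDownloadInfo())  # dict with shortname, platform, collection, rastertype, host, directory, sources
print([band['name'] for band in ds.getThematicBands()])  # the burndate band definition
```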
#### File: predefined/MODIS/MOD10A1.py
```python
__author__ = 'we32zac'
from pyEOM.datasets import Dataset as DatasetAbs
class Dataset(DatasetAbs):
shortname = 'MOD10A1'
platform = 'Terra'
collection = '005'
rastertype = 'Tile'
timeInterval = 'P1D'
documentation = 'http://nsidc.org/data/docs/daac/mod10_modis_snow/version_5/mod10a1_local_attributes.html'
host = 'n5eil01u.ecs.nsidc.org'
dir = '/SAN/MOST/MOD10A1.005/'
sources = ['NSIDC']
def getDownloadInfo(self):
return dict(shortname=self.shortname, platform=self.platform, collection=self.collection, rastertype=self.rastertype, host=self.host, directory=self.dir, sources=self.sources)
def getBands(self):
return self.bands
def getThematicBands(self):
return [self.bands['SnowCover'], self.bands['SnowAlbedo'], self.bands['FractSnowCover']]
def getQualityBands(self):
return [self.bands['SnowQA']]
bands = dict(SnowCover={
'name': 'MOD_Grid_Snow_500m:Snow_Cover_Daily_Tile',
'nodata': 255,
'scale': 1,
'imagetype': 'thematicClassification',
'offset': None,
'identifier': 'MODIS_MOD10_A1_SnowCover_Series',
'title': 'Daily Snow Cover from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Snow Cover',
'datatype': 'RASTER',
'resolution': 500,
'layername': 'mod10a1_snowcover',
'templates': 'template_header_snow.html',
'wcs_description': 'MODIS Terra Snow Cover Daily',
'wms_description': 'MODIS Terra Snow Cover Daily',
'colormap': 'snow_colorbar.map',
'resolution_unit': 'm',
'unit': 'Index'
}, SnowQA={
'name': 'MOD_Grid_Snow_500m:Snow_Spatial_QA',
'scale': 1,
'nodata': 255,
'imagetype': 'qualityInformation',
'offset': None,
'quality_datatype': 'int',
'identifier': 'MODIS_MOD10_A2_SnowSpatialQA_Series',
'title': 'Daily Snow Spatial QA from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Snow Spatial QA',
'datatype': 'RASTER',
'resolution': 500,
'layername': 'mod10a1_snowspatialqa',
'templates': 'template_header_snowspatialqa.html',
'wcs_description': 'MODIS Terra Snow Spatial QA Daily',
'wms_description': 'MODIS Terra Snow Spatial QA Daily',
'colormap': 'snowspatialqa_colorbar.map',
'resolution_unit': 'm',
'unit': ''
}, SnowAlbedo={
'name': 'MOD_Grid_Snow_500m:Snow_Albedo_Daily_Tile',
'scale': 1,
'nodata': -6,
'imagetype': 'thematicClassification',
'offset': None,
'identifier': 'MODIS_MOD10_A2_SnowAlbedo_Series',
'title': 'Daily Snow Albedo from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Snow Albedo',
'datatype': 'RASTER',
'resolution': 500,
'layername': 'mod10a1_snowalbedo',
'templates': 'template_header_snowalbedo.html',
'wcs_description': 'MODIS Terra Snow Albedo Daily',
'wms_description': 'MODIS Terra Snow Albedo Daily',
'colormap': 'snowalbedo_colorbar.map',
'resolution_unit': 'm',
'unit': ''
}, FractSnowCover={
'name': 'MOD_Grid_Snow_500m:Fractional_Snow_Cover',
'scale': 1,
'nodata': 255,
'imagetype': 'thematicClassification',
'offset': None,
'identifier': 'MODIS_MOD10_A2_FractSnowCover_Series',
'title': 'Daily Fractional Snow Cover from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Fractional Snow Cover',
'datatype': 'RASTER',
'resolution': 500,
'layername': 'mod10a1_factsnowcover',
'templates': 'template_header_factsnowcover.html',
'wcs_description': 'MODIS Terra Fractional Snow Cover Daily',
'wms_description': 'MODIS Terra Fractional Snow Cover Daily',
'colormap': 'factsnowcover_colorbar.map',
'resolution_unit': 'm',
'unit': ''
}
)
```
#### File: predefined/MODIS/MOD10CM.py
```python
__author__ = 'we32zac'
from pyEOM.datasets import Dataset as DatasetAbs
class Dataset(DatasetAbs):
shortname = 'MOD10CM'
platform = 'Terra'
collection = '005'
rastertype = 'CMG'
timeInterval = 'P1M'
documentation = 'http://nsidc.org/data/docs/daac/mod10_modis_snow/version_5/mod10cm_local_attributes.html'
host = 'n5eil01u.ecs.nsidc.org'
dir = '/SAN/MOST/MOD10CM.005/'
sources = ['NSIDC']
def getDownloadInfo(self):
return dict(shortname=self.shortname, platform=self.platform, collection=self.collection, rastertype=self.rastertype, host=self.host, directory=self.dir, sources=self.sources)
def getBands(self):
return self.bands
def getThematicBands(self):
return [self.bands['SnowCover']]
def getQualityBands(self):
return [self.bands['SnowQA']]
bands = dict(SnowCover={
'name': 'MOD_CMG_Snow_5km:Snow_Cover_Monthly_CMG',
'nodata': 255,
'scale': 1,
'imagetype': 'thematicClassification',
'offset': None
}, SnowQA={
'name': 'MOD_CMG_Snow_5km:Snow_Spatial_QA',
'scale': 1,
'nodata': 255,
'imagetype': 'qualityInformation',
'offset': None,
'quality_datatype': 'int'
}
)
```
#### File: predefined/MODIS/MOD13Q1.py
```python
__author__ = 'we32zac'
from pyEOM.datasets import Dataset as DatasetAbs
class Dataset(DatasetAbs):
shortname = 'MOD13Q1'
platform = 'Terra'
collection = '005'
rastertype = 'Tile'
timeInterval = 'P16D'
host = 'http://e4ftl01.cr.usgs.gov'
dir = '/MODIS_Composites/MOLT/MOD13Q1.005'
sources = ['LPDAAC','GEE']
def getDownloadInfo(self):
return dict(shortname=self.shortname, platform=self.platform, collection=self.collection, rastertype=self.rastertype, host=self.host, directory=self.dir, sources=self.sources)
def getBands(self):
return self.bands
def getThematicBands(self):
return [self.bands['EVI'], self.bands['NDVI']]
def getQualityBands(self):
return [self.bands['PR'], self.bands['QC']]
bands = dict(EVI={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days EVI',
'nodata': -3000,
'scale': 0.0001,
'offset': None,
'imagetype': 'thematicClassification',
'identifier': 'MODIS_MOD13_Q1_EVI_Series',
'title': '16-daily Enhanced Vegetation Index from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Enhanced Vegetation Index',
'datatype': 'RASTER',
'resolution': 250,
'layername': 'mod13q1_evi',
'templates': 'template_header_evi.html',
'wcs_description': 'MODIS Terra EVI 16-daily',
'wms_description': 'MODIS Terra EVI 16-daily',
'colormap': 'evi_colorbar.map',
'resolution_unit': 'm',
'unit': 'Index'
}, NDVI={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days NDVI',
'scale': 0.0001,
'nodata': -3000,
'imagetype': 'thematicClassification',
'offset': None,
'identifier': 'MODIS_MOD13_Q1_NDVI_Series',
'title': '16-daily Normalized Difference Vegetation Index from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Normalized Difference Vegetation Index',
'datatype': 'RASTER',
'resolution': 250,
'layername': 'mod13q1_ndvi',
'templates': 'template_header_ndvi.html',
'wcs_description': 'MODIS Terra NDVI 16-daily',
'wms_description': 'MODIS Terra NDVI 16-daily',
'colormap': 'ndvi_colorbar.map',
'resolution_unit': 'm',
'unit': 'Index'
}, PR={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days pixel reliability',
'scale': 1,
'nodata': -1,
'imagetype': 'qualityInformation',
'offset': None,
'quality_datatype': 'int',
'identifier': 'MODIS_MOD13_Q1_PR_Series',
'title': '16-daily Vegetation Indices Pixel Reliability from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Pixel reliability',
'datatype': 'RASTER',
'resolution': 250,
'layername': 'mod13q1_pr',
'templates': 'template_header_vi_qc.html',
'wcs_description': 'MODIS Terra VI Pixel Reliability 16-daily',
'wms_description': 'MODIS Terra VI Pixel Reliability 16-daily',
'colormap': 'vi_pr_colormap.map',
'resolution_unit': 'm',
'unit': None
}, QC={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days VI Quality',
'scale': 1,
'nodata': 65535,
'imagetype': 'qualityInformation',
'offset': None,
'quality_datatype': 'bit',
'identifier': 'MODIS_MOD13_Q1_QC_Series',
'title': '16-daily Vegetation Indices Quality from MODIS Terra',
'abstract': '',
'keywords': '',
'lineage': '',
'datasetname': 'Quality Flags',
'datatype': 'RASTER',
'resolution': 250,
'layername': 'mod13q1_qc',
'templates': 'template_header_vi_qc.html',
'wcs_description': 'MODIS Terra VI Quality 16-daily',
'wms_description': 'MODIS Terra VI Quality 16-daily',
'colormap': 'vi_qc_colormap.map',
'resolution_unit': 'm',
'unit': None
}
)
```
#### File: predefined/MODIS/MYD13Q1.py
```python
__author__ = 'we32zac'
from pyEOM.datasets import Dataset as DatasetAbs
class Dataset(DatasetAbs):
shortname = 'MYD13Q1'
platform = 'Aqua'
collection = '005'
rastertype = 'Tile'
timeInterval = 'P16D'
host = 'http://e4ftl01.cr.usgs.gov'
dir = '/MODIS_Composites/MOLA/MYD13Q1.005'
sources = ['LPDAAC']
def getDownloadInfo(self):
return dict(shortname=self.shortname, platform=self.platform, collection=self.collection, rastertype=self.rastertype, host=self.host, directory=self.dir, sources=self.sources)
def getBands(self):
return self.bands
def getThematicBands(self):
return [self.bands['EVI'], self.bands['NDVI']]
def getQualityBands(self):
return [self.bands['PR'], self.bands['QC']]
bands = dict(PR={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days pixel reliability',
'nodata': -1,
'scale': 1,
'offset': None,
'imagetype': 'qualityInformation',
'identifier': 'MODIS_MYD13_Q1_PR_Series',
'title': '16-daily Vegetation Indices Pixel Reliability from MODIS Aqua',
'abstract': 'Pixel Reliability from time-series of 16-daily Aqua MODIS Vegetation Indices at 250 m spatial resolution. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/).',
'keywords': 'MODIS,Aqua,Quality,Pixel,Reliability,Vegetation,NDVI,EVI,Global,16-daily,Series',
'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/) and processed with GDAL 1.9.0.',
'datasetname': 'Pixel Reliability',
'datatype': 'RASTER',
'resolution': 250.0,
'layername': 'myd13q1_pr',
'templates': 'template_header_evi.html',
'wcs_description': 'MODIS Aqua VI Pixel Reliability 16-daily',
'wms_description': 'MODIS Aqua VI Pixel Reliability 16-daily',
'colormap': 'vi_pr_colormap.map',
'resolution_unit': 'm',
'unit': 'Index'
},QC={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days VI Quality',
'nodata': 65535,
'scale': 1,
'offset': None,
'imagetype': 'qualityInformation',
'identifier': 'MODIS_MYD13_Q1_QC_Series',
'title': '16-daily Vegetation Indices Quality from MODIS Aqua',
'abstract': 'Quality data from time-series of 16-daily Aqua MODIS Vegetation Indices at 250 m spatial resolution. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/).',
'keywords': 'MODIS,Aqua,Quality,Vegetation,NDVI,EVI,Global,16-daily,Series',
'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/) and processed with GDAL 1.9.0.',
'datasetname': 'Quality Flags',
'datatype': 'RASTER',
'resolution': 250.0,
'layername': 'myd13q1_qc',
'templates': 'template_header_evi.html',
'wcs_description': 'MODIS Aqua VI Quality 16-daily',
'wms_description': 'MODIS Aqua VI Quality 16-daily',
'colormap': 'vi_qc_colormap.map',
'resolution_unit': 'm',
'unit': 'Index'
},NDVI={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days NDVI',
'nodata': -3000,
'scale': 0.0001,
'offset': None,
'imagetype': 'thematicClassification',
'identifier': 'MODIS_MYD13_Q1_NDVI_Series',
'title': '16-daily Normalized Difference Vegetation Index from MODIS Aqua',
'abstract': 'Time-series of 16-daily Aqua MODIS Normalized Difference Vegetation Index (NDVI) at 250 m spatial resolution. To retrieve actual values a scale factor of 0.0001 has to be applied. The unscaled nodata value is encoded as 0. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/).',
'keywords': 'MODIS,Aqua,Siberia,NDVI,Normalized Difference Vegetation Index,Vegetation,Index,Global,16-daily,Series',
'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/) and processed with GDAL 1.9.0.',
'datasetname': 'Normalized Difference Vegetation Index',
'datatype': 'RASTER',
'resolution': 250.0,
'layername': 'myd13q1_ndvi',
'templates': 'template_header_evi.html',
'wcs_description': 'MODIS Aqua NDVI 16-daily',
'wms_description': 'MODIS Aqua NDVI 16-daily',
'colormap': 'ndvi_colorbar.map',
'resolution_unit': 'm',
'unit': 'None'
},EVI={
'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days EVI',
'nodata': -3000,
'scale': 0.0001,
'offset': None,
'imagetype': 'thematicClassification',
'identifier': 'MODIS_MYD13_Q1_EVI_Series',
'title': '16-daily Enhanced Vegetation Index from MODIS Aqua',
'abstract': 'Time-series of 16-daily Aqua MODIS Enhanced Vegetation Index (EVI) at 250 m spatial resolution. To retrieve actual values a scale factor of 0.0001 has to be applied. The unscaled nodata value is encoded as 0. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/).',
'keywords': 'MODIS,Aqua,Siberia,EVI,Enhanced Vegetation Index,Vegetation,Index,Global,16-daily,Series',
'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/) and processed with GDAL 1.9.0.',
'datasetname': 'Enhanced Vegetation Index',
'datatype': 'RASTER',
'resolution': 250.0,
'layername': 'myd13q1_evi',
'templates': 'template_header_evi.html',
'wcs_description': 'MODIS Aqua EVI 16-daily',
'wms_description': 'MODIS Aqua EVI 16-daily',
'colormap': 'evi_colorbar.map',
'resolution_unit': 'm',
'unit': 'None'
}
)
``` |
{
"source": "jonas-eberle/stactools",
"score": 2
} |
#### File: tests/landsat/test_create_stac.py
```python
import os
from tempfile import TemporaryDirectory
from typing import cast
import pystac
from pystac.utils import is_absolute_href
from shapely.geometry import box, shape, mapping
import rasterio
from stactools.core.projection import reproject_geom
from stactools.landsat.assets import SR_ASSET_DEFS, THERMAL_ASSET_DEFS
from stactools.landsat.commands import create_landsat_command
from stactools.landsat.constants import (L8_SR_BANDS, L8_SP_BANDS)
from tests.utils import CliTestCase
from tests.landsat.data import TEST_MTL_PATHS
class CreateItemTest(CliTestCase):
def create_subcommand_functions(self):
return [create_landsat_command]
def test_create_item(self):
def check_proj_bbox(item, tif_bounds):
bbox = item.bbox
bbox_shp = box(*bbox)
proj_bbox = item.ext.projection.bbox
self.assertEqual(proj_bbox, list(tif_bounds))
proj_bbox_shp = box(*proj_bbox)
reproj_bbox_shp = shape(
reproject_geom(f"epsg:{item.ext.projection.epsg}", "epsg:4326",
mapping(proj_bbox_shp)))
self.assertLess((reproj_bbox_shp - bbox_shp).area,
0.0001 * reproj_bbox_shp.area)
for mtl_path in TEST_MTL_PATHS:
with self.subTest(mtl_path):
base_path = "_".join(mtl_path.split("_")[:-1])
tif_path = f"{base_path}_SR_B3.TIF"
with rasterio.open(tif_path) as dataset:
tif_bounds = dataset.bounds
with TemporaryDirectory() as tmp_dir:
cmd = [
'landsat', 'create-item', '--mtl', mtl_path,
'--output', tmp_dir
]
self.run_command(cmd)
jsons = [
p for p in os.listdir(tmp_dir) if p.endswith('.json')
]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.read_file(os.path.join(tmp_dir, fname))
item.validate()
bands_seen = set()
for asset in item.assets.values():
self.assertTrue(is_absolute_href(asset.href))
bands = item.ext.eo.get_bands(asset)
if bands is not None:
bands_seen |= set(b.name for b in bands)
if item.properties['landsat:processing_level'] == 'L2SP':
self.assertEqual(
bands_seen,
set(L8_SR_BANDS.keys()) | set(L8_SP_BANDS.keys()))
else:
self.assertEqual(bands_seen, set(L8_SR_BANDS.keys()))
check_proj_bbox(item, tif_bounds)
def test_convert_and_create_agree(self):
def get_item(output_dir: str) -> pystac.Item:
jsons = [p for p in os.listdir(output_dir) if p.endswith('.json')]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = cast(pystac.Item,
pystac.read_file(os.path.join(output_dir, fname)))
item.validate()
return item
for mtl_path in TEST_MTL_PATHS:
with self.subTest(mtl_path):
with TemporaryDirectory() as tmp_dir:
create_dir = os.path.join(tmp_dir, 'create')
convert_dir = os.path.join(tmp_dir, 'convert')
original_dir = os.path.join(tmp_dir, 'original')
os.makedirs(create_dir, exist_ok=True)
os.makedirs(convert_dir, exist_ok=True)
os.makedirs(original_dir, exist_ok=True)
create_cmd = [
'landsat', 'create-item', '--mtl', mtl_path,
'--output', create_dir
]
self.run_command(create_cmd)
stac_path = mtl_path.replace('_MTL.xml', '_SR_stac.json')
import shutil
shutil.copy(
stac_path,
os.path.join(original_dir,
os.path.basename(stac_path)))
convert_cmd = [
'landsat', 'convert', '--stac', stac_path, '--dst',
convert_dir
]
self.run_command(convert_cmd)
created_item = get_item(create_dir)
# Ensure media_type is set
for asset in created_item.assets.values():
self.assertTrue(asset.media_type is not None)
for asset_def in SR_ASSET_DEFS:
self.assertIn(asset_def.key, created_item.assets)
if created_item.properties[
'landsat:processing_level'] == 'L2SP':
for asset_def in THERMAL_ASSET_DEFS:
self.assertIn(asset_def.key, created_item.assets)
# TODO: Resolve disagreements between convert and create.
# This might best be informed by USGS's own STAC 1.0.* items
# when they are made available.
# created_item = get_item(create_dir)
# converted_item = get_item(convert_dir)
# self.assertTrue(
# set(converted_item.assets.keys()).issubset(
# set(created_item.assets.keys())),
# msg=
# f"{set(converted_item.assets.keys()) - set(created_item.assets.keys())}"
# )
``` |
{
"source": "jonaseck2/kubernetes-labs",
"score": 3
} |
#### File: lab1/python/app.py
```python
from flask import Flask
import os
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello " + os.environ.get("NAME", "you") + "\n"
if __name__ == "__main__":
port = int(os.environ.get("PORT", 3000))
app.run(debug=True,host='0.0.0.0',port=port)
``` |
{
"source": "jonaseck2/rabbitmq-credentials-test",
"score": 2
} |
#### File: jonaseck2/rabbitmq-credentials-test/start.py
```python
import pika
import os
config = {
'rabbitmq': {
'host': 'localhost',
'port': 5672,
'virtual_host': '/',
'user': 'guest',
'password': '<PASSWORD>',
}
}
for section in config:
for key,value in config[section].items():
env = os.getenv(section.upper() + '_' + key.upper())
if env:
print("%s_%s=\"%s\" as %s" % (section.upper(), key.upper(), env, type(value)))
config[section][key] = type(value)(env)
config['rabbitmq']['credentials'] = pika.PlainCredentials(config['rabbitmq'].pop('user'), config['rabbitmq'].pop('password'))
connection = pika.BlockingConnection(pika.ConnectionParameters(**config['rabbitmq']))
channel = connection.channel()
channel.queue_declare(queue='test')
# publish
body='Hello World!'
print(" [x] Sent %r" % body)
channel.basic_publish(exchange='',
routing_key='test',
body=body)
# subscribe
def callback(ch, method, properties, body):
print(" [x] Received %r" % body)
connection.close()
channel.basic_consume(callback,
queue='test',
no_ack=True)
channel.start_consuming()
``` |
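The configuration loop above looks up environment variables named `SECTION_KEY` and casts them to the type of the corresponding default; a minimal illustration of that convention (values are invented):
```python
# Hypothetical override before running start.py: each variable maps to
# config['rabbitmq'][<key>] and is cast via type(value)(env), so PORT
# becomes an int again even though the environment only stores strings.
import os

os.environ['RABBITMQ_HOST'] = 'rabbit.example.com'
os.environ['RABBITMQ_PORT'] = '5673'
os.environ['RABBITMQ_USER'] = 'myuser'
```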
{
"source": "jonasehrlich/argparse-shell",
"score": 3
} |
#### File: argparse-shell/argparse_shell/decorators.py
```python
import typing as ty
from . import constants
def no_shell_cmd(func: ty.Any) -> ty.Any:
"""
Decorator to mark a function, that it is not added as a command to the argument parser or the interactive shell
"""
setattr(func, constants.ARGPARSE_SHELL_CMD_ATTRIBUTE_NAME, False)
return func
def command_name(name: str) -> ty.Any:
"""Decorator to explicitly set a name for a command"""
def inner(func: ty.Any):
setattr(func, constants.ARGPARSE_SHELL_CMD_ATTRIBUTE_NAME, name)
return func
return inner
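# Hedged usage sketch (added for illustration; not part of the original module).
# `Greeter` is a hypothetical class showing how the decorators mark methods:
# `command_name` renames the shell command, `no_shell_cmd` hides the method.
if __name__ == "__main__":
    class Greeter:
        @command_name("hi")
        def greet(self, name: str) -> str:
            return f"Hello {name}"

        @no_shell_cmd
        def _internal(self) -> None:
            ...

    # The decorators only set an attribute; the namespace builder reads it later.
    print(getattr(Greeter.greet, constants.ARGPARSE_SHELL_CMD_ATTRIBUTE_NAME))      # "hi"
    print(getattr(Greeter._internal, constants.ARGPARSE_SHELL_CMD_ATTRIBUTE_NAME))  # False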
```
#### File: argparse-shell/argparse_shell/namespace.py
```python
from __future__ import annotations
import collections
import inspect
import typing as ty
from . import utils
from .command import Command, UnsupportedCommandTypeError, UnboundCommand
__all__ = ["Namespace", "UnboundNamespace"]
T = ty.TypeVar("T")
Command_T = ty.TypeVar("Command_T")
class DuplicateCommandNameError(KeyError):
"""Raised if a command name is added for a second time to a namespace"""
class _NamespaceBase(collections.UserDict, ty.Dict[str, Command_T]):
def __repr__(self) -> str:
return f"{self.__class__.__name__}({super().__repr__()})"
def __setitem__(self, key: str, item: Command_T) -> None:
if key in self:
raise DuplicateCommandNameError(f"Command '{key}' is already defined in namespace")
return super().__setitem__(key, item)
class Namespace(_NamespaceBase[Command]):
@classmethod
def from_object(
cls: ty.Type[T], obj: ty.Any, nested_namespaces: ty.Optional[ty.Mapping[str, UnboundNamespace]] = None
) -> T:
"""Build a namespace from an object. The namespace is a mapping of command names to callback functions.
        This layer wraps coroutine functions and descriptors in functions, to allow them to be called directly.
:param obj: Object to build the namespace from
:type obj: ty.Any
        :param nested_namespaces: Mapping of namespace names to unbound, nested namespaces, defaults to None
        :type nested_namespaces: ty.Mapping[str, UnboundNamespace], optional
:return: Mapping of command names defined in an object
:rtype: Namespace
"""
unbound_namespace = UnboundNamespace.from_object(obj, nested_namespaces=nested_namespaces)
return unbound_namespace.bind(obj, cls)
class UnboundNamespace(_NamespaceBase[UnboundCommand]):
def bind(self, obj: ty.Any, namespace_cls: ty.Type[T] = Namespace) -> T:
namespace = namespace_cls()
for cmd in self.values():
namespace[cmd.name] = cmd.bind(obj)
return namespace
@classmethod
def from_object( # noqa: C901
cls: ty.Type[T], obj: ty.Any, nested_namespaces: ty.Optional[ty.Mapping[str, UnboundNamespace]] = None
) -> T:
"""Build a namespace from an object. The namespace is a mapping of command names to callback functions.
        This layer wraps coroutine functions and descriptors in functions, to allow them to be called directly.
:param obj: Object to build the namespace from
:type obj: ty.Any
:param nested_namespaces: Mapping of namespace names to unbound, nested namespaces, defaults to None
:type nested_namespaces: ty.Mapping[str, UnboundNamespace], optional
:return: Mapping of command names defined in a namespace to :py:class:`UnboundCommand` objects
:rtype: UnboundNamespace
"""
namespace = cls()
nested_namespaces = dict(nested_namespaces) if nested_namespaces else dict()
if inspect.isclass(obj) or inspect.ismodule(obj):
detect_obj = obj
is_instance = False
else:
# Use the class of arbitrary objects to build a namespace
detect_obj = obj.__class__
is_instance = True
for name, nested_command in inspect.getmembers(detect_obj):
if not utils.is_shell_cmd(nested_command, name):
continue
nested_namespace = nested_namespaces.pop(name, None)
if nested_namespace:
for cmd_name, nested_command in nested_namespace.items():
namespace_cmd = nested_command.for_namespace(name)
namespace[namespace_cmd.name] = namespace_cmd
continue
cmd_name = utils.get_command_name(nested_command, name)
try:
namespace[cmd_name] = UnboundCommand.from_callable(cmd_name, nested_command)
except UnsupportedCommandTypeError:
pass
if nested_namespaces and is_instance:
# We have still nested namespaces left and the object argument was not a class or a module,
# check if the namespaces exist in the object and were defined during initialization of the class
nested_namespaces_copy = dict(nested_namespaces)
instance_attributes = set(dir(obj))
for name, nested_namespace in nested_namespaces_copy.items():
if name in instance_attributes:
for cmd_name, nested_command in nested_namespace.items():
namespace_cmd = nested_command.for_namespace(name)
namespace[namespace_cmd.name] = nested_command.for_namespace(name)
nested_namespaces.pop(name, None)
if nested_namespaces:
# Nested namespaces were defined but could not be found, raise a RuntimeError
raise RuntimeError(
f"Defined nested namespaces: {list(nested_namespaces)} could not be found in object {obj!r}"
)
return namespace
```
#### File: argparse-shell/test/test_argparse_shell.py
```python
import types
import typing as ty
import pytest
from argparse_shell import ArgparseShell
T = ty.TypeVar("T", int, float, str, bytes)
class Calculator:
"""Super calculator"""
def add(self, a: T, b: T) -> T:
"""Add two numbers
:param a: First number
:param b: Second number
:return: Sum of two numbers
"""
return a + b
def div(self, a: float, b: float) -> float:
"""
Divide numbers
:param a: First number
:param b: Second number
:return: Division of two numbers"""
return a / b
def multiply(self, a: float, b: float) -> float:
"""Multiply two numbers
:param a: First number
:param b: Second number
:return: Product of two numbers
"""
return a * b
calculator_module = types.ModuleType("calculator")
setattr(calculator_module, "add", lambda a, b: a + b)
setattr(calculator_module, "div", lambda a, b: a / b)
def test_cli_instance(capsys: pytest.CaptureFixture, subtests):
"""Test that we can run methods through a CLI created with an instance and that the output is printed"""
drv = Calculator()
shell = ArgparseShell.from_object(drv, "calc")
a = 1
b = 5
with subtests.test("add"):
shell.main(["add", str(a), str(b)])
captured = capsys.readouterr()
assert captured.out.strip() == str(a + b)
with subtests.test("div"):
shell.main(["div", str(a), str(b)])
captured = capsys.readouterr()
assert captured.out.strip() == str(a / b)
with subtests.test("div0"):
with pytest.raises(ZeroDivisionError):
shell.main(["div", str(a), "0"])
def test_cli_module(capsys: pytest.CaptureFixture, subtests):
"""Test that we can run methods through a CLI created with a and that the output is printed"""
shell = ArgparseShell.from_object(calculator_module, "calc")
a = 1
b = 5
with subtests.test("add"):
shell.main(["add", str(a), str(b)])
captured = capsys.readouterr()
assert captured.out.strip() == str(a + b)
with subtests.test("div"):
shell.main(["div", str(a), str(b)])
captured = capsys.readouterr()
assert captured.out.strip() == str(a / b)
with subtests.test("div0"):
with pytest.raises(ZeroDivisionError):
shell.main(["div", str(a), "0"])
``` |
{
"source": "jonasek369/puzzle-game",
"score": 3
} |
#### File: jonasek369/puzzle-game/mapGenerator.py
```python
import time
import random
from colorama import Fore
class mapGenerator:
def __init__(self, current_level, preset="abcdefghij"):
"""
:param current_level: Int : number of current level
        :param preset: Str : string of the level layout
"""
self.map = dict()
self.biom_info = {"land": 0, "box": 0, "destination": 0}
self.torf = [
True,
False
]
self.max_boxes = current_level
self.alphabet = list(preset)
self.borders = []
for i in range(len(self.alphabet)):
self.borders.append(f"{preset[0]}{i + 1}")
for i in self.alphabet:
self.borders.append(f"{i}1")
for i in range(len(self.alphabet)):
self.borders.append(f"{self.alphabet[len(self.alphabet) - 1]}{i + 1}")
for i in self.alphabet:
self.borders.append(f"{i}{len(self.alphabet)}")
# all directions used in game
self.directions = [
"up",
"down",
"left",
"right"
]
# biom/tile type
self.bioms = [
"land",
"box",
"destination"
]
def generate_map(self, debugger=False):
"""
        :param debugger: Bool : True to print debug/timing info; use False for normal gameplay
:return:
"""
if debugger:
start = time.time()
for pos, x in enumerate(self.alphabet):
for y in range(len(self.alphabet)):
y = y + 1
up, down, left, right = self.create_cords_for_axies(x, y, pos)
self.map[f"{x}{y}"] = {
"cords": f"{x}{y}",
"biom": "",
"up": up,
"left": left,
"right": right,
"down": down
}
if debugger:
print("generated map with layout of", self.alphabet, "took", round(time.time() - start, 10) * 1000, "ms")
return self.map
def generate_bioms(self, debugger=False):
"""
        :param debugger: Bool : True to print debug/timing info; use False for normal gameplay
:return:
"""
if debugger:
start = time.time()
self.biom_info["box"] = 0
self.biom_info["destination"] = 0
while self.biom_info["destination"] != self.max_boxes:
if self.biom_info["box"] == self.max_boxes:
break
random_x = random.choice(self.alphabet)
random_y = random.randint(1, len(self.alphabet))
if self.map[f"{random_x}{random_y}"]["cords"] not in self.borders:
if self.map[f"{random_x}{random_y}"]["biom"] == "":
self.map[f"{random_x}{random_y}"]["biom"] = "box"
self.biom_info["box"] += 1
else:
continue
else:
continue
while True:
if self.biom_info["destination"] == self.max_boxes:
break
random_x = random.choice(self.alphabet)
random_y = random.randint(1, len(self.alphabet))
if self.map[f"{random_x}{random_y}"]["biom"] == "":
self.map[f"{random_x}{random_y}"]["biom"] = "destination"
self.biom_info["destination"] += 1
else:
continue
# cleanup
for i in self.map:
if self.map[i]["biom"] == "":
self.map[i]["biom"] = "land"
else:
continue
if debugger:
print("created bioms with set of", self.biom_info, "took", round(time.time() - start, 10) * 1000, "ms")
def show_map(self, player_pos):
        # render function
        """
        :param player_pos: Str : player position, e.g. 'a1', used to highlight the player on the map
:return:
"""
form_map = ""
for i in self.map:
if self.map[i]["cords"] == player_pos:
if str(len(self.alphabet)) in self.map[i]["cords"]:
form_map += Fore.MAGENTA + i + Fore.WHITE + "\n"
continue
else:
form_map += Fore.MAGENTA + i + Fore.WHITE
continue
if self.map[i]["biom"] == "land":
if str(len(self.alphabet)) in self.map[i]["cords"]:
form_map += Fore.GREEN + i + Fore.WHITE + "\n"
continue
else:
form_map += Fore.GREEN + i + Fore.WHITE
continue
#
if self.map[i]["biom"] == "box":
if str(len(self.alphabet)) in self.map[i]["cords"]:
form_map += Fore.YELLOW + i + Fore.WHITE + "\n"
continue
else:
form_map += Fore.YELLOW + i + Fore.WHITE
continue
if self.map[i]["biom"] == "destination":
if str(len(self.alphabet)) in self.map[i]["cords"]:
form_map += Fore.RED + i + Fore.WHITE + "\n"
continue
else:
form_map += Fore.RED + i + Fore.WHITE
continue
#
if self.map[i]["biom"] == "locked_box":
if str(len(self.alphabet)) in self.map[i]["cords"]:
form_map += Fore.CYAN + i + Fore.WHITE + "\n"
continue
else:
form_map += Fore.CYAN + i + Fore.WHITE
continue
if str(len(self.alphabet)) in self.map[i]["cords"]:
form_map += i + "\n"
continue
else:
form_map += i
continue
return form_map
def create_cords_for_axies(self, x, y, pos):
"""
        :param x: Str : example a or h
        :param y: Int : example 1 or 3
        :param pos: Int : position in the enumerate of the preset
:return:
"""
if pos - 1 >= 0:
try:
up = str(self.alphabet[pos - 1]) + str(y)
except IndexError:
up = None
else:
up = None
# down
try:
down = str(self.alphabet[pos + 1]) + str(y)
except IndexError:
down = None
# left
if len(self.alphabet) >= y - 1 > 0:
left = x + str(y - 1)
else:
left = None
# right
if len(self.alphabet) >= y + 1 > 0:
right = x + str(y + 1)
else:
right = None
return up, down, left, right
def create_new_level(self):
        # clears the current map and creates a new map with level + 1
self.map.clear()
self.max_boxes += 1
mapGenerator.generate_map(self, debugger=False) # enabled debugger for more info (timing)
mapGenerator.generate_bioms(self, debugger=False) # enabled debugger for more info (timing)
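# Hedged usage sketch (added for illustration; not part of the original file).
# Builds a level-1 map, places bioms and renders it; the player position "b2"
# is an arbitrary example coordinate.
if __name__ == "__main__":
    generator = mapGenerator(current_level=1)
    generator.generate_map(debugger=True)
    generator.generate_bioms(debugger=True)
    print(generator.show_map(player_pos="b2"))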
``` |
{
"source": "jonasengelmann/bibliographic_corpus_webapp",
"score": 2
} |
#### File: bibliographic_corpus_webapp/app/app.py
```python
from flask import Flask
# Local imports
from app import main, corpus
from app.extensions import sparqlstore
def create_app(config_object='app.settings'):
'''
Create application factory, as explained
here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
'''
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
return app
def register_blueprints(app):
'''Register Flask blueprints.'''
app.register_blueprint(main.views.blueprint)
app.register_blueprint(corpus.views.blueprint)
return None
def register_extensions(app):
'''Register Flask extensions.'''
sparqlstore.init_app(app)
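# Hedged usage sketch (added for illustration; not part of the original module):
# the factory is typically driven by Flask's CLI, e.g.
#   FLASK_APP="app.app:create_app()" flask run
# or instantiated directly, as below, for local development.
if __name__ == "__main__":
    create_app().run(debug=True)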
```
#### File: app/main/views.py
```python
from flask import Blueprint, render_template
blueprint = Blueprint('main', __name__, static_folder='../static')
@blueprint.route('/')
@blueprint.route('/query')
def query():
return render_template('main/query.html')
```
#### File: bibliographic_corpus_webapp/flask_sparql/flask_sparql.py
```python
from flask import current_app, _app_ctx_stack
from SPARQLWrapper import SPARQLWrapper, SPARQLExceptions
class SPARQLStore(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
pass
def connect(self):
return SPARQLWrapper(
endpoint=current_app.config['SPARQL_ENDPOINT'],
returnFormat='json'
)
def connection(self):
ctx = _app_ctx_stack.top
if ctx is not None:
if not hasattr(ctx, 'sparqlstore'):
ctx.sparqlstore = self.connect()
return ctx.sparqlstore
def query(self, query):
sparqlstore = self.connection()
sparqlstore.setMethod('GET')
sparqlstore.setQuery(query)
try:
results = sparqlstore.queryAndConvert()
except SPARQLExceptions.SPARQLWrapperException as ex:
raise ex
return results['results']['bindings']
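# Hedged usage sketch (added for illustration; not part of the original module).
# Assumes a Flask app whose config sets SPARQL_ENDPOINT and a SPARQL endpoint
# reachable at that URL; both the URL and the query below are placeholders.
if __name__ == "__main__":
    from flask import Flask

    demo_app = Flask(__name__)
    demo_app.config['SPARQL_ENDPOINT'] = 'http://localhost:3030/dataset/sparql'
    store = SPARQLStore(demo_app)
    with demo_app.app_context():
        print(store.query('SELECT * WHERE { ?s ?p ?o } LIMIT 5'))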
``` |
{
"source": "JonasEngstrom/hkrmlcourse",
"score": 3
} |
#### File: src/hkrmlcourse/drop_old_stocks.py
```python
import pandas
def drop_old_stocks(stock_data: pandas.DataFrame,
threshold: int = 15000) -> pandas.DataFrame:
"""
Drops stocks from a data frame downloaded from Yahoo Finance based on an
age threshold, i.e. stocks that have data for more days than the
threshold value are dropped. Default threshold is 15,000 days.
:param stock_data: Stock data from Yahoo Finance.
:type stock_data: pandas.DataFrame
:param threshold: Maximum allowed number of days for which data can exist.
:type threshold: int
:return: A data frame without the dropped stocks.
:rtype: pandas.DataFrame
"""
ticker_ages = stock_data.Open.count()
old_tickers = ticker_ages[ticker_ages > threshold].index
return stock_data.drop(old_tickers, axis = 1, level = 1)
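# Hedged usage sketch (added for illustration; not part of the original module).
# The frame below is a minimal stand-in for the Yahoo Finance layout the
# function expects, i.e. a column MultiIndex of (price type, ticker); the
# ticker names and values are made up.
if __name__ == "__main__":
    import numpy as np

    dates = pandas.date_range("1950-01-01", periods=20000, freq="D")
    columns = pandas.MultiIndex.from_product([["Open"], ["OLD", "NEW"]])
    data = pandas.DataFrame(np.ones((len(dates), 2)), index=dates, columns=columns)
    # "NEW" only has prices for the last 1000 days, "OLD" for all 20000 days.
    data.loc[dates[:-1000], ("Open", "NEW")] = np.nan
    # With the default threshold of 15000 days, "OLD" is dropped and "NEW" kept.
    print(drop_old_stocks(data).columns.get_level_values(1).unique())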
```
#### File: src/hkrmlcourse/drop_young_stocks.py
```python
import pandas
def drop_young_stocks(stock_data: pandas.DataFrame,
threshold: int = 3650) -> pandas.DataFrame:
"""
Drops stocks from a data frame downloaded from Yahoo Finance based on an
age threshold, i.e. stocks that have data for fewer days than the
threshold value are dropped. Default threshold is 3,650 days.
:param stock_data: Stock data from Yahoo Finance.
:type stock_data: pandas.DataFrame
:param threshold: Minimum required number of days for which data exists.
:type threshold: int
:return: A data frame without the dropped stocks.
:rtype: pandas.DataFrame
"""
ticker_ages = stock_data.Open.count()
young_tickers = ticker_ages[ticker_ages < threshold].index
return stock_data.drop(young_tickers, axis = 1, level = 1)
```
#### File: src/hkrmlcourse/generate_appreciation_labels.py
```python
import pandas
import hkrmlcourse.generate_multiindex
def generate_appreciation_labels(stock_history: pandas.DataFrame,
shift_days: int = -1) -> pandas.DataFrame:
"""
Takes a stock history data frame from Yahoo Finance and returns a data
frame with boolean labels whether the stock appreciated the day after
(with default settings).
:param stock_history: Stock history from Yahoo Finance.
:type stock_history: pandas.DataFrame
:param shift_days: Number of days to shift labels (default -1).
:type shift_days: int
    :return: Labels indexed by date and ticker symbols.
:rtype: pandas.DataFrame
"""
label_data_frame = stock_history.Close / stock_history.Open - 1 > 0
shifted_data_frame = label_data_frame.shift(shift_days)
column_titles = hkrmlcourse.generate_multiindex.generate_multiindex(
shifted_data_frame,
f"BoolIntraDayAppreciation{abs(shift_days)}Day"
f"{'s' if abs(shift_days) != 1 else ''}InThe"
f"{'Future' if shift_days <= 0 else 'Past'}"
)
shifted_data_frame.columns = column_titles
return shifted_data_frame
```
#### File: src/hkrmlcourse/make_divisible_by.py
```python
import pandas
def make_divisible_by(input_data_frame: pandas.DataFrame,
divisor: int) -> pandas.DataFrame:
"""
    Removes the first rows of a data frame so that the number of rows is
    divisible by a desired number.
:param input_data_frame: The input data frame.
:type input_data_frame: pandas.DataFrame
:param divisor: The desired divisor.
:type divisor: int
:return: A dataframe with a number of rows divisible by the divisor.
:rtype: pandas.DataFrame
"""
    return input_data_frame[len(input_data_frame) % divisor:]
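# Hedged worked example (added for illustration; not part of the original module):
# a 10-row frame with divisor 4 keeps the last 8 rows, since 10 % 4 == 2 rows
# are dropped from the start.
if __name__ == "__main__":
    frame = pandas.DataFrame({"value": range(10)})
    print(len(make_divisible_by(frame, 4)))  # 8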
```
#### File: src/hkrmlcourse/setup_tpu.py
```python
import os
import tensorflow as tf
from tensorflow import keras
def setup_tpu() -> tf.distribute.TPUStrategy:
"""
Sets up TPU for usage in Google Colab. Returns a TPU strategy.
:return: A TPU strategy.
:rtype: tf.distribute.TPUStrategy
"""
TF_MASTER = f"grpc://{os.environ['COLAB_TPU_ADDR']}"
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(TF_MASTER)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
return tf.distribute.TPUStrategy(resolver)
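# Hedged usage sketch (added for illustration; not part of the original module).
# Only works inside Google Colab with a TPU runtime; the toy model below is an
# arbitrary example of building a model under the returned strategy's scope.
if __name__ == "__main__":
    strategy = setup_tpu()
    with strategy.scope():
        model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
        model.compile(optimizer="adam", loss="mse")
    print("Replicas in sync:", strategy.num_replicas_in_sync)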
``` |
{
"source": "JonasErkert/ULTA",
"score": 2
} |
#### File: JonasErkert/ULTA/__init__.py
```python
bl_info = {
"name": "UlTA",
"description": "Useful little tools for gamedev",
"author": "<NAME>",
"version": (0, 1),
"blender": (2, 80, 0),
"location": "View3D",
"category": "Object",
}
import bpy
from . ulta_menus import ULTA_MT_Pie_Menu
from . ulta_quick_export import ULTA_QuickExport
from . ulta_join_multiuser import ULTA_JoinMultiuser
from . ulta_create_bb import ULTA_CreateBB
from . ulta_prefix_selected import ULTA_PrefixSelected
from . ulta_apply_modifiers import ULTA_ApplyModifiers
addon_keymaps = []
classes = (
ULTA_QuickExport,
ULTA_JoinMultiuser,
ULTA_CreateBB,
ULTA_PrefixSelected,
ULTA_ApplyModifiers,
ULTA_MT_Pie_Menu
)
def register():
for c in classes:
bpy.utils.register_class(c)
# add keymap entry
kc = bpy.context.window_manager.keyconfigs.addon
km = kc.keymaps.new(name='3D View', space_type='VIEW_3D')
kmi_menu = km.keymap_items.new("wm.call_menu_pie", "COMMA", "PRESS", shift=True)
kmi_menu.properties.name = ULTA_MT_Pie_Menu.bl_idname
addon_keymaps.append((km, kmi_menu))
def unregister():
for c in classes:
bpy.utils.unregister_class(c)
# remove keymap entry
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
```
#### File: JonasErkert/ULTA/ulta_prefix_selected.py
```python
import bpy
from bpy.types import Operator
class ULTA_PrefixSelected(Operator):
bl_idname = "object.prefix_selected"
bl_label = "Prefix Selected"
bl_description = "Prefix selected objects with SM_, SK_ and materials with M_ or UCX"
bl_options = {'REGISTER', 'UNDO'}
is_collision_mesh: bpy.props.BoolProperty(
name="Is Collision Mesh",
description="Prefix with UCX_ and append _0x",
default=False
)
@classmethod
def poll(cls, context):
return context.object.select_get()
def execute(self, context):
selected_objs = bpy.context.selected_objects
if not self.is_collision_mesh:
static_mesh_prefix = 'SM_'
skeletal_mesh_prefix = 'SK_'
material_prefix = 'M_'
# rename meshes and armature
for obj in selected_objs:
obj_name = obj.name
if obj.type == 'MESH' and static_mesh_prefix not in obj_name:
obj.name = static_mesh_prefix + obj_name
if obj.type == 'ARMATURE' and skeletal_mesh_prefix not in obj_name:
obj.name = skeletal_mesh_prefix + obj.name
# rename materials
for mat in bpy.data.materials:
if material_prefix not in mat.name:
mat.name = material_prefix + mat.name
else:
collision_mesh_prefix = "UCX_"
# check if already prefixed
for i, obj in enumerate(selected_objs):
if collision_mesh_prefix not in obj.name:
obj.name = collision_mesh_prefix + obj.name + "_0" + str(i+1)
return {'FINISHED'}
```
#### File: JonasErkert/ULTA/ulta_utils.py
```python
import bpy
def activate_collection_of_selected_obj():
obj = bpy.context.object
object_coll = obj.users_collection
# recursively transverse layer_collection for a particular name
def recur_layer_collection(layer_coll, coll_name):
found = None
if layer_coll.name == coll_name:
return layer_coll
for layer in layer_coll.children:
found = recur_layer_collection(layer, coll_name)
if found:
return found
# switching active Collection to active Object selected
for coll in object_coll:
layer_collection = bpy.context.view_layer.layer_collection
layer_coll = recur_layer_collection(layer_collection, coll.name)
bpy.context.view_layer.active_layer_collection = layer_coll
# This doesn't work, only gets correct key when creating new collection
# collection = bpy.data.collections.new('My Collection')
# bpy.context.scene.collection.children.link(collection)
# parent_collection = selected_obj.users_collection
# layer_collection = bpy.context.view_layer.layer_collection.children[parent_collection[0].name]
# bpy.context.view_layer.active_layer_collection = layer_collection
``` |
{
"source": "jonas-eschle/hep_ml",
"score": 3
} |
#### File: hep_ml/hep_ml/speedup.py
```python
from __future__ import division, print_function, absolute_import
import numpy
import pandas
from collections import OrderedDict
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from .commonutils import to_pandas_dataframe, check_xyw, check_sample_weight, weighted_quantile
__author__ = '<NAME>'
class LookupClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, base_estimator, n_bins=16, max_cells=500000000, keep_trained_estimator=True):
"""
LookupClassifier splits each of features into bins, trains a base_estimator to use this data.
To predict class for new observation, results of base_estimator are kept for all possible combinations of bins,
and saved together
:param n_bins:
* int: how many bins to use for each axis
* dict: feature_name -> int, specialize how many bins to use for each axis
* dict: feature_name -> list of floats, set manually edges of bins
By default, the (weighted) quantiles are used to compute bin edges.
:type n_bins: int | dict
:param int max_cells: raise error if lookup table will have more items.
:param bool keep_trained_estimator: if True, trained estimator will be saved.
See also: this idea is used inside LHCb triggers, see <NAME>, <NAME>, 'Bonsai BDT'
Resulting formula is very simple and can be rewritten in other language or environment (C++, CUDA, etc).
"""
self.base_estimator = base_estimator
self.n_bins = n_bins
self.max_cells = max_cells
self.keep_trained_estimator = keep_trained_estimator
def fit(self, X, y, sample_weight=None):
"""Train a classifier and collect predictions for all possible combinations.
:param X: pandas.DataFrame or numpy.array with data of shape [n_samples, n_features]
:param y: array with labels of shape [n_samples]
:param sample_weight: None or array of shape [n_samples] with weights of events
:return: self
"""
self.classes_ = numpy.unique(y)
X, y, normed_weights = check_xyw(X, y, sample_weight=sample_weight, classification=True)
X = to_pandas_dataframe(X)
normed_weights = check_sample_weight(y, sample_weight=normed_weights, normalize_by_class=True, normalize=True)
self.bin_edges = self._compute_bin_edges(X, normed_weights=normed_weights)
n_parameter_combinations = numpy.prod([len(bin_edge) + 1 for name, bin_edge in self.bin_edges.items()])
assert n_parameter_combinations <= self.max_cells, \
'the total size of lookup table exceeds {}, ' \
'reduce n_bins or number of features in use'.format(self.max_cells)
transformed_data = self.transform(X)
trained_estimator = clone(self.base_estimator)
fit_params = {}
if sample_weight is not None:
            fit_params['sample_weight'] = sample_weight
trained_estimator.fit(transformed_data, y, **fit_params)
all_lookup_indices = numpy.arange(int(n_parameter_combinations))
all_combinations = self.convert_lookup_index_to_bins(all_lookup_indices)
self._lookup_table = trained_estimator.predict_proba(all_combinations)
if self.keep_trained_estimator:
self.trained_estimator = trained_estimator
return self
def _compute_bin_edges(self, X, normed_weights):
"""
        Compute the edges of the bins; weighted quantiles are used.
"""
bins_over_axis = OrderedDict()
for column in X.columns:
if isinstance(self.n_bins, int):
bins_over_axis[column] = self.n_bins
else:
bins_over_axis[column] = self.n_bins[column]
bin_edges = OrderedDict()
for column, column_bins in bins_over_axis.items():
if isinstance(column_bins, int):
quantiles = numpy.linspace(0., 1., column_bins + 1)[1:-1]
bin_edges[column] = weighted_quantile(X[column], quantiles=quantiles, sample_weight=normed_weights)
else:
bin_edges[column] = numpy.array(list(column_bins))
return bin_edges
def convert_bins_to_lookup_index(self, bins_indices):
"""
:param bins_indices: numpy.array of shape [n_samples, n_columns], filled with indices of bins.
:return: numpy.array of shape [n_samples] with corresponding index in lookup table
"""
lookup_indices = numpy.zeros(len(bins_indices), dtype=int)
bins_indices = numpy.array(bins_indices)
assert bins_indices.shape[1] == len(self.bin_edges)
for i, (column_name, bin_edges) in enumerate(self.bin_edges.items()):
lookup_indices *= len(bin_edges) + 1
lookup_indices += bins_indices[:, i]
return lookup_indices
def convert_lookup_index_to_bins(self, lookup_indices):
"""
:param lookup_indices: array of shape [n_samples] with positions at lookup table
:return: array of shape [n_samples, n_features] with indices of bins.
"""
result = numpy.zeros([len(lookup_indices), len(self.bin_edges)], dtype='uint8')
for i, (column_name, bin_edges) in list(enumerate(self.bin_edges.items()))[::-1]:
n_columns = len(bin_edges) + 1
result[:, i] = lookup_indices % n_columns
lookup_indices = lookup_indices // n_columns
return result
def transform(self, X):
"""Convert data to bin indices.
:param X: pandas.DataFrame or numpy.array with data
:return: pandas.DataFrame, where each column is replaced with index of bin
"""
X = to_pandas_dataframe(X)
assert list(X.columns) == list(self.bin_edges.keys()), 'passed dataset with wrong columns'
result = numpy.zeros(X.shape, dtype='uint8')
for i, column in enumerate(X.columns):
edges = self.bin_edges[column]
result[:, i] = numpy.searchsorted(edges, X[column])
return pandas.DataFrame(result, columns=X.columns)
def predict(self, X):
"""Predict class for each event
:param X: pandas.DataFrame with data
:return: array of shape [n_samples] with predicted class labels.
"""
return self.classes_[numpy.argmax(self.predict_proba(X), axis=1)]
def predict_proba(self, X):
""" Predict probabilities for new observations
:param X: pandas.DataFrame with data
:return: probabilities, array of shape [n_samples, n_classes]
"""
bins_indices = self.transform(X)
lookup_indices = self.convert_bins_to_lookup_index(bins_indices)
return self._lookup_table[lookup_indices]
```
#### File: hep_ml/tests/test_gradientboosting.py
```python
from __future__ import division, print_function, absolute_import
import numpy
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.base import clone
from hep_ml.commonutils import generate_sample
from hep_ml.losses import LogLossFunction, MSELossFunction, AdaLossFunction
from hep_ml import losses
from hep_ml.gradientboosting import UGradientBoostingClassifier, UGradientBoostingRegressor
import copy
def test_gb_with_ada_and_log(n_samples=1000, n_features=10, distance=0.6):
testX, testY = generate_sample(n_samples, n_features, distance=distance)
trainX, trainY = generate_sample(n_samples, n_features, distance=distance)
for loss in [LogLossFunction(), AdaLossFunction()]:
clf = UGradientBoostingClassifier(loss=loss, min_samples_split=20, max_depth=5, learning_rate=.2,
subsample=0.7, n_estimators=10, train_features=None)
clf.fit(trainX, trainY)
assert clf.n_features == n_features
assert len(clf.feature_importances_) == n_features
# checking that predict proba works
for p in clf.staged_predict_proba(testX):
assert p.shape == (n_samples, 2)
assert numpy.all(p == clf.predict_proba(testX))
assert roc_auc_score(testY, p[:, 1]) > 0.8, 'quality is too low'
# checking clonability
_ = clone(clf)
clf_copy = copy.deepcopy(clf)
assert (clf.predict_proba(trainX) == clf_copy.predict_proba(trainX)).all(), 'copied classifier is different'
def test_gradient_boosting(n_samples=1000):
"""
    Testing that GradientBoosting works with different loss functions
"""
# Generating some samples correlated with first variable
distance = 0.6
testX, testY = generate_sample(n_samples, 10, distance)
trainX, trainY = generate_sample(n_samples, 10, distance)
# We will try to get uniform distribution along this variable
uniform_features = ['column0']
loss1 = LogLossFunction()
loss2 = AdaLossFunction()
loss3 = losses.CompositeLossFunction()
loss4 = losses.KnnAdaLossFunction(uniform_features=uniform_features, uniform_label=1)
loss5 = losses.KnnAdaLossFunction(uniform_features=uniform_features, uniform_label=[0, 1])
loss6bin = losses.BinFlatnessLossFunction(uniform_features, fl_coefficient=2., uniform_label=0)
loss7bin = losses.BinFlatnessLossFunction(uniform_features, fl_coefficient=2., uniform_label=[0, 1])
loss6knn = losses.KnnFlatnessLossFunction(uniform_features, fl_coefficient=2., uniform_label=1)
loss7knn = losses.KnnFlatnessLossFunction(uniform_features, fl_coefficient=2., uniform_label=[0, 1])
for loss in [loss1, loss2, loss3, loss4, loss5, loss6bin, loss7bin, loss6knn, loss7knn]:
clf = UGradientBoostingClassifier(loss=loss, min_samples_split=20, max_depth=5, learning_rate=0.2,
subsample=0.7, n_estimators=25, train_features=None) \
.fit(trainX[:n_samples], trainY[:n_samples])
result = clf.score(testX, testY)
assert result >= 0.7, "The quality is too poor: {} with loss: {}".format(result, loss)
trainX['fake_request'] = numpy.random.randint(0, 4, size=len(trainX))
for loss in [losses.MSELossFunction(),
losses.MAELossFunction(),
losses.RankBoostLossFunction(request_column='fake_request')]:
print(loss)
clf = UGradientBoostingRegressor(loss=loss, max_depth=3, n_estimators=50, learning_rate=0.01, subsample=0.5,
train_features=list(trainX.columns[1:]))
clf.fit(trainX, trainY)
roc_auc = roc_auc_score(testY, clf.predict(testX))
assert roc_auc >= 0.7, "The quality is too poor: {} with loss: {}".format(roc_auc, loss)
def test_gb_regression(n_samples=1000):
X, _ = generate_sample(n_samples, 10, distance=0.6)
y = numpy.tanh(X.sum(axis=1))
clf = UGradientBoostingRegressor(loss=MSELossFunction())
clf.fit(X, y)
y_pred = clf.predict(X)
zeromse = 0.5 * mean_squared_error(y, y * 0.)
assert mean_squared_error(y, y_pred) < zeromse, 'something wrong with regression quality'
def test_gb_ranking(n_samples=1000):
distance = 0.6
testX, testY = generate_sample(n_samples, 10, distance)
trainX, trainY = generate_sample(n_samples, 10, distance)
rank_variable = 'column1'
trainX[rank_variable] = numpy.random.randint(0, 3, size=len(trainX))
testX[rank_variable] = numpy.random.randint(0, 3, size=len(testX))
rank_loss1 = losses.RankBoostLossFunction(request_column=rank_variable, update_iterations=1)
rank_loss2 = losses.RankBoostLossFunction(request_column=rank_variable, update_iterations=2)
rank_loss3 = losses.RankBoostLossFunction(request_column=rank_variable, update_iterations=10)
for loss in [rank_loss1, rank_loss2, rank_loss3]:
clf = UGradientBoostingRegressor(loss=loss, min_samples_split=20, max_depth=5, learning_rate=0.2,
subsample=0.7, n_estimators=25, train_features=None) \
.fit(trainX[:n_samples], trainY[:n_samples])
result = roc_auc_score(testY, clf.predict(testX))
assert result >= 0.8, "The quality is too poor: {} with loss: {}".format(result, loss)
def test_constant_fitting(n_samples=1000, n_features=5):
"""
    Testing if the initial constant is fitted properly
"""
X, y = generate_sample(n_samples=n_samples, n_features=n_features)
    y = y.astype(float) + 1000.
for loss in [MSELossFunction(), losses.MAELossFunction()]:
gb = UGradientBoostingRegressor(loss=loss, n_estimators=10)
gb.fit(X, y)
p = gb.predict(X)
assert mean_squared_error(p, y) < 0.5
def test_weight_misbalance(n_samples=1000, n_features=10, distance=0.6):
"""
    Testing how classifiers work with highly imbalanced (in terms of weights) datasets.
"""
testX, testY = generate_sample(n_samples, n_features, distance=distance)
trainX, trainY = generate_sample(n_samples, n_features, distance=distance)
trainW = trainY * 10000 + 1
testW = testY * 10000 + 1
for loss in [LogLossFunction(), AdaLossFunction(), losses.CompositeLossFunction()]:
clf = UGradientBoostingClassifier(loss=loss, min_samples_split=20, max_depth=5, learning_rate=.2,
subsample=0.7, n_estimators=10, train_features=None)
clf.fit(trainX, trainY, sample_weight=trainW)
p = clf.predict_proba(testX)
assert roc_auc_score(testY, p[:, 1], sample_weight=testW) > 0.8, 'quality is too low'
```
#### File: hep_ml/tests/test_speedup.py
```python
from __future__ import division, print_function, absolute_import
__author__ = '<NAME>'
import numpy
import pandas
from hep_ml.speedup import LookupClassifier
from hep_ml.commonutils import generate_sample
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
import time
def test_lookup(n_samples=10000, n_features=7, n_bins=8):
X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=0.6)
base_estimator = GradientBoostingClassifier()
clf = LookupClassifier(base_estimator=base_estimator, n_bins=n_bins, keep_trained_estimator=True).fit(X, y)
p = clf.predict_proba(X)
assert roc_auc_score(y, p[:, 1]) > 0.8, 'quality of classification is too low'
assert p.shape == (n_samples, 2)
assert numpy.allclose(p.sum(axis=1), 1), 'probabilities are not summed up to 1'
# checking conversions
lookup_size = n_bins ** n_features
lookup_indices = numpy.arange(lookup_size, dtype=int)
bins_indices = clf.convert_lookup_index_to_bins(lookup_indices=lookup_indices)
lookup_indices2 = clf.convert_bins_to_lookup_index(bins_indices=bins_indices)
assert numpy.allclose(lookup_indices, lookup_indices2), 'something wrong with conversions'
assert len(clf._lookup_table) == n_bins ** n_features, 'wrong size of lookup table'
# checking speed
X = pandas.concat([X] * 10)
start = time.time()
p1 = clf.trained_estimator.predict_proba(clf.transform(X))
time_old = time.time() - start
start = time.time()
p2 = clf.predict_proba(X)
time_new = time.time() - start
print(time_old, ' now takes ', time_new)
assert numpy.allclose(p1, p2), "pipeline doesn't work as expected"
def test_sizes(n_samples=10000, n_features=4, n_bins=8):
X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=0.6)
base_estimator = GradientBoostingClassifier(n_estimators=1)
clf = LookupClassifier(base_estimator=base_estimator, n_bins=n_bins).fit(X, y)
bin_indices = clf.transform(X)
assert numpy.allclose(numpy.max(bin_indices, axis=0) + 1, n_bins)
maximals = OrderedDict()
for column in X.columns:
maximals[column] = numpy.random.randint(low=n_bins // 2, high=n_bins)
clf = LookupClassifier(base_estimator=base_estimator, n_bins=maximals).fit(X, y)
bin_indices = clf.transform(X)
assert numpy.allclose(numpy.max(bin_indices, axis=0) + 1, list(maximals.values()))
assert numpy.allclose(numpy.min(bin_indices, axis=0), 0)
``` |
{
"source": "jonas-eschle/jax",
"score": 2
} |
#### File: jax/experimental/global_device_array.py
```python
from collections import Counter
import dataclasses
import functools
import numpy as np
from typing import Callable, Sequence, Tuple, Union, Mapping, Optional, List, Dict, NamedTuple
from jax import core
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
from jax.interpreters import pxla, xla
from jax._src.util import prod, safe_zip, cache
from jax._src.api import device_put
from jax.interpreters.sharded_jit import PartitionSpec
Shape = Tuple[int, ...]
MeshAxes = Sequence[Union[str, Tuple[str], None]]
DeviceArray = xc.Buffer
Device = xc.Device
ArrayLike = Union[np.ndarray, DeviceArray]
Index = Tuple[slice, ...]
_hashed_index = lambda x: hash(tuple((v.start, v.stop) for v in x))
def _convert_list_args_to_tuple(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
args = [tuple(a) if isinstance(a, list) else a for a in args]
kwargs = {k: (tuple(v) if isinstance(v, list) else v) for k, v in kwargs.items()}
return f(*args, **kwargs)
return wrapper
def _canonicalize_mesh_axes(mesh_axes):
if not isinstance(mesh_axes, PartitionSpec):
pspec = PartitionSpec(*mesh_axes)
else:
pspec = mesh_axes
return pspec
def _get_indices(global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes) -> Tuple[Index, ...]:
# Import here to avoid cyclic import error when importing gda in pjit.py.
from jax.experimental.pjit import get_array_mapping, _prepare_axis_resources
pspec = _canonicalize_mesh_axes(mesh_axes)
parsed_pspec, _, _ = _prepare_axis_resources(pspec, "mesh_axes")
array_mapping = get_array_mapping(parsed_pspec)
# The dtype doesn't matter for creating sharding specs.
aval = core.ShapedArray(global_shape, np.float32)
sharding_spec = pxla.mesh_sharding_specs(
global_mesh.shape, global_mesh.axis_names)(aval, array_mapping)
indices = pxla.spec_to_indices(global_shape, sharding_spec)
return indices # type: ignore
@_convert_list_args_to_tuple
@cache()
def get_shard_indices(global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes) -> Mapping[Device, Index]:
indices = _get_indices(global_shape, global_mesh, mesh_axes)
# The type: ignore is to ignore the type returned by `spec_to_indices`.
return dict(
(d, i)
for d, i in safe_zip(global_mesh.devices.flat, indices)) # type: ignore
@_convert_list_args_to_tuple
@cache()
def get_shard_indices_replica_ids(
global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes) -> Mapping[Device, Tuple[Index, int]]:
return _get_shard_indices_replica_ids_uncached(global_shape, global_mesh, mesh_axes)
def _get_shard_indices_replica_ids_uncached(
global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes) -> Mapping[Device, Tuple[Index, int]]:
indices = _get_indices(global_shape, global_mesh, mesh_axes)
index_to_replica: Dict[int, int] = Counter()
out = {}
unique_shards = 0
for device, index in safe_zip(global_mesh.devices.flat, indices):
h_index = _hashed_index(index)
replica_id = index_to_replica[h_index]
if replica_id == 0:
unique_shards += 1
index_to_replica[h_index] += 1
out[device] = (index, replica_id)
shard_shape = get_shard_shape(global_shape, global_mesh, mesh_axes)
expected_unique_shards = prod(
[g // s for g, s in safe_zip(global_shape, shard_shape) if g != 0 or s != 0])
if expected_unique_shards != unique_shards:
raise RuntimeError(
f'Number of expected unique shards are: {expected_unique_shards} but '
f'got {unique_shards}. Please file a bug at '
'https://github.com/google/jax/issues.')
return out
@_convert_list_args_to_tuple
@cache()
def get_shard_shape(global_shape, global_mesh, mesh_axes) -> Shape:
chunk_size = []
for mesh_axis, size in zip(mesh_axes, global_shape):
if not mesh_axis:
chunk_size.append(size)
elif isinstance(mesh_axis, tuple):
m = prod([global_mesh.shape[ma] for ma in mesh_axis])
chunk_size.append(size // m)
else:
chunk_size.append(size // global_mesh.shape[mesh_axis])
if len(chunk_size) != len(global_shape):
chunk_size.extend(global_shape[len(chunk_size):])
return tuple(chunk_size)
@dataclasses.dataclass(frozen=True)
class Shard:
"""A single data shard of a GlobalDeviceArray.
Args:
device : Which device this shard resides on.
index : The index into the global array of this shard.
replica_id : Integer id indicating which replica of the global array this
shard is part of. Always 0 for fully sharded data
(i.e. when there’s only 1 replica).
data : The data of this shard. None if ``device`` is non-local.
"""
device: Device
index: Index
replica_id: int
# None if this `Shard` lives on a non-local device.
data: Optional[DeviceArray] = None
class _GdaFastPathArgs(NamedTuple):
global_indices_replica_ids: Mapping[Device, Tuple[Index, int]]
local_devices: Sequence[Device]
class GlobalDeviceArray:
"""A logical array with data sharded across multiple devices and processes.
If you’re not already familiar with JAX’s multi-process programming model,
please read https://jax.readthedocs.io/en/latest/multi_process.html.
A GlobalDeviceArray (GDA) can be thought of as a view into a single logical
array sharded across processes. The logical array is the “global” array, and
each process has a GlobalDeviceArray object referring to the same global array
(similarly to how each process runs a multi-process pmap or pjit). Each process
can access the shape, dtype, etc. of the global array via the GDA, pass the
GDA into multi-process pjits, and get GDAs as pjit outputs (coming soon: xmap
and pmap). However, each process can only directly access the shards of the
global array data stored on its local devices.
GDAs can help manage the inputs and outputs of multi-process computations.
A GDA keeps track of which shard of the global array belongs to which device,
and provides callback-based APIs to materialize the correct shard of the data
needed for each local device of each process.
A GDA consists of data shards. Each shard is stored on a different device.
There are local shards and global shards. Local shards are those on local
devices, and the data is visible to the current process. Global shards are
those across all devices (including local devices), and the data isn’t visible
if the shard is on a non-local device with respect to the current process.
Please see the ``Shard`` class to see what information is stored inside that
data structure.
Note: to make pjit output GlobalDeviceArrays, set the environment variable
``JAX_PARALLEL_FUNCTIONS_OUTPUT_GDA=true`` or add the following to your code:
``jax.config.update('jax_parallel_functions_output_gda', True)``
Args:
global_shape : The global shape of the array.
global_mesh : The global mesh representing devices across multiple
processes.
mesh_axes : A sequence with length less than or equal to the rank of the
global array (i.e. the length of the global shape). Each element can be:
* An axis name of ``global_mesh``, indicating that the corresponding
global array axis is partitioned across the given device axis of
``global_mesh``.
* A tuple of axis names of ``global_mesh``. This is like the above option
except the global array axis is partitioned across the product of axes
named in the tuple.
* None indicating that the corresponding global array axis is not
partitioned.
For more information, please see:
https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html#more-information-on-partitionspec
device_buffers: DeviceArrays that are on the local devices of ``global_mesh``.
Attributes:
shape : Global shape of the array.
dtype : Dtype of the global array.
local_shards : List of :class:`Shard` on the local devices of the current process.
Data is materialized for all local shards.
global_shards : List of all :class:`Shard` of the global array. Data isn’t
available if a shard is on a non-local device with respect to the current
process.
is_fully_replicated : True if the full array value is present on all devices
of the global mesh.
Example::
# Logical mesh is (hosts, devices)
assert global_mesh.shape == {'x': 4, 'y': 8}
global_input_shape = (64, 32)
mesh_axes = P('x', 'y')
# Dummy example data; in practice we wouldn't necessarily materialize global data
# in a single process.
global_input_data = np.arange(
np.prod(global_input_shape)).reshape(global_input_shape)
def get_local_data_slice(index):
# index will be a tuple of slice objects, e.g. (slice(0, 16), slice(0, 4))
# This method will be called per-local device from the GDA constructor.
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, get_local_data_slice)
f = pjit(lambda x: x @ x.T, out_axis_resources = P('y', 'x'))
with mesh(global_mesh.shape, global_mesh.axis_names):
out = f(gda)
print(type(out)) # GlobalDeviceArray
print(out.shape) # global shape == (64, 64)
print(out.local_shards[0].data) # Access the data on a single local device,
# e.g. for checkpointing
print(out.local_shards[0].data.shape) # per-device shape == (8, 16)
print(out.local_shards[0].index) # Numpy-style index into the global array that
# this data shard corresponds to
# `out` can be passed to another pjit call, out.local_shards can be used to
# export the data to non-jax systems (e.g. for checkpointing or logging), etc.
"""
def __init__(self, global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes, device_buffers: Sequence[DeviceArray],
_gda_fast_path_args: Optional[_GdaFastPathArgs] = None):
self._global_shape = global_shape
self._global_mesh = global_mesh
self._mesh_axes = mesh_axes
self._device_buffers = device_buffers
# Optionally precomputed for performance.
self._gda_fast_path_args = _gda_fast_path_args
self._current_process = xb.process_index()
if self._gda_fast_path_args is None:
self._local_devices = self._global_mesh.local_devices
else:
self._local_devices = self._gda_fast_path_args.local_devices
for db, ld in safe_zip(device_buffers, self._local_devices):
if db.device() != ld:
raise ValueError(
"The `global_mesh.local_devices` and `device_buffers` device order "
"doesn't match. Please use `global_mesh.local_devices` to put "
"arrays on devices instead of `jax.local_devices()`")
self._local_shards = self._create_local_shards()
ss = get_shard_shape(self._global_shape, self._global_mesh, self._mesh_axes)
assert all(db.shape == ss for db in device_buffers), (
f"Expected shard shape {ss} doesn't match the device buffer "
f"shape {device_buffers[0].shape}")
dtype = device_buffers[0].dtype
assert all(db.dtype == dtype for db in device_buffers), (
"Input arrays to GlobalDeviceArray must have matching dtypes, "
f"got: {[db.dtype for db in device_buffers]}")
self.dtype = dtype
def __eq__(self, other: object):
raise NotImplementedError(
"GlobalDeviceArray equality is intentionally unimplemented. "
"Implement desired functionality explicitly, e.g. to check if all "
"values are equal: "
"pjit(lambda x, y: x == y, "
"in_axis_resources=FROM_GDA, out_axis_resources=None)"
)
def __str__(self):
return f'GlobalDeviceArray(shape={self.shape}, dtype={self.dtype})'
def __repr__(self):
return (f'GlobalDeviceArray(shape={self.shape}, dtype={self.dtype}, '
f'global_mesh_shape={dict(self._global_mesh.shape)}, '
f'mesh_axes={self._mesh_axes})')
@property
def shape(self) -> Shape:
return self._global_shape
@property
def is_fully_replicated(self) -> bool:
return self.shape == self.local_data(0).shape
def _create_local_shards(self) -> Sequence[Shard]:
if self._gda_fast_path_args is not None:
global_indices_rid = self._gda_fast_path_args.global_indices_replica_ids
else:
global_indices_rid = get_shard_indices_replica_ids(
self._global_shape, self._global_mesh, self._mesh_axes)
out = []
for db in self._device_buffers:
device = db.device()
index, rid = global_indices_rid[device]
out.append(Shard(device, index, rid, db))
return out
@pxla.maybe_cached_property
def local_shards(self) -> Sequence[Shard]:
for s in self._local_shards:
# Ignore the type because mypy thinks data is None but local_shards
# cannot have data=None which is checked in `_create_local_shards`.
if s.data.aval is None: # type: ignore
s.data.aval = core.ShapedArray(s.data.shape, s.data.dtype) # type: ignore
return self._local_shards
@pxla.maybe_cached_property
def global_shards(self) -> Sequence[Shard]:
# Populating global_shards lazily (i.e. when requested) because populating
    # them eagerly leads to a performance regression when training on large
# models.
# Also as this a cached property, once calculated, it should be cached. So
# multiple accesses should be cheap.
global_indices_rid = get_shard_indices_replica_ids(
self._global_shape, self._global_mesh, self._mesh_axes)
device_to_buffer = dict((db.device(), db) for db in self._device_buffers)
global_shards = []
for device, (index, rid) in global_indices_rid.items():
local_shard = device.process_index == self._current_process
buf = device_to_buffer[device] if local_shard else None
if buf is not None and buf.aval is None:
buf.aval = core.ShapedArray(buf.shape, buf.dtype)
sh = Shard(device, index, rid, buf)
global_shards.append(sh)
return global_shards
def local_data(self, index) -> DeviceArray:
return self.local_shards[index].data
@classmethod
def from_callback(cls, global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes, data_callback: Callable[[Index],
ArrayLike]):
"""Constructs a GlobalDeviceArray via data fetched from ``data_callback``.
``data_callback`` is used to fetch the data for each local slice of the returned GlobalDeviceArray.
Example::
global_input_shape = (8, 2)
global_input_data = np.arange(prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh, mesh_axes, cb)
Args:
global_shape : The global shape of the array
global_mesh : The global mesh representing devices across multiple
processes.
mesh_axes : See the ``mesh_axes`` parameter of GlobalDeviceArray.
data_callback : Callback that takes indices into the global array value as input and
returns the corresponding data of the global array value. The data can be returned
as any array-like object, e.g. a ``numpy.ndarray``.
"""
global_indices_rid = get_shard_indices_replica_ids(
global_shape, global_mesh, mesh_axes)
local_devices = global_mesh.local_devices
dbs = [
device_put(data_callback(global_indices_rid[device][0]), device)
for device in local_devices
]
return cls(global_shape, global_mesh, mesh_axes, dbs,
_gda_fast_path_args=_GdaFastPathArgs(global_indices_rid, local_devices))
@classmethod
def from_batched_callback(cls, global_shape: Shape,
global_mesh: pxla.Mesh, mesh_axes: MeshAxes,
data_callback: Callable[[Sequence[Index]],
Sequence[ArrayLike]]):
"""Constructs a GlobalDeviceArray via batched data fetched from ``data_callback``.
Like ``from_callback``, except the callback function is called only once to fetch all data
local to this process.
Example::
global_input_shape = (8, 2)
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def batched_cb(indices):
self.assertEqual(len(indices),len(global_mesh.local_devices))
return [global_input_data[index] for index in indices]
gda = GlobalDeviceArray.from_batched_callback(global_input_shape, global_mesh, mesh_axes, batched_cb)
Args:
global_shape : The global shape of the array
global_mesh : The global mesh representing devices across multiple
processes.
mesh_axes : See the ``mesh_axes`` parameter of GlobalDeviceArray.
data_callback : Callback that takes a batch of indices into the global array value with
length equal to the number of local devices as input and returns the corresponding data for each index.
The data can be returned as any array-like objects, e.g. ``numpy.ndarray``
"""
global_indices_rid = get_shard_indices_replica_ids(
global_shape, global_mesh, mesh_axes)
local_devices = global_mesh.local_devices
local_indices = [global_indices_rid[d][0] for d in local_devices]
local_arrays = data_callback(local_indices)
dbs = pxla.device_put(local_arrays, local_devices)
return cls(global_shape, global_mesh, mesh_axes, dbs,
_gda_fast_path_args=_GdaFastPathArgs(global_indices_rid, local_devices))
@classmethod
def from_batched_callback_with_devices(
cls, global_shape: Shape, global_mesh: pxla.Mesh,
mesh_axes: MeshAxes,
data_callback: Callable[[Sequence[Tuple[Index, Tuple[Device, ...]]]],
Sequence[DeviceArray]]):
"""Constructs a GlobalDeviceArray via batched DeviceArrays fetched from ``data_callback``.
Like ``from_batched_callback``, except the callback function is responsible for returning on-device data (e.g. by calling ``jax.device_put``).
Example::
global_input_shape = (8, 2)
global_input_data = np.arange(prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(cb_inp):
self.assertLen(cb_inp, len(global_mesh.local_devices))
dbs = []
for inp in cb_inp:
index, devices = inp
array = global_input_data[index]
dbs.extend([jax.device_put(array, device) for device in devices])
return dbs
gda = GlobalDeviceArray.from_batched_callback_with_devices(global_input_shape, global_mesh, mesh_axes, cb)
Args:
global_shape : The global shape of the array
global_mesh : The global mesh representing devices across multiple
processes.
mesh_axes : See the ``mesh_axes`` parameter of GlobalDeviceArray.
      data_callback : Callback that takes a batch of indices into the global array value with
length equal to the number of local devices as input and returns the corresponding data for
each index. The data must be returned as jax DeviceArrays.
"""
global_indices_rid = get_shard_indices_replica_ids(
global_shape, global_mesh, mesh_axes)
local_devices = global_mesh.local_devices
index_to_device: Dict[int, Tuple[Index, List[Device]]] = {}
for device in local_devices:
index = global_indices_rid[device][0]
h_index = _hashed_index(index)
if h_index not in index_to_device:
index_to_device[h_index] = (index, [device])
else:
index_to_device[h_index][1].append(device)
cb_inp = [
(index, tuple(devices)) for index, devices in index_to_device.values()
]
dbs = data_callback(cb_inp)
return cls(global_shape, global_mesh, mesh_axes, dbs,
_gda_fast_path_args=_GdaFastPathArgs(global_indices_rid, local_devices))
core.pytype_aval_mappings[GlobalDeviceArray] = lambda x: core.ShapedArray(
x.shape, x.dtype)
xla.pytype_aval_mappings[GlobalDeviceArray] = lambda x: core.ShapedArray(
x.shape, x.dtype)
xla.canonicalize_dtype_handlers[GlobalDeviceArray] = pxla.identity
def _gda_shard_arg(x, devices, indices):
return [s.data for s in x.local_shards]
pxla.shard_arg_handlers[GlobalDeviceArray] = _gda_shard_arg
def _gda_array_result_handler(global_aval, out_axis_resources, global_mesh):
global_idx_rid = get_shard_indices_replica_ids(global_aval.shape, global_mesh,
out_axis_resources)
local_devices = global_mesh.local_devices
fast_path_args = _GdaFastPathArgs(global_idx_rid, local_devices)
return lambda bufs: GlobalDeviceArray(
global_aval.shape, global_mesh, out_axis_resources, bufs, fast_path_args)
pxla.global_result_handlers[core.ShapedArray] = _gda_array_result_handler
pxla.global_result_handlers[core.ConcreteArray] = _gda_array_result_handler
```
#### File: jax/tests/global_device_array_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
import unittest
import numpy as np
import jax
from jax import core
from jax._src import test_util as jtu
from jax._src.util import prod, safe_zip
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import Mesh
import jax.experimental.global_device_array as gda_lib
from jax.experimental.global_device_array import GlobalDeviceArray, get_shard_indices
from jax.config import config
config.parse_flags_with_absl()
class GDATest(jtu.JaxTestCase):
@parameterized.named_parameters(
("mesh_x_y", ["x", "y"],
# There are more slices but for convienient purposes, checking for only
# 2. The indices + shard_shape + replica_id should be unique enough.
((slice(0, 2), slice(0, 1)), (slice(0, 2), slice(1, 2))),
(2, 1),
[0, 0, 0, 0, 0, 0, 0, 0], False),
("mesh_x_y_pspec", P("x", "y"),
((slice(0, 2), slice(0, 1)), (slice(0, 2), slice(1, 2))),
(2, 1),
[0, 0, 0, 0, 0, 0, 0, 0], False),
("mesh_x", ["x"],
((slice(0, 2), slice(None)), (slice(0, 2), slice(None))),
(2, 2),
[0, 1, 0, 1, 0, 1, 0, 1], False),
("mesh_y", ["y"],
((slice(0, 4), slice(None)), (slice(4, 8), slice(None))),
(4, 2),
[0, 0, 1, 1, 2, 2, 3, 3], False),
("mesh_none_y", [None, "y"],
((slice(None), slice(0, 1)), (slice(None), slice(1, 2))),
(8, 1),
[0, 0, 1, 1, 2, 2, 3, 3], False),
("mesh_xy", [("x", "y")],
((slice(0, 1), slice(None)), (slice(1, 2), slice(None))),
(1, 2),
[0, 0, 0, 0, 0, 0, 0, 0], False),
("mesh_fully_replicated", [],
((slice(None), slice(None)), (slice(None), slice(None))),
(8, 2),
[0, 1, 2, 3, 4, 5, 6, 7], True),
)
def test_gda_2d_shard(self, mesh_axes, expected_index, expected_shard_shape,
expected_replica_ids, expected_is_fully_replicated):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
mesh_axes, cb)
self.assertEqual(gda.local_shards[0].index, expected_index[0])
self.assertArraysEqual(gda.local_data(0),
global_input_data[expected_index[0]])
self.assertEqual(gda.local_shards[1].index, expected_index[1])
self.assertArraysEqual(gda.local_data(1),
global_input_data[expected_index[1]])
self.assertEqual(gda.local_data(0).shape, expected_shard_shape)
replica_ids = [i.replica_id for i in gda.local_shards]
self.assertListEqual(replica_ids, expected_replica_ids)
self.assertListEqual([i.device.id for i in gda.local_shards],
[0, 1, 2, 3, 4, 5, 6, 7])
self.assertEqual(gda.is_fully_replicated, expected_is_fully_replicated)
for s in gda.local_shards:
self.assertEqual(s.data.aval,
core.ShapedArray(expected_shard_shape, s.data.dtype))
for g, l in safe_zip(gda.global_shards, gda.local_shards):
self.assertEqual(g.device, l.device)
self.assertEqual(g.index, l.index)
self.assertEqual(g.replica_id, l.replica_id)
self.assertEqual(g.data.aval, l.data.aval)
self.assertArraysEqual(g.data, l.data)
@parameterized.named_parameters(
("mesh_x_y_z", ["x", "y", "z"],
       # There are more slices but, for convenience, we check only
# 2. The indices + shard_shape + replica_id should be unique enough.
((slice(0, 4), slice(0, 2), slice(0, 1)), (slice(0, 4), slice(0, 2), slice(1, 2))),
(4, 2, 1),
[0, 0, 0, 0, 0, 0, 0, 0]),
("mesh_xy_z", [("x", "y"), "z"],
((slice(0, 2), slice(0, 2), slice(None)), (slice(0, 2), slice(2, 4), slice(None))),
(2, 2, 2),
[0, 0, 0, 0, 0, 0, 0, 0]),
("mesh_z", ["z"],
((slice(0, 4), slice(None), slice(None)), (slice(4, 8), slice(None), slice(None))),
(4, 4, 2),
[0, 0, 1, 1, 2, 2, 3, 3]),
)
def test_gda_3d_shard(self, mesh_axes, expected_index, expected_shard_shape,
expected_replica_ids):
global_mesh = jtu.create_global_mesh((2, 2, 2), ('x', 'y', 'z'))
global_input_shape = (8, 4, 2)
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
mesh_axes, cb)
self.assertEqual(gda.local_shards[0].index, expected_index[0])
self.assertArraysEqual(gda.local_data(0),
global_input_data[expected_index[0]])
self.assertEqual(gda.local_shards[1].index, expected_index[1])
self.assertArraysEqual(gda.local_data(1),
global_input_data[expected_index[1]])
self.assertEqual(gda.local_data(0).shape, expected_shard_shape)
replica_ids = [i.replica_id for i in gda.local_shards]
self.assertListEqual(replica_ids, expected_replica_ids)
@parameterized.named_parameters(
("mesh_x", ["x"],
       # There are more slices but, for convenience, we check only
# 2. The indices + shard_shape + replica_id should be unique enough.
((slice(0, 2),), (slice(2, 4),)),
(2,),
[0, 0, 0, 0, 0, 0, 0, 0]),
("mesh_none", [],
((slice(None),), (slice(None),)),
(16,),
[0, 1, 2, 3, 4, 5, 6, 7]),
)
def test_gda_1d_shard(self, mesh_axes, expected_index, expected_shard_shape,
expected_replica_ids):
global_mesh = jtu.create_global_mesh((8,), ('x'))
global_input_shape = (16,)
global_input_data = np.arange(prod(global_input_shape)).reshape(-1)
def cb(index):
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
mesh_axes, cb)
self.assertEqual(gda.local_shards[0].index, expected_index[0])
self.assertArraysEqual(gda.local_data(0),
global_input_data[expected_index[0]])
self.assertEqual(gda.local_shards[1].index, expected_index[1])
self.assertArraysEqual(gda.local_data(1),
global_input_data[expected_index[1]])
self.assertEqual(gda.local_data(0).shape, expected_shard_shape)
replica_ids = [i.replica_id for i in gda.local_shards]
self.assertListEqual(replica_ids, expected_replica_ids)
def test_gda_shape_0_1d_mesh(self):
global_mesh = jtu.create_global_mesh((8,), ('x'))
global_input_shape = (0,)
mesh_axes = [None]
def cb(index):
return np.array([])
gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
mesh_axes, cb)
for i, s in enumerate(gda.local_shards):
self.assertEqual(s.index, (slice(None),))
self.assertEqual(s.replica_id, i)
self.assertArraysEqual(s.data.to_py(), np.array([]))
self.assertEqual(gda.dtype, np.float32)
self.assertEqual(
gda_lib.get_shard_shape(global_input_shape, global_mesh, mesh_axes),
(0,))
@parameterized.named_parameters(
("mesh_x_y", ["x", "y"],
       # There are more slices but, for convenience, we check only
# 2. The indices + shard_shape + replica_id should be unique enough.
((slice(0, 4), slice(0, 1)), (slice(0, 4), slice(1, 2))),
(4, 1),
[0, 0, 0, 0]),
)
def test_gda_subset_devices(self, mesh_axes, expected_index,
expected_shard_shape, expected_replica_ids):
global_mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
global_input_shape = (8, 2)
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(global_input_shape, global_mesh,
mesh_axes, cb)
self.assertEqual(gda.local_shards[0].index, expected_index[0])
self.assertArraysEqual(gda.local_data(0),
global_input_data[expected_index[0]])
self.assertEqual(gda.local_shards[1].index, expected_index[1])
self.assertArraysEqual(gda.local_data(1),
global_input_data[expected_index[1]])
self.assertEqual(gda.local_data(0).shape, expected_shard_shape)
replica_ids = [i.replica_id for i in gda.local_shards]
self.assertListEqual(replica_ids, expected_replica_ids)
for g, l in safe_zip(gda.global_shards, gda.local_shards):
self.assertEqual(g.device, l.device)
self.assertEqual(g.index, l.index)
self.assertEqual(g.replica_id, l.replica_id)
self.assertArraysEqual(g.data, l.data)
def test_gda_batched_callback(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = [('x', 'y')]
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(indices):
self.assertEqual(len(indices), len(global_mesh.local_devices))
return [global_input_data[index] for index in indices]
gda = GlobalDeviceArray.from_batched_callback(
global_input_shape, global_mesh, mesh_axes, cb)
expected_first_shard_value = np.array([[0, 1]])
self.assertArraysEqual(gda.local_data(0).to_py(),
expected_first_shard_value)
expected_second_shard_value = np.array([[2, 3]])
self.assertArraysEqual(gda.local_data(1).to_py(),
expected_second_shard_value)
def test_gda_batched_callback_with_devices(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x']
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(cb_inp):
self.assertLen(cb_inp, 4)
dbs = []
for inp in cb_inp:
index, devices = inp
self.assertLen(devices, 2)
array = global_input_data[index]
dbs.extend([jax.device_put(array, device) for device in devices])
return dbs
gda = GlobalDeviceArray.from_batched_callback_with_devices(
global_input_shape, global_mesh, mesh_axes, cb)
expected_first_shard_value = np.array([[0, 1], [2, 3]], dtype=np.float32)
self.assertArraysEqual(gda.local_data(0).to_py(),
expected_first_shard_value)
expected_second_shard_value = np.array([[0, 1], [2, 3]], dtype=np.float32)
self.assertArraysEqual(gda.local_data(1).to_py(),
expected_second_shard_value)
def test_gda_str_repr(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = [('x', 'y')]
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda = GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
self.assertEqual(str(gda),
'GlobalDeviceArray(shape=(8, 2), dtype=int32)')
self.assertEqual(
repr(gda),
("GlobalDeviceArray(shape=(8, 2), dtype=int32, "
"global_mesh_shape={'x': 4, 'y': 2}, mesh_axes=[('x', 'y')])"))
def test_gda_equality_raises_not_implemented(self):
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None,)
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
input_gda = GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
same_input_gda = GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesRegex(NotImplementedError,
'GlobalDeviceArray equality is intentionally unimplemented.'):
input_gda == same_input_gda
def test_mesh_hash(self):
global_mesh1 = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_mesh2 = jtu.create_global_mesh((2, 4), ('x', 'y'))
global_mesh3 = jtu.create_global_mesh((4, 2), ('x', 'y'))
self.assertNotEqual(hash(global_mesh1), hash(global_mesh2))
self.assertEqual(hash(global_mesh1), hash(global_mesh3))
def test_device_mismatch(self):
devices = jax.devices()
if len(devices) < 8:
raise unittest.SkipTest("Test requires 8 global devices.")
mesh_devices = np.array([[devices[0], devices[2]],
[devices[3], devices[1]],
[devices[4], devices[6]],
[devices[7], devices[5]]])
global_mesh = Mesh(mesh_devices, ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x', 'y']
global_input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
indices = get_shard_indices(global_input_shape, global_mesh, mesh_axes)
dbs = [
jax.device_put(global_input_data[indices[d]], d)
for d in jax.local_devices()
]
with self.assertRaisesRegex(
ValueError,
'The `global_mesh.local_devices` and `device_buffers` device order'):
GlobalDeviceArray(global_input_shape, global_mesh, mesh_axes, dbs)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
``` |
{
"source": "jonas-eschle/raredecay",
"score": 2
} |
#### File: raredecay/analysis/compatibility_tools.py
```python
from raredecay.tools import dev_tool
def _make_data(
original_data,
target_data=None,
features=None,
target_from_data=False,
weights_ratio=0,
weights_original=None,
weights_target=None,
):
"""Return the concatenated data, weights and labels for classifier training.
Differs to only *make_dataset* from the |hepds_type| by providing the
possibility of using other weights.
"""
# make temporary weights if specific weights are given as parameters
temp_ori_weights = None
temp_tar_weights = None
if not dev_tool.is_in_primitive(weights_original, None):
temp_ori_weights = original_data.weights
original_data.set_weights(weights_original)
if not dev_tool.is_in_primitive(weights_target, None):
temp_tar_weights = target_data.weights
target_data.set_weights(weights_target)
# create the data, target and weights
data_out = original_data.make_dataset(
target_data,
columns=features,
targets_from_data=target_from_data,
weights_ratio=weights_ratio,
)
# reassign weights if specific weights have been used
if not dev_tool.is_in_primitive(temp_ori_weights, None):
original_data.set_weights(temp_ori_weights)
if not dev_tool.is_in_primitive(temp_tar_weights, None):
        target_data.set_weights(temp_tar_weights)
return data_out
```
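A hedged usage sketch for `_make_data` (illustrative only: the `HEPDataStorage` construction follows the pattern used in `statistics.py` further below, and the column name is made up):
```python
import numpy as np
import pandas as pd
from raredecay.tools.data_storage import HEPDataStorage
from raredecay.analysis.compatibility_tools import _make_data
# Two toy samples wrapped in the HEPDataStorage container.
mc = HEPDataStorage(pd.DataFrame({'x': np.random.normal(0.0, 1.0, 1000)}))
real = HEPDataStorage(pd.DataFrame({'x': np.random.normal(0.1, 1.1, 1000)}))
# Concatenate both samples into one labelled training set; the explicit weights
# are only applied temporarily and the stored weights are restored afterwards.
data, targets, weights = _make_data(mc, real, features=['x'], weights_ratio=1,
                                    weights_original=np.ones(1000))
```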
#### File: raredecay/analysis/reweight.py
```python
import sys # noqa
import warnings # noqa
import numpy as np # noqa
from matplotlib import pyplot as plt # noqa
from raredecay.tools import dev_tool, data_tools, data_storage # noqa
from .. import meta_config # noqa
from ..globals_ import out # noqa
import copy
import hep_ml.reweight
import pandas as pd
# import configuration
from .. import meta_config as meta_cfg
from .. import config as cfg
# HACK as reweight also uses meta_cfg for reweight_cfg
meta_cfg_module = meta_cfg
logger = dev_tool.make_logger(__name__, **cfg.logger_cfg)
def reweight_train(
mc,
real,
columns=None,
reweighter="gb",
reweight_cfg=None,
reweight_saveas=None,
weights_ratio=1,
weights_mc=None,
weights_real=None,
):
"""Return a trained reweighter from a (mc/real) distribution comparison.
    | Reweighting a distribution means making two distributions match by changing
    the per-event weights (instead of leaving them at 1). Mostly, and hence the
    naming, you want to change the mc-distribution towards the real one.
    | There are two possibilities:
    * normal bins reweighting:
    divides the bins from one distribution by the bins of the other
    distribution. Easy and fast, but unstable and inaccurate in higher
    dimensions.
    * Gradient Boosted reweighting:
    uses several decision trees to reweight the bins. Slower, but more
    accurate. Very useful in higher dimensions.
    But be aware that you can easily screw things up by overfitting.
Parameters
----------
mc : |hepds_type|
The Monte-Carlo data to compare with the real data.
real : |hepds_type|
Same as *mc_data* but for the real data.
columns : list of strings
The columns/features/branches you want to use for the reweighting.
reweighter : {'gb', 'bins'}
Specify which reweighter to be used.
- **gb**: The GradientBoosted Reweighter from REP,
:func:`~hep_ml.reweight.GBReweighter`
- **bins**: The simple bins reweighter from REP,
:func:`~hep_ml.reweight.BinsReweighter`
reweight_saveas : string
To save a trained reweighter in addition to return it. The value
is the filepath + name.
reweight_cfg : dict
Contains the parameters for the bins/gb-reweighter. See also
:func:`~hep_ml.reweight.BinsReweighter` and
:func:`~hep_ml.reweight.GBReweighter`.
weights_ratio : numeric or None, False
The ratio of the sum of mc weights / sum of real weights. If set to
one, the reweighter will learn from nicely normalized distributions.
A value greater than 1 means there are in total more mc events
than data points.
weights_mc : numpy.array [n_samples]
Explicit weights for the Monte-Carlo data. Only specify if you don't
want to use the weights in the |hepds_type|.
weights_real : numpy.array [n_samples]
Explicit weights for the real data. Only specify if you don't
want to use the weights in the |hepds_type|.
Returns
-------
out : object of type reweighter
Reweighter is trained to the data. Can, for example,
be used with :func:`~hep_ml.reweight.GBReweighter.predict_weights`
"""
__REWEIGHT_MODE = {"gb": "GB", "bins": "Bins", "bin": "Bins"}
# HACK
from raredecay.analysis.compatibility_tools import _make_data
# Python 2/3 compatibility, str
columns = dev_tool.entries_to_str(columns)
reweighter = dev_tool.entries_to_str(reweighter)
reweight_saveas = dev_tool.entries_to_str(reweight_saveas)
reweight_cfg = dev_tool.entries_to_str(reweight_cfg)
# check for valid user input
if data_tools.is_pickle(reweighter):
return data_tools.adv_return(reweighter, save_name=reweight_saveas)
if reweighter not in __REWEIGHT_MODE:
raise ValueError("Reweighter invalid: " + reweighter)
reweighter = __REWEIGHT_MODE.get(reweighter.lower())
reweighter += "Reweighter"
# logging and writing output
msg = ["Reweighter:", reweighter, "with config:", reweight_cfg]
logger.info(msg)
out.add_output(
msg
+ [
"\nData used:\n",
mc.name,
" and ",
real.name,
"\ncolumns used for the reweighter training:\n",
columns,
],
section="Training the reweighter",
obj_separator=" ",
)
if columns is None:
        # use the intersection of both columns
common_cols = set(mc.columns)
common_cols.intersection_update(real.columns)
columns = list(common_cols)
if columns != mc.columns or columns != real.columns:
logger.warning(
"No columns specified for reweighting, took intersection"
+ " of both dataset, as it's columns are not equal."
+ "\nTherefore some columns were not used!"
)
            meta_cfg_module.warning_occured()
# create data
normalize_real = 1 if weights_ratio else None
mc, _t, mc_weights = _make_data(
original_data=mc,
features=columns,
weights_original=weights_mc,
weights_ratio=weights_ratio,
)
real, _t, real_weights = _make_data(
real,
features=columns,
weights_original=weights_real,
weights_ratio=normalize_real,
)
del _t
# train the reweighter
reweight_cfg = {} if reweight_cfg is None else reweight_cfg
if reweighter == "GBReweighter":
reweighter = hep_ml.reweight.GBReweighter(**reweight_cfg)
elif reweighter == "BinsReweighter":
reweighter = hep_ml.reweight.BinsReweighter(**reweight_cfg)
reweighter.fit(
original=mc, target=real, original_weight=mc_weights, target_weight=real_weights
)
return data_tools.adv_return(reweighter, save_name=reweight_saveas)
def reweight_weights(
apply_data, reweighter_trained, columns=None, normalize=True, add_weights=True
):
"""Apply reweighter to the data and (add +) return the weights (multiplied by already existing weights).
Can be seen as a wrapper for the
:py:func:`~hep_ml.reweight.GBReweighter.predict_weights` method.
Additional functionality:
* Takes a trained reweighter as argument, but can also unpickle one
from a file.
Parameters
----------
apply_data : |hepds_type|
The data for which the weights are predicted.
reweighter_trained : (pickled) reweighter (*from hep_ml*)
The trained reweighter, which predicts the new weights.
columns : list(str, str, str,...)
The columns to use for the reweighting.
normalize : boolean or int
If True, the weights will be normalized (scaled) to the value of
normalize.
add_weights : boolean
If set to False, the weights will only be returned and not updated in
the data (|hepds_type|). If you want to use the data later on
in the script with the new weights, set this value to True.
Returns
------
out : :py:class:`~pd.Series`
Return an instance of pandas Series of shape [n_samples] containing the
new weights.
"""
# HACK
# Python 2/3 compatibility, str
reweighter_trained = dev_tool.entries_to_str(reweighter_trained)
columns = dev_tool.entries_to_str(columns)
normalize = 1 if normalize is True else normalize
reweighter_trained = data_tools.try_unpickle(reweighter_trained)
if columns is None:
columns = reweighter_trained.columns
# new_weights = reweighter_trained.predict_weights(reweight_data.pandasDF(),
new_weights = reweighter_trained.predict_weights(
apply_data.pandasDF(columns=columns), original_weight=apply_data.weights
)
# write to output
out.add_output(
[
"Using the reweighter:\n",
reweighter_trained,
"\n to reweight ",
apply_data.name,
],
obj_separator="",
)
if isinstance(normalize, (int, float)) and not isinstance(normalize, bool):
warnings.warn(
"Normalizing weights. This does not 'correctly' normalize by using the training"
" weights but just uses the predictet weights. May consider using `normalize=False` and"
" normalize by hand correctly."
)
new_weights *= new_weights.size / new_weights.sum() * normalize
new_weights = pd.Series(new_weights, index=apply_data.index)
if add_weights:
apply_data.set_weights(new_weights)
return new_weights
# NEW
def reweight(
apply_data=None,
mc=None,
real=None,
columns=None,
reweighter="gb",
reweight_cfg=None,
n_reweights=1,
add_weights=True,
normalize=True,
):
"""(Train a reweighter and) apply the reweighter to get new weights (multiplied by already existing weights).
Train a reweighter from the real data and the corresponding MC differences.
Then, try to correct the apply data (MC as well) the same as the first
MC would have been corrected to look like its real counterpart.
Parameters
----------
apply_data : |hepds_type|
The data which shall be corrected
real : |hepds_type|
The real data to train the reweighter on
mc : |hepds_type|
The MC data to train the reweighter on
columns : list(str, str, str,...)
The branches to use for the reweighting process.
reweighter : {'gb', 'bins'} or trained hep_ml-reweighter (also pickled)
Either a string specifying which reweighter to use or an already
trained reweighter from the hep_ml-package. The reweighter can also
be a file-path (str) to a pickled reweighter.
reweight_cfg : dict
A dict containing all the keywords and values you want to specify as
parameters to the reweighter.
n_reweights : int
To get more stable weights, the mean of each weight over many
reweighting runs (training and predicting) can be used. The
n_reweights specifies how many runs to do.
add_weights : boolean
If True, the weights will be added to the data directly, therefore
the data-storage will be modified.
normalize : bool
        If True, normalizes the weights to the value given. This may be improper
        handling; preferably use `False` and normalize on your own.
Return
------
out : dict
Return a dict containing the weights as well as the reweighter.
The keywords are:
- *reweighter* : The trained reweighter
- *weights* : pandas Series containing the new weights of the data. The weights are multiplied with the
already existing weights in `apply_data`
"""
# from raredecay.globals_ import out
from raredecay.tools import data_tools
output = {}
reweighter_list = False
new_reweighter_list = []
reweighter = data_tools.try_unpickle(reweighter)
if isinstance(reweighter, list):
n_reweights = len(reweighter)
reweighter_list = copy.deepcopy(reweighter)
for run in range(n_reweights):
if reweighter_list:
reweighter = reweighter_list[run]
reweighter = data_tools.try_unpickle(reweighter)
if reweighter in ("gb", "bins"):
new_reweighter = reweight_train(
mc=mc,
real=real,
columns=columns,
reweight_cfg=reweight_cfg,
reweighter=reweighter,
)
# TODO: hack which adds columns, good idea?
assert not hasattr(
new_reweighter, "columns"
), "Newly created reweighter has column attribute, which should be set on the fly now. Changed object reweighter?"
new_reweighter.columns = data_tools.to_list(columns)
else:
new_reweighter = reweighter
if n_reweights > 1:
new_reweighter_list.append(new_reweighter)
else:
new_reweighter_list = new_reweighter
if apply_data is not None:
tmp_weights = reweight_weights(
apply_data=apply_data,
columns=columns,
reweighter_trained=new_reweighter,
add_weights=False,
normalize=normalize,
)
if run == 0:
new_weights = tmp_weights
else:
new_weights += tmp_weights
if apply_data is not None:
new_weights /= n_reweights
        new_weights = new_weights.sort_index()
if add_weights:
apply_data.set_weights(new_weights)
output["weights"] = new_weights
output["reweighter"] = new_reweighter_list
return output
def reweight_kfold(
mc,
real,
columns=None,
n_folds=10,
reweighter="gb",
reweighter_cfg=None,
n_reweights=1,
add_weights=True,
normalize=True,
):
"""Kfold reweight the data by "itself" for *scoring* and hyper-parameters.
.. warning::
Do NOT use for the real reweighting process! (except if you really want
to reweight the data "by itself")
If you want to figure out the hyper-parameters for a reweighting process
    or just want to find out how well the reweighter works, you may want to
apply this to the data itself. This means:
- train a reweighter on mc/real
- apply it to get new weights for mc
- compare the mc/real distribution
The problem arises with biasing your reweighter. As in classification
tasks, where you split your data into train/test sets for Kfolds, you
want to do the same here. Therefore:
- split the mc data into (n_folds-1)/n_folds (training)
- train the reweighter on the training mc/complete real (if
mcreweighted_as_real_score is True, the real data will be folded too
for unbiasing the score)
- reweight the leftout mc test-fold
- do this n_folds times
- getting unbiased weights
The parameters are more or less the same as for the
:py:func:`~raredecay.analysis.ml_analysis.reweight_train` and
:py:func:`~raredecay.analysis.ml_analysis.reweight_weights`
Parameters
----------
mc : |hepds_type|
The Monte-Carlo data, which has to be "fitted" to the real data.
real : |hepds_type|
Same as *mc_data* but for the real data.
columns : list of strings
The columns/features/branches you want to use for the reweighting.
n_folds : int >= 1
The number of folds to split the data. Usually, the more folds the
"better" the reweighting (especially for small datasets).
If n_folds = 1, the data will be reweighted directly and the benefit
of Kfolds and the unbiasing *disappears*
reweighter : {'gb', 'bins'}
Specify which reweighter to use.
- **gb**: GradientBoosted Reweighter from REP
- **bins**: Binned Reweighter from REP
reweighter_cfg : dict
Contains the parameters for the bins/gb-reweighter. See also
:func:`~hep_ml.reweight.BinsReweighter` and
:func:`~hep_ml.reweight.GBReweighter`.
n_reweights : int
As the reweighting often yields different weights depending on random
parameters like the splitting of the data, the new weights can be
produced by taking the average of the weights over many reweighting
runs. n_reweights is the number of reweight runs to average over.
add_weights : boolean
If True, the new weights will be added (in place) to the mc data and
returned. Otherwise, the weights will only be returned.
normalize : bool
        If True, normalizes the weights to the value given. This may be improper
        handling; preferably use `False` and normalize on your own.
Return
------
out : :py:class:`~pd.Series`
Return the new weights obtained from the reweighting _multiplied_ by the
already existing weights in `mc`.
"""
# Python 2/3 compatibility, str
columns = dev_tool.entries_to_str(columns)
reweighter = dev_tool.entries_to_str(reweighter)
reweighter_cfg = dev_tool.entries_to_str(reweighter_cfg)
normalize = 1 if normalize is True else normalize
output = {}
out.add_output(
["Doing reweighting_Kfold with ", n_folds, " folds"],
title="Reweighting Kfold",
obj_separator="",
)
# create variables
assert n_folds >= 1 and isinstance(
n_folds, int
), "n_folds has to be >= 1, its currently" + str(n_folds)
assert isinstance(
mc, data_storage.HEPDataStorage
), "wrong data type. Has to be HEPDataStorage, is currently" + str(type(mc))
assert isinstance(
real, data_storage.HEPDataStorage
), "wrong data type. Has to be HEPDataStorage, is currently" + str(type(real))
new_weights_tot = pd.Series(np.zeros(len(mc)), index=mc.index)
if not add_weights:
old_mc_tot_weights = mc.weights
for run in range(n_reweights):
new_weights_all = []
new_weights_index = []
# split data to folds and loop over them
mc.make_folds(n_folds=n_folds)
real.make_folds(n_folds=n_folds)
def do_reweighting(fold):
"""
Inline loop for parallelization
Parameters
----------
fold : int
Which fold
            Returns
            -------
            tuple
                The new weights of the test fold and the fold's index.
            """
# create train/test data
if n_folds > 1:
train_real, test_real = real.get_fold(fold)
train_mc, test_mc = mc.get_fold(fold)
else:
train_real = test_real = real
train_mc = test_mc = mc
# if mcreweighted_as_real_score:
# old_mc_weights = test_mc.get_weights()
# plot the first fold as example (the first one surely exists)
plot_importance1 = 2 if fold == 0 else 1
if n_folds > 1 and plot_importance1 > 1 and run == 0:
train_real.plot(
figure="Reweighter trainer, example, fold " + str(fold),
importance=plot_importance1,
)
train_mc.plot(
figure="Reweighter trainer, example, fold " + str(fold),
importance=plot_importance1,
)
# train reweighter on training data
reweighter_trained = reweight_train(
mc=train_mc,
real=train_real,
columns=columns,
reweighter=reweighter,
reweight_cfg=reweighter_cfg,
)
new_weights = reweight_weights(
apply_data=test_mc,
reweighter_trained=reweighter_trained,
columns=columns,
add_weights=True,
) # fold only, not full data
# plot one for example of the new weights
if (n_folds > 1 and plot_importance1 > 1) or max(new_weights) > 50:
out.save_fig(
"new weights of fold " + str(fold), importance=plot_importance1
)
plt.hist(new_weights, bins=40, log=True)
return (new_weights, test_mc.get_index())
weights_and_indexes = map(do_reweighting, range(n_folds))
for w, i in weights_and_indexes:
new_weights_all.append(w)
new_weights_index.append(i)
if n_folds == 1:
new_weights_all = np.array(new_weights_all)
new_weights_index = np.array(new_weights_index)
else:
new_weights_all = np.concatenate(new_weights_all)
new_weights_index = np.concatenate(new_weights_index)
new_weights_tot += pd.Series(new_weights_all, index=new_weights_index)
out.save_fig(figure="New weights of run " + str(run), importance=3)
hack_array = np.array(new_weights_all)
plt.hist(hack_array, bins=30, log=True)
plt.title("New weights of reweighting at end of run " + str(run))
# after for loop for weights creation
new_weights_tot /= n_reweights
if add_weights:
mc.set_weights(new_weights_tot)
else:
mc.set_weights(old_mc_tot_weights)
out.save_fig(figure="New weights of total mc", importance=4)
plt.hist(new_weights_tot, bins=30, log=True)
plt.title("New weights of reweighting with Kfold")
if isinstance(normalize, (int, float)) and not isinstance(normalize, bool):
new_weights_tot *= new_weights_tot.size / new_weights_tot.sum() * normalize
output["weights"] = new_weights_tot
return output
```
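A hedged end-to-end sketch of the train/apply workflow described in the docstrings above (toy data; the `HEPDataStorage` construction follows the pattern used in `statistics.py` below, and the column name is illustrative):
```python
import numpy as np
import pandas as pd
from raredecay.tools.data_storage import HEPDataStorage
from raredecay.analysis.reweight import reweight_train, reweight_weights
mc = HEPDataStorage(pd.DataFrame({'pt': np.random.exponential(2.0, 5000)}))
real = HEPDataStorage(pd.DataFrame({'pt': np.random.exponential(2.3, 5000)}))
# Train a GradientBoosted reweighter on the mc/real difference ...
gb_reweighter = reweight_train(mc=mc, real=real, columns=['pt'], reweighter='gb')
# ... and predict new per-event weights for the MC sample.
new_weights = reweight_weights(apply_data=mc, reweighter_trained=gb_reweighter,
                               columns=['pt'], normalize=False, add_weights=False)
```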
#### File: raredecay/analysis/statistics.py
```python
from .. import config as cfg # noqa
import numpy as np
# from raredecay.globals_ import out
from .. import meta_config as meta_cfg
from ..tools import dev_tool
# import matplotlib.pyplot as plt
def ks_2samp_ds(data1, data2, column):
"""
Parameters
----------
data1 : |hepds_type|
Data set one for the 2sample test
data2 : |hepds_type|
Data set two for the 2sample test
column : str
Which column to use. Has to be the same name in both data sets
Returns
-------
numeric
Return the K-S two sample test hypothesis score.
"""
# Python 2/3 compatibility, str
column = str(column)
# create data from HEPDS
data1, _, weights1 = data1.make_dataset(columns=column)
data2, _, weights2 = data2.make_dataset(columns=column)
weights1 = np.array(weights1)
weights2 = np.array(weights2)
data1 = np.array(data1[column].values)
data2 = np.array(data2[column].values)
# call ks_test
ks_score = ks_2samp(data1=data1, data2=data2, weights1=weights1, weights2=weights2)
return ks_score
def ks_2samp(data1, data2, weights1=None, weights2=None):
"""Weighted two sample Komlogorov-Smirnov hypothesis test.
The weighted version of the Kolmogorov-Smirnov test if two samples
*data1* and *data2* with weights *weights1* and *weights2* respectively
are drawn from the same continuos distribution.
Parameters
----------
data1 : array-like
The first distribution.
data2 : array-like
The second distribution.
weights1 : array-like
The weights of the first distribution. The length has to be equal
to the length of *data1*.
weights2 : array-like
The weights of the second distribution. The length has to be equal
to the length of *data2*.
Returns
-------
numeric
Return the K-S two sample test hypothesis score.
"""
# check and set input
weights1 = (
np.ones(len(data1))
if dev_tool.is_in_primitive(weights1)
else np.array(weights1)
)
weights2 = (
np.ones(len(data2))
if dev_tool.is_in_primitive(weights2)
else np.array(weights2)
)
data1 = np.array(data1)
data2 = np.array(data2)
# start calculation
ix1 = np.argsort(data1)
ix2 = np.argsort(data2)
data1 = data1[ix1]
data2 = data2[ix2]
weights1 = weights1[ix1]
weights2 = weights2[ix2]
data = np.concatenate([data1, data2])
cwei1 = np.hstack([0, np.cumsum(weights1) / sum(weights1)])
cwei2 = np.hstack([0, np.cumsum(weights2) / sum(weights2)])
    cdf1we = cwei1[np.searchsorted(data1, data, side="right")]
    cdf2we = cwei2[np.searchsorted(data2, data, side="right")]
return np.max(np.abs(cdf1we - cdf2we))
ks_2samp_ds.__doc__ = ks_2samp.__doc__.split("Parameter", 1)[0] + ks_2samp_ds.__doc__
def ad_2samp(data1, data2, column):
# Python 2/3 compatibility, str
column = str(column)
# prepare data
data1, targets1, weights1 = data1.make_dataset(columns=column)
data2, targets2, weights2 = data2.make_dataset(columns=column)
weights1 = np.array(weights1)
weights2 = np.array(weights2)
data1 = np.array(data1[column].values)
data2 = np.array(data2[column].values)
# sort data
ix1 = np.argsort(data1)
ix2 = np.argsort(data2)
data1 = data1[ix1]
data2 = data2[ix2]
weights1 = weights1[ix1]
weights2 = weights2[ix2]
n = np.sum(weights1)
m = np.sum(weights2)
def _anderson_2samp_right(samples, data_sorted, data_unique_sorted, n_events):
n_tot = sum(n_events)
def fit_mass(
data,
column,
x,
sig_pdf=None,
bkg_pdf=None,
n_sig=None,
n_bkg=None,
blind=False,
nll_profile=False,
second_storage=None,
log_plot=False,
pulls=True,
sPlot=False,
bkg_in_region=False,
importance=3,
plot_importance=3,
):
"""Fit a given pdf to a variable distribution.
A quite versatile function doing several things connected to fitting.
Parameters
----------
data : |hepds_type|
The data containing the variable to fit to
column : str
The name of the column to fit the pdf to
x : RooRealVar
The RooRealVar to fit to.
sig_pdf : RooFit pdf
The signal Probability Density Function. The variable to fit to has
to be named 'x'.
bkg_pdf : RooFit pdf
The background Probability Density Function. The variable to fit to has
to be named 'x'.
n_sig : None or numeric
The number of signals in the data. If it should be fitted, use None.
n_bkg : None or numeric
The number of background events in the data.
If it should be fitted, use None.
    blind : boolean or tuple(numeric, numeric)
        If False, the data is fitted. If a tuple is provided, the values are
        used as the lower (the first value) and the upper (the second value)
        limit of a blinding region, which will be omitted in plots.
        Additionally, no true number of signals will be returned, only a fake one.
nll_profile : boolean
If True, a Negative Log-Likelihood Profile will be generated. Does not
work with blind fits.
second_storage : |hepds_type|
A second data-storage that will be concatenated with the first one.
importance : |importance_type|
|importance_docstring|
plot_importance : |plot_importance_type|
|plot_importance_docstring|
Return
------
    tuple(numerical, numerical, object)
        Return the number of signals and the number of backgrounds in the
        signal-region, followed by the fitted pdf (or, if *sPlot* is True, the
        sWeights). If a blind fit is performed, the signal will be a fake
        number. If no number of background events is required, -999 will be
        returned.
"""
import ROOT
from ROOT import (
RooRealVar,
RooArgList,
RooArgSet,
RooAddPdf,
RooDataSet,
RooAbsReal,
)
from ROOT import RooFit, RooCBShape, RooExponential
from ROOT import RooGaussian, RooMinuit
from ROOT import (
TCanvas,
) # HACK to prevent not plotting canvas by root_numpy import. BUG.
from root_numpy import array2tree
from ROOT import RooCategory, RooUnblindPrecision
# Python 2/3 compatibility, str
column = dev_tool.entries_to_str(column)
if not (isinstance(column, str) or len(column) == 1):
raise ValueError(
"Fitting to several columns " + str(column) + " not supported."
)
    if sig_pdf is None and bkg_pdf is None:
raise ValueError("sig_pdf and bkg_pdf are both None-> no fit possible")
if blind is not False:
lower_blind, upper_blind = blind
blind = True
n_bkg_below_sig = -999
# create data
data_name = data.name
data_array, _t1, _t2 = data.make_dataset(second_storage, columns=column)
del _t1, _t2
# double crystalball variables
min_x, max_x = min(data_array[column]), max(data_array[column])
# x = RooRealVar("x", "x variable", min_x, max_x)
# create data
data_array = np.array([i[0] for i in data_array.as_matrix()])
try:
data_array.dtype = [("x", np.float64)]
except:
data_array.dtype = [("x", np.float64)]
print("hack needed")
tree1 = array2tree(data_array, "x")
data = RooDataSet("data", "Data", RooArgSet(x), RooFit.Import(tree1))
# # TODO: export somewhere? does not need to be defined inside...
# mean = RooRealVar("mean", "Mean of Double CB PDF", 5280, 5100, 5600)#, 5300, 5500)
# sigma = RooRealVar("sigma", "Sigma of Double CB PDF", 40, 0.001, 200)
# alpha_0 = RooRealVar("alpha_0", "alpha_0 of one side", 5.715)#, 0, 150)
# alpha_1 = RooRealVar("alpha_1", "alpha_1 of other side", -4.019)#, -200, 0.)
# lambda_0 = RooRealVar("lambda_0", "Exponent of one side", 3.42)#, 0, 150)
# lambda_1 = RooRealVar("lambda_1", "Exponent of other side", 3.7914)#, 0, 500)
#
# # TODO: export somewhere? pdf construction
# frac = RooRealVar("frac", "Fraction of crystal ball pdfs", 0.479, 0.01, 0.99)
#
# crystalball1 = RooCBShape("crystallball1", "First CrystalBall PDF", x,
# mean, sigma, alpha_0, lambda_0)
# crystalball2 = RooCBShape("crystallball2", "Second CrystalBall PDF", x,
# mean, sigma, alpha_1, lambda_1)
# doubleCB = RooAddPdf("doubleCB", "Double CrystalBall PDF",
# crystalball1, crystalball2, frac)
# n_sig = RooRealVar("n_sig", "Number of signals events", 10000, 0, 1000000)
# test input
if n_sig == n_bkg == 0:
raise ValueError("n_sig as well as n_bkg is 0...")
if n_bkg is None:
n_bkg = RooRealVar("n_bkg", "Number of background events", 10000, 0, 500000)
elif n_bkg >= 0:
n_bkg = RooRealVar("n_bkg", "Number of background events", int(n_bkg))
else:
raise ValueError("n_bkg is not >= 0 or None")
if n_sig is None:
n_sig = RooRealVar("n_sig", "Number of signal events", 1050, 0, 200000)
# START BLINDING
blind_cat = RooCategory("blind_cat", "blind state category")
blind_cat.defineType("unblind", 0)
blind_cat.defineType("blind", 1)
if blind:
blind_cat.setLabel("blind")
blind_n_sig = RooUnblindPrecision(
"blind_n_sig",
"blind number of signals",
"wasistdas",
n_sig.getVal(),
10000,
n_sig,
blind_cat,
)
else:
# blind_cat.setLabel("unblind")
blind_n_sig = n_sig
print("n_sig value " + str(n_sig.getVal()))
# END BLINDING
elif n_sig >= 0:
n_sig = RooRealVar("n_sig", "Number of signal events", int(n_sig))
else:
raise ValueError("n_sig is not >= 0")
# if not blind:
# blind_n_sig = n_sig
# # create bkg-pdf
# lambda_exp = RooRealVar("lambda_exp", "lambda exp pdf bkg", -0.00025, -1., 1.)
# bkg_pdf = RooExponential("bkg_pdf", "Background PDF exp", x, lambda_exp)
if blind:
comb_pdf = RooAddPdf(
"comb_pdf",
"Combined DoubleCB and bkg PDF",
RooArgList(sig_pdf, bkg_pdf),
RooArgList(blind_n_sig, n_bkg),
)
else:
comb_pdf = RooAddPdf(
"comb_pdf",
"Combined DoubleCB and bkg PDF",
RooArgList(sig_pdf, bkg_pdf),
RooArgList(n_sig, n_bkg),
)
# create test dataset
# mean_gauss = RooRealVar("mean_gauss", "Mean of Gaussian", 5553, -10000, 10000)
# sigma_gauss = RooRealVar("sigma_gauss", "Width of Gaussian", 20, 0.0001, 300)
# gauss1 = RooGaussian("gauss1", "Gaussian test dist", x, mean_gauss, sigma_gauss)
# lambda_data = RooRealVar("lambda_data", "lambda exp data", -.002)
# exp_data = RooExponential("exp_data", "data example exp", x, lambda_data)
# frac_data = RooRealVar("frac_data", "Fraction PDF of data", 0.15)
#
# data_pdf = RooAddPdf("data_pdf", "Data PDF", gauss1, exp_data, frac_data)
# data = data_pdf.generate(RooArgSet(x), 30000)
# data.printValue()
# xframe = x.frame()
# data_pdf.plotOn(xframe)
# print "n_cpu:", meta_cfg.get_n_cpu()
# input("test")
# comb_pdf.fitTo(data, RooFit.Extended(ROOT.kTRUE), RooFit.NumCPU(meta_cfg.get_n_cpu()))
# HACK to get 8 cores in testing
c5 = TCanvas("c5", "RooFit pdf not fit vs " + data_name)
c5.cd()
x_frame1 = x.frame()
# data.plotOn(x_frame1)
# comb_pdf.pdfList()[1].plotOn(x_frame1)
if __name__ == "__main__":
n_cpu = 8
else:
n_cpu = meta_cfg.get_n_cpu()
print("n_cpu = ", n_cpu)
# HACK
# n_cpu = 8
result_fit = comb_pdf.fitTo(
data,
RooFit.Minos(ROOT.kTRUE),
RooFit.Extended(ROOT.kTRUE),
RooFit.NumCPU(n_cpu),
)
# HACK end
if bkg_in_region:
x.setRange("signal", bkg_in_region[0], bkg_in_region[1])
bkg_pdf_fitted = comb_pdf.pdfList()[1]
int_argset = RooArgSet(x)
# int_argset = x
# int_argset.setRange("signal", bkg_in_region[0], bkg_in_region[1])
integral = bkg_pdf_fitted.createIntegral(
int_argset, RooFit.NormSet(int_argset), RooFit.Range("signal")
)
bkg_cdf = bkg_pdf_fitted.createCdf(int_argset, RooFit.Range("signal"))
bkg_cdf.plotOn(x_frame1)
# integral.plotOn(x_frame1)
n_bkg_below_sig = integral.getVal(int_argset) * n_bkg.getVal()
x_frame1.Draw()
if plot_importance >= 3:
c2 = TCanvas("c2", "RooFit pdf fit vs " + data_name)
c2.cd()
x_frame = x.frame()
# if log_plot:
# c2.SetLogy()
# x_frame.SetTitle("RooFit pdf vs " + data_name)
x_frame.SetTitle(data_name)
if pulls:
pad_data = ROOT.TPad("pad_data", "Pad with data and fit", 0, 0.33, 1, 1)
pad_pulls = ROOT.TPad("pad_pulls", "Pad with data and fit", 0, 0, 1, 0.33)
pad_data.SetBottomMargin(0.00001)
pad_data.SetBorderMode(0)
if log_plot:
pad_data.SetLogy()
pad_pulls.SetTopMargin(0.00001)
pad_pulls.SetBottomMargin(0.2)
pad_pulls.SetBorderMode(0)
pad_data.Draw()
pad_pulls.Draw()
pad_data.cd()
else:
if log_plot:
c2.SetLogy()
if blind:
# HACK
column = "x"
# END HACK
x.setRange("lower", min_x, lower_blind)
x.setRange("upper", upper_blind, max_x)
range_str = "lower,upper"
lower_cut_str = (
str(min_x) + "<=" + column + "&&" + column + "<=" + str(lower_blind)
)
upper_cut_str = (
str(upper_blind) + "<=" + column + "&&" + column + "<=" + str(max_x)
)
sideband_cut_str = "(" + lower_cut_str + ")" + "||" + "(" + upper_cut_str + ")"
n_entries = data.reduce(sideband_cut_str).numEntries() / data.numEntries()
# raw_input("n_entries: " + str(n_entries))
if plot_importance >= 3:
data.plotOn(
x_frame, RooFit.CutRange(range_str), RooFit.NormRange(range_str)
)
comb_pdf.plotOn(
x_frame,
RooFit.Range(range_str),
RooFit.Normalization(n_entries, RooAbsReal.Relative),
RooFit.NormRange(range_str),
)
if pulls:
# pull_hist(pull_frame=x_frame, pad_data=pad_data, pad_pulls=pad_pulls)
x_frame_pullhist = x_frame.pullHist()
else:
if plot_importance >= 3:
data.plotOn(x_frame)
comb_pdf.plotOn(x_frame)
if pulls:
pad_pulls.cd()
x_frame_pullhist = x_frame.pullHist()
pad_data.cd()
comb_pdf.plotOn(
x_frame,
RooFit.Components(sig_pdf.namePtr().GetName()),
RooFit.LineStyle(ROOT.kDashed),
)
comb_pdf.plotOn(
x_frame,
RooFit.Components(bkg_pdf.namePtr().GetName()),
RooFit.LineStyle(ROOT.kDotted),
)
# comb_pdf.plotPull(n_sig)
if plot_importance >= 3:
x_frame.Draw()
if pulls:
pad_pulls.cd()
x_frame.SetTitleSize(0.05, "Y")
x_frame.SetTitleOffset(0.7, "Y")
x_frame.SetLabelSize(0.04, "Y")
# c11 = TCanvas("c11", "RooFit\ pulls" + data_name)
# c11.cd()
# frame_tmp = x_frame
frame_tmp = x.frame()
# frame_tmp.SetTitle("significance")
frame_tmp.SetTitle(r"Roofit\ pulls\ " + data_name)
frame_tmp.addObject(x_frame_pullhist)
frame_tmp.SetMinimum(-5)
frame_tmp.SetMaximum(5)
# frame_tmp.GetYaxis().SetTitle("significance")
frame_tmp.GetYaxis().SetNdivisions(5)
frame_tmp.SetTitleSize(0.1, "X")
frame_tmp.SetTitleOffset(1, "X")
frame_tmp.SetLabelSize(0.1, "X")
frame_tmp.SetTitleSize(0.1, "Y")
frame_tmp.SetTitleOffset(0.5, "Y")
frame_tmp.SetLabelSize(0.1, "Y")
frame_tmp.Draw()
# raw_input("")
if not blind and nll_profile:
# nll_range = RooRealVar("nll_range", "Signal for nLL", n_sig.getVal(),
# -10, 2 * n_sig.getVal())
sframe = n_sig.frame(RooFit.Bins(20), RooFit.Range(1, 1000))
# HACK for best n_cpu
lnL = comb_pdf.createNLL(data, RooFit.NumCPU(8))
# HACK end
lnProfileL = lnL.createProfile(ROOT.RooArgSet(n_sig))
lnProfileL.plotOn(sframe, RooFit.ShiftToZero())
c4 = TCanvas("c4", "NLL Profile")
c4.cd()
# input("press ENTER to show plot")
sframe.Draw()
if plot_importance >= 3:
pass
params = comb_pdf.getVariables()
params.Print("v")
# print bkg_cdf.getVal()
if sPlot:
sPlotData = ROOT.RooStats.SPlot(
"sPlotData",
"sPlotData",
data, # variable fitted to, RooDataSet
comb_pdf, # fitted pdf
ROOT.RooArgList(
n_sig,
n_bkg,
# NSigB0s
),
)
sweights = np.array(
[sPlotData.GetSWeight(i, "n_sig") for i in range(data.numEntries())]
)
return n_sig.getVal(), n_bkg_below_sig, sweights
if blind:
return blind_n_sig.getVal(), n_bkg_below_sig, comb_pdf
else:
return n_sig.getVal(), n_bkg_below_sig, comb_pdf
# nll_plot = RooRealVar("nll_plot", "NLL plotting range", 0.01, 0.99)
# nll_frame = nll_plot.frame()
# my_nll = comb_pdf.createNLL(data, RooFit.NumCPU(8))
# RooMinuit(my_nll).migrad()
# my_nll.plotOn(nll_frame)
# nll_frame.Draw()
# data.plotOn(xframe)
# comb_pdf.plotOn(xframe)
# xframe.Draw()
# return xframe
def pull_hist(pull_frame, pad_data, pad_pulls):
"""Add pulls into the current pad."""
# import ROOT
# from ROOT import RooRealVar, RooArgList, RooArgSet, RooAddPdf, RooDataSet, RooAbsReal
# from ROOT import RooFit, RooCBShape, RooExponential
# from ROOT import RooGaussian, RooMinuit
# from ROOT import TCanvas # HACK to prevent not plotting canvas by root_numpy import. BUG.
# from root_numpy import array2tree
# from ROOT import RooCategory, RooUnblindPrecision
pad_data.cd()
dataHist = pull_frame.getHist("datahistogram")
curve1 = pull_frame.getObject(
1
    ) # 1 is the index in the list of RooPlot items (see printout from massplot->Print("V"))
curve2 = pull_frame.getObject(2)
hresid1 = dataHist.makePullHist(curve1, True)
hresid2 = dataHist.makePullHist(curve2, True)
# RooHist* hresid = massplot->pullHist("datahistogram","blindtot")
pad_pulls.cd()
# resid = M_OS.frame()
pull_frame.addPlotable(hresid1, "P")
pull_frame.addPlotable(hresid2, "P")
pull_frame.SetTitle("")
# pull_frame.GetXaxis().SetTitle("#it{m}(#it{#pi}^{ #plus}#it{#pi}^{ #minus}) [MeV/#it{c}^{2}]")
# gStyle->SetPadLeftMargin(0.1)
def metric_vs_cut_fitted(
data,
predict_col,
fit_col,
sig_pdf,
bkg_pdf,
x,
region,
second_storage=None,
metric="punzi",
n_sig=None,
n_bkg=None,
stepsize=0.025,
plot_importance=3,
):
"""Calculate a metric vs a given cut by estimating the bkg from the fit.
Parameters
----------
data : |hepds_type|
predict_col : str
fit_col : str
region : tuple(numerical, numerical)
The lower and upper points to integrate over.
x : RooRealVar
"""
from raredecay.tools.metrics import punzi_fom, precision_measure
predict_col = dev_tool.entries_to_str(predict_col)
fit_col = dev_tool.entries_to_str(fit_col)
metric_name = metric
if metric == "punzi":
metric = punzi_fom
elif metric == "precision":
metric = precision_measure
    # TODO: convert metric strings to metric functions
n_steps = int(np.floor_divide(1, stepsize))
if n_steps < 1:
raise ValueError("stepsize has to be smaller then 1, not", stepsize)
cuts = np.linspace(0, 1, num=n_steps, endpoint=False)
plots = int(10 / n_steps)
current_plot = 0
if not isinstance(predict_col, str) or not isinstance(fit_col, str):
raise TypeError("predict_col and/or fit_col is not a string but has to be.")
scores = []
for cut in cuts:
        temp_plot_importance = (
            plot_importance if plot_importance > 2 and plots > current_plot else 0
        )
temp_data = data.copy_storage(columns=[predict_col, fit_col], add_to_name="")
temp_df = temp_data.pandasDF()
temp_df = temp_df[cut < temp_df[predict_col]]
temp_data.set_data(temp_df)
n_sig_weighted = sum(temp_data.get_weights()[temp_data.get_targets() == 1])
if second_storage is not None:
temp_second_storage = second_storage.copy_storage(
columns=[predict_col, fit_col], add_to_name=""
)
temp_df = temp_second_storage.pandasDF()
temp_df = temp_df[cut < temp_df[predict_col]]
temp_second_storage.set_data(temp_df)
n_sig_weighted += sum(
temp_second_storage.get_weights()[
temp_second_storage.get_targets() == 1
]
)
else:
temp_second_storage = second_storage
        n_sig_fit, n_bkg_fit, _ = fit_mass(
data=temp_data,
column=fit_col,
x=x,
sig_pdf=sig_pdf,
bkg_pdf=bkg_pdf,
n_sig=n_sig,
n_bkg=n_bkg,
blind=False,
nll_profile=False,
second_storage=temp_second_storage,
plot_importance=temp_plot_importance,
bkg_in_region=region,
)
scores.append(metric(n_signal=n_sig_weighted, n_background=n_bkg_fit))
return cuts, scores
if __name__ == "__main__":
import ROOT
from ROOT import (
RooRealVar,
RooArgList,
RooArgSet,
RooAddPdf,
RooDataSet,
RooAbsReal,
)
from ROOT import RooFit, RooCBShape, RooExponential
from ROOT import RooGaussian, RooMinuit
from ROOT import (
TCanvas,
) # HACK to prevent not plotting canvas by root_numpy import. BUG.
from root_numpy import array2tree
from ROOT import RooCategory, RooUnblindPrecision
# data = RooDataSet("data", )
from raredecay.tools.data_storage import HEPDataStorage
import pandas as pd
import matplotlib.pyplot as plt
# np.random.seed(40)
mode = "fit"
# mode = 'fit_metric'
# mode = "sPlot"
# mode = 'ks'
# create signal pdf BEGIN
lower_bound = 4800
# lower_bound = 5000
x = RooRealVar("x", "x variable", lower_bound, 6000)
# x = RooRealVar("x", "x variable", 4800, 6000)
# TODO: export somewhere? does not need to be defined inside...
mean = RooRealVar(
"mean", "Mean of Double CB PDF", 5280, 5270, 5290
) # , 5300, 5500)
sigma = RooRealVar("sigma", "Sigma of Double CB PDF", 40, 0, 45)
alpha_0 = RooRealVar("alpha_0", "alpha_0 of one side", 40, 30, 50)
alpha_1 = RooRealVar("alpha_1", "alpha_1 of other side", -40, -50, -30.0)
lambda_0 = RooRealVar("lambda_0", "Exponent of one side", 40, 30, 50)
lambda_1 = RooRealVar("lambda_1", "Exponent of other side", 40, 30, 50)
# TODO: export somewhere? pdf construction
frac = RooRealVar("frac", "Fraction of crystal ball pdfs", 0.479, 0.01, 0.99)
crystalball1 = RooCBShape(
"crystallball1", "First CrystalBall PDF", x, mean, sigma, alpha_0, lambda_0
)
crystalball2 = RooCBShape(
"crystallball2", "Second CrystalBall PDF", x, mean, sigma, alpha_1, lambda_1
)
doubleCB = RooAddPdf(
"doubleCB", "Double CrystalBall PDF", crystalball1, crystalball2, frac
)
# create signal pdf END
# create bkg-pdf BEGIN
lambda_exp = RooRealVar(
"lambda_exp", "lambda exp pdf bkg", -0.002, -10.0, -0.000001
)
bkg_pdf = RooExponential("bkg_pdf", "Background PDF exp", x, lambda_exp)
# create bkg-pdf END
n_sig = 25000
data = pd.DataFrame(
np.random.normal(loc=5280, scale=37, size=(n_sig, 3)),
columns=["x", "y", "pred"],
)
# data['pred'] = np.array([min((abs(y), 0.99)) for y in np.random.normal(loc=0.6, scale=0.25, size=n_sig)])
bkg_data = np.array(
[
i
for i in (np.random.exponential(scale=300, size=(7500, 3)) + 4800)
if i[0] < 6000
]
)
bkg_data[:, 2] = np.array(
[
min((abs(y), 0.96))
for y in np.random.normal(loc=0.4, scale=0.4, size=len(bkg_data))
]
)
data = pd.concat(
[data, pd.DataFrame(bkg_data, columns=["x", "y", "pred"])], ignore_index=True
)
data = HEPDataStorage(
data, target=np.concatenate((np.ones(n_sig), np.zeros(len(bkg_data))))
)
data_copy = data.copy_storage()
if mode == "fit":
fit_result = fit_mass(
data=data,
column="x",
sig_pdf=doubleCB,
x=x,
bkg_pdf=bkg_pdf,
# blind=False,
blind=(5100, 5380),
plot_importance=4, # bkg_in_region=(5100, 5380)
)
print(fit_result)
print("True values: nsig =", n_sig, " n_bkg =", len(bkg_data))
elif mode == "fit_metric":
result = metric_vs_cut_fitted(
data=data,
predict_col="pred",
fit_col="x",
sig_pdf=doubleCB,
bkg_pdf=bkg_pdf,
x=x,
region=(5100, 5380),
stepsize=0.01,
)
print(result)
plt.plot(*result)
elif mode == "sPlot":
fit_result = fit_mass(
data=data,
column="x",
sig_pdf=doubleCB,
x=x,
bkg_pdf=bkg_pdf,
blind=False,
plot_importance=1, # bkg_in_region=(5100, 5380)
sPlot=True,
)
n_sig, n_bkg, sweights = fit_result
import copy
sweights = copy.deepcopy(sweights)
plt.figure("new figure")
# plt.hist(range(100))
# plt.figure("new figure")
plt.hist(sweights, bins=30)
data_copy.set_weights(sweights)
data_copy.plot()
elif mode == "ks":
pass
input("Finished, press 'Enter' to close ROOT plots.")
plt.show()
input("Finished, press 'Enter' to close plots.")
```
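A short sketch of the weighted two-sample KS statistic defined above, on toy numpy data (no ROOT needed; the numbers are illustrative):
```python
import numpy as np
from raredecay.analysis.statistics import ks_2samp
rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, 2000)
b = rng.normal(0.2, 1.0, 2000)
# Unweighted: missing weights default to ones.
print(ks_2samp(a, b))
# Weighted: upweighting the right tail of `a` shifts its weighted CDF towards
# `b`, which changes the maximal CDF distance the statistic reports.
w_a = np.where(a > 0, 2.0, 1.0)
print(ks_2samp(a, b, weights1=w_a, weights2=np.ones_like(b)))
```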
#### File: raredecay/raredecay/meta_config.py
```python
import pickle as pickle
import multiprocessing
import random
import numpy as np
# ==============================================================================
# Parameters which can be changed WITHOUT affecting stability of a single run.
# Be aware: certain tasks like loading a pickled file may fail if the file-
# endings are changed.
# ==============================================================================
__all__ = [
"PROMPT_FOR_COMMENT",
"MULTITHREAD",
"MULTIPROCESSING",
"n_cpu_max",
"use_gpu",
"use_stratified_folding",
"get_n_cpu",
"set_parallel_profile",
"PICKLE_DATATYPE",
"ROOT_DATATYPE",
"PICKLE_PATH",
"GIT_DIR_PATH",
"PICKLE_PROTOCOL",
"SUPPRESS_WRONG_SKLEARN_VERSION",
"SUPPRESS_FUTURE_IMPORT_ERROR",
"MAX_AUTO_FOLDERS",
"NO_PROMPT_ASSUME_YES",
"MAX_ERROR_COUNT",
"MAX_FIGURES",
"DEFAULT_OUTPUT_FOLDERS",
"DEFAULT_HIST_SETTINGS",
"DEFAULT_SAVE_FIG",
"DEFAULT_EXT_SAVE_FIG",
"DEFAULT_LOGGER_CFG",
"DEFAULT_CLF_XGB",
"DEFAULT_CLF_TMVA",
"DEFAULT_CLF_RDF",
"DEFAULT_CLF_GB",
"DEFAULT_CLF_ADA",
"DEFAULT_CLF_NN",
"DEFAULT_CLF_CONFIG",
"DEFAULT_CLF_NAME",
"max_difference_feature_selection",
"DEFAULT_HYPER_GENERATOR",
"loggers",
"verbosity",
"plot_verbosity",
"set_verbosity",
"set_plot_verbosity",
"rand_seed",
"randint",
"randfloat",
"set_seed",
"error_occured",
"warning_occured",
]
# ------------------------------------------------------------------------------
# General run parameters
# ------------------------------------------------------------------------------
PROMPT_FOR_COMMENT = False # lets you add an extension to the run/file name
MULTITHREAD = True # if False, no parallel work will be done
MULTIPROCESSING = True # requires MULTITHREAD to be true, else it's False
n_cpu_max = 1 # VAGUE ESTIMATION but not a strict limit. If None, number of cores will be assigned
use_gpu = (
False # If True, optimisation for GPU use is done (e.g. nn not parallel on cpu).
)
# This does NOT use the GPU yet, but avoids using the cpu where the GPU will be invoked
use_stratified_folding = (
True # StratifiedKFolding is better, from a statistical point of view,
)
# but also needs more memory; mostly insignificantly more, but it can be large
def get_n_cpu(n_cpu=None):
"""Return the number of cpus to use. None means all. Can be -1, -2..."""
if n_cpu is None:
n_cpu = -1
if isinstance(n_cpu, int):
if n_cpu < 0:
n_cpu = max([n_cpu_max + n_cpu + 1, 1]) #
n_cpu = min([n_cpu, n_cpu_max])
return n_cpu
# set meta-config variables
def set_parallel_profile(n_cpu=-1, gpu_in_use=False, stratified_kfolding=True):
"""Set the number of cpus and whether a gpu is in use or not."""
global MULTIPROCESSING, MULTITHREAD, n_cpu_max, use_gpu, use_stratified_folding
use_stratified_folding = stratified_kfolding
MULTIPROCESSING = MULTITHREAD = True
if n_cpu == 1:
n_cpu_max = 1
elif n_cpu is None:
pass
elif isinstance(n_cpu, int):
if n_cpu > 1:
n_cpu_max = n_cpu
elif n_cpu < 0:
n_cpu_max = max(
[multiprocessing.cpu_count() + n_cpu + 1, 1]
) # -1 is "all cpus"
else:
raise ValueError("Invalid n_cpu argument: " + str(n_cpu))
else:
raise TypeError(
"Wrong n_cpu argument, type: " + str(type(n_cpu)) + " not allowed"
)
use_gpu = gpu_in_use if gpu_in_use is not None else use_gpu
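# Illustrative example (not part of the original module) of how the two helpers
# above interact: after set_parallel_profile(n_cpu=8), n_cpu_max == 8, so
#   get_n_cpu(None) -> 8   (None is treated as -1, i.e. "all available")
#   get_n_cpu(-2)   -> 7   (negative values count backwards from n_cpu_max)
#   get_n_cpu(3)    -> 3   (positive values are capped at n_cpu_max)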
# ------------------------------------------------------------------------------
# Datatype ending variables
# ------------------------------------------------------------------------------
# The ending of a certain variable type. Change with caution and good reason.
PICKLE_DATATYPE = "pickle" # default: 'pickle'
ROOT_DATATYPE = "root" # default 'root'
# ------------------------------------------------------------------------------
# SHARED OBJECT PATHS INPUT & OUTPUT
# ------------------------------------------------------------------------------
# folder where the pickled objects are stored
PICKLE_PATH = "/home/mayou/Documents/uniphysik/Bachelor_thesis/analysis/pickle/"
# folder where the git-directory is located. Can be an empty string
GIT_DIR_PATH = (
"/home/mayou/Documents/uniphysik/Bachelor_thesis/"
+ "python_workspace/raredecay/raredecay"
)
# ------------------------------------------------------------------------------
# Debug related options
# ------------------------------------------------------------------------------
# These options should not directly affect the behaviour (except speed etc.)
# IF the right environment is used. Don't touch unless you have good reasons to.
PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL # default: pickle.HIGHEST_PROTOCOL
SUPPRESS_WRONG_SKLEARN_VERSION = False # Should NOT BE CHANGED.
SUPPRESS_FUTURE_IMPORT_ERROR = False
# ==============================================================================
# Parameters which may affect stability
# setting for example MAX_AUTO_FOLDERS to 0, it will surely not work
# ==============================================================================
# ------------------------------------------------------------------------------
# Limits for auto-methods
# ------------------------------------------------------------------------------
# If a folder already exists and no overwrite is in use, a new folder (with a
# trailing number) will be created. A limit can be set to prevent filling the
# disk in case of an endless loop-error or similar.
MAX_AUTO_FOLDERS = 10000 # max number of auto-generated folders by initialize
NO_PROMPT_ASSUME_YES = (
    True # no user input required, assumes yes (e.g. when overwriting files)
)
MAX_ERROR_COUNT = (
1000 # set a maximum number of possible errors (not able to save figure etc.)
)
# Criticals will end the run anyway.
MAX_FIGURES = 1000 # max number of figures to be plotted
# ==============================================================================
# DEFAULT SETTINGS for different things
# ==============================================================================
# ------------------------------------------------------------------------------
# Output and plot configurations
# ------------------------------------------------------------------------------
# available output folders. Do NOT CHANGE THE KEYS as modules depend on them!
# You may add additional key-value pairs or just change some values
# The name of the folders created inside the run-folder
DEFAULT_OUTPUT_FOLDERS = dict(
log="log", # contains the logger informations
plots="plots", # contains all the plots
results="results", # contains the written output
config="config", # NOT YET IMPLEMENTED, but cound contain the config file used
)
# The default histogram settings used for some plots
DEFAULT_HIST_SETTINGS = dict(
bins=40, # default: 40
density=True, # default: True, useful for shape comparison of distributions
alpha=0.5, # transparency [0.0, 1.0]
histtype="stepfilled",
)
# Default configuration for most of the figures for save_fig from OutputHandler()
DEFAULT_SAVE_FIG = dict(
file_format=["png", "pdf"], # default: ['png', 'svg'], the file formats
dpi=150, # to be saved to. For implementations, see OutputHandler()
to_pickle=True, # whether to pickle the plot (and therefore be able to replot)
# save_cfg=None
)
# Default configuration for additional figures (plots you mostly do not care
# about but may be happy to have them saved somewhere)
DEFAULT_EXT_SAVE_FIG = dict(
file_format=["png", "pdf"],
to_pickle=True
# save_cfg=None
)
# A logger writes some stuff during the run just for the control of the
# correct execution. The log will be written to console, to file, or both.
# Each message has a level ranging from the lowest (most unimportant) 'debug'
# to 'critical'. You can specify which level (+ the more important one) will
# appear where.
# Example: you can set console to 'error' and file to 'info'. This way you
# also collect seemingly unnecessary information (which may later be nice
# for checking whether a variable was meaningful), but on the screen you will
# only see if an error or critical occurs.
DEFAULT_LOGGER_CFG = dict(
logging_mode="console", # define where the logger is written to
# take 'both', 'file', 'console' or 'no'
log_level_file="debug", # 'debug', 'info', warning', 'error', 'critical'
# specifies the level to be logged to the file
log_level_console="debug", # 'debug', 'info', warning', 'error', 'critical'
# specify the level to be logged to the console
overwrite_file=True,
# specifies whether it should overwrite the log file each time
# or instead make a new one each run
log_file_name="AAlastRun",
# the beginning of the name of the logfile, like 'project1'
log_file_dir=DEFAULT_OUTPUT_FOLDERS.get("log"),
)
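# Hypothetical sketch (illustrative only, not used anywhere in the package): a
# run-specific logger configuration can be derived from the defaults above by
# overriding single keys. The keys are the ones defined in DEFAULT_LOGGER_CFG;
# the values below are just an example.
def _example_logger_cfg():
    """Return a customized logger configuration (illustrative sketch, not API)."""
    return dict(
        DEFAULT_LOGGER_CFG,
        logging_mode="both",  # write the log to file and console
        log_level_console="info",  # keep the console output short
        log_file_name="project1",  # human-readable prefix of the log file
    )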
# ------------------------------------------------------------------------------
# Classifier configurations
# ------------------------------------------------------------------------------
# Some modules use classifiers for different tasks where it is mostly not
# important to have a fully optimized classifier but just a "good enough" one.
# Like in the data_ROC where you can see how well two datasets differ from
# each other.
# Changing these default values will surely affect your results (over- or
# underfitting, for example), but is mostly not required at all.
DEFAULT_CLF_XGB = dict(
n_estimators=150, # default 75
eta=0.1, # default 0.1, learning-rate
min_child_weight=0, # #0 stage 2 to optimize
max_depth=5, # #6 stage 2 to optimize
gamma=0.1, # stage 3, minimum loss-reduction required to make a split.
# Higher value-> more conservative
subsample=0.8, # stage 4, subsample of data. 1 means all data, 0.7 means only 70% of data
# for a tree
colsample=1,
)
DEFAULT_CLF_TMVA = dict(method="kBDT")
DEFAULT_CLF_RDF = dict(
n_estimators=150,
max_features=None,
# max_depth=100
)
DEFAULT_CLF_GB = dict(
n_estimators=200, learning_rate=0.15, max_depth=5, subsample=0.9, max_features=None
)
DEFAULT_CLF_ADA = dict(n_estimators=200, learning_rate=0.2)
DEFAULT_CLF_NN = dict(
layers=[500, 500, 500],
hidden_activation="logistic",
output_activation="linear",
input_noise=0, # [0,1,2,3,4,5,10,20],
hidden_noise=0,
input_dropout=0,
hidden_dropout=0.03,
decode_from=1,
weight_l1=0.01,
weight_l2=0.01,
scaler="standard",
trainers=[
{
"optimize": "adagrad",
"patience": 10,
"learning_rate": 0.1,
"min_improvement": 0.01,
"momentum": 0.5,
"nesterov": True,
"loss": "xe",
}
],
)
# default clf config collection
DEFAULT_CLF_CONFIG = dict(
xgb=DEFAULT_CLF_XGB,
tmva=DEFAULT_CLF_TMVA,
gb=DEFAULT_CLF_GB,
ada=DEFAULT_CLF_ADA,
nn=DEFAULT_CLF_NN,
rdf=DEFAULT_CLF_RDF,
)
# default clf names collection
DEFAULT_CLF_NAME = dict(
xgb="XGBoost clf",
tmva="TMVA clf",
gb="Gradient Boosted Trees clf",
ada="AdaBoost over Trees clf",
nn="Theanets Neural Network clf",
knn="K-Nearest Neighbour clf",
rdf="Random Forest clf",
)
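# Hypothetical sketch (illustrative only): the two dictionaries above are meant to
# be looked up by the short classifier key such as 'xgb'. A caller could combine a
# default configuration with user-supplied overrides roughly like this:
def _example_clf_lookup(clf="xgb", **user_overrides):
    """Return (human-readable name, config) for a classifier key (sketch only)."""
    config = dict(DEFAULT_CLF_CONFIG[clf], **user_overrides)
    return DEFAULT_CLF_NAME[clf], config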
# ------------------------------------------------------------------------------
# Hyper parameter optimization
# ------------------------------------------------------------------------------
# The backwards feature selection first uses all features and determines the ROC AUC.
# Then it removes one feature at a time; the one which yields the smallest difference
# to the 'all_features' ROC AUC is removed. This continues until the smallest
# score difference is bigger than max_difference_feature_selection.
max_difference_feature_selection = (
0.08 # the biggest score difference to 'all features'
)
# allowed in auc when removing features
DEFAULT_HYPER_GENERATOR = "subgrid" # The default cenerater for the hyperspace search
# ==============================================================================
# END OF CONFIGURABLE PARAMETERS - DO NOT CHANGE WHAT IS BELOW
# ==============================================================================
# DO NOT CROSS THIS LINE DO NOT CROSS THIS LINE DO NOT CROSS THIS LINE
# DO NOT CROSS THIS LINE DO NOT CROSS THIS LINE DO NOT CROSS THIS LINE
# DO NOT CROSS THIS LINE DO NOT CROSS THIS LINE DO NOT CROSS THIS LINE
# ==============================================================================
# START INTERNAL CONFIGURATION - DO NOT CHANGE
# ==============================================================================
# run_config = "raredecay.run_config.config" # manipulated by OutputHandler()
loggers = {}
verbosity = 4
plot_verbosity = 3
def set_verbosity(new_verbosity):
"""Set the verbosity."""
global verbosity
verbosity = round(new_verbosity)
_check_verbosity(verbosity)
def set_plot_verbosity(new_plot_verbosity):
"""Set the plot verbosity."""
global plot_verbosity
plot_verbosity = round(new_plot_verbosity)
_check_verbosity(plot_verbosity)
def _check_verbosity(verbosity):
if verbosity not in list(range(-1, 7)):
raise ValueError("Verbosity has to be int {0, 1, 2, 3, 4, 5}")
# ==============================================================================
# Random integer generator for pseudo random generator (or other things)
# ==============================================================================
rand_seed = 42
# random.randint(123, 1512412) # 357422 or 566575
# random.seed(rand_seed)
def randint():
"""Return random integer."""
return random.randint(51, 523753)
def randfloat():
"""Return a random float between 0 and 1."""
return random.random()
def set_seed(seed):
"""Set the global random seed."""
global rand_seed
rand_seed = seed
random.seed(rand_seed)
np.random.seed(rand_seed)
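# Hypothetical usage sketch (defined only, never called on import): fixing the
# global seed makes the pseudo-random draws reproducible across runs.
def _example_reproducible_draws():
    """Fix the seed and return two deterministic random integers (sketch only)."""
    set_seed(357422)
    return randint(), randint()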
# ------------------------------------------------------------------------------
# parallel profile
# ------------------------------------------------------------------------------
# ==============================================================================
# ERROR HANDLING
# ==============================================================================
_error_count = 0 # increases if an error happens
_warning_count = 0 # increases if an error happens
def error_occured(max_error_count=MAX_ERROR_COUNT):
"""Call this function every time a non-critical error (saving etc) occurs."""
global _error_count
_error_count += 1
if _error_count >= max_error_count:
raise RuntimeError("Too many errors encountered from different sources")
def warning_occured():
"""Call this function every time a warning occurs."""
global _warning_count
_warning_count += 1
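# Hypothetical usage sketch: a caller that, for example, saves a figure could count
# non-critical failures instead of aborting immediately. The helper name and the
# callable are purely illustrative.
def _example_guarded_call(action):
    """Run *action* and register a non-critical error if it fails (sketch only)."""
    try:
        action()
    except Exception:
        error_occured()  # aborts the run only after MAX_ERROR_COUNT failures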
if __name__ == "__main__":
pass
```
#### File: raredecay/tools/data_storage.py
```python
import copy
import warnings
import math
import random
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from rep.data.storage import LabeledDataStorage
from ..tools import data_tools, dev_tool
try:
from raredecay.globals_ import out
out_imported = True
except ImportError:
warnings.warn(
"could not import out. Some functions regarding output "
+ "(save figure etc.) won't be available",
ImportWarning,
)
out_imported = False
# TODO: import config not needed?? remove because its from the old structure
# import configuration
from .. import meta_config as meta_cfg
from .. import config as cfg
modul_logger = dev_tool.make_logger(__name__, **cfg.logger_cfg)
class HEPDataStorage:
"""Data-storage for data, weights, targets; conversion; plots and more"""
# define constants for independence
__ROOT_DATATYPE = meta_cfg.ROOT_DATATYPE
__figure_number = 0
__figure_dic = {}
latex_replacements = {
# "CHI2": r"\chi^2",
r"_PT": r" p_T",
r"JPsi": r"J/\psi",
r"K1": r"K_1 ",
r"_1270": "",
r"_ENDVERTEX_CHI2": r"\ \chi^2_{VTX}",
r"_IPCHI2": r"\ \chi^2_{IP}",
r"_FDCHI2": r"\ \chi^2_{FD}",
r"_TRACK_CHI2": r"\ \chi^2_{track}",
r"_OWNPV": r"\ ownPV",
r"_CosTheta": r"\ cos(\theta)",
r"NDOF": r"/N_{degree of freedom}",
r"AMAXDOCA": r"\ AMAXDOCA",
# "_": "\ "
}
def __init__(
self,
data,
index=None,
target=None,
sample_weights=None,
data_name=None,
data_name_addition=None,
column_alias=None,
):
"""Initialize instance and load data.
Parameters
----------
data : |data_type|
The data itself. This can be two different types
- **root-tree dict** (*root-dict*):
Dictionary which specifies all the information to convert a root-
tree to an array. Directly given to :py:func:`~root_numpy.root2rec`
- **pandas DataFrame**:
A pandas DataFrame. The index (if not explicitly defined)
and column names will be taken.
index : 1-D array-like
The indices of the data that will be used.
target : list or 1-D array or int {0, 1}
Labels the data for the machine learning. Usually the y.
sample_weights : |sample_weights_type|
|sample_weights_docstring|
.. note:: If None or 1 specified, 1 will be assumed for all.
data_name : str
| Name of the data, human-readable. Displayed in the title of
plots.
| *Example: 'Bu2K1piee mc', 'beta-decay real data' etc.*
data_name_addition : str
| Additional remarks to the data, human readable. Displayed in
the title of plots.
| *Example: 'reweighted', 'shuffled', '5 GeV cut applied' etc.*
column_alias : |column_alias_type|
|column_alias_docstring|
"""
# initialize logger
self.logger = modul_logger
# initialize index
# self._index = None
# initialize data
# self._data = None
self._data_type = None
self.column_alias = {} if column_alias is None else column_alias
self._fold_index = None # list with indeces of folds
self._fold_status = None # tuple (my_fold_number, total_n_folds)
self._length = None
self.set_data(data=data, index=index)
# self._columns = None
# data name
self._name = ["", "", ""]
data_name = "unnamed data" if data_name is None else data_name
self.data_name = data_name
self.data_name_addition = data_name_addition
self.fold_name = None
# initialize targets
self._set_target(target=target)
# # data-labels human readable, initialize with the column name
# self._label_dic = {}
# self._label_dic = {col: col for col in self.columns if self._label_dic.get(col) is None}
# TODO: delete?
# self.set_labels(data_labels=data_labels)
# initialize weights
self._weights = None
self.set_weights(sample_weights)
# plot settings
hist_settings = meta_cfg.DEFAULT_HIST_SETTINGS
self.hist_settings = hist_settings
self.supertitle_fontsize = 18
def __len__(self):
if self._length is None:
self._set_length()
return self._length
# TODO: remove obsolet
def get_name(self):
"""Return the human-readable name of the data as a string."""
warnings.warn(
"Depreceated, obj.get_name() will be removed. Use obj.name instead.",
FutureWarning,
)
return self._get_name()
@property
def name(self):
"""Return the **full** human-readable name of the data as a string."""
return self._get_name()
def _get_name(self):
out_str = data_tools.obj_to_string(self._name, separator=" ")
return out_str
def _set_name(self, data_name=None, data_name_addition=None, fold_name=None):
"""Set the data name."""
# set the new name in self._name
for i, name in enumerate([data_name, data_name_addition, fold_name]):
if name is not None:
self._name[i] = str(name)
# TODO: change the naming into a dict?
@property
def data_name(self):
"""The name of the data."""
return self._name[0]
@property
def data_name_addition(self):
"""The data name addition."""
return self._name[1]
@property
def fold_name(self):
"""The name of the fold (like *fold 2 of 5*)."""
return self._name[2]
@data_name.setter
def data_name(self, data_name):
self._set_name(data_name=data_name)
@data_name_addition.setter
def data_name_addition(self, data_name_addition):
self._set_name(data_name_addition=data_name_addition)
@fold_name.setter
def fold_name(self, fold_name):
self._set_name(fold_name=fold_name)
@property
def data_type(self):
""" "Return the data-type like 'root', 'df' etc."""
return self._data_type
def get_index(self):
"""Return the index used inside the DataStorage. Advanced feature."""
warnings.warn(
"Will be removed in the future. Use obj.index instead", FutureWarning
)
return self._make_index()
@property
def index(self):
"""The internal index"""
return self._make_index()
@index.setter
def index(self, index):
self._set_index(index)
def _make_index(self, index=None):
"""Return the index, else the self._index. If none exist, **create**
the normal one
It has the following priorities:
1. if the given index is not None, it will be taken
2. next look for the self._index. If there is one, it will be returned
3. otherwise, a list of indeces as usuall (0, len-1) will be returned
"""
if index is None:
temp = list(range(len(self))) if self._index is None else self._index
else:
temp = index
return temp
def _set_index(self, index):
"""If index is not None -> assign. Else try to get from data"""
if index is None:
self._index = None
if self._data_type == "root":
pass # no index contained in root-dicts
elif self._data_type == "array":
pass # no index information contained in an array
elif self._data_type == "df":
index_list = self._data.index.tolist()
# TODO: remove HACK with length, replace with len(self)
if not index_list == list(range(len(self))): # if special indexing
self._index = index_list
else:
self._index = index
@property
def columns(self):
"""The columns/branches of the data"""
return self._columns
@columns.setter
def columns(self, columns):
# TODO: maybe check?
if columns is not None:
columns = data_tools.to_list(columns)
columns = dev_tool.entries_to_str(columns)
self._set_columns(columns=columns)
def _set_columns(self, columns):
if columns is None:
if self._data_type == "root":
self._columns = data_tools.to_list(self._data["branches"])
elif self._data_type == "df":
self._columns = data_tools.to_list(self._data.columns.values)
# TODO: remove below?
# elif self._data_type == 'array':
# self._columns = ['feature_' + str(i) for i in range(len(self._data))]
else:
self._columns = data_tools.to_list(columns)
self._columns = [str(col) for col in self._columns]
def _set_length(self):
# determine whether to set length individually from the data or not
index = self._index
if index is None:
if self._data_type == "root":
temp_root_dict = copy.deepcopy(self._data)
temp_branch = temp_root_dict.pop(
"branches"
) # remove to only use one branch
temp_branch = data_tools.to_list(temp_branch)
self._length = len(
data_tools.to_pandas(
dict(branches=temp_branch[0], **temp_root_dict)
)
)
elif self._data_type == "df":
self._length = len(self._data)
# TODO: remove below?
# elif self._data_type == 'array':
# self._length = self._data.shape[1]
else:
self._length = len(index)
@staticmethod
def _get_data_type(data):
"""Return the type of the data.
- 'df' : pandas DataFrame
- 'root': root-file
- 'array': numpy array
"""
data_type = None
if isinstance(data, dict):
if "filenames" in data and data["filenames"].endswith(
HEPDataStorage.__ROOT_DATATYPE
):
data_type = "root"
elif isinstance(data, pd.DataFrame):
data_type = "df"
# TODO: remove below
# elif isinstance(data, (np.ndarray, np.array)):
# data_type = 'array'
return data_type
@property
def data(self):
"""Return the data as is without conversion, e.g. a root-dict, pandasDF etc."""
return self._data
def set_data(self, data, index=None, columns=None, column_alias=None):
"""Set the data and also change index and columns.
Parameters
----------
data : |data_type|
The new data
index : |index_type|
|index_docstring|
columns : list(str, str, str,...)
The columns for the data to use
column_alias : |column_alias_type|
|column_alias_docstring|
"""
if column_alias is not None:
self.column_alias.update(column_alias)
self.column_alias = dev_tool.entries_to_str(self.column_alias)  # keep the already-set aliases as well
self._set_data(data=data, index=index, columns=columns)
def _set_data(self, data, index=None, columns=None):
"""Set the data, length- and columns-attribute.
Convert the data to the right (root-dict, df etc.) format (and save).
Also set the length and columns.
currently implemented:
- ROOT-data file (*root-dict*)
- Pandas DataFrame
"""
# Python2/3 compatibility, str
if isinstance(data, dict):
data = dev_tool.entries_to_str(data)
# get the data_type
self._data = data
self._data_type = self._get_data_type(data)
self.index = index
self.columns = columns
self._set_length()
# convert the data (and save it)
# root data
if self._data_type == "root":
pass
# pandas DataFrame
elif self._data_type == "df":
self._data = self._make_df(index=self._index) # No cols, it's set above
# TODO: remove below?
# elif self._data_type == 'array':
# self._data = self._make_df(index=self._index)
# warnings.warn(DeprecationWarning, "Not safe, it's better to use pandas DataFrame")
else:
raise NotImplementedError("Other dataformats are not yet implemented")
def get_weights(self, index=None, normalize=True, **kwargs):
"""Return the weights of the specified indeces or, if None, return all.
Parameters
----------
normalize : boolean or float > 0
If True, the weights will be normalized to 1 (the mean is 1).
If a float is provided, the mean of the weights will be equal
to *normalize*. So *True* and *1* will yield the same results.
index : |index_type|
|index_docstring|
Return
------
out: 1-D pandas Series
Return the weights as pandas Series
"""
index = self._index if index is None else list(index)
length = len(self) if index is None else len(index)
normalize = 1 if normalize is True else normalize
normalize = 0 if normalize is None or normalize is False else normalize
second_storage = kwargs.get("second_storage")
normalize_1 = 1
normalize_2 = 1
# HACK
weights_ratio = normalize
# TODO: implement if targets are different
if weights_ratio > 0 and second_storage is not None:
weights_1 = self.get_weights(index=index)
weights_2 = second_storage.get_weights()
sum_weight_1 = float(sum(weights_1))
sum_weight_2 = float(sum(weights_2))
ratio_1 = weights_ratio * sum_weight_2 / sum_weight_1
self.logger.info("ratio_1 = " + str(ratio_1))
if ratio_1 >= 1:
ratio_2 = 1.0
else:
ratio_2 = 1.0 / ratio_1
ratio_1 = 1.0
normalize_1 = ratio_1
normalize_2 = ratio_2
elif weights_ratio > 0 and second_storage is None:
normalize_1 = weights_ratio
else:
normalize_1 = normalize_2 = False
weights_out = self._get_weights(index=index, normalize=normalize_1)
if dev_tool.is_in_primitive(weights_out, (None, 1)):
weights_out = pd.Series(data=np.ones(length), index=index) * normalize_1
if second_storage is not None:
weights_2 = second_storage.get_weights(normalize=normalize_2)
weights_out = np.concatenate((weights_out, weights_2))
return weights_out
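# Worked example for the ratio computation above (numbers purely illustrative):
# with weights_ratio = 1, sum(weights_1) = 50 and sum(weights_2) = 200 one gets
# ratio_1 = 1 * 200 / 50 = 4 >= 1, hence ratio_2 = 1. The weights of the calling
# storage are then normalized with 4 and those of the second storage with 1,
# i.e. the sample with the smaller weight sum is scaled up rather than the
# larger one scaled down.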
def _get_weights(self, index=None, normalize=True):
"""Return pandas Series of weights or None, 1."""
# initialize values
index = self._index if index is None else list(index)
length = len(self) if index is None else len(index)
# TODO: allow other primitive weights
if dev_tool.is_in_primitive(self._weights, (None, 1)):
weights_out = self._weights
if normalize != 1 or normalize is not True:
weights_out = pd.Series(np.ones(length), index=index)
else:
normalize = False
elif index is None:
weights_out = self._weights
else:
weights_out = self._weights.loc[index]
weights_out = copy.deepcopy(weights_out)
if normalize or normalize > 0:
normalize = 1 if normalize is True else normalize
weights_out *= normalize / weights_out.mean()
return weights_out
@property
def weights(self):
return self.get_weights(index=None, normalize=False)
@weights.setter
def weights(self, sample_weights):
"""Set the weights of the sample.
Parameters
----------
sample_weights : |sample_weights_type|
|sample_weights_docstring|
"""
self.set_weights(sample_weights=sample_weights)
def set_weights(self, sample_weights, index=None):
"""Set the weights of the sample.
Parameters
----------
sample_weights : |sample_weights_type|
|sample_weights_docstring|
index : 1-D array or list or None
The indices for the weights to be set. Only the index given will be
set/used as weights.
"""
index = self._index if index is None else index
if isinstance(sample_weights, (str, dict)) and self._data_type == "root":
assert (
isinstance(sample_weights, list) and (len(sample_weights) == 1)
) or isinstance(sample_weights, str), "Can only be one branche"
assert isinstance(
self._data, dict
), "data should be root-dict but is no more..."
tmp_root = copy.deepcopy(self._data)
if isinstance(sample_weights, str):
sample_weights = {"branches": sample_weights}
tmp_root.update(sample_weights)
sample_weights = data_tools.to_ndarray(tmp_root)
self._set_weights(sample_weights=sample_weights, index=index)
def _set_weights(self, sample_weights, index=None):
"""Set the weights"""
index = self.index if index is None else index
length = len(self) if index is None else len(index)
if dev_tool.is_in_primitive(sample_weights, (None, 1)):
if index is None or len(self) == len(index):
self._weights = 1
return
else:
sample_weights = pd.Series(np.ones(len(index)), index=index)
# else:
# sample_weights = np.ones(length)
elif isinstance(sample_weights, pd.Series):
sample_weights = sample_weights[index]
else:
sample_weights = pd.Series(sample_weights, index=index, dtype="f8")
if len(self) == length and index is None:
self._weights = sample_weights
else:
if dev_tool.is_in_primitive(self._weights, (None, 1)):
self._weights = pd.Series(np.ones(len(self)), index=self._index)
self._weights.update(sample_weights)
def set_root_selection(self, selection, exception_if_failure=True):
"""Set the selection in a root-file. Only possible if a root-file is provided."""
warnings.warn("Method set_root_selection very unsafe currently!")
meta_cfg.warning_occured()
if self._data_type == "root":
self.data["selection"] = selection
self.set_data(self.data, columns=self.columns)
self.data_name_addition += "INDEX CRASHED!"
elif exception_if_failure:
raise RuntimeError("selection could not be applied, no root-dict")
else:
self.logger.error("selection not applied, no root-dict")
def pandasDF(self, columns=None, index=None):
"""Return a pandas DataFrame representation of the data
Return a pandas DataFrame.
Parameters
---------
columns : str
Arguments for the :py:func:`~root_numpy.root2rec` function.
index : |index_type|
|index_docstring|
"""
# initialize variables
index = None if index is None else list(index)
if columns is None:
columns = None
else:
columns = data_tools.to_list(columns)
columns = dev_tool.entries_to_str(columns)
# create data
data_out = self._make_df(columns=columns, index=index, copy=True)
# TODO: leave away below?!
# if not data_out.index.tolist() == range(len(data_out)): # if not, convert the indices to
# data_out.reset_index(drop=True, inplace=True)
return data_out
def _make_df(self, columns=None, index=None, copy=False):
"""Return a DataFrame from the internal data. Does some dirty, internal work."""
# initialize data
# TODO: remove trailing comment?
data = self._data # if dev_tool.is_in_primitive(data) else data
columns = self.columns if columns is None else data_tools.to_list(columns)
index = self._index if index is None else data_tools.to_list(index)
if self._data_type == "root":
# update root dictionary
# TODO: change keyword branches or something, due to incompatibility with root_pandas
temp_root_dict = dict(data, **{"branches": columns})
for key, val in list(temp_root_dict.items()):
if dev_tool.is_in_primitive(val, None):
temp_root_dict[key] = self.data.get(key)
data = data_tools.to_pandas(temp_root_dict, columns=columns, index=index)
# if index is not None:
# data.set_index([index], inplace=True, verify_integrity=True)
# TODO: remove below?
# elif self._data_type == 'array':
# data = pd.DataFrame(data, index=index, columns=columns, copy=copy)
elif self._data_type == "df":
if columns is not None:
data = data[columns]
else:
raise NotImplementedError("Unknown/not yet implemented data type")
assert isinstance(data, pd.DataFrame), "data did not convert correctly"
data = data if index is None else data.loc[index]
if isinstance(self.column_alias, dict) and len(self.column_alias) > 0:
data.rename(columns=self.column_alias, inplace=True, copy=False)
return data
# def get_labels(self, columns=None, as_list=False):
# """Return the human readable branch-labels of the data.
#
# Parameters
# ----------
# columns : list with str or str
# The labels of the columns to return
# as_list : boolean
# If true, the labels will be returned as a list instead of a dict.
#
# Return
# ------
# out : list or dict
# Return a list or dict containing the labels.
# """
# if columns is None:
# columns = self.columns
# columns = data_tools.to_list(columns)
# if as_list:
# labels_out = [self._label_dic.get(col, col) for col in columns]
# else:
# labels_out = {key: self._label_dic.get(key) for key in columns}
# return labels_out
# TODO: delete?
# def set_labels(self, data_labels, replace=False):
# """Set the human readable data-labels (for the columns).
#
# Sometimes you want to change the labels(names) of columns. This can be
# done by passing a dictionary containing the column as key and a
# human-readable name as value.
#
# Parameters
# ----------
# data_labels : dict
# It has the form: {column: name}
# replace : boolean
# """
# if data_labels is None:
# return
# assert isinstance(data_labels, dict), "Not a dictionary"
# self._set_data_labels(data_labels=data_labels, replace=replace)
#
# def _set_data_labels(self, data_labels, replace):
# """Update the data labels"""
# if replace:
# self._label_dic = data_labels
# else:
# self._label_dic.update(data_labels)
@property
def targets(self):
return self.get_targets()
@targets.setter
def targets(self, targets):
self.set_targets(targets=targets)
def get_targets(self, index=None):
"""Return the targets of the data as a pandas Series."""
# assign defaults
index = self._index if index is None else list(index)
length = len(self) if index is None else len(index)
# get targets via internal method
out_targets = self._get_targets(index=index)
# create targets if targets are "simpel" for output
if isinstance(out_targets, (int, float)) or out_targets is None:
if self._target is None:
self.logger.warning("Target list consists of None!")
out_targets = dev_tool.make_list_fill_var([], length, self._target)
out_targets = pd.Series(out_targets, index=index)
return out_targets
def _get_targets(self, index=None):
"""Return targets as pandas Series or primitive type."""
# assign defaults
index = self._index if index is None else list(index)
# length = len(self) if index is None else len(index)
if index is None or dev_tool.is_in_primitive(self._target, (-1, 0, 1, None)):
out_targets = self._target
else:
out_targets = self._target.loc[index]
return out_targets
def set_targets(self, targets, index=None):
"""Set the targets of the data. Either an array-like object or {0, 1}."""
if not dev_tool.is_in_primitive(targets, (-1, 0, 1, None)):
assert len(self) == len(targets), "Invalid targets"
self._set_target(target=targets, index=index)
def _set_target(self, target, index=None):
"""Set the target. Attention with Series, index must be the same as data-index."""
index = self._index if dev_tool.is_in_primitive(index) else index
if isinstance(target, (list, np.ndarray, pd.Series)):
target = pd.Series(target, index=index, copy=True)
target.sort_index(inplace=True)
self._target = target
def make_dataset(
self,
second_storage=None,
index=None,
index_2=None,
columns=None,
weights_ratio=0,
shuffle=False,
targets_from_data=False,
):
"""Create data, targets and weights of the instance (and another one).
In machine-learning, it is very often required to have data, it's
targets (or labeling, the 'y') and the weights. In most cases, we
are not only interested in one such pair, but need to concatenate
it to other data (for example signal and background).
This is exactly, what make_dataset does.
Parameters
----------
second_storage : |hepds_type|
A second data-storage. If provided, the data/targets/weights
will be concatenated and returned as one.
index : |index_type|
The index for the **calling** (the *first*) storage instance.
|index_docstring|
index_2 : list(int, int, int, ...)
The index for the (optional) **second storage instance**.
|index_docstring|
columns : list(str, str, str, ...)
The columns to be used of **both** data-storages.
weights_ratio : float >= 0
The (relative) normalization. If a second data storage is provided
it is assumed (will be changed in future ?)
that the two storages can be seen as the two different targets.
If zero, nothing happens. If it is bigger than zero, it
represents the ratio of the sum of the weights from the first
to the second storage. If set to 1, they both are equally
weighted.
If no second storage is provided, it is the normalization of the
storage called.
Ratio := sum(weights_1) / sum(weights_2) with a second storage
Ratio := sum(weights_1) / mean(weights_1)
shuffle : boolean or int
If True or int, the dataset will be shuffled before returned. If an
int is provided, it will be used as a seed to the pseudo-random
generator.
targets_from_data
OUTDATED, dont use it. Use two datastorage, one labeled 0, one 1
"""
# initialize values
# normalize_1 = 1
# normalize_2 = 1
#
# if weights_ratio > 0 and second_storage is not None:
# weights_1 = self.get_weights(index=index)
# weights_2 = second_storage.get_weights(index=index_2)
#
# sum_weight_1 = float(sum(weights_1))
# sum_weight_2 = float(sum(weights_2))
#
# ratio_1 = weights_ratio * sum_weight_2 / sum_weight_1
# self.logger.info("ratio_1 = " + str(ratio_1))
# if ratio_1 >= 1:
# ratio_2 = 1.0
# else:
# ratio_2 = 1.0 / ratio_1
# ratio_1 = 1.0
#
# normalize_1 = ratio_1
# normalize_2 = ratio_2
# elif weights_ratio > 0 and second_storage is None:
# normalize_1 = weights_ratio
# else:
# normalize_1 = None
if shuffle:
index = self.index if index is None else index
index = copy.deepcopy(index)
if isinstance(shuffle, int) and shuffle is not True:
rand_seed = shuffle
rand_seed_2 = shuffle + 74
else:
rand_seed = rand_seed_2 = None
if rand_seed is not None:
random.seed(rand_seed)  # random.shuffle expects a callable for 'random', not a seed
random.shuffle(index)
data = self.pandasDF(columns=columns, index=index)
if second_storage is None:
targets = self.get_targets(index=index)
# weights = self.get_weights(index=index, normalize=normalize_1)
if second_storage is not None:
assert isinstance(
second_storage, HEPDataStorage
), "Wrong type, not an HEPDataStorage"
if shuffle is not False:
index_2 = second_storage.index if index_2 is None else index_2
index_2 = copy.deepcopy(index_2)
if rand_seed_2 is not None:
random.seed(rand_seed_2)
random.shuffle(index_2)
data_2 = second_storage.pandasDF(columns=columns, index=index_2)
data = pd.concat((data, data_2), ignore_index=True, copy=False)
targets_1 = self.get_targets()
targets_2 = second_storage.get_targets()
targets = np.concatenate((targets_1, targets_2))
if (
max(targets_1) != min(targets_1) or max(targets_2) != min(targets_2)
) and weights_ratio > 0:
raise ValueError(
"Very unfortunately is the case of mixed targets in a HEPDataStorage and weights_ratio"
+ " > 0, this case is not yet implemented. Please make an issue!"
)
weights = self.get_weights(
normalize=weights_ratio, second_storage=second_storage
)
return data, targets, weights
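# Typical call pattern for make_dataset (illustrative sketch only; the storage
# objects and column names below are made up and not part of the package):
# data, targets, weights = signal_storage.make_dataset(
#     second_storage=background_storage,
#     columns=["B_PT", "B_ETA"],
#     weights_ratio=1,
#     shuffle=True,
# )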
def copy_storage(self, columns=None, index=None, add_to_name=" cp"):
"""Return a copy of self (with only some of the columns, indices etc).
Parameters
----------
columns : str or list(str, str, str, ...)
The columns which will be in the new storage.
index : |index_type|
The indices of the rows (and corresponding weights, targets etc.)
for the new storage.
|index_docstring|
add_to_name : str
An addition to the data_name_addition of the copy.
"""
index = self._index if index is None else list(index)
columns = self.columns if columns is None else columns
new_data = copy.deepcopy(self.data)
new_targets = copy.deepcopy(self._get_targets(index=index))
new_weights = copy.deepcopy(self._get_weights(index=index, normalize=False))
new_index = copy.deepcopy(index)
new_column_alias = copy.deepcopy(self.column_alias)
new_storage = HEPDataStorage(
new_data,
target=new_targets,
sample_weights=new_weights,
index=new_index,
column_alias=new_column_alias,
data_name=self.data_name,
data_name_addition=self.data_name_addition + add_to_name,
)
new_storage.columns = columns
return new_storage
# TODO: add second data_storage
def get_LabeledDataStorage(self, columns=None, index=None, shuffle=False):
"""Create and return an instance of class "LabeledDataStorage" from the REP repository.
Parameters
----------
columns : str or list(str, str, str, ...)
The columns to use for the LabeledDataStorage.
index : |index_type|
|index_docstring|
shuffle : boolean
Argument is passed to the LabeledDataStorage. If True, the data
will be shuffled.
Return
------
out: LabeledDataStorage instance
Return a Labeled Data Storage instance created with the data
from inside this instance.
"""
index = self.index if index is None else list(index)
if columns is None:
columns = self.columns
else:
columns = columns
columns = dev_tool.entries_to_str(columns)
random_state = meta_cfg.randint()
new_lds = LabeledDataStorage(
self.pandasDF(columns=columns, index=index),
target=self.get_targets(index=index),
sample_weight=self.get_weights(index=index),
random_state=random_state,
shuffle=shuffle,
)
return new_lds
def make_folds(self, n_folds=10, shuffle=True):
"""Create shuffled train-test folds which can be accessed via :py:meth:`~raredecay.data.HEPDataStorage.get_fold()`.
Split the data into n folds (for usage in KFold validation etc.).
Then every fold consists of a train dataset, which contains (n-1)/n
of the data, and a test dataset, which contains 1/n
of the whole data.
The folds will be created as |hepds_type|.
To get a certain fold (train-test pair), use
:py:meth:`~raredecay.data.HEPDataStorage.get_fold()`
Parameters
----------
n_folds : int > 1
The number of folds to be created from the data. If you want, for
example, a simple 2/3-1/3 split, just specify n_folds = 3 and
just take one fold.
shuffle : boolean or int
If True or int, shuffle the data before slicing.
"""
if not n_folds > 1:
raise ValueError("Number of folds has to be higher then 1")
self._fold_index = []
# split indices of shuffled list
length = len(self)
temp_indeces = [int(round(length / n_folds)) * i for i in range(n_folds)]
temp_indeces.append(length) # add last index. len(index) = n_folds + 1
# get a copy of index and shuffle it if True
temp_index = copy.deepcopy(self._make_index())
if shuffle is not False:
random.shuffle(temp_index, random=meta_cfg.randfloat)
for i in range(n_folds):
self._fold_index.append(temp_index[temp_indeces[i] : temp_indeces[i + 1]])
def get_fold(self, fold):
"""Return the specified fold: train and test data as instance of |hepds_type|.
Parameters
----------
fold : int
The number of the fold to return. From 0 to n_folds - 1
Return
------
out : tuple(|hepds_type|, |hepds_type|)
Return the *train* and the *test* data in a |hepds_type|
"""
assert self._fold_index is not None, (
"Tried to get a fold but data has no folds."
+ " First create them (make_folds())"
)
assert isinstance(fold, int) and fold < len(
self._fold_index
), "Value of fold is invalid"
train_index = []
for i, index_slice in enumerate(self._fold_index):
if i == fold:
test_index = copy.deepcopy(index_slice)
else:
train_index += copy.deepcopy(index_slice)
n_folds = len(self._fold_index)
test_DS = self.copy_storage(index=test_index)
test_DS._fold_status = (fold, n_folds)
# + 1 human-readable
test_DS.fold_name = "test set fold " + str(fold + 1) + " of " + str(n_folds)
train_DS = self.copy_storage(index=train_index)
train_DS._fold_status = (fold, n_folds)
train_DS.fold_name = "train set fold " + str(fold + 1) + " of " + str(n_folds)
return train_DS, test_DS
def get_n_folds(self):
"""Return how many folds are currently availabe or 0 if no folds have been created.
Return
------
out : int
The number of folds which are currently available.
"""
return 0 if self._fold_index is None else len(self._fold_index)
def plot_correlation(
self,
second_storage=None,
figure=None,
columns=None,
method="pearson",
plot_importance=5,
):
"""
.. warning:: does not support weights. Maybe in the future.
Plot the feature correlation for the data (combined with other data)
Calculate the feature correlation, return it and plot them.
Parameters
----------
second_storage : |hepds_type| or None
If a second data-storage is provided, the data will be merged and
then the correlation will be calculated. Otherwise, only this
datas correlation will be calculated and plotted.
method : str {'pearson', 'kendall', 'spearman'}
The method to calculate the correlation.
plot_importance : int {1, 2, 3, 4, 5}
The higher the more likely it gets plotted. Depends on the
plot_verbosity. To make sure the correlation...
- *does* get plotted, chose 5
- does *not* get plotted, chose 1
Return
------
out : pandas DataFrame
Return the feature-correlations in a pandas DataFrame
"""
from statsmodels.stats.weightstats import DescrStatsW
columns = self.columns if columns is None else columns
data_name = self.name
if second_storage is not None:
data_name += " and " + second_storage.name
data, _tmp, weights = self.make_dataset(
second_storage=second_storage, shuffle=True, columns=columns
)
del _tmp
out.save_fig(figure, importance=plot_importance)
ds = DescrStatsW(data.values, weights=weights)  # .as_matrix() was removed from pandas
correlation = ds.cov  # weighted covariance; currently unused (overwritten below)
correlation = data.corr(method=method)
corr_plot = sns.heatmap(correlation.T)
corr_plot.set_title("Correlation of " + data_name)
# turn the axis label
for item in corr_plot.get_yticklabels():
item.set_rotation(0)
for item in corr_plot.get_xticklabels():
item.set_rotation(90)
return correlation
def plot(
self,
figure=None,
columns=None,
index=None,
title=None,
sub_title=None,
data_name=None,
bins=None,
log_y_axes=False,
plot_range=None,
x_label=None,
y_label="probability density",
sample_weights=None,
importance=3,
see_all=False,
hist_settings=None,
figure_kwargs=None,
):
"""Draw histograms of the data.
.. warning:: Only 99.98% of the newest plotted data will be shown to focus
on the essential parts (the axis limits will be set accordingly).
This implies a risk of cutting the previously (in the same figure)
plotted data (mostly, if they do not overlap a lot). To ensure that
all data is plotted, set *see_all* to *True*.
Parameters
----------
figure : str or int
The name of the figure. If the figure already exists, the plots
will be plotted in the same window (can be intentional, for
example to compare data)
columns : str or list(str, str, str, ...)
The columns of the data to be plotted. If None, all are plotted.
index : |index_type|
|index_docstring|
title : str
| The title of the whole plot (NOT of the subplots). If several
titles for the same figures are given, they will be *concatenated*.
| So for a "simple" title, specify the title only once.
data_name : str
| Additional, (to the *data_name* and *data_name_addition*), human-
readable name for the legend.
| Examples: "before cut", "after cut" etc
bins : int
Number of bins to plot.
log_y_axes : boolean
If True, the y-axes will be scaled logarithmically.
plot_range : tuple (float, float) or None
The lower and upper range of the bins. If None, 99.98% of the data
will be plotted automatically.
sample_weights : pandas Series
The weights for the data, how "high" a bin is. Actually, how much
it should account for the whole distribution or how "often" it
occurs. If None is specified, the weights are taken from the data.
importance : |importance_type|
|importance_docstring|
see_all : boolean
If True, all data (not just 99.98%) will be plotted.
hist_settings : dict
A dictionary containing the settings as keywords for the
:py:func:`~matplotlib.pyplot.hist()` function.
"""
# ==============================================================================
# initialize values
# ==============================================================================
if sample_weights is None:
sample_weights = self._get_weights(index=index)
if dev_tool.is_in_primitive(sample_weights, 1):
sample_weights = None
figure_kwargs = {} if figure_kwargs is None else figure_kwargs
# update hist_settings
if dev_tool.is_in_primitive(hist_settings, None):
hist_settings = {}
if isinstance(hist_settings, dict):
hist_settings = dict(meta_cfg.DEFAULT_HIST_SETTINGS, **hist_settings)
if bins is not None:
hist_settings["bins"] = bins
if plot_range is not None:
hist_settings["range"] = plot_range
# create data
data_plot = self.pandasDF(columns=columns, index=index)
columns = data_plot.columns.values
self.logger.debug("plot columns from pandasDataFrame: " + str(columns))
# set the right number of rows and columns for the subplot
subplot_col = int(math.ceil(math.sqrt(len(columns))))
subplot_row = int(math.ceil(float(len(columns)) / subplot_col))
# assign a free figure if argument is None
if dev_tool.is_in_primitive(figure, None):
safety = 0
while True:
safety += 1  # guard against an endless loop
figure = self.__figure_number + 1
self.__figure_number += 1
assert safety < meta_cfg.MAX_FIGURES, "stuck in an endless while loop"
if figure not in list(self.__figure_dic.keys()):
x_limits_col = {}
# TODO: improve figure dict with title....
self.__figure_dic.update(
{figure: x_limits_col, str(figure) + "_title": ""}
)
break
elif figure not in list(self.__figure_dic.keys()):
x_limits_col = {}
self.__figure_dic.update({figure: x_limits_col, str(figure) + "_title": ""})
out_figure = out.save_fig(
figure,
importance=importance,
figure_kwargs=figure_kwargs,
**cfg.save_fig_cfg
)
# create a label
label_name = data_tools.obj_to_string(
[self._name[0], self._name[1], data_name], separator=" - "
)
self.__figure_dic[str(figure) + "_title"] += "" if title is None else title
plt.suptitle(
self.__figure_dic.get(str(figure) + "_title"),
fontsize=self.supertitle_fontsize,
)
# ==============================================================================
# Start plotting
# ==============================================================================
# plot the distribution column by column
for col_id, column in enumerate(columns, 1):
# create sub title
sub_title_tmp = column if sub_title is None else sub_title
x_label = "" if x_label is None else x_label
# only plot in range x_limits, otherwise the plot is too big
x_limits = self.__figure_dic.get(figure).get(column)
lower, upper = np.percentile(np.hstack(data_plot[column]), [0.01, 99.99])
if dev_tool.is_in_primitive(x_limits, None):
x_limits = (lower, upper)
elif see_all: # choose the maximum range. Bins not nicely overlapping.
x_limits = (min(x_limits[0], lower), max(x_limits[1], upper))
if "range" in hist_settings:
x_limits = hist_settings.pop("range")
self.__figure_dic[figure].update({column: x_limits})
plt.subplot(subplot_row, subplot_col, col_id)
plt.hist(
data_plot[column],
weights=sample_weights,
log=log_y_axes,
range=x_limits,
label=label_name,
**hist_settings
)
# set labels, titles...
plt.title(self.latex_replacements.get(sub_title_tmp, sub_title_tmp))
ha = "center"
plt.xlabel(x_label, ha=ha, position=(0.5, 0))
if y_label is not None:
plt.ylabel(y_label, ha=ha, position=(0, 0.5))
plt.legend()
return out_figure
def plot_parallel_coordinates(self, columns=None, figure=0, second_storage=None):
"""Plot the parallel coordinates.
.. warning::
No weights supported so far!
"""
data, targets, weights = self.make_dataset(
second_storage=second_storage, columns=columns
)
targets.name = "targets"
data = pd.concat([data, targets], axis=1)
out_figure = out.save_fig(figure)
pd.plotting.parallel_coordinates(data, "targets")  # pd.tools.plotting was removed in newer pandas
return out_figure
def plot2Dhist(self, x_columns, y_columns=None):
"""Plot a 2D hist of x_columns vs itself or y_columns.
.. warning:: this can produce A LOT of plots! (x_columns * y_columns)
Parameters
----------
x_columns : list(str, str, str,...)
The x columns to plot against
y_columns : list(str, str, str,...)
The y columns to plot against
"""
x_columns = self.columns if x_columns == "all" else x_columns
y_columns = self.columns if y_columns == "all" else y_columns
y_columns = x_columns if y_columns is None else y_columns
for x_col in x_columns:
for y_col in y_columns:
df = self.pandasDF(columns=[x_col, y_col])
df.plot.hexbin(x_col, y_col, gridsize=30)
def plot2Dscatter(self, x_branch, y_branch, dot_scale=20, color="b", figure=None):
"""Plot two columns against each other to see the distribution.
The dots size is proportional to the weights, so you have a good
overview on the data and the weights.
Parameters
----------
x_branch : str
The x column to plot
y_branch : str
The y column to plot
dot_scale : int or float
The overall scaling factor for the dots
color : str
A valid (matplotlib.pyplot-compatible) color
figure : str or int or figure
The figure to be plotted in
Return
------
out : figure
Return the figure
"""
# TODO: make nice again
out_figure = out.save_fig(figure)
weights = self.get_weights()
assert len(weights) == len(self), "Wrong length of weigths"
size = weights * dot_scale
temp_label = data_tools.obj_to_string([i for i in self._name])
plt.scatter(
self.pandasDF(columns=x_branch),
self.pandasDF(columns=y_branch),
s=size,
c=color,
alpha=0.5,
label=temp_label,
)
plt.xlabel(x_branch)
plt.ylabel(y_branch)
plt.legend()
return out_figure
# TODO: add correlation matrix
# ==============================================================================
# Docs
# ==============================================================================
data_storage_docstring = """
.. |data_type| replace:: root-tree dict (:py:func:`~raredecay.tools.data_tools.make_root_dict`) or :py:class:`~pd.DataFrame`
.. |sample_weights_type| replace:: :py:class:`~pd.Series` or :py:class:`~np.array`
or int {1} or str/dict for root-trees (:py:func:`~raredecay.tools.data_tools.make_root_dict`)
.. |sample_weights_docstring| replace::
The new weights for the dataset.
If the new weights are a pandas Series, the index must match the
internal index
If the data is a root-tree file,
a string (naming the branche) or a whole root-dict can be given,
pointing to the weights stored.
.. |index_type| replace:: list or :py:class:`~np.array`
.. |index_docstring| replace:: The index of the data to use.
.. |column_alias_type| replace:: dict{str: str, str: str, ...}
.. |column_alias_docstring| replace::
To change the name of a branch. The argument should be a dict looking like
{'current_branch_name_in_root_tree/DataFrame': 'desired_name'}.
The current_branch has to exist in the root-tree or DataFrame,
the desired_name can be anything.
"""
try:
HEPDataStorage.__doc__ += data_storage_docstring
except AttributeError: # Python 2
pass
```
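The storage class above is easiest to grasp from a short end-to-end call sequence. The following is a minimal sketch, not code from the package itself: the toy DataFrame, the column names and the import path are assumptions based on the file shown above.
```python
# Minimal usage sketch for HEPDataStorage (assumptions: the package installs as
# `raredecay` and the class is importable from the module path shown above).
import numpy as np
import pandas as pd

from raredecay.tools.data_storage import HEPDataStorage

# toy data with two made-up feature columns
df = pd.DataFrame({
    "B_PT": np.random.exponential(2000, size=100),
    "B_ETA": np.random.normal(3, 1, size=100),
})
storage = HEPDataStorage(df, target=1, data_name="toy signal")

# create 5 shuffled train/test folds and take the first pair
storage.make_folds(n_folds=5)
train, test = storage.get_fold(0)
print(len(train), len(test), storage.get_n_folds())
```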
#### File: raredecay/tools/data_tools.py
```python
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
What percentage of the data to keep in order to apply the cuts.
"""
# if percent_sig_to_keep < 100:
# raise NotImplementedError("percentage of < 100 not yet imlemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
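# Illustrative example (toy numbers, not from real data): with
# signal_data = np.arange(1, 11), percent_sig_to_keep = 100 and
# bkg_data = np.array([0, 5, 20]), the percentile call yields lower_cut = 1 and
# upper_cut = 10; the values 0 and 20 lie outside, so cut_bkg = 2 and the function
# returns [1.0, 10.0] together with (3 - 2) / 3 ≈ 0.33.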
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
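# Illustrative call (path, tree and branch names are made up):
# make_root_dict("/home/user1/data/myRootTree1.root", "DecayTree", ["B_PT", "B_ETA"])
# returns {'filenames': '/home/user1/data/myRootTree1.root',
#          'treename': 'DecayTree', 'branches': ['B_PT', 'B_ETA']}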
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
The name of the branch, resp. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
# TODO: what is that 'if' doing there? an assertion maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data for which we apply the weights. Usual 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
If weights contains fewer "weight-containing array-like objects" than
data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
# convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
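# Illustrative behaviour (derived from the implementation above):
# obj_to_string(["signal mc", None, "reweighted"]) returns "signal mc - reweighted"
# obj_to_string("already a string", separator="/") returns the string unchanged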
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
- just iterating through the object will fail because it iterates through the
characters of the string.
- using list(obj) converts the tuple, leaves the list, but splits a string's
characters into single elements of a new list.
- using [obj] creates a list containing a string, but also a list containing
a list or a tuple, which you did not want to.
Solution: use to_list(obj), which creates a new list in case the object is
a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
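# Illustrative behaviour (derived from the docstring and implementation above):
# to_list("a string")  -> ["a string"]   (kept as one element)
# to_list(("a", "b"))  -> ["a", "b"]     (tuple converted)
# to_list(["a", "b"])  -> ["a", "b"]     (list content kept)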
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
root_index = None
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
raise TypeError("Could not convert data to pandas. Data: " + data_in)
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
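# Hedged usage sketch (not part of the original module): adv_return and
# try_unpickle form a pickle round-trip. The file name "my_scores" is made up
# and the ".pickle" extension assumes meta_config.PICKLE_DATATYPE == "pickle".
#   result = adv_return({"score": 0.5}, save_name="my_scores")  # pickled and returned
#   restored = try_unpickle("my_scores.pickle", use_metapath_bkwcomp=True)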
```
#### File: raredecay/tools/ml_scores.py
```python
import math as mt
import numpy as np
from . import dev_tool, data_storage
def mayou_score(
mc_data, real_data, features=None, old_mc_weights=1, clf="xgb", splits=2, n_folds=10
):
"""An experimental score using a "loss" function for data-similarity"""
import raredecay.analysis.ml_analysis as ml_ana
from raredecay.globals_ import out
features = dev_tool.entries_to_str(features)
clf = dev_tool.entries_to_str(clf)
# initialize variables
output = {}
score_mc_vs_mcr = []
score_mcr_vs_real = []
# splits *= 2 # because every split is done with fold 0 and 1 (<- 2 *)
# loop over number of splits, split the mc data
mc_data.make_folds(n_folds)
real_data.make_folds(n_folds)
# mc reweighted vs mc
for fold in range(n_folds):
mc_data_train, mc_data_test = mc_data.get_fold(fold)
# TODO: no real folds? It is better to test on full data always?
# mc_data_train, mc_data_test = real_data.get_fold(fold)
for split in range(splits * 2): # because two possibilities per split
if split % 2 == 0:
mc_data_train.make_folds(2)
mc_normal, mc_reweighted = mc_data_train.get_fold(split % 2)
mc_normal.set_weights(old_mc_weights)
score_mc_vs_mcr.append(
ml_ana.classify(
original_data=mc_normal,
target_data=mc_reweighted,
features=features,
validation=[mc_data_test, real_data],
clf=clf,
plot_importance=1,
# TODO: no weights ratio? (roc auc)
weights_ratio=0,
)[1]
)
out.add_output(
[
"mayou_score mc vs mc reweighted test on mc vs real score: ",
score_mc_vs_mcr,
"\nMean: ",
np.mean(score_mc_vs_mcr),
" +-",
np.std(score_mc_vs_mcr) / mt.sqrt(len(score_mc_vs_mcr) - 1),
],
subtitle="Mayou score",
to_end=True,
)
output["mc_distance"] = np.mean(score_mc_vs_mcr)
# mc_reweighted vs real
for fold in range(n_folds):
real_train, real_test = real_data.get_fold(fold)
mc_train, mc_test = mc_data.get_fold(fold)
mc_test.set_weights(old_mc_weights)
score_mcr_vs_real.append(
ml_ana.classify(
original_data=mc_train,
target_data=real_train,
features=features,
validation=[mc_test, real_test],
clf=clf,
plot_importance=1,
# TODO: no weights ratio? (roc auc)
weights_ratio=0,
)[1]
)
out.add_output(
[
"mayou_score real vs mc reweighted test on mc vs real score: ",
score_mcr_vs_real,
"\nMean: ",
np.mean(score_mcr_vs_real),
" +-",
np.std(score_mcr_vs_real) / mt.sqrt(len(score_mcr_vs_real) - 1),
],
to_end=True,
)
output["real_distance"] = np.mean(score_mcr_vs_real)
def train_similar(
mc_data,
real_data,
features=None,
n_checks=10,
n_folds=10,
clf="xgb",
test_max=True,
test_shuffle=True,
test_mc=False,
old_mc_weights=1,
test_predictions=False,
clf_pred="rdf",
):
"""Score for reweighting. Train clf on mc reweighted/real, test on real; minimize score.
Enter two datasets and evaluate the score described below. Return a
dictionary containing the different scores. The test_predictions is
another scoring, which is built upon the train_similar method.
**Scoring method description**
**Idea**:
A clf is trained on the reweighted mc as well as on the real data of a
certain decay. Therefore, the classifier learns to distinguish between
Monte-Carlo data and real data. Then we let the classifier predict some
real data (an unbiased test set) and see how many events it is able to classify
as real events. The lower the score, the fewer differences it was able to
learn from the training data, therefore the more similar the training data
and the better the reweighting.
**Advantages**: It is quite difficult to cheat on this method. Most of all,
it is robust to single high-weight events (which mcreweighted_as_real is
not) and, in general, seems to be the best scoring so far.
**Disadvantages**: If you insert a gaussian shaped 1.0 as mc and a gaussian
shaped 1.1 as real, the score will be bad (around 0.33). So far, this was
only observed for "artificial" distributions (even though, of course, we
do not know whether it partly affects real distributions as well)
**Output explanation**
The return is a dictionary containing several values. Of course, only the
values, which are set to be evaluated, are contained. The keys are:
- '**score**' : The average of all train_similar scores (as we use KFolding,
there will be n_folds scores). *The* score.
- '**score_std**' : The std of a single score, just for curiosity
- '**score_max**' : The (average of all) "maximum" score. Actually the
train_similar score but
with mc instead of *reweighted* mc. Should be higher than the
reweighted score.
- '**score_max_std**' : The std of a single score, just for curiosity
- '**score_pred**' : The score of the test_predictions method.
- '**score_mc_pred**' : The score of the test_predictions method but on the
predictions of the mc instead of the *reweighted* mc.
Parameters
----------
mc_data : |hepds_type|
The reweighted Monte-Carlo data, assuming the new weights are applied
already.
real_data : |hepds_type|
The real data
n_checks : int >= 1
Number of checks to perform. Has to be <= n_folds
n_folds : int > 1
Number of folds the data will be split into
clf : str
The name of a classifier to be used in
:py:func:`~raredecay.analysis.ml_analysis.classify`.
test_max : boolean
If true, test for the "maximum value" by training also on mc/real
(instead of *reweighted* mc/real)
and test on real. The score for only mc should be higher than for
reweighted mc/real. It most probably *should* be, but it does not have
to be!
old_mc_weights : array-like or 1
If *test_max* is True, the weights for mc before reweighting will be
taken to be *old_mc_weights*, the weights the mc distribution had
before the reweighting. The default is 1.
test_predictions : boolean
If true, try to distinguish the predictions. Advanced feature; it is not
yet really clear how to interpret it. Gives a very high ROC somehow.
clf_pred : str
The classifier to be used to distinguish the predictions. Required for
the *test_predictions*.
Return
------
out : dict
A dictionary containing the different scores. See the description above.
"""
import raredecay.analysis.ml_analysis as ml_ana
from raredecay.globals_ import out
features = dev_tool.entries_to_str(features)
clf = dev_tool.entries_to_str(clf)
clf_pred = dev_tool.entries_to_str(clf_pred)
# initialize variables
assert (
1 <= n_checks <= n_folds and n_folds > 1
), "wrong n_checks/n_folds. Check the docs"
assert isinstance(mc_data, data_storage.HEPDataStorage), (
"mc_data wrong type:" + str(type(mc_data)) + ", has to be HEPDataStorage"
)
assert isinstance(real_data, data_storage.HEPDataStorage), (
"real_data wrong type:" + str(type(real_data)) + ", has to be HEPDataStorage"
)
# assert isinstance(clf, str),\
# "clf has to be a string, the name of a valid classifier. Check the docs!"
output = {}
scores = np.ones(n_checks)
scores_shuffled = np.ones(n_checks)
scores_mc = np.ones(n_checks)
scores_max = np.ones(n_checks) # required due to output of loop
scores_mc_max = np.ones(n_checks)
# scores_weighted = []
scores_max_weighted = []
probas_mc = []
probas_reweighted = []
weights_mc = []
weights_reweighted = []
real_pred = []
real_test_index = []
real_mc_pred = []
# initialize data
tmp_mc_targets = mc_data.get_targets()
mc_data.set_targets(0)
real_data.make_folds(n_folds=n_folds)
if test_mc:
mc_data.make_folds(n_folds=n_folds)
for fold in range(n_checks):
real_train, real_test = real_data.get_fold(fold)
if test_mc:
mc_train, mc_test = mc_data.get_fold(fold)
mc_test.set_targets(0)
else:
mc_train = mc_data.copy_storage()
mc_train.set_targets(0)
real_test.set_targets(1)
real_train.set_targets(1)
tmp_out = ml_ana.classify(
mc_train,
real_train,
validation=real_test,
clf=clf,
plot_title="train on mc reweighted/real, test on real",
weights_ratio=1,
get_predictions=True,
features=features,
plot_importance=1,
importance=1,
)
clf_trained, scores[fold], pred_reweighted = tmp_out
tmp_weights = mc_train.get_weights()
if test_shuffle:
import copy
shuffled_weights = copy.deepcopy(tmp_weights)
shuffled_weights.reindex(np.random.permutation(shuffled_weights.index))
mc_train.set_weights(shuffled_weights)
tmp_out = ml_ana.classify(
mc_train,
real_train,
validation=real_test,
clf=clf,
plot_title="train on mc reweighted/real, test on real",
weights_ratio=1,
get_predictions=True,
features=features,
plot_importance=1,
importance=1,
)
scores_shuffled[fold] = tmp_out[1]
mc_train.set_weights(tmp_weights)
if test_mc:
clf_trained, scores_mc[fold] = ml_ana.classify(
validation=mc_test,
clf=clf_trained,
plot_title="train on mc reweighted/real, test on mc",
weights_ratio=1,
get_predictions=False,
features=features,
plot_importance=1,
importance=1,
)
# del clf_trained, tmp_pred
probas_reweighted.append(pred_reweighted["y_proba"])
weights_reweighted.append(pred_reweighted["weights"])
real_pred.extend(pred_reweighted["y_pred"])
real_test_index.extend(real_test.get_index())
if test_max:
temp_weights = mc_data.get_weights()
mc_data.set_weights(old_mc_weights)
tmp_out = ml_ana.classify(
mc_data,
real_train,
validation=real_test,
plot_title="real/mc NOT reweight trained, validate on real",
weights_ratio=1,
get_predictions=True,
clf=clf,
features=features,
plot_importance=1,
importance=1,
)
clf_trained, scores_max[fold], pred_mc = tmp_out
if test_mc:
clf_trained, scores_mc_max[fold] = ml_ana.classify(
validation=mc_test,
clf=clf_trained,
plot_title="train on mc NOT reweighted/real, test on mc",
weights_ratio=1,
get_predictions=False,
features=features,
plot_importance=1,
importance=1,
)
del clf_trained
# HACK
tmp_pred = pred_mc["y_proba"][:, 1] * pred_mc["weights"]
scores_max_weighted.extend(tmp_pred * (pred_mc["y_true"] * 2 - 1))
# HACK END
mc_data.set_weights(temp_weights)
probas_mc.append(pred_mc["y_proba"])
weights_mc.append(pred_mc["weights"])
real_mc_pred.extend(pred_mc["y_pred"])
output["score"] = np.round(scores.mean(), 4)
output["score_std"] = np.round(scores.std(), 4)
if test_shuffle:
output["score_shuffled"] = np.round(scores_shuffled.mean(), 4)
output["score_shuffled_std"] = np.round(scores_shuffled.std(), 4)
if test_mc:
output["score_mc"] = np.round(scores_mc.mean(), 4)
output["score_mc_std"] = np.round(scores_mc.std(), 4)
out.add_output(
[
"Score train_similar (recall, lower means better): ",
str(output["score"]) + " +- " + str(output["score_std"]),
],
subtitle="Clf trained on real/mc reweight, tested on real",
)
if test_max:
output["score_max"] = np.round(scores_max.mean(), 4)
output["score_max_std"] = np.round(scores_max.std(), 4)
if test_mc:
output["score_mc_max"] = np.round(scores_mc_max.mean(), 4)
output["score_mc_max_std"] = np.round(scores_mc_max.std(), 4)
out.add_output(["No reweighting score: ", round(output["score_max"], 4)])
if test_predictions:
# test on the reweighted/real predictions
real_data.set_targets(targets=real_pred, index=real_test_index)
tmp_, score_pred = ml_ana.classify(
real_data,
target_from_data=True,
clf=clf_pred,
features=features,
plot_title="train on predictions reweighted/real, real as target",
weights_ratio=1,
validation=n_checks,
plot_importance=3,
)
output["score_pred"] = round(score_pred, 4)
if test_predictions and test_max:
# test on the mc/real predictions
real_data.set_targets(targets=real_mc_pred, index=real_test_index)
tmp_, score_mc_pred = ml_ana.classify(
real_data,
target_from_data=True,
clf=clf_pred,
validation=n_checks,
plot_title="mc not rew/real pred, real as target",
weights_ratio=1,
plot_importance=3,
)
output["score_mc_pred"] = np.round(score_mc_pred, 4)
mc_data.set_targets(tmp_mc_targets)
output["similar_dist"] = similar_dist(
predictions=np.concatenate(probas_reweighted)[:, 1],
weights=np.concatenate(weights_reweighted),
)
return output
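# Hedged usage sketch (not part of the original module); `mc` and `real` are
# assumed to be HEPDataStorage instances and the feature names are invented:
#   scores = train_similar(mc, real, features=["B_PT", "B_ETA"],
#                          n_checks=5, n_folds=10, clf="xgb", test_max=True)
#   print(scores["score"], "+-", scores["score_std"])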
def estimate_weights_bias(mc, real, columns=None, n_folds=10, clf="xgb"):
pass
def train_similar_new(
mc,
real,
columns=None,
n_checks=10,
n_folds=10,
clf="xgb",
test_max=True,
test_shuffle=True,
test_mc=False,
old_mc_weights=1,
test_predictions=False,
clf_pred="rdf",
):
"""Score for reweighting. Train clf on mc reweighted/real, test on real; minimize score.
Enter two datasets and evaluate the score described below. Return a
dictionary containing the different scores. The test_predictions is
another scoring, which is built upon the train_similar method.
**Scoring method description**
**Idea**:
A clf is trained on the reweighted mc as well as on the real data of a
certain decay. Therefore, the classifier learns to distinguish between
Monte-Carlo data and real data. Then we let the classifier predict some
real data (an unbiased test set) and see how many events it is able to classify
as real events. The lower the score, the fewer differences it was able to
learn from the training data, therefore the more similar the training data
and the better the reweighting.
**Advantages**: It is quite difficult to cheat on this method. Most of all,
it is robust to single high-weight events (which mcreweighted_as_real is
not) and, in general, seems to be the best scoring so far.
**Disadvantages**: If you insert a gaussian shaped 1.0 as mc and a gaussian
shaped 1.1 as real, the score will be bad (around 0.33). So far, this was
only observed for "artificial" distributions (even though, of course, we
do not know whether it partly affects real distributions as well)
**Output explanation**
The return is a dictionary containing several values. Of course, only the
values, which are set to be evaluated, are contained. The keys are:
- '**score**' : The average of all train_similar scores (as we use KFolding,
there will be n_folds scores). *The* score.
- '**score_std**' : The std of a single score, just for curiosity
- '**score_max**' : The (average of all) "maximum" score. Actually the
train_similar score but
with mc instead of *reweighted* mc. Should be higher than the
reweighted score.
- '**score_max_std**' : The std of a single score, just for curiosity
- '**score_pred**' : The score of the test_predictions method.
- '**score_mc_pred**' : The score of the test_predictions method but on the
predictions of the mc instead of the *reweighted* mc.
Parameters
----------
mc : |hepds_type|
The reweighted Monte-Carlo data, assuming the new weights are applied
already.
real : |hepds_type|
The real data
n_checks : int >= 1
Number of checks to perform. Has to be <= n_folds
n_folds : int > 1
Number of folds the data will be split into
clf : str
The name of a classifier to be used in
:py:func:`~raredecay.analysis.ml_analysis.classify`.
test_max : boolean
If true, test for the "maximum value" by training also on mc/real
(instead of *reweighted* mc/real)
and test on real. The score for only mc should be higher than for
reweighted mc/real. It most probably *should* be, but it does not have
to be!
old_mc_weights : array-like or 1
If *test_max* is True, the weights for mc before reweighting will be
taken to be *old_mc_weights*, the weights the mc distribution had
before the reweighting. The default is 1.
test_predictions : boolean
If true, try to distinguish the predictions. Advanced feature; it is not
yet really clear how to interpret it. Gives a very high ROC somehow.
clf_pred : str
The classifier to be used to distinguish the predictions. Required for
the *test_predictions*.
Return
------
out : dict
A dictionary containing the different scores. See the description above.
"""
import raredecay.analysis.ml_analysis as ml_ana
from raredecay.tools.data_storage import HEPDataStorage
from raredecay.analysis import statistics
# Python 2/3 compatibility, str
columns = dev_tool.entries_to_str(columns)
clf = dev_tool.entries_to_str(clf)
clf_pred = dev_tool.entries_to_str(clf_pred)
# initialize variables
assert (
1 <= n_checks <= n_folds and n_folds > 1
), "wrong n_checks/n_folds. Check the docs"
assert isinstance(mc, data_storage.HEPDataStorage), (
"mc_data wrong type:" + str(type(mc)) + ", has to be HEPDataStorage"
)
assert isinstance(real, data_storage.HEPDataStorage), (
"real_data wrong type:" + str(type(real)) + ", has to be HEPDataStorage"
)
# assert isinstance(clf, str),\
# "clf has to be a string, the name of a valid classifier. Check the docs!"
output = {}
predictions = []
predictions_weights = []
predictions_max = []
predictions_max_weights = []
predictions_min = []
predictions_min_weights = []
# initialize data
tmp_mc_targets = mc.get_targets()
mc.set_targets(0)
real.make_folds(n_folds=n_folds)
for fold in range(n_checks):
real_train, real_test = real.get_fold(fold)
mc_train = mc.copy_storage()
mc_train.set_targets(0)
real_test.set_targets(1)
real_train.set_targets(1)
tmp_out = ml_ana.classify(
mc_train,
real_train,
validation=real_test,
clf=clf,
plot_title="train on mc reweighted/real, test on real",
weights_ratio=1,
get_predictions=True,
features=columns,
plot_importance=1,
importance=1,
)
clf_trained, _, pred = tmp_out
predictions.append(pred["y_proba"][:, 1])
predictions_weights.append(pred["weights"])
temp_weights = mc_train.weights
mc_train.set_weights(old_mc_weights)
tmp_out = ml_ana.classify(
original_data=mc_train,
target_data=real_train,
validation=real_test,
plot_title="real/mc NOT reweight trained, validate on real",
weights_ratio=1,
get_predictions=True,
clf=clf,
features=columns,
plot_importance=1,
importance=1,
)
clf_trained, _, pred = tmp_out
predictions_max.append(pred["y_proba"][:, 1])
predictions_max_weights.append(pred["weights"])
mc_train.set_weights(temp_weights)
predictions = np.concatenate(predictions)
predictions_weights = np.concatenate(predictions_weights)
predictions_max = np.concatenate(predictions_max)
predictions_max_weights = np.concatenate(predictions_max_weights)
# mix mc and real to get a nice shape of two similar dists
# TODO: commented below out
mc.set_weights(old_mc_weights)
mc.make_folds(2)
real.make_folds(2)
mc1, mc2 = mc.get_fold(0)
real1, real2 = real.get_fold(0)
data1, target1, weights1 = mc1.make_dataset(real1)
data2, target2, weights2 = mc2.make_dataset(real2)
data1 = HEPDataStorage(data=data1, sample_weights=weights1, target=0)
data2 = HEPDataStorage(data=data2, sample_weights=weights2, target=1)
tmp_out = ml_ana.classify(
original_data=data1,
target_data=data2,
validation=n_folds,
plot_title="real/mc mixed",
weights_ratio=1,
get_predictions=True,
clf=clf,
features=columns,
plot_importance=1,
importance=1,
)
clf_trained, _, pred = tmp_out
predictions_min = np.array(pred["y_proba"][:, 1])
predictions_min_weights = np.array(pred["weights"])
mc.set_weights(temp_weights)
mc.set_targets(tmp_mc_targets)
# HACK
import matplotlib.pyplot as plt
n_bins = 20
plt.figure("comparing the predictions")
plt.hist(predictions, alpha=0.3, label="predictions", bins=n_bins, density=1)
plt.hist(
predictions_min, alpha=0.3, label="predictions_min", bins=n_bins, density=1
)
plt.hist(
predictions_max, alpha=0.3, label="predictions_max", bins=n_bins, density=1
)
plt.legend()
# plt.autoscale()
output["similar_ks_minimize"] = statistics.ks_2samp(
predictions,
predictions_min,
weights1=predictions_weights,
weights2=predictions_min_weights,
)
output["similar_ks_max"] = statistics.ks_2samp(
predictions_max,
predictions_min,
weights1=predictions_max_weights,
weights2=predictions_min_weights,
)
output["similar_ks_maximize"] = statistics.ks_2samp(
predictions,
predictions_max,
weights1=predictions_weights,
weights2=predictions_max_weights,
)
return output
def similar_dist(predictions, weights=None, true_y=1, threshold=0.5):
"""Metric to evaluate the predictions on one label only for similarity test.
This metric is used inside the mayou_score
Parameters
----------
predictions : :py:class:`~np.array`
The predictions
weights : array-like
The weights for the predictions
true_y : {0 , 1}
The "true" label of the data
threshold : float
The threshold for the predictions to decide whether a point belongs
to 0 or 1.
"""
# HACK
scale = 2 # otherwise, the predictions will be [-0.5, 0.5]
# HACK END
data_valid = min(predictions) < threshold < max(predictions)
if not data_valid:
raise ValueError("Predictions are all above or below the threshold")
if true_y == 0:
predictions = 1 - predictions
predictions -= threshold
predictions *= scale
true_pred = predictions[predictions > 0]
false_pred = predictions[predictions <= 0] * -1
true_weights = false_weights = 1
if not dev_tool.is_in_primitive(weights, None):
true_weights = weights[predictions > 0]
false_weights = weights[predictions <= 0]
score = sum(
((np.exp(1.3 * np.square(true_pred + 0.6)) - 1.5969) * 0.5) * true_weights
)
score -= sum(
((np.sqrt(false_pred) - np.power(false_pred, 0.8)) * 2) * false_weights
)
score /= sum(weights)
return score
```
#### File: raredecay/tools/output.py
```python
import os
import subprocess
import sys
import timeit
import time
import warnings
import io as StringIO
import copy
import matplotlib.pyplot as plt
import pickle as pickle
import seaborn as sns
from .. import meta_config as meta_cfg
from ..tools import dev_tool # , data_tools
class OutputHandler:
"""Class for output handling."""
__SAVE_STDOUT = sys.stdout
_IMPLEMENTED_FORMATS = {"png", "jpg", "pdf", "svg"}
_MOST_REPLACE_CHAR = [" ", "-", "<", ">", "&", "!", "?", "=", "*", "%", "."]
_REPLACE_CHAR = _MOST_REPLACE_CHAR + ["/"]
def __init__(self):
"""Initialize an output handler"""
self.output = ""
self.end_output = ""
self._loud_end_output = ""
self._IO_string = ""
self.logger = None
self._logger_cfg = None
self._is_initialized = False
self._save_output = False
self._run_name = ""
self._output_path = None
self._output_folders = None
self._path_to_be_overriden = None
self._figures = {}
self._formats_used = set()
self._pickle_folder = False
# start timer and log current time
self._start_timer = timeit.default_timer()
self._start_time = time.strftime("%c")
# set plotting style
sns.set_context("poster")
plt.rc("figure", figsize=(20, 20))
setattr(self, "print", self._print)
def _check_initialization(self, return_error=False):
if not self._is_initialized and not return_error:
self.initialize()
elif not self._is_initialized and return_error:
raise Exception(
"OutputHandler not initialized! You have to initialize it first"
)
def initialize_save(
self,
output_path,
run_name="",
run_message="",
output_folders=None,
del_existing_folders=False,
logger_cfg=None,
):
output_path = dev_tool.entries_to_str(output_path)
"""Initialize the run. Create the neccesary folders.
Parameters
----------
Best Practice: enter a whole config file
output_path : str
Absolute path to the folder where the run output folder will be
created (named after the run) which will contain all the output
folders (logfile, figures, output etc)
run_name : str
The name of the run and also of the output folder.
run_message : str
A message that is displayed below the title: a further comment
on what you do in the script
output_folders : dict
Contain the name of the folders for the different outputs. For the
available keys
see :py:const:`~raredecay.meta_config.__DEFAULT_OUTPUT_FOLDERS`.
del_existing_folders : boolean
If True, an already existing folder with the same name will be deleted.
If False and the folder exists already, an exception will be raised.
logger_cfg : dict
The configuration for the logger, which will be created later. If
not specified (or only a few arguments), the meta_config will be
taken.
"""
run_name = dev_tool.entries_to_str(run_name)
run_message = dev_tool.entries_to_str(run_message)
output_folders = dev_tool.entries_to_str(output_folders)
logger_cfg = dev_tool.entries_to_str(logger_cfg)
self._save_output = True
# initialize defaults
logger_cfg = {} if logger_cfg is None else logger_cfg
self._logger_cfg = dict(meta_cfg.DEFAULT_LOGGER_CFG, **logger_cfg)
assert isinstance(output_path, str), "output_path not a string"
output_folders = {} if output_folders is None else output_folders
self._output_folders = dict(meta_cfg.DEFAULT_OUTPUT_FOLDERS, **output_folders)
# make sure no blank spaces are left in the folder names
for key, value in list(self._output_folders.items()):
assert isinstance(value, str), "path is not a string: " + str(value)
self._output_folders[key] = value.replace(" ", "_")
# ask if you want to add something to the run_name (and folder name)
if meta_cfg.PROMPT_FOR_COMMENT:
prompt_message = (
"Enter an (optional) extension to the run-name and press 'enter':\n"
)
temp_add = str(input(prompt_message))
run_name += " " + temp_add if temp_add != "" else ""
# del temp_add
# TODO: implement prompt with timeout
self._run_name = run_name
# "clean" and correct the path-name
for char in self._REPLACE_CHAR:
run_name = run_name.replace(char, "_")
output_path += run_name if output_path.endswith("/") else "/" + run_name
self._output_path = os.path.expanduser(
output_path
) # replaces ~ with /home/myUser
# find a non-existing folder
temp_i = 1
while os.path.isdir(self._output_path):
if del_existing_folders:
self._path_to_be_overriden = output_path
if not self._path_to_be_overriden.endswith("/"):
self._path_to_be_overriden += "/"
self._output_path = output_path + "_" + str(temp_i)
temp_i += 1
assert (
temp_i < meta_cfg.MAX_AUTO_FOLDERS
), "possible endless loop when trying to create a non-existing folder"
self._output_path += "" if output_path.endswith("/") else "/"
# create subfolders
for value in list(self._output_folders.values()):
subprocess.call(["mkdir", "-p", self._output_path + value])
subprocess.call(
["touch", self._output_path + "run_NOT_finished"]
) # marker file to show that a run is ongoing
# set meta-config variables
meta_cfg.set_parallel_profile(
n_cpu=meta_cfg.n_cpu_max, gpu_in_use=meta_cfg.use_gpu
)
self._is_initialized = True
self.add_output(
run_message,
title="Run: " + self._run_name,
importance=0,
subtitle="Comments about the run",
)
def initialize(self, run_name="", prompt_for_comment=False):
"""Initialization for external use, no folders created, config.py logger."""
# initialize defaults
from raredecay.globals_ import logger_cfg
run_name = dev_tool.entries_to_str(run_name)
self._logger_cfg = logger_cfg
self._is_initialized = True
self.make_me_a_logger()
# ask if you want to add something to the run_name (and folder name)
if prompt_for_comment:
prompt_message = (
"Enter an (optional) extension to the run-name and press 'enter':\n"
)
temp_add = str(input(prompt_message))
run_name += " " + temp_add if temp_add != "" else ""
self._run_name = str(run_name)
def get_logger_path(self):
"""Return the path for the log folder."""
if self._save_output:
path_out = self._output_path + self._output_folders.get("log")
path_out += "" if path_out.endswith("/") else "/"
else:
path_out = None
return path_out
def get_plots_path(self):
"""Return the path for the log folder."""
if self._save_output:
path_out = self._output_path + self._output_folders.get("plots")
path_out += "" if path_out.endswith("/") else "/"
else:
path_out = None
return path_out
def make_me_a_logger(self):
"""Create a logger in OutputHandler instance. Dependency "hack".
Call after :py:meth:`~raredecay.tools.output.OutputHandler.initialize_save()`
has been called.
"""
# create logger
self.logger = dev_tool.make_logger(__name__, **self._logger_cfg)
def IO_to_string(self):
"""Rederict stdout (print etc.) to string."""
self._IO_string = ""
self._IO_string = StringIO.StringIO()
sys.stdout = self._IO_string
def IO_to_sys(self, importance=3, **add_output_kwarg):
"""Direct stdout back to the sys.stdout and return/save string to output.
Parameters
----------
importance : int {0, 1, 2, 3, 4, 5}
| The importance of the output. The higher, the more likely it will
| be added to the output. To not add it at all but only redirect
| the output, choose 0.
| Additional keyword-arguments for the
| :py:meth:`~raredecay.tools.output.add_output()` method can be
| passed.
Return
------
out : str
Returns the collected string from the redirection.
"""
sys.stdout = self.__SAVE_STDOUT
add_output_kwarg = dev_tool.entries_to_str(add_output_kwarg)
self.add_output(
self._IO_string.getvalue(), importance=importance, **add_output_kwarg
)
return self._IO_string.getvalue()
def figure(self, *args, **kwargs):
"""FUTURE: Wrapper around save_fig()."""
return self.save_fig(*args, **kwargs)
def save_fig(
self,
figure,
importance=3,
file_format=None,
to_pickle=True,
figure_kwargs=None,
**save_cfg
):
"""Advanced :py:meth:`matplotlib.pyplot.figure()`. Create and save a
certain figure at the end of the run.
To create and save a figure, you just enter an already created or a new
figure as a parameter and specify the file formats it should be saved
to. The figure can also be pickled so that it can be re-plotted
at any time.
.. note:: The figure will be saved at the end of the run
(by calling :py:meth:`~raredecay.tools.output.OutputHandler.finalize`)
so any change you make until the end will be applied to the plot.
Parameters
----------
figure : instance of :py:class:`matplotlib.figure.Figure` (e.g. returned \
by :func:`matplotlib.figure`)
The figure to be saved.
importance : {0, 1, 2, 3, 4, 5}
Specify the importance level of the plot, ranging from 0 to 5.
The higher the importance level, the more likely the figure
will be plotted. Whether it is plotted depends on the plot
verbosity set (it is plotted if 5 - importance_level < plot_verbosity).
Therefore, a 0 corresponds to "no plot" and a 5 means "always plot".
file_format : str or list(str, str, str,...)
The ending of the desired format, example: 'png' (default).
If you don't want to save it, enter a blank list.
to_pickle : boolean
If True, the plot will be saved to a pickle file.
**save_cfg : keyword args
Will be used as arguments in :py:func:`~matplotlib.pyplot.savefig()`
Return
------
out : :py:class:`~matplotlib.pyplot.figure`
Return the figure.
"""
figure = dev_tool.entries_to_str(figure)
file_format = dev_tool.entries_to_str(file_format)
save_cfg = dev_tool.entries_to_str(save_cfg)
plot = 5 - round(importance) < meta_cfg.plot_verbosity # to plot or not to plot
figure_kwargs = {} if figure_kwargs is None else figure_kwargs
if self._save_output:
self._pickle_folder = self._pickle_folder or to_pickle
if isinstance(figure, (int, str)):
figure = plt.figure(figure, **figure_kwargs) # TODO: changeable?
file_format = (
meta_cfg.DEFAULT_SAVE_FIG["file_format"]
if file_format is None
else file_format
)
if isinstance(file_format, str):
file_format = [file_format]
file_format = set(file_format)
file_format.intersection_update(self._IMPLEMENTED_FORMATS)
self._formats_used.update(file_format)
# change layout of figures
# figure.tight_layout()
# figure.set_figheight(20)
# figure.set_figwidth(20)
# add figure to dict for later output to file
figure_dict = {
"figure": figure,
"file_format": file_format,
"to_pickle": to_pickle,
"plot": plot,
"save_cfg": save_cfg,
}
self._figures[figure.get_label()] = figure_dict
else:
self._check_initialization()
if plot and isinstance(figure, (int, str)):
figure = plt.figure(figure, **figure_kwargs)
return figure
def _figure_to_file(self):
"""Write all figures from the _figures dictionary to file."""
# check if there are figures to plot, else return
if self._figures == {}:
self.logger.info("_figure_to_file called but nothing to save/plot")
return None
# create folders if they don't exist already
path = self.get_plots_path()
for format_ in self._formats_used:
assert isinstance(format_, str), "Format is not a str: " + str(format_)
subprocess.call(["mkdir", "-p", path + format_])
if self._pickle_folder:
subprocess.call(["mkdir", "-p", path + meta_cfg.PICKLE_DATATYPE])
# save figures to file
for fig_name, fig_dict in list(self._figures.items()):
for char in self._REPLACE_CHAR:
fig_name = fig_name.replace(char, "_")
for extension in fig_dict.get("file_format"):
file_path = path + extension + "/"
file_name = file_path + fig_name + "." + extension
try:
figure_tmp = fig_dict["figure"]
# figure_tmp.tight_layout()
figure_tmp.savefig(
file_name, format=extension, **fig_dict.get("save_cfg")
)
except:
self.logger.error("Could not save figure" + str(figure_tmp))
meta_cfg.error_occured()
if fig_dict.get("to_pickle"):
file_name = (
path
+ meta_cfg.PICKLE_DATATYPE
+ "/"
+ fig_name
+ "."
+ meta_cfg.PICKLE_DATATYPE
)
try:
with open(str(file_name), "wb") as f:
pickle.dump(fig_dict.get("figure"), f, meta_cfg.PICKLE_PROTOCOL)
except:
self.logger.error(
"Could not open file"
+ str(file_name)
+ " OR pickle the figure to it"
)
meta_cfg.error_occured()
# delete if it is not intended to be plotted
if not fig_dict.get("plot"):
plt.close(fig_dict.get("figure"))
# clear the _figures dict
self._figures = {}
@staticmethod
def _make_title(title, title_format):
"""Create a title/subtitle/section in the reST-format and return it as
a string.
Parameters
----------
title : str
The title in words
title_format : (str, str)
| The surrounding lines. The title will be:
|
| title_format[0] * len(title)
| title
| title_format[1] * len(title)
"""
out_str = ""
if title is not None:
title = str(title)
out_str += "\n" + title_format[0] * len(title)
out_str += "\n" + title
out_str += "\n" + title_format[1] * len(title) + "\n"
return out_str
def _print(
self,
data,
to_end=False,
importance=3,
title=None,
subtitle=None,
section=None,
obj_separator=" ",
data_separator="\n\n",
force_newline=True,
):
return self.add_output(
data_out=data,
to_end=to_end,
importance=importance,
title=title,
subtitle=subtitle,
section=section,
obj_separator=obj_separator,
data_separator=data_separator,
force_newline=force_newline,
)
def add_output(
self,
data_out,
to_end=False,
importance=3,
title=None,
subtitle=None,
section=None,
obj_separator=" ",
data_separator="\n\n",
force_newline=True,
):
"""A method to collect the output and format it nicely.
All the objects in data_out get converted to strings and concatenated
with obj_separator in between. After the objects, a data_separator is
added. In the end, the whole output gets printed to a file and saved.
Available options:
- You can add the data at the end of the output file instead of
right in place.
- You can add the data to the output "silently", without printing.
- Add title, subtitle and section on top of the data.
Parameters
----------
data_out : obj or list(obj, obj, obj, ...)
The data to be added to the output. Has to be convertible to str!
to_end : boolean
If True, the data will be added at the end of the file and not
printed. For example, all information which is not interesting during
the run but maybe later, like configuration, version number etc.
importance : int {0, 1, 2, 3, 4, 5}
The importance of the output. The higher, the more likely it gets
printed (otherwise only saved). A 0 means "don't print, only save".
The decisive variable is the verbosity level. The lower the
verbosity level, the less likely the output will be printed.
title : str
The title of the data_out, like "roc auc of different classifiers".
If None, no title will be set.
subtitle : str
A subtitle which can be additional to a title or exclusive.
section : str
The section title. Can be additional to the others or exclusive.
obj_separator : str
The separator between the objects in data_out.
Default is a single space.
data_separator : str
Separates the data_outs from each other. Inserted at the end and
creates a separation from the next call of add_output.
Default is a blank line as separation.
force_newline : boolean
If true, the data_out will be written on a new line and not just
concatenated to the data written before
"""
title = dev_tool.entries_to_str(title)
subtitle = dev_tool.entries_to_str(subtitle)
section = dev_tool.entries_to_str(section)
obj_separator = dev_tool.entries_to_str(obj_separator)
data_separator = dev_tool.entries_to_str(data_separator)
data_out = dev_tool.entries_to_str(data_out)
# initialize defaults
assert isinstance(obj_separator, str), (
str(obj_separator)
+ " is of type "
+ str(type(obj_separator))
+ " instead of string"
)
assert isinstance(data_separator, str), (
str(data_separator)
+ " is of type "
+ str(type(data_separator))
+ " instead of string"
)
self._check_initialization()
do_print = 5 - round(importance) < meta_cfg.verbosity
data_out = dev_tool.make_list_fill_var(data_out)
temp_out = ""
# enforce new line
if (len(self.output) > 0) and (not self.output.endswith("\n")):
temp_out = "\n" if force_newline else ""
# set title, subtitle and section with title_format, subtitle_format...
title_f = ("=", "=")
subtitle_f = ("-", "-")
section_f = ("", "=")
for name, form in (
(title, title_f),
(subtitle, subtitle_f),
(section, section_f),
):
temp_out += self._make_title(name, form)
# Concatenation of the objects
for word in data_out:
# Make nice format for dictionaries
if isinstance(word, dict):
word = dev_tool.entries_to_str(word)
for key, val in list(word.items()):
if isinstance(val, dict):
temp_out += self._make_title(" " + str(key), ("", "^"))
for key2, val2 in list(val.items()):
temp_out += " " + str(key2) + " : " + str(val2) + "\n"
else:
sepr = "" if temp_out.endswith("\n") else "\n"
temp_out += sepr + " " + str(key) + " : " + str(val)
else:
temp_out += str(word)
temp_out += obj_separator if word is not data_out[-1] else data_separator
# print and add to output collector
if do_print:
if to_end:
self._loud_end_output += temp_out
sys.stdout.write(temp_out) # to print even though the print is redirected
if to_end:
self.end_output += temp_out
self.output += temp_out
def finalize(self, show_plots=True, play_sound_at_end=False):
"""Finalize the run. Save everything and plot.
Parameters
----------
show_plots : boolean
If True, show the plots. Equivalent to writing plt.show().
play_sound_at_end : boolean
If True, tries to play a beep-sound at the end of a run to let you
know it finished.
"""
# ==============================================================================
# write all the necessary things to the output
# ==============================================================================
self.add_output("\n", title="END OF RUN " + self._run_name, importance=4)
self.add_output(
["Random generator seed", meta_cfg.rand_seed],
title="Different parameters",
obj_separator=" : ",
importance=2,
)
# print the output which should be printed at the end of the run
sys.stdout.write(
self._loud_end_output
) # to print even though the print is redirected
self.output += self.end_output
# add current version (if available)
if self._save_output and os.path.isdir(meta_cfg.GIT_DIR_PATH):
try:
git_version = subprocess.check_output(
["git", "-C", meta_cfg.GIT_DIR_PATH, "describe"]
)
self.add_output(
["Program version from Git", git_version],
section="Git information",
importance=0,
obj_separator=" : ",
)
except:
meta_cfg.error_occured()
self.logger.error("Could not get version number from git")
# time information
elapsed_time = timeit.default_timer() - self._start_timer
elapsed_time = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
self.add_output(
["Run startet at", self._start_time, "\nand lasted for", elapsed_time],
section="Time information",
obj_separator=" ",
)
# error information
self.add_output(
["Errors encountered during run", meta_cfg._error_count],
obj_separator=" : ",
)
self.add_output(
["Warnings encountered during run", meta_cfg._warning_count],
obj_separator=" : ",
)
output = copy.deepcopy(self.output)
# ==============================================================================
# save output to file
# ==============================================================================
if self._save_output:
# save figures to file
self._figure_to_file()
# Write output to file
# ---------------------
# remove leading blank lines
for i in range(1, 100):
if not self.output.startswith("\n" * i): # "break" condition
self.output = self.output[i - 1 :]
break
temp_out_file = (
self._output_path + self._output_folders.get("results") + "/output.txt"
)
try:
with open(temp_out_file, "w") as f:
f.write(self.output)
except:
self.logger.error("Could not save output to file")
meta_cfg.error_occured()
warnings.warn("Could not save output. Check the logs!", RuntimeWarning)
# if a folder to overwrite exists, delete it and move the temp folder
if self._path_to_be_overriden is not None:
stop_del = ""
if not meta_cfg.NO_PROMPT_ASSUME_YES:
stop_del = str(
input(
"ATTENTION! The folder "
+ self._path_to_be_overriden
+ " will be deleted and replaced with the output "
+ "of the current run.\nTo DELETE that folder and "
+ "overwrite, press ENTER.\n\nIf you want to keep the "
+ "folder and save the current run under "
+ self._output_path
+ ", please enter any input "
+ "and press enter.\n\nYour input:"
)
)
if stop_del == "":
subprocess.call(["rm", "-r", self._path_to_be_overriden])
subprocess.call(
["mv", self._output_path, self._path_to_be_overriden]
)
path = self._path_to_be_overriden
else:
path = self._output_path
else:
path = self._output_path
print("All output saved under: " + path)
subprocess.call(["rm", path + "run_NOT_finished"])
# .finished shows if the run finished
subprocess.call(["touch", path + "run_finished_succesfully"])
self.output = self._loud_end_output = self.end_output = ""
if play_sound_at_end:
try:
from raredecay.tools.dev_tool import play_sound
play_sound()
except:
print("BEEEEEP, no sound could be played")
if show_plots:
if not meta_cfg.NO_PROMPT_ASSUME_YES:
str(input(["Run finished, press Enter to show the plots"]))
plt.show()
return output
if __name__ == "__main__":
out = OutputHandler()
out.initialize("test")
out.add_output(["test: ", {"a": 1, "b": 2}], importance=5)
print("hello world")
out.finalize()
```
#### File: jonas-eschle/raredecay/setup.py
```python
import io
import os
import subprocess
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "requirements.txt")) as f:
requirements = f.read().splitlines()
def readme():
with open("README.rst") as f:
return f.read()
git_version = "2.3.0"
if __name__ == "__main__":
setup(
name="raredecay",
version=git_version,
description="A package with multivariate analysis and reweighting "
"algorithms",
long_description=readme(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Information Analysis",
],
keywords="particle physics, analysis, machine learning, reweight, high energy physics",
url="https://github.com/mayou36/raredecay",
author="<NAME>",
author_email="<EMAIL>",
license="Apache-2.0 License",
install_requires=requirements,
packages=[
"raredecay",
"raredecay.analysis",
"raredecay.tools",
],
include_package_data=True,
python_requires=">2.7,!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
zip_safe=False,
)
# build docs
try:
subprocess.Popen(
"chmod u+x " + os.path.join(here, "docs/make_docs.sh"), shell=True
)
subprocess.Popen("bash " + os.path.join(here, "docs/make_docs.sh"), shell=True)
except Exception as err:
print("Failed to build docs.")
raise err
``` |
{
"source": "jonas-eschmann/SoftActorCritic-Pendulum",
"score": 2
} |
#### File: jonas-eschmann/SoftActorCritic-Pendulum/SAC.py
```python
import datetime
import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.losses import MSE
from tensorflow.keras.optimizers import Adam
import tensorflow_probability as tfp
warm_start_interactions = 1000
gamma = 0.99
alpha = 0.2
lr = 3e-4
state_dim = 3
action_dim = 1
replay_buffer_pos = 0
replay_buffer_cap = 1000000
replay_buffer_populated = False
replay_buffer = {
"states": np.zeros((replay_buffer_cap, state_dim)),
"actions": np.zeros((replay_buffer_cap, action_dim)),
"rewards": np.zeros((replay_buffer_cap, )),
"next_states": np.zeros((replay_buffer_cap, state_dim)),
"done": np.zeros((replay_buffer_cap, ))
}
def add_to_replay_buffer(s, a, r, s2, d):
global replay_buffer_pos, replay_buffer_populated
if replay_buffer_pos >= replay_buffer_cap:
replay_buffer_pos = 0
replay_buffer_populated = True
replay_buffer["states"][replay_buffer_pos, :] = s
replay_buffer["actions"][replay_buffer_pos, :] = a
replay_buffer["rewards"][replay_buffer_pos] = r
replay_buffer["next_states"][replay_buffer_pos, :] = s2
replay_buffer["done"][replay_buffer_pos] = d
replay_buffer_pos += 1
def build_critic(input_dim):
return Sequential([
Input(input_dim),
Dense(64, activation="relu"),
Dense(64, activation="relu"),
Dense(1)
])
critic_1, critic_2 = [build_critic(state_dim + action_dim) for _ in range(2)]
critic_1_target, critic_2_target = [tf.keras.models.clone_model(c) for c in [critic_1, critic_2]]
critic_opt = Adam(learning_rate=lr)
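# Polyak (exponential moving average) update of the target networks:
# w_target <- factor * w_target + (1 - factor) * w_source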
def polyak(target, source, factor=0.995):
for lt, ls in zip(target.layers, source.layers):
for wt, ws in zip(lt.trainable_weights, ls.trainable_weights):
wt.assign(wt * factor + (1-factor)*ws)
def build_actor():
input = Input(state_dim)
stub = Sequential([
Dense(64, activation="relu"),
Dense(64, activation="relu"),
])(input)
mean = Dense(action_dim)(stub)
log_std = Dense(action_dim)(stub)
return Model(inputs=input, outputs=(mean, log_std))
actor = build_actor()
aopt = Adam(learning_rate=lr)
def sample_action(state, deterministic=False):
mean, log_std = actor(state)
action_dist = tfp.distributions.Normal(mean, tf.exp(log_std))
action = action_dist.sample() if not deterministic else mean
action_log_prob = tf.reduce_sum(action_dist.log_prob(action), axis=-1)
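# Change-of-variables correction for the tanh squashing below: subtract
# sum(log(1 - tanh(u)^2)), written in the numerically stable form
# 2 * (log(2) - u - softplus(-2u)).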
action_log_prob -= tf.reduce_sum(2 * (tf.math.log(2.0) - action - tf.math.softplus(-2*action)), axis=-1)
action_squashed = tf.tanh(action)
return action_squashed, action_log_prob
def training_step(batch_size=256, done_is_dead=False):
upper = replay_buffer_cap if replay_buffer_populated else replay_buffer_pos
idx = np.arange(upper)
np.random.shuffle(idx)
batch_idx = idx[:batch_size]
states = replay_buffer["states"][batch_idx]
actions = replay_buffer["actions"][batch_idx]
rewards = replay_buffer["rewards"][batch_idx]
next_states = replay_buffer["next_states"][batch_idx]
done = replay_buffer["done"][batch_idx]
next_actions, next_actions_log_prob = sample_action(next_states)
next_critic_input = tf.concat((next_states, next_actions), axis=-1)
next_min_q = tf.minimum(critic_1_target(next_critic_input)[:, 0], critic_2_target(next_critic_input)[:, 0])
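# Soft Bellman target: y = r + gamma * (min_i Q_target_i(s', a') - alpha * log pi(a'|s')),
# with the bootstrap term masked at terminal states if done_is_dead is set.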
q_target = rewards + gamma * (next_min_q - alpha * next_actions_log_prob) * (1.0 - done if done_is_dead else 1.0)
critic_input = tf.concat((states, actions), axis=-1)
with tf.GradientTape() as c_tape:
critic_loss = MSE(critic_1(critic_input)[:, 0], q_target) + MSE(critic_2(critic_input)[:, 0], q_target)
c1_grad, c2_grad = c_tape.gradient(critic_loss, [critic_1.trainable_weights, critic_2.trainable_weights])
critic_opt.apply_gradients(zip(c1_grad, critic_1.trainable_weights))
critic_opt.apply_gradients(zip(c2_grad, critic_2.trainable_weights))
polyak(critic_1_target, critic_1)
polyak(critic_2_target, critic_2)
with tf.GradientTape() as a_tape:
new_actions, new_actions_log_prob = sample_action(states)
new_critic_input = tf.concat((states, new_actions), axis=-1)
new_min_q = tf.minimum(critic_1(new_critic_input)[:, 0], critic_2(new_critic_input)[:, 0])
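# Actor objective: maximize E[min_i Q_i(s, a~pi) - alpha * log pi(a|s)],
# implemented here as minimizing its negative.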
actor_loss = -(new_min_q - alpha * new_actions_log_prob)
a_grad = a_tape.gradient(actor_loss, actor.trainable_weights)
aopt.apply_gradients(zip(a_grad, actor.trainable_weights))
def main():
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = 'logs/SACpy/' + current_time
summary_writer = tf.summary.create_file_writer(log_dir)
env = gym.make("Pendulum-v0")
state = env.reset()
interaction = 0
episode = 0
episode_return = 0
render_interval = 5
while True:
if interaction < warm_start_interactions:
action = np.random.rand() * 2 - 1
else:
action, _ = sample_action(state.reshape(1, -1))
action = action[:, 0]
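# Affine rescaling of the action from [-1, 1] to [action_space.low, action_space.high].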
action = env.action_space.low + (action + 1) / 2 * (env.action_space.high - env.action_space.low)
next_state, reward, done, _ = env.step(action)
add_to_replay_buffer(state, action, reward, next_state, done)
training_step()
episode_return += reward
if not done:
state = next_state
else:
state = env.reset()
with summary_writer.as_default():
print(f"Episode: {episode} return: {episode_return} (total interactions: {interaction+1})")
tf.summary.scalar('Return', episode_return, step=interaction)
episode_return = 0
episode += 1
if render_interval is not None and episode % render_interval == 0:
done = False
while not done:
state, reward, done, _ = env.step(sample_action(state.reshape(1, -1), deterministic=True)[0])
episode_return += reward
env.render()
state = env.reset()
print(f"Deterministic episode return: {episode_return}")
episode_return = 0
interaction += 1
main() if __name__ == "__main__" else None
``` |
{
"source": "JonasEssen/Polling-Bot",
"score": 3
} |
#### File: JonasEssen/Polling-Bot/main.py
```python
import os
from dotenv import load_dotenv
import discord
from discord.ext import commands
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix = '!', help_command=None)
# Permissions needed for the bot:
# - View Channels
# - Send Messages
# - Manage Messages
# - Embed Links
# - Read Message History
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name="!help"))
print(f'{bot.user} has connected to Discord!')
# Create Poll
@bot.command()
async def poll(ctx, *, input):
# Delete called command
await ctx.message.delete()
# Separate title and options
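# e.g. !poll "Lunch?" "Pizza" "Sushi" arrives here as
# input = '"Lunch?" "Pizza" "Sushi"', which splits into
# ['"Lunch?', '"Pizza', '"Sushi"'] before the stray quotes are stripped below.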
splitted = input.split('" ')
title = splitted[0].replace('"', '')
options = splitted[1:]
for i in range(len(options)):
options[i] = options[i].replace('"', '')
# Check if there is more than 1 option
if len(options) <= 1:
embed = discord.Embed(
description = ':x: There must be at least 2 options to make a poll!',
colour = discord.Colour.red(),
)
await ctx.send(embed=embed)
return
# Check if there are less than 20 options (because of Discord limits)
if len(options) > 20:
embed = discord.Embed(
description = ':x: There can\'t be more than 20 options',
colour = discord.Colour.red(),
)
await ctx.send(embed=embed)
return
# Check whether the poll is a Yes/No question or a multiple-choice question
if len(options) == 2 and options[0].lower() == 'yes' and options[1].lower() == 'no':
reactions = ['✅', '❌']
else:
# Regional Indicators
reactions = [ '🇦', '🇧', '🇨', '🇩', '🇪', '🇫', '🇬', '🇭', '🇮', '🇯', '🇰', '🇱', '🇲', '🇳', '🇴', '🇵', '🇶', '🇷', '🇸', '🇹']
# Create embed response
description = []
for x, option in enumerate(options):
description.append('{} {}\n\n'.format(reactions[x], option))
embed = discord.Embed(
title=title,
description=''.join(description),
colour = discord.Colour.blue()
)
message = await ctx.send(embed=embed)
for reaction in reactions[:len(options)]:
await message.add_reaction(reaction)
@bot.command()
async def help(ctx):
embed = discord.Embed(
title='Polling Bot | Help',
description='A simple Discord bot that allows you to easily create polls using reactions',
colour = discord.Colour.gold()
)
embed.add_field(name='Usage',
value='''The BOT has a unique command that allows you to generate a poll. Example usage:
!poll "Poll Title" "Option 1" "Option 2" ...
This command allows a maximum of 20 options (because of Discord limitations).\n\n
Alternatively, if the only options given to the BOT are "Yes" and "No", it will generate a Yes/No Poll. Example usage:
!poll "Poll Title" "Yes" "No"''',
inline=True
)
await ctx.send(embed=embed)
# Error Management
@poll.error
async def poll_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(
description = ':x: You must specify a title and at least two options!',
colour = discord.Colour.red(),
)
await ctx.send(embed=embed)
if isinstance(error, commands.MissingRole):
embed = discord.Embed(
description = ':x: You don\'t have the permission to use this command!',
colour = discord.Colour.red(),
)
await ctx.send(embed=embed)
bot.run(TOKEN)
``` |
{
"source": "jonasfj/dxr",
"score": 2
} |
#### File: plugins/buglink/htmlifier.py
```python
import dxr.plugins
import re
import sys
bugFinder = re.compile("(?i)bug\s+#?([0-9]+)") # also used in hg plugin
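# Matches references such as "Bug 123456" or "bug #42" (case-insensitive);
# group 1 captures the bug number.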
# Global variables
bugzilla = None
name = None
# Load global variables
def load(tree, conn):
global bugzilla
# Get bugzilla link
if hasattr(tree, 'plugin_buglink_bugzilla'):
bugzilla = tree.plugin_buglink_bugzilla
else:
print >> sys.stderr, "buglink plugin needs plugin_buglink_bugzilla configuration key"
sys.exit(1)
# Get bug tracker name
if hasattr(tree, 'plugin_buglink_name'):
name = tree.plugin_buglink_name
else:
print >> sys.stderr, "buglink plugin needs plugin_buglink_name configuration key"
sys.exit(1)
class BugLinkHtmlifier:
def __init__(self, text):
self.text = text
def refs(self):
for m in bugFinder.finditer(self.text):
bug = m.group(1)
yield m.start(0), m.end(0), [{
'text': "Lookup #%s" % bug,
'title': "Find this bug number at %s" % name,
'href': bugzilla % bug,
'icon': 'buglink'
}]
def regions(self):
return []
def annotations(self):
return []
def links(self):
return []
def htmlify(path, text):
return BugLinkHtmlifier(text)
__all__ = dxr.plugins.htmlifier_exports()
```
#### File: plugins/pygmentize/htmlifier.py
```python
import dxr.plugins
import pygments
import pygments.lexers
from pygments.token import Token
import os, sys
import fnmatch
class Pygmentizer:
""" Pygmentizer add syntax regions for file """
def __init__(self, text, lexer):
self.text = text
self.lexer = lexer
def refs(self):
return []
def regions(self):
for index, token, text in self.lexer.get_tokens_unprocessed(self.text):
cls = None
if token is Token.Keyword: cls = 'k'
if token is Token.Name: cls = None
if token is Token.Literal: cls = None
if token is Token.String: cls = 'str'
if token is Token.Operator: cls = None
if token is Token.Punctuation: cls = None
if token is Token.Comment: cls = 'c'
if cls: yield index, index + len(text), cls
def annotations(self):
return []
def links(self):
return []
def load(tree, conn):
pass
def htmlify(path, text):
# TODO Enable C++ highlighting using pygments, pending fix for infinite
# looping that we don't like, see:
# https://bitbucket.org/birkenfeld/pygments-main/issue/795/
if any((path.endswith(e) for e in ('.c', '.cc', '.cpp', '.cxx', '.h', '.hpp'))):
return None
# Options and filename
options = {'encoding': 'utf-8'}
filename = os.path.basename(path)
try:
lexer = pygments.lexers.get_lexer_for_filename(filename, **options)
except pygments.util.ClassNotFound:
# Small hack for js highlighting of jsm files
if fnmatch.fnmatchcase(filename, "*.jsm"):
lexer = pygments.lexers.JavascriptLexer(**options)
else:
print >> sys.stderr, "pygments: No lexer for '%s'" % filename
return None
return Pygmentizer(text, lexer)
__all__ = dxr.plugins.htmlifier_exports()
```
#### File: dxr/dxr/server_utils.py
```python
import ctypes
import os.path
import sqlite3
import sys
# Load trilite
_trilite_loaded = False
def load_tokenizer():
global _trilite_loaded
if _trilite_loaded:
return
try:
ctypes.CDLL("libtrilite.so").load_trilite_extension()
_trilite_loaded = True
return True
except:
return False
# This makes results a lot more fun!
def _collate_loc(str1, str2):
parts1 = str1.split(':')
parts2 = str2.split(':')
for i in range(1, len(parts1)):
parts1[i] = int(parts1[i])
    for i in range(1, len(parts2)):
        parts2[i] = int(parts2[i])
return cmp(parts1, parts2)
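# Illustrative example (the "path:line[:col]" format is an assumption based on
# how the locations are compared above): _collate_loc("foo.c:9:2", "foo.c:10:1")
# sorts "foo.c:9:2" first, because 9 < 10 numerically even though "10" < "9"
# as plain strings.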
# Get database connection for tree
def connect_db(tree, instance_path):
load_tokenizer()
dbname = os.path.join(instance_path, 'trees', tree, '.dxr-xref.sqlite')
try:
conn = sqlite3.connect(dbname)
conn.text_factory = str
conn.execute("PRAGMA temp_store = MEMORY;")
conn.create_collation("loc", _collate_loc)
conn.row_factory = sqlite3.Row
return conn
except: # TODO: Die, bare except, die!
return None
# Log message
def log(msg):
print >> sys.stderr, "Log: %s" % msg
```
#### File: dxr/dxr/utils.py
```python
import sqlite3
import ctypes
import ConfigParser
import os, sys, subprocess
import jinja2
import string
from datetime import datetime
import dxr
# Please keep these config objects as simple as possible and in sync with
# docs/configuration.mkd. I'm well aware that this is not the most compact way
# of writing things, but it sure is doomed to fail when user forgets an important
# key. It's also fairly easy to extract default values, and config keys from
# this code, so enjoy.
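# Illustrative minimal config (a sketch for orientation only; section and key
# names follow the parser defaults below, the values are assumptions, and
# docs/configuration.mkd remains the authoritative reference):
#
#   [DXR]
#   target_folder   = /var/www/dxr-instance
#   temp_folder     = /tmp/dxr-temp
#   enabled_plugins = *
#
#   [Template]
#   title = My DXR instance
#
#   [my-tree]
#   source_folder = /home/user/src/my-tree
#   object_folder = /home/user/obj/my-tree
#   build_command = make -j $jobs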
class Config(object):
""" Configuration for DXR """
def __init__(self, configfile, **override):
# Create parser with sane defaults
parser = ConfigParser.ConfigParser({
'dxrroot': os.path.dirname(dxr.__file__),
'plugin_folder': "%(dxrroot)s/plugins",
'nb_jobs': "1",
'temp_folder': "/tmp/dxr-temp",
'log_folder': "%(temp_folder)s/logs",
'template': "%(dxrroot)s/templates",
'wwwroot': "/",
'enabled_plugins': "*",
'disabled_plugins': " ",
'directory_index': ".dxr-directory-index.html",
'generated_date': datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S +0000")
})
parser.read(configfile)
# Set config values
self.dxrroot = parser.get('DXR', 'dxrroot', False, override)
self.plugin_folder = parser.get('DXR', 'plugin_folder', False, override)
self.nb_jobs = parser.get('DXR', 'nb_jobs', False, override)
self.temp_folder = parser.get('DXR', 'temp_folder', False, override)
self.target_folder = parser.get('DXR', 'target_folder', False, override)
self.log_folder = parser.get('DXR', 'log_folder', False, override)
self.template_folder = parser.get('DXR', 'template', False, override)
self.wwwroot = parser.get('DXR', 'wwwroot', False, override)
self.enabled_plugins = parser.get('DXR', 'enabled_plugins', False, override)
self.disabled_plugins = parser.get('DXR', 'disabled_plugins', False, override)
self.directory_index = parser.get('DXR', 'directory_index', False, override)
self.generated_date = parser.get('DXR', 'generated_date', False, override)
# Set configfile
self.configfile = configfile
self.trees = []
# Set template parameters (using new parser to avoid defaults)
tmp_cfg = ConfigParser.ConfigParser()
tmp_cfg.read(configfile)
self.template_parameters = dict(tmp_cfg.items('Template'))
# Read all plugin_ keys
for key, value in tmp_cfg.items('DXR'):
if key.startswith('plugin_'):
setattr(self, key, value)
# Render all paths absolute
self.dxrroot = os.path.abspath(self.dxrroot)
self.plugin_folder = os.path.abspath(self.plugin_folder)
self.temp_folder = os.path.abspath(self.temp_folder)
self.log_folder = os.path.abspath(self.log_folder)
self.target_folder = os.path.abspath(self.target_folder)
self.template_folder = os.path.abspath(self.template_folder)
# Make sure wwwroot doesn't end in /
if self.wwwroot[-1] == '/':
self.wwwroot = self.wwwroot[:-1]
# Convert disabled plugins to a list
if self.disabled_plugins == "*":
self.disabled_plugins = os.listdir(self.plugin_folder)
else:
self.disabled_plugins = self.disabled_plugins.split()
# Convert enabled plugins to a list
if self.enabled_plugins == "*":
self.enabled_plugins = [p for p in os.listdir(self.plugin_folder)
if p not in self.disabled_plugins]
else:
self.enabled_plugins = self.enabled_plugins.split()
# Test for conflicting plugins settings
        for p in self.enabled_plugins:
            if p in self.disabled_plugins:
                msg = "Plugin: '%s' is both enabled and disabled in '%s'"
                print >> sys.stderr, msg % (p, configfile)
                sys.exit(1)
# Load trees
def section_cmp(a, b):
if parser.has_option(a, "order") and parser.has_option(b, "order"):
return cmp(parser.getint(a, "order"), parser.getint(b, "order"))
if (not parser.has_option(a, "order")) and (not parser.has_option(b, "order")):
return cmp(a, b)
return -1 if parser.has_option(a, "order") else 1
for tree in sorted(parser.sections(), section_cmp):
if tree not in ('DXR', 'Template'):
self.trees.append(TreeConfig(self, self.configfile, tree))
class TreeConfig(object):
""" Tree configuration for DXR """
def __init__(self, config, configfile, name):
# Create parser with sane defaults
parser = ConfigParser.ConfigParser({
'enabled_plugins': "*",
'disabled_plugins': "",
'temp_folder': os.path.join(config.temp_folder, name),
'log_folder': os.path.join(config.log_folder, name),
'ignore_patterns': ".hg .git CVS .svn .bzr .deps .libs",
'build_command': "make -j $jobs"
})
parser.read(configfile)
# Set config values
self.enabled_plugins = parser.get(name, 'enabled_plugins', False)
self.disabled_plugins = parser.get(name, 'disabled_plugins', False)
self.temp_folder = parser.get(name, 'temp_folder', False)
self.log_folder = parser.get(name, 'log_folder', False)
self.object_folder = parser.get(name, 'object_folder', False)
self.source_folder = parser.get(name, 'source_folder', False)
self.build_command = parser.get(name, 'build_command', False)
self.ignore_patterns = parser.get(name, 'ignore_patterns', False)
# You cannot redefine the target folder!
self.target_folder = os.path.join(config.target_folder, 'trees', name)
# Set config file and DXR config object reference
self.configfile = configfile
self.config = config
self.name = name
# Read all plugin_ keys
for key, value in parser.items(name):
if key.startswith('plugin_'):
setattr(self, key, value)
# Convert ignore patterns to list
self.ignore_patterns = self.ignore_patterns.split()
self.ignore_paths = filter(lambda p: p.startswith("/"), self.ignore_patterns)
self.ignore_patterns = filter(lambda p: not p.startswith("/"), self.ignore_patterns)
# Render all path absolute
self.temp_folder = os.path.abspath(self.temp_folder)
self.log_folder = os.path.abspath(self.log_folder)
self.object_folder = os.path.abspath(self.object_folder)
self.source_folder = os.path.abspath(self.source_folder)
# Convert disabled plugins to a list
if self.disabled_plugins == "*":
self.disabled_plugins = config.enabled_plugins
else:
self.disabled_plugins = self.disabled_plugins.split()
for p in config.disabled_plugins:
if p not in self.disabled_plugins:
self.disabled_plugins.append(p)
# Convert enabled plugins to a list
if self.enabled_plugins == "*":
self.enabled_plugins = [p for p in config.enabled_plugins
if p not in self.disabled_plugins]
else:
self.enabled_plugins = self.enabled_plugins.split()
# Test for conflicting plugins settings
        for p in self.enabled_plugins:
            if p in self.disabled_plugins:
                msg = "Plugin: '%s' is both enabled and disabled in '%s'"
                print >> sys.stderr, msg % (p, name)
                sys.exit(1)
# Warn if $jobs isn't used...
if "$jobs" not in self.build_command:
msg = "Warning: $jobs is not used in build_command for '%s'"
print >> sys.stderr, msg % name
_trilite_loaded = False
def load_trilite(config):
""" Load trilite if not loaded before"""
global _trilite_loaded
if _trilite_loaded:
return
ctypes.CDLL("libtrilite.so").load_trilite_extension()
_trilite_loaded = True
def connect_database(tree):
""" Connect to database ensuring that dependencies are built first """
# Build and load tokenizer if needed
load_trilite(tree.config)
# Create connection
conn = sqlite3.connect(os.path.join(tree.target_folder, ".dxr-xref.sqlite"))
# Configure connection
conn.execute("PRAGMA synchronous=off") # TODO Test performance without this
conn.execute("PRAGMA page_size=32768")
# Optimal page should probably be tested, we get a hint from:
# http://www.sqlite.org/intern-v-extern-blob.html
conn.text_factory = str
conn.row_factory = sqlite3.Row
return conn
_template_env = None
def load_template_env(temp_folder, template_folder):
""" Load template environment (lazily) """
global _template_env
if not _template_env:
# Cache folder for jinja2
tmpl_cache = os.path.join(temp_folder, 'jinja2_cache')
if not os.path.isdir(tmpl_cache):
os.mkdir(tmpl_cache)
# Create jinja2 environment
_template_env = jinja2.Environment(
loader = jinja2.FileSystemLoader(template_folder),
auto_reload = False,
bytecode_cache = jinja2.FileSystemBytecodeCache(tmpl_cache)
)
return _template_env
_next_id = 1
def next_global_id():
""" Source of unique ids """
#TODO Please stop using this, it makes distribution and parallelization hard
# Also it's just stupid!!! When whatever SQL database we use supports this
global _next_id
n = _next_id
_next_id += 1
return n
def open_log(config_or_tree, name):
""" Get an open log file given config or tree and name """
return open(os.path.join(config_or_tree.log_folder, name), 'w')
```
#### File: tests/test_basic/test_basic.py
```python
from dxr.testing import DxrInstanceTestCase
class BasicTests(DxrInstanceTestCase):
"""Tests for functionality that isn't specific to particular filters"""
def test_text(self):
"""Assert that a plain text search works."""
self.found_files_eq('main', ['main.c', 'makefile'])
def test_extensions(self):
"""Try search by filename extension."""
self.found_files_eq('ext:c', ['main.c', 'dot_c.c'])
``` |
{
"source": "JonasFovea/PyDocToLaTeX",
"score": 3
} |
#### File: JonasFovea/PyDocToLaTeX/examplecode.py
```python
a = 1
"""a is a variable"""
varB = "abcdefg"
"""varB is another variable"""
V_ariable_C = 3.14
def funcA():
"""Function A does nothing"""
pass
def funcB(a, b: str):
"""Function B also does nothing"""
pass
class TestClass:
"""TestClass description"""
var1 = 42
var2 = "this is a string"
"""var2 contains a string"""
def __init__(self):
"""init method of TestClass"""
pass
```
#### File: JonasFovea/PyDocToLaTeX/test_pyToTeX.py
```python
import unittest
import os.path
import os
from pyToTeX import *
class MyTestCase(unittest.TestCase):
def test_convert(self):
fn = "examplecode.py"
convert(fn, True)
self.assertTrue(os.path.isfile("examplecode.tex"))
def test_cliMissingFileName(self):
stream = os.popen("python pyToTeX.py")
out = stream.read().strip()
self.assertEqual(out,
"ERROR: Please provide a path to a .py file, which you want to convert.\n\tAdditional option for overwriting existing .tex files: -o")
def test_cliTestFalseFileName(self):
stream = os.popen("python pyToTeX.py testNotFound.py")
out = stream.read().strip()
self.assertEqual(out, "ERROR: File or path not found")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JonasFrey96/DenseFusion",
"score": 2
} |
#### File: DenseFusion/src/lightning.py
```python
import warnings
warnings.simplefilter("ignore", UserWarning)
import sys
import os
sys.path.insert(0, os.getcwd())
sys.path.append(os.path.join(os.getcwd() + '/src'))
sys.path.append(os.path.join(os.getcwd() + '/lib'))
print( os.getcwd() )
import copy
import datetime
import time
import shutil
import argparse
import logging
import signal
import pickle
# misc
import numpy as np
import pandas as pd
import random
import sklearn
import yaml
from math import pi
import coloredlogs
import datetime
import torch
import torch.autograd.profiler as profiler
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.base import Callback
from scipy.spatial.transform import Rotation as R
coloredlogs.install()
# network dense fusion
from lib.network import PoseNet, PoseRefineNet
# src modules
from helper import pad
from helper import flatten_dict
from dataset import GenericDataset
from visu import Visualizer
from rotations import *
from loss import compute_auc, LossAddS, Loss_refine, Loss
from eval import *
def ret_cropped_image(img):
test = torch.nonzero(img[:, :, :])
a = torch.max(test[:, 0]) + 1
b = torch.max(test[:, 1]) + 1
c = torch.max(test[:, 2]) + 1
return img[:a, :b, :c]
def padded_cat(list_of_images, device):
"""returns torch.tensor of concatenated images with dim = max size of image padded with zeros
Args:
list_of_images ([type]): List of Images Channels x Heigh x Width
Returns:
padded_cat [type]: Tensor of concatination result len(list_of_images) x Channels x max(Height) x max(Width)
valid_indexe: len(list_of_images) x 2
"""
c = list_of_images[0].shape[0]
h = [x.shape[1] for x in list_of_images]
w = [x.shape[2] for x in list_of_images]
max_h = max(h)
max_w = max(w)
padded_cat = torch.zeros(
(len(list_of_images), c, max_h, max_w), device=device)
for i, img in enumerate(list_of_images):
padded_cat[i, :, :h[i], :w[i]] = img
valid_indexes = torch.tensor([h, w], device=device)
return padded_cat, valid_indexes
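# Illustrative usage of padded_cat (shapes are assumptions, not values from the
# actual pipeline):
#   a = torch.rand(3, 40, 60)                      # C x H x W
#   b = torch.rand(3, 64, 48)
#   batch, hw = padded_cat([a, b], device='cpu')
#   batch.shape  -> (2, 3, 64, 60)                 # zero-padded to max H and max W
#   hw           -> tensor([[40, 64], [60, 48]])   # row 0: heights, row 1: widths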
def tight_image_batch(img_batch, device):
ls = []
for i in range(img_batch.shape[0]):
ls.append(ret_cropped_image(img_batch[i]))
tight_padded_img_batch, valid_indexes = padded_cat(
ls,
device=device)
return tight_padded_img_batch
def check_exp(exp):
if exp['d_test'].get('overfitting_nr_idx', -1) != -1 or exp['d_train'].get('overfitting_nr_idx', -1) != -1:
print('Overfitting on ONE batch is activated')
time.sleep(5)
class DenseFusionLightning(LightningModule):
def __init__(self, exp, env):
super().__init__()
self._mode = 'init'
# check experiment cfg for errors
check_exp(exp)
# logging h-params
exp_config_flatten = flatten_dict(copy.deepcopy(exp))
for k in exp_config_flatten.keys():
if exp_config_flatten[k] is None:
exp_config_flatten[k] = 'is None'
self.hparams = exp_config_flatten
self.hparams['lr'] = exp['training']['lr']
self.test_size = exp['training']['test_size']
self.env, self.exp = env, exp
# number of input points to the network
num_points_small = exp['d_train']['num_pt_mesh_small']
num_points_large = exp['d_train']['num_pt_mesh_large']
num_obj = exp['d_train']['objects']
self.df_pose_estimator = PoseNet(
num_points=exp['d_test']['num_points'], num_obj=num_obj)
self.df_refiner = PoseRefineNet(
num_points=exp['d_test']['num_points'], num_obj=num_obj)
if exp.get('model', {}).get('df_load', False):
self.df_pose_estimator.load_state_dict(
torch.load(exp['model']['df_pose_estimator']))
if exp.get('model', {}).get('df_refine', False):
self.df_refiner.load_state_dict(
torch.load(exp['model']['df_refiner']))
sl = exp['d_train']['obj_list_sym']
self.df_criterion = Loss( num_points_large, sl)
self.df_criterion_refine = Loss_refine( num_points_large, sl)
self.criterion_adds = LossAddS(sym_list=sl)
self.visualizer = Visualizer(self.exp['model_path'] + '/visu/', None)
self._dict_track = {}
self.number_images_log_test = self.exp.get(
'visu', {}).get('number_images_log_test', 1)
self.counter_images_logged = 0
self.init_train_vali_split = False
mp = exp['model_path']
fh = logging.FileHandler(f'{mp}/Live_Logger_Lightning.log')
fh.setLevel(logging.DEBUG)
logging.getLogger("lightning").addHandler(fh)
self.start = time.time()
self.best_val_loss = 999
# optional, set the logging level
if self.exp.get('visu', {}).get('log_to_file', False):
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
logging.getLogger("lightning").addHandler(console)
log = open(f'{mp}/Live_Logger_Lightning.log', "a")
sys.stdout = log
logging.info('Logging to File')
def forward(self, batch):
st = time.time()
# unpack batch
points, choose, img, target, model_points, idx = batch[0:6]
log_scalars = {}
bs = points.shape[0]
tight_padded_img_batch = tight_image_batch(
img, device=self.device)
pred_r = torch.zeros((bs, 1000, 4), device=self.device)
pred_t = torch.zeros((bs, 1000, 3), device=self.device)
pred_c = torch.zeros((bs, 1000, 1), device=self.device)
emb = torch.zeros((bs, 32, 1000), device=self.device)
for i in range(bs):
pred_r[i], pred_t[i], pred_c[i], emb[i] = self.df_pose_estimator(
ret_cropped_image(img[i])[None],
points[i][None],
choose[i][None],
idx[i][None])
        refine = True if self.exp['model']['df_refine_iterations'] > 0 else False
        loss, dis, new_points, new_target, pred_r_current, pred_t_current = self.df_criterion(
            pred_r, pred_t, pred_c,
            target, model_points, idx,
            points, self.exp['model']['df_w'], refine)
for i in range( self.exp['model']['df_refine_iterations'] ):
pred_r, pred_t = self.df_refiner(new_points, emb, idx)
            dis, new_points, new_target, pred_r_current, pred_t_current = self.df_criterion_refine(
pred_r, pred_t, new_target, model_points, idx,
new_points, pred_r_current, pred_t_current)
return loss, dis, pred_r_current, pred_t_current, new_points, log_scalars
def training_step(self, batch, batch_idx):
self._mode = 'train'
st = time.time()
total_loss = 0
total_dis = 0
# forward
loss, dis, pred_r_current, pred_t_current, new_points, log_scalars = self(batch[0])
if self.counter_images_logged < self.exp.get('visu', {}).get('images_train', 1):
# self.visu_batch(batch, pred_trans, pred_rot_wxyz, pred_points) TODO
pass
# tensorboard logging
loss = torch.mean(loss, dim= 0)
tensorboard_logs = {'train_loss': float(loss)}
tensorboard_logs = {**tensorboard_logs, **log_scalars}
self._dict_track = {**self._dict_track}
        return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': {'Loss': loss, 'ADD-S': torch.mean(dis, dim=0)}}
def validation_step(self, batch, batch_idx):
self._mode = 'train'
st = time.time()
total_loss = 0
total_dis = 0
# forward
loss, dis, pred_r_current, pred_t_current, new_points, log_scalars = self(batch[0])
if self.counter_images_logged < self.exp.get('visu', {}).get('images_train', 1):
self.visu_batch(batch[0], pred_r_current, pred_t_current, new_points)
# tensorboard logging
loss = torch.mean(loss, dim= 0)
dis = torch.mean(dis, dim= 0)
tensorboard_logs = {'val_loss': float( loss ), 'val_dis': loss, 'val_dis_float': float(loss) }
tensorboard_logs = {**tensorboard_logs, **log_scalars}
self._dict_track = {**self._dict_track,'val_dis_float': float(loss), 'val_dis': float(loss), 'val_loss': float(loss)}
return{'val_loss': loss, 'val_dis': loss, 'log': tensorboard_logs} # 'progress_bar': {'L_Seg': log_scalars['loss_segmentation'], 'L_Add': log_scalars['loss_pose_add'], 'L_Tra': log_scalars[f'loss_translation']}}
def test_step(self, batch, batch_idx):
self._mode = 'train'
st = time.time()
total_loss = 0
total_dis = 0
# forward
loss, dis, pred_r_current, pred_t_current, new_points, log_scalars = self(batch[0])
if self.counter_images_logged < self.exp.get('visu', {}).get('images_train', 1):
# self.visu_batch(batch, pred_trans, pred_rot_wxyz, pred_points) TODO
pass
# tensorboard logging
tensorboard_logs = {'train_loss': float(dis)}
tensorboard_logs = {**tensorboard_logs, **log_scalars}
self._dict_track = {**self._dict_track}
return {'loss': dis, 'log': tensorboard_logs} # 'progress_bar': {'L_Seg': log_scalars['loss_segmentation'], 'L_Add': log_scalars['loss_pose_add'], 'L_Tra': log_scalars[f'loss_translation']}}
def validation_epoch_end(self, outputs):
avg_dict = {}
self.counter_images_logged = 0 # reset image log counter
# only keys that are logged in tensorboard are removed from log_scalars !
for old_key in list(self._dict_track.keys()):
if old_key.find('val') == -1:
continue
newk = 'avg_' + old_key
avg_dict['avg_' +
old_key] = float(np.mean(np.array(self._dict_track[old_key])))
p = old_key.find('adds_dis')
if p != -1:
auc = compute_auc(self._dict_track[old_key])
avg_dict[old_key[:p] + 'auc [0 - 100]'] = auc
self._dict_track.pop(old_key, None)
df1 = dict_to_df(avg_dict)
df2 = dict_to_df(get_df_dict(pre='val'))
img = compare_df(df1, df2, key='auc [0 - 100]')
tag = 'val_table_res_vs_df'
img.save(self.exp['model_path'] +
f'/visu/{self.current_epoch}_{tag}.png')
self.logger.experiment.add_image(tag, np.array(img).astype(
np.uint8), global_step=self.current_epoch, dataformats='HWC')
avg_val_dis_float = float(0)
if avg_dict.get( 'avg_val_loss',999) < self.best_val_loss:
self.best_val_loss = avg_dict.get( 'avg_val_loss',999)
return {'avg_val_dis_float': float(avg_dict.get( 'avg_val_loss',999)),
'log': avg_dict}
def train_epoch_end(self, outputs):
self.counter_images_logged = 0 # reset image log counter
avg_dict = {}
for old_key in list(self._dict_track.keys()):
if old_key.find('train') == -1:
continue
avg_dict['avg_' +
old_key] = float(np.mean(np.array(self._dict_track[old_key])))
self._dict_track.pop(old_key, None)
string = 'Time for one epoch: ' + str(time.time() - self.start)
print(string)
self.start = time.time()
return {**avg_dict, 'log': avg_dict}
def test_epoch_end(self, outputs):
self.counter_images_logged = 0 # reset image log counter
avg_dict = {}
# only keys that are logged in tensorboard are removed from log_scalars !
for old_key in list(self._dict_track.keys()):
if old_key.find('test') == -1:
continue
newk = 'avg_' + old_key
avg_dict['avg_' +
old_key] = float(np.mean(np.array(self._dict_track[old_key])))
p = old_key.find('adds_dis')
if p != -1:
auc = compute_auc(self._dict_track[old_key])
avg_dict[old_key[:p] + 'auc [0 - 100]'] = auc
self._dict_track.pop(old_key, None)
avg_test_dis_float = float(avg_dict['avg_test_loss [+inf - 0]'])
df1 = dict_to_df(avg_dict)
df2 = dict_to_df(get_df_dict(pre='test'))
img = compare_df(df1, df2, key='auc [0 - 100]')
tag = 'test_table_res_vs_df'
img.save(self.exp['model_path'] +
f'/visu/{self.current_epoch}_{tag}.png')
self.logger.experiment.add_image(tag, np.array(img).astype(
np.uint8), global_step=self.current_epoch, dataformats='HWC')
return {'avg_test_dis_float': avg_test_dis_float,
'avg_test_dis': avg_dict['avg_test_loss [+inf - 0]'],
'log': avg_dict}
def visu_batch(self, batch, pred_r_current, pred_t_current, new_points):
target = copy.deepcopy(batch[3][0].detach().cpu().numpy())
mp = copy.deepcopy(batch[4][0].detach().cpu().numpy())
gt_rot_wxyz, gt_trans, unique_desig = batch[10:13]
img = batch[8].detach().cpu().numpy()[0]
cam = batch[9][0]
pre = f'%s_obj%d' % (str(unique_desig[0][0]).replace('/', "_"), int(unique_desig[1][0]))
store = self.exp['visu'].get('store', False)
self.visualizer.plot_estimated_pose(tag=f'target_{pre}',
epoch=self.current_epoch,
img=img,
points=target,
cam_cx=float(cam[0]),
cam_cy=float(cam[1]),
cam_fx=float(cam[2]),
cam_fy=float(cam[3]),
store=store)
self.visualizer.plot_estimated_pose(tag=f'new_points_{pre}',
epoch=self.current_epoch,
img=img,
points=new_points[0].clone().detach().cpu().numpy(),
cam_cx=float(cam[0]),
cam_cy=float(cam[1]),
cam_fx=float(cam[2]),
cam_fy=float(cam[3]),
store=store)
t = pred_t_current.detach().cpu().numpy()[0,:][None,:]
mat = quat_to_rot(pred_r_current).detach().cpu().numpy()[0]
self.visualizer.plot_estimated_pose(tag=f'pred_{pre}',
epoch=self.current_epoch,
img=img,
points=mp,
trans=t,
rot_mat=mat,
cam_cx=float(cam[0]),
cam_cy=float(cam[1]),
cam_fx=float(cam[2]),
cam_fy=float(cam[3]),
store=store)
# self.visualizer.plot_contour(tag='gt_contour_%s_obj%d' % (str(unique_desig[0][0]).replace('/', "_"), int(unique_desig[1][0])),
# epoch=self.current_epoch,
# img=img,
# points=points,
# cam_cx=float(cam[0]),
# cam_cy=float(cam[1]),
# cam_fx=float(cam[2]),
# cam_fy=float(cam[3]),
# store=store)
# t = pred_t.detach().cpu().numpy()
# r = pred_r.detach().cpu().numpy()
# rot = R.from_quat(re_quat(r, 'wxyz'))
# self.visualizer.plot_estimated_pose(tag='pred_%s_obj%d' % (str(unique_desig[0][0]).replace('/', "_"), int(unique_desig[1][0])),
# epoch=self.current_epoch,
# img=img,
# points=copy.deepcopy(
# model_points[:, :].detach(
# ).cpu().numpy()),
# trans=t.reshape((1, 3)),
# rot_mat=rot.as_matrix(),
# cam_cx=float(cam[0]),
# cam_cy=float(cam[1]),
# cam_fx=float(cam[2]),
# cam_fy=float(cam[3]),
# store=store)
# self.visualizer.plot_contour(tag='pred_contour_%s_obj%d' % (str(unique_desig[0][0]).replace('/', "_"), int(unique_desig[1][0])),
# epoch=self.current_epoch,
# img=img,
# points=copy.deepcopy(
# model_points[:, :].detach(
# ).cpu().numpy()),
# trans=t.reshape((1, 3)),
# rot_mat=rot.as_matrix(),
# cam_cx=float(cam[0]),
# cam_cy=float(cam[1]),
# cam_fx=float(cam[2]),
# cam_fy=float(cam[3]),
# store=store)
# render_img, depth, h_render = self.vm.get_closest_image_batch(
# i=idx.unsqueeze(0), rot=pred_r.unsqueeze(0), conv='wxyz')
# # get the bounding box !
# w = 640
# h = 480
# real_img = torch.zeros((1, 3, h, w), device=self.device)
# # update the target to get new bb
# base_inital = quat_to_rot(
# pred_r.unsqueeze(0), 'wxyz', device=self.device).squeeze(0)
# base_new = base_inital.view(-1, 3, 3).permute(0, 2, 1)
# pred_points = torch.add(
# torch.bmm(model_points.unsqueeze(0), base_inital.unsqueeze(0)), pred_t)
# # torch.Size([16, 2000, 3]), torch.Size([16, 4]) , torch.Size([16, 3])
# bb_ls = get_bb_real_target(
# pred_points, cam.unsqueeze(0))
# for j, b in enumerate(bb_ls):
# if not b.check_min_size():
# pass
# c = cam.unsqueeze(0)
# center_real = backproject_points(
# pred_t.view(1, 3), fx=c[j, 2], fy=c[j, 3], cx=c[j, 0], cy=c[j, 1])
# center_real = center_real.squeeze()
# b.move(-center_real[0], -center_real[1])
# b.expand(1.1)
# b.expand_to_correct_ratio(w, h)
# b.move(center_real[0], center_real[1])
# crop_real = b.crop(img_orig).unsqueeze(0)
# up = torch.nn.UpsamplingBilinear2d(size=(h, w))
# crop_real = torch.transpose(crop_real, 1, 3)
# crop_real = torch.transpose(crop_real, 2, 3)
# real_img[j] = up(crop_real)
# inp = real_img[0].unsqueeze(0)
# inp = torch.transpose(inp, 1, 3)
# inp = torch.transpose(inp, 1, 2)
# data = torch.cat([inp, render_img], dim=3)
# data = torch.transpose(data, 1, 3)
# data = torch.transpose(data, 2, 3)
# self.visualizer.visu_network_input(tag='render_real_comp_%s_obj%d' % (str(unique_desig[0][0]).replace('/', "_"), int(unique_desig[1][0])),
# epoch=self.current_epoch,
# data=data,
# max_images=1, store=store)
def configure_optimizers(self):
optimizer = torch.optim.Adam(
[{'params': self.df_pose_estimator.parameters()}], lr=self.hparams['lr'])
scheduler = {
'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, **self.exp['lr_cfg']['on_plateau_cfg']),
**self.exp['lr_cfg']['scheduler']
}
return [optimizer], [scheduler]
def train_dataloader(self):
self.visualizer.writer = self.logger.experiment
dataset_train = GenericDataset(
cfg_d=self.exp['d_train'],
cfg_env=self.env)
        # initialize train and validation indices
if not self.init_train_vali_split:
self.init_train_vali_split = True
self.indices_valid, self.indices_train = sklearn.model_selection.train_test_split(
range(0, len(dataset_train)), test_size=self.test_size)
dataset_subset = torch.utils.data.Subset(
dataset_train, self.indices_train)
        dataloader_train = torch.utils.data.DataLoader(dataset_subset,
                                                       **self.exp['loader'])
return dataloader_train
def test_dataloader(self):
self.visualizer.writer = self.logger.experiment
dataset_test = GenericDataset(
cfg_d=self.exp['d_test'],
cfg_env=self.env)
dataloader_test = torch.utils.data.DataLoader(dataset_test,
**self.exp['loader'])
return dataloader_test
def val_dataloader(self):
self.visualizer.writer = self.logger.experiment
dataset_val = GenericDataset(
cfg_d=self.exp['d_train'],
cfg_env=self.env)
        # initialize train and validation indices
if not self.init_train_vali_split:
self.init_train_vali_split = True
self.indices_valid, self.indices_train = sklearn.model_selection.train_test_split(
range(0, len(dataset_val)), test_size=self.test_size)
dataset_subset = torch.utils.data.Subset(
dataset_val, self.indices_valid)
        dataloader_val = torch.utils.data.DataLoader(dataset_subset,
                                                     **self.exp['loader'])
return dataloader_val
def file_path(string):
if os.path.isfile(string):
return string
else:
raise NotADirectoryError(string)
def move_dataset_to_ssd(env, exp):
# costum code to move dataset on cluster
try:
if env.get('leonhard', {}).get('copy', False):
files = ['data', 'data_syn', 'models']
p_ls = os.popen('echo $TMPDIR').read().replace('\n', '')
p_ycb_new = p_ls + '/YCB_Video_Dataset'
p_ycb = env['p_ycb']
try:
os.mkdir(p_ycb_new)
os.mkdir('$TMPDIR/YCB_Video_Dataset')
except:
pass
for f in files:
p_file_tar = f'{p_ycb}/{f}.tar'
logging.info(f'Copying {f} to {p_ycb_new}/{f}')
if os.path.exists(f'{p_ycb_new}/{f}'):
logging.info(
"data already exists! Interactive session?")
else:
start_time = time.time()
if f == 'data':
bashCommand = "tar -xvf" + p_file_tar + \
" -C $TMPDIR | awk 'BEGIN {ORS=\" \"} {if(NR%1000==0)print NR}\' "
else:
bashCommand = "tar -xvf" + p_file_tar + \
" -C $TMPDIR/YCB_Video_Dataset | awk 'BEGIN {ORS=\" \"} {if(NR%1000==0)print NR}\' "
os.system(bashCommand)
logging.info(
f'Transferred {f} folder within {str(time.time() - start_time)}s to local SSD')
env['p_ycb'] = p_ycb_new
    except:
        logging.info('Copying data failed')
return exp, env
def move_background(env, exp):
try:
# Update the env for the model when copying dataset to ssd
if env.get('leonhard', {}).get('copy', False):
p_file_tar = env['p_background'] + '/indoorCVPR_09.tar'
p_ls = os.popen('echo $TMPDIR').read().replace('\n', '')
p_n = p_ls + '/Images'
try:
os.mkdir(p_n)
except:
pass
if os.path.exists(f'{p_n}/office'):
logging.info(
"data already exists! Interactive session?")
else:
start_time = time.time()
bashCommand = "tar -xvf" + p_file_tar + \
" -C $TMPDIR | awk 'BEGIN {ORS=\" \"} {if(NR%1000==0)print NR}\' "
os.system(bashCommand)
env['p_background'] = p_n
except:
logging.info('Copying data failed')
return exp, env
def load_from_file(p):
if os.path.isfile(p):
with open(p, 'r') as f:
data = yaml.safe_load(f)
else:
raise ValueError
return data
class CallbackRefine(Callback):
def on_epoch_start(self, trainer, pl_module):
if pl_module.best_val_loss < 0.016:
logging.warning('Refine Started')
pl_module.exp['model']['df_refine_iterations'] = 2
optimizer = torch.optim.Adam(
                [{'params': pl_module.df_refiner.parameters()}], lr=pl_module.hparams['lr'])
scheduler = {
'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, **pl_module.exp['lr_cfg']['on_plateau_cfg']),
**pl_module.exp['lr_cfg']['scheduler']
}
trainer.optimizers = [optimizer]
trainer.lr_schedulers = trainer.configure_schedulers([scheduler])
if __name__ == "__main__":
seed_everything(42)
def signal_handler(signal, frame):
print('exiting on CRTL-C')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=file_path, default='cfg/exp/exp.yml',
help='The main experiment yaml file.')
parser.add_argument('--env', type=file_path, default='cfg/env/env.yml',
help='The environment yaml file.')
args = parser.parse_args()
exp_cfg_path = args.exp
env_cfg_path = args.env
exp = load_from_file(exp_cfg_path)
env = load_from_file(env_cfg_path)
if exp['model_path'].split('/')[-2] == 'debug':
p = '/'.join(exp['model_path'].split('/')[:-1])
try:
shutil.rmtree(p)
except:
pass
timestamp = '_'
else:
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
p = exp['model_path'].split('/')
p.append(str(timestamp) + '_' + p.pop())
new_path = '/'.join(p)
exp['model_path'] = new_path
model_path = exp['model_path']
# copy config files to model path
if not os.path.exists(model_path):
os.makedirs(model_path)
print((pad("Generating network run folder")))
else:
print((pad("Network run folder already exits")))
if exp.get('visu', {}).get('log_to_file', False):
log = open(f'{model_path}/Live_Logger_Lightning.log', "a")
sys.stdout = log
print('Logging to File')
exp_cfg_fn = os.path.split(exp_cfg_path)[-1]
env_cfg_fn = os.path.split(env_cfg_path)[-1]
print(pad(f'Copy {env_cfg_path} to {model_path}/{exp_cfg_fn}'))
shutil.copy(exp_cfg_path, f'{model_path}/{exp_cfg_fn}')
shutil.copy(env_cfg_path, f'{model_path}/{env_cfg_fn}')
exp, env = move_dataset_to_ssd(env, exp)
exp, env = move_background(env, exp)
dic = {'exp': exp, 'env': env}
model = DenseFusionLightning(**dic)
early_stop_callback = EarlyStopping(
**exp['early_stopping'])
checkpoint_callback = ModelCheckpoint(
filepath=exp['model_path'] + '/{epoch}-{avg_val_dis_float:.4f}',
**exp['model_checkpoint'])
if exp.get('checkpoint_restore', False):
checkpoint = torch.load(
exp['checkpoint_load'], map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
# with torch.autograd.set_detect_anomaly(True):
trainer = Trainer(**exp['trainer'],
checkpoint_callback=checkpoint_callback,
early_stop_callback=early_stop_callback,
callbacks=[CallbackRefine()],
default_root_dir=exp['model_path'])
if exp.get('model_mode', 'fit') == 'fit':
trainer.fit(model)
elif exp.get('model_mode', 'fit') == 'test':
trainer.test(model)
else:
print("Wrong model_mode defined in exp config")
raise Exception
```
#### File: src/loss/loss.py
```python
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import torch
import numpy as np
import random
from rotations import quat_to_rot
def knn(ref, query):
"""return indices of ref for each query point. L2 norm
Args:
ref ([type]): points * 3
query ([type]): tar_points * 3
Returns:
[knn]: distance = query * 1 , indices = query * 1
"""
mp2 = ref.unsqueeze(0).repeat(query.shape[0], 1, 1)
tp2 = query.unsqueeze(1).repeat(1, ref.shape[0], 1)
dist = torch.norm(mp2 - tp2, dim=2, p=None)
knn = dist.topk(1, largest=False)
return knn
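# Illustrative example with toy inputs (not from the training pipeline):
#   ref   = torch.tensor([[0., 0., 0.], [1., 1., 1.]])
#   query = torch.tensor([[0.9, 1.0, 1.1]])
#   knn(ref, query).indices  -> tensor([[1]])   # the query point is closest to ref[1]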
def loss_calculation(pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine, num_point_mesh, sym_list):
""" works i checked if manually to give the same result as loss_calculation
Args:
pred_r ([type]): [description]
pred_t ([type]): [description]
pred_c ([type]): [description]
target ([type]): [description]
model_points ([type]): [description]
idx ([type]): [description]
points ([type]): [description]
w ([type]): [description]
refine ([type]): [description]
num_point_mesh ([type]): [description]
sym_list ([type]): [description]
device ([type]): [description]
Returns:
[type]: [description]
"""
bs, num_p, _ = pred_c.size()
pred_r = pred_r / (torch.norm(pred_r, dim=2).view(bs, num_p, 1))
base = quat_to_rot(pred_r.contiguous().view(-1, 4),
'wxyz', device=points.device)
ori_base = base
base = base.contiguous().transpose(2, 1).contiguous()
model_points = model_points.view(bs, 1, num_point_mesh, 3).repeat(
1, num_p, 1, 1).view(bs * num_p, num_point_mesh, 3)
target = target.view(bs, 1, num_point_mesh, 3).repeat(
1, num_p, 1, 1).view(bs * num_p, num_point_mesh, 3)
ori_target = target
pred_t = pred_t.contiguous().view(bs * num_p, 1, 3)
ori_t = pred_t
points = points.contiguous().view(bs * num_p, 1, 3)
pred_c = pred_c.contiguous().view(bs * num_p)
pred = torch.add(torch.bmm(model_points, base), points + pred_t)
if not refine:
if idx[0].item() in sym_list:
knn_obj = knn(
ref=target[0, :, :], query=pred[0, :, :])
inds = knn_obj.indices
target[0, :, :] = target[0, inds[:, 0], :]
dis = torch.mean(torch.norm(
(pred - target), dim=2), dim=1)
loss = torch.mean((dis * pred_c - w * torch.log(pred_c)), dim=0)
pred_c = pred_c.view(bs, num_p)
_, which_max = torch.max(pred_c, 1)
dis = dis.view(bs, num_p)
enum = torch.arange(0, bs, 1, device=points.device, dtype=torch.long)
ori_t_sel = ori_t.view(bs, num_p, 3)[enum, which_max, :]
points_sel = points.view(bs, num_p, 3)[enum, which_max, :]
ori_base_sel = ori_base.view(bs, num_p, 3, 3)[enum, which_max, :, :]
t = ori_t_sel + points_sel
r = pred_r[enum, which_max, :]
ori_base = ori_base_sel
points = points.view(bs, num_p, 3)
ori_t = t[:,None,:].repeat(1, num_p, 1)
new_points = torch.bmm(
(points - ori_t), ori_base)
tmp1 = ori_target.view(bs, num_p, num_point_mesh, 3)
new_target = tmp1[:, 0, :, :].view(bs, num_point_mesh, 3)
# ori_t 16 2000 3
ori_t = t[:,None,:].repeat(1, num_point_mesh, 1)
new_target = torch.bmm((new_target - ori_t), ori_base)
# print('------------> ', dis[0][which_max[0]].item(), pred_c[0][which_max[0]].item(), idx[0].item())
return loss, dis[enum, which_max], new_points.detach(), new_target.detach(), r.detach(), t.detach() #TODO
class Loss(_Loss):
def __init__(self, num_points_mesh, sym_list):
super(Loss, self).__init__(True)
self.num_pt_mesh = num_points_mesh
self.sym_list = sym_list
def forward(self, pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine):
return loss_calculation(pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine, self.num_pt_mesh, self.sym_list)
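# Illustrative call, mirroring how the criterion is used in src/lightning.py
# (the mesh size, symmetric-object ids and w are assumptions for this sketch;
# w weighs the -log(confidence) regularisation term in loss_calculation above):
#   criterion = Loss(num_points_mesh=500, sym_list=[12, 15, 18, 19, 20])
#   loss, dis, new_points, new_target, r, t = criterion(
#       pred_r, pred_t, pred_c, target, model_points, idx, points,
#       w=0.015, refine=False)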
```
#### File: src/loss/loss_refiner.py
```python
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import torch
import numpy as np
import random
from rotations import quat_to_rot, compose_quat
from helper import knn
class Loss_refine(_Loss):
def __init__(self, num_points_mesh, sym_list):
super(Loss_refine, self).__init__(True)
self.num_pt_mesh = num_points_mesh
self.sym_list = sym_list
def forward(self, pred_r, pred_t, target, model_points, idx, points, pred_r_current, pred_t_current, use_orig=False):
bs, _ = pred_r.size()
num_p = len(points[0])
pred_r = pred_r / (torch.norm(pred_r, dim=1).view(bs, 1))
base = quat_to_rot(pred_r.contiguous().view(-1, 4),
'wxyz', device=points.device)
ori_base = base
base = base.contiguous().transpose(2, 1).unsqueeze(
0).contiguous().view(-1, 3, 3)
model_points = model_points.view(
bs, 1, self.num_pt_mesh, 3).view(bs, self.num_pt_mesh, 3)
target = target.view(bs, 1, self.num_pt_mesh, 3).view(
bs, self.num_pt_mesh, 3)
ori_target = target
pred_t = pred_t.unsqueeze(1).repeat(
1, self.num_pt_mesh, 1).contiguous() # .view(bs * num_p, 1, 3)
ori_t = pred_t
# model_points 16 x 2000 x 3
# base 16 X 3 x 3
# points 16 X 1 x 3
pred = torch.add(torch.bmm(model_points, base), pred_t)
if idx[0].item() in self.sym_list:
knn_obj = knn(
ref=target[0, :, :], query=pred[0, :, :])
inds = knn_obj.indices
target[0, :, :] = target[0, inds[:, 0], :]
dis = torch.mean(torch.norm((pred - target), dim=2), dim=1)
t = ori_t
num_input_points = points.shape[1]
points = points.view(bs, num_input_points, 3)
ori_base = ori_base.view(bs, 3, 3).contiguous()
ori_t = t[:, 0, :].unsqueeze(1).repeat(
1, num_input_points, 1).contiguous().view(bs, num_input_points, 3)
new_points = torch.bmm((points - ori_t), ori_base).contiguous()
new_target = ori_target[0].view(1, self.num_pt_mesh, 3).contiguous()
ori_t = t[:, 0, :].unsqueeze(1).repeat(
1, self.num_pt_mesh, 1).contiguous().view(bs, self.num_pt_mesh, 3)
new_target = torch.bmm((new_target - ori_t), ori_base).contiguous()
# print('------------> ', dis.item(), idx[0].item())
pred_r_current = compose_quat( pred_r_current, pred_r) #TODO check if this is working !!!
return dis, new_points.detach(), new_target.detach(), pred_r_current, pred_t_current+pred_t
``` |
{
"source": "JonasFrey96/FlowPose6D",
"score": 2
} |
#### File: src/deep_im/flownet.py
```python
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_, constant_
import torch.nn.functional as F
# spatial_correlation_sample is needed by correlate() below; it is provided by
# the external "spatial-correlation-sampler" package (an assumption about the
# intended dependency, since the original file does not import it).
from spatial_correlation_sampler import spatial_correlation_sample
def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
if batchNorm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=(kernel_size - 1) // 2, bias=False),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.1, inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=(kernel_size - 1) // 2, bias=True),
nn.LeakyReLU(0.1, inplace=True)
)
def predict_flow(in_planes):
return nn.Conv2d(in_planes, 2, kernel_size=3, stride=1, padding=1, bias=False)
def deconv(in_planes, out_planes):
return nn.Sequential(
nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4,
stride=2, padding=1, bias=False),
nn.LeakyReLU(0.1, inplace=True)
)
def correlate(input1, input2):
out_corr = spatial_correlation_sample(input1,
input2,
kernel_size=1,
patch_size=21,
stride=1,
padding=0,
dilation_patch=2)
# collate dimensions 1 and 2 in order to be treated as a
# regular 4D tensor
b, ph, pw, h, w = out_corr.size()
out_corr = out_corr.view(b, ph * pw, h, w) / input1.size(1)
return F.leaky_relu_(out_corr, 0.1)
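# Illustrative shapes (derived from the settings above): for two feature maps of
# size B x C x H x W, the returned correlation cost volume is B x (21*21) x H x W,
# one response map per displacement in the 21 x 21 search window.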
def crop_like(input, target):
if input.size()[2:] == target.size()[2:]:
return input
else:
return input[:, :, :target.size(2), :target.size(3)]
__all__ = [
'flownets', 'flownets_bn'
]
class FlowNetS(nn.Module):
expansion = 1
def __init__(self, batchNorm=True):
super(FlowNetS, self).__init__()
self.batchNorm = batchNorm
self.conv1 = conv(self.batchNorm, 6, 64, kernel_size=7, stride=2)
self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
self.conv3_1 = conv(self.batchNorm, 256, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm, 1024, 1024)
self.deconv5 = deconv(1024, 512)
self.deconv4 = deconv(1026, 256)
self.deconv3 = deconv(770, 128)
self.deconv2 = deconv(386, 64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(1026)
self.predict_flow4 = predict_flow(770)
self.predict_flow3 = predict_flow(386)
self.predict_flow2 = predict_flow(194)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(
2, 2, 4, 2, 1, bias=False)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(
2, 2, 4, 2, 1, bias=False)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(
2, 2, 4, 2, 1, bias=False)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(
2, 2, 4, 2, 1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
out_conv2 = self.conv2(self.conv1(x))
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
# DeepIm used for prediction head
flow6 = self.predict_flow6(out_conv6)
flow6_up = crop_like(self.upsampled_flow6_to_5(flow6), out_conv5)
out_deconv5 = crop_like(self.deconv5(out_conv6), out_conv5)
concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
flow5 = self.predict_flow5(concat5)
flow5_up = crop_like(self.upsampled_flow5_to_4(flow5), out_conv4)
out_deconv4 = crop_like(self.deconv4(concat5), out_conv4)
concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
# concat4 used for DeepIM Feature map 1d convs ?
flow4 = self.predict_flow4(concat4)
flow4_up = crop_like(self.upsampled_flow4_to_3(flow4), out_conv3)
out_deconv3 = crop_like(self.deconv3(concat4), out_conv3)
concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)
flow3 = self.predict_flow3(concat3)
flow3_up = crop_like(self.upsampled_flow3_to_2(flow3), out_conv2)
out_deconv2 = crop_like(self.deconv2(concat3), out_conv2)
concat2 = torch.cat((out_conv2, out_deconv2, flow3_up), 1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2, flow3, flow4, flow5, flow6, out_conv6
else:
return flow2, flow3, flow4, flow5, flow6, out_conv6
def weight_parameters(self):
return [param for name, param in self.named_parameters() if 'weight' in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if 'bias' in name]
def flownets(data=None):
"""FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set
"""
model = FlowNetS(batchNorm=False)
if data is not None:
model.load_state_dict(data['state_dict'])
return model
def flownets_bn(data=None):
"""FlowNetS model architecture from the
"Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852)
Args:
data : pretrained weights of the network. will create a new one if not set
"""
model = FlowNetS(batchNorm=True)
if data is not None:
model.load_state_dict(data['state_dict'])
return model
# net = FlowNetS()
# img = torch.ones((1, 6, 480, 640), dtype=torch.float32)
# net(img)
```
#### File: src/deep_im/renderer_ycb.py
```python
import os
import numpy as np
import torch
import sys
import argparse
import copy
sys.path.insert(0, os.getcwd())
sys.path.append(os.path.join(os.getcwd() + '/src'))
sys.path.append(os.path.join(os.getcwd() + '/lib'))
sys.path.append(os.path.join(os.getcwd() + '/src/deep_im/lib'))
from loaders_v2 import ConfigLoader
from loaders_v2 import GenericDataset
from src.deep_im.lib.render_glumpy.render_py_light import Render_Py_Light
def mat2quat(M):
# Qyx refers to the contribution of the y input vector component to
# the x output vector component. Qyx is therefore the same as
# M[0,1]. The notation is from the Wikipedia article.
Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat
# Fill only lower half of symmetric matrix
K = (
np.array(
[
[Qxx - Qyy - Qzz, 0, 0, 0],
[Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0],
[Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0],
[Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz],
]
)
/ 3.0
)
# Use Hermitian eigenvectors, values for speed
vals, vecs = np.linalg.eigh(K)
# Select largest eigenvector, reorder to w,x,y,z quaternion
q = vecs[[3, 0, 1, 2], np.argmax(vals)]
# Prefer quaternion with positive w
if q[0] < 0:
q *= -1
return q
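# Illustrative check: mat2quat(np.eye(3)) gives [1, 0, 0, 0] in (w, x, y, z)
# order, i.e. the identity rotation.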
class RendererYCB():
def __init__(self, p_ycb, obj_name_2_idx, K1, K2):
# loads all models
self.renderes = {}
K = np.array([[1066.778, 0, 312.9869], [
0, 1067.487, 241.3109], [0, 0, 1]])
ZNEAR = 0.25
ZFAR = 6.0
for name, idx in obj_name_2_idx.items():
model_dir = f'{p_ycb}/models/{name}'
width = 640
height = 480
brightness_ratios = [0.3]
# add for each camera calibration a costum renderer
self.renderes[idx] = [Render_Py_Light(model_dir, K1, width, height, ZNEAR, ZFAR, brightness_ratios=brightness_ratios),
Render_Py_Light(model_dir, K2, width, height, ZNEAR, ZFAR, brightness_ratios=brightness_ratios)]
def render(self, obj_idx, r_mat, trans, noise, cam):
"""[summary]
Parameters
----------
obj_idx : int
0 - (max_obj-1)
r_mat : np.array 3x3
[description]
trans : np.array 3
translaton xyz
noise : [type]
[description]
cam : int
0 - 1 what set of camera parameters should be used
"""
rend = self.renderes[obj_idx][cam]
r_quat = mat2quat(r_mat)
        rgb, depth = rend.render(r_quat, trans, light_position=[
            0, 0, -1], light_intensity=[0, 0, 0], brightness_k=0)
        # how can I verify that anything works?
        return rgb, depth
def file_path(string):
if os.path.isfile(string):
return string
else:
raise NotADirectoryError(string)
def get_flags():
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=file_path, default='yaml/exp/exp_ws_motion_train.yml', # required=True,
help='The main experiment yaml file.')
parser.add_argument('--env', type=file_path, default='yaml/env/env_natrix_jonas.yml',
help='The environment yaml file.')
return parser.parse_args()
if __name__ == "__main__":
args = get_flags()
exp = ConfigLoader().from_file(args.exp).get_FullLoader()
env = ConfigLoader().from_file(args.env).get_FullLoader()
dataset_train = GenericDataset(
cfg_d=exp['d_train'],
cfg_env=env)
K1 = np.array([[1066.778, 0, 312.9869], [
0, 1067.487, 241.3109], [0, 0, 1]])
K2 = np.array([[1066.778, 0, 312.9869], [
0, 1067.487, 241.3109], [0, 0, 1]])
obj_name_2_idx = copy.deepcopy(dataset_train._backend._name_to_idx)
RendererYCB('/media/scratch1/jonfrey/datasets/YCB_Video_Dataset',
obj_name_2_idx=obj_name_2_idx,
K1=K1,
K2=K2)
```
#### File: src/loaders_v2/dataset_generic.py
```python
from loaders_v2 import YCB, Backend
import random
import time
import torch
class GenericDataset():
def __init__(self, cfg_d, cfg_env):
self.overfitting_nr_idx = cfg_d['output_cfg'].get(
'overfitting_nr_idx', -1)
if cfg_d['name'] == "ycb":
self._backend = self._backend = YCB(cfg_d=cfg_d,
cfg_env=cfg_env)
else:
raise ValueError('dataset not implemented in cfg_d')
self._obj_list_sym = cfg_d['obj_list_sym']
self._obj_list_fil = cfg_d['obj_list_fil']
self._batch_list = self._backend._batch_list
self._force_one_object_visible = cfg_d['output_cfg']['force_one_object_visible']
self._no_list_for_sequence_len_one = cfg_d['output_cfg'].get(
'no_list_for_sequence_len_one', False)
        if self._no_list_for_sequence_len_one and \
                cfg_d['output_cfg'].get('seq_length', 1) > 1:
            raise ValueError(
                'It is not possible to return the batch without a list if the sequence length is larger than 1.')
if self._obj_list_fil is not None:
self._batch_list = [
x for x in self._batch_list if x[0] in self._obj_list_fil]
self._length = len(self._batch_list)
self._backend._length = len(self._batch_list)
def __len__(self):
return self._length
def __str__(self):
string = "Generic Dataloader of length %d" % len(self)
string += "\n Backbone is set to %s" % self._backend
return string
@property
def visu(self):
return self._backend.visu
@visu.setter
def visu(self, vis):
self._backend.visu = vis
@property
def sym_list(self):
return self._obj_list_sym
@property
def refine(self):
return self._backend.refine
@refine.setter
def refine(self, refine):
self._backend.refine = refine
@property
def seq_length(self):
return len(self._batch_list[0][2])
def get_num_points_mesh(self, refine=False):
        # only implemented for backwards compatibility. Refactor this
if refine == False:
return self._backend._num_pt_mesh_small
else:
return self._backend._num_pt_mesh_large
def __getitem__(self, index):
if self.overfitting_nr_idx != -1:
index = random.randrange(0, self.overfitting_nr_idx) * 1000 % self._length
seq = []
one_object_visible = False
# iterate over a sequence specified in the batch list
fails = 0
for k in self._batch_list[index][2]:
tmp = False
while type(tmp) is bool:
                num = str(k).zfill(6)
tmp = self._backend.getElement(
desig=f'{self._batch_list[index][1]}/{num}', obj_idx=self._batch_list[index][0])
if type (tmp) is bool:
fails += 1
index = random.randrange(0, len(self)-1)
if self.overfitting_nr_idx != -1:
index = random.randrange(
0, self.overfitting_nr_idx) * 1000 % self._length
k = self._batch_list[index][2][0]
seq.append(tmp)
return seq
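# Illustrative usage (mirroring the construction in src/deep_im/renderer_ycb.py;
# the cfg dictionaries come from the experiment and environment yaml files):
#   dataset = GenericDataset(cfg_d=exp['d_train'], cfg_env=env)
#   seq = dataset[0]   # list with one element per frame of the configured sequence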
```
#### File: loaders_v2/laval/data.py
```python
from scipy import ndimage
import numpy as np
import random
from skimage.color import rgb2hsv, hsv2rgb
def add_hsv_noise(rgb, hue_offset, saturation_offset, value_offset, proba=0.5):
mask = np.all(rgb != 0, axis=2)
hsv = rgb2hsv(rgb/255)
if random.uniform(0, 1) > proba:
hsv[:, :, 0] = (
hsv[:, :, 0] + random.uniform(-hue_offset, hue_offset)) % 1
if random.uniform(0, 1) > proba:
hsv[:, :, 1] = (
hsv[:, :, 1] + random.uniform(-saturation_offset, saturation_offset)) % 1
if random.uniform(0, 1) > proba:
hsv[:, :, 2] = (
hsv[:, :, 2] + random.uniform(-value_offset, value_offset)) % 1
rgb = hsv2rgb(hsv) * 255
return rgb.astype(np.uint8) * mask[:, :, np.newaxis]
def depth_blend(rgb1, depth1, rgb2, depth2):
new_depth2 = depth2.copy()
new_depth1 = depth1.copy()
rgb1_mask = np.all(rgb1 == 0, axis=2)
rgb2_mask = np.all(rgb2 == 0, axis=2)
rgb1_mask = ndimage.binary_dilation(rgb1_mask)
new_depth2[rgb2_mask] = -100000
new_depth1[rgb1_mask] = -100000
mask = (new_depth1 < new_depth2)
pos_mask = mask.astype(np.uint8)
neg_mask = (mask == False).astype(np.uint8)
masked_rgb_occluder = rgb1 * pos_mask[:, :, np.newaxis]
masked_rgb_object = rgb2 * neg_mask[:, :, np.newaxis]
masked_depth_occluder = depth1 * pos_mask
masked_depth_object = depth2 * neg_mask
blend_rgb = masked_rgb_occluder + masked_rgb_object
blend_depth = masked_depth_occluder + masked_depth_object
return blend_rgb, blend_depth, pos_mask
def gaussian_noise(img, gaussian_std):
type = img.dtype
copy = img.astype(np.float)
gaussian_noise = np.random.normal(0, gaussian_std, img.shape)
copy = (gaussian_noise + copy)
if type == np.uint8:
copy[copy < 0] = 0
copy[copy > 255] = 255
return copy.astype(type)
def color_blend(rgb1, depth1, rgb2, depth2):
mask = np.all(rgb1 == 0, axis=2)
mask = ndimage.binary_dilation(mask).astype(mask.dtype)
depth1[mask] = 0
rgb1[mask, :] = 0
mask = mask.astype(np.uint8)
new_depth = depth2 * mask + depth1
new_color = rgb2 * mask[:, :, np.newaxis] + rgb1
return new_color.astype(np.uint8), new_depth
def show_frames(rgbA, depthA, rgbB, depthB):
import matplotlib.pyplot as plt
fig, axis = plt.subplots(2, 3)
ax1, ax2, ax5 = axis[0, :]
ax3, ax4, ax6 = axis[1, :]
ax1.imshow(rgbA.astype(np.uint8))
ax2.imshow(rgbB.astype(np.uint8))
ax3.imshow(depthA)
ax4.imshow(depthB)
ax5.imshow((rgbA - rgbB).sum(axis=2))
ax6.imshow(depthA - depthB)
plt.show()
def compute_2Dboundingbox(pose, camera, scale_size=230, scale=(1, 1, 1)):
obj_x = pose.matrix[0, 3] * scale[0]
obj_y = pose.matrix[1, 3] * scale[1]
obj_z = pose.matrix[2, 3] * scale[2]
offset = scale_size / 2
points = np.ndarray((4, 3), dtype=np.float)
points[0] = [obj_x - offset, obj_y - offset, obj_z] # top left
points[1] = [obj_x - offset, obj_y + offset, obj_z] # top right
points[2] = [obj_x + offset, obj_y - offset, obj_z] # bottom left
points[3] = [obj_x + offset, obj_y + offset, obj_z] # bottom right
return camera.project_points(points).astype(np.int32)
def project_center(pose, camera, scale=(1, 1, 1)):
obj_x = pose.matrix[0, 3] * scale[0]
obj_y = pose.matrix[1, 3] * scale[1]
obj_z = pose.matrix[2, 3] * scale[2]
points = np.ndarray((1, 3), dtype=np.float)
points[0] = [obj_x, obj_y, obj_z]
return camera.project_points(points).astype(np.int32)
def normalize_scale(color, depth, boundingbox, output_size=(100, 100)):
import cv2
left = np.min(boundingbox[:, 1])
right = np.max(boundingbox[:, 1])
top = np.min(boundingbox[:, 0])
bottom = np.max(boundingbox[:, 0])
# Compute offset if bounding box goes out of the frame (0 padding)
h, w, c = color.shape
crop_w = right - left
crop_h = bottom - top
color_crop = np.zeros((crop_h, crop_w, 3), dtype=color.dtype)
depth_crop = np.zeros((crop_h, crop_w), dtype=np.float)
top_offset = abs(min(top, 0))
bottom_offset = min(crop_h - (bottom - h), crop_h)
right_offset = min(crop_w - (right - w), crop_w)
left_offset = abs(min(left, 0))
top = max(top, 0)
left = max(left, 0)
bottom = min(bottom, h)
right = min(right, w)
color_crop[top_offset:bottom_offset, left_offset:right_offset,
:] = color[top:bottom, left:right, :]
depth_crop[top_offset:bottom_offset,
left_offset:right_offset] = depth[top:bottom, left:right]
resized_rgb = cv2.resize(color_crop, output_size,
interpolation=cv2.INTER_NEAREST)
resized_depth = cv2.resize(
depth_crop, output_size, interpolation=cv2.INTER_NEAREST)
mask_rgb = resized_rgb != 0
mask_depth = resized_depth != 0
resized_depth = resized_depth.astype(np.uint16)
final_rgb = resized_rgb * mask_rgb
final_depth = resized_depth * mask_depth
return final_rgb, final_depth
def combine_view_transform(vp, view_transform):
"""
combines a camera space transform with a camera axis dependent transform.
    What's important here is that the view transform's translation represents the displacement along
    each axis, and its rotation the rotation around each axis. The rotation is applied around the translation point of view_transform.
:param vp:
:param view_transform:
:return:
"""
camera_pose = vp.copy()
R = camera_pose.rotation
T = camera_pose.translation
rand_R = view_transform.rotation
rand_T = view_transform.translation
rand_R.combine(R)
T.combine(rand_R)
rand_T.combine(T)
return rand_T
def image_blend(foreground, background):
"""
Uses pixel 0 to compute blending mask
:param foreground:
:param background:
:return:
"""
    if len(foreground.shape) == 2:
        # single-channel foreground: a pixel is "empty" if it is exactly 0
        mask = foreground == 0
    else:
        # multi-channel foreground: a pixel is "empty" only if all channels are 0
        mask = np.all(foreground == 0, axis=2)[:, :, np.newaxis]
return background * mask + foreground
def compute_axis(pose, camera):
    points = np.ndarray((4, 3), dtype=np.float64)
points[0] = [0, 0, 0]
points[1] = [1, 0, 0]
points[2] = [0, 1, 0]
points[3] = [0, 0, 1]
points *= 0.1
camera_points = pose.dot(points)
camera_points[:, 0] *= -1
return camera.project_points(camera_points).astype(np.int32)
def center_pixel(pose, camera):
obj_x = pose.matrix[0, 3] * 1000
obj_y = pose.matrix[1, 3] * 1000
obj_z = pose.matrix[2, 3] * 1000
point = [obj_x, -obj_y, -obj_z]
return camera.project_points(np.array([point])).astype(np.uint32)
```
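A minimal sanity check for `image_blend` above, assuming the helpers in this file are in scope; the toy arrays below are made up for illustration. Pixels that are exactly zero in the foreground are treated as empty and filled from the background.
```python
import numpy as np

# Toy 4x4 composite: one rendered (non-zero) pixel over a gray background.
foreground = np.zeros((4, 4, 3), dtype=np.uint8)
foreground[1, 2] = [255, 0, 0]                        # single rendered pixel
background = np.full((4, 4, 3), 128, dtype=np.uint8)

blended = image_blend(foreground, background)
assert (blended[1, 2] == [255, 0, 0]).all()           # rendered pixel is kept
assert (blended[0, 0] == [128, 128, 128]).all()       # empty pixel filled from background
```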
#### File: src/loss/loss_focal.py
```python
from torch.nn.modules.loss import _Loss
import torch
import torch.nn.functional as F
class FocalLoss(_Loss):
def __init__(self, gamma=2.0, alpha=0.25, size_average=True, per_batch=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.size_average = size_average
self.per_batch = per_batch
def forward(self, input_x, target):
"""
semantic: N x C x H x W
object_ids: N x H x W
"""
N, C, H, W = input_x.shape
target = target
logp = F.log_softmax(input_x, dim=1)
logp_t = logp.gather(1, target[:, None])
p_t = torch.exp(logp_t)
a_t = torch.full(target.shape, self.alpha,
dtype=input_x.dtype, device=input_x.device)
a_t[target == 0] = (1.0 - self.alpha)
loss = -a_t * torch.pow(1.0 - p_t, self.gamma) * logp_t
if self.per_batch:
return loss.mean(dim=3).mean(dim=2).mean(dim=1)
elif self.size_average:
return loss.mean()
else:
return loss.sum()
```
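A short usage sketch for the loss above (assuming `FocalLoss` from this module is in scope; shapes follow the docstring, the numbers are random placeholders):
```python
import torch

criterion = FocalLoss(gamma=2.0, alpha=0.25, size_average=True, per_batch=False)
logits = torch.randn(3, 2, 4, 4)             # N x C x H x W semantic logits
target = torch.randint(0, 2, (3, 4, 4))      # N x H x W per-pixel class ids
loss = criterion(logits, target)
print(loss.item())                           # single averaged scalar
```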
#### File: src/model/efficient_disparity.py
```python
import torch
from efficientnet_pytorch import EfficientNet
from torch import nn
from torchvision import transforms
def deconv(in_planes, out_planes, bias=False):
return nn.Sequential(
nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4,
stride=2, padding=1, bias=bias),
nn.LeakyReLU(0.1, inplace=True)
)
def predict_flow(in_planes):
return nn.Conv2d(in_planes, 2, kernel_size=3, stride=1, padding=1, bias=False)
def cat(x, y):
    # concatenate along the channel dimension; x=None starts a new accumulator
    if x is None:
        return y
    return torch.cat([x, y], dim=1)
class EfficientDisparity(nn.Module):
def __init__(self, num_classes = 22, backbone= 'efficientnet-b1', seperate_flow_head= False, pred_flow_pyramid=True, pred_flow_pyramid_add=True, ced_real=1, ced_render=1, ced_render_d=1,ced_real_d=1):
# tested with b6
super().__init__()
self.feature_extractor = EfficientNet.from_pretrained(backbone)
self.size = self.feature_extractor.get_image_size( backbone )
self.seperate_flow_head = seperate_flow_head
self.ced_real = ced_real
self.ced_render = ced_render
self.ced_real_d = ced_real_d
self.ced_render_d = ced_render_d
self.pred_flow_pyramid_add = pred_flow_pyramid_add
self.pred_flow_pyramid = pred_flow_pyramid
idxs, feats, res = self.feature_extractor.layer_info( torch.ones( (4,3,self.size, self.size)))
if ced_render_d > 0 or ced_real_d > 0:
self.depth_backbone = True
else:
self.depth_backbone = False
if self.depth_backbone:
self.feature_extractor_depth = EfficientNet.from_name(backbone, in_channels=1)
r = res[0]
self.idx_extract = []
self.feature_sizes = []
for i in range(len(idxs)):
if r != res[i]:
self.idx_extract.append(i-1)
r = res[i]
self.feature_sizes.append( feats[i-1] )
self.idx_extract.append(len(idxs)-1)
self.feature_sizes.append( feats[len(idxs)-1] )
self._num_classes = num_classes
dc = []
pred_flow_pyramid = []
upsample_flow_layers = []
self.feature_sizes = [8] + self.feature_sizes
label_feat = [16,8, num_classes]
label_layers = []
label_i = -1
for i in range( 1, len(self.feature_sizes) ):
if i == 1:
inc_feat_0 = (int(ced_real>0) + int(ced_render>0) + int(ced_render_d>0) + int(ced_real_d>0)) * self.feature_sizes[-i ]
else:
inc_feat_0 = (int(ced_real>=i) + int(ced_render>=i) + int(ced_render_d>=i) + int(ced_real_d>=i) + 1 ) * self.feature_sizes[-i]
if self.pred_flow_pyramid_add and self.pred_flow_pyramid:
inc_feat_0 += 2
            out_feat = self.feature_sizes[- (i+1) ]  # leave this number constant for now
dc.append( deconv( inc_feat_0 , out_feat ) )
print( 'Network inp:', inc_feat_0, ' out: ', out_feat )
if i > len(self.feature_sizes)-len(label_feat):
if label_i == -1:
inc_feat_label = inc_feat_0
else:
inc_feat_label = label_feat[label_i]
label_i += 1
out_feat_label = label_feat[label_i]
label_layers.append( deconv( inc_feat_label , out_feat_label, bias=True ) )
if self.pred_flow_pyramid:
pred_flow_pyramid.append( predict_flow( inc_feat_0 ) )
upsample_flow_layers.append( nn.ConvTranspose2d(
2, 2, 4, 2, 1, bias=False))
label_layers.append( deconv(label_feat[-2], label_feat[-1], bias=True) )
self.label_layers = nn.ModuleList(label_layers)
self.deconvs = nn.ModuleList(dc)
pred_flow_pyramid.append( predict_flow( self.feature_sizes[0]) )
if self.pred_flow_pyramid:
self.pred_flow_pyramid= nn.ModuleList( pred_flow_pyramid )
self.upsample_flow_layers = nn.ModuleList(upsample_flow_layers)
self.up_in = torch.nn.UpsamplingBilinear2d(size=(self.size, self.size))
self.input_trafos = transforms.Compose([
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
self.norm_depth = transforms.Normalize([0.485,0.485], [0.229,0.229])
self.up_out = torch.nn.UpsamplingNearest2d(size=(480, 640))
self.up_out_bl = torch.nn.UpsamplingBilinear2d(size=(480, 640))
self.up_nn_in= torch.nn.UpsamplingNearest2d(size=(self.size, self.size))
def forward(self, data, idx=False, label=None):
"""Forward pass
Args:
data ([torch.tensor]): BS,C,H,W (C=6) if self.depth_backbone: C = 8 else: C = 6
idx ([torch.tensor]): BS,1 starting for first object with 0 endind with num_classes-1
label ([type], optional): [description]. Defaults to None.
Returns:
flow ([torch.tensor]): BS,2,H,W
segmentation ([torch.tensor]): BS,num_classes,H,W
"""
        # Open question: is it smart to have the residual skip connections only for the real image?
        # The information should of course be available for the real image, but then the network
        # needs to learn how to fully encode the rendered image correctly.
# data BS, C, H, W
BS,C,H,W = data.shape
real = self.up_in(data[:,:3] )
render = self.up_in(data[:,3:6] )
if self.depth_backbone:
data[:,6:] = data[:,6:]/10000
for i in range(BS):
real[i] = self.input_trafos( real[i] )
render[i] = self.input_trafos( render[i] )
if self.depth_backbone:
real_d = self.up_nn_in(data[:,6][:,None,:,:] )
render_d = self.up_nn_in(data[:,7][:,None,:,:] )
feat_real_d = self.feature_extractor_depth.extract_features_layerwise( real_d , idx_extract = self.idx_extract[-(self.ced_real_d):])
feat_render_d = self.feature_extractor_depth.extract_features_layerwise( render_d , idx_extract = self.idx_extract[-(self.ced_render_d):])
feat_real = self.feature_extractor.extract_features_layerwise( real , idx_extract = self.idx_extract)
feat_render = self.feature_extractor.extract_features_layerwise( render, idx_extract = self.idx_extract)
pred_flow_pyramid_feat = []
x = None
for j in range( 1,len( self.deconvs)+1 ):
# calculate input:
# accumulate input to each layer
if j-1 < self.ced_real:
x = cat( x, feat_real[-j] )
if j-1 < self.ced_render:
x = cat( x, feat_render[-j])
if j-1 < self.ced_real_d:
x = cat( x, feat_real_d[-j])
if j-1 < self.ced_render_d:
x = cat( x, feat_render_d[-j])
if j > 1 and self.pred_flow_pyramid_add:
dim = x.shape[3]
# upsample flow
f_up = self.upsample_flow_layers[j-2]( pred_flow_pyramid_feat[-1]) [:,:,:dim,:dim]
x = cat( x, f_up )
# predict flow at each level
if self.pred_flow_pyramid:
pred_flow_pyramid_feat.append( self.pred_flow_pyramid[ j-1 ](x) )
try:
dim = feat_real[-(j+1)].shape[3]
pred_flow_pyramid_feat[-1] = pred_flow_pyramid_feat[-1][:,:,:dim,:dim]
except:
pass
if j == len(self.deconvs) - len(self.label_layers)+2 :
# clone features for mask prediction.
# here the conv are with bias !!!
segmentation = x.clone()
# apply upcovn layer
x = self.deconvs[j-1](x)
try:
dim = feat_real[-(j+1)].shape[3]
x = x[:,:,:dim,:dim]
except:
pass
# predict label
for l in self.label_layers:
segmentation = l(segmentation)
segmentation = self.up_out(segmentation)
# predict flow
pred_flow_pyramid_feat.append( self.pred_flow_pyramid[-1](x) )
pred_flow_pyramid_feat.append( self.up_out_bl( pred_flow_pyramid_feat[-1] ) )
if label is None:
label = segmentation.argmax(dim=1)
return pred_flow_pyramid_feat, segmentation
if __name__ == "__main__":
model = EfficientDisparity(num_classes = 22, backbone= 'efficientnet-b2', seperate_flow_head= False, pred_flow_pyramid=True, pred_flow_pyramid_add=True, ced_real=3, ced_render=3, ced_render_d=2,ced_real_d=2)
BS = 2
H = 480
W = 640
C = 8
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = torch.ones( (BS,C,H,W), device=device )
model = model.to(device)
idx = torch.linspace(0,BS-1,BS)[:,None]
out = model(data, idx = idx)
# for i in range(0,7):
# model = EfficientDisparity(num_classes = 22, backbone= f'efficientnet-b{i}', connections_encoder_decoder = 2, depth_backbone = True)
```
#### File: src/rotations/rot_to_quat.py
```python
import torch
if __name__ == "__main__":
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.append(os.path.join(os.getcwd() + '/src'))
sys.path.append(os.path.join(os.getcwd() + '/lib'))
from helper import re_quat
from rotations import norm_quat
def _copysign(a, b):
""" From PyTorch3D see def _copysign(a, b)
    Return a tensor where each element has the absolute value taken from the
corresponding element of a, with sign taken from the corresponding
element of b. This is like the standard copysign floating-point operation,
but is not careful about negative 0 and NaN.
Args:
a: source tensor.
b: tensor whose signs will be used, of the same shape as a.
Returns:
Tensor of the same shape as a with the signs of b.
"""
signs_differ = (a < 0) != (b < 0)
return torch.where(signs_differ, -a, a)
def rot_to_quat(matrix, conv='wxyz'):
"""From PyTorch3D see def matrix_to_quaternion(matrix)
Args:
rot ([type]): [description]
conv (str, optional): [description]. Defaults to 'wxyz'.
"""
if matrix.shape == (3, 3):
matrix = matrix.reshape((1, 3, 3))
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.")
zero = matrix.new_zeros((1,))
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * torch.sqrt(torch.max(zero, 1 + m00 + m11 + m22))
x = 0.5 * torch.sqrt(torch.max(zero, 1 + m00 - m11 - m22))
y = 0.5 * torch.sqrt(torch.max(zero, 1 - m00 + m11 - m22))
z = 0.5 * torch.sqrt(torch.max(zero, 1 - m00 - m11 + m22))
o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
if conv == 'xyzw':
return norm_quat(torch.stack((o1, o2, o3, o0), -1))
elif conv == 'wxyz':
return norm_quat(torch.stack((o0, o1, o2, o3), -1))
else:
raise Exception('undefined quaternion convention')
def test_rot_to_quat():
from scipy.spatial.transform import Rotation as R
import numpy as np
from scipy.stats import special_ortho_group
from rotations import RearangeQuat
import time
bs = 1000
re_q = RearangeQuat(bs)
mat = special_ortho_group.rvs(dim=3, size=bs)
quat = R.from_matrix(mat).as_quat()
q_test = rot_to_quat(torch.tensor(mat), conv='wxyz')
print(quat,'\n \n ', q_test)
m = q_test[:,0] > 0
mat2 = R.from_quat( q_test.numpy() ).as_matrix()
print("Fiff", torch.sum(torch.norm( torch.tensor(mat-mat2), dim=(1,2) ), dim=0))
#print( "DIF", torch.sum(torch.norm( torch.tensor(quat[m]) - q_test[m], dim=1 ), dim=0))
# q = torch.from_numpy(quat.astype(np.float32)).cuda()
# re_q(q, input_format='xyzw')
# mat2 = special_ortho_group.rvs(dim=3, size=bs)
# quat2 = R.from_matrix(mat2).as_quat()
# q2 = torch.from_numpy(quat2.astype(np.float32)).cuda()
# re_q(q2, input_format='xyzw')
# r1 = R.from_matrix(mat)
# R_out = r1 * R.from_matrix(mat2)
# print(f'scipy xyzw {R_out.as_quat()}')
# st = time.time()
# for i in range(0, 1000):
# out = compose_quat(q, q2)
# print(f'torch wxyz { compose_quat(q, q2) } ')
# print(f'took for 1000 iterations of {bs} bs {time.time()-st}s')
if __name__ == "__main__":
test_rot_to_quat()
pass
```
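As a quick worked check of `rot_to_quat` (run with this module and its `rotations.norm_quat` dependency importable): a 90 degree rotation about the z-axis should map to the quaternion (cos 45°, 0, 0, sin 45°) in the wxyz convention.
```python
import math
import torch

Rz = torch.tensor([[0., -1., 0.],
                   [1.,  0., 0.],
                   [0.,  0., 1.]])
q = rot_to_quat(Rz, conv='wxyz')                         # shape (1, 4)
expected = torch.tensor([[math.cos(math.pi / 4), 0., 0., math.sin(math.pi / 4)]])
assert torch.allclose(q, expected, atol=1e-6)
```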
#### File: FlowPose6D/tools/main.py
```python
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.append(os.path.join(os.getcwd() + '/src'))
sys.path.append(os.path.join(os.getcwd() + '/lib'))
import shutil
import datetime
import argparse
import signal
import coloredlogs
coloredlogs.install()
import torch
from pytorch_lightning import seed_everything,Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from lightning import TrackNet6D
from helper import pad
from loaders_v2 import ConfigLoader
from helper import move_dataset_to_ssd
from helper import move_background
def file_path(string):
if os.path.isfile(string):
return string
else:
        raise FileNotFoundError(string)
if __name__ == "__main__":
# for reproducability
seed_everything(42)
def signal_handler(signal, frame):
print('exiting on CRTL-C')
sys.exit(0)
    # this is needed on the Leonhard cluster so an interactive session does not freeze on
    # Ctrl-C
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=file_path, default='/home/jonfrey/PLR3/yaml/exp/exp_natrix.yml', # required=True,
help='The main experiment yaml file.')
parser.add_argument('--env', type=file_path, default='yaml/env/env_natrix_jonas.yml',
help='The environment yaml file.')
args = parser.parse_args()
exp_cfg_path = args.exp
env_cfg_path = args.env
exp = ConfigLoader().from_file(exp_cfg_path).get_FullLoader()
env = ConfigLoader().from_file(env_cfg_path).get_FullLoader()
if exp['model_path'].split('/')[-2] == 'debug':
p = '/'.join(exp['model_path'].split('/')[:-1])
try:
shutil.rmtree(p)
except:
pass
timestamp = '_'
else:
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
p = exp['model_path'].split('/')
p.append(str(timestamp) + '_' + p.pop())
new_path = '/'.join(p)
exp['model_path'] = new_path
model_path = exp['model_path']
# copy config files to model path
if not os.path.exists(model_path):
os.makedirs(model_path)
print((pad("Generating network run folder")))
else:
print((pad("Network run folder already exits")))
exp_cfg_fn = os.path.split(exp_cfg_path)[-1]
env_cfg_fn = os.path.split(env_cfg_path)[-1]
    print(pad(f'Copy {exp_cfg_path} to {model_path}/{exp_cfg_fn}'))
shutil.copy(exp_cfg_path, f'{model_path}/{exp_cfg_fn}')
shutil.copy(env_cfg_path, f'{model_path}/{env_cfg_fn}')
exp, env = move_dataset_to_ssd(env, exp)
exp, env = move_background(env, exp)
dic = {'exp': exp, 'env': env}
model = TrackNet6D(**dic)
early_stop_callback = EarlyStopping(
monitor='avg_val_disparity',
patience=exp.get('early_stopping_cfg', {}).get('patience', 100),
strict=False,
verbose=True,
mode='min',
min_delta = exp.get('early_stopping_cfg', {}).get('min_delta', -0.1)
)
checkpoint_callback = ModelCheckpoint(
filepath=exp['model_path'] + '/{epoch}-{avg_val_disparity_float:.4f}',
verbose=True,
monitor="avg_val_disparity",
mode="min",
prefix="",
save_last=True,
save_top_k=10,
)
if exp.get('checkpoint_restore', False):
checkpoint = torch.load(
exp['checkpoint_load'], map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
# with torch.autograd.set_detect_anomaly(True):
# early_stop_callback=early_stop_callback,
trainer = Trainer(**exp['trainer'],
checkpoint_callback=checkpoint_callback,
default_root_dir=exp['model_path'],
callbacks=[early_stop_callback])
if exp.get('model_mode', 'fit') == 'fit':
trainer.fit(model)
elif exp.get('model_mode', 'fit') == 'test':
trainer.test(model)
else:
print("Wrong model_mode defined in exp config")
raise Exception
``` |
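For orientation, these are the keys the script above reads from the parsed experiment config; the concrete values below are illustrative assumptions, not the project's defaults.
```python
exp_sketch = {
    "model_path": "/media/scratch/results/tracknet/debug/run",   # hypothetical path
    "model_mode": "fit",                                         # "fit" or "test"
    "checkpoint_restore": False,
    "checkpoint_load": "/path/to/last.ckpt",
    "early_stopping_cfg": {"patience": 100, "min_delta": -0.1},
    "trainer": {"gpus": 1, "max_epochs": 100},                   # forwarded to pytorch_lightning.Trainer
}
```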
{
"source": "JonasFrey96/PLR2",
"score": 2
} |
#### File: src/helper/helper.py
```python
import yagmail
from sklearn.neighbors import NearestNeighbors
import yaml
import numpy as np
import collections
import collections.abc
import random  # used by generate_unique_idx below
import torch
import copy
def flatten_list(d, parent_key='', sep='_'):
items = []
for num, element in enumerate(d):
new_key = parent_key + sep + str(num) if parent_key else str(num)
        if isinstance(element, collections.abc.MutableMapping):
items.extend(flatten_dict(element, new_key, sep=sep).items())
else:
if isinstance(element, list):
if isinstance(element[0], dict):
items.extend(flatten_list(element, new_key, sep=sep))
continue
items.append((new_key, element))
return items
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
if isinstance(v, list):
if isinstance(v[0], dict):
items.extend(flatten_list(v, new_key, sep=sep))
continue
items.append((new_key, v))
return dict(items)
def norm_quat(q):
# ToDo raise type and dim error
return q / torch.sqrt(torch.sum(q * q))
def pad(s, sym='-', p='l', length=80):
if len(s) > length:
return s
else:
if p == 'c':
front = int((length - len(s)) / 2)
s = sym * front + s
back = int(length - len(s))
s = s + sym * back
if p == 'l':
back = int(length - len(s))
s = s + sym * back
return s
def re_quat(q, input_format):
if input_format == 'xyzw':
if isinstance(q, torch.Tensor):
v0 = q[0].clone()
else:
v0 = copy.deepcopy(q[0])
q[0] = q[3]
q[3] = q[2]
q[2] = q[1]
q[1] = v0
return q
elif input_format == 'wxyz':
if isinstance(q, torch.Tensor):
v0 = q[0].clone()
else:
v0 = copy.deepcopy(q[0])
q[0] = q[1]
q[1] = q[2]
q[2] = q[3]
q[3] = v0
return q
def send_email(text):
yag = yagmail.SMTP('trackthisplr', "TrackThis")
contents = [
"Run is finished!",
text
]
yag.send('<EMAIL>',
'PLR - TrackThis - Lagopus', contents)
yag.send('<EMAIL>',
'PLR - TrackThis - Lagopus', contents)
def compose_quat(p, q, device):
"""
input is wxyz
"""
q = norm_quat(re_quat(q.squeeze(), 'wxyz')).unsqueeze(0)
p = norm_quat(re_quat(p.squeeze(), 'wxyz')).unsqueeze(0)
product = torch.zeros(
(max(p.shape[0], q.shape[0]), 4), dtype=torch.float32, device=device)
product[:, 3] = p[:, 3] * q[:, 3] - torch.sum(p[:, :3] * q[:, :3], (1))
product[:, :3] = (p[:, None, 3] * q[:, :3] + q[:, None, 3] * p[:, :3] +
torch.cross(p[:, :3], q[:, :3]))
return re_quat(product.squeeze(0), 'xyzw')
def rotation_angle(q, device):
# in radians
q = norm_quat(q)
unit_r = torch.t(torch.tensor(
[[0, 0, 0, 1]], dtype=torch.float32, device=device))
return torch.asin(torch.mm(q, unit_r)) * 2
def nearest_neighbor(src, dst):
assert src.shape[1] == dst.shape[1]
neigh = NearestNeighbors(n_neighbors=1, n_jobs=8)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def replace_item(obj, key, replace_value):
for k, v in obj.items():
if isinstance(v, dict):
obj[k] = replace_item(v, key, replace_value)
if key in obj:
obj[key] = replace_value
return obj
def generate_unique_idx(num, max_idx):
a = random.sample(range(0, max_idx), k=min(num, max_idx))
while len(a) < num:
a = a + random.sample(
range(0, max_idx), k=min(max_idx, num - len(a)))
return a
def get_bbox_480_640(label):
border_list = [-1, 40, 80, 120, 160, 200, 240, 280,
320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
img_width = 480
img_length = 640
# print(type(label))
rows = np.any(label, axis=1)
cols = np.any(label, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
rmax += 1
cmax += 1
r_b = rmax - rmin
for tt in range(len(border_list)):
if r_b > border_list[tt] and r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
c_b = cmax - cmin
for tt in range(len(border_list)):
if c_b > border_list[tt] and c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > img_width:
delt = rmax - img_width
rmax = img_width
rmin -= delt
if cmax > img_length:
delt = cmax - img_length
cmax = img_length
cmin -= delt
return rmin, rmax, cmin, cmax
```
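A small sketch of `re_quat` from above (note that it mutates its argument in place, hence the `list(...)` copies); it swaps between the scipy-style xyzw convention and the wxyz convention used elsewhere in the project.
```python
q_xyzw = [0.0, 0.0, 0.7071, 0.7071]                 # 90 deg about z, xyzw
q_wxyz = re_quat(list(q_xyzw), input_format='xyzw')
print(q_wxyz)                                       # [0.7071, 0.0, 0.0, 0.7071]
assert re_quat(list(q_wxyz), input_format='wxyz') == q_xyzw
```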
#### File: src/helper/plotting.py
```python
import k3d
import numpy as np
def plot_points(x, point_size = 0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k =25811000
elif c == 'r':
k =11801000
elif c == 'black':
k =2580
else:
k =2580
colors = np.ones(x.shape[0])*k
plot = k3d.plot(name='points')
print(colors.shape)
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader='3d'
plot.display()
def plot_two_pc(x, y, point_size = 0.005, c1='g',c2='r' ):
if c1 == 'b':
k = 245
elif c1 == 'g':
k =25811000
elif c1 == 'r':
k =11801000
elif c1 == 'black':
k =2580
else:
k =2580
if c2 == 'b':
k2 = 245
elif c2 == 'g':
k2 =25811000
elif c2 == 'r':
k2 =11801000
elif c2 == 'black':
k2 =2580
else:
k2 =2580
col1 = np.ones(x.shape[0])*k
col2 = np.ones(y.shape[0])*k2
plot = k3d.plot(name='points')
plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader='3d'
plot.display()
```
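Usage sketch for the k3d helpers above (k3d only renders inside a Jupyter notebook; the point clouds here are random placeholders):
```python
import numpy as np

cloud_a = np.random.rand(500, 3).astype(np.float32)
cloud_b = cloud_a + np.array([0.5, 0.0, 0.0], dtype=np.float32)   # shifted copy
plot_points(cloud_a, point_size=0.01, c='r')
plot_two_pc(cloud_a, cloud_b, point_size=0.01, c1='g', c2='r')
```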
#### File: src/helper/postprocess.py
```python
import numpy as np
import os
import sys
sys.path.append('/home/jonfrey/PLR/src/')
sys.path.append('/home/jonfrey/PLR/src/dense_fusion')
from math import pi
from PIL import Image
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from helper import re_quat, compose_quat, rotation_angle
from estimation.filter import Linear_Estimator, Kalman_Filter
from estimation.state import State_R3xQuat, State_SE3, points
from estimation.errors import ADD, ADDS, translation_error, rotation_error
from visu import plot_pcd, SequenceVisualizer
from copy import deepcopy
from scipy.spatial.transform import Rotation as R
import pandas as pd
import pickle as pkl
import copy
import glob
import k3d
sym_list = [12, 15, 18, 19, 20]
def kf_sequence(data_old, var_motion, var_sensor, params):
## extract relevant data
data = copy.deepcopy(data_old)
data = list(data)
for i, seq in enumerate(data):
# print('Loading sequence {}'.format(i))
# setup of filter
idx = np.squeeze(seq[0]['dl_dict']['idx'])
model_points_np = np.squeeze(seq[0]['dl_dict']['model_points'])
model_points = points(model_points_np)
## set up kalman filter
prior = State_R3xQuat([0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])
prior.add_noise([0, 0, 0, 0, 2 * pi, 3])
prior_variance = State_R3xQuat([10,10,10,10,10,10,20*pi])
prior_variance = prior_variance.state.reshape(7,)
kf = Kalman_Filter(state_type = 'State_R3xQuat',
motion_model = 'Linear_Motion',
trajectory = None,
observations = None,
variance_motion = var_motion,
variance_sensor = var_sensor,
params = params)
kf.set_prior(prior, prior_variance)
for obs_data in seq:
## run the kalman filter over the observation
obs_t = np.array(obs_data['final_pred_obs']['t'])
obs_r = re_quat(np.array(obs_data['final_pred_obs']['r_wxyz']), 'wxyz')
obs_c = obs_data['final_pred_obs']['c']
gt_pose = State_R3xQuat(np.concatenate((obs_data['dl_dict']['gt_trans'][0],
re_quat(copy.deepcopy(obs_data['dl_dict']['gt_rot_wxyz'][0]), 'wxyz')), axis=0))
obs = State_R3xQuat(np.concatenate((obs_t, obs_r), axis=0)) # need to convert quat to xyzw
variance = np.true_divide(var_sensor, obs_c)
f_pose, _ = kf.update(obs, variance)
kf.predict()
filter_pred = {'t': f_pose.state[0:3, 0], 'r_wxyz': re_quat(copy.deepcopy(f_pose.state[3:7, 0]), 'xyzw')}
# calculate the ADD error
if idx in sym_list:
filter_error_ADD = ADDS(model_points, gt_pose, f_pose)
else:
filter_error_ADD = ADD(model_points, gt_pose, f_pose)
obs_data['filter_pred'] = filter_pred
obs_data['ADD'] = filter_error_ADD
obs_data['translation_error'] = translation_error(gt_pose, f_pose)
obs_data['rotation_error'] = rotation_error(gt_pose, f_pose)
# print('Processed sequence.')
return data
```
#### File: src/visu/visualizer.py
```python
import numpy as np
import sys
import os
from PIL import Image
from visu.helper_functions import save_image
from scipy.spatial.transform import Rotation as R
from helper import re_quat
import copy
import torch
import numpy as np
import k3d
class Visualizer():
def __init__(self, p_visu, writer=None):
if p_visu[-1] != '/':
p_visu = p_visu + '/'
self.p_visu = p_visu
self.writer = writer
if not os.path.exists(self.p_visu):
os.makedirs(self.p_visu)
def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2):
"""
tag := tensorboard tag
epoch := tensorboard epoche
store := ture -> stores the image to standard path
path := != None creats the path and store to it path/tag.png
img:= original_image, [widht,height,RGB]
points:= points of the object model [length,x,y,z]
trans: [1,3]
rot: [3,3]
"""
img_d = copy.deepcopy(img)
points = np.dot(points, rot_mat.T)
points = np.add(points, trans[0, :])
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
                img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
            except:
                # print("out of bounds")
pass
if jupyter:
display(Image.fromarray(img_d))
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
#print("IMAGE D:" ,img_d,img_d.shape )
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None):
"""
tag := tensorboard tag
epoch := tensorboard epoche
store := ture -> stores the image to standard path
path := != None creats the path and store to it path/tag.png
img:= original_image, [widht,height,RGB]
"""
if isinstance(b, dict):
rmin = b['rmin']
rmax = b['rmax']
cmin = b['cmin']
cmax = b['cmax']
# ToDo check Input data
img_d = np.array(copy.deepcopy(img))
c = [0, 0, 255]
rmin_mi = max(0, rmin - str_width)
rmin_ma = min(img_d.shape[0], rmin + str_width)
rmax_mi = max(0, rmax - str_width)
rmax_ma = min(img_d.shape[0], rmax + str_width)
cmin_mi = max(0, cmin - str_width)
cmin_ma = min(img_d.shape[1], cmin + str_width)
cmax_mi = max(0, cmax - str_width)
cmax_ma = min(img_d.shape[1], cmax + str_width)
img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c
img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c
img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c
img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c
print("STORE", store)
img_d = img_d.astype(np.uint8)
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if jupyter:
display(Image.fromarray(img_d))
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_pcd(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'):
if c1 == 'b':
k = 245
elif c1 == 'g':
k = 25811000
elif c1 == 'r':
k = 11801000
elif c1 == 'black':
k = 2580
else:
k = 2580
if c2 == 'b':
k2 = 245
elif c2 == 'g':
k2 = 25811000
elif c2 == 'r':
k2 = 11801000
elif c2 == 'black':
k2 = 2580
else:
k2 = 2580
col1 = np.ones(x.shape[0]) * k
col2 = np.ones(y.shape[0]) * k2
plot = k3d.plot(name='points')
plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
class SequenceVisualizer():
def __init__(self, seq_data, images_path, output_path=None):
self.seq_data = seq_data
self.images_path = images_path
self.output_path = output_path
def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'):
seq_data = self.seq_data
images_path = self.images_path
output_path = self.output_path
frame = seq_data[seq_no][frame_no]
unique_desig = frame['dl_dict']['unique_desig'][0]
if pose_type == 'ground_truth':
# ground truth
t = frame['dl_dict']['gt_trans'].reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'filtered':
# filter pred
t = np.array(frame['filter_pred']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['filter_pred']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'final_pred_obs':
# final pred
t = np.array(frame['final_pred_obs']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['final_pred_obs']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
else:
raise Exception('Pose type not implemented.')
w = 2
if type(unique_desig) != str:
im = np.array(Image.open(
images_path + unique_desig[0] + '-color.png')) # ycb
else:
im = np.array(Image.open(
images_path + unique_desig + '.png')) # laval
img_d = copy.deepcopy(im)
dl_dict = frame['dl_dict']
points = copy.deepcopy(
seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :])
points = np.dot(points, rot.T)
points = np.add(points, t[0, :])
cam_cx = dl_dict['cam_cal'][0][0]
cam_cy = dl_dict['cam_cal'][0][1]
cam_fx = dl_dict['cam_cal'][0][2]
cam_fy = dl_dict['cam_cal'][0][3]
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
                img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
except:
#print("out of bounds")
pass
img_disp = Image.fromarray(img_d)
if jupyter:
display(img_disp)
if store:
outpath = output_path + \
'{}_{}_{}.png'.format(pose_type, seq_no, frame_no)
img_disp.save(outpath, "PNG", compress_level=1)
print("Saved image to {}".format(outpath))
def save_sequence(self, seq_no, pose_type='filtered', name=''):
for fn in range(len(self.seq_data)):
self.plot_points_on_image(seq_no, fn, False, True, pose_type)
if name:
video_name = '{}_{}_{}'.format(name, pose_type, seq_no)
else:
video_name = '{}_{}'.format(pose_type, seq_no)
cmd = "cd {} && ffmpeg -r 10 -i ./filtered_{}_%d.png -vcodec mpeg4 -y {}.mp4".format(
self.output_path, seq_no, video_name)
os.system(cmd)
``` |
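The overlay methods above all rely on the standard pinhole projection u = fx·(x/z) + cx, v = fy·(y/z) + cy before painting points into the image. A quick numeric check (intrinsics in the style of YCB-Video, point coordinates made up):
```python
import numpy as np

cam_fx, cam_fy, cam_cx, cam_cy = 1066.778, 1067.487, 312.9869, 241.3109
p = np.array([0.05, -0.02, 0.8])          # point in the camera frame (meters)
u = int((p[0] / p[2]) * cam_fx + cam_cx)  # column
v = int((p[1] / p[2]) * cam_fy + cam_cy)  # row
print(u, v)                               # roughly (379, 214)
```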
{
"source": "JonasFrey96/RAFT",
"score": 2
} |
#### File: JonasFrey96/RAFT/eval.py
```python
import sys
import os
# os.chdir(os.path.join(os.getenv('HOME'), 'RAFT'))
sys.path.append('./core')
import argparse
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from raft import RAFT
from utils.utils import InputPadder
import yaml
def file_path(string):
if os.path.isfile(string):
return string
else:
        raise FileNotFoundError(string)
def load_yaml(path):
with open(path) as file:
res = yaml.load(file, Loader=yaml.FullLoader)
return res
import coloredlogs
coloredlogs.install()
import time
from pathlib import Path
import gc
# Frameworks
from torchvision import transforms as tf
import imageio
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--scannet_root', type=str,
default="/home/jonfrey/datasets/scannet/scans", help='Scannet-Folder')
parser.add_argument('--output_dir', type=str,
default="/media/scratch2/jonfrey/optical_flow_scannet", help='Output-Folder')
parser.add_argument('--limit_scenes', type=int, default=10, help='Number of scenes to generate flow for.')
parser.add_argument('--model_checkpoint', type=file_path,
default="/media/scratch1/jonfrey/results/rpose/models/raft-sintel.pth", help='RAFT-Model-Checkpoint')
parser.add_argument('--device', type=str,
default="cuda:0", help='Device')
args = parser.parse_args()
# SETUP MODEL
model_args = DotDict({
'model': args.model_checkpoint,
'small': False,
'mixed_precision': False,
'alternate_corr': False,
} )
os.chdir(os.path.join(os.getenv('HOME'), 'RAFT'))
model = torch.nn.DataParallel(RAFT(model_args))
model.load_state_dict(torch.load(model_args.model))
model = model.module
model.to(args.device)
model.eval()
# CREATE RESULT FOLDER
Path( args.output_dir).mkdir(parents=True, exist_ok=True)
def get_sequence_paths( sub, root, limit_scenes=10):
paths = [ str(s) for s in Path(root).rglob("*.jpg") if
str(s).find('color') != -1 and
int( str(s).split('/')[-3][5:9]) < limit_scenes and
int( str(s).split('/')[-1][:-4]) % sub == 0]
paths.sort(key= lambda x: int(x.split('/')[-3][5:9]) * 1000000 + int( x.split('/')[-3][10:] ) * 10000 + int( x.split('/')[-1][:-4] ) )
current_scene = "0" # paths.split('/')[-3]
paths_sorted = []
for p in paths:
if current_scene != p.split('/')[-3]:
current_scene = p.split('/')[-3]
paths_sorted.append( [p] )
else:
paths_sorted[-1].append(p)
return paths_sorted
# GET ALL SCANNET FILES
sequence_paths = get_sequence_paths( sub=10, root=args.scannet_root, limit_scenes= args.limit_scenes)
@torch.no_grad()
def gen_flow_for_sequence(paths, folder, device):
        # trick to handle the last frame: compute its flow against itself
paths.append( paths[-1] )
for j,p in enumerate( paths[:-1] ):
img1 = torch.from_numpy( imageio.imread(p)).to( device=device)[None].type(torch.float)
img2 = torch.from_numpy( imageio.imread( paths[j+1])).to( device=device)[None].type(torch.float)
_, h, w ,_ = img1.shape
img1 = img1.permute(0, 3, 1, 2)
img2 = img2.permute(0, 3, 1, 2)
tra = tf.Resize(( int(h/2) ,int( w/2)))
tra_up = tf.Resize(( h,w))
img1 = tra( img1 )
img2 = tra( img2 )
padder = InputPadder( img1.shape )
img1, img2 = padder.pad(img1,img2)
# flow_low, flow_up = model(tra( images ), tra( images_next_frame ), iters=20, test_mode=True)
flow_low, flow_up = model( img1, img2, iters=12, test_mode=True)
store_file = os.path.join(folder, p.split('/')[-1].replace('.jpg', '.png'))
pred = tra_up(flow_up)
writeFlowKITTI(store_file , pred[0].permute(1,2,0).cpu().detach().numpy() )
print(p)
for paths in sequence_paths:
out = os.path.join( args.output_dir, paths[0].split('/')[-3])
Path(out).mkdir(parents=True, exist_ok=True)
gen_flow_for_sequence(paths, folder=out, device=args.device)
print("Done")
``` |
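To consume the flow maps written by `writeFlowKITTI` above, the encoding (uint16 PNG with uv stored as 64·uv + 2^15 plus a validity channel) can be inverted; a minimal reader sketch that mirrors the write path:
```python
import cv2
import numpy as np

def readFlowKITTI(filename):
    png = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)  # H x W x 3, uint16
    png = png[:, :, ::-1].astype(np.float32)      # undo the channel reversal from writeFlowKITTI
    flow = (png[:, :, :2] - 2**15) / 64.0         # back to signed flow in pixels
    valid = png[:, :, 2]                          # validity mask
    return flow, valid
```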
{
"source": "JonasFrey96/RPOSE",
"score": 2
} |
#### File: RPOSE/scripts/time_network.py
```python
import os
import sys
os.chdir(os.path.join(os.getenv('HOME'), 'RPOSE'))
sys.path.insert(0, os.getcwd())
sys.path.append(os.path.join(os.getcwd() + '/src'))
sys.path.append(os.path.join(os.getcwd() + '/core'))
sys.path.append(os.path.join(os.getcwd() + '/segmentation'))
from src_utils import DotDict
from raft import RAFT
import numpy as np
model = RAFT(args = DotDict( {'small':False}) )
import torch
device = 'cuda:0'
BS,H,W,C = 1,480,640,3
half = True
inp1 = torch.randn(BS, C,H,W, dtype=torch.float).to(device)
inp2 = torch.randn(BS, C,H,W, dtype=torch.float).to(device)
model.to(device)
model.eval()
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print( pytorch_total_params )
def time_model( model, inp, repetitions = 10):
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
timings=np.zeros((repetitions,1))
#GPU-WARM-UP
for _ in range(50):
_ = model( *inp)
# MEASURE PERFORMANCE
with torch.no_grad():
for rep in range(repetitions):
starter.record()
_ = model(*inp)
ender.record()
# WAIT FOR GPU SYNC
torch.cuda.synchronize()
curr_time = starter.elapsed_time(ender)
timings[rep] = curr_time
mean_syn = np.sum(timings) / repetitions
std_syn = np.std(timings)
print(mean_syn, std_syn, timings.min(), timings.max())
print("HZ: ", 1/(mean_syn/1000) , " STD in ms : ",(std_syn), " STD in hz : ",1/(std_syn/1000))
print("\nFlow 24")
time_model( model, (inp1,inp2,24), repetitions = 100)
print("\nFlow 12")
time_model( model, (inp1,inp2,12), repetitions = 100)
print("\nFlow 6")
time_model( model, (inp1,inp2,6), repetitions = 100)
print("\nFlow 2")
time_model( model, (inp1,inp2,2), repetitions = 100)
from models_asl import FastSCNN
model = FastSCNN(num_classes= 2, aux = False, extraction = {"active":False,
"layer":'learn_to_down'}, input_channels = 6)
model.to(device)
model.eval()
print("\nSEGMENTATION")
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print( pytorch_total_params )
time_model( model, (torch.cat ( [inp1,inp2],dim=1),), repetitions = 1000)
```
#### File: common/ycb/get_ycb_dataloader.py
```python
import torch.utils.data as data
from .ycb import YCB
__all__ = "get_ycb_dataloader"
def get_ycb_dataloader(cfg, env):
train_dataset = YCB(
root=env["ycb"],
mode=cfg["mode"],
image_size=cfg["image_size"],
cfg_d=cfg["cfg_ycb"],
)
train_loader = data.DataLoader(train_dataset, **cfg["loader"], drop_last=True)
return train_loader
```
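Sketch of the expected inputs (the key names come from the function above; the values are illustrative assumptions, not the project's defaults):
```python
cfg = {
    "mode": "train",
    "image_size": (480, 640),
    "cfg_ycb": {},                                                   # dataset-specific options forwarded to YCB
    "loader": {"batch_size": 4, "shuffle": True, "num_workers": 4},  # passed to torch.utils.data.DataLoader
}
env = {"ycb": "/path/to/ycb_video_dataset"}                          # hypothetical dataset root
# train_loader = get_ycb_dataloader(cfg, env)
```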
#### File: flow/pose_estimation/flow_to_trafo_PnP.py
```python
import torch
import numpy as np
import copy
from scipy.spatial.transform import Rotation as R
import cv2
from .pose_estimate_violations import Violation
def filter_pcd(pcd, tol=0.05):
"""
input:
pcd : Nx3 torch.float32
returns:
mask : N torch.bool
"""
return pcd[:, 2] > tol
def rvec_tvec_to_H(r_vec, t_vec):
"""
input:
r_vec: 3 torch.float32
t_vec: 3 torch.float32
returns:
h: np.array( [4,4] )
"""
rot = R.from_rotvec(r_vec)
h = np.eye(4)
h[:3, :3] = rot.as_matrix()
h[:3, 3] = t_vec.T
return h
def get_H(pcd):
pcd_ret = torch.ones(
(pcd.shape[0], pcd.shape[1] + 1), device=pcd.device, dtype=pcd.dtype
)
pcd_ret[:, :3] = pcd
return pcd_ret
def flow_to_trafo_PnP(*args, **kwargs):
"""
input:
real_br: torch.tensor torch.Size([2])
real_tl: torch.tensor torch.Size([2])
ren_br: torch.tensor torch.Size([2])
ren_tl: torch.tensor torch.Size([2])
flow_mask: torch.Size([480, 640])
u_map: torch.Size([480, 640])
v_map: torch.Size([480, 640])
K_ren: torch.Size([3, 3])
render_d: torch.Size([480, 640])
h_render: torch.Size([4, 4])
h_real_est: torch.Size([4, 4])
output:
suc: bool
h: torch.Size([4, 4])
"""
real_br = kwargs["real_br"]
real_tl = kwargs["real_tl"]
ren_br = kwargs["ren_br"]
ren_tl = kwargs["ren_tl"]
flow_mask = kwargs["flow_mask"]
u_map = kwargs["u_map"]
v_map = kwargs["v_map"]
K_ren = kwargs["K_ren"]
K_real = kwargs["K_real"]
render_d = kwargs["render_d"]
h_render = kwargs["h_render"]
h_real_est = kwargs["h_real_est"]
typ = u_map.dtype
# Grid for upsampled real
grid_real_h = torch.linspace(
int(real_tl[0]), int(real_br[0]), 480, device=u_map.device
)[:, None].repeat(1, 640)
grid_real_w = torch.linspace(
int(real_tl[1]), int(real_br[1]), 640, device=u_map.device
)[None, :].repeat(480, 1)
# Project depth map to the pointcloud real
cam_scale = 10000
real_pixels = torch.stack(
[
grid_real_w[flow_mask],
grid_real_h[flow_mask],
torch.ones(grid_real_h.shape, device=u_map.device, dtype=u_map.dtype)[flow_mask],
],
dim=1,
).type(typ)
grid_ren_h = torch.linspace(int(ren_tl[0]), int(ren_br[0]), 480, device=u_map.device)[
:, None
].repeat(1, 640)
grid_ren_w = torch.linspace(int(ren_tl[1]), int(ren_br[1]), 640, device=u_map.device)[
None, :
].repeat(480, 1)
crop_d_pixels = torch.stack(
[
grid_ren_w.flatten(),
grid_ren_h.flatten(),
torch.ones(grid_ren_w.shape, device=u_map.device, dtype=torch.float32).flatten(),
],
dim=1,
).type(typ)
K_inv = torch.inverse(K_ren.type(torch.float32)).type(typ)
P_crop_d = K_inv @ crop_d_pixels.T.type(typ)
P_crop_d = P_crop_d.type(torch.float32) * render_d.flatten() / cam_scale
P_crop_d = P_crop_d.T
render_d_ind_h = torch.linspace(0, 479, 480, device=u_map.device)[:, None].repeat(
1, 640
)
render_d_ind_w = torch.linspace(0, 639, 640, device=u_map.device)[None, :].repeat(
480, 1
)
render_d_ind_h = torch.clamp(
(render_d_ind_h - u_map).type(torch.float32), 0, 479
).type(torch.long)[flow_mask]
render_d_ind_w = torch.clamp(
(render_d_ind_w - v_map).type(torch.float32), 0, 639
).type(torch.long)[flow_mask]
if render_d_ind_h.shape[0] < 50:
return (
False,
torch.eye(4, dtype=u_map.dtype, device=u_map.device),
np.inf,
0,
Violation.MINIMAL_NR_VALID_CONSTRAINT,
)
# Avoid two different 3D points pointing to the same 2D pixels
res, indices = np.unique(
torch.stack([render_d_ind_h, render_d_ind_w]).numpy(), axis=1, return_index=True
)
indices = torch.from_numpy(indices)
render_d_ind_h = render_d_ind_h[indices]
render_d_ind_w = render_d_ind_w[indices]
real_pixels = real_pixels[indices]
render_pixels = torch.stack(
[render_d_ind_h, render_d_ind_w, torch.ones_like(render_d_ind_w)], dim=1
)
# Hacky indexing along two dimensions
index = render_d_ind_h * 640 + render_d_ind_w
P_crop_d = P_crop_d[index]
m = filter_pcd(P_crop_d)
if torch.sum(m) < 50:
return (
False,
torch.eye(4, dtype=u_map.dtype, device=u_map.device),
np.inf,
0,
Violation.MINIMAL_NR_VALID_CONSTRAINT,
)
P_crop_d = P_crop_d[m]
real_pixels = real_pixels[m]
render_pixels = render_pixels[m]
P_ren = P_crop_d
if kwargs.get("shuffel", "random") == "random":
        # random shuffle
pts_trafo = min(P_ren.shape[0], kwargs.get("max_corrospondences", 200000))
idx = torch.randperm(P_ren.shape[0])[0:pts_trafo]
P_ren = P_ren[idx]
real_pixels = real_pixels[idx]
render_pixels = render_pixels[idx]
elif kwargs.get("shuffel", "random") == "distance_populating":
        # STEP0: shuffle correspondences
idx = torch.randperm(P_ren.shape[0])
P_ren = P_ren[idx]
real_pixels = real_pixels[idx]
render_pixels = render_pixels[idx]
# STEP1: Bin values into grids
u_bins = np.digitize(
render_pixels[:, 0].numpy(),
bins=np.arange(render_pixels[:, 0].min(), render_pixels[:, 0].max(), 5),
)
v_bins = np.digitize(
render_pixels[:, 1].numpy(),
bins=np.arange(render_pixels[:, 1].min(), render_pixels[:, 1].max(), 5),
)
indis_ori = np.arange(0, u_bins.shape[0])
selected_points = []
# STEP2: Iterate over every 2-th u-bin
for u_bin in range(0, u_bins.max(), 2):
# Create pixel mask for the bin.
m = v_bins == u_bin
s2_tmp = u_bins[m]
indis_tmp = indis_ori[m]
# STEP3: find unique indices in the v-bins with the u-bin mask applied
a, indi = np.unique(s2_tmp, return_index=True)
selection = indis_tmp[indi[::2]]
            # STEP4: append the corresponding indices of the original point cloud
selected_points += selection.tolist()
# STEP5: Fall back to random selection if necessary
if len(selected_points) > kwargs.get("min_corrospondences", 30):
P_ren = P_ren[selected_points]
real_pixels = real_pixels[selected_points]
render_pixels = render_pixels[selected_points]
else:
print(f"Sampling failed found {len( selected_points)} corrospondences")
pts_trafo = min(P_ren.shape[0], kwargs.get("max_corrospondences", 50000))
P_ren = P_ren[0:pts_trafo]
real_pixels = real_pixels[0:pts_trafo]
render_pixels = render_pixels[0:pts_trafo]
else:
raise ValueError(
"Shuffle in flow_to_trafo not found", kwargs.get("shuffel", "random")
)
# Move the rendered points to the origin
P_ren_in_origin = (
get_H(P_ren).type(typ) @ torch.inverse(h_render.type(torch.float32)).type(typ).T
)[:, :3]
# PNP estimation
objectPoints = P_ren_in_origin.cpu().type(torch.float32).numpy()
imagePoints = real_pixels[:, :2].cpu().type(torch.float32).numpy()
dist = np.array([[0.0, 0.0, 0.0, 0.0]])
if objectPoints.shape[0] < 8:
print(f"Failed due to missing corsspondences ({ objectPoints.shape[0]})")
return (
False,
torch.eye(4, dtype=u_map.dtype, device=u_map.device),
np.inf,
0,
Violation.MINIMAL_NR_VALID_CONSTRAINT,
)
# set current guess as the inital estimate
rvec = R.from_matrix(h_real_est[:3, :3].cpu().numpy()).as_rotvec().astype(np.float32)
tvec = h_real_est[:3, 3].cpu().numpy().astype(np.float32)
    # calculate PnP between the pixel coordinates in the real image and the corresponding points in the origin frame
if kwargs.get("method", "solvePnPRansac") == "solvePnPRansac":
import time
sta = time.time()
for i in range(0, 100):
retval, r_vec2, t_vec2, inliers = cv2.solvePnPRansac(
objectPoints,
imagePoints,
cameraMatrix=K_real.cpu().type(torch.float32).numpy(),
distCoeffs=dist,
rvec=rvec,
tvec=tvec,
useExtrinsicGuess=True,
iterationsCount=kwargs.get("iterationsCount", 100),
reprojectionError=kwargs.get("reprojectionError", 5),
flags=kwargs.get("flags", 5),
)
sto = time.time()
print("EPE", sto - sta)
elif kwargs.get("method", "solvePnPRefineLM") == "solvePnPRefineLM":
objP = copy.deepcopy(objectPoints)
imgP = copy.deepcopy(imagePoints)
K_rea = K_real.cpu().type(torch.float32).numpy()
rvec_ = copy.deepcopy(rvec)[:, None]
tvec_ = copy.deepcopy(tvec)[:, None]
import time
sta = time.time()
lis = []
for i in range(0, 100):
r_vec2, t_vec2 = cv2.solvePnPRefineLM(
objP,
imgP,
K_rea,
dist,
rvec_,
tvec_,
)
sto = time.time()
print("LM", sto - sta)
elif kwargs.get("method", "solvePnPRefineLM") == "solveBoth":
retval, r_vec2, t_vec2, inliers = cv2.solvePnPRansac(
objectPoints,
imagePoints,
cameraMatrix=K_real.cpu().type(torch.float32).numpy(),
distCoeffs=dist,
rvec=rvec,
tvec=tvec,
useExtrinsicGuess=True,
iterationsCount=kwargs.get("iterationsCount", 100),
reprojectionError=kwargs.get("reprojectionError", 5),
flags=kwargs.get("flags", 5),
)
r_vec2, t_vec2 = cv2.solvePnPRefineLM(
copy.deepcopy(objectPoints),
copy.deepcopy(imagePoints),
K_real.cpu().type(torch.float32).numpy(),
dist,
copy.deepcopy(r_vec2),
copy.deepcopy(t_vec2),
)
else:
raise ValueError("NotDefined")
h = rvec_tvec_to_H(r_vec2[:, 0], t_vec2)
# calculate reprojection error
imagePointsEst, jac = cv2.projectPoints(
objectPoints[None], r_vec2, t_vec2, K_real.cpu().type(torch.float32).numpy(), dist
)
repro_error = np.linalg.norm(
imagePointsEst[:, 0, :] - imagePoints, ord=2, axis=1
).mean()
ratio = (
np.linalg.norm(imagePointsEst[:, 0, :] - imagePoints, ord=2, axis=1)
< kwargs.get("reprojectionError", 5)
).sum() / objectPoints.shape[0]
return (
True,
torch.tensor(h, device=u_map.device).type(u_map.dtype),
repro_error,
ratio,
Violation.SUCCESS,
)
``` |
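A quick worked check of `rvec_tvec_to_H` above (run with this module in scope): an axis-angle vector for 90 degrees about z plus a translation becomes a 4x4 homogeneous transform.
```python
import numpy as np

r_vec = np.array([0.0, 0.0, np.pi / 2])
t_vec = np.array([0.1, 0.0, 0.5])
H = rvec_tvec_to_H(r_vec, t_vec)
print(np.round(H, 3))
# approximately:
# [[ 0. -1.  0.  0.1]
#  [ 1.  0.  0.  0. ]
#  [ 0.  0.  1.  0.5]
#  [ 0.  0.  0.  1. ]]
```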
{
"source": "jonasfrey/pydualsense",
"score": 3
} |
#### File: pydualsense/examples/controller_demo.py
```python
from pydualsense import *
import time
from tkinter import *
from tkinter.ttk import *
import json
import random
import colorsys
import gc
import autopy
class PixelObject:
def __init__(self, x, y, w, h, color):
        self.x = x  # changeable value
        self.y = y  # changeable value
        self.w = w  # changeable value
        self.h = h  # changeable value
self._x = x # initial value
self._y = y # initial value
self._w = w # initial value
self._h = h # initial value
self.color = color
class ControllerPixelObject:
def __init__(self, width, height):
self.width = width
self.height = height
self.set_active_and_inactive_color_by_hue(0.5)
self.l2 = PixelObject(2, 1 , 3, 2, "blue")
self.l2.boolean_pressed_name = "L2"
self.l1 = PixelObject(2, 4 , 3, 1, "blue")
self.l1.boolean_pressed_name = "L1"
#mirrored
l2_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.l2))
self.r2 = PixelObject(l2_mirrored_x_y_w_h[0], l2_mirrored_x_y_w_h[1], l2_mirrored_x_y_w_h[2], l2_mirrored_x_y_w_h[3], "blue")
self.r2.boolean_pressed_name = "R2"
l1_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.l1))
self.r1 = PixelObject(l1_mirrored_x_y_w_h[0], l1_mirrored_x_y_w_h[1], l1_mirrored_x_y_w_h[2], l1_mirrored_x_y_w_h[3], "blue")
self.r1.boolean_pressed_name = "R1"
#self.dualsense.state.DpadUp
self.DpadUp = PixelObject(3, 8, 1, 1, "blue")
self.DpadUp.boolean_pressed_name = "DpadUp"
#self.dualsense.state.DpadDown
self.DpadDown = PixelObject(3, 10, 1, 1, "blue")
self.DpadDown.boolean_pressed_name = "DpadDown"
#self.dualsense.state.DpadLeft
self.DpadLeft = PixelObject(2, 9, 1, 1, "blue")
self.DpadLeft.boolean_pressed_name = "DpadLeft"
#self.dualsense.state.DpadRight
self.DpadRight = PixelObject(4, 9, 1, 1, "blue")
self.DpadRight.boolean_pressed_name = "DpadRight"
#mirrored
#self.dualsense.state.triangle
DpadUp_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.DpadUp))
self.triangle = PixelObject(DpadUp_mirrored_x_y_w_h[0], DpadUp_mirrored_x_y_w_h[1], DpadUp_mirrored_x_y_w_h[2], DpadUp_mirrored_x_y_w_h[3], "blue")
self.triangle.boolean_pressed_name = "triangle"
#self.dualsense.state.cross
DpadDown_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.DpadDown))
self.cross = PixelObject(DpadDown_mirrored_x_y_w_h[0], DpadDown_mirrored_x_y_w_h[1], DpadDown_mirrored_x_y_w_h[2], DpadDown_mirrored_x_y_w_h[3], "blue")
self.cross.boolean_pressed_name = "cross"
#self.dualsense.state.circle
DpadLeft_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.DpadLeft))
self.circle = PixelObject(DpadLeft_mirrored_x_y_w_h[0], DpadLeft_mirrored_x_y_w_h[1], DpadLeft_mirrored_x_y_w_h[2], DpadLeft_mirrored_x_y_w_h[3], "blue")
self.circle.boolean_pressed_name = "circle"
#self.dualsense.state.square
DpadRight_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.DpadRight))
self.square = PixelObject(DpadRight_mirrored_x_y_w_h[0], DpadRight_mirrored_x_y_w_h[1], DpadRight_mirrored_x_y_w_h[2], DpadRight_mirrored_x_y_w_h[3], "blue")
self.square.boolean_pressed_name = "square"
# self.packerC = 0
# self.square, self.triangle, self.circle, self.cross = False, False, False, False
# self.DpadUp, self.DpadDown, self.DpadLeft, self.DpadRight = False, False, False, False
# self.L1, self.L2, self.L3, self.R1, self.R2, self.R3, self.R2Btn, self.L2Btn = False, False, False, False, False, False, False, False
# self.share, self.options, self.ps, self.touch1, self.touch2, self.touchBtn, self.touchRight, self.touchLeft = False, False, False, False, False, False, False, False
# self.touchFinger1, self.touchFinger2 = False, False
# self.RX, self.RY, self.LX, self.LY = 128,128,128,128
# self.trackPadTouch0, self.trackPadTouch1 = DSTouchpad(), DSTouchpad()
#self.dualsense.state.share
self.share = PixelObject(5, 6, 1, 1, "blue")
self.share.boolean_pressed_name = "share"
#self.dualsense.state.options
share_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.share))
self.options = PixelObject(share_mirrored_x_y_w_h[0], share_mirrored_x_y_w_h[1], share_mirrored_x_y_w_h[2], share_mirrored_x_y_w_h[3], "blue")
self.options.boolean_pressed_name = "options"
#left stick
self.lstick = PixelObject(6, 11, 1, 1, "blue")
self.lstick.boolean_pressed_name = "L3"
#right stick
lstick_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lstick))
self.rstick = PixelObject(lstick_mirrored_x_y_w_h[0], lstick_mirrored_x_y_w_h[1], lstick_mirrored_x_y_w_h[2], lstick_mirrored_x_y_w_h[3], "blue")
self.rstick.boolean_pressed_name = "R3"
self.touchpad = PixelObject(6, 7, 7, 3, "blue")
self.touchpad.boolean_pressed_name = "touchBtn"
self.psbutton = PixelObject(8, 11, 3, 1, "blue")
self.psbutton.boolean_pressed_name = "ps"
self.micmutebutton = PixelObject(8, 13, 3, 1, "blue")
self.touchpadfinger1 = PixelObject(6, 7, 1, 1, "blue")
self.lborder1 = PixelObject(1, 5, 1, 13, "black")
self.lborder1.hsv_color = self.hsv_color_inactive
lborder1_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lborder1))
self.rborder1 = PixelObject(lborder1_mirrored_x_y_w_h[0], lborder1_mirrored_x_y_w_h[1], lborder1_mirrored_x_y_w_h[2], lborder1_mirrored_x_y_w_h[3], "black")
self.rborder1.hsv_color = self.hsv_color_inactive
self.lbordertop1 = PixelObject(2, 5, 8, 1, "black")
lbordertop1_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lbordertop1))
self.rbordertop1 = PixelObject(lbordertop1_mirrored_x_y_w_h[0], lbordertop1_mirrored_x_y_w_h[1], lbordertop1_mirrored_x_y_w_h[2], lbordertop1_mirrored_x_y_w_h[3], "black")
self.lborderbottom1 = PixelObject(2, 17, 4, 1, self.hex_color_inactive)
lborderbottom1_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lborderbottom1))
self.rborderbottom1 = PixelObject(lborderbottom1_mirrored_x_y_w_h[0], lborderbottom1_mirrored_x_y_w_h[1], lborderbottom1_mirrored_x_y_w_h[2], lborderbottom1_mirrored_x_y_w_h[3], "black")
self.lborder2 = PixelObject(5, 17-4, 1, 5, "black")
lborder2_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lborder2))
self.rborder2 = PixelObject(lborder2_mirrored_x_y_w_h[0], lborder2_mirrored_x_y_w_h[1], lborder2_mirrored_x_y_w_h[2], lborder2_mirrored_x_y_w_h[3], "black")
self.lborder3 = PixelObject(6, 17-4, 1, 1, "black")
lborder3_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lborder3))
self.rborder3 = PixelObject(lborder3_mirrored_x_y_w_h[0], lborder3_mirrored_x_y_w_h[1], lborder3_mirrored_x_y_w_h[2], lborder3_mirrored_x_y_w_h[3], "black")
self.lborder4 = PixelObject(6, 17-3, 4, 1, "black")
lborder4_mirrored_x_y_w_h = (self.get_mirrored_x_y_w_h(self.lborder4))
self.rborder4 = PixelObject(lborder4_mirrored_x_y_w_h[0], lborder4_mirrored_x_y_w_h[1], lborder4_mirrored_x_y_w_h[2], lborder4_mirrored_x_y_w_h[3], "black")
def get_mirrored_x_y_w_h(self, obj):
return [self.width-(obj.x-2)-obj.w, obj.y, obj.w, obj.h]
def set_active_and_inactive_color_by_hue(self, hue):
hsv_color = [hue,1,0.75]
rgb_color = colorsys.hsv_to_rgb(hsv_color[0], hsv_color[1], hsv_color[2])
hex_color = ('#%02x%02x%02x'%(round(rgb_color[0]*255),round(rgb_color[1]*255),round(rgb_color[2]*255)))
self.hex_color_active = hex_color
self.hsv_color_active = hsv_color
hsv_color = [hue,1,0.5]
rgb_color = colorsys.hsv_to_rgb(hsv_color[0], hsv_color[1], hsv_color[2])
hex_color = ('#%02x%02x%02x'%(round(rgb_color[0]*255),round(rgb_color[1]*255),round(rgb_color[2]*255)))
self.hex_color_inactive = hex_color
self.hsv_color_inactive = hsv_color
class App:
def __init__(self):
self.controller_pixel_object = ControllerPixelObject(17, 17)
self.running = True
self.render_id = 0
# creating tkinter window
self.tk_root = Tk()
self.tk_root.title("Python GUI")
self.tk_root_w = 500
self.tk_root_h = 500
self.tk_root.geometry(str(self.tk_root_w)+"x"+str(self.tk_root_h))
self.tk_root.protocol("WM_DELETE_WINDOW", self.end)
# create dualsense
self.dualsense = pydualsense()
# find device and initialize
self.dualsense.init()
self.trigger_modes = [
"Off",
"Rigid",
"Pulse",
"Rigid_A",
"Rigid_B",
"Rigid_AB",
"Pulse_A",
"Pulse_B",
"Pulse_AB",
"Calibration"]
self.trigger_mode = "Off"
self.trigger_mode_index = 0
self.trigger_mode_change_delta = 0
self.trigger_mode_last_change_ts = 0
# Progress bar widget
# self.progress_l = Progressbar(self.tk_root, orient = VERTICAL, length = 100, mode = 'determinate')
# self.progress_r = Progressbar(self.tk_root, orient = VERTICAL, length = 100, mode = 'determinate')
# self.progress_l.pack(pady = 10)
# self.progress_r.pack(pady = 10)
self.label_text = StringVar()
self.label_text.set("test")
self.label = Label(self.tk_root, textvariable=self.label_text).place(x=5, y=0)
#self.label.pack()
self.label2_text = StringVar()
self.label2_text.set("test")
self.label2 = Label(self.tk_root, textvariable=self.label2_text).place(x=5, y=0)
#self.label2.pack()
# self.button = Button(self.tk_root, text ="close", command = self.end)
# self.button.pack(pady = 10)
if(self.controller_pixel_object.width > self.controller_pixel_object.height):
self.controller_pixel_object_factor = self.tk_root_w / (self.controller_pixel_object.width+2)
else:
self.controller_pixel_object_factor = self.tk_root_h /( self.controller_pixel_object.height+2)
self.tk_canvas = Canvas(self.tk_root, width=self.tk_root_w, height=self.tk_root_h)
self.tk_canvas.pack()
self.reset_touchpad_autopy_mouse()
self.start()
def end(self):
#print(TriggerModes.__dict__)
self.running = False
self.dualsense.close()
self.tk_root.destroy()
self.tk_root.quit()
def render_canvas(self):
for obj in gc.get_objects():
if isinstance(obj, PixelObject):
factor = self.controller_pixel_object_factor
x0 = obj.x * factor
y0 = obj.y * factor
x1 = x0 + obj.w * factor
y1 = y0 + obj.h * factor
color = obj.color
if hasattr(obj, 'boolean_pressed_name'):
if(getattr(self.dualsense.state, obj.boolean_pressed_name)):
color = self.controller_pixel_object.hex_color_active
else:
color = self.controller_pixel_object.hex_color_inactive
self.tk_canvas.create_rectangle(x0, y0, x1, y1, fill=color)
def reset_touchpad_autopy_mouse(self):
self.touch0_up = True
self.touch0_down_autopy_mouse_location = None
self.touch0_down_dualsense_state_trackpadtouch0_x = None
self.touch0_down_dualsense_state_trackpadtouch0_y = None
self.touch0_down_dualsense_state_trackpadtouch0_x_delta = None
self.touch0_down_dualsense_state_trackpadtouch0_y_delta = None
def start(self):
try:
while self.running:
self.tk_canvas.delete("all")
#render canvas
self.render_canvas()
self.render_id = self.render_id + 1
#print(dualsense.state.RY)
if(self.dualsense.state.R1):
print("move right stick up and down to change force on right rumble")
self.dualsense.setRightMotor((255-(127+self.dualsense.state.RY)))
print("move left stick up and down to change force on left rumble")
self.dualsense.setLeftMotor((255-(127+self.dualsense.state.LY)))
else:
print("move right stick up and down to change force on right rumble")
self.dualsense.setRightMotor(0)
print("move left stick up and down to change force on left rumble")
self.dualsense.setLeftMotor(0)
if(self.dualsense.state.cross):
self.trigger_mode_change_delta = time.time() - self.trigger_mode_last_change_ts
if(self.trigger_mode_change_delta > 0.1):
self.trigger_mode_last_change_ts = time.time()
self.trigger_mode_index = (self.trigger_mode_index + 1) % len(self.trigger_modes)
self.trigger_mode = self.trigger_modes[self.trigger_mode_index]
# self.progress_l['value'] = (100/255)*(255-(127+self.dualsense.state.LY))
# self.progress_r['value'] = (100/255)*(255-(127+self.dualsense.state.RY))
self.dualsense.triggerR.setMode(TriggerModes[self.trigger_mode])
self.dualsense.triggerR.setForce(int(((128+self.dualsense.state.LY)/255)*6), 127+self.dualsense.state.RY)
self.dualsense.triggerL.setMode(TriggerModes[self.trigger_mode])
self.dualsense.triggerL.setForce(int(((128+self.dualsense.state.LY)/255)*6), 127+self.dualsense.state.RY)
self.label_text.set("press x to change, trigger mode:"+str(self.trigger_mode))
self.tk_root.title("press x to change, trigger mode:"+str(self.trigger_mode))
if(self.dualsense.state.triangle):
if((self.render_id)%10== 0):
#self.dualsense.light.setColorI(random.randint(0,255),random.randint(0,255), random.randint(0,255))
random_hsv = [(1/100)*random.randint(0,100),1,1]
self.controller_pixel_object.set_active_and_inactive_color_by_hue(random_hsv[0])
random_color = colorsys.hsv_to_rgb(random_hsv[0], random_hsv[1], random_hsv[2])
#self.dualsense.light.setColorI(int(random_color[0]*255), int(random_color[1]*255), int(random_color[2]*255))
self.dualsense.light.setColorI(int(random_color[0]*255), int(random_color[1]*255), int(random_color[2]*255))
for obj in gc.get_objects():
if isinstance(obj, PixelObject):
obj.color = self.controller_pixel_object.hex_color_inactive
#print(Brightness.__dict__)#{'_generate_next_value_': <function Flag._generate_next_value_ at 0x00000225991D83A0>, '__module__': 'pydualsense.enums', '__doc__': 'An enumeration.', '_member_names_': ['high', 'medium', 'low'], '_member_map_': {'high': <Brightness.high: 0>, 'medium': <Brightness.medium: 1>, 'low': <Brightness.low: 2>}, '_member_type_': <class 'int'>, '_value2member_map_': {0: <Brightness.high: 0>, 1: <Brightness.medium: 1>, 2: <Brightness.low: 2>}, 'high': <Brightness.high: 0>, 'medium': <Brightness.medium: 1>, 'low': <Brightness.low: 2>, '__new__': <function Enum.__new__ at 0x00000225991D5CA0>}
else:
if(self.odd_frame_ids(20)):
#autopy.mouse.move(200,200)
self.dualsense.light.setBrightness(Brightness.low)
else:
self.dualsense.light.setBrightness(Brightness.high)
if(self.dualsense.state.L1):
self.controller_pixel_object.l1.color = self.controller_pixel_object.hex_color_active
else:
self.controller_pixel_object.l1.color = self.controller_pixel_object.hex_color_inactive
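# Offset the on-screen stick pixels by the analog stick axes (roughly -127..127), scaled down to about one cell of the pixel grid.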
self.controller_pixel_object.rstick.x = self.controller_pixel_object.rstick._x + (1/127)*self.dualsense.state.RX
self.controller_pixel_object.rstick.y = self.controller_pixel_object.rstick._y + (1/127)*self.dualsense.state.RY
self.controller_pixel_object.lstick.x = self.controller_pixel_object.lstick._x + (1/127)*self.dualsense.state.LX
self.controller_pixel_object.lstick.y = self.controller_pixel_object.lstick._y + (1/127)*self.dualsense.state.LY
self.controller_pixel_object.lborder1.hsv_color[2] = ((self.controller_pixel_object.lborder1.hsv_color[2] + (((self.dualsense.leftMotor+1)/(255+1))*0.1) )) % 0.5 + 0.5
rgb_color = colorsys.hsv_to_rgb(self.controller_pixel_object.lborder1.hsv_color[0], self.controller_pixel_object.lborder1.hsv_color[1], self.controller_pixel_object.lborder1.hsv_color[2])
hex_color = ('#%02x%02x%02x'%(round(rgb_color[0]*255),round(rgb_color[1]*255),round(rgb_color[2]*255)))
self.controller_pixel_object.lborder1.color = hex_color
self.controller_pixel_object.rborder1.hsv_color[2] = ((self.controller_pixel_object.rborder1.hsv_color[2] + (((self.dualsense.rightMotor+1)/(255+1))*0.1) )) % 0.5 + 0.5
rgb_color = colorsys.hsv_to_rgb(self.controller_pixel_object.rborder1.hsv_color[0], self.controller_pixel_object.rborder1.hsv_color[1], self.controller_pixel_object.rborder1.hsv_color[2])
hex_color = ('#%02x%02x%02x'%(round(rgb_color[0]*255),round(rgb_color[1]*255),round(rgb_color[2]*255)))
self.controller_pixel_object.rborder1.color = hex_color
if(self.dualsense.state.touchBtn):
autopy.mouse.click()
self.controller_pixel_object.touchpadfinger1.color = self.controller_pixel_object.hex_color_inactive
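# Use the trackpad as a relative pointing device: remember the mouse position and touch coordinates at touch-down, then move the system cursor by the finger's delta (the pad reports coordinates on a 1920x1080 grid).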
if(self.dualsense.state.trackPadTouch0.isActive == True):
self.controller_pixel_object.touchpadfinger1.color = self.controller_pixel_object.hex_color_active
if(self.touch0_down_autopy_mouse_location == None):
self.touch0_down_autopy_mouse_location = autopy.mouse.location()
self.touch0_down_dualsense_state_trackpadtouch0_x = self.dualsense.state.trackPadTouch0.X
self.touch0_down_dualsense_state_trackpadtouch0_y = self.dualsense.state.trackPadTouch0.Y
else:
self.touch0_up = False
#trackpad has FullHD 1920 x 1080 :0
self.label2_text.set(str(self.dualsense.state.trackPadTouch0.X)+":"+str(self.dualsense.state.trackPadTouch0.Y))
self.touch0_down_dualsense_state_trackpadtouch0_x_delta = self.touch0_down_dualsense_state_trackpadtouch0_x - self.dualsense.state.trackPadTouch0.X
self.touch0_down_dualsense_state_trackpadtouch0_y_delta = self.touch0_down_dualsense_state_trackpadtouch0_y - self.dualsense.state.trackPadTouch0.Y
mouse_move_sensitivity = 0.5
autopy.mouse.move(self.touch0_down_autopy_mouse_location[0]-(self.touch0_down_dualsense_state_trackpadtouch0_x_delta*mouse_move_sensitivity),self.touch0_down_autopy_mouse_location[1]-(self.touch0_down_dualsense_state_trackpadtouch0_y_delta*mouse_move_sensitivity))
finger_x = ((self.controller_pixel_object.touchpad.w-1) / 1920) * self.dualsense.state.trackPadTouch0.X
finger_y = ((self.controller_pixel_object.touchpad.h-1) / 1080) * self.dualsense.state.trackPadTouch0.Y
self.controller_pixel_object.touchpadfinger1.x = self.controller_pixel_object.touchpadfinger1._x + finger_x
self.controller_pixel_object.touchpadfinger1.y = self.controller_pixel_object.touchpadfinger1._y + finger_y
else:
if(self.touch0_down_autopy_mouse_location != None):
self.reset_touchpad_autopy_mouse()
self.tk_root.update_idletasks()
self.tk_root.update()
time.sleep(0.001)
#self.dualsense.light.setPulseOption(PulseOptions.FadeBlue)
#print(LedOptions.__dict__)
#print(PulseOptions.__dict__)
#self.label_text.set((self.state.trackPadTouch0.X))
except KeyboardInterrupt:
pass
def odd_frame_ids(self, frame_ids):
return int(self.render_id/frame_ids)%2 == 0
app = App()
``` |
{
"source": "jonasfreyr/Home-Projects",
"score": 3
} |
#### File: Home-Projects/GunGame/GunGame.py
```python
import pygame, math, time
wW = 1200
wH = 900
pygame.init()
class game:
def __init__(self, wW, wH):
self.screen = pygame.display.set_mode((wW, wH))
self.clock = pygame.time.Clock()
self.wW = wW
self.wH = wH
telR = 0
telL = 0
color = (0, 0, 0)
Scolor = (255, 255, 255)
self.fontSize = 50
self.font = pygame.font.SysFont("monospace", self.fontSize)
self.speed = 5
self.GunSpeed = 7
self.GunWait = 50
self.FPS = 120
self.cheats = False
self.gP = False
self.win = 10
game.restart(self, telR, telL, color, Scolor)
def restart(self, telR, telL, color, Scolor):
self.x = int((self.wW / 2) / 2)
self.y = int((self.wH / 2))
self.x2 = int((self.wW / 2) + self.wW / 4)
self.y2 = int((self.wH / 2))
if self.cheats == False:
self.respos = True
self.f3 = False
self.f4 = False
self.f5 = False
self.f6 = False
self.won = False
self.size = 50
self.shotSize = 10
self.shotsL = []
self.shotsR = []
self.color = color
self.Scolor = Scolor
self.telR = telR
self.telL = telL
def shoot(self, x, y, dire):
if dire == True:
self.shotsR.append([x + self.size, y])
elif dire == False:
self.shotsL.append([x - self.size - self.shotSize * 2, y])
def check(self):
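# Check every bullet against the opposing player's circle: compare the distance from the circle centre to each of the bullet's four corners with the circle radius.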
temp = self.shotsR
for a in temp:
x = a[0]
y = a[1]
vector1 = []
vector1.append(self.x2 - x)
vector1.append(self.y2 - y)
vector2 = []
vector2.append(self.x2 - x)
vector2.append(self.y2 - (y + self.shotSize))
vector3 = []
vector3.append(self.x2 - (x + self.shotSize * 2))
vector3.append(self.y2 - y)
vector4 = []
vector4.append(self.x2 - (x + self.shotSize * 2))
vector4.append(self.y2 - (y + self.shotSize))
if (self.size >= int(math.sqrt(math.pow(vector1[0], 2) + math.pow(vector1[1], 2)))) or (self.size >= int(math.sqrt(math.pow(vector2[0], 2) + math.pow(vector2[1], 2)))) or (self.size >= int(math.sqrt(math.pow(vector3[0], 2) + math.pow(vector3[1], 2)))) or (self.size >= int(math.sqrt(math.pow(vector4[0], 2) + math.pow(vector4[1], 2)))):
if self.respos == False:
self.shotsR.remove(a)
self.telR += 1
else:
game.restart(self, self.telR + 1, self.telL, self.color, self.Scolor)
temp = self.shotsL
for a in temp:
x = a[0]
y = a[1]
vector1 = []
vector1.append(self.x - x)
vector1.append(self.y - y)
vector2 = []
vector2.append(self.x - x)
vector2.append(self.y - (y + self.shotSize))
vector3 = []
vector3.append(self.x - (x + self.shotSize * 2))
vector3.append(self.y - y)
vector4 = []
vector4.append(self.x - (x + self.shotSize * 2))
vector4.append(self.y - (y + self.shotSize))
if (self.size >= int(math.sqrt(math.pow(vector1[0], 2) + math.pow(vector1[1], 2)))) or (self.size >= int(math.sqrt(math.pow(vector2[0], 2) + math.pow(vector2[1], 2)))) or (self.size >= int(math.sqrt(math.pow(vector3[0], 2) + math.pow(vector3[1], 2)))) or (self.size >= int(math.sqrt(math.pow(vector4[0], 2) + math.pow(vector4[1], 2)))):
if self.respos == False:
self.shotsL.remove(a)
self.telL += 1
else:
game.restart(self, self.telR, self.telL + 1, self.color, self.Scolor)
def shots(self):
for a in range(len(self.shotsR)):
x = self.shotsR[a][0]
y = self.shotsR[a][1]
pygame.draw.rect(self.screen, self.color ,pygame.Rect(x, y, self.shotSize * 2, self.shotSize ))
self.shotsR[a].insert(0 ,x + self.GunSpeed)
self.shotsR[a].remove(x)
for a in range(len(self.shotsL)):
x = self.shotsL[a][0]
y = self.shotsL[a][1]
pygame.draw.rect(self.screen, self.color ,pygame.Rect(x, y, self.shotSize * 2, self.shotSize ))
self.shotsL[a].insert(0 ,x - self.GunSpeed)
self.shotsL[a].remove(x)
temp = self.shotsR
for a in temp:
if a[0] > self.wW:
self.shotsR.remove(a)
temp = self.shotsL
for a in temp:
if a[0] < 0:
self.shotsL.remove(a)
def colorChange(self):
if self.color == (0, 0, 0):
self.color = (255,255,255)
self.Scolor = (0, 0, 0)
elif self.color == (255, 255, 255):
self.color = (0, 0, 0)
self.Scolor = (255, 255, 255)
def winner(self,vann):
self.won = True
label = self.font.render(vann + " won!", 1, self.color)
self.screen.blit(label,(self.wW / 2 - self.fontSize * 3, self.wH / 2 - (self.fontSize / 2)))
def loop(self):
tel1 = self.GunWait
tel2 = self.GunWait
while True:
pressed = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
if event.type == pygame.KEYUP:
if event.key == pygame.K_F1:
game.colorChange(self)
if event.key == pygame.K_F2:
self.respos = not self.respos
if event.key == pygame.K_F3:
self.f3 = not self.f3
if event.key == pygame.K_F4:
self.f4 = not self.f4
if event.key == pygame.K_F5:
self.f5 = not self.f5
if event.key == pygame.K_F6:
self.f6 = not self.f6
if event.key == pygame.K_ESCAPE:
self.gP = not self.gP
if self.gP == False and self.won == False:
self.screen.fill(self.Scolor)
if self.f3 or self.f4 or self.f5 or self.f6:
self.cheats = True
if self.f3 == True:
self.GunSpeed = 20
elif self.f3 == False:
self.GunSpeed = 7
if self.f4 == True:
self.GunWait = 15
elif self.f4 == False:
self.GunWait = 50
if self.f5 == True:
self.FPS = 30
elif self.f5 == False:
self.FPS = 120
if self.f6 == True:
self.speed = 15
elif self.f6 == False:
self.speed = 5
if self.y > 0 + self.size:
if pressed[pygame.K_w]: self.y -= self.speed
if self.y < self.wH - self.size:
if pressed[pygame.K_s]: self.y += self.speed
if self.x > 0 + self.size:
if pressed[pygame.K_a]: self.x -= self.speed
if self.x < int(self.wW /2) - self.size:
if pressed[pygame.K_d]: self.x += self.speed
if tel1 >= self.GunWait:
if pressed[pygame.K_SPACE]:
game.shoot(self, self.x, self.y, True)
tel1 = 0
if self.y2 > 0 + self.size:
if pressed[pygame.K_UP]: self.y2 -= self.speed
if self.y2 < self.wH - self.size:
if pressed[pygame.K_DOWN]: self.y2 += self.speed
if self.x2 > int(self.wW / 2) + self.size:
if pressed[pygame.K_LEFT]: self.x2 -= self.speed
if self.x2 < int(self.wW) - self.size:
if pressed[pygame.K_RIGHT]: self.x2 += self.speed
if tel2 >= self.GunWait:
if pressed[pygame.K_KP0]:
game.shoot(self, self.x2, self.y2, False)
tel2 = 0
tel1 += 1
tel2 += 1
game.check(self)
pygame.draw.line(self.screen,self.color,[self.wW / 2, 0],[self.wW / 2, self.wH])
pygame.draw.circle(self.screen, self.color,(self.x, self.y), self.size)
pygame.draw.circle(self.screen, self.color, (self.x2, self.y2), self.size)
game.shots(self)
scoreR = self.font.render(str(self.telR), 1, self.color)
scoreL = self.font.render(str(self.telL), 1, self.color)
if self.telR < 10:
self.screen.blit(scoreR, ((self.wW / 2) - self.fontSize, 0))
elif self.telR >= 10 and self.telR < 100:
self.screen.blit(scoreR, ((self.wW / 2) - self.fontSize - 15, 0))
elif self.telR >= 100 and self.telR < 1000:
self.screen.blit(scoreR, ((self.wW / 2) - self.fontSize - 45, 0))
elif self.telR >= 1000:
self.screen.blit(scoreR, ((self.wW / 2) - self.fontSize - 75, 0))
self.screen.blit(scoreL, ((self.wW / 2) + 15, 0))
if self.telR == self.win:
game.winner(self,"Left")
elif self.telL == self.win:
game.winner(self,"Right")
if pressed[pygame.K_r]:
self.cheats = False
game.restart(self, 0, 0, self.color, self.Scolor)
pygame.display.flip()
self.clock.tick(self.FPS)
H = game(wW, wH)
H.loop()
```
#### File: Home-Projects/Pong/obj.py
```python
from constants import *
from vector import Vector
def draw_text(screen, text, color, x, y):
'''
Draws text at the desired position
'''
# Puts the text into a surface
text_surface = FONT.render(text, True, color)
# Creates a rect object at the desired position
text_rect = text_surface.get_rect()
text_rect.center = (x, y)
# Adds the text surface into the screen surface on the rect position
screen.blit(text_surface, text_rect)
class Player:
'''
A player object that holds all the necessary methods
'''
def __init__(self, x, y, screen):
'''
Just takes in the position and the screen surface
'''
self.pos = Vector(x, y)
# Make the velocity 0
self.vel = Vector()
self.screen = screen
def move(self, y):
'''
Function that moves the player by y pixels
'''
self.vel.y = y
def check_ball(self, ball):
'''
Function that checks if the ball is colliding with the player
'''
if (ball.pos.x + BALL_SIZE > self.pos.x and ball.pos.y + BALL_SIZE > self.pos.y) and (
ball.pos.x < self.pos.x + PLAYER_DIMENSIONS[0] and ball.pos.y < self.pos.y + PLAYER_DIMENSIONS[1]):
return True
return False
def update(self, dt):
'''
Function that updates the position
'''
# Multiply by delta time so the game stays the same no matter the FPS
self.pos += self.vel * dt
# Checks if the player moved too low
if self.pos.y < 0:
self.pos.y = 0
# Check if the player moved too high
elif self.pos.y + PLAYER_DIMENSIONS[1] > WINDOW_HEIGHT:
self.pos.y = WINDOW_HEIGHT - PLAYER_DIMENSIONS[1]
# Reset the velocity so when the player lets go of the key,
# it will stop moving
self.vel *= 0
def draw(self):
'''
Function that draws the player
'''
pygame.draw.rect(self.screen, COLOR, pygame.Rect(self.pos.x, self.pos.y, PLAYER_DIMENSIONS[0], PLAYER_DIMENSIONS[1]))
class Ball:
'''
A ball object that holds all the necessary methods
'''
def __init__(self, x, y, screen):
'''
Just takes in the position and the screen surface
'''
self.pos = Vector(x, y)
# Make the velocity 0
self.vel = Vector()
self.screen = screen
def update(self, dt):
'''
Function that updates the position
'''
# Multiply by delta time so the game stays the same no matter the FPS
self.pos += self.vel * dt
def draw(self):
'''
Function that draws the ball
'''
pygame.draw.rect(self.screen, COLOR, pygame.Rect(self.pos.x, self.pos.y, BALL_SIZE, BALL_SIZE))
```
#### File: Home-Projects/Pong/Pong.py
```python
from obj import *
import random
class Game:
def __init__(self):
self.screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
self.FPS = FPS
self.clock = pygame.time.Clock()
# The inital position of the ball
x1 = random.choice([-BALL_SPEED, BALL_SPEED])
y1 = random.choice([-BALL_SPEED, BALL_SPEED])
# A boolean to see who hit the ball last
# Used to fix a bug that makes the player hit the ball
# multiple times.
self.right = True if x1 < 0 else False
# Make an instance of the ball
self.ball = Ball(WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2, self.screen)
self.ball.vel.x = x1
self.ball.vel.y = y1
# The position of player 1
x1 = PLAYER_PIXELS_FROM_SIDE
y1 = WINDOW_HEIGHT / 2 - PLAYER_DIMENSIONS[1] / 2
# Make an instance of player 1
player1 = Player(x1, y1, self.screen)
# The position of player 2
x2 = WINDOW_WIDTH - PLAYER_PIXELS_FROM_SIDE - PLAYER_DIMENSIONS[0]
# Make an instance of player 2
player2 = Player(x2, y1, self.screen)
# Add the players into a list to be used later
self.players = [player1, player2]
# Set all the necessary variables
self.restart = False
self.score = [0, 0]
self.served = False
self.finished = False
# print(pygame.font.get_fonts()) # To see all the available fonts
def reset(self):
# Reset the ball position
self.ball.pos.x = WINDOW_WIDTH / 2
self.ball.pos.y = WINDOW_HEIGHT / 2
self.served = False
def checkBall(self):
'''
A function to check if the ball collides with something
'''
# To check if the player collides with player 1
if self.players[0].check_ball(self.ball) and self.right:
self.ball.vel.x *= -1
self.right = not self.right
HIT_PADDLE_SOUND.play()
# To check if the player collides with player 2
elif self.players[1].check_ball(self.ball) and not self.right:
self.ball.vel.x *= -1
self.right = not self.right
HIT_PADDLE_SOUND.play()
# If the ball collides with the wall
if self.ball.pos.y < 0 or self.ball.pos.y + BALL_SIZE > WINDOW_HEIGHT:
self.ball.vel.y *= -1
HIT_WALL_SOUND.play()
# If the ball goes outside the screen
if self.ball.pos.x < 0:
self.score[1] += 1
self.reset()
SCORE_SOUND.play()
elif self.ball.pos.x + BALL_SIZE > WINDOW_WIDTH:
self.score[0] += 1
self.reset()
SCORE_SOUND.play()
def loop(self):
'''
The main game loop
'''
while True:
self.events()
if not self.finished:
self.update()
self.draw()
if self.restart is True:
break
def events(self):
'''
A function that checks the inputs
'''
# For loop to check the KEYUP events
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE):
quit()
if event.type == pygame.KEYUP:
if event.key == pygame.K_r:
self.restart = True
if not self.finished:
if event.key == pygame.K_SPACE:
self.served = True
if not self.finished:
# Get all the keys being held down
pressed = pygame.key.get_pressed()
if pressed[pygame.K_w]:
self.players[0].move(-PLAYER_SPEED)
if pressed[pygame.K_s]:
self.players[0].move(PLAYER_SPEED)
if pressed[pygame.K_UP]:
self.players[1].move(-PLAYER_SPEED)
if pressed[pygame.K_DOWN]:
self.players[1].move(PLAYER_SPEED)
def update(self):
'''
Updates all the objects
e.g. adds the velocity to the position of an object
'''
# Get the delta time since the last call
# Is used to make sure the game plays the same on different Fps
dt = self.clock.tick(self.FPS)
self.checkBall()
if self.served:
self.ball.update(dt)
for player in self.players:
player.update(dt)
if self.score[0] == 10 or self.score[1] == 10:
self.finished = True
def draw(self):
'''
Function that draws everything on the screen.
'''
self.screen.fill(BACKGROUND_COLOR)
pygame.draw.line(self.screen, COLOR, (WINDOW_WIDTH / 2, 0), (WINDOW_WIDTH / 2, WINDOW_HEIGHT), 1)
self.drawPlayers()
self.ball.draw()
self.draw_text()
pygame.display.flip()
def drawPlayers(self):
'''
Function that draws all the players
'''
for player in self.players:
player.draw()
def draw_text(self):
'''
Function that draws all the text
'''
# If the game is not over and the ball is to be served
if not self.served and not self.finished:
draw_text(self.screen, "Serve the ball!", SERVE_COLOR, WINDOW_WIDTH / 2, WINDOW_HEIGHT / 5)
# Draws the scores
draw_text(self.screen, str(self.score[0]), COLOR, WINDOW_WIDTH / 5, WINDOW_HEIGHT / 10)
draw_text(self.screen, str(self.score[1]), COLOR, WINDOW_WIDTH / 5*4, WINDOW_HEIGHT / 10)
# If someone won, draw who won
if self.finished:
if self.score[0] == 10:
draw_text(self.screen, "Left Won!", SERVE_COLOR, WINDOW_WIDTH / 2, WINDOW_HEIGHT / 5)
elif self.score[1] == 10:
draw_text(self.screen, "Right Won!", SERVE_COLOR, WINDOW_WIDTH / 2, WINDOW_HEIGHT / 5)
while True:
h = Game()
h.loop()
```
#### File: Home-Projects/pyglet_test/GameObjects.py
```python
import pyglet
def preload_img(img):
return pyglet.image.load("res/sprites/" + img)
class GameObject:
def __init__(self, posx, posy, sprite=None):
self.posx = posx
self.posy = posy
self.velx = 0
self.vely = 0
if sprite is not None:
self.sprite = sprite
self.sprite.x = self.posx
self.sprite.y = self.posy
def draw(self):
self.sprite.draw()
def update(self, dt):
self.posx += self.velx * dt
self.posy += self.vely * dt
self.sprite.x = self.posx
self.sprite.y = self.posy
```
#### File: Home-Projects/Shooter/sprites.py
```python
import pygame as pg
from settings import *
from tilemap import collide_hit_rect
import random, math
import pytweening as tween
from hud import *
def collide_with_walls(sprite, group, dir):
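# Axis-separated collision response: resolve movement along one axis at a time, pushing the sprite's hit rect flush against whichever wall it overlapped and zeroing that velocity component.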
if dir == 'x':
hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)
if hits:
if hits[0].rect.centerx > sprite.hit_rect.centerx:
sprite.pos.x = hits[0].rect.left - sprite.hit_rect.width / 2
if hits[0].rect.centerx < sprite.hit_rect.centerx:
sprite.pos.x = hits[0].rect.right + sprite.hit_rect.width / 2
sprite.vel.x = 0
sprite.hit_rect.centerx = sprite.pos.x
if dir == 'y':
hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)
if hits:
if hits[0].rect.centery > sprite.hit_rect.centery:
sprite.pos.y = hits[0].rect.top - sprite.hit_rect.height / 2
if hits[0].rect.centery < sprite.hit_rect.centery:
sprite.pos.y = hits[0].rect.bottom + sprite.hit_rect.height / 2
sprite.vel.y = 0
sprite.hit_rect.centery = sprite.pos.y
class Player(pg.sprite.Sprite):
def __init__(self, game, x, y):
self._layer = PLAYER_LAYER
self.groups = game.all_sprites
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.weapon = "pistol"
self.image = game.player_images[self.weapon]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.hit_rect = PLAYER_HIT_RECT
self.hit_rect.center = self.rect.center
self.vel = vec(0, 0)
self.pos = vec(x, y)
self.rot = 0
self.last_shot = 0
self.health = PLAYER_HEALTH
self.armor = 0
self.ammo = WEAPONS[self.weapon]['ammo_clip']
self.maxammo = WEAPONS[self.weapon]['ammo_max']
self.cursor = self.game.crosshair_img
self.cursor_rect = self.cursor.get_rect()
def add_health(self, amount):
self.health += amount
if self.health > PLAYER_HEALTH:
self.health = PLAYER_HEALTH
def reload(self):
self.game.effects_sounds['reload'].play()
if WEAPON_TYPES[self.weapon] != 'shotgun':
a = WEAPONS[self.weapon]['ammo_clip'] - self.ammo
if self.maxammo - a < 0:
a = self.maxammo
self.maxammo = 0
else:
self.maxammo -= a
self.ammo += a
else:
self.ammo += 1
self.maxammo -= 1
def draw_hit_box(self):
hit_box = self.hit_rect.move(self.game.camera.camera.topleft)
pg.draw.rect(self.game.screen, WHITE, hit_box, 2)
def get_mouse(self):
posM = pg.mouse.get_pos()
self.cursor_rect.center = posM
self.cursor_rect = self.game.camera.apply_mouse_rect(self.cursor_rect)
v = self.pos - self.cursor_rect.center
self.rot = v.angle_to(vec(-1, 0))
def get_keys(self):
self.vel = vec(0, 0)
mouse = pg.mouse.get_pressed()
keys = pg.key.get_pressed()
if keys[pg.K_a]:
self.vel.x -= PLAYER_SPEED
if keys[pg.K_d]:
self.vel.x += PLAYER_SPEED
if keys[pg.K_s]:
self.vel.y += PLAYER_SPEED
if keys[pg.K_w]:
self.vel.y -= PLAYER_SPEED
if keys[pg.K_TAB]:
self.game.tab = True
self.get_mouse()
if mouse[0] == 1:
if self.ammo != 0:
self.shoot()
else:
now = pg.time.get_ticks()
if now - self.last_shot > WEAPONS[self.weapon]['rate']:
self.last_shot = now
self.game.out_ammo.play()
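# Moving diagonally would otherwise be ~41% faster than moving straight; 0.7071 ~= 1/sqrt(2) keeps the speed constant.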
if self.vel.x != 0 and self.vel.y != 0:
self.vel *= 0.7071
def shoot(self):
now = pg.time.get_ticks()
if now - self.last_shot > WEAPONS[self.weapon]['rate']:
self.ammo -= 1
self.last_shot = now
dir = vec(1, 0).rotate(-self.rot)
pos = self.pos + BARREL_OFFSET.rotate(-self.rot)
for a in range(WEAPONS[self.weapon]['bullet_count']):
spread = random.uniform(-WEAPONS[self.weapon]['spread'], WEAPONS[self.weapon]['spread'])
Bullet(self.game, pos, dir.rotate(spread), self.rot, self.weapon)
snd = random.choice(self.game.weapon_sounds[self.weapon])
if snd.get_num_channels() > 2:
snd.stop()
snd.play()
MuzzleFlash(self.game, pos, self.rot, self.vel)
self.vel = vec(-WEAPONS[self.weapon]['kickback']).rotate(-self.rot)
def update(self):
self.get_keys()
self.pos += self.vel * self.game.dt
self.image = pg.transform.rotate(self.game.player_images[self.weapon], self.rot)
self.rect = self.image.get_rect()
self.hit_rect.centerx = self.pos.x
collide_with_walls(self, self.game.walls, 'x')
collide_with_walls(self, self.game.windows, 'x')
self.hit_rect.centery = self.pos.y
collide_with_walls(self, self.game.walls, 'y')
collide_with_walls(self, self.game.windows, 'y')
self.rect.center = self.hit_rect.center
class Enemy(pg.sprite.Sprite):
def __init__(self, game, x, y, weapon, last_known=None):
self._layer = ENEMY_LAYER
self.groups = game.all_sprites, game.enemies
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = game.enemy_images[weapon].copy()
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.hit_rect = ENEMY_HIT_RECT.copy()
self.hit_rect.center = self.rect.center
self.pos = vec(x, y)
self.rect.center = self.pos
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.rot = 0
if weapon != "mini":
self.health = ENEMY_HEALTH
self.maxHealth = ENEMY_HEALTH
elif weapon == "mini":
self.health = BOSS_HEALTH
self.maxHealth = BOSS_HEALTH
self.last_shot = 0
self.target = game.player
if last_known is None or weapon == "mini":
self.moving = False
self.last_known = [int(self.pos.x), int(self.pos.y)]
else:
self.moving = True
self.last_known = last_known
self.weapon = weapon
def draw_hit_box(self):
hit_box = self.hit_rect.move(self.game.camera.camera.topleft)
pg.draw.rect(self.game.screen, WHITE, hit_box, 2)
def avoid_mobs(self):
for enemy in self.game.enemies:
if enemy != self:
dist = self.pos - enemy.pos
if 0 < dist.length() < AVOID_RADIUS:
self.acc += dist.normalize()
def lineLine(self, x1, y1, x2, y2, x3, y3, x4, y4):
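# Parametric line-segment intersection test: uA and uB locate the crossing point on each segment, and the segments intersect only when both lie in [0, 1]. (Note: the shared denominator is zero for parallel segments, which this version does not guard against.)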
uA = ((x4-x3)*(y1-y3) - (y4-y3)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1))
uB = ((x2-x1)*(y1-y3) - (y2-y1)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1))
if (uA >= 0 and uA <= 1 and uB >= 0 and uB <= 1):
return True
else:
return False
def line_collide(self):
target_dist = self.target.pos - self.pos
rot = target_dist.angle_to(vec(1, 0))
pos = self.pos + BARREL_OFFSET.rotate(-rot)
for a in self.game.walls:
r = a.rect.copy()
topleft = r.topleft
topright = r.topright
bottomleft = r.bottomleft
bottomright = r.bottomright
left = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topleft[0], bottomleft[1])
right = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topright[0], topright[1], topright[0], bottomright[1])
top = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topright[0], topright[1])
bottom = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, bottomleft[0], bottomleft[1], bottomright[0], bottomright[1])
if left or right or top or bottom:
return True
for a in self.game.enemies:
r = a.hit_rect.copy()
topleft = r.topleft
topright = r.topright
bottomleft = r.bottomleft
bottomright = r.bottomright
left = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topleft[0],
bottomleft[1])
right = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topright[0], topright[1],
topright[0], bottomright[1])
top = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topright[0],
topright[1])
bottom = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, bottomleft[0], bottomleft[1],
bottomright[0], bottomright[1])
if left or right or top or bottom:
return True
return False
def rot_towards_target(self, target_dist):
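# Turn gradually toward the target: step the rotation by ENEMY_ROTATION_SPEED each call, and snap to the exact target angle once the sprite is within a few degrees of facing it.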
rotT = target_dist.angle_to(vec(1, 0))
angle = math.atan2(-target_dist.x, -target_dist.y)/math.pi * 180.0
diff = (angle - self.rot - 90) % 360
if 175 < int(diff) < 183:
rot = rotT
elif diff > 180:
rot = self.rot + ENEMY_ROTATION_SPEED
else:
rot = self.rot - ENEMY_ROTATION_SPEED
return rot
def move(self):
target_dist = self.last_known - self.pos
self.rot = self.rot_towards_target(target_dist)
# self.rot = target_dist.angle_to(vec(1, 0))
self.acc = vec(1, 0).rotate(-self.rot)
self.avoid_mobs()
try:
self.acc.scale_to_length(ENEMY_SPEED)
except:
pass
self.acc += self.vel * -1.5
self.vel += self.acc * self.game.dt
self.pos += self.vel * self.game.dt + 0.5 * self.acc * self.game.dt ** 2
self.hit_rect.centerx = self.pos.x
collide_with_walls(self, self.game.walls, "x")
collide_with_walls(self, self.game.windows, "x")
self.hit_rect.centery = self.pos.y
collide_with_walls(self, self.game.walls, "y")
collide_with_walls(self, self.game.windows, "y")
self.rect.center = self.hit_rect.center
def update(self):
self.target = self.game.player
closest = self.target.pos - self.pos
for a in self.game.ally:
target_dist = a.pos - self.pos
if target_dist.length() < closest.length():
closest = target_dist
self.target = a
target_dist = closest
if target_dist.length_squared() < (WEAPONS[self.weapon]['detect_radius'] - NIGHT_RADIUS) ** 2:
if self.line_collide() is False:
self.moving = False
self.vel = vec(0, 0)
self.last_known = [int(self.target.pos.x), int(self.target.pos.y)]
pos = self.pos + BARREL_OFFSET.rotate(-self.rot)
target_distA = self.target.pos - pos
self.rot = self.rot_towards_target(target_distA)
self.image = pg.transform.rotate(self.game.enemy_images[self.weapon], self.rot)
self.rect = self.image.get_rect()
self.rect.center = self.pos
rot = target_dist.angle_to(vec(1, 0))
if self.rot - 20 < rot < self.rot + 20:
self.shoot()
else:
self.moving = True
self.image = pg.transform.rotate(self.game.enemy_images[self.weapon], self.rot)
else:
self.moving = True
self.image = pg.transform.rotate(self.game.enemy_images[self.weapon], self.rot)
pos = [int(self.pos.x), int(self.pos.y)]
if ((pos[0] - 5 < self.last_known[0]) and (pos[1] - 5 < self.last_known[1])) and ((pos[0] + 5 > self.last_known[0]) and (pos[1] + 5 > self.last_known[1])):
self.moving = False
if self.moving and self.weapon != "mini":
self.move()
if self.health <= 0:
random.choice(self.game.enemy_hit_sounds).play()
self.kill()
self.game.map_img.blit(self.game.blood, self.pos - vec(TILESIZE / 2, TILESIZE / 2))
self.game.kills += 1
def shoot(self):
now = pg.time.get_ticks()
if now - self.last_shot > WEAPONS[self.weapon]['rate']:
self.last_shot = now
dir = vec(1, 0).rotate(-self.rot)
pos = self.pos + BARREL_OFFSET.rotate(-self.rot)
for a in range(WEAPONS[self.weapon]['bullet_count']):
spread = random.uniform(-WEAPONS[self.weapon]['spread'], WEAPONS[self.weapon]['spread'])
Bullet(self.game, pos, dir.rotate(spread), self.rot, self.weapon)
snd = random.choice(self.game.weapon_sounds[self.weapon])
if snd.get_num_channels() > 2:
snd.stop()
snd.play()
MuzzleFlash(self.game, pos, self.rot, self.vel)
#self.vel = vec(-WEAPONS[self.weapon]['kickback']).rotate(-self.rot)
def draw_health(self):
if self.health > int((self.maxHealth / 3) * 2):
col = GREEN
elif self.health > int(self.maxHealth / 3):
col = YELLOW
else:
col = RED
width = int(self.rect.width * self.health / self.maxHealth)
self.health_bar = pg.Rect(0, 0, width, 7)
if self.health < self.maxHealth:
pg.draw.rect(self.image, col, self.health_bar)
class Ally(pg.sprite.Sprite):
def __init__(self, game, x, y, weapon, last_known=None):
self._layer = ENEMY_LAYER
self.groups = game.all_sprites, game.ally
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = game.player_images[weapon].copy()
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.hit_rect = ENEMY_HIT_RECT.copy()
self.hit_rect.center = self.rect.center
self.pos = vec(x, y)
self.rect.center = self.pos
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.rot = 0
self.health = ENEMY_HEALTH
self.last_shot = 0
self.target = game.enemies
if last_known is None:
self.moving = False
self.last_known = [int(self.pos.x), int(self.pos.y)]
else:
self.moving = True
self.last_known = last_known
self.weapon = weapon
self.selected = False
def draw_hit_box(self):
hit_box = self.hit_rect.move(self.game.camera.camera.topleft)
pg.draw.rect(self.game.screen, WHITE, hit_box, 2)
def avoid_mobs(self):
for enemy in self.game.enemies:
if enemy != self:
dist = self.pos - enemy.pos
if 0 < dist.length() < AVOID_RADIUS:
self.acc += dist.normalize()
def lineLine(self, x1, y1, x2, y2, x3, y3, x4, y4):
uA = ((x4-x3)*(y1-y3) - (y4-y3)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1))
uB = ((x2-x1)*(y1-y3) - (y2-y1)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1))
if (uA >= 0 and uA <= 1 and uB >= 0 and uB <= 1):
return True
else:
return False
def line_collide(self,):
target_dist = self.target.pos - self.pos
rot = target_dist.angle_to(vec(1, 0))
pos = self.pos + BARREL_OFFSET.rotate(-rot)
for a in self.game.walls:
r = a.rect.copy()
topleft = r.topleft
topright = r.topright
bottomleft = r.bottomleft
bottomright = r.bottomright
left = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topleft[0], bottomleft[1])
right = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topright[0], topright[1], topright[0], bottomright[1])
top = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topright[0], topright[1])
bottom = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, bottomleft[0], bottomleft[1], bottomright[0], bottomright[1])
if left or right or top or bottom:
return True
for a in self.game.ally:
r = a.hit_rect.copy()
topleft = r.topleft
topright = r.topright
bottomleft = r.bottomleft
bottomright = r.bottomright
left = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topleft[0],
bottomleft[1])
right = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topright[0], topright[1],
topright[0], bottomright[1])
top = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, topleft[0], topleft[1], topright[0],
topright[1])
bottom = self.lineLine(pos.x, pos.y, self.target.pos.x, self.target.pos.y, bottomleft[0], bottomleft[1],
bottomright[0], bottomright[1])
if left or right or top or bottom:
return True
return False
def rot_towards_target(self, target_dist):
rotT = target_dist.angle_to(vec(1, 0))
angle = math.atan2(-target_dist.x, -target_dist.y)/math.pi * 180.0
diff = (angle - self.rot - 90) % 360
if 175 < int(diff) < 183:
rot = rotT
elif diff > 180:
rot = self.rot + ENEMY_ROTATION_SPEED
else:
rot = self.rot - ENEMY_ROTATION_SPEED
return rot
def update(self):
closest = vec(9999, 9999)
for a in self.game.enemies:
target_dist = a.pos - self.pos
if target_dist.length() < closest.length():
closest = target_dist
self.target = a
if closest.length_squared() < (WEAPONS[self.weapon]['detect_radius'] - NIGHT_RADIUS) ** 2:
if self.line_collide() is False:
self.moving = False
self.vel = vec(0, 0)
self.last_known = [int(self.target.pos.x), int(self.target.pos.y)]
pos = self.pos + BARREL_OFFSET.rotate(-self.rot)
target_distA = self.target.pos - pos
self.rot = self.rot_towards_target(target_distA)
self.image = pg.transform.rotate(self.game.player_images[self.weapon], self.rot)
self.rect = self.image.get_rect()
self.rect.center = self.pos
rot = closest.angle_to(vec(1, 0))
if self.rot - 20 < rot < self.rot + 20:
self.shoot()
else:
self.moving = True
self.image = pg.transform.rotate(self.game.player_images[self.weapon], self.rot)
else:
self.moving = True
self.image = pg.transform.rotate(self.game.player_images[self.weapon], self.rot)
pos = [int(self.pos.x), int(self.pos.y)]
if ((pos[0] - 5 < self.last_known[0]) and (pos[1] - 5 < self.last_known[1])) and ((pos[0] + 5 > self.last_known[0]) and (pos[1] + 5 > self.last_known[1])):
self.moving = False
self.moving = False
if self.moving:
target_dist = self.last_known - self.pos
self.rot = self.rot_towards_target(target_dist)
#self.rot = target_dist.angle_to(vec(1, 0))
self.acc = vec(1, 0).rotate(-self.rot)
self.avoid_mobs()
try:
self.acc.scale_to_length(ENEMY_SPEED)
except:
pass
self.acc += self.vel * -1.5
self.vel += self.acc * self.game.dt
self.pos += self.vel * self.game.dt + 0.5 * self.acc * self.game.dt ** 2
self.hit_rect.centerx = self.pos.x
collide_with_walls(self, self.game.walls, "x")
collide_with_walls(self, self.game.windows, "x")
self.hit_rect.centery = self.pos.y
collide_with_walls(self, self.game.walls, "y")
collide_with_walls(self, self.game.windows, "y")
self.rect.center = self.hit_rect.center
if self.health <= 0:
random.choice(self.game.enemy_hit_sounds).play()
self.kill()
self.game.map_img.blit(self.game.blood, self.pos - vec(TILESIZE / 2, TILESIZE / 2))
self.game.deaths += 1
def shoot(self):
now = pg.time.get_ticks()
if now - self.last_shot > WEAPONS[self.weapon]['rate']:
self.last_shot = now
dir = vec(1, 0).rotate(-self.rot)
pos = self.pos + BARREL_OFFSET.rotate(-self.rot)
for a in range(WEAPONS[self.weapon]['bullet_count']):
spread = random.uniform(-WEAPONS[self.weapon]['spread'], WEAPONS[self.weapon]['spread'])
Bullet(self.game, pos, dir.rotate(spread), self.rot, self.weapon)
snd = random.choice(self.game.weapon_sounds[self.weapon])
if snd.get_num_channels() > 2:
snd.stop()
snd.play()
MuzzleFlash(self.game, pos, self.rot, self.vel)
#self.vel = vec(-WEAPONS[self.weapon]['kickback']).rotate(-self.rot)
def draw_health(self):
if self.health > 60:
col = GREEN
elif self.health > 30:
col = YELLOW
else:
col = RED
width = int(self.rect.width * self.health / ENEMY_HEALTH)
self.health_bar = pg.Rect(0, 0, width, 7)
if self.health < ENEMY_HEALTH:
pg.draw.rect(self.image, col, self.health_bar)
class Bullet(pg.sprite.Sprite):
def __init__(self, game, pos, dir, angle, weapon):
self._layer = BULLET_LAYER
self.groups = game.all_sprites, game.bullets
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.rot = angle
self.image = game.bullet_images[WEAPONS[weapon]['bullet_size']]
self.image = pg.transform.rotate(self.image, self.rot)
self.rect = self.image.get_rect()
self.pos = vec(pos)
self.rect.center = pos
#spread = random.uniform(-GUN_SPREAD, GUN_SPREAD)
self.vel = dir * WEAPONS[weapon]['bullet_speed'] * random.uniform(0.9, 1.1)  # use the firing weapon's speed (not always the player's)
self.spawn_time = pg.time.get_ticks()
self.weapon = weapon
def update(self):
self.pos += self.vel * self.game.dt
self.rect.center = self.pos
if (pg.time.get_ticks() - self.spawn_time > WEAPONS[self.weapon]['bullet_lifetime']) or (pg.sprite.spritecollideany(self, self.game.walls)):
self.kill()
class Obstacle(pg.sprite.Sprite):
def __init__(self, game, x, y, w, h, type):
self._layer = WALL_LAYER
if type == "Wall":
self.groups = game.walls
elif type == "Window":
self.groups = game.windows
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.rect = pg.Rect(x, y, w, h)
self.x = x
self.y = y
self.rect.x = x
self.rect.y = y
class MuzzleFlash(pg.sprite.Sprite):
def __init__(self, game, pos, rot, vel):
self._layer = EFFECTS_LAYER
self.groups = game.all_sprites
self.game = game
pg.sprite.Sprite.__init__(self, self.groups)
size = random.randint(20, 50)
self.image = pg.transform.scale(game.gun_flash, (size * 2, size))
self.image = pg.transform.rotate(self.image, rot)
self.rect = self.image.get_rect()
self.pos = pos
self.rect.center = pos
self.vel = vel
self.spawn_time = pg.time.get_ticks()
def update(self):
if pg.time.get_ticks() - self.spawn_time > FLASH_DURATION:
self.kill()
self.rect.center += self.vel * self.game.dt
class Item(pg.sprite.Sprite):
def __init__(self, game, pos, type):
self._layer = ITEMS_LAYER
self.groups = game.all_sprites, game.items
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = game.item_images[type]
self.image = pg.transform.scale(self.image, ITEM_SIZES[type])
self.rect = self.image.get_rect()
self.rect.center = pos
self.pos = pos
self.type = type
self.tween = tween.easeInOutSine
self.step = 0
self.dir = 1
def update(self):
offset = BOB_RANGE * (self.tween(self.step / BOB_RANGE) - 0.5)
self.rect.centery = self.pos.y + offset * self.dir
self.step += BOB_SPEED
if self.step > BOB_RANGE:
self.step = 0
self.dir *= -1
```
#### File: Home-Projects/Shooter/test.py
```python
class Class1:
def __init__(self, obj):
self.obj = obj
def print(self):
print(self.obj.x)
class Class2:
def __init__(self):
self.x = 5
g = Class2()
h = Class1(g)
h.print()
```
#### File: Home-Projects/WireWorld/camera.py
```python
import pygame as pg
from settings import *
def collide_rect(one, two):
return one.rect.colliderect(two.rect)
class Mouse:
def __init__(self):
self.x = 0
self.y = 0
def update(self):
mouse = pg.mouse.get_pos()
self.x = mouse[0]
self.y = mouse[1]
class Player:
def __init__(self, x, y):
self.rect = pg.Rect(x, y, camera_width, camera_height)
self.x = self.rect.centerx
self.y = self.rect.centery
def update(self):
if self.x >= camera_width / 2 and self.x < ww - camera_width / 2:
self.rect.centerx = self.x
else:
self.x = self.rect.centerx
if self.y > camera_height / 2 and self.y < wh - camera_height / 2:
self.rect.centery = self.y
else:
self.y = self.rect.centery
class Camera:
def __init__(self, width, height):
self.camera = pg.Rect(0, 0, width, height)
self.width = width
self.height = height
def apply(self, entity):
return entity.rect.move(self.camera.topleft)
def apply_rect(self, rect):
return rect.move(self.camera.topleft)
def apply_mouse_rect(self, rect):
tops = [-self.camera.topleft[0], -self.camera.topleft[1]]
return rect.move(tops)
def update(self, target):
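# Centre the camera on the target, then clamp the offset with min/max so the view never scrolls past the edges of the map.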
x = -target.rect.centerx + int(camera_width / 2)
y = -target.rect.centery + int(camera_height / 2)
x = min(0, x)
y = min(0, y)
x = max(-(self.width - camera_width), x)
y = max(-(self.height - camera_height), y)
self.camera = pg.Rect(x, y, self.width, self.height)
```
#### File: Home-Projects/WireWorld/Tile.py
```python
import pygame as pg
from settings import *
class Tile(pg.sprite.Sprite):
def __init__(self, x, y, game, state="none"):
self.group = game.tiles
pg.sprite.Sprite.__init__(self, self.group)
self.x = x
self.y = y
self.size = tile_size
self.rect = pg.Rect(x, y, self.size, self.size)
self.game = game
self.state = state
self.color = states[state]
def update(self):
pass
``` |
{
"source": "jonasfreyr/Lokaverkefni-Forritun",
"score": 3
} |
#### File: Lokaverkefni-Forritun/Lokaverkefni/Lokaverkefni.py
```python
from tkinter import *
import math,random
# Function that handles the pyramid (triangle) section
def thry():
for a in root.winfo_children():
a.destroy()
# Function that calculates the volume of a pyramid
def rummal():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
lengd = float(entry_1.get())
breidd = float(entry_2.get())
haed = float(entry_3.get())
svar = ((lengd * breidd) * haed)/3
label_4.configure(text = round(svar,2))
label_1 = Label(root, text = " Sláðu inn Lengd ")
label_2 = Label(root, text = " Sláðu inn Breidd ")
label_3 = Label(root, text = " Sláðu inn Hæð ")
label_4 = Label(root, text = "")
label_5 = Label(root, text = "Svar:")
entry_1 = Entry(root, textvariable = StringVar())
entry_2 = Entry(root, textvariable=StringVar())
entry_3 = Entry(root, textvariable=StringVar())
label_1.grid(row = 1, column = 1)
label_2.grid(row=2, column=1)
label_3.grid(row=3, column=1)
label_4.grid(row=4, column=2)
label_5.grid(row=4, column=1)
entry_1.grid(row = 1, column = 2)
entry_2.grid(row=2, column=2)
entry_3.grid(row=3, column=2)
button_1 = Button(root, text = " Reikna ", command = reikna)
button_1.grid(columnspan = 2)
button_2 = Button(root, text = " Til baka ",command = thry)
button_2.grid(columnspan = 3)
# Function that calculates the surface area
def yfirflat():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
lengd = float(entry_1.get())
breidd = float(entry_2.get())
haed = float(entry_3.get())
svar = lengd * breidd
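# Slant heights of the two pairs of triangular faces, via Pythagoras on half the base edge and the pyramid height.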
c = math.sqrt(math.pow((lengd/2),2)+math.pow(haed,2))
c2 = math.sqrt(math.pow((breidd / 2), 2) + math.pow(haed, 2))
svar = svar + (((c * breidd)/2)*2) + (((c2 * lengd)/2)*2)
label_4.configure(text = round(svar,2))
label_1 = Label(root, text=" Sláðu inn Lengd ")
label_2 = Label(root, text=" Sláðu inn Breidd ")
label_3 = Label(root, text=" Sláðu inn Hæð ")
label_4 = Label(root, text="")
label_5 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
entry_2 = Entry(root, textvariable=StringVar())
entry_3 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=1)
label_3.grid(row=3, column=1)
label_4.grid(row=4, column=2)
label_5.grid(row=4, column=1)
entry_1.grid(row=1, column=2)
entry_2.grid(row=2, column=2)
entry_3.grid(row=3, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=thry)
button_2.grid(columnspan=3)
# Function that calculates Pythagoras (right-triangle sides)
def pygor():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def py():
# tala1 is what is in entry_1
tala1 = float(entry_1.get())
# tala2 is what is in entry_2
tala2 = float(entry_2.get())
# tala3 is what is in entry_3
tala3 = float(entry_3.get())
# If tala1, tala2 and tala3 are all greater than 0
if tala1 > 0 and tala2 > 0 and tala3 > 0:
# Change what is in label_4 and set the string
label_4.configure(text="Ein hlið þarf að vera óþekkt")
# If any two of the numbers are 0
elif (tala1 == 0 and tala2 == 0) or (tala1 == 0 and tala3 == 0) or (tala2 == 0 and tala3 == 0):
# Change what is in label_4 and set the string
label_4.configure(text="Villa meiri en ein hlið er með lengdina 0")
# If tala3 is greater than 0 but less than or equal to either tala1 or tala2
elif (tala3 <= tala1 or tala3 <= tala2) and tala3 > 0:
# Change what is in label_4 and set the string
label_4.configure(text="Langhlið getur ekki verið minni eða sama og skammhlið")
# If tala3 is 0
elif tala3 == 0:
# Calculate Pythagoras
utkoma = math.sqrt((math.pow(tala1, 2)) + (math.pow(tala2, 2)))
# Change what is in label_4 and show utkoma
label_4.configure(text=round(utkoma, 2))
elif tala2 == 0:
# Calculate Pythagoras
utkoma = math.sqrt((math.pow(tala3, 2)) - (math.pow(tala1, 2)))
# Change what is in label_4 and show utkoma
label_4.configure(text=round(utkoma, 2))
elif tala1 == 0:
# Calculate Pythagoras
utkoma = math.sqrt((math.pow(tala3, 2)) - (math.pow(tala2, 2)))
# Change what is in label_4 and show utkoma
label_4.configure(text=round(utkoma, 2))
# label_1 is a text label
label_1 = Label(root, text="Sláðu inn skammhlið eitt ")
# label_2 is a text label
label_2 = Label(root, text="Sláðu inn skammhlið tvö ")
# label_3 is a text label
label_3 = Label(root, text="Sláðu inn langhlið ")
# label_5 is a text label
label_5 = Label(root, text="Óþekkta hliðin: ")
# label_6 is a text label
label_6 = Label(root, text="Hliðin sem þú veist ekki slærðu inn 0")
# texti is a variable string
texti = StringVar()
# texti2 is a variable string
texti2 = StringVar()
# texti3 is a variable string
texti3 = StringVar()
# entry_1 is an input that takes a variable string
entry_1 = Entry(root, textvariable=texti)
# entry_2 is an input that takes a variable string
entry_2 = Entry(root, textvariable=texti2)
# entry_3 is an input that takes a variable string
entry_3 = Entry(root, textvariable=texti3)
# texti is what is in entry_1
texti = entry_1.get()
# label_4 is an empty text label
label_4 = Label(root, text="")
# Place all the widgets (labels, inputs, ...)
label_6.grid(columnspan=2)
label_1.grid(row=1, sticky=E)
label_2.grid(row=2, sticky=E)
label_3.grid(row=3, sticky=E)
label_5.grid(row=4, sticky=E)
entry_1.grid(row=1, column=1)
entry_2.grid(row=2, column=1)
entry_3.grid(row=3, column=1)
label_4.grid(row=4, column=1)
# button_1 is a button that calls the function py
button_1 = Button(root, text=" <NAME> ", command=py)
# Place the button
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=thry)
button_2.grid(columnspan=3)
# The program runs in a loop
label_1 = Label(root, text="Veldu hvað þú vilt gera")
button_1 = Button(root, text=" Rúmmál ", command=rummal)
button_2 = Button(root, text=" Yfirborðsflatarmál ", command=yfirflat)
button_3 = Button(root, text=" Pythogoras ", command=pygor)
button_4 = Button(root, text=" Til baka ", command=home)
button_1.grid(row=1, column=1)
button_2.grid(row=1, column=2)
button_3.grid(row=1, column=3)
button_4.grid(row=1, column=4)
label_1.grid(columnspan=5)
# Function that handles the sphere section
def kula():
for a in root.winfo_children():
a.destroy()
# Function that calculates the volume
def rummal():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
radius = float(entry_1.get())
svar = (4*math.pi*radius**3)/3
label_2.configure(text=round(svar, 2))
label_1 = Label(root, text=" Sláðu inn radíus ")
label_2 = Label(root, text="")
label_3 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=2)
label_3.grid(row=2, column=1)
entry_1.grid(row=1, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=kula)
button_2.grid(columnspan=3)
# Function that calculates the surface area
def yfirflat():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
radius = float(entry_1.get())
svar = (4 * math.pi * radius ** 2)
label_2.configure(text=round(svar, 2))
label_1 = Label(root, text=" Sláðu inn radíus ")
label_2 = Label(root, text="")
label_3 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=2)
label_3.grid(row=2, column=1)
entry_1.grid(row=1, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=kula)
button_2.grid(columnspan=3)
# Function that calculates the area
def flatar():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
radius = float(entry_1.get())
svar = (math.pi * radius ** 2)
label_2.configure(text=round(svar, 2))
label_1 = Label(root, text=" Sláðu inn radíus ")
label_2 = Label(root, text="")
label_3 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=2)
label_3.grid(row=2, column=1)
entry_1.grid(row=1, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=kula)
button_2.grid(columnspan=3)
label_1 = Label(root, text="Veldu hvað þú vilt gera")
button_1 = Button(root, text=" Rúmmál ", command=rummal)
button_2 = Button(root, text=" Yfirborðsflatarmál ", command=yfirflat)
button_3 = Button(root, text=" Flatarmál Hrings ", command=flatar)
button_4 = Button(root, text=" Til baka ", command=home)
button_1.grid(row=1, column=1)
button_2.grid(row=1, column=2)
button_3.grid(row=1, column=3)
button_4.grid(row=1, column=4)
label_1.grid(columnspan=5)
# Function that handles the box section
def kassi():
for a in root.winfo_children():
a.destroy()
# Function that calculates the area
def flatar():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
lengd = float(entry_1.get())
breidd = float(entry_2.get())
svar = lengd * breidd
label_3.configure(text=round(svar, 2))
label_1 = Label(root, text=" Sláðu inn Lengd ")
label_2 = Label(root, text=" Sláðu inn Breidd ")
label_3 = Label(root, text="")
label_4 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
entry_2 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=1)
label_4.grid(row=3, column=1)
label_3.grid(row=3, column=2)
entry_1.grid(row=1, column=2)
entry_2.grid(row=2, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=kassi)
button_2.grid(columnspan=3)
# Function that calculates the volume
def rummal():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
lengd = float(entry_1.get())
breidd = float(entry_2.get())
haed = float(entry_3.get())
svar = lengd * breidd * haed
label_4.configure(text = round(svar,2))
label_1 = Label(root, text=" Sláðu inn Lengd ")
label_2 = Label(root, text=" Sláðu inn Breidd ")
label_3 = Label(root, text=" Sláðu inn Hæð ")
label_4 = Label(root, text="")
label_5 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
entry_2 = Entry(root, textvariable=StringVar())
entry_3 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=1)
label_3.grid(row=3, column=1)
label_4.grid(row=4, column=2)
label_5.grid(row=4, column=1)
entry_1.grid(row=1, column=2)
entry_2.grid(row=2, column=2)
entry_3.grid(row=3, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=kassi)
button_2.grid(columnspan=3)
# Function that calculates the surface area
def yfirflat():
for a in root.winfo_children():
a.destroy()
# Function that performs the calculation
def reikna():
lengd = float(entry_1.get())
breidd = float(entry_2.get())
haed = float(entry_3.get())
hl_1 = lengd * breidd * 2
hl_2 = lengd * haed *2
hl_3 = breidd * haed * 2
svar = hl_1 + hl_2 + hl_3
label_4.configure(text=round(svar, 2))
label_1 = Label(root, text=" Sláðu inn Lengd ")
label_2 = Label(root, text=" Sláðu inn Breidd ")
label_3 = Label(root, text=" Sláðu inn Hæð ")
label_4 = Label(root, text="")
label_5 = Label(root, text="Svar:")
entry_1 = Entry(root, textvariable=StringVar())
entry_2 = Entry(root, textvariable=StringVar())
entry_3 = Entry(root, textvariable=StringVar())
label_1.grid(row=1, column=1)
label_2.grid(row=2, column=1)
label_3.grid(row=3, column=1)
label_4.grid(row=4, column=2)
label_5.grid(row=4, column=1)
entry_1.grid(row=1, column=2)
entry_2.grid(row=2, column=2)
entry_3.grid(row=3, column=2)
button_1 = Button(root, text=" Reikna ", command=reikna)
button_1.grid(columnspan=2)
button_2 = Button(root, text=" Til baka ", command=kassi)
button_2.grid(columnspan=3)
label_1 = Label(root, text = " Veldu hvað þú vilt gera ")
button_1 = Button(root, text=" Rúmmál ", command=rummal)
button_2 = Button(root, text=" Yfirborðsflatarmál ", command=yfirflat)
button_3 = Button(root, text="Flatarmál ferhyrnings", command=flatar)
button_4 = Button(root, text=" Til baka ", command=home)
button_1.grid(row=1, column=1)
button_2.grid(row=1, column=2)
button_3.grid(row=1, column=3)
button_4.grid(row=1, column=4)
label_1.grid(columnspan=5)
# Function that handles the home screen
def home():
for a in root.winfo_children():
a.destroy()
label_1 = Label(root, text = "Veldu hvað þú vilt gera")
button_1 = Button(root, text=" Pýramídi ", command = thry)
button_2 = Button(root, text=" Kassi ", command = kassi)
button_3 = Button(root, text=" Kúla ", command = kula)
button_4 = Button(root, text=" Leikir ", command = leikir)
button_1.grid(row = 1, column = 1)
button_2.grid(row = 1, column = 2)
button_3.grid(row = 1, column = 3)
button_4.grid(row = 1, column = 4)
label_1.grid(columnspan=5)
# Function that handles the games
def leikir():
for a in root.winfo_children():
a.destroy()
global Owin
global Xwin
global ai
Owin = 0
Xwin = 0
ai = False
# Class that runs when a button is pressed in tic-tac-toe
class push:
def __init__(self):
for a in root.winfo_children():
a.destroy()
# If button 1 was chosen
def p1():
global label_p1
if label_p1 == " ":
global tel
if tel == "X":
label_p1 = "X"
tel = "O"
elif tel == "O":
label_p1 = "O"
tel = "X"
homeG()
# If button 2 was chosen
def p2():
global label_p2
if label_p2 == " ":
global tel
if tel == "X":
label_p2 = "X"
tel = "O"
elif tel == "O":
label_p2 = "O"
tel = "X"
homeG()
# If button 3 was chosen
def p3():
global label_p3
if label_p3 == " ":
global tel
if tel == "X":
label_p3 = "X"
tel = "O"
elif tel == "O":
label_p3 = "O"
tel = "X"
homeG()
# If button 4 was chosen
def p4():
global label_p4
if label_p4 == " ":
global tel
if tel == "X":
label_p4 = "X"
tel = "O"
elif tel == "O":
label_p4 = "O"
tel = "X"
homeG()
# If button 5 was chosen
def p5():
global label_p5
if label_p5 == " ":
global tel
if tel == "X":
label_p5 = "X"
tel = "O"
elif tel == "O":
label_p5 = "O"
tel = "X"
homeG()
# If button 6 was chosen
def p6():
global label_p6
if label_p6 == " ":
global tel
if tel == "X":
label_p6 = "X"
tel = "O"
elif tel == "O":
label_p6 = "O"
tel = "X"
homeG()
# If button 7 was chosen
def p7():
global label_p7
if label_p7 == " ":
global tel
if tel == "X":
label_p7 = "X"
tel = "O"
elif tel == "O":
label_p7 = "O"
tel = "X"
homeG()
# If button 8 was chosen
def p8():
global label_p8
if label_p8 == " ":
global tel
if tel == "X":
label_p8 = "X"
tel = "O"
elif tel == "O":
label_p8 = "O"
tel = "X"
homeG()
# If button 9 was chosen
def p9():
global label_p9
if label_p9 == " ":
global tel
if tel == "X":
label_p9 = "X"
tel = "O"
elif tel == "O":
label_p9 = "O"
tel = "X"
homeG()
# Class for when you are playing against the computer
class ai_turn:
def __init__(self):
for a in root.winfo_children():
a.destroy()
# If button 1 was chosen
def p1():
global label_p1
if label_p1 == " ":
label_p1 = "O"
listi.remove(1)
turn_ai()
# If button 2 was chosen
def p2():
global label_p2
if label_p2 == " ":
label_p2 = "O"
listi.remove(2)
turn_ai()
# If button 3 was chosen
def p3():
global label_p3
if label_p3 == " ":
label_p3 = "O"
listi.remove(3)
turn_ai()
# If button 4 was chosen
def p4():
global label_p4
if label_p4 == " ":
label_p4 = "O"
listi.remove(4)
turn_ai()
# If button 5 was chosen
def p5():
global label_p5
if label_p5 == " ":
label_p5 = "O"
listi.remove(5)
turn_ai()
# If button 6 was chosen
def p6():
global label_p6
if label_p6 == " ":
label_p6 = "O"
listi.remove(6)
turn_ai()
# If button 7 was chosen
def p7():
global label_p7
if label_p7 == " ":
label_p7 = "O"
listi.remove(7)
turn_ai()
# If button 8 was chosen
def p8():
global label_p8
if label_p8 == " ":
label_p8 = "O"
listi.remove(8)
turn_ai()
# If button 9 was chosen
def p9():
global label_p9
if label_p9 == " ":
label_p9 = "O"
listi.remove(9)
turn_ai()
# Function for the computer's move
def turn_ai():
global listi
global label_p1
global label_p2
global label_p3
global label_p4
global label_p5
global label_p6
global label_p7
global label_p8
global label_p9
# Try
try:
val = random.choice(listi)
# If the user has already won
if (label_p1 == "O" and label_p2 == "O" and label_p3 == "O") or (
label_p4 == "O" and label_p5 == "O" and label_p6 == "O") or (
label_p7 == "O" and label_p8 == "O" and label_p9 == "O") or (
label_p1 == "O" and label_p4 == "O" and label_p7 == "O") or (
label_p2 == "O" and label_p5 == "O" and label_p8 == "O") or (
label_p3 == "O" and label_p6 == "O" and label_p9 == "O") or (
label_p1 == "O" and label_p5 == "O" and label_p9 == "O") or (
label_p3 == "O" and label_p5 == "O" and label_p7 == "O"):
vinner("O vinnur!")
elif val == 1:
label_p1 = "X"
elif val == 2:
label_p2 = "X"
elif val == 3:
label_p3 = "X"
elif val == 4:
label_p4 = "X"
elif val == 5:
label_p5 = "X"
elif val == 6:
label_p6 = "X"
elif val == 7:
label_p7 = "X"
elif val == 8:
label_p8 = "X"
elif val == 9:
label_p9 = "X"
listi.remove(val)
# Otherwise
except:
homeG()
homeG()
# Function to reset the game
def reset():
global listi
listi = [1, 2, 3, 4, 5, 6, 7, 8, 9]
global tel
tel = "O"
global label_p1
label_p1 = " "
global label_p2
label_p2 = " "
global label_p3
label_p3 = " "
global label_p4
label_p4 = " "
global label_p5
label_p5 = " "
global label_p6
label_p6 = " "
global label_p7
label_p7 = " "
global label_p8
label_p8 = " "
global label_p9
label_p9 = " "
homeG()
# Function for when someone has won
def vinner(x):
for a in root.winfo_children():
a.destroy()
r1c1 = Button(root, text=label_p1, command="")
r1c2 = Button(root, text=label_p2, command="")
r1c3 = Button(root, text=label_p3, command="")
r2c1 = Button(root, text=label_p4, command="")
r2c2 = Button(root, text=label_p5, command="")
r2c3 = Button(root, text=label_p6, command="")
r3c1 = Button(root, text=label_p7, command="")
r3c2 = Button(root, text=label_p8, command="")
r3c3 = Button(root, text=label_p9, command="")
label = Label(root, text=x)
res = Button(root, text="Reset", command=reset)
haetta = Button(root, text ="Hætta",command=leikir)
r1c1.grid(row=1, column=1)
r1c2.grid(row=1, column=2)
r1c3.grid(row=1, column=3)
r2c1.grid(row=2, column=1)
r2c2.grid(row=2, column=2)
r2c3.grid(row=2, column=3)
r3c1.grid(row=3, column=1)
r3c2.grid(row=3, column=2)
r3c3.grid(row=3, column=3)
label.grid(columnspan=5)
res.grid(columnspan=5)
haetta.grid(columnspan=5)
# Function for choosing to play against the computer
def tolva():
global ai
ai = True
homeG()
# Function for choosing to play against another player
def leikmann():
global ai
ai = False
homeG()
# Function that asks whether you want to play against the computer or another player
def homeT():
for a in root.winfo_children():
a.destroy()
button_1 = Button(root, text="Tölva", command=tolva)
button_2 = Button(root, text="Leikmann", command=leikmann)
label = Label(root, text="Veldu hvor þú vilt spila á móti")
label.grid(columnspan=3)
button_1.grid(row=1, column=1)
button_2.grid(row=1, column=2)
# Main home screen for tic-tac-toe
def homeG():
for a in root.winfo_children():
a.destroy()
if ai == False:
r1c1 = Button(root, text=label_p1, command=push.p1)
r1c2 = Button(root, text=label_p2, command=push.p2)
r1c3 = Button(root, text=label_p3, command=push.p3)
r2c1 = Button(root, text=label_p4, command=push.p4)
r2c2 = Button(root, text=label_p5, command=push.p5)
r2c3 = Button(root, text=label_p6, command=push.p6)
r3c1 = Button(root, text=label_p7, command=push.p7)
r3c2 = Button(root, text=label_p8, command=push.p8)
r3c3 = Button(root, text=label_p9, command=push.p9)
elif ai == True:
r1c1 = Button(root, text=label_p1, command=ai_turn.p1)
r1c2 = Button(root, text=label_p2, command=ai_turn.p2)
r1c3 = Button(root, text=label_p3, command=ai_turn.p3)
r2c1 = Button(root, text=label_p4, command=ai_turn.p4)
r2c2 = Button(root, text=label_p5, command=ai_turn.p5)
r2c3 = Button(root, text=label_p6, command=ai_turn.p6)
r3c1 = Button(root, text=label_p7, command=ai_turn.p7)
r3c2 = Button(root, text=label_p8, command=ai_turn.p8)
r3c3 = Button(root, text=label_p9, command=ai_turn.p9)
res = Button(root, text="Reset", command=reset)
global Owin
global Xwin
lable_O = Label(root, text="Sigrar O: " + str(Owin))
lable_X = Label(root, text="Sigrar X: " + str(Xwin))
r1c1.grid(row=1, column=1)
r1c2.grid(row=1, column=2)
r1c3.grid(row=1, column=3)
r2c1.grid(row=2, column=1)
r2c2.grid(row=2, column=2)
r2c3.grid(row=2, column=3)
r3c1.grid(row=3, column=1)
r3c2.grid(row=3, column=2)
r3c3.grid(row=3, column=3)
res.grid(columnspan=4)
lable_O.grid(row=1, column=4)
lable_X.grid(row=2, column=4)
if (label_p1 == "X" and label_p2 == "X" and label_p3 == "X") or (
label_p4 == "X" and label_p5 == "X" and label_p6 == "X") or (
label_p7 == "X" and label_p8 == "X" and label_p9 == "X") or (
label_p1 == "X" and label_p4 == "X" and label_p7 == "X") or (
label_p2 == "X" and label_p5 == "X" and label_p8 == "X") or (
label_p3 == "X" and label_p6 == "X" and label_p9 == "X") or (
label_p1 == "X" and label_p5 == "X" and label_p9 == "X") or (
label_p3 == "X" and label_p5 == "X" and label_p7 == "X"):
vinner("X Vinnur!")
Xwin += 1
elif (label_p1 == "O" and label_p2 == "O" and label_p3 == "O") or (
label_p4 == "O" and label_p5 == "O" and label_p6 == "O") or (
label_p7 == "O" and label_p8 == "O" and label_p9 == "O") or (
label_p1 == "O" and label_p4 == "O" and label_p7 == "O") or (
label_p2 == "O" and label_p5 == "O" and label_p8 == "O") or (
label_p3 == "O" and label_p6 == "O" and label_p9 == "O") or (
label_p1 == "O" and label_p5 == "O" and label_p9 == "O") or (
label_p3 == "O" and label_p5 == "O" and label_p7 == "O"):
vinner("O Vinnur!")
Owin += 1
elif label_p1 != " " and label_p2 != " " and label_p3 != " " and label_p4 != " " and label_p5 != " " and label_p6 != " " and label_p7 != " " and label_p8 != " " and label_p9 != " ":
vinner("<NAME>")
# Function that starts tic-tac-toe
def mylla():
reset()
homeT()
# Function that handles the Planets game
def Planets():
# Function that resets the game
def reset():
for a in root.winfo_children():
a.destroy()
global dot
global canvas
global Sx
global Sy
global wW
global wH
global fY
global speed
global room
global oldroom
global Cy
global Cx
global star
global Seaty
global Seatx
global sol
global dod
global tel
global planets
global rooms
global Dx
global Dy
global Dr
global fuel
global maxfuel
global eydsla
global Etel
Sx = 200
Sy = 200
wW = 1200
wH = 900
Seaty = wH / 2
Seatx = 800
Cx = Seatx
Cy = Seaty
fY = "a"
tel = 0
speed = 7
planets = []
star = []
rooms = ["R1", "R2", "R3", "R4"]
oldroom = ""
Dx = random.randint(10, 600)
Dy = random.randint(10, 600)
Dr = "R1"
maxfuel = 100
fuel = 100
eydsla = 20
Etel = 1
sol = []
dod = False
canvas = Canvas(root, width=wW, height=wH, borderwidth=0, highlightthickness=0, bg="black")
dot = canvas.create_circle(Dx, Dy, 3, fill="yellow")
stars(500, 10, 10, wW, wH)
R1()
canvas.bind("<Key>", key)
canvas.bind("<Button-1>", callback)
canvas.pack()
root.wm_title("Planets")
homeS()
# Convenience helper for creating circles
def _create_circle(self, x, y, r, **kwargs):
global canvas
return self.create_oval(x - r, y - r, x + r, y + r, **kwargs)
# To be able to use a command to create a circle
Canvas.create_circle = _create_circle
# Convenience helper for creating pie-slice shapes
def _create_circle_arc(self, x, y, r, **kwargs):
global canvas
if "start" in kwargs and "end" in kwargs:
kwargs["extent"] = kwargs["end"] - kwargs["start"]
del kwargs["end"]
return self.create_arc(x - r, y - r, x + r, y + r, **kwargs)
# To be able to use a command to create a pie-slice shape
Canvas.create_circle_arc = _create_circle_arc
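# Illustrative sketch (not part of the original game): once the two helpers above are
# bound onto Canvas, they can be called like ordinary methods. The coordinates and
# colors below are made-up example values.
#
#   c = Canvas(root, width=200, height=200, bg="black")
#   c.create_circle(100, 100, 40, fill="blue")                              # filled circle
#   c.create_circle_arc(100, 100, 40, fill="green", start=45, end=140)      # pie slice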
# This runs whenever a key is pressed
def key(event):
global canvas
global listi
for a in listi:
canvas.delete(a)
print("pressed", repr(event.char))
global fuel
global maxfuel
global eydsla
global Etel
global Sx
global Sy
global wW
global wH
global fY
global speed
global room
global oldroom
global Cy
global Cx
global star
global Seaty
global Seatx
global sol
global dod
if event.char == "q":
if room == "sR1" and Cx == Seatx and Cy == Seaty:
if oldroom == "R1":
R1()
elif oldroom == "R2":
R2()
elif oldroom == "R3":
R3()
elif oldroom == "R4":
R4()
elif room != "sR1" and room != "sR2" and room != "sR3":
oldroom = room
sR1()
if room == "sR1" or room == "sR2" or room == "sR3":
if event.char == "w":
if Cy > 254:
Cy -= speed
elif event.char == "s":
if Cy < wH - 254:
Cy += speed
elif event.char == "a":
if room != "sR3":
if Cx > 50:
Cx -= speed
else:
if room == "sR2":
sR3()
elif room == "sR1":
sR2()
Cx = wW
else:
if Cx > 303 + 50:
Cx -= speed
elif event.char == "d":
if room != "sR1":
if Cx < wW - 50:
Cx += speed
else:
if room == "sR2":
sR1()
elif room == "sR3":
sR2()
Cx = 2
else:
if Cx < 934 - 50:
Cx += speed
else:
if Etel % eydsla == 0:
fuel = fuel - 1
Etel += 1
if event.char == "w":
if Sy > 8:
Sy -= speed
fY = "w"
else:
if room == "R3":
R1()
Sy = wH
elif room == "R4":
R2()
Sy = wH
elif event.char == "s":
if Sy < wH - 6:
Sy += speed
fY = "s"
else:
if room == "R1":
R3()
Sy = 0
elif room == "R2":
R4()
Sy = 0
elif event.char == "a":
if Sx > 8:
Sx -= speed
fY = "a"
else:
if room == "R2":
R1()
Sx = wW
elif room == "R4":
R3()
Sx = wW
elif event.char == "d":
if Sx < wW - 6:
Sx += speed
fY = "d"
else:
if room == "R1":
R2()
Sx = 0
elif room == "R3":
R4()
Sx = 0
for a in sol:
canvas.delete(a)
inn = False
if room == "R1":
tel = 0
for a in range(628):
if Sy > round(900 + 400 * math.sin(tel)) and Sx > round(
1200 + 400 * math.cos(tel)): # 1200, 900
inn = True
if Sy > round(900 + 300 * math.sin(tel)) and Sx > round(1200 + 300 * math.cos(tel)):
dod = True
break
tel = round(tel + 0.01, 2)
elif room == "R2":
tel = 0
for a in range(628):
if Sy > round(900 + 400 * math.sin(tel)) and Sx < round(0 + 400 * math.cos(tel)): # 0, 900
inn = True
if Sy > round(900 + 300 * math.sin(tel)) and Sx < round(0 + 300 * math.cos(tel)):
dod = True
break
tel = round(tel + 0.01, 2)
elif room == "R3":
tel = 0
for a in range(628):
if Sy < round(0 + 400 * math.sin(tel)) and Sx > round(1200 + 400 * math.cos(tel)): # 1200, 0
inn = True
if Sy < round(0 + 300 * math.sin(tel)) and Sx > round(1200 + 300 * math.cos(tel)):
dod = True
break
tel = round(tel + 0.01, 2)
elif room == "R4":
tel = 0
for a in range(628):
if Sy < round(0 + 400 * math.sin(tel)) and Sx < round(0 + 400 * math.cos(tel)): # 0, 0
inn = True
if Sy < round(0 + 300 * math.sin(tel)) and Sx < round(0 + 300 * math.cos(tel)):
dod = True
break
tel = round(tel + 0.01, 2)
if inn == True:
if dod == True:
for a in sol:
canvas.delete(a)
destroy(15)
elif dod == False:
sol.append(canvas.create_text(wW / 2, wH / 2, text="Heat Warning!", fill="white"))
if room == "sR1" or room == "sR2" or room == "sR3":
homeSS()
else:
homeS()
# Function for when you are out of fuel
def emty():
global dod
global sol
dod = True
canvas.bind("<Key>", dead)
for a in sol:
canvas.delete(a)
user()
canvas.create_text(wW / 2, wH / 2, text="Ship is out of fuel!", fill="white")
canvas.create_rectangle((wW / 2) - 50, (wH / 2) + 10, (wW / 2) + 50, (wH / 2) + 50, fill="grey")
canvas.create_text(wW / 2, (wH / 2) + 30, text="Restart", fill="white", anchor="center")
# Dead function (just so that once you are dead you cannot keep playing)
def dead(key):
pass
# When you die
def destroy(boom):
global canvas
global listi
global sol
global wW
global wH
canvas.create_circle(Sx, Sy, boom, fill="orange")
for a in listi:
canvas.delete(a)
canvas.bind("<Key>", dead)
user()
canvas.create_text(wW / 2, wH / 2, text="Ship destroyed!", fill="white")
canvas.create_rectangle((wW / 2) - 50, (wH / 2) + 10, (wW / 2) + 50, (wH / 2) + 50, fill="grey")
canvas.create_text(wW / 2, (wH / 2) + 30, text="Restart", fill="white", anchor="center")
def user():
global wW
global wH
global e1
e1 = Entry(canvas)
canvas.create_text(wW / 2, (wH / 2) + 60, text="Sláðu inn notenda nafn þitt", fill="white")
canvas.create_window(wW / 2, (wH / 2) + 80, window=e1)
# When the mouse button is clicked
def callback(event):
global canvas
global dod
global tel
global e1
canvas.focus_set()
print("clicked at", event.x, event.y)
if dod == True:
if event.x > (wW / 2) - 50 and event.y > (wH / 2) + 10 and event.x < (wW / 2) + 50 and event.y < (wH / 2) + 50:
nafn = e1.get()
if nafn != "":
entry = ","+nafn+":"+str(tel)
with open("scores.txt","a") as f:
f.write(entry)
reset()
# To create a new dot
def new_dot():
global canvas
global speed
global dot
global tel
tel += 1
global Dx
global Dy
global Dr
global rooms
global room
global dtel
Dx = random.randint(0, 1200)
Dy = random.randint(0, 900)
Dr = random.choice(rooms)
tel1 = 0
for a in range(628):
if Dr == "R1":
print("Y:", Dy)
print("X:", Dx)
print("Ry:", round(900 + 300 * math.sin(tel)))
print("Rx:", round(1200 + 300 * math.cos(tel)))
if Dy > round(900 + 300 * math.sin(tel1)) and Dx > round(1200 + 300 * math.cos(tel1)):
print("yay!!")
new_dot()
elif Dr == "R2":
if Dy > round(900 + 300 * math.sin(tel1)) and Dx < round(0 + 300 * math.cos(tel1)):
new_dot()
elif Dr == "R3":
if Dy < round(0 + 300 * math.sin(tel1)) and Dx > round(1200 + 300 * math.cos(tel1)):
new_dot()
elif Dr == "R4":
if Dy < round(0 + 300 * math.sin(tel1)) and Dx < round(0 + 300 * math.cos(tel1)):
new_dot()
tel1 = round(tel1 + 0.01, 2)
# print(tel)
# Main screen inside the spaceship
def homeSS():
global canvas
global dot
global Cx
global Cy
global listi
canvas.delete(dot)
listi = []
listi.append(canvas.create_circle(Cx, Cy, 50, fill="white", outline=""))
# Main screen in space
def homeS():
global canvas
global dot
canvas.delete(dot)
global Sx
global Sy
global listi
listi = []
global fY
global room
global Dr
global dod
global maxfuel
global fuel
global wW
global wH
if room == Dr:
dot = canvas.create_circle(Dx, Dy, 3, fill="yellow")
try:
with open("scores.txt","r") as f:
text = f.read()
users = text.split(",")
telja = 0
for a in users:
nota = a.split(":")
if int(nota[1]) > int(telja):
nafn = nota[0]
telja = nota[1]
listi.append(canvas.create_text(wW/2,0,text="Hæsti notandi: "+nafn+" : "+str(telja),fill ="white",anchor = N))
except:
listi.append(
canvas.create_text(wW / 2, 0, text="Hæsti notandi: Enginn", fill="white",anchor=N))
if dod == False:
if fY == "s":
listi.append(canvas.create_circle(Sx, Sy - 8, 3, fill="lightblue", outline=""))
elif fY == "w":
listi.append(canvas.create_circle(Sx, Sy + 2, 3, fill="lightblue", outline=""))
elif fY == "a":
listi.append(canvas.create_circle(Sx + 2, Sy, 3, fill="lightblue", outline=""))
elif fY == "d":
listi.append(canvas.create_circle(Sx - 8, Sy, 3, fill="lightblue", outline=""))
if fY == "s" or fY == "w":
listi.append(canvas.create_circle(Sx, Sy, 3, fill="grey", outline=""))
listi.append(canvas.create_circle(Sx, Sy - 2, 3, fill="grey", outline=""))
listi.append(canvas.create_circle(Sx, Sy - 4, 3, fill="grey", outline=""))
listi.append(canvas.create_circle(Sx, Sy - 6, 3, fill="grey", outline=""))
elif fY == "a" or fY == "d":
listi.append(canvas.create_circle(Sx, Sy, 3, fill="grey", outline=""))
listi.append(canvas.create_circle(Sx - 2, Sy, 3, fill="grey", outline=""))
listi.append(canvas.create_circle(Sx - 4, Sy, 3, fill="grey", outline=""))
listi.append(canvas.create_circle(Sx - 6, Sy, 3, fill="grey", outline=""))
if Sx - 3 <= Dx + 3 and Sx + 3 >= Dx - 3 and Sy - 3 <= Dy + 3 and Sy + 3 >= Dy - 3 and room == Dr:
fuel = fuel + 10
if fuel > maxfuel:
fuel = maxfuel
new_dot()
if fuel == 0:
emty()
listi.append(canvas.create_text(wW - 10, 10, text="Stig: " + str(tel), width=100, anchor=NE, fill="white"))
listi.append(canvas.create_text(0 + 10, 10, text="Fuel: " + str(fuel), width=100, anchor=NW, fill="white"))
# To create stars
def stars(numb, x1, y1, x2, y2):
global canvas
for a in range(numb):
x = random.randint(x1, x2)
y = random.randint(y1, y2)
star.append(canvas.create_circle(x, y, 1, fill="white", outline=""))
# Room 1 in space
def R1():
global canvas
global dot
global room
room = "R1"
canvas.delete(dot)
for a in planets:
canvas.delete(a)
planets.append(canvas.create_circle(100, 120, 50, fill="blue", outline="lightblue", width=4))
planets.append(canvas.create_circle_arc(100, 120, 48, fill="green", outline="", start=45, end=140))
planets.append(canvas.create_circle_arc(100, 120, 48, fill="green", outline="", start=275, end=305))
planets.append(canvas.create_circle_arc(100, 120, 45, style="arc", outline="white", width=6, start=270 - 25,
end=270 + 25))
planets.append(canvas.create_circle(150, 40, 20, fill="#BBB", outline=""))
planets.append(canvas.create_circle(140, 40, 2, fill="darkgrey", outline=""))
planets.append(canvas.create_circle(160, 50, 4, fill="darkgrey", outline=""))
planets.append(canvas.create_circle(160, 30, 3, fill="darkgrey", outline=""))
planets.append(canvas.create_circle(1200, 900, 300, fill="#FF5C00"))
planets.append(canvas.create_circle(1200, 900, 400, outline="#FF5C00"))
# Room 2 in space
def R2():
global canvas
global dot
global room
room = "R2"
canvas.delete(dot)
for a in planets:
canvas.delete(a)
planets.append(canvas.create_circle(0, 900, 300, fill="#FF5C00"))
planets.append(canvas.create_circle(0, 900, 400, outline="#FF5C00"))
planets.append(canvas.create_circle(900, 500, 45, fill="red"))
planets.append(canvas.create_circle(880, 520, 10, fill="#E82A00", outline=""))
planets.append(canvas.create_circle(920, 520, 8, fill="#E82A00", outline=""))
planets.append(canvas.create_circle(900, 480, 5, fill="#E82A00", outline=""))
planets.append(canvas.create_circle(500, 100, 60, fill="#FFA30B", outline="#FFBD05", width=4))
# Room 3 in space
def R3():
global canvas
global dot
global room
room = "R3"
canvas.delete(dot)
for a in planets:
canvas.delete(a)
planets.append(canvas.create_circle(1200, 0, 300, fill="#FF5C00"))
planets.append(canvas.create_circle(1200, 0, 400, outline="#FF5C00"))
# Room 4 in space
def R4():
global canvas
global dot
global room
room = "R4"
canvas.delete(dot)
for a in planets:
canvas.delete(a)
planets.append(canvas.create_circle(0, 0, 300, fill="#FF5C00"))
planets.append(canvas.create_circle(0, 0, 400, outline="#FF5C00"))
planets.append(canvas.create_circle(900, 600, 150, fill="#FFA700"))
planets.append(canvas.create_circle(900, 600, 225, outline="#FFE100", width=40))
# Room 1 in the spaceship
def sR1():
global canvas
global dot
global wW
global wH
global Seatx
global Seaty
global room
room = "sR1"
canvas.delete(dot)
for a in planets:
canvas.delete(a)
planets.append(canvas.create_rectangle(0, 200, (wW / 4) * 3, 700, fill="darkgrey"))
planets.append(canvas.create_polygon(900, 200, wW, wH / 2, 900, 700, fill="darkgrey"))
planets.append(
canvas.create_rectangle(Seatx - 50, Seaty - 50, Seatx + 50, Seaty + 50, fill="brown", outline=""))
# Room 2 in the spaceship
def sR2():
global canvas
global wW
global wH
global room
room = "sR2"
for a in planets:
canvas.delete(a)
planets.append(canvas.create_rectangle(0, 200, wW, 700, fill="darkgrey"))
# Room 3 in the spaceship
def sR3():
global canvas
global wW
global wH
global room
room = "sR3"
for a in planets:
canvas.delete(a)
planets.append(canvas.create_circle(300, wH / 2, 250, fill="lightblue", outline=""))
planets.append(canvas.create_rectangle(300, 200, wW, 700, fill="darkgrey", outline=""))
root = Tk()
reset()
root.mainloop()
# Function that handles rock, paper, scissors
def sbs():
global tel1
global tel2
global tel3
tel1 = 0
tel2 = 0
tel3 = 0
for a in root.winfo_children():
a.destroy()
# Function for when scissors is chosen
def skaeri():
global tel1
global tel2
global tel3
tala = random.randint(1, 3)
if tala == 1:
labelU.configure(text="Notandi: Skæri")
labelT.configure(text="Tölva: Skæri")
labelV.configure(text="Jafntefli")
tel3 = tel3 + 1
labelTj.configure(text=tel3)
elif tala == 2:
labelU.configure(text="Notandi: Skæri")
labelT.configure(text="Tölva: Blað")
labelV.configure(text="Notandi vinnur")
tel1 = tel1 + 1
labelTv.configure(text=tel1)
else:
labelU.configure(text="Notandi: Skæri")
labelT.configure(text="Tölva: Steinn")
labelV.configure(text="T<NAME>")
tel2 = tel2 + 1
labelTt.configure(text=tel2)
# Function for when paper is chosen
def blad():
global tel1
global tel2
global tel3
tala = random.randint(1, 3)
if tala == 1:
labelU.configure(text="Notandi: Blað")
labelT.configure(text="Tölva: Skæri")
labelV.configure(text="T<NAME>")
tel2 = tel2 + 1
labelTt.configure(text=tel2)
elif tala == 2:
labelU.configure(text="Notandi: Blað")
labelT.configure(text="Tölva: Blað")
labelV.configure(text="Jafntefli")
tel3 = tel3 + 1
labelTj.configure(text=tel3)
else:
labelU.configure(text="Notandi: Blað")
labelT.configure(text="Tölva: Steinn")
labelV.configure(text="Notandi vinnur")
tel1 = tel1 + 1
labelTv.configure(text=tel1)
# Function for when rock is chosen
def steinn():
global tel1
global tel2
global tel3
tala = random.randint(1, 3)
if tala == 1:
labelU.configure(text="Notandi: Steinn")
labelT.configure(text="Tölva: Skæri")
labelV.configure(text="Notandi vinnur")
tel1 = tel1 + 1
labelTv.configure(text=tel1)
elif tala == 2:
labelU.configure(text="Notandi: Steinn")
labelT.configure(text="Tölva: Blað")
labelV.configure(text="Tölva vinnur")
tel2 = tel2 + 1
labelTt.configure(text=tel2)
else:
labelU.configure(text="Notandi: Steinn")
labelT.configure(text="Tölva: Blað")
labelV.configure(text="Jafntefli")
tel3 = tel3 + 1
labelTj.configure(text=tel3)
button_1 = Button(root, text="Skæri", command=skaeri)
button_2 = Button(root, text="Blað", command=blad)
button_3 = Button(root, text="Steinn", command=steinn)
button_4 = Button(root, text="Til baka", command=leikir)
labelU = Label(root, text="")
labelT = Label(root, text="")
labelV = Label(root, text="")
labelTtxt = Label(root, text="Sigrar:")
labelUtxt = Label(root, text="Töp:")
labelVtxt = Label(root, text="Jafntefli:")
labelTv = Label(root, text=tel1)
labelTt = Label(root, text=tel2)
labelTj = Label(root, text=tel3)
button_1.grid(row=1, column=1)
button_2.grid(row=2, column=1)
button_3.grid(row=3, column=1)
button_4.grid(columnspan=5)
labelU.grid(row=1, column=2)
labelT.grid(row=2, column=2)
labelV.grid(row=3, column=2)
labelTtxt.grid(row=1, column=3)
labelUtxt.grid(row=2, column=3)
labelVtxt.grid(row=3, column=3)
labelTv.grid(row = 1, column = 4)
labelTt.grid(row=2, column=4)
labelTj.grid(row=3, column=4)
label_1 = Label(root, text="Veldu hvað þú vilt gera")
button_1 = Button(root, text=" Skæri, blað, steinn ", command=sbs)
button_2 = Button(root, text=" Mylla ", command=mylla)
button_3 = Button(root, text=" Planets ", command=Planets)
button_4 = Button(root, text=" Til baka ", command=home)
button_1.grid(row=1, column=1)
button_2.grid(row=1, column=2)
button_3.grid(row=1, column=3)
button_4.grid(row=1, column=4)
label_1.grid(columnspan=5)
# Create the window
root = Tk()
# Start on the home screen
home()
# Run the window in a loop
root.mainloop()
``` |
{
"source": "jonasfreyr/Lokaverkefni-Vefforritun2018",
"score": 3
} |
#### File: Lokaverkefni-Vefforritun2018/Final/app.py
```python
from flask import *
import MySQLdb  # needed by the new-user route below; missing from the original file
a = "api"
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file
@app.route('/')
@app.route('/home')
def home():
return render_template('index.html')
@app.route('/stjornendur')
def stjornendur():
return render_template('stjorn.html')
@app.route('/verslanir')
def verslanir():
return render_template('vefverslanir.html')
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/blog')
def blog():
return render_template('blog.html')
@app.route('/new')
def new():
return render_template('nyskraning.html')
@app.route('/logon', methods=['POST'])
def logon():
email = request.form['email']
passw = request.form['password']
print(email)
print(passw)
# NOTE: return added so the view produces a response; the original function returned
# None, which Flask rejects at runtime. The redirect target is an assumption.
return redirect(url_for('home'))
@app.route('/new-user')
def newuser():
conn = MySQLdb.connect("tsuts.tskoli.is", "2801002260", "mypassword", "<PASSWORD>")
db = conn.cursor()
commant = "INSERT INTO Notandi (USER_ID, USER_NAME, USER_PASS, USER_ADMIN) VALUES (1, '2341234', '1234', TRUE)"
try:
db.execute(commant)
conn.commit()
except:
conn.rollback()
conn.close()
# NOTE: return added so the view produces a response; the original function returned None.
return "success"
if __name__ == "__main__":
app.run(host='0.0.0.0')
``` |
{
"source": "jonasfreyr/Net-forritun",
"score": 3
} |
#### File: Net-forritun/Verkefni6/mp.py
```python
import multiprocessing as mp
import logging
import socket
import time
logger = mp.log_to_stderr(logging.DEBUG)
def worker(socket):
while True:
client, address = socket.accept()
logger.debug("{u} connected".format(u=address))
client.send("OK")
client.close()
if __name__ == '__main__':
num_workers = 5
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('',9090))
serversocket.listen(5)
workers = [mp.Process(target=worker, args=(serversocket,)) for i in
range(num_workers)]
for p in workers:
p.daemon = True
p.start()
while True:
try:
time.sleep(10)
except:
break
``` |
{
"source": "JonasFurrer/django_channels",
"score": 2
} |
#### File: src/sensor/views.py
```python
from django.shortcuts import render
def sensor_view(request):
return render(request, "sensor.html", {'sensor': '99'})
``` |
{
"source": "JonasGeiping/breaching",
"score": 2
} |
#### File: JonasGeiping/breaching/benchmark_breaches.py
```python
import hydra
from omegaconf import OmegaConf
import datetime
import time
import logging
import breaching
import os
os.environ["HYDRA_FULL_ERROR"] = "0"
log = logging.getLogger(__name__)
def main_process(process_idx, local_group_size, cfg, num_trials=100):
"""This function controls the central routine."""
total_time = time.time() # Rough time measurements here
setup = breaching.utils.system_startup(process_idx, local_group_size, cfg)
model, loss_fn = breaching.cases.construct_model(cfg.case.model, cfg.case.data, cfg.case.server.pretrained)
if cfg.num_trials is not None:
num_trials = cfg.num_trials
server = breaching.cases.construct_server(model, loss_fn, cfg.case, setup)
model = server.vet_model(model)
attacker = breaching.attacks.prepare_attack(model, loss_fn, cfg.attack, setup)
if cfg.case.user.user_idx is not None:
print("The argument user_idx is disregarded during the benchmark. Data selection is fixed.")
log.info(
f"Partitioning is set to {cfg.case.data.partition}. Make sure there exist {num_trials} users in this scheme."
)
cfg.case.user.user_idx = -1
run = 0
overall_metrics = []
while run < num_trials:
local_time = time.time()
# Select data that has not been seen before:
cfg.case.user.user_idx += 1
try:
user = breaching.cases.construct_user(model, loss_fn, cfg.case, setup)
except ValueError:
log.info("Cannot find other valid users. Finishing benchmark.")
break
if cfg.case.data.modality == "text":
dshape = user.dataloader.dataset[0]["input_ids"].shape
data_shape_mismatch = any([d != d_ref for d, d_ref in zip(dshape, cfg.case.data.shape)])
else:
data_shape_mismatch = False # Handled by preprocessing for images
if len(user.dataloader.dataset) < user.num_data_points or data_shape_mismatch:
log.info(f"Skipping user {user.user_idx} (has not enough data or data shape mismatch).")
else:
log.info(f"Now evaluating user {user.user_idx} in trial {run}.")
run += 1
# Run exchange
shared_user_data, payloads, true_user_data = server.run_protocol(user)
# Evaluate attack:
try:
reconstruction, stats = attacker.reconstruct(
payloads, shared_user_data, server.secrets, dryrun=cfg.dryrun
)
# Run the full set of metrics:
metrics = breaching.analysis.report(
reconstruction,
true_user_data,
payloads,
server.model,
order_batch=True,
compute_full_iip=True,
compute_rpsnr=True,
compute_ssim=True,
cfg_case=cfg.case,
setup=setup,
)
# Add query metrics
metrics["queries"] = user.counted_queries
# Save local summary:
breaching.utils.save_summary(cfg, metrics, stats, time.time() - local_time, original_cwd=False)
overall_metrics.append(metrics)
# Save recovered data:
if cfg.save_reconstruction:
breaching.utils.save_reconstruction(reconstruction, payloads, true_user_data, cfg)
if cfg.dryrun:
break
except Exception as e: # noqa # yeah we're that close to the deadlines
log.info(f"Trial {run} broke down with error {e}.")
# Compute average statistics:
average_metrics = breaching.utils.avg_n_dicts(overall_metrics)
# Save global summary:
breaching.utils.save_summary(
cfg, average_metrics, stats, time.time() - total_time, original_cwd=True, table_name="BENCHMARK_breach"
)
@hydra.main(version_base="1.1", config_path="breaching/config", config_name="cfg")
def main_launcher(cfg):
"""This is boiler-plate code for the launcher."""
log.info("--------------------------------------------------------------")
log.info("-----Launching federating learning breach experiment! --------")
launch_time = time.time()
if cfg.seed is None:
cfg.seed = 233 # The benchmark seed is fixed by default!
log.info(OmegaConf.to_yaml(cfg))
breaching.utils.initialize_multiprocess_log(cfg) # manually save log configuration
main_process(0, 1, cfg)
log.info("-------------------------------------------------------------")
log.info(
f"Finished computations with total train time: " f"{str(datetime.timedelta(seconds=time.time() - launch_time))}"
)
log.info("-----------------Job finished.-------------------------------")
if __name__ == "__main__":
main_launcher()
```
#### File: JonasGeiping/breaching/simulate_breach.py
```python
import torch
import hydra
from omegaconf import OmegaConf
import datetime
import time
import logging
import breaching
import os
os.environ["HYDRA_FULL_ERROR"] = "0"
log = logging.getLogger(__name__)
def main_process(process_idx, local_group_size, cfg):
"""This function controls the central routine."""
local_time = time.time()
setup = breaching.utils.system_startup(process_idx, local_group_size, cfg)
# Propose a model architecture:
# (Replace this line with your own model if you want)
model, loss_fn = breaching.cases.construct_model(cfg.case.model, cfg.case.data, cfg.case.server.pretrained)
# Instantiate server and vet model
# This is a no-op for an honest-but-curious server, but a malicious-model server can modify the model in this step.
server = breaching.cases.construct_server(model, loss_fn, cfg.case, setup)
model = server.vet_model(model)
# Instantiate user and attacker
user = breaching.cases.construct_user(model, loss_fn, cfg.case, setup)
attacker = breaching.attacks.prepare_attack(model, loss_fn, cfg.attack, setup)
# Summarize startup:
breaching.utils.overview(server, user, attacker)
# Simulate a simple FL protocol
shared_user_data, payloads, true_user_data = server.run_protocol(user)
# Run an attack using only payload information and shared data
reconstructed_user_data, stats = attacker.reconstruct(payloads, shared_user_data, server.secrets, dryrun=cfg.dryrun)
# How good is the reconstruction?
metrics = breaching.analysis.report(
reconstructed_user_data, true_user_data, payloads, model, cfg_case=cfg.case, setup=setup
)
# Save to summary:
breaching.utils.save_summary(cfg, metrics, stats, user.counted_queries, time.time() - local_time)
# Save to output folder:
breaching.utils.dump_metrics(cfg, metrics)
if cfg.save_reconstruction:
breaching.utils.save_reconstruction(reconstructed_user_data, payloads, true_user_data, cfg)
@hydra.main(version_base="1.1", config_path="breaching/config", config_name="cfg")
def main_launcher(cfg):
"""This is boiler-plate code for the launcher."""
log.info("--------------------------------------------------------------")
log.info("-----Launching federating learning breach experiment! --------")
launch_time = time.time()
if cfg.seed is None:
cfg.seed = torch.randint(0, 2**32 - 1, (1,)).item()
log.info(OmegaConf.to_yaml(cfg))
breaching.utils.initialize_multiprocess_log(cfg) # manually save log configuration
main_process(0, 1, cfg)
log.info("-------------------------------------------------------------")
log.info(
f"Finished computations with total train time: " f"{str(datetime.timedelta(seconds=time.time() - launch_time))}"
)
log.info("-----------------Job finished.-------------------------------")
if __name__ == "__main__":
main_launcher()
``` |
{
"source": "JonasGoebel/lost",
"score": 2
} |
#### File: api/label/endpoint.py
```python
from flask import request, make_response
from flask_restx import Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
from lost.api.api import api
from lost.api.label.api_definition import label_leaf
from lost.api.label.parsers import update_label_parser, create_label_parser
from lost.db import model, roles, access
from lost.db.vis_level import VisLevel
from lost.settings import LOST_CONFIG
from lost.logic.label import LabelTree
from io import BytesIO
import flask
namespace = api.namespace('label', description='Label API.')
@namespace.route('/tree/<string:visibility>')
class LabelTrees(Resource):
<EMAIL>()
@jwt_required
def get(self, visibility):
dbm = access.DBMan(LOST_CONFIG)
identity = get_jwt_identity()
user = dbm.get_user_by_id(identity)
flask.current_app.logger.info(visibility)
default_group = dbm.get_group_by_name(user.user_name)
if visibility == VisLevel().USER:
if not user.has_role(roles.DESIGNER):
dbm.close_session()
return "You are not authorized.", 401
else:
root_leaves = dbm.get_all_label_trees(group_id=default_group.idx)
trees = list()
for root_leaf in root_leaves:
trees.append(LabelTree(dbm, root_leaf.idx).to_hierarchical_dict())
dbm.close_session()
return trees
if visibility == VisLevel().GLOBAL:
if not user.has_role(roles.ADMINISTRATOR):
dbm.close_session()
return "You are not authorized.", 401
else:
root_leaves = dbm.get_all_label_trees(global_only=True)
trees = list()
for root_leaf in root_leaves:
trees.append(LabelTree(dbm, root_leaf.idx).to_hierarchical_dict())
dbm.close_session()
return trees
dbm.close_session()
return "You are not authorized.", 401
@namespace.route('/<string:visibility>')
class LabelEditNew(Resource):
@api.expect(update_label_parser)
@jwt_required
def patch(self, visibility):
args = update_label_parser.parse_args()
dbm = access.DBMan(LOST_CONFIG)
identity = get_jwt_identity()
user = dbm.get_user_by_id(identity)
if not user.has_role(roles.DESIGNER):
dbm.close_session()
return "You are not authorized.", 401
else:
label = dbm.get_label_leaf(int(args.get('id')))
label.name = args.get('name')
label.description = args.get('description')
label.abbreviation = args.get('abbreviation')
label.external_id = args.get('external_id')
label.color = args.get('color')
dbm.save_obj(label)
dbm.close_session()
return 'success'
@api.expect(create_label_parser)
@jwt_required
def post(self, visibility):
args = create_label_parser.parse_args()
dbm = access.DBMan(LOST_CONFIG)
identity = get_jwt_identity()
user = dbm.get_user_by_id(identity)
default_group = dbm.get_group_by_name(user.user_name)
if visibility == VisLevel().USER:
if not user.has_role(roles.DESIGNER):
dbm.close_session()
return "You are not authorized.", 401
else:
label = model.LabelLeaf(name=args.get('name'),abbreviation=args.get('abbreviation'), \
description=args.get('description'),external_id=args.get('external_id'),
is_root=args.get('is_root'),color=args.get('color'), group_id=default_group.idx)
if args.get('parent_leaf_id'):
label.parent_leaf_id = args.get('parent_leaf_id'),
dbm.save_obj(label)
dbm.close_session()
return "success"
if visibility == VisLevel().GLOBAL:
if not user.has_role(roles.ADMINISTRATOR):
dbm.close_session()
return "You are not authorized.", 401
else:
label = model.LabelLeaf(name=args.get('name'),abbreviation=args.get('abbreviation'), \
description=args.get('description'),external_id=args.get('external_id'),
is_root=args.get('is_root'),color=args.get('color'))
if args.get('parent_leaf_id'):
label.parent_leaf_id = args.get('parent_leaf_id'),
dbm.save_obj(label)
dbm.close_session()
return "success"
dbm.close_session()
return "You are not authorized.", 401
@namespace.route('/<int:label_leaf_id>')
@namespace.param('label_leaf_id', 'The group identifier')
class Label(Resource):
@api.marshal_with(label_leaf)
@jwt_required
def get(self,label_leaf_id):
dbm = access.DBMan(LOST_CONFIG)
identity = get_jwt_identity()
user = dbm.get_user_by_id(identity)
if not user.has_role(roles.DESIGNER):
dbm.close_session()
return "You are not authorized.", 401
else:
re = dbm.get_label_leaf(label_leaf_id)
dbm.close_session()
return re
@jwt_required
def delete(self,label_leaf_id):
dbm = access.DBMan(LOST_CONFIG)
identity = get_jwt_identity()
user = dbm.get_user_by_id(identity)
if not user.has_role(roles.DESIGNER):
dbm.close_session()
return "You are not authorized.", 401
else:
label = dbm.get_label_leaf(label_leaf_id)
dbm.delete(label)
dbm.commit()
dbm.close_session()
return "success"
@namespace.route('/export/<int:label_leaf_id>')
class ExportLabelTree(Resource):
@jwt_required
def get(self,label_leaf_id):
dbm = access.DBMan(LOST_CONFIG)
identity = get_jwt_identity()
user = dbm.get_user_by_id(identity)
if not user.has_role(roles.DESIGNER):
dbm.close_session()
return "You are not authorized.", 401
else:
label_tree = LabelTree(dbm, root_id=label_leaf_id)
ldf = label_tree.to_df()
dbm.close_session()
f = BytesIO()
ldf.to_csv(f)
f.seek(0)
resp = make_response(f.read())
resp.headers["Content-Disposition"] = f"attachment; filename={label_tree.root.name}.csv"
resp.headers["Content-Type"] = "blob"
return resp
```
#### File: api/user/login_manager.py
```python
import datetime
from flask_ldap3_login import LDAP3LoginManager, AuthenticationResponseStatus
from lost.settings import LOST_CONFIG, FLASK_DEBUG
from flask_jwt_extended import create_access_token, create_refresh_token
from lost.db.model import User as DBUser, Group
from lost.db import roles
class LoginManager():
def __init__(self, dbm, user_name, password):
self.dbm = dbm
self.user_name = user_name
self.password = password
def login(self):
if LOST_CONFIG.ldap_config['LDAP_ACTIVE']:
access_token, refresh_token = self.__authenticate_ldap()
else:
access_token, refresh_token = self.__authenticate_flask()
if access_token and refresh_token:
return {
'token': access_token,
'refresh_token': refresh_token
}, 200
return {'message': 'Invalid credentials'}, 401
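# Illustrative sketch (not part of the original file): how this manager might be used
# from a Flask endpoint; the request field names are assumptions.
#
#   manager = LoginManager(dbm, request.json['user_name'], request.json['password'])
#   response_body, status_code = manager.login()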
def __get_token(self, user_id):
expires = datetime.timedelta(minutes=LOST_CONFIG.session_timeout)
expires_refresh = datetime.timedelta(minutes=LOST_CONFIG.session_timeout + 2)
if FLASK_DEBUG:
expires = datetime.timedelta(days=365)
expires_refresh = datetime.timedelta(days=366)
access_token = create_access_token(identity=user_id, fresh=True, expires_delta=expires)
refresh_token = create_refresh_token(user_id, expires_delta=expires_refresh)
return access_token, refresh_token
def __authenticate_flask(self):
if self.user_name:
user = self.dbm.find_user_by_user_name(self.user_name)
if user and user.check_password(self.password):
return self.__get_token(user.idx)
return None, None
def __authenticate_ldap(self):
# auth with ldap
ldap_manager = LDAP3LoginManager()
ldap_manager.init_config(LOST_CONFIG.ldap_config)
# Check if the credentials are correct
response = ldap_manager.authenticate(self.user_name, self.password)
if response.status != AuthenticationResponseStatus.success:
# no user found in ldap, try it with db user:
return self.__authenticate_flask()
user_info = response.user_info
user = self.dbm.find_user_by_user_name(self.user_name)
# user not in db:
if not user:
user = self.__create_db_user(user_info)
else:
# user in db -> synch with ldap
user = self.__update_db_user(user_info, user)
return self.__get_token(user.idx)
def __create_db_user(self, user_info):
user = DBUser(user_name=user_info['uid'], email=user_info['mail'],
email_confirmed_at=datetime.datetime.now(), first_name=user_info['givenName'],
last_name=user_info['sn'], is_external=True)
anno_role = self.dbm.get_role_by_name(roles.ANNOTATOR)
user.roles.append(anno_role)
user.groups.append(Group(name=user.user_name, is_user_default=True))
self.dbm.save_obj(user)
return user
def __update_db_user(self, user_info, user):
user.email = user_info['mail']
user.first_name = user_info['givenName']
user.last_name = user_info['sn']
self.dbm.save_obj(user)
return user
```
#### File: lost/db/patches.py
```python
import lost
from lost.logic.file_man import AppFileMan
from lostconfig import LOSTConfig
import os
import json
from lost.db.db_patch import DBPatcher
def update_version_log():
fm = AppFileMan(LOSTConfig())
path = fm.get_version_log_path()
if not os.path.exists(path):
print('Patchsystem: Created version log file: {}'.format(path))
versions = []
versions.append(lost.__version__)
with open(path, 'w') as json_file:
json.dump(versions, json_file)
else:
with open(path) as json_file:
versions = json.load(json_file)
print("Versions: ", versions)
if versions[-1] == lost.__version__:
print('Patchsystem: No version change!')
else:
print('Patchsystem: We maybe need to patch!')
dbp = DBPatcher()
dbp.patch()
versions.append(lost.__version__)
with open(path, 'w') as json_file:
json.dump(versions, json_file)
```
#### File: logic/jobs/cron_jobs.py
```python
import argparse
import traceback
from lost.logic import dask_session
from lost.logic.pipeline import cron
from lost.logic.pipeline import worker
import lostconfig as config
from lost.db.access import DBMan
import time
import threading
from dask.distributed import Client
from lost.logic.log import get_file_logger
from lost.logic.file_man import FileMan, AppFileMan
import logging
from lost.logic.jobs import jobs
from lost.logic.dask_session import ds_man
def process_pipes(log_name, client):
lostconfig = config.LOSTConfig()
dbm = DBMan(lostconfig)
pipe_list = dbm.get_pipes_to_process()
# For each task in this project
for p in pipe_list:
pipe_man = cron.PipeEngine(dbm=dbm, pipe=p, lostconfig=lostconfig,
client=client, logger_name=log_name)
pipe_man.process_pipeline()
dbm.close_session()
def run_loop(run, sleep_time, **kwargs):
logger = logging.getLogger(kwargs["log_name"])
logger.info('Starting {} loop'.format(run.__name__))
while True:
try:
run(**kwargs)
time.sleep(sleep_time)
except Exception as e:
logger.error(traceback.format_exc())
time.sleep(1)
def process_pipes_loop(log_name):
lostconfig = config.LOSTConfig()
if lostconfig.worker_management != 'dynamic':
client = Client('{}:{}'.format(
lostconfig.scheduler_ip, lostconfig.scheduler_port)
)
else:
client = None
run_loop(process_pipes, lostconfig.pipe_schedule, log_name=log_name, client=client)
def worker_lifesign(log_name):
worker.send_life_sign()
def worker_lifesign_loop(log_name):
lostconfig = config.LOSTConfig()
run_loop(worker_lifesign, lostconfig.worker_beat, log_name=log_name)
def release_annos(log_name):
logger = logging.getLogger(log_name)
c_imgs, c_annos = jobs.release_annos_on_session_timeout()
logger.info('Released img_annos: {}, 2d_annos: {}'.format(c_imgs, c_annos))
def release_annos_loop(log_name):
lostconfig = config.LOSTConfig()
run_loop(release_annos, lostconfig.session_timeout*60, log_name=log_name)
def main():
parser = argparse.ArgumentParser(description='Run LOST cronjobs')
parser.add_argument('--debug', action='store_true',
help='start cronjobs just once for debugging')
args = parser.parse_args()
lostconfig = config.LOSTConfig()
fm = AppFileMan(lostconfig)
log_name = 'cron_jobs'
logger = get_file_logger(log_name, fm.get_app_log_path('cron_jobs.log') )
logger.info('Starting cron jobs!')
if args.debug:
t = threading.Thread(
target=worker_lifesign_loop,
args=(log_name,),
daemon=True
)
t.start()
client = Client('{}:{}'.format(
lostconfig.scheduler_ip, lostconfig.scheduler_port)
)
process_pipes(log_name, client)
else:
jobs = [
process_pipes_loop,
worker_lifesign_loop,
release_annos_loop
]
if lostconfig.worker_management == 'dynamic':
jobs.append(dask_session.release_client_by_timeout_loop)
jobs += lostconfig.extra_cron_jobs
threads = []
for j in jobs:
t = threading.Thread(
target=j,
args=(log_name,),
daemon=True
)
t.start()
threads.append(t)
[t.join() for t in threads]
if __name__ == "__main__":
main()
```
#### File: logic/pipeline/exec_utils.py
```python
import os
import hashlib
import importlib
import zipfile
def zipdir(path, out_path, timestamp=None):
# zipf is zipfile handle
zipf = zipfile.ZipFile(out_path, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(path):
for file in files:
src = os.path.join(root, file)
if timestamp is None:
dst = os.path.relpath(os.path.join(root, file),
os.path.join(path, '..'))
else:
dst = os.path.relpath(os.path.join(f'{root}_{timestamp}', file),
os.path.join(path, '..'))
zipf.write(src, dst)
zipf.close()
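# Illustrative usage sketch (not from the original file); the paths are made up:
#   zipdir('/code/my_pipeline', '/tmp/my_pipeline.zip')                     # plain archive
#   zipdir('/code/my_pipeline', '/tmp/my_pipeline.zip', '1620000000')       # timestamped dirs inside the zip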
# def module_to_bytes(path, ignore=['__pycache__']):
# # zipf is zipfile handle
# cont = b''
# for root, dirs, files in os.walk(path):
# for file in files:
# for i in ignore:
# src = os.path.join(root, file)
# if i not in src:
# with open(src, 'rb') as f:
# cont += f.read()
# return cont
def get_module_hash(path, ignore=['__pycache__']):
# zipf is zipfile handle
sha = hashlib.sha256()
for root, dirs, files in os.walk(path):
for file in files:
for i in ignore:
src = os.path.join(root, file)
if i not in src:
with open(src, 'rb') as f:
sha.update(f.read())
# cont += f.read()
return sha.hexdigest()
def import_by_string(full_name):
module_name, unit_name = full_name.rsplit('.', 1)
mod = importlib.import_module(module_name)
return getattr(mod, unit_name)
def exec_dyn_class(idx, class_name):
my_class = import_by_string(class_name)
instance = my_class(idx)
return instance._run(ret_success=True)
def get_import_name_by_script(script_name, timestamp=None):
mod_name = os.path.splitext(script_name)[0]
if timestamp is not None:
mod_list = mod_name.split('.')
mod_list[0] = f'{mod_list[0]}_{timestamp}'
mod_name = '.'.join(mod_list)
return f'{mod_name}.LostScript'
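# Illustrative sketch (not from the original file): how the helpers above are typically
# combined. The script name used here is a made-up example.
#
#   cls_path = get_import_name_by_script('my_script.py')   # -> 'my_script.LostScript'
#   result = exec_dyn_class(42, cls_path)                   # imports the class and runs it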
```
#### File: lost/logic/sia.py
```python
import lost
import json
import os
from lost.db import dtype, state, model
from lost.logic.anno_task import set_finished, update_anno_task
from datetime import datetime
from lost.logic.file_man import FileMan
__author__ = "<NAME>"
def get_first(db_man, user_id, media_url):
""" Get first image anno.
:type db_man: lost.db.access.DBMan
"""
at = get_sia_anno_task(db_man, user_id)
iteration = db_man.get_pipe_element(pipe_e_id=at.pipe_element_id).iteration
tmp_anno = db_man.get_first_sia_anno(at.idx, iteration, user_id)
if tmp_anno:
image_anno_id = tmp_anno.idx
image_anno = db_man.get_image_anno(image_anno_id)
if image_anno:
image_anno.timestamp_lock = datetime.now()
if image_anno.state == state.Anno.UNLOCKED:
image_anno.state = state.Anno.LOCKED
elif image_anno.state == state.Anno.LABELED:
image_anno.state = state.Anno.LABELED_LOCKED
is_last_image = __is_last_image__(db_man, user_id, at.idx, iteration, image_anno.idx)
current_image_number, total_image_amount = get_image_progress(db_man, at, image_anno.idx, at.pipe_element.iteration)
sia_serialize = SiaSerialize(image_anno, user_id, media_url, True, is_last_image, current_image_number, total_image_amount)
db_man.save_obj(image_anno)
return sia_serialize.serialize()
else:
return "nothing available"
def get_next(db_man, user_id, img_id, media_url):
# ptvsd.wait_for_attach()
# ptvsd.break_into_debugger()
""" Get next ImageAnno with all its TwoDAnnos
:type db_man: lost.db.access.DBMan
"""
at = get_sia_anno_task(db_man, user_id)
if at and at.pipe_element.pipe.state != state.Pipe.PAUSED:
image_anno = None
iteration = db_man.get_pipe_element(pipe_e_id=at.pipe_element_id).iteration
if int(img_id) == -1:
tmp_annos = db_man.get_next_locked_sia_anno(at.idx, user_id, iteration)
if len(tmp_annos) > 0:
image_anno = tmp_annos[0]
if image_anno is None:
image_anno = db_man.get_next_unlocked_sia_anno(at.idx, iteration)
if image_anno is None:
tmp_anno = db_man.get_last_sia_anno(at.idx, iteration, user_id)
if tmp_anno:
image_anno_id = tmp_anno.idx
image_anno = db_man.get_image_anno(image_anno_id)
else:
image_anno = db_man.get_next_sia_anno_by_last_anno(at.idx, user_id, img_id, iteration)
if image_anno is None:
tmp_annos = db_man.get_next_locked_sia_anno(at.idx, user_id, iteration)
if len(tmp_annos) > 0:
image_anno = tmp_annos[0]
if image_anno is None:
image_anno = db_man.get_next_unlocked_sia_anno(at.idx, iteration)
if image_anno:
is_first_image = True
image_anno.timestamp_lock = datetime.now()
if image_anno.state == state.Anno.UNLOCKED:
image_anno.state = state.Anno.LOCKED
elif image_anno.state == state.Anno.LABELED:
image_anno.state = state.Anno.LABELED_LOCKED
image_anno.user_id = user_id
db_man.save_obj(image_anno)
first_image_anno = db_man.get_first_sia_anno(at.idx, iteration, user_id)
if first_image_anno is not None and first_image_anno.idx != image_anno.idx:
is_first_image = False
is_last_image = __is_last_image__(db_man, user_id, at.idx, iteration, image_anno.idx)
current_image_number, total_image_amount = get_image_progress(db_man, at, image_anno.idx, at.pipe_element.iteration)
sia_serialize = SiaSerialize(image_anno, user_id, media_url, is_first_image, is_last_image, current_image_number, total_image_amount)
db_man.save_obj(image_anno)
return sia_serialize.serialize()
return "nothing available"
def get_previous(db_man, user_id, img_id, media_url):
""" Get previous image anno
:type db_man: lost.db.access.DBMan
"""
at = get_sia_anno_task(db_man, user_id)
iteration = db_man.get_pipe_element(pipe_e_id=at.pipe_element_id).iteration
image_anno = db_man.get_previous_sia_anno(at.idx, user_id, img_id, iteration)
is_last_image = False
is_first_image = False
first_anno = db_man.get_first_sia_anno(at.idx, iteration, user_id)
if image_anno is None:
if first_anno:
image_anno = db_man.get_image_anno(img_anno_id=first_anno.idx)
if image_anno:
if first_anno.idx == image_anno.idx:
is_first_image = True
image_anno.timestamp_lock = datetime.now()
db_man.save_obj(image_anno)
current_image_number, total_image_amount = get_image_progress(db_man, at, image_anno.idx, at.pipe_element.iteration)
sia_serialize = SiaSerialize(image_anno, user_id, media_url, is_first_image, is_last_image, current_image_number, total_image_amount)
return sia_serialize.serialize()
else:
return "nothing available"
def get_label_trees(db_man, user_id, at=None):
"""
:type db_man: lost.db.access.DBMan
"""
if at is None:
at = get_sia_anno_task(db_man, user_id)
label_trees_json = dict()
label_trees_json['labels'] = list()
if at:
for rll in db_man.get_all_required_label_leaves(at.idx): #type: lost.db.model.RequiredLabelLeaf
for label_leaf in db_man.get_all_child_label_leaves(rll.label_leaf.idx): #type: lost.db.model.LabelLeaf
label_leaf_json = dict()
label_leaf_json['id'] = label_leaf.idx
label_leaf_json['label'] = label_leaf.name
label_leaf_json['nameAndClass'] = label_leaf.name + " (" + rll.label_leaf.name + ")"
label_leaf_json['description'] = label_leaf.description
if label_leaf.color and label_leaf.color != '':
label_leaf_json['color'] = label_leaf.color
label_trees_json['labels'].append(label_leaf_json)
return label_trees_json
else:
label_trees = dict()
label_trees['labels'] = list()
return label_trees
def get_configuration(db_man, user_id):
at = get_sia_anno_task(db_man,user_id)
return json.loads(at.configuration)
def get_sia_anno_task(db_man, user_id):
for cat in db_man.get_choosen_annotask(user_id):
if cat.anno_task.dtype == dtype.AnnoTask.SIA:
return cat.anno_task
return None
def get_image_progress(db_man, anno_task, img_id, iteration=None):
'''Get image progress for current request
Args:
db_man (access.DBMan): Database manager
anno_task (model.AnnoTask): Annotation task
img_id (int): Id of the current image
iteration (int): int or None. If None all annotations will be considered
'''
anno_ids = list()
if iteration is None:
for anno in db_man.get_all_image_annos(anno_task.idx):
anno_ids.append(anno.idx)
else:
for anno in db_man.get_all_image_annos_by_iteration(anno_task.idx, iteration):
anno_ids.append(anno.idx)
total_image_amount = len(anno_ids)
current_image_number = anno_ids.index(img_id) + 1
return current_image_number, total_image_amount
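# Illustrative sketch (not part of the original file): the pair returned above backs the
# "image N of M" progress display in SIA. The id below is a made-up example value.
#   current, total = get_image_progress(db_man, anno_task, img_id=17)
#   progress_text = f'{current} / {total}'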
def __is_last_image__(db_man, user_id, at_id, iteration, img_id):
"""
:type db_man: lost.db.access.DBMan
"""
# three ways to check
# first: are there some next locked annos for that user ?
image_annos = db_man.get_next_locked_sia_anno(at_id, user_id, iteration)
if image_annos:
# has to be more than one, current viewed image is not part of condition
if len(image_annos) > 1:
return False
# second: are we in a previous view ? - check for next allready labeled anno by last anno
image_anno = db_man.get_next_sia_anno_by_last_anno(at_id, user_id, img_id, iteration)
if image_anno:
return False
# third: is there one next free anno for that user ?
image_anno = db_man.get_next_unlocked_sia_anno(at_id, iteration)
if image_anno:
# found one - lock it !
img = db_man.get_image_anno(image_anno.idx)
img.user_id = user_id
img.state = state.Anno.LOCKED
db_man.save_obj(img)
return False
else:
return True
def update(db_man, data, user_id):
""" Update Image and TwoDAnnotation from SIA
:type db_man: lost.db.access.DBMan
"""
anno_task = get_sia_anno_task(db_man, user_id)
sia_update = SiaUpdate(db_man, data, user_id, anno_task)
return sia_update.update()
def review_update(db_man, data, user_id, pe_id):
""" Update Image and TwoDAnnotation from SIA
:type db_man: lost.db.access.DBMan
"""
pe = db_man.get_pipe_element(pipe_e_id=pe_id)
at = pe.anno_task
sia_update = SiaUpdate(db_man, data, user_id, at, sia_type='review')
return sia_update.update()
def finish(db_man, user_id):
at = get_sia_anno_task(db_man, user_id)
if at.idx:
return set_finished(db_man, at.idx)
else:
return "error: anno_task not found"
def junk(db_man, user_id, img_id):
image_anno = db_man.get_image_anno(img_id) #type: lost.db.model.ImageAnno
if image_anno:
image_anno.state = state.Anno.JUNK
image_anno.user_id = user_id
image_anno.timestamp = datetime.now()
db_man.save_obj(image_anno)
return "success"
else:
return "error: image_anno not found"
class SiaUpdate(object):
def __init__(self, db_man, data, user_id, anno_task, sia_type='sia'):
"""
:type db_man: lost.db.access.DBMan
"""
self.sia_type = sia_type
self.timestamp = datetime.now()
self.db_man = db_man
self.user_id = user_id
self.at = anno_task #type: lost.db.model.AnnoTask
# self.sia_history_file = FileMan(self.db_man.lostconfig).get_sia_history_path(self.at)
self.iteration = db_man.get_pipe_element(pipe_e_id=self.at.pipe_element_id).iteration
self.image_anno = self.db_man.get_image_annotation(data['imgId'])
self.image_anno.timestamp = self.timestamp
if self.image_anno.anno_time is None:
self.image_anno.anno_time = 0.0
if self.sia_type == 'sia':
# Do not update image annotation time for sia review
self.image_anno.anno_time = data['annoTime']
self.b_boxes = list()
self.points = list()
self.lines = list()
self.polygons = list()
self.history_json = dict()
self.history_json['annotations'] = dict()
self.history_json['annotations']['new'] = list()
self.history_json['annotations']['unchanged'] = list()
self.history_json['annotations']['changed'] = list()
self.history_json['annotations']['deleted'] = list()
self._update_img_labels(data)
self.image_anno.is_junk = data['isJunk']
# store certain annotations
if 'bBoxes' in data['annotations']:
self.b_boxes = data['annotations']['bBoxes']
else:
self.b_boxes = None
if 'points' in data['annotations']:
self.points = data['annotations']['points']
else:
self.points = None
if 'lines' in data['annotations']:
self.lines = data['annotations']['lines']
else:
self.lines = None
if 'polygons' in data['annotations']:
self.polygons = data['annotations']['polygons']
else:
self.polygons = None
def _update_img_labels(self, data):
if(data['imgLabelChanged']):
old = set([lbl.label_leaf_id for lbl in self.image_anno.labels])
new = set(data['imgLabelIds'])
to_delete = old - new
to_add = new - old
for lbl in self.image_anno.labels:
if lbl.label_leaf_id in to_delete:
self.image_anno.labels.remove(lbl)
# self.db_man.delete(lbl)
for ll_id in to_add:
self.image_anno.labels.append(model.Label(label_leaf_id=ll_id))
def update(self):
if self.at.pipe_element.pipe.state == state.Pipe.PAUSED:
return "pipe is paused"
if self.b_boxes is not None:
self.__update_annotations(self.b_boxes, dtype.TwoDAnno.BBOX)
if self.points is not None:
self.__update_annotations(self.points, dtype.TwoDAnno.POINT)
if self.lines is not None:
self.__update_annotations(self.lines, dtype.TwoDAnno.LINE)
if self.polygons is not None:
self.__update_annotations(self.polygons, dtype.TwoDAnno.POLYGON)
self.image_anno.state = state.Anno.LABELED
# Update Image Label
# self.image_anno.labels = self.img_labels
self.db_man.add(self.image_anno)
self.db_man.commit()
# self.__update_history_json_file()
update_anno_task(self.db_man, self.at.idx, self.user_id)
return "success"
def __update_annotations(self, annotations, two_d_type):
annotation_json = dict()
annotation_json['unchanged'] = list()
annotation_json['deleted'] = list()
annotation_json['new'] = list()
annotation_json['changed'] = list()
for annotation in annotations:
if annotation['status'] != "database" \
and annotation['status'] != "deleted" \
and annotation['status'] != "new" \
and annotation['status'] != "changed":
error_msg = "Status: '" + str(annotation['status']) + "' is not valid."
raise SiaStatusNotFoundError(error_msg)
for annotation in annotations:
if annotation['status'] == "database":
two_d = self.db_man.get_two_d_anno(annotation['id']) #type: lost.db.model.TwoDAnno
two_d.user_id = self.user_id
two_d.state = state.Anno.LABELED
two_d.timestamp = self.timestamp
two_d.timestamp_lock = self.image_anno.timestamp_lock
if two_d.anno_time is None:
two_d.anno_time = 0.0
# two_d.anno_time += average_anno_time
two_d_json = self.__serialize_two_d_json(two_d)
annotation_json['unchanged'].append(two_d_json)
self.db_man.save_obj(two_d)
elif annotation['status'] == "deleted":
try:
two_d = self.db_man.get_two_d_anno(annotation['id']) #type: lost.db.model.TwoDAnno
two_d_json = self.__serialize_two_d_json(two_d)
annotation_json['deleted'].append(two_d_json)
for label in self.db_man.get_all_two_d_label(two_d.idx):
self.db_man.delete(label)
self.db_man.delete(two_d)
except KeyError:
print('SIA bug backend fix! Do not try to delete annotations that are not in db!')
elif annotation['status'] == "new":
annotation_data = annotation['data']
try:
annotation_data.pop('left')
annotation_data.pop('right')
annotation_data.pop('top')
annotation_data.pop('bottom')
except:
pass
two_d = model.TwoDAnno(anno_task_id=self.at.idx,
img_anno_id=self.image_anno.idx,
timestamp=self.timestamp,
timestamp_lock=self.image_anno.timestamp_lock,
anno_time=annotation['annoTime'],
data=json.dumps(annotation_data),
user_id=self.user_id,
iteration=self.iteration,
dtype=two_d_type,
state=state.Anno.LABELED)
self.db_man.save_obj(two_d)
for l_id in annotation['labelIds']:
label = model.Label(two_d_anno_id=two_d.idx,
label_leaf_id=l_id,
dtype=dtype.Label.TWO_D_ANNO,
timestamp=self.timestamp,
annotator_id=self.user_id,
timestamp_lock=self.image_anno.timestamp_lock,
anno_time=annotation['annoTime'])
self.db_man.save_obj(label)
two_d_json = self.__serialize_two_d_json(two_d)
annotation_json['new'].append(two_d_json)
elif annotation['status'] == "changed":
annotation_data = annotation['data']
try:
annotation_data.pop('left')
annotation_data.pop('right')
annotation_data.pop('top')
annotation_data.pop('bottom')
except:
pass
two_d = self.db_man.get_two_d_anno(annotation['id']) #type: lost.db.model.TwoDAnno
two_d.timestamp = self.timestamp
two_d.timestamp_lock = self.image_anno.timestamp_lock
two_d.data = json.dumps(annotation_data)
two_d.user_id = self.user_id
if two_d.anno_time is None:
two_d.anno_time = 0.0
two_d.anno_time = annotation['annoTime']
two_d.state = state.Anno.LABELED
l_id_list = list()
# get all labels of that two_d_anno.
for label in self.db_man.get_all_two_d_label(two_d.idx):
# save id.
l_id_list.append(label.idx)
# delete labels, that are not in user labels list.
if label.idx not in annotation['labelIds']:
self.db_man.delete(label)
# labels that are in the list get a new anno_time
else:
if label.anno_time is None:
label.anno_time = 0.0
label.anno_time = annotation['annoTime']
label.timestamp = self.timestamp
label.annotator_id = self.user_id
label.timestamp_lock = self.image_anno.timestamp_lock
self.db_man.save_obj(label)
# new labels
for l_id in annotation['labelIds']:
if l_id not in l_id_list:
label = model.Label(two_d_anno_id=two_d.idx,
label_leaf_id=l_id,
dtype=dtype.Label.TWO_D_ANNO,
timestamp=self.timestamp,
annotator_id=self.user_id,
timestamp_lock=self.image_anno.timestamp_lock,
anno_time=annotation['annoTime'])
self.db_man.save_obj(label)
self.db_man.save_obj(two_d)
two_d_json = self.__serialize_two_d_json(two_d)
annotation_json['changed'].append(two_d_json)
else:
continue
self.history_json['annotations'] = annotation_json
return "success"
def __serialize_two_d_json(self, two_d):
two_d_json = dict()
two_d_json['id'] = two_d.idx
two_d_json['user_id'] = two_d.user_id
if two_d.annotator:
two_d_json['user_name'] = two_d.annotator.first_name + " " + two_d.annotator.last_name
else:
two_d_json['user_name'] = None
two_d_json['anno_time'] = two_d.anno_time
two_d_json['data'] = two_d.data
label_list_json = list()
if two_d.labels:
label_json = dict()
label_json['id'] = [lbl.idx for lbl in two_d.labels]
label_json['label_leaf_id'] = [lbl.label_leaf.idx for lbl in two_d.labels]
label_json['label_leaf_name'] = [lbl.label_leaf.name for lbl in two_d.labels]
label_list_json.append(label_json)
two_d_json['labels'] = label_list_json
return two_d_json
# def __update_history_json_file(self):
# # create history directory if not exist
# if self.sia_type == 'sia':
# self.history_json['timestamp'] = self.timestamp.strftime("%Y-%m-%d %H:%M:%S.%f")
# self.history_json['timestamp_lock'] = self.image_anno.timestamp_lock.strftime("%Y-%m-%d %H:%M:%S.%f")
# self.history_json['image_anno_time'] = self.image_anno.anno_time
# self.history_json['user_id'] = self.user_id
# self.history_json['user_name'] = None
# if self.image_anno.annotator:
# self.history_json['user_name'] = self.image_anno.annotator.first_name + " " + self.image_anno.annotator.last_name
# if not os.path.exists(self.sia_history_file):
# with open(self.sia_history_file, 'w') as f:
# event_json = dict()
# json.dump(event_json, f)
# with open(self.sia_history_file) as f:
# data = json.load(f)
# if str(self.image_anno.idx) in data:
# data[str(self.image_anno.idx)].append(self.history_json)
# else:
# data[str(self.image_anno.idx)] = list()
# data[str(self.image_anno.idx)].append(self.history_json)
# with open(self.sia_history_file, 'w') as f:
# json.dump(data, f)
class SiaSerialize(object):
def __init__(self, image_anno, user_id, media_url, is_first_image, is_last_image, current_image_number, total_image_amount):
self.sia_json = dict()
self.image_anno = image_anno #type: lost.db.model.ImageAnno
self.user_id = user_id
self.media_url = media_url
self.is_first_image = is_first_image
self.is_last_image = is_last_image
self.current_image_number = current_image_number
self.total_image_amount = total_image_amount
def serialize(self):
self.sia_json['image'] = dict()
self.sia_json['image']['id'] = self.image_anno.idx
self.sia_json['image']['url'] = "/" + self.image_anno.img_path
self.sia_json['image']['isFirst'] = self.is_first_image
self.sia_json['image']['isLast'] = self.is_last_image
self.sia_json['image']['number'] = self.current_image_number
self.sia_json['image']['amount'] = self.total_image_amount
self.sia_json['image']['isJunk'] = self.image_anno.is_junk
self.sia_json['image']['annoTime'] = self.image_anno.anno_time
if self.image_anno.labels is None:
self.sia_json['image']['labelIds'] = []
else:
self.sia_json['image']['labelIds'] = [lbl.label_leaf_id for lbl in self.image_anno.labels]
self.sia_json['annotations'] = dict()
self.sia_json['annotations']['bBoxes'] = list()
self.sia_json['annotations']['points'] = list()
self.sia_json['annotations']['lines'] = list()
self.sia_json['annotations']['polygons'] = list()
for two_d_anno in self.image_anno.twod_annos: #type: lost.db.model.TwoDAnno
if two_d_anno.dtype == dtype.TwoDAnno.BBOX:
bbox_json = dict()
bbox_json['id'] = two_d_anno.idx
bbox_json['labelIds'] = list()
if two_d_anno.labels: #type: lost.db.model.Label
bbox_json['labelIds'] = [lbl.label_leaf_id for lbl in two_d_anno.labels]
bbox_json['data'] = json.loads(two_d_anno.data)
bbox_json['annoTime'] = two_d_anno.anno_time
self.sia_json['annotations']['bBoxes'].append(bbox_json)
elif two_d_anno.dtype == dtype.TwoDAnno.POINT:
point_json = dict()
point_json['id'] = two_d_anno.idx
point_json['labelIds'] = list()
if two_d_anno.labels: #type: lost.db.model.Label
point_json['labelIds'] = [lbl.label_leaf_id for lbl in two_d_anno.labels]
point_json['data'] = json.loads(two_d_anno.data)
point_json['annoTime'] = two_d_anno.anno_time
self.sia_json['annotations']['points'].append(point_json)
elif two_d_anno.dtype == dtype.TwoDAnno.LINE:
line_json = dict()
line_json['id'] = two_d_anno.idx
line_json['labelIds'] = list()
if two_d_anno.labels: #type: lost.db.model.Label
line_json['labelIds'] = [lbl.label_leaf_id for lbl in two_d_anno.labels]
line_json['data'] = json.loads(two_d_anno.data)
line_json['annoTime'] = two_d_anno.anno_time
self.sia_json['annotations']['lines'].append(line_json)
elif two_d_anno.dtype == dtype.TwoDAnno.POLYGON:
polygon_json = dict()
polygon_json['id'] = two_d_anno.idx
polygon_json['labelIds'] = list()
if two_d_anno.labels: #type: lost.db.model.Label
polygon_json['labelIds'] = [lbl.label_leaf_id for lbl in two_d_anno.labels]
polygon_json['data'] = json.loads(two_d_anno.data)
polygon_json['annoTime'] = two_d_anno.anno_time
self.sia_json['annotations']['polygons'].append(polygon_json)
return self.sia_json
class SiaStatusNotFoundError(Exception):
""" Base class for SiaStatusNotFoundError
"""
pass
def get_last_image_id(dbm, user_id):
at = get_sia_anno_task(dbm, user_id)
if at:
iteration = dbm.get_pipe_element(pipe_e_id=at.pipe_element_id).iteration
tmp_anno = dbm.get_last_edited_sia_anno(at.idx, iteration, user_id)
if tmp_anno:
return tmp_anno.idx - 1
return None
def review(dbm, data, user_id, media_url):
direction = data['direction']
current_idx = data['image_anno_id']
iteration = data['iteration']
pe_id = data['pe_id']
at = dbm.get_pipe_element(pipe_e_id=pe_id).anno_task
first_anno = dbm.get_sia_review_first(at.idx, iteration)
if direction == 'first':
image_anno = first_anno
elif direction == 'next':
image_anno = dbm.get_sia_review_next(at.idx, current_idx, iteration)
elif direction == 'previous':
image_anno = dbm.get_sia_review_prev(at.idx, current_idx, iteration)
if image_anno:
# all_iterations = True
# if iteration:
# all_iterations = False
current_image_number, total_image_amount = get_image_progress(dbm, at, image_anno.idx, iteration)
is_first_image = False
if first_anno.idx == image_anno.idx:
is_first_image = True
is_last_image = False
if current_image_number == total_image_amount:
is_last_image = True
sia_serialize = SiaSerialize(image_anno, user_id, media_url, is_first_image, is_last_image, current_image_number, total_image_amount)
return sia_serialize.serialize()
else:
return 'no annotation found'
def reviewoptions(dbm, pe_id, user_id):
options = {}
pipe_element = dbm.get_pipe_element(pipe_e_id=pe_id)
if pipe_element.state == state.PipeElement.PENDING:
options['max_iteration'] = pipe_element.iteration - 1
else:
options['max_iteration'] = pipe_element.iteration
options['possible_labels'] = get_label_trees(dbm, user_id, pipe_element.anno_task)['labels']
return options
```
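For orientation, here is a sketch of the request payload that `update()` / `SiaUpdate` above appears to expect. Only the key names are taken from the code; the values and the exact bounding-box coordinate fields (`x`/`y`/`w`/`h`) are illustrative assumptions.
```python
# Hypothetical SIA update payload; keys mirror what SiaUpdate reads above,
# values (and the bbox coordinate fields) are invented for illustration.
data = {
    'imgId': 42,                       # idx of the ImageAnno being updated
    'annoTime': 12.7,                  # per-image annotation time (ignored for sia_type='review')
    'isJunk': False,
    'imgLabelChanged': True,
    'imgLabelIds': [3, 7],             # label leaf ids assigned to the whole image
    'annotations': {
        'bBoxes': [{
            'id': 101,                 # db idx, needed for status database/changed/deleted
            'status': 'changed',       # one of: database, deleted, new, changed
            'annoTime': 4.2,
            'labelIds': [3],
            'data': {'x': 0.4, 'y': 0.3, 'w': 0.2, 'h': 0.1},
        }],
        'points': [], 'lines': [], 'polygons': [],
    },
}
# update(db_man, data, user_id) would then persist these changes.
```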
#### File: lost/utils/testils.py
```python
from lost.db import model, dtype
from lost.logic.label import LabelTree
import datetime
import pandas as pd
from lost.logic.pipeline.instance import PipeInstance
def get_user(dbm):
email = '<EMAIL>'
user = None
for u in dbm.get_users():
if u.email == email:
user = u
break
if user is None:
user = model.User(
user_name = 'test',
email=email,
email_confirmed_at=datetime.datetime.utcnow(),
password='<PASSWORD>',
first_name= 'Test',
last_name='User'
)
user.groups.append(model.Group(name=user.user_name,
is_user_default=True))
dbm.add(user)
dbm.commit()
return user
def delete_user(dbm, user):
for g in user.groups:
if g.is_user_default:
dbm.delete(g)
dbm.delete(user)
dbm.commit()
def get_voc_label_tree(dbm):
tree = LabelTree(dbm)
df = pd.read_csv('/code/src/backend/lost/pyapi/examples/label_trees/pascal_voc2012.csv')
root = tree.import_df(df)
if root is None:
name = df[df['parent_leaf_id'].isnull()]['name'].values[0]
tree = LabelTree(dbm, name=name)
return tree
def get_script_pipeline_fragment(dbm):
'''Get a fragment of a pipeline
Script -> AnnoTask:
A Script connected to an AnnoTask
Returns:
:class:`lost.db.model.PipeElement`, :class:`lost.db.model.PipeElement`, :class:`lost.db.model.Pipe`:
(script_element, annotation_task_element, pipeline)
'''
pipe = model.Pipe(name='TestPipe')
dbm.add(pipe)
dbm.commit()
script = model.Script(name='TestScript', path='data/pipes/test/test.py')
dbm.add(script)
dbm.commit()
pe_s = model.PipeElement(pipe_id=pipe.idx, dtype=dtype.PipeElement.SCRIPT)
pe_s.script = script
dbm.add(pe_s)
dbm.commit()
script_result = model.Result()
dbm.add(script_result)
dbm.commit()
anno_task = model.AnnoTask(name='TestAnnoTask')
dbm.add(anno_task)
pe_a = model.PipeElement(pipe_id=pipe.idx, dtype=dtype.PipeElement.ANNO_TASK)
pe_a.anno_task = anno_task
# pe_a.result_in.append(script_result)
# pe_a.result_out.append(script_result)
dbm.add(pe_a)
dbm.commit()
# pe_s.pe_outs.append(pe_a)
# dbm.commit()
# Link elements
res_link_s = model.ResultLink(result_id=script_result.idx,
pe_n=pe_s.idx, pe_out=pe_a.idx
)
res_link_a = model.ResultLink(result_id=script_result.idx,
pe_n=pe_a.idx, pe_out=None
)
dbm.add(res_link_s)
dbm.add(res_link_a)
dbm.commit()
return pe_s, pe_a, pipe
def delete_script_pipeline_fragment(dbm, pipe):
'''Delete a fragment of a pipeline
Script -> AnnoTask:
A Script connected to an AnnoTask
'''
# pi = PipeInstance(dbm, pipe)
# pi.delete_pipeline()
print('We should implement a working clean up here!')
``` |
{
"source": "jonasgrebe/pt-femb-face-embeddings",
"score": 3
} |
#### File: femb/data/face_image_folder_dataset.py
```python
from .face_dataset import FaceDataset
import os
import logging
class FaceImageFolderDataset(FaceDataset):
def __init__(self, auto_initialize=True, **kwargs):
super(FaceImageFolderDataset, self).__init__(**kwargs)
self.img_paths = []
self.img_ids = []
self.img_id_labels = []
if auto_initialize:
self.init_from_directories()
def init_from_directories(self):
if not self.dataset_exists():
logging.warning(f"The dataset {self.name} does not contain any images under {os.path.join(self.root, self.name, 'images')}")
return
logging.info(f"Creating a FaceImageFolderDataset ({self.name}) with data from {os.path.join(self.root, self.name, 'images')}.")
images_dir = os.path.join(self.root, self.name, 'images')
for label, identity in enumerate(os.listdir(images_dir)):
id_path = os.path.join(images_dir, identity)
for img_file in os.listdir(id_path):
self.img_paths.append(os.path.join(id_path, img_file))
self.img_ids.append(identity)
self.img_id_labels.append(label)
def dataset_exists(self):
images_dir = os.path.join(self.root, self.name, 'images')
return os.path.isdir(images_dir) and len(os.listdir(images_dir)) > 0
```
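The loop in `init_from_directories()` implies a fixed folder layout. The sketch below shows that layout and a hypothetical instantiation; it assumes `root` and `name` are keyword arguments consumed by the `FaceDataset` base class, which is not shown here.
```python
# Expected layout:
#   <root>/<name>/images/<identity>/<image file>
# e.g.
#   data/my_faces/images/person_0001/img_0001.jpg
#   data/my_faces/images/person_0002/img_0001.jpg
dataset = FaceImageFolderDataset(root='data', name='my_faces')
# dataset.img_paths, dataset.img_ids and dataset.img_id_labels are now filled.
```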
#### File: femb/evaluation/verification_evaluator.py
```python
import torch
from tqdm import tqdm
import numpy as np
import logging
from itertools import combinations, product
from sklearn.metrics import roc_curve
from .similarity import get_similarity_function
from .evaluator import Evaluator
class VerificationEvaluator(Evaluator):
def __init__(self, similarity, metrics=['eer'], limit=50, batch_size=32):
self.similarity = similarity
self.limit = limit
def evaluate(self, features, labels):
genuine_scores, imposter_scores = compute_comparison_scores(features, labels, self.similarity, self.limit)
fpr, tpr, threshold = compute_roc(genuine_scores, imposter_scores)
fnr = 1 - tpr
eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
stats = {
'eer': eer,
'eer_threshold': eer_threshold,
# add other metrics here
}
return stats
def compute_comparison_scores(features, labels, similarity, limit):
assert len(features) > 0
similarity = get_similarity_function(similarity)
distinct_labels = np.unique(labels).astype(int)
genuine_idxs = {label: np.random.permutation(np.argwhere(labels == label))[:limit] for label in distinct_labels}
imposter_idxs = {label: np.random.permutation(np.argwhere(labels != label))[:limit] for label in distinct_labels}
genuine_scores = []
imposter_scores = []
for label in distinct_labels:
for idx0, idx1 in combinations(genuine_idxs[label], r=2):
score = similarity(features[idx0], features[idx1])
genuine_scores.append(score)
for idx0, idx1 in product(genuine_idxs[label], imposter_idxs[label]):
score = similarity(features[idx0], features[idx1])
imposter_scores.append(score)
return np.nan_to_num(np.array(genuine_scores)), np.nan_to_num(np.array(imposter_scores))
def compute_roc(genuine, imposter):
return roc_curve(np.hstack([np.ones(len(genuine)), np.zeros(len(imposter))]), np.hstack([genuine, imposter]))
```
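For reference, the EER read off in `evaluate()` is the operating point where the false positive rate equals the false negative rate (1 − TPR). A small self-contained sketch of the same computation on toy scores:
```python
import numpy as np
from sklearn.metrics import roc_curve

genuine = np.array([0.90, 0.80, 0.75, 0.60])   # same-identity similarity scores
imposter = np.array([0.55, 0.40, 0.30, 0.20])  # different-identity scores

fpr, tpr, thr = roc_curve(np.hstack([np.ones(4), np.zeros(4)]),
                          np.hstack([genuine, imposter]))
fnr = 1 - tpr
idx = np.nanargmin(np.abs(fnr - fpr))
print('EER threshold:', thr[idx], 'EER:', fpr[idx])
```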
#### File: femb/headers/arcface.py
```python
from .arcmargin import ArcMarginHeader
class ArcFaceHeader(ArcMarginHeader):
""" ArcFaceHeader class"""
def __init__(self, in_features, out_features, s=64.0, m=0.5):
super(ArcFaceHeader, self).__init__(in_features=in_features, out_features=out_features, s=s, m2=m)
```
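In the standard ArcFace formulation this header turns the target-class logit into s·cos(θ_y + m) while leaving other classes at s·cos(θ_j), assuming L2-normalized embeddings and class weights. Below is a minimal numpy sketch of that margin; it is not the actual `ArcMarginHeader` implementation, which is not shown here.
```python
import numpy as np

def arcface_logit(cos_theta, s=64.0, m=0.5, is_target=True):
    """Additive angular margin applied to the cosine of one class."""
    if not is_target:
        return s * cos_theta
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    return s * np.cos(theta + m)

print(arcface_logit(0.8))                   # penalized target logit
print(arcface_logit(0.8, is_target=False))  # plain scaled logit
```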
#### File: pt-femb-face-embeddings/femb/model.py
```python
import os
from tqdm import tqdm
import torch
import torchvision
import numpy as np
import random
import cv2
import logging
from .backbones import count_parameters
class FaceEmbeddingModel:
def __init__(self, backbone, header, loss):
self.backbone = backbone
self.header = header
self.loss = loss
# join parameter sets of backbone and header
self.params = list(backbone.parameters())
self.params.extend(list(header.parameters()))
self.params = [{'params': backbone.parameters()}, {'params': header.parameters()}]
self.name = f"model_{backbone.__class__.__name__}-{header.__class__.__name__}-{loss.__class__.__name__}".lower()
print(f"Built Embedding Model: [{backbone.__class__.__name__} -> {header.__class__.__name__} -> {loss.__class__.__name__}]")
print(f"#Trainable parameters: {count_parameters(backbone)} (Backbone)")
print(f"#Trainable parameters: {count_parameters(header)} (Header)")
print(f"#Trainable parameters: {count_parameters(loss)} (Loss)")
self.loss_window = 100
def fit(self, train_dataset, batch_size, device, optimizer, max_epochs=0, max_training_steps=0, lr_global_step_scheduler=None, lr_epoch_scheduler=None, evaluator=None, val_dataset=None, evaluation_steps=0, tensorboard=False):
assert max_epochs > 0 or max_training_steps > 0
training_id = self.name + '_' + str(random.randint(0, 9999999)).zfill(7)
training_path = os.path.join('output', training_id)
if not os.path.exists(training_path):
os.makedirs(training_path)
logging.basicConfig(
filename=os.path.join(training_path, 'training.log'),
level=logging.INFO,
format='[%(levelname)s] %(asctime)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
print(f"Logs will be written to {os.path.join(training_path, 'training.log')}")
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
if tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter(logdir=os.path.join('tensorboard', training_id))
def evaluate(global_step):
features, labels, thumbnails = self.encode_dataset(val_dataset, batch_size=batch_size, device=device, return_labels=True, return_thumbnails=True)
stats = evaluator(features, labels)
logging.info(evaluator.__class__.__name__ + ': ' + str(stats))
writer.add_scalars(evaluator.__class__.__name__, stats, global_step=global_step)
writer.add_embedding(features, metadata=labels, label_img=thumbnails, global_step=global_step)
else:
def evaluate(global_step):
features, labels = self.encode_dataset(val_dataset, batch_size=batch_size, device=device, return_labels=True)
stats = evaluator(features, labels)
logging.info(evaluator.__class__.__name__ + ': ' + str(stats))
if type(device) == str:
device = torch.device(device)
self.header.to(device)
self.backbone.to(device)
global_step = 0
epoch = 0
while(True):
logging.info(f"Epoch {epoch}:")
train_losses = np.empty(len(train_dataloader))
self.header.train()
self.backbone.train()
pbar = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
for step, batch in pbar:
# skip batch if singleton
if len(batch[0]) <= 1:
continue
inputs = batch[0].to(device)
labels = batch[1].to(device).long().view(-1)
features = self.backbone(inputs)
features = features.view(features.shape[0], features.shape[1])
outputs = self.header(features, labels)
loss_value = self.loss(outputs, labels)
train_losses[step] = loss_value
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
status = self.get_status_format_string(epoch, step, train_dataloader, global_step, max_epochs, max_training_steps, train_losses)
pbar.set_description_str(status)
if evaluator is not None and evaluation_steps > 0 and step % evaluation_steps == 0:
evaluate(global_step)
if tensorboard:
writer.add_scalar(self.loss.__class__.__name__, loss_value, global_step=global_step)
global_step += 1
if max_training_steps > 0 and global_step >= max_training_steps:
return
if lr_global_step_scheduler is not None:
lr_global_step_scheduler.step()
if evaluator is not None and max_epochs > 0 and evaluation_steps == 0:
evaluate(global_step)
epoch += 1
if max_epochs > 0 and epoch >= max_epochs:
return
if lr_epoch_scheduler is not None:
lr_epoch_scheduler.step()
def get_status_format_string(self, epoch, step, train_dataloader, global_step, max_epochs, max_training_steps, train_losses):
epoch_status = f"Epoch: {epoch+1}"
status = [epoch_status]
global_step_status = f"Global Step: {global_step}/{max_training_steps} ({global_step / max_training_steps * 100:.2f} %)"
status.append(global_step_status)
status.append(f"Train_Loss: {train_losses[max([0, step-self.loss_window+1]):step+1].mean()}")
status = ' - '.join(status)
return status
def encode_dataset(self, dataset, batch_size, device, return_labels=False, return_thumbnails=False, thumbnail_size=32):
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)
all_features = []
all_labels = []
all_thumbnails = []
self.backbone.eval()
with torch.no_grad():
for step, batch in enumerate(dataloader):
inputs = batch[0]
labels = batch[1].view(-1)
features = self.backbone(inputs.to(device))
features = features.view(features.shape[0], features.shape[1]).cpu().numpy()
all_features.extend([f for f in features])
if return_labels:
all_labels.extend([l for l in labels.numpy()])
if return_thumbnails:
all_thumbnails.extend([cv2.resize(img, dsize=(thumbnail_size, thumbnail_size), interpolation=cv2.INTER_CUBIC) for img in inputs.numpy().transpose(0, 2, 3, 1)])
self.backbone.train()
encoded = [np.array(all_features)]
if return_labels:
encoded.append(np.array(all_labels))
if return_thumbnails:
all_thumbnails = np.array(all_thumbnails)
if len(all_thumbnails.shape) == 3:
all_thumbnails = np.expand_dims(all_thumbnails, axis=-1)
encoded.append(all_thumbnails.transpose(0, 3, 1, 2))
return encoded
``` |
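A hypothetical wiring of the class above: `my_backbone`, `my_header` and the datasets are placeholders, and only the `FaceEmbeddingModel` constructor and `fit()` / `encode_dataset()` signatures are taken from the code.
```python
import torch

# placeholders: any embedding backbone, a header such as ArcFaceHeader,
# and a classification criterion would fit this interface
model = FaceEmbeddingModel(backbone=my_backbone, header=my_header,
                           loss=torch.nn.CrossEntropyLoss())
optimizer = torch.optim.SGD(model.params, lr=0.1, momentum=0.9)

model.fit(train_dataset=train_ds, batch_size=64, device='cuda',
          optimizer=optimizer, max_epochs=10)
features, labels = model.encode_dataset(val_ds, batch_size=64,
                                        device='cuda', return_labels=True)
```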
{
"source": "JonasGroeger/ars",
"score": 3
} |
#### File: JonasGroeger/ars/ars.py
```python
import json
import pathlib
import requests
import untangle
class ArsCodelistProvider(object):
def __init__(self):
self.api = "https://www.xrepository.de/api"
self.ars_urn = "urn:de:bund:destatis:bevoelkerungsstatistik:schluessel:rs"
self.ars_list = []
latest_xml = self._get_latest_xml()
self.version_uri = (
latest_xml.gc_CodeList.Identification.CanonicalVersionUri.cdata
)
for entry in latest_xml.gc_CodeList.SimpleCodeList.Row:
ars = entry.Value[0].SimpleValue.cdata
name = entry.Value[1].SimpleValue.cdata
self.ars_list.append({"name": name, "ars": ars})
def write(self, out_dir):
out_file = out_dir.joinpath(self.version_uri + ".json")
latest_file = out_dir.joinpath("latest.json")
to_write = {
"data": self.ars_list,
"urn": self.ars_urn,
"version_uri": self.version_uri,
"version": self.version_uri.rsplit("_", 1)[-1],
}
with open(out_file, "w", encoding="UTF-8") as f:
json.dump(to_write, f, indent=2, ensure_ascii=False)
with open(latest_file, "w", encoding="UTF-8") as f:
json.dump(to_write, f, indent=2, ensure_ascii=False)
def _get_latest_urn(self):
xml = untangle.parse(f"{self.api}/codeliste/{self.ars_urn}/gueltigeVersion")
return xml.dat_VersionCodeliste.dat_kennung.cdata
def _get_all_urns(self):
xml = untangle.parse(f"{self.api}/xrepository/{self.ars_urn}")
return map(lambda x: x.cdata, xml.dat_Codeliste.dat_versionCodeliste_kennung)
def _get_latest_xml(self):
latest_urn = self._get_latest_urn()
latest = requests.get(f"{self.api}/version_codeliste/{latest_urn}/genericode")
return untangle.parse(latest.text)
if __name__ == "__main__":
PROJECT_DIR = pathlib.Path(__file__).parent
acp = ArsCodelistProvider()
acp.write(PROJECT_DIR.joinpath("data"))
``` |
{
"source": "JonasGroeger/SPEER",
"score": 3
} |
#### File: JonasGroeger/SPEER/packet.py
```python
import struct
class Packet(object):
def __init__(self, sender, broker, receiver, type, data):
self.sender = sender
self.broker = broker
self.receiver = receiver
self.type = type
self.data = data
def pack(self):
sender_enc = self.sender.encode()
broker_enc = self.broker.encode()
receiver_enc = self.receiver.encode()
type_enc = self.type.encode()
data_enc = self.data.encode()
return struct.pack(
">IIIII{}s{}s{}s{}s{}s".format(len(sender_enc), len(broker_enc), len(receiver_enc), len(type_enc),
len(data_enc)),
len(sender_enc),
len(broker_enc),
len(receiver_enc),
len(type_enc),
len(data_enc),
sender_enc,
broker_enc,
receiver_enc,
type_enc,
data_enc,
)
@staticmethod
def retrieve_packet(packet):
return Packet(*Packet.unpack(packet))
@staticmethod
def unpack(packet):
sender_len = struct.unpack(">I", packet[0:4])[0]
broker_len = struct.unpack(">I", packet[4:8])[0]
receiver_len = struct.unpack(">I", packet[8:12])[0]
type_len = struct.unpack(">I", packet[12:16])[0]
data_len = struct.unpack(">I", packet[16:20])[0]
return [x for x in
map(lambda x: x.decode(),
struct.unpack(">{}s{}s{}s{}s{}s".format(sender_len, broker_len, receiver_len, type_len, data_len),
packet[20:]))
]
def __repr__(self):
return 'Packet(' + ', '.join([self.sender, self.broker, self.receiver, self.type, self.data]) + ')'
def __eq__(self, other):
try:
return all(getattr(self, key) == getattr(other, key)
for key in self.__dict__ if not key.startswith('_'))
except AttributeError:
return False
if __name__ == '__main__':
assert ["Alice", "Broker1", "Bob", "Message", "Hello World"] == Packet.unpack(
Packet("Alice", "Broker1", "Bob", "Message", "Hello World").pack())
``` |
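The wire format produced by `pack()` is five big-endian uint32 length fields followed by the five UTF-8 encoded strings, so the size and round-trip behaviour are easy to check:
```python
p = Packet("Alice", "Broker1", "Bob", "Message", "Hi")
raw = p.pack()
# 20 header bytes (5 * uint32 lengths: 5, 7, 3, 7, 2) + 24 payload bytes
assert len(raw) == 20 + 5 + 7 + 3 + 7 + 2
assert Packet.retrieve_packet(raw) == p
```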
{
"source": "Jonas-Grundler/Warehouse-Simulation-Tool",
"score": 4
} |
#### File: Warehouse-Simulation-Tool/simulation_tool/data.py
```python
import pandas as pd
import numpy as np
'''
#############################################################################
DESCRIPTION DATA.PY
Author: <NAME> (University of Edinburgh)
This module can be used to generate random order data.
Functions:
- generate_orders(): Generate some input data (order information)
for our simulation experiments.
#############################################################################
'''
def generate_orders(seed_no, simulation_period = 120, backlog = 100, interval = 15, arrival_mean = 5, arrival_stddev = 0, arrival_list = None,
min_lead = 30, avg_items_per_order = 1.5, first_due = 30, inter_due = 30,
aisle_no = 10, aisle_length = 45, log_file = 'logging.txt', metrics_file = 'metrics.txt'):
'''
Generate some input data (order information) for our simulation experiments.
Users can specify their desired time horizon, backlog of orders,
arrival process as well as many other parameters and receive a data frame
containing the arrival and departure times, size and location of the
various generated orders.
Inputs:
seed_no (int): random seed for replication purposes
simulation_period (int): time horizon of simulation (in minutes)
backlog (int): number of orders in backlog at t=0
interval (int): interval in which lambda changes (exponential arrival process)
arrival_mean (float): mean of lambda (exponential arrival process)
arrival_stddev (float): standard dev. of lambda (exponential arrival process)
arrival_list (list): list with prespecified lambdas (exponential arrival process);
alternatively to arrival_mean and arrival_stddev
min_lead (int): minimum time between order arrival and departure
avg_items_per_order (float): avg. no. of items (SKUs) included in a single order
first_due (int): departure time of first vehicle (in minutes after t=0)
inter_due (int): time between two vehicle departures
aisle_no (int): number of aisles in considered one-block warehouse
aisle_length (int): length of aisles (in meters) in considered one-block warehouse
log_file (.txt): file for simulation run protocol
metrics_file (.txt): file for simulation run results
Outputs:
df (DataFrame): DataFrame containing information on
- order ID
- order arrivaltime
- order departuretime
- number of items
- size of each item
- location of each item (x,y)
'''
#WRITE HEADER FOR RESULT FILES
for files in [log_file, metrics_file]:
file = open(files, 'a')
file.write(f"" + '\n')
file.write(f"###### DATA INFORMATION ########"+ '\n')
file.write(f"simulation_period: {simulation_period}" + '\n')
file.write(f"backlog: {backlog}" + '\n')
file.write(f"min_lead: {min_lead}" + '\n')
if(arrival_list != None):
file.write(f"arrivals: list = {arrival_list} (interval = {interval})" + '\n')
else:
file.write(f"arrivals: mean = {arrival_mean}, stddev = {arrival_stddev} (interval = {interval})" + '\n')
file.write(f"seed_no: {seed_no}" + '\n')
file.write(f"" + '\n')
file.close()
#Initialize df
cols = ["order_ID", "arrivaltime", "departuretime", "size", "x_coord", "y_coord"]
df = pd.DataFrame(columns = cols)
#Initialize random number generator and other parameters
rng = np.random.default_rng(seed = seed_no)
t = 0
d = 1
order_ID = 0
# GENERATE BACKLOG
for i in range(backlog):
arrivaltime = t
order_ID = order_ID + 1
#Assign order to one of the vehicles and get departuretime
lead = rng.binomial(n = 5, p = 0.1)
departuretime = first_due + lead * inter_due
#Generate no_items included in order
#for each order: generate row entry
no_items = rng.geometric(1/avg_items_per_order)
for k in range(no_items):
size = rng.integers(1,10)
x_coord = rng.integers(0, aisle_no)
y_coord = rng.integers(1, aisle_length+1)
df = df.append({"order_ID": order_ID, "arrivaltime": arrivaltime,
"departuretime": departuretime, "size": size,
"x_coord": x_coord, "y_coord": y_coord}, ignore_index=True)
# GENERATE NEW ARRIVING ORDERS
#Determine arrivalrate
# Option 1: specific arrival list as input
if(arrival_list != None):
arrivalrate = arrival_list[0]
# Option 2: generate normally distributed arrival rate
else:
arrivalrate = rng.normal(arrival_mean, arrival_stddev)
#Note: arrival rates <= 0 are not valid
if arrivalrate < 0:
arrivalrate = 0.05
# Generate first arrivaltime
t = rng.exponential(1/arrivalrate)
while t < simulation_period:
arrivaltime = t
order_ID = order_ID + 1
#Assign order to one of the vehicles and get departuretime
earliest_departure = t + min_lead
index_earliest_vehicle = max(0,np.ceil((earliest_departure-first_due)/inter_due))
lead = rng.binomial(n = 5, p = 0.1)
departuretime = first_due + (index_earliest_vehicle + lead)*inter_due
#Generate no_items included in order
#for each order: generate row entry
no_items = rng.geometric(1/avg_items_per_order)
for k in range(no_items):
size = rng.integers(1,10)
x_coord = rng.integers(0, aisle_no)
y_coord = rng.integers(1, aisle_length+1)
df = df.append({"order_ID": order_ID, "arrivaltime": arrivaltime,
"departuretime": departuretime, "size": size,
"x_coord": x_coord, "y_coord": y_coord}, ignore_index=True)
#Check if arrivalrate has to be updated
if(arrivaltime > d*interval):
#Update interval_counter
d += 1
#Determine arrivalrate
# Option 1: specific arrival list as input
if(arrival_list != None):
arrivalrate = arrival_list[d-1]
# Option 2: generate normally distributed arrival rate
else:
arrivalrate = rng.normal(arrival_mean, arrival_stddev)
#Note: arrival rates <= 0 are not valid
if arrivalrate < 0:
arrivalrate = 0.05
#Generate next arrivaltime
t = arrivaltime + rng.exponential(1/arrivalrate)
#Change data types of columns
df = df.astype({"order_ID": int, "arrivaltime": float, "departuretime": float, "size": int, "x_coord": int, "y_coord":int})
return df
```
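A minimal invocation sketch of the generator above (note that it appends its parameter header to the two text files passed in):
```python
df = generate_orders(seed_no=42, simulation_period=60, backlog=20,
                     log_file='logging.txt', metrics_file='metrics.txt')
print(df.columns.tolist())
# ['order_ID', 'arrivaltime', 'departuretime', 'size', 'x_coord', 'y_coord']
print(df.head())
```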
#### File: Warehouse-Simulation-Tool/simulation_tool/evaluation.py
```python
import numpy as np
import pandas as pd
import simulation_tool.data as dax
import simulation_tool.initialization as init
import simulation_tool.measures as ms
import simulation_tool.distance as ds
import simulation_tool.batching as bt
'''
#############################################################################
DESCRIPTION EVALUATION.PY
Author: <NAME> (University of Edinburgh)
This module mainly fulfills two tasks. Firstly, it helps to translate the ’raw’ output
data coming from the simulate system() function into reasonable performance metrics.
Secondly, the package ensures that a detailed protocol of each individual test run is created.
Functions:
- log(): Write entry for different 'events' to log_file
- evaluate_system(): Evaluate simulation experiments by calculating various KPI's.
#############################################################################
'''
def log(log_file, event, backlog = None, backlog_size = None, t = None, batch = None, batch_size = None, used_method = None, i = None, tour_time = None):
'''
Write entry for different 'events' to log_file.
Inputs:
log_file (.txt): File for simulation run protocol
event (string): Event that should be written to log_file
- backlog_before
- backlog_after
- batch_created
- picker_tour
backlog (int): Number of orders currently in backlog
backlog_size (int): Current backlog size
t (float): Current point in time (in min)
batch (list): List of orders included in current batch
batch_size (int): Current batch size
used_method (string): String incl. used batching method (for protocol)
i (int): Picker ID
tour_time (float): Tour time of next batch
Outputs:
None. Write various events to log_file, e.g:
- Backlog before batching: 76 orders (size: 578)
- Backlog after batching: 38 orders (size: 299)
- TIME 19.9: Batch created with orders [179, 182, ..., 133]
- Picker 3 starts tour (tour_time: 16.88 min).
'''
file = open(log_file, 'a')
if(event == "backlog_before"):
file.write(f"Backlog before batching: {backlog} orders (size: {backlog_size})"+ '\n')
elif(event == "backlog_after"):
file.write(f"Backlog after batching: {backlog} orders (size: {backlog_size})"+ '\n')
elif(event == "batch_created"):
file.write(f"TIME {np.round(t,1)}: Batch created with orders {batch} (size: {batch_size}). {used_method}"+ '\n')
elif(event == "picker_tour"):
file.write(f"Picker {i+1} starts tour (tour_time: {np.round(tour_time,2)} min)."+ '\n')
file.close()
return None
def evaluate_system(eval_df, picker_df, simulation_period, metrics_file):
'''
Evaluate simulation experiments by calculating various KPI's.
Inputs:
eval_df (DataFrame): DataFrame containing order statistics
picker_df (DataFrame): DataFrame containing picker statistics
simulation_period (int): Time horizon of simulation (in min)
metrics_file (.txt): File for simulation run results
Outputs:
stats (DataFrame): DataFrame with various KPI's for simulation run
Customer Service Metrics:
- no_finished
- no_delayed
- avg_delay_time
- avg_waiting_time
- avg_service_time
- delivery_rate
- delay_finished_ratio
Efficiency metrics:
- total_travel_time
- time_per_item
- items_per_tour
Additionally all KPI's are written to the metrics_file.
'''
#Allow for chained pd.assignment
pd.options.mode.chained_assignment = None
#Get total no. of orders
total_no_orders = eval_df.loc[:,"order_ID"].nunique()
### CUSTOMER SERVICE METRICS
#Calculate helper columns
eval_df.loc[:,"started"] = np.where(eval_df.loc[:,"batch_start"] <= simulation_period, 1, 0)
eval_df.loc[:,"finished"] = np.where(eval_df.loc[:,"batch_end"] <= simulation_period, 1, 0)
eval_df.loc[:,"due"] = np.where(eval_df.loc[:,"departuretime"] <= simulation_period, 1, 0)
eval_df.loc[:,"now_delayed"] = np.where((eval_df["batch_end"] > eval_df["departuretime"]), 1, 0)
eval_df.loc[:,"later_delayed"] = np.where((eval_df.loc[:,"started"] == 0) & (eval_df.loc[:,"due"] == 1), 1, 0)
eval_df.loc[:,"delayed"] = np.where((eval_df["now_delayed"] == 1) | (eval_df["later_delayed"] == 1), 1, 0)
#Calculate number components
no_finished = eval_df.loc[:,"finished"].sum()
no_delayed = eval_df.loc[:,"delayed"].sum()
now_delayed = eval_df.loc[:,"now_delayed"].sum()
later_delayed = eval_df.loc[:,"later_delayed"].sum()
delivery_rate = 100*(no_finished/total_no_orders)
delay_finished_ratio = 100*(no_delayed/no_finished)
#Calculate time components
eval_df.loc[:,"delay_time"] = np.where(eval_df.loc[:,"batch_end"] <= eval_df.loc[:,"departuretime"], 0, eval_df.loc[:,"batch_end"] - eval_df.loc[:,"departuretime"])
eval_df.loc[:,"waiting_time"] = eval_df.loc[:,"batch_start"] - eval_df.loc[:,"arrivaltime"]
eval_df.loc[:,"serivce_time"] = eval_df.loc[:,"batch_end"] - eval_df.loc[:,"batch_start"]
avg_delay_time = eval_df[eval_df["delay_time"] > 0]["delay_time"].mean()
avg_waiting_time = eval_df.loc[:,"waiting_time"].mean()
avg_service_time = eval_df.loc[:,"service_time"].mean()
### EFFICIENCY METRICS
no_tours = picker_df.loc[:,"no_tours"].sum()
no_orders = picker_df.loc[:,"no_orders"].sum()
no_items = picker_df.loc[:,"no_items"].sum()
total_travel_time = picker_df.loc[:,"total_travel_time"].sum()
items_per_tour = no_items/no_tours
time_per_item = total_travel_time/no_items
#Write to metrics_file
file = open(metrics_file, 'a')
file.write(""+ '\n')
file.write("CUSTOMER SERVICE METRICS"+ '\n')
file.write(f"no_finished: {no_finished}"+ '\n')
file.write(f"no_delayed: {no_delayed} (now: {now_delayed}, later: {later_delayed})"+ '\n')
file.write(f"avg_delay_time: {avg_delay_time}"+ '\n')
file.write(f"avg_waiting_time: {avg_waiting_time}"+ '\n')
file.write(f"avg_service_time: {avg_service_time}"+ '\n')
file.write(f"delivery_rate: {delivery_rate}"+ '\n')
file.write(f"delay_finished_ratio: {delay_finished_ratio}"+ '\n')
file.write(""+ '\n')
file.write("EFFICIENCY METRICS"+ '\n')
file.write(f"total_travel_time: {total_travel_time}"+ '\n')
file.write(f"time_per_item: {time_per_item}"+ '\n')
file.write(f"items_per_tour: {items_per_tour}"+ '\n')
file.write(""+ '\n')
file.close()
#Append statistics to stats dictionary
stats = {"no_finished": no_finished, "no_delayed": no_delayed, "avg_delay_time": avg_delay_time,
"avg_waiting_time": avg_waiting_time, "avg_service_time": avg_service_time, "delivery_rate": delivery_rate,
"delay_finished_ratio": delay_finished_ratio, "total_travel_time": total_travel_time, "time_per_item": time_per_item,
"items_per_tour": items_per_tour}
return stats
``` |
{
"source": "jonashaag/CountNet",
"score": 2
} |
#### File: jonashaag/CountNet/predict_api.py
```python
import base64
import json
import numpy as np
from werkzeug.wrappers import Request, Response
import predict
def decode_audio(audio_bytes):
return np.frombuffer(base64.b64decode(audio_bytes), dtype="float32")
def make_app(estimate_func):
def app(environ, start_response):
inputs = json.loads(Request(environ).get_data())
outputs = []
for inp in inputs:
try:
est = int(estimate_func(decode_audio(inp)))
except Exception as e:
print(f"Error estimating speaker count for input {len(outputs)}: {e}")
est = None
outputs.append(est)
return Response(json.dumps(outputs))(environ, start_response)
return app
if __name__ == "__main__":
import argparse
import functools
from werkzeug.serving import run_simple
parser = argparse.ArgumentParser(
description="Run simple JSON api server to predict speaker count"
)
parser.add_argument("--model", default="CRNN", help="model name")
args = parser.parse_args()
model = predict.load_model(args.model)
scaler = predict.load_scaler()
app = make_app(functools.partial(predict.count, model=model, scaler=scaler))
run_simple("0.0.0.0", 5000, app, use_debugger=True)
``` |
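A hypothetical client for the server above: each input is a float32 audio buffer, base64-encoded and sent as a JSON list; the response is a JSON list with one speaker-count estimate (or null on error) per input. The port follows the `run_simple()` call; the sample rate and duration below are placeholder assumptions.
```python
import base64, json
import numpy as np
import requests  # any HTTP client works

audio = np.zeros(16000, dtype="float32")  # placeholder: 1 s of silence
payload = [base64.b64encode(audio.tobytes()).decode()]

resp = requests.post("http://localhost:5000/", data=json.dumps(payload))
print(resp.json())  # e.g. [0], or [null] if estimation failed
```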
{
"source": "jonashaag/django-autocomplete-light",
"score": 3
} |
#### File: autocomplete_light/autocomplete/base.py
```python
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
__all__ = ('AutocompleteInterface', 'AutocompleteBase')
class AutocompleteInterface(object):
"""
This is the minimum to implement in a custom Autocomplete class. It has two
attributes:
values
A list of values which validate_values() and choices_for_values()
should use.
request
A request object which choices_for_request() and autocomplete_html()
should use.
An autocomplete proposes "choices". A choice has a "value". When the user
selects a "choice", then it is converted to a "value".
"""
def __init__(self, request=None, values=None):
"""
Class constructor sets the given request and values as instance
attributes, casting values to list if necessary.
"""
self.request = request
if hasattr(values, '__iter__'):
self.values = values
else:
self.values = [values]
def autocomplete_html(self):
"""
Return the HTML autocomplete that should be displayed under the text
input. Use self.request if set.
"""
raise NotImplementedError()
def validate_values(self):
"""
Return True if self.values are all valid.
"""
raise NotImplementedError()
def choices_for_values(self):
"""
Return the list of choices corresponding to self.values.
"""
raise NotImplementedError()
def get_absolute_url(self):
"""
Return the absolute url for this autocomplete, using
autocomplete_light_autocomplete url
"""
try:
return urlresolvers.reverse('autocomplete_light_autocomplete',
args=(self.__class__.__name__,))
except urlresolvers.NoReverseMatch, e:
# Such error will ruin form rendering. It would be automatically
# silenced because of e.silent_variable_failure=True, which is
# something we don't want. Let's give the user a hint:
raise ImproperlyConfigured("URL lookup for autocomplete '%s' "
"failed. Have you included autocomplete_light.urls in "
"your urls.py?" % (self.__class__.__name__,))
class AutocompleteBase(AutocompleteInterface):
"""
A basic implementation of AutocompleteInterface that renders HTML and
should fit most cases. However, it requires to overload
choices_for_request().
"""
choice_html_format = u'<span class="div" data-value="%s">%s</span>'
empty_html_format = u'<span class="div"><em>%s</em></span>'
autocomplete_html_format = u'%s'
add_another_url_name = None
def choices_for_request(self):
"""
Return the list of choices that are available. Use self.request if
set; it may also be used by autocomplete_html().
"""
raise NotImplementedError()
def validate_values(self):
"""
Return True if all the values are available in choices_for_values().
"""
return len(self.choices_for_values()) == len(self.values)
def autocomplete_html(self):
"""
Simple rendering of the autocomplete.
"""
html = []
for choice in self.choices_for_request():
html.append(self.choice_html(choice))
if not html:
html = self.empty_html_format % _('no matches found').capitalize()
return self.autocomplete_html_format % ''.join(html)
def choice_html(self, choice):
"""
Return a choice formatted according to self.choice_html_format.
"""
return self.choice_html_format % (
self.choice_value(choice), self.choice_label(choice))
def choice_value(self, choice):
"""
Convert a choice to a value.
"""
return unicode(choice)
def choice_label(self, choice):
"""
Convert a choice to a label.
"""
return unicode(choice)
```
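As the class docstring notes, `AutocompleteBase` only requires `choices_for_request()` to be implemented (plus `choices_for_values()` if validation is used). A minimal hypothetical subclass with a hard-coded, in-memory choice list:
```python
class CountryAutocomplete(AutocompleteBase):
    # purely illustrative in-memory choices
    countries = [u'France', u'Germany', u'Spain', u'Sweden']

    def choices_for_request(self):
        q = self.request.GET.get('q', u'').lower()
        return [c for c in self.countries if q in c.lower()]

    def choices_for_values(self):
        return [c for c in self.countries if c in self.values]
```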
#### File: autocomplete_light/autocomplete/generic.py
```python
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from autocomplete_light.generic import GenericModelChoiceField
from .model import AutocompleteModel
__all__ = ['AutocompleteGeneric']
class AutocompleteGeneric(AutocompleteModel):
choices = None
search_fields = None
def choice_value(self, choice):
"""
Rely on GenericModelChoiceField to return a string containing the
content type id and object id of the result, because this autocomplete
is made for that field and it avoids code duplication.
"""
field = GenericModelChoiceField()
return field.prepare_value(choice)
def validate_values(self):
"""
Ensure that every choice is part of a queryset.
"""
assert self.choices, 'autocomplete.choices should be a queryset list'
for value in self.values:
if not isinstance(value, basestring):
return False
try:
content_type_id, object_id = value.split('-')
except ValueError:
return False
try:
content_type = ContentType.objects.get_for_id(content_type_id)
except ContentType.DoesNotExist:
return False
model_class = content_type.model_class()
found = False
for queryset in self.choices:
if queryset.model != model_class:
continue
if queryset.filter(pk=object_id).count() == 1:
found = True
else:
return False
if not found:
# maybe a user would cheat by using a forbidden ctype id !
return False
return True
def choices_for_request(self):
"""
Propose local results and fill the autocomplete with remote
suggestions.
"""
assert self.choices, 'autocomplete.choices should be a queryset list'
q = self.request.GET.get('q', '')
request_choices = []
querysets_left = len(self.choices)
i = 0
for queryset in self.choices:
conditions = Q()
if q:
for search_field in self.search_fields[i]:
conditions |= Q(**{search_field + '__icontains': q})
limit = ((self.limit_choices - len(request_choices)) /
querysets_left)
for choice in queryset.filter(conditions)[:limit]:
request_choices.append(choice)
querysets_left -= 1
i += 1
return request_choices
def choices_for_values(self):
"""
Values which are not found in the querysets are ignored.
"""
values_choices = []
for queryset in self.choices:
ctype = ContentType.objects.get_for_model(queryset.model).pk
try:
ids = [x.split('-')[1] for x in self.values
if x is not None and int(x.split('-')[0]) == ctype]
except ValueError:
continue
for choice in queryset.filter(pk__in=ids):
values_choices.append(choice)
return values_choices
```
#### File: autocomplete_light/autocomplete/template.py
```python
import types
from django.template import loader
from .base import AutocompleteBase
class AutocompleteTemplate(AutocompleteBase):
"""
This replacement for AutocompleteBase supports two new attributes:
choice_template
Name of the template to use to render a choice in the autocomplete. If
none is specified, then ``AutocompleteBase`` will render the choice.
autocomplete_template
Name of the template to use to render the autocomplete. Again, fall
back on ``AutocompleteBase`` if this is None.
"""
choice_template = None
autocomplete_template = None
def get_base_context(self):
"""
Return a dict to use as base context for all templates.
It contains:
- ``{{ request }}`` if available,
- ``{{ autocomplete }}`` the "self" instance.
"""
return {
'request': self.request,
'autocomplete': self,
}
def render_template_context(self, template, extra_context=None):
"""
Render ``template`` with base context and ``extra_context``.
"""
context = self.get_base_context()
context.update(extra_context or {})
return loader.render_to_string(template, context)
def autocomplete_html(self):
"""
Render ``autocomplete_template`` with base context and ``{{ choices
}}``. If ``autocomplete_template`` is none then fall back on
``AutocompleteBase``.
"""
if self.autocomplete_template:
choices = self.choices_for_request()
return self.render_template_context(self.autocomplete_template,
{'choices': choices})
else:
return super(AutocompleteTemplate, self).autocomplete_html()
def choice_html(self, choice):
"""
Render choice_template with base context and ``{{ choice }}``. If
``choice_template`` is none then fall back on ``AutocompleteBase``.
"""
if self.choice_template:
return self.render_template_context(self.choice_template,
{'choice': choice})
else:
return super(AutocompleteTemplate, self).choice_html(choice)
```
#### File: autocomplete_light/contrib/generic_m2m.py
```python
from genericm2m.models import RelatedObjectsDescriptor
from ..generic import GenericModelChoiceField, GenericModelForm
class GenericModelForm(GenericModelForm):
"""
Extension of autocomplete_light.GenericModelForm, that handles
genericm2m's RelatedObjectsDescriptor.
"""
def __init__(self, *args, **kwargs):
"""
Add related objects to initial for each generic m2m field.
"""
super(GenericModelForm, self).__init__(*args, **kwargs)
for name, field in self.generic_m2m_fields():
related_objects = getattr(self.instance, name).all()
self.initial[name] = [x.object for x in related_objects]
def generic_m2m_fields(self):
"""
Yield name, field for each RelatedObjectsDescriptor of the model of
this ModelForm.
"""
for name, field in self.fields.items():
if not isinstance(field, GenericModelMultipleChoiceField):
continue
model_class_attr = getattr(self._meta.model, name, None)
if not isinstance(model_class_attr, RelatedObjectsDescriptor):
continue
yield name, field
def save(self, commit=True):
"""
Save the form and, in particular, the generic many-to-many relations.
"""
instance = super(GenericModelForm, self).save(commit=commit)
def save_m2m():
for name, field in self.generic_m2m_fields():
model_attr = getattr(instance, name)
selected_relations = self.cleaned_data.get(name, [])
for related in model_attr.all():
if related.object not in selected_relations:
model_attr.remove(related)
for related in selected_relations:
model_attr.connect(related)
if hasattr(self, 'save_m2m'):
old_m2m = self.save_m2m
def _():
save_m2m()
old_m2m()
self.save_m2m = _
else:
save_m2m()
return instance
class GenericModelMultipleChoiceField(GenericModelChoiceField):
"""
Simple form field that converts strings to models.
"""
def prepare_value(self, value):
return [super(GenericModelMultipleChoiceField, self
).prepare_value(v) for v in value]
def to_python(self, value):
return [super(GenericModelMultipleChoiceField, self).to_python(v)
for v in value]
```
#### File: autocomplete_light/contrib/taggit_tagfield.py
```python
from taggit.forms import TagField as TaggitTagField
from taggit.utils import edit_string_for_tags
from ..widgets import TextWidget
class TagWidget(TextWidget):
def render(self, name, value, attrs=None):
if value is not None and not isinstance(value, basestring):
value = edit_string_for_tags(
[o.tag for o in value.select_related("tag")])
return super(TagWidget, self).render(name, value, attrs)
class TagField(TaggitTagField):
widget = TagWidget
```
#### File: tests/autocomplete/model.py
```python
from .case import *
from django.contrib.auth.models import User
class AutocompleteModelMock(autocomplete_light.AutocompleteModelBase):
limit_choices = 2
choices = User.objects.all()
search_fields = ('username', 'email')
class FormMock(forms.Form):
x = forms.ModelChoiceField(queryset=AutocompleteModelMock.choices,
widget=autocomplete_light.ChoiceWidget(
autocomplete=AutocompleteModelMock))
class MultipleFormMock(forms.Form):
x = forms.ModelMultipleChoiceField(queryset=AutocompleteModelMock.choices,
widget=autocomplete_light.MultipleChoiceWidget(
autocomplete=AutocompleteModelMock))
class AutocompleteModelTestCase(AutocompleteTestCase):
autocomplete_mock = AutocompleteModelMock
def setUp(self):
User.objects.all().delete()
self.abe = User(username='Abe', email='<EMAIL>')
self.jack = User(username='Jack', email='<EMAIL>')
self.james = User(username='James', email='<EMAIL>')
self.john = User(username='John', email='<EMAIL>')
self.abe.save()
self.jack.save()
self.james.save()
self.john.save()
def assert_choices_equal(self, result, test):
self.assertEqual(list(result), test['expected'])
def get_choices_for_values_tests(self):
return (
{
'fixture': [1, 4],
'expected': [
self.abe,
self.john,
]
},
)
def get_choices_for_request_tests(self):
return (
{
'fixture': make_get_request('q=j'),
'expected': [
self.jack,
self.james,
]
},
{
'fixture': make_get_request('q=sale'),
'expected': [
self.abe,
self.james,
]
},
{
'fixture': make_get_request(),
'expected': [
self.abe,
self.jack,
]
}
)
def get_validate_tests(self):
return (
{
'fixture': [1, 4],
'expected': True,
},
{
'fixture': [1, 4, 123],
'expected': False,
},
)
def get_autocomplete_html_tests(self):
return (
{
'fixture': make_get_request('q=j'),
'expected': u''.join([
'<span class="div" data-value="%s">%s</span>' % (
self.jack.pk, unicode(self.jack)),
'<span class="div" data-value="%s">%s</span>' % (
self.james.pk, unicode(self.james)),
])
},
{
'fixture': make_get_request(),
'expected': u''.join([
'<span class="div" data-value="%s">%s</span>' % (
self.abe.pk, unicode(self.abe)),
'<span class="div" data-value="%s">%s</span>' % (
self.jack.pk, unicode(self.jack)),
])
},
)
def get_widget_tests(self):
return (
{
'form_class': FormMock,
'fixture': 'x=4',
'expected_valid': True,
'expected_data': self.john,
},
{
'form_class': FormMock,
'fixture': 'x=3&x=4',
'expected_valid': True,
'expected_data': self.john,
},
{
'fixture': 'x=abc',
'expected_valid': False,
},
{
'form_class': MultipleFormMock,
'fixture': 'x=4&x=2',
'expected_valid': True,
'expected_data': [self.jack, self.john],
},
{
'fixture': 'x=abc&x=2',
'expected_valid': False,
},
)
``` |
{
"source": "jonashaag/django-floppyforms",
"score": 2
} |
#### File: floppyforms/tests/base.py
```python
import difflib
from copy import copy
from django.test import TestCase
from django.test.signals import template_rendered
from django.test.utils import ContextList
from django.utils.unittest.util import safe_repr
from .html import HTMLParseError, parse_html
class InvalidVariable(unicode):
def __nonzero__(self):
return False
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return u'%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += u' No template was rendered.'
else:
message += u' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return u'%s was rendered.' % self.template_name
class TemplatesTestCase(object):
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError(u'response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, u', '.join(template_names))
)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError(u'response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateNotUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError, e:
standardMsg = u'%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class HTMLTestCase(object):
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
u'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
u'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
unicode(dom1).splitlines(),
unicode(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
u'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
u'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
class FloppyFormsTestCase(HTMLTestCase, TemplatesTestCase, TestCase):
pass
```
#### File: floppyforms/tests/fields.py
```python
from .base import FloppyFormsTestCase
import floppyforms as forms
class IntegerFieldTests(FloppyFormsTestCase):
def test_parse_int(self):
int_field = forms.IntegerField()
result = int_field.clean('15')
self.assertEqual(15, result)
self.assertIsInstance(result, int)
``` |
{
"source": "jonashaag/django-indisposable",
"score": 2
} |
#### File: django-indisposable/django_indisposable/__init__.py
```python
import MailChecker
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
def validate(email):
if email and '@' in email and MailChecker.MailChecker.is_blacklisted(email):
host = email.rsplit('@', 1)[1]
raise ValidationError(_('%(host)s email addresses are not accepted'), params={'host': host})
``` |
{
"source": "jonashaag/german-normalize",
"score": 3
} |
#### File: jonashaag/german-normalize/german_normalize.py
```python
from __future__ import unicode_literals
import re
import unicodedata
def normalize(text, lexicographic=False, heuristic_case=False):
assert not heuristic_case or not lexicographic, "'heuristic_case' conflicts with 'lexicographic'"
text = text.replace('ß', 'ss').replace('ẞ', 'SS')
if not lexicographic:
if heuristic_case:
text = re.sub('Ä(?=[a-z])', 'Ae', text)
text = re.sub('Ö(?=[a-z])', 'Oe', text)
text = re.sub('Ü(?=[a-z])', 'Ue', text)
for original, replacement in (('ä', 'ae'), ('ö', 'oe'), ('ü', 'ue'),
('Ä', 'AE'), ('Ö', 'OE'), ('Ü', 'UE')):
text = text.replace(original, replacement)
return unicodedata.normalize('NFKD', text).encode('ascii',
'ignore').decode()
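# Illustrative examples (not part of the original module):
#   normalize("Straße") -> "Strasse"
#   normalize("Äpfel") -> "AEpfel"; normalize("Äpfel", heuristic_case=True) -> "Aepfel"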
``` |
{
"source": "jonashaag/karellen-kombu-ext",
"score": 2
} |
#### File: django/south_migrations/0001_initial.py
```python
from __future__ import absolute_import, unicode_literals
# flake8: noqa
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Queue'
db.create_table('djkombu_queue', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),
))
db.send_create_signal('django', ['Queue'])
# Adding model 'Message'
db.create_table('djkombu_message', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('visible', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
('payload', self.gf('django.db.models.fields.TextField')()),
('queue', self.gf('django.db.models.fields.related.ForeignKey')(related_name='messages', to=orm['django.Queue'])),
))
db.send_create_signal('django', ['Message'])
def backwards(self, orm):
# Deleting model 'Queue'
db.delete_table('djkombu_queue')
# Deleting model 'Message'
db.delete_table('djkombu_message')
models = {
'django.message': {
'Meta': {'object_name': 'Message', 'db_table': "'djkombu_message'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payload': ('django.db.models.fields.TextField', [], {}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django.Queue']"}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
},
'django.queue': {
'Meta': {'object_name': 'Queue', 'db_table': "'djkombu_queue'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['django']
``` |
{
"source": "jonashaag/kartothek",
"score": 2
} |
#### File: io/dask/_update.py
```python
from functools import partial
from typing import List
import numpy as np
import pandas as pd
from kartothek.io_components.metapartition import (
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.utils import sort_values_categorical
from ._utils import map_delayed
_KTK_HASH_BUCKET = "__KTK_HASH_BUCKET"
def _hash_bucket(df: pd.DataFrame, subset: List[str], num_buckets: int):
"""
Categorize each row of `df` based on the data in the columns `subset`
into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`
"""
if subset is None:
subset = df.columns
hash_arr = pd.util.hash_pandas_object(df[subset], index=False)
buckets = hash_arr % num_buckets
available_bit_widths = np.array([8, 16, 32, 64])
mask = available_bit_widths > np.log2(num_buckets)
bit_width = min(available_bit_widths[mask])
df[_KTK_HASH_BUCKET] = buckets.astype(f"uint{bit_width}")
return df
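# Usage sketch (hypothetical data, for illustration only):
#   frame = pd.DataFrame({"user": ["a", "b", "a", "c"], "value": [1, 2, 3, 4]})
#   bucketed = _hash_bucket(frame, subset=["user"], num_buckets=4)
#   # adds a __KTK_HASH_BUCKET uint8 column with values in [0, 4); equal "user" values share a bucket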
def _update_dask_partitions_shuffle(
ddf,
table,
secondary_indices,
metadata_version,
partition_on,
store_factory,
df_serializer,
dataset_uuid,
num_buckets,
sort_partitions_by,
bucket_by,
):
if ddf.npartitions == 0:
return ddf
if num_buckets is not None:
meta = ddf._meta
meta[_KTK_HASH_BUCKET] = np.uint64(0)
ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
group_cols = [partition_on[0], _KTK_HASH_BUCKET]
else:
group_cols = [partition_on[0]]
ddf = ddf.groupby(by=group_cols)
ddf = ddf.apply(
partial(
_store_partition,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
table=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
),
meta=("MetaPartition", "object"),
)
return ddf
def _update_dask_partitions_one_to_one(
delayed_tasks,
secondary_indices,
metadata_version,
partition_on,
store_factory,
df_serializer,
dataset_uuid,
sort_partitions_by,
):
input_to_mps = partial(
parse_input_to_metapartition,
metadata_version=metadata_version,
expected_secondary_indices=secondary_indices,
)
mps = map_delayed(delayed_tasks, input_to_mps)
if sort_partitions_by:
mps = map_delayed(
mps,
MetaPartition.apply,
partial(sort_values_categorical, column=sort_partitions_by),
)
if partition_on:
mps = map_delayed(mps, MetaPartition.partition_on, partition_on)
if secondary_indices:
mps = map_delayed(mps, MetaPartition.build_indices, secondary_indices)
return map_delayed(
mps,
MetaPartition.store_dataframes,
store=store_factory,
df_serializer=df_serializer,
dataset_uuid=dataset_uuid,
)
def _store_partition(
df,
secondary_indices,
sort_partitions_by,
table,
dataset_uuid,
partition_on,
store_factory,
df_serializer,
metadata_version,
):
store = store_factory()
# I don't have access to the group values
mps = parse_input_to_metapartition(
{"data": {table: df}}, metadata_version=metadata_version
)
# delete reference to enable release after partition_on; before index build
del df
if sort_partitions_by:
mps = mps.apply(partial(sort_values_categorical, column=sort_partitions_by))
if partition_on:
mps = mps.partition_on(partition_on)
if secondary_indices:
mps = mps.build_indices(secondary_indices)
return mps.store_dataframes(
store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
)
```
#### File: io/testing/write.py
```python
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
Pandas seems to stop evaluating the groupby expression if the dataframes after the first column split
is of length 1. This seems to be an optimization which should, however, still raise a KeyError
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
# First partition is empty, test this edgecase
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.uint64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
"Y": pd.Series([2], dtype=np.int64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1, 2], dtype=np.int64),
"X": pd.Series([1, 2], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([3], dtype=np.int64),
"X": pd.Series([3], dtype=np.uint64),
}
),
],
False,
),
],
)
def test_schema_check_write(dfs, ok, store_factory, bound_store_dataframes):
df_list = [{"label": "cluster_1", "data": [("core", df)]} for df in dfs]
if ok:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P"],
metadata_version=4,
)
else:
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P"],
metadata_version=4,
)
assert (
"Schemas for table 'core' of dataset 'dataset_uuid' are not compatible!"
in _exception_str(exc.value)
)
def test_schema_check_write_shared(store_factory, bound_store_dataframes):
df1 = pd.DataFrame(
{"P": pd.Series([1], dtype=np.int64), "X": pd.Series([1], dtype=np.int64)}
)
df2 = pd.DataFrame(
{"P": pd.Series([1], dtype=np.uint64), "Y": pd.Series([1], dtype=np.int64)}
)
df_list = [
{"label": "cluster_1", "data": [("core", df1)]},
{"label": "cluster_2", "data": [("prediction", df2)]},
]
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P"],
metadata_version=4,
)
assert 'Found incompatible entries for column "P"' in str(exc.value)
def test_schema_check_write_nice_error(store_factory, bound_store_dataframes):
df1 = pd.DataFrame(
{
"P": pd.Series([1, 1], dtype=np.int64),
"Q": pd.Series([1, 2], dtype=np.int64),
"X": pd.Series([1, 1], dtype=np.int64),
}
)
df2 = pd.DataFrame(
{
"P": pd.Series([2, 2], dtype=np.uint64),
"Q": pd.Series([1, 2], dtype=np.int64),
"X": pd.Series([1, 1], dtype=np.int64),
}
)
df_list = [
{"label": "uuid1", "data": [("core", df1)]},
{"label": "uuid2", "data": [("core", df2)]},
]
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "Q"],
metadata_version=4,
)
assert _exception_str(exc.value).startswith(
"""Schemas for table 'core' of dataset 'dataset_uuid' are not compatible!
Schema violation
Origin schema: {core/P=2/Q=2/uuid2}
Origin reference: {core/P=1/Q=2/uuid1}
Diff:
"""
)
def test_schema_check_write_cut_error(store_factory, bound_store_dataframes):
df1 = pd.DataFrame(
{
"P": pd.Series([1] * 100, dtype=np.int64),
"Q": pd.Series(range(100), dtype=np.int64),
"X": pd.Series([1] * 100, dtype=np.int64),
}
)
df2 = pd.DataFrame(
{
"P": pd.Series([2] * 100, dtype=np.uint64),
"Q": pd.Series(range(100), dtype=np.int64),
"X": pd.Series([1] * 100, dtype=np.int64),
}
)
df_list = [
{"label": "uuid1", "data": [("core", df1)]},
{"label": "uuid2", "data": [("core", df2)]},
]
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "Q"],
metadata_version=4,
)
assert _exception_str(exc.value).startswith(
"""Schemas for table 'core' of dataset 'dataset_uuid' are not compatible!
Schema violation
Origin schema: {core/P=2/Q=99/uuid2}
Origin reference: {core/P=1/Q=99/uuid1}
Diff:
"""
)
def test_metadata_consistency_errors_fails(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame({"W": np.arange(0, 10), "L": np.arange(0, 10)})
df_2 = pd.DataFrame(
{"P": np.arange(10, 20), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [
{"label": "cluster_1", "data": [("core", df)]},
{"label": "cluster_2", "data": [("core", df_2)]},
]
# Also test `df_list` in reverse order, as this could lead to different results
for dfs in [df_list, list(reversed(df_list))]:
with pytest.raises(
Exception, match=r"Schemas for table .* of dataset .* are not compatible!"
):
return bound_store_dataframes(
dfs, store=store_factory, metadata_version=metadata_version
)
def test_table_consistency_resistance(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame({"P": np.arange(0, 10)})
df_helper = pd.DataFrame(
{"P": np.arange(15, 35), "info": string.ascii_lowercase[:10]}
)
df_list = [
{"label": "cluster_1", "data": [("core", df)]},
{"label": "cluster_2", "data": [("core", df), ("helper", df_helper)]},
]
store_kwargs = dict(store=store_factory, metadata_version=metadata_version)
metadata1 = bound_store_dataframes(df_list, **store_kwargs)
metadata2 = bound_store_dataframes(list(reversed(df_list)), **store_kwargs)
assert set(metadata1.tables) == set(metadata2.tables) == {"core", "helper"}
def test_store_dataframes_as_dataset_overwrite(
store_factory, dataset_function, bound_store_dataframes
):
with pytest.raises(RuntimeError):
bound_store_dataframes(
[pd.DataFrame()], store=store_factory, dataset_uuid=dataset_function.uuid
)
bound_store_dataframes(
[pd.DataFrame()],
store=store_factory,
dataset_uuid=dataset_function.uuid,
overwrite=True,
)
bound_store_dataframes(
[pd.DataFrame()], store=store_factory, dataset_uuid="new_dataset_uuid"
)
def test_store_empty_dataframes_partition_on(store_factory, bound_store_dataframes):
df1 = pd.DataFrame({"x": [1], "y": [1]}).iloc[[]]
md1 = bound_store_dataframes(
[df1], store=store_factory, dataset_uuid="uuid", partition_on=["x"]
)
assert md1.tables == ["table"]
assert set(md1.table_meta["table"].names) == set(df1.columns)
df2 = pd.DataFrame({"x": [1], "y": [1], "z": [1]}).iloc[[]]
md2 = bound_store_dataframes(
[df2],
store=store_factory,
dataset_uuid="uuid",
partition_on=["x"],
overwrite=True,
)
assert md2.tables == ["table"]
assert set(md2.table_meta["table"].names) == set(df2.columns)
df3 = pd.DataFrame({"x": [1], "y": [1], "a": [1]}).iloc[[]]
md3 = bound_store_dataframes(
[{"table2": df3}],
store=store_factory,
dataset_uuid="uuid",
partition_on=["x"],
overwrite=True,
)
assert md3.tables == ["table2"]
assert set(md3.table_meta["table2"].names) == set(df3.columns)
def test_store_overwrite_none(store_factory, bound_store_dataframes):
df1 = pd.DataFrame({"x": [1], "y": [1]})
md1 = bound_store_dataframes(
[df1], store=store_factory, dataset_uuid="uuid", partition_on=["x"]
)
assert md1.tables == ["table"]
assert set(md1.table_meta["table"].names) == set(df1.columns)
md2 = bound_store_dataframes(
[{}],
store=store_factory,
dataset_uuid="uuid",
partition_on=["x"],
overwrite=True,
)
assert md2.tables == []
``` |
{
"source": "jonashaag/lleaves",
"score": 3
} |
#### File: compiler/ast/parser.py
```python
from lleaves.compiler.ast.nodes import DecisionNode, Forest, LeafNode, Tree
from lleaves.compiler.ast.scanner import cat_args_bitmap, scan_model_file
from lleaves.compiler.utils import DecisionType
def _parse_tree_to_ast(tree_struct, cat_bitmap):
n_nodes = len(tree_struct["decision_type"])
leaves = [
LeafNode(idx, value) for idx, value in enumerate(tree_struct["leaf_value"])
]
# Create the nodes using all non-specific data
# categorical nodes are finalized later
nodes = [
DecisionNode(
idx, split_feature, threshold, decision_type_id, left_idx, right_idx
)
for idx, (
split_feature,
threshold,
decision_type_id,
left_idx,
right_idx,
) in enumerate(
zip(
tree_struct["split_feature"],
tree_struct["threshold"],
tree_struct["decision_type"],
tree_struct["left_child"],
tree_struct["right_child"],
)
)
]
assert len(nodes) == n_nodes
categorical_nodes = [
idx
for idx, decision_type_id in enumerate(tree_struct["decision_type"])
if DecisionType(decision_type_id).is_categorical
]
for idx in categorical_nodes:
node = nodes[idx]
thresh = int(node.threshold)
# pass just the relevant vector entries
start = tree_struct["cat_boundaries"][thresh]
end = tree_struct["cat_boundaries"][thresh + 1]
node.finalize_categorical(
cat_threshold=tree_struct["cat_threshold"][start:end],
)
for node in nodes:
# in the model_file.txt, the outgoing left + right nodes are specified
# via their index in the list. negative numbers are leaves, positive numbers
# are other nodes
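# (illustrative: idx == -1 refers to leaves[0], idx == -2 to leaves[1]; idx == 3 refers to nodes[3])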
children = [
leaves[abs(idx) - 1] if idx < 0 else nodes[idx]
for idx in (node.left_idx, node.right_idx)
]
node.add_children(*children)
for node in nodes:
node.validate()
if nodes:
return Tree(tree_struct["Tree"], nodes[0], cat_bitmap)
else:
# special case for when tree is just single leaf
assert len(leaves) == 1
return Tree(tree_struct["Tree"], leaves[0], cat_bitmap)
def parse_to_ast(model_path):
scanned_model = scan_model_file(model_path)
n_args = scanned_model["general_info"]["max_feature_idx"] + 1
cat_bitmap = cat_args_bitmap(scanned_model["general_info"]["feature_infos"])
assert n_args == len(cat_bitmap), "Ill formed model file"
trees = [
_parse_tree_to_ast(tree_struct, cat_bitmap)
for tree_struct in scanned_model["trees"]
]
return Forest(trees, cat_bitmap)
```
#### File: lleaves/lleaves/lleaves.py
```python
import concurrent.futures
import os
from ctypes import CFUNCTYPE, POINTER, c_double, c_int
from pathlib import Path
import llvmlite.binding as llvm
import numpy as np
from lleaves import compiler
from lleaves.compiler.ast import scanner
from lleaves.compiler.objective_funcs import get_objective_func
try:
from pandas import DataFrame as pd_DataFrame
except ImportError:
class pd_DataFrame:
"""Dummy class for pandas.DataFrame."""
pass
# I have the feeling that at least two things happen in the `Model` class that don't
# necessarily belong together:
# 1) Data preprocessing, which is actually purely functional/stateless
# 2) "Lazy" compilation with caching. Couldn't that be separated completely from the
# class, leaving it to the user to make sure things aren't compiled more than once?
# I.e. change the interface so that it is used like this:
#
# parsed_model = parse_model(filepath)
# predict_func = compile_to_func(parsed_model)
# objective_func = get_objective_func(parsed_model)
#
# Then the sklearn interface could just be a wrapper around that:
#
# CompiledModel(predict_func, objective_func, parsed_model.number_of_features)
#
# And then the caching could be dropped entirely. It's more flexible if the user
# does that, isn't it?
#
# My vision for this module is that it becomes a bunch of independent functions,
# with the `Model` class only a roughly 10-line wrapper to support the sklearn
# interface.
class Model:
"""
The base class of lleaves.
"""
# machine-targeted compiler & exec engine.
_execution_engine = None
# LLVM IR Module
_IR_module: llvm.ModuleRef = None
# prediction function, drops GIL on entry
_c_entry_func = None
def __init__(self, model_file=None):
# model_file must not be None.
"""
Initialize the uncompiled model.
:param model_file: Path to the model.txt.
"""
self.model_file = model_file
self.is_compiled = False
# model_file must not be None.
# I find it a bit surprising that the lexer/scanner is used directly here.
# Normally a lexer is only used by the parser.
scanned_model = scanner.scan_model_file(model_file, general_info_only=True)
# I find it a bit odd that parsing happens twice, once here and once during compilation.
self._general_info = scanned_model["general_info"]
# isn't this identical?
# self._pandas_categorical = scanner.scan_pandas_categorical(model_file)
self._pandas_categorical = scanned_model["pandas_categorical"]
# objective function is implemented as an np.ufunc.
self.objective_transf = get_objective_func(self._general_info["objective"])
def num_feature(self):
"""
Returns the number of features used by this model.
"""
return self._general_info["max_feature_idx"] + 1
# _get_execution_engine is unused? Or is it?
def _get_llvm_module(self):
if self._IR_module:
return self._IR_module
self._IR_module = compiler.compile_to_module(self.model_file)
return self._IR_module
def compile(self, cache=None):
"""
Generate the LLVM IR for this model and compile it to ASM.
This function can be called multiple times, but will only compile once.
:param cache: Path to a cache file. If this path doesn't exist, binary will be dumped at path after compilation.
If path exists, binary will be loaded and compilation skipped.
No effort is made to check staleness / consistency.
The precise workings of the cache parameter will be subject to future changes.
"""
if self.is_compiled:
return
# Does LLVM have global state here? What happens if this is called multiple
# times or in parallel?
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
# Create a target machine representing the host
target = llvm.Target.from_default_triple()
target_machine = target.create_target_machine()
if cache is None or not Path(cache).exists():
# Compile to LLVM IR
module = self._get_llvm_module()
else:
# when loading binary from cache we use a dummy empty module
module = llvm.parse_assembly("")
# Create execution engine for our module
self._execution_engine = llvm.create_mcjit_compiler(module, target_machine)
# I think the cache code would be simpler if the two cases (cache hit/miss)
# were written out explicitly.
# If I understand the code correctly, on a cache hit "save_to_cache" is also
# executed and then bails out via the exists() call? Wouldn't it be better,
# in the cache-hit case, to simply call set_object_cache without notify_func=?
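# A sketch of that two-branch structure (reviewer suggestion, not the current code):
#   if cache and Path(cache).exists():
#       self._execution_engine.set_object_cache(getbuffer_func=load_from_cache)
#   else:
#       self._execution_engine.set_object_cache(notify_func=save_to_cache)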
# when caching we dump the executable once the module finished compiling
def save_to_cache(module, buffer):
if cache and not Path(cache).exists():
with open(cache, "wb") as file:
file.write(buffer)
# when caching load the executable if it exists
def load_from_cache(module):
if cache and Path(cache).exists():
return Path(cache).read_bytes()
self._execution_engine.set_object_cache(
notify_func=save_to_cache, getbuffer_func=load_from_cache
)
# compile IR to ASM
self._execution_engine.finalize_object()
self._execution_engine.run_static_constructors()
# construct entry func
addr = self._execution_engine.get_function_address("forest_root")
# possibly better to define the ctypes type at module level and only create
# the function pointer here (i.e. "ENTRY_FUNC_TYPE(addr)" or similar).
# CFUNCTYPE params: void return, pointer to data, pointer to results arr, start_idx, end_idx
# Drops GIL during call, re-aquires it after
self._c_entry_func = CFUNCTYPE(
None, POINTER(c_double), POINTER(c_double), c_int, c_int
)(addr)
self.is_compiled = True
def predict(self, data, n_jobs=os.cpu_count()):
"""
Return predictions for the given data.
The model needs to be compiled before prediction.
:param data: Pandas df, numpy 2D array or Python list. For fastest speed pass 2D float64 numpy arrays only.
If it is a df, what does it need to look like?
Is there actually a check that data has the right format? E.g. the right number of features
:param n_jobs: Number of threads to use for prediction. Defaults to number of CPUs. For single-row prediction
this should be set to 1.
:return: 1D numpy array (dtype float64)
"""
if not self.is_compiled:
raise RuntimeError(
"Model needs to be compiled before prediction. Run model.compile()."
)
if isinstance(data, pd_DataFrame):
data = self._data_from_pandas(data)
data, n_preds = self._to_1d_ndarray(data)
ptr_data = data.ctypes.data_as(POINTER(c_double))
preds = np.zeros(n_preds, dtype=np.float64)
ptr_preds = preds.ctypes.data_as(POINTER(c_double))
if n_jobs > 1:
# what is the meaning of the part after the "+"?
batchsize = n_preds // n_jobs + (n_preds % n_jobs > 0)
def f(start_idx):
self._c_entry_func(
ptr_data, ptr_preds, start_idx, min(start_idx + batchsize, n_preds)
)
with concurrent.futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
for i in range(0, n_preds, batchsize):
executor.submit(f, i)
else:
self._c_entry_func(ptr_data, ptr_preds, 0, n_preds)
# It could be useful to offer, as a third option for parallelization,
# letting users plug in their own parallelization code.
# I haven't thought through exactly what that could look like, but I think it
# would be helpful to be able to run all the preprocessing (to-1d, to-ctypes, ...)
# separately, so that I could write something like:
# inputs, outputs, func = prepare(data)
# my_run_parallel(inputs, outputs, func)
return self.objective_transf(preds)
def _data_from_pandas(self, data):
# What does this function do?
# Can it be separated from the Model class?
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError("Input data must be 2D and non-empty.")
cat_cols = list(data.select_dtypes(include=["category"]).columns)
if len(cat_cols) != len(self._pandas_categorical):
raise ValueError(
"The categorical features passed don't match the train dataset."
)
for col, category in zip(cat_cols, self._pandas_categorical):
# we use set_categories to get the same (category -> code) mapping that we used during train
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy()
# apply (category -> code) mapping. Categories become floats
data[cat_cols] = (
data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
)
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float64)
return data
def _to_1d_ndarray(self, data):
# Separate this from the Model class
if isinstance(data, list):
try:
data = np.array(data, dtype=np.float64)
except BaseException:
raise ValueError("Cannot convert data list to appropriate np array")
if not isinstance(data, np.ndarray):
raise ValueError(f"Expecting list or numpy.ndarray, got {type(data)}")
if len(data.shape) != 2:
raise ValueError(
f"Data must be 2 dimensional, is {len(data.shape)} dimensional"
)
n_preds = data.shape[0]
if data.dtype == np.float64:
# flatten the array to 1D
data = np.array(data.reshape(data.size), dtype=np.float64, copy=False)
else:
data = np.array(data.reshape(data.size), dtype=np.float64)
return data, n_preds
```
#### File: lleaves/tests/test_nans.py
```python
import lightgbm as lgb
import numpy as np
import numpy.testing as npt
import pytest
import lleaves
# 2: MissingType None, default left
# 4: MissingType 0, default right
# 6: MissingType 0, default left
# 8: MissingType NaN, default left
# 10: MissingType NaN, default right
@pytest.mark.parametrize(
"decision_type, threshold_le_zero",
[
(0, True),
(2, True),
(4, True),
(6, True),
(8, True),
(10, True),
(0, False),
(2, False),
(4, False),
(6, False),
(8, False),
(10, False),
],
)
def test_zero_as_missing_numerical(tmp_path, decision_type, threshold_le_zero):
model_txt = tmp_path / "model.txt"
with open("tests/models/tiniest_single_tree/model.txt") as infile, open(
model_txt, "w"
) as outfile:
for line in infile.readlines():
if line.startswith("decision_type="):
outfile.write(line.replace("2", str(decision_type)))
elif threshold_le_zero and line.startswith("threshold="):
outfile.write(line.replace("0.", "-0."))
else:
outfile.write(line)
lgbm_model = lgb.Booster(model_file=str(model_txt))
llvm_model = lleaves.Model(model_file=str(model_txt))
llvm_model.compile()
nan = float("NaN")
data = [
[0.0, 1.0, 1.0],
[0.0, -1.0, -1.0],
[0.0, 1.0, 0.5],
[0.0, -1.0, -0.5],
[0.0, 0.5, 1.0],
[0.0, -0.5, -1.0],
[0.0, 0.5, 0.0],
[0.0, -0.5, 0.0],
[-0.01, -0.01, -0.01],
[0.0, 0.0, 0.0],
[0.01, 0.01, 0.01],
[nan, nan, nan],
[None, None, None],
]
npt.assert_equal(llvm_model.predict(data), lgbm_model.predict(data))
@pytest.mark.parametrize(
"decision_type, zero_in_bitvec",
[
(1, True),
(3, True),
(5, True),
(7, True),
(9, True),
(11, True),
(1, False),
(3, False),
(5, False),
(7, False),
(9, False),
(11, False),
],
)
def test_zero_as_missing_categorical(tmp_path, decision_type, zero_in_bitvec):
model_txt = tmp_path / "model.txt"
with open("tests/models/pure_categorical/model.txt") as infile, open(
model_txt, "w"
) as outfile:
for line in infile.readlines():
if line.startswith("decision_type"):
outfile.write(line.replace("1", str(decision_type)))
elif line.startswith("cat_threshold") and not zero_in_bitvec:
outfile.write(line.replace("23", "22"))
else:
outfile.write(line)
lgbm_model = lgb.Booster(model_file=str(model_txt))
llvm_model = lleaves.Model(model_file=str(model_txt))
llvm_model.compile()
nan = float("NaN")
data = [
[1.0, 6.0, 0.0],
[nan, 6.0, 0.0],
[nan, 1.0, 0.0],
[None, 1.0, 0.0],
[1.0, nan, 0.0],
[3.0, nan, 0.0],
[3.0, None, 0.0],
[0.0, 6.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, -0.001, 0.0],
[1.0, 0.001, 0.0],
[3.0, 0.0, 0.0],
[3.0, -0.001, 0.0],
[3.0, 0.001, 0.0],
[nan, nan, nan],
[None, None, None],
]
npt.assert_equal(llvm_model.predict(data), lgbm_model.predict(data))
def test_lightgbm_nan_pred_inconsistency(tmp_path):
# see https://github.com/dmlc/treelite/issues/277
model_file = str(tmp_path / "model.txt")
X = np.array(30 * [[1]] + 30 * [[2]] + 30 * [[0]])
y = np.array(60 * [5] + 30 * [10])
train_data = lgb.Dataset(X, label=y, categorical_feature=[0])
bst = lgb.train({}, train_data, 1, categorical_feature=[0])
bst.save_model(model_file)
# just to make sure it's not due to LightGBM model export
lgbm_model = lgb.Booster(model_file=model_file)
llvm_model = lleaves.Model(model_file=model_file)
llvm_model.compile()
data = np.array([[np.NaN], [0.0], [-0.1], [0.1], [10.0], [np.Inf], [-np.NaN]])
npt.assert_equal(lgbm_model.predict(data), llvm_model.predict(data))
def test_nan_prediction_numerical():
model_path = "tests/models/tiniest_single_tree/model.txt"
llvm_model = lleaves.Model(model_file=model_path)
llvm_model.compile()
lgbm_model = lgb.Booster(model_file=model_path)
nan = float("NaN")
inf = float("Inf")
data = [
3 * [nan],
3 * [inf],
[nan, inf, nan],
[0.0, nan, 0.0],
[0.0, inf, 0.0],
[0.0, nan, 1.0],
[0.0, inf, 1.0],
[0.0, 0.0, nan],
[0.0, 0.0, inf],
[0.0, 1.0, nan],
[0.0, 1.0, inf],
]
npt.assert_equal(llvm_model.predict(data), lgbm_model.predict(data))
def test_nan_prediction_categorical():
model_path = "tests/models/pure_categorical/model.txt"
llvm_model = lleaves.Model(model_file=model_path)
llvm_model.compile()
lgbm_model = lgb.Booster(model_file=model_path)
nan = float("NaN")
inf = float("Inf")
data = [
3 * [nan],
3 * [inf],
[nan, inf, nan],
# run both branches with Nans
[1.0, nan, 0.0],
[1.0, inf, 0.0],
[0.0, nan, 0.0],
[0.0, inf, 0.0],
[nan, 6.0, 0.0],
[inf, 6.0, 0.0],
[nan, 0.0, 0.0],
[inf, 0.0, 0.0],
]
npt.assert_equal(llvm_model.predict(data), lgbm_model.predict(data))
```
#### File: lleaves/tests/test_parsing.py
```python
from pathlib import Path
from lleaves.compiler.ast.scanner import scan_model_file, scan_pandas_categorical
def test_parser():
model_file = "tests/models/boston_housing/model.txt"
result = scan_model_file(model_file)
assert result["general_info"]["max_feature_idx"] == 12
assert result["pandas_categorical"] is None
assert len(result["trees"]) == 100
tree_3 = result["trees"][3]
assert tree_3["num_leaves"] == 18
assert tree_3["left_child"] == [
1,
3,
-2,
-1,
9,
8,
7,
10,
15,
16,
-5,
-9,
-8,
14,
-14,
-6,
-3,
]
tree_95 = result["trees"][95]
assert tree_95["Tree"] == 95
assert tree_95["num_leaves"] == 21
model_file = "tests/models/tiniest_single_tree/model.txt"
result = scan_model_file(model_file)
assert len(result["trees"]) == 1
tree_0 = result["trees"][0]
assert tree_0["num_leaves"] == 4
assert result["pandas_categorical"] is None
def test_parsing_pandas(tmp_path):
mod_model_file = tmp_path / "mod_model.txt"
model_file = Path("tests/models/pure_categorical/model.txt")
with open(model_file, "r") as file:
lines = file.readlines()
assert lines[-1].startswith("pandas_categorical")
lines[
-1
] = 'pandas_categorical:[["a", "b", "c"], ["b", "c", "d"], ["w", "x", "y", "z"]]'
with open(mod_model_file, "x") as file:
file.writelines(lines)
pandas_categorical = scan_pandas_categorical(model_file)
assert pandas_categorical is None
pandas_categorical = scan_pandas_categorical(mod_model_file)
assert pandas_categorical == [
["a", "b", "c"],
["b", "c", "d"],
["w", "x", "y", "z"],
]
``` |
{
"source": "jonashaag/PhoneFortifiedPerceptualLoss",
"score": 3
} |
#### File: jonashaag/PhoneFortifiedPerceptualLoss/utils.py
```python
import torch
from torch.nn.utils import rnn
def rnn_collate(batch):
n = rnn.pad_sequence([b[0] for b in batch]).transpose(0, 1)
c = rnn.pad_sequence([b[1] for b in batch]).transpose(0, 1)
l = torch.LongTensor([b[2] for b in batch])
return n, c, l
``` |
{
"source": "jonashaag/pint",
"score": 2
} |
#### File: pint/compat/__init__.py
```python
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
from io import BytesIO
from numbers import Number
from decimal import Decimal
from . import tokenize
ENCODING_TOKEN = tokenize.ENCODING
PYTHON3 = sys.version >= '3'
def tokenizer(input_string):
for tokinfo in tokenize.tokenize(BytesIO(input_string.encode('utf-8')).readline):
if tokinfo.type == ENCODING_TOKEN:
continue
yield tokinfo
if PYTHON3:
string_types = str
def u(x):
return x
maketrans = str.maketrans
long_type = int
else:
string_types = basestring
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
maketrans = lambda f, t: dict((ord(a), b) for a, b in zip(f, t))
long_type = long
try:
from collections import ChainMap
except ImportError:
from .chainmap import ChainMap
try:
from functools import lru_cache
except ImportError:
from .lrucache import lru_cache
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
try:
import numpy as np
from numpy import ndarray
HAS_NUMPY = True
NUMPY_VER = np.__version__
NUMERIC_TYPES = (Number, Decimal, ndarray, np.number)
def _to_magnitude(value, force_ndarray=False):
if isinstance(value, (dict, bool)) or value is None:
raise TypeError('Invalid magnitude for Quantity: {0!r}'.format(value))
elif isinstance(value, string_types) and value == '':
raise ValueError('Quantity magnitude cannot be an empty string.')
elif isinstance(value, (list, tuple)):
return np.asarray(value)
if force_ndarray:
return np.asarray(value)
return value
except ImportError:
np = None
class ndarray(object):
pass
HAS_NUMPY = False
NUMPY_VER = '0'
NUMERIC_TYPES = (Number, Decimal)
def _to_magnitude(value, force_ndarray=False):
if isinstance(value, (dict, bool)) or value is None:
raise TypeError('Invalid magnitude for Quantity: {0!r}'.format(value))
elif isinstance(value, string_types) and value == '':
raise ValueError('Quantity magnitude cannot be an empty string.')
elif isinstance(value, (list, tuple)):
raise TypeError('lists and tuples are valid magnitudes for '
'Quantity only when NumPy is present.')
return value
try:
from uncertainties import ufloat
HAS_UNCERTAINTIES = True
except ImportError:
ufloat = None
HAS_UNCERTAINTIES = False
try:
from babel import Locale as Loc
from babel import units as babel_units
HAS_BABEL = True
HAS_PROPER_BABEL = hasattr(babel_units, 'format_unit')
except ImportError:
HAS_PROPER_BABEL = HAS_BABEL = False
if not HAS_PROPER_BABEL:
Loc = babel_units = None
try:
import pandas as pd
HAS_PANDAS = True
HAS_PROPER_PANDAS = (
hasattr(pd.core, "arrays")
and hasattr(pd.core.arrays.base, "ExtensionOpsMixin")
)
except ImportError:
HAS_PROPER_PANDAS = HAS_PANDAS = False
try:
import pytest
HAS_PYTEST = True
except ImportError:
HAS_PYTEST = False
``` |
{
"source": "jonashaag/quetz",
"score": 2
} |
#### File: quetz_content_trust/quetz_content_trust/main.py
```python
import logging
from pathlib import Path
from sqlalchemy import desc
import quetz
from quetz.database import get_db_manager
from . import db_models
from .api import router
logger = logging.getLogger("quetz")
@quetz.hookimpl
def register_router():
return router
@quetz.hookimpl
def post_index_creation(raw_repodata: dict, channel_name, subdir):
"""Use available online keys to sign packages"""
with get_db_manager() as db:
query = (
db.query(db_models.SigningKey)
.join(db_models.RoleDelegation.keys)
.filter(
db_models.RoleDelegation.channel == channel_name,
db_models.RoleDelegation.type == "pkg_mgr",
db_models.SigningKey.private_key.isnot(None),  # SQL-level "IS NOT NULL"; a bare "is not None" is always True here
)
.order_by(desc('time_created'))
.all()
)
if query:
import json
from libmambapy import bindings as libmamba_api
signatures = {}
for name, metadata in raw_repodata["packages"].items():
sig = libmamba_api.sign(
json.dumps(metadata, indent=2, sort_keys=True), query[0].private_key
)
if name not in signatures:
signatures[name] = {}
signatures[name][query[0].public_key] = dict(signature=sig)
logger.info(f"Signed {Path(channel_name) / subdir}")
raw_repodata["signatures"] = signatures
```
#### File: quetz/tests/conftest.py
```python
import uuid
from pytest import fixture
from quetz.db_models import Profile, User
pytest_plugins = "quetz.testing.fixtures"
@fixture
def user_role():
return None
@fixture
def user_without_profile(db, user_role):
new_user = User(id=uuid.uuid4().bytes, username="bartosz", role=user_role)
db.add(new_user)
db.commit()
yield new_user
db.delete(new_user)
db.commit()
@fixture
def user(db, user_without_profile):
profile = Profile(
name="Bartosz", avatar_url="http:///avatar", user=user_without_profile
)
db.add(profile)
db.commit()
yield user_without_profile
db.query(Profile).filter(
Profile.name == profile.name,
Profile.avatar_url == profile.avatar_url,
Profile.user_id == user_without_profile.id,
).delete()
db.commit()
``` |
{
"source": "jonashaag/source_separation",
"score": 2
} |
#### File: source_separation/source_separation/train.py
```python
import fire
import os
import torch
import torch.nn as nn
from typing import Tuple
from pytorch_sound.data.meta import voice_bank, dsd100, musdb18
from pytorch_sound.models import build_model
from torch.optim.lr_scheduler import MultiStepLR
from source_separation.dataset import get_datasets
from source_separation.trainer import Wave2WaveTrainer, LossMixingTrainer
def main(meta_dir: str, save_dir: str,
save_prefix: str, pretrained_path: str = '',
model_name: str = 'refine_unet_base', batch_size: int = 128, num_workers: int = 16, fix_len: float = 2.,
lr: float = 5e-4, betas: Tuple[float] = (0.5, 0.9), weight_decay: float = 0.0,
max_step: int = 200000, valid_max_step: int = 30, save_interval: int = 1000, log_interval: int = 50,
grad_clip: float = 0.0, grad_norm: float = 30.0,
is_augment: bool = True, milestones: Tuple[int] = None, gamma: float = 0.1,
case_name: str = 'voice_bank', mix_loss: bool = False):
# check args
assert os.path.exists(meta_dir)
# create model
model = build_model(model_name).cuda()
# multi-gpu
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
# create optimizers
optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
if milestones:
milestones = [int(x) for x in list(milestones)]
scheduler = MultiStepLR(optimizer, milestones, gamma=gamma)
else:
scheduler = None
# handle cases
train_loader, valid_loader, sr = handle_cases(case_name, is_augment, meta_dir, batch_size, num_workers, fix_len)
if mix_loss:
trainer = LossMixingTrainer
else:
trainer = Wave2WaveTrainer
# train
trainer(
model, optimizer, train_loader, valid_loader,
max_step=max_step, valid_max_step=min(valid_max_step, len(valid_loader)), save_interval=save_interval,
log_interval=log_interval,
save_dir=save_dir, save_prefix=save_prefix, grad_clip=grad_clip, grad_norm=grad_norm,
pretrained_path=pretrained_path, scheduler=scheduler, sr=sr
).run()
def handle_cases(case_name: str, is_augment: bool, meta_dir: str, batch_size: int, num_workers: int, fix_len: int):
assert case_name in ['voice_bank', 'musdb18', 'dsd100'], \
f'{case_name} is not implemented ! choose one in [\'voice_bank\', \'musdb18\', \'dsd100\']'
sr = 44100
if is_augment:
is_audioset = False
if case_name == 'dsd100':
dataset_func = get_datasets
meta_cls = dsd100.DSD100Meta
elif case_name == 'musdb18':
dataset_func = get_datasets
meta_cls = musdb18.MUSDB18Meta
elif case_name == 'voice_bank':
sr = 22050
dataset_func = get_datasets
meta_cls = voice_bank.VoiceBankMeta
is_audioset = True
train_loader, valid_loader = dataset_func(
meta_dir, batch_size=batch_size, num_workers=num_workers, meta_cls=meta_cls,
fix_len=int(fix_len * sr), audio_mask=True, is_audioset=is_audioset
)
else:
if case_name == 'dsd100':
dataset_func = dsd100.get_datasets
elif case_name == 'musdb18':
dataset_func = musdb18.get_datasets
elif case_name == 'voice_bank':
dataset_func = voice_bank.get_datasets
sr = 22050
train_loader, valid_loader = dataset_func(
meta_dir, batch_size=batch_size, num_workers=num_workers, fix_len=int(fix_len * sr), audio_mask=True
)
return train_loader, valid_loader, sr
if __name__ == '__main__':
fire.Fire(main)
``` |
{
"source": "jonashaag/WSGITest",
"score": 3
} |
#### File: WSGITest/tests/test_environ.py
```python
from wsgitest import expect
from wsgitest.config import SERVER_HOST, SERVER_PORT_RANGE
from wsgitest.testutils import *
def test_GET(env, start_response):
assert_equal(env['REQUEST_METHOD'], 'GET')
start_response('200 ok', [])
return ()
def test_POST(env, start_response):
'''
POST / HTTP/1.0
Content-Length: 12
hello\\nworld!
'''
assert_equal(env['REQUEST_METHOD'], 'POST')
assert_equal(env['CONTENT_LENGTH'], '12')
assert_equal(env['wsgi.input'].read(), 'hello\nworld!')
start_response('200 ok', [])
return []
@expect.Status(400)
def test_BLAH(env, start_response):
'''
BLAH / HTTP/1.0
'''
assert_equal(env['REQUEST_METHOD'], 'BLAH')
start_response('200 ok', [])
return ''
# TODO: SCRIPT_NAME
# PATH_INFO
def test_query_string(env, start_response):
'''
GET /hello?foo=bar&x=y HTTP/1.0
'''
assert_equal(env['QUERY_STRING'], 'foo=bar&x=y')
start_response('200 ok', [])
return iter(lambda: None, None)
def test_content__star(env, start_response):
'''
GET / HTTP/1.1
Content-Type: text/x-python
Content-Length: 3
Hi!
'''
assert_equal(env['CONTENT_TYPE'], 'text/x-python')
assert_equal(env['CONTENT_LENGTH'], '3')
start_response('200 ok', [])
return ['']
def test_empty_query_string(env, start_response):
'''
GET / HTTP/1.0
'''
assert_equal(env.get('QUERY_STRING', ''), '')
start_response('200 ok', [])
return ['blah']
def test_server_star(env, start_response):
assert_equal(env['SERVER_NAME'], SERVER_HOST)
assert_contains(SERVER_PORT_RANGE, int(env['SERVER_PORT']))
start_response('200 ok', [])
return ()
def test_server_protocol(env, start_response):
assert_equal(env['SERVER_PROTOCOL'], 'HTTP/1.1')
start_response('200 ok', [])
return []
def test_http_vars(env, start_response):
'''
GET /foo HTTP/1.1
x-hello-iam-a-header: 42,42
header-twice: 1
IgNoREtheCAsE_pLeas-E: hello world!
header-twice: 2
and-a-multiline-value: foo 42
\tbar and\\r\\n\t
\tso
on
'''
# normalize the joined header because
# RFC2616 does not specify whether
# \r\n(\t| ) has to be replaced by ' '
env['HTTP_AND_A_MULTILINE_VALUE'] = \
env['HTTP_AND_A_MULTILINE_VALUE'].replace('\r\n', '').replace('\t', ' ')
assert_subdict(
env,
{ 'HTTP_X_HELLO_IAM_A_HEADER' : '42,42',
'HTTP_HEADER_TWICE' : '1,2',
'HTTP_IGNORETHECASE_PLEAS_E' : 'hello world!',
'HTTP_AND_A_MULTILINE_VALUE' : 'foo 42 bar and so on'}
)
start_response('200 ok', [])
return []
@expect.Status(200, 'ok')
def test_wsgi_vars(env, start_response):
assert_isinstance(env['wsgi.version'], tuple)
assert_equal(len(env['wsgi.version']), 2)
assert_equal(env['wsgi.url_scheme'][:4], 'http')
assert_isinstance(env['wsgi.multithread'], bool)
assert_isinstance(env['wsgi.multiprocess'], bool)
assert_isinstance(env['wsgi.run_once'], bool)
start_response('200 ok', [])
return []
@expect.Status(200)
@expect.Body('yay')
def test_input(env, start_response):
'''
POST /foo HTTP/1.1
Content-Length: 29
Hello<NL>World,\r<NL>\twhat's\r<NL>\r<NL><NL>up?
'''
input_ = env['wsgi.input']
assert_equal(input_.read(1), 'H')
assert_equal(input_.readline(), 'ello\n')
for line in input_:
assert_equal(line, 'World,\r\n')
break
assert_equal(input_.read(4), '\twha')
assert_equal(input_.readlines(), ["t's\r\n", "\r\n", "\n", "up?"])
assert_equal(input_.read(123), '')
start_response('200 ok', [])
return 'yay'
@expect.Status(200)
@expect.Body('yay')
@expect.ServerError('ExpectedError')
def test_errors(env, start_response):
errors = env['wsgi.errors']
errors.write("Hello World, this is an error\n")
errors.writelines(["Hello\n", "ExpectedError: blah"])
errors.flush()
start_response('200 ok', [])
return 'yay'
```
#### File: WSGITest/wsgitest/request.py
```python
from wsgitest.utils import normalize_docstring
class MalformedHeader(Exception):
pass
class Request(object):
def __init__(self, method, path, protocol='HTTP/1.1', header=None, body=None):
self.method = method
self.path = path
self.protocol = protocol
self.header = header or []
if isinstance(body, (dict, tuple, list)):
if method != 'POST':
raise TypeError(
"body may only be sequence/mapping if method is 'POST'")
else:
raise NotImplementedError()
self.body = body
@classmethod
def from_docstring(cls, docstring):
lines = normalize_docstring(docstring)
method, path, protocol = lines.next().split()
header = []
name = value = None
for line in lines:
if not line:
# end of header
body = '\r\n'.join(lines).replace('<NL>', '\n').rstrip('\r\n')
break
if line[0] in ' \t':
if name is None:
raise MalformedHeader('Continuation without field name')
# continue previous value
value += '\r\n' + line
else:
if name is not None:
header.append((name, value))
name = value = None
name, value = line.split(': ')
else:
body = None
if name is not None:
header.append((name, value))
return cls(method, path, protocol, header, body)
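# Usage sketch: the test modules above carry raw HTTP requests in their
# docstrings, and from_docstring turns such a docstring into a Request
# (assuming normalize_docstring dedents the text and skips leading blank lines):
#
#   raw = '''
#   GET /hello?foo=bar HTTP/1.0
#   X-Example: 1
#   '''
#   req = Request.from_docstring(raw)
#   # req.method == 'GET', req.path == '/hello?foo=bar', req.header == [('X-Example', '1')]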
```
#### File: WSGITest/wsgitest/run.py
```python
import os
import time
from wsgitest.base import Test, Testsuite, TestsuiteResult
from wsgitest.client import Client
from wsgitest.server import Rack
from wsgitest.utils import import_file, chain_iterable
def find_tests(files_and_folders):
files = []
for obj in files_and_folders:
if os.path.isfile(obj):
files.append(obj)
continue
for file in os.listdir(obj):
if file.endswith('.py'):
files.append(os.path.join(obj, file))
modules = []
for file in files:
modules.append(import_file(file))
suite = Testsuite()
for module in modules:
module_tests = [Test.from_func(getattr(module, obj)) \
for obj in dir(module) if obj.startswith('test_')]
if module_tests:
suite.add_tests(module, module_tests)
return suite
def run_tests(files):
testsuite = find_tests(files)
rack = Rack()
client = Client()
gen = rack.start_servers_lazily(testsuite.tests.chainvalues())
start = time.time()
client.run(testsuite, gen)
duration = time.time() - start
rack.stop_servers()
return testsuite.get_result(client.responses, rack.outputs, duration)
```
#### File: WSGITest/wsgitest/testutils.py
```python
def assert_equal(a, b):
assert a == b, '%r != %r (expected the latter)' % (a, b)
def assert_isinstance(obj, types):
assert isinstance(obj, types), \
'%r object of type %r not of type(s) %r' % (obj, type(obj), types)
def assert_subdict(a, b):
a_sub = dict((k, a[k]) for k in b)
assert_equal(a_sub, b)
def assert_contains(haystack, needle):
assert needle in haystack, '%r not in %r' % (needle, haystack)
``` |
{
"source": "JonasHablitzel/spaczz",
"score": 3
} |
#### File: spaczz/search/fuzzysearcher.py
```python
from __future__ import annotations
from typing import Any, Union
from spacy.tokens import Doc, Span, Token
from spacy.vocab import Vocab
from . import _PhraseSearcher
from ..process import FuzzyFuncs
class FuzzySearcher(_PhraseSearcher):
"""Class for fuzzy searching/matching in spacy `Doc` objects.
Fuzzy searching is done on the token level.
The class provides methods for finding the best fuzzy match
span in a `Doc`, the n best fuzzy matched spans in a `Doc`,
and fuzzy matching between any two given spaCy containers
(`Doc`, `Span`, `Token`).
Fuzzy matching is currently provided by rapidfuzz and the searcher
provides access to all rapidfuzz matchers with default settings.
Attributes:
vocab (Vocab): The shared vocabulary.
Included for consistency and potential future-state.
_fuzzy_funcs (FuzzyFuncs):
Container class housing fuzzy matching functions.
Functions are accessible via the classes `get()` method
by their given key name. All rapidfuzz matching functions
with default settings are available:
"simple" = `ratio`
"partial" = `partial_ratio`
"token_set" = `token_set_ratio`
"token_sort" = `token_sort_ratio`
"partial_token_set" = `partial_token_set_ratio`
"partial_token_sort" = `partial_token_sort_ratio`
"quick" = `QRatio`
"weighted" = `WRatio`
"quick_lev" = `quick_lev_ratio`
"""
def __init__(self: FuzzySearcher, vocab: Vocab) -> None:
"""Initializes a fuzzy searcher.
Args:
vocab: A spaCy `Vocab` object.
Purely for consistency between spaCy
and spaczz matcher APIs for now.
spaczz matchers are mostly pure-Python
currently and do not share vocabulary
with spaCy pipelines.
"""
super().__init__(vocab=vocab)
self._fuzzy_funcs: FuzzyFuncs = FuzzyFuncs(match_type="phrase")
def compare(
self: FuzzySearcher,
query: Union[Doc, Span, Token],
reference: Union[Doc, Span, Token],
ignore_case: bool = True,
fuzzy_func: str = "simple",
*args: Any,
**kwargs: Any,
) -> int:
"""Peforms fuzzy matching between two spaCy container objects.
Applies the given fuzzy matching algorithm (`fuzzy_func`)
to two spacy containers (`Doc`, `Span`, `Token`)
and returns the resulting fuzzy ratio.
Args:
query: First container for comparison.
reference: Second container for comparison.
ignore_case: Whether to lower-case `query` and `reference`
before comparison or not. Default is `True`.
fuzzy_func: Key name of fuzzy matching function to use.
All rapidfuzz matching functions with default settings
are available:
"simple" = `ratio`
"partial" = `partial_ratio`
"token_set" = `token_set_ratio`
"token_sort" = `token_sort_ratio`
"partial_token_set" = `partial_token_set_ratio`
"partial_token_sort" = `partial_token_sort_ratio`
"quick" = `QRatio`
"weighted" = `WRatio`
"token" = `token_ratio`
"partial_token" = `partial_token_ratio`
Default is `"simple"`.
*args: Overflow for child positional arguments.
**kwargs: Overflow for child keyword arguments.
Returns:
The fuzzy ratio between `query` and `reference` as an `int`.
Example:
>>> import spacy
>>> from spaczz.search import FuzzySearcher
>>> nlp = spacy.blank("en")
>>> searcher = FuzzySearcher(nlp.vocab)
>>> searcher.compare(nlp("spaczz"), nlp("spacy"))
73
"""
if ignore_case:
query_text = query.text.lower()
reference_text = reference.text.lower()
else:
query_text = query.text
reference_text = reference.text
return round(self._fuzzy_funcs.get(fuzzy_func)(query_text, reference_text))
```
#### File: spaczz/search/tokensearcher.py
```python
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
import regex
from spacy.tokens import Doc, Token
from spacy.vocab import Vocab
from ..process import FuzzyFuncs
from ..util import n_wise
class TokenSearcher:
"""Class for flexbile token searching in spaCy `Doc` objects.
Uses individual (and extended) spaCy token matching patterns to find
match candidates. Candidates are used to generate new patterns to add
to a spaCy `Matcher`.
"FUZZY" and "FREGEX" are the two additional spaCy token pattern options.
For example:
{"TEXT": {"FREGEX": "(database){e<=1}"}},
{"LOWER": {"FUZZY": "access", "MIN_R": 85, "FUZZY_FUNC": "quick_lev"}}
Make sure to use uppercase dictionary keys in patterns.
Attributes:
vocab (Vocab): The shared vocabulary.
Included for consistency and potential future-state.
_fuzzy_funcs (FuzzyFuncs):
Container class housing fuzzy matching functions.
Functions are accessible via the classes `get()` method
by their given key name. The following rapidfuzz matching
functions with default settings are available:
"simple" = `ratio`
"quick" = `QRatio`
"quick_lev" = `quick_lev_ratio`
"""
def __init__(self: TokenSearcher, vocab: Vocab) -> None:
"""Initializes a token searcher.
Args:
vocab: A spaCy `Vocab` object.
Purely for consistency between spaCy
and spaczz matcher APIs for now.
spaczz matchers are mostly pure-Python
currently and do not share vocabulary
with spaCy pipelines.
"""
self.vocab = vocab
self._fuzzy_funcs: FuzzyFuncs = FuzzyFuncs(match_type="token")
def fuzzy_compare(
self: TokenSearcher,
a: str,
b: str,
ignore_case: bool = True,
fuzzy_func: str = "simple",
) -> int:
"""Peforms fuzzy matching between two strings.
Applies the given fuzzy matching algorithm (fuzzy_func)
to two strings and returns the resulting fuzzy ratio.
Args:
a: First string for comparison.
b: Second string for comparison.
ignore_case: Whether to lower-case a and b
before comparison or not. Default is `True`.
fuzzy_func: Key name of fuzzy matching function to use.
The following rapidfuzz matching functions with default
settings are available:
"simple" = `ratio`
"quick" = `QRatio`
Default is `"simple"`.
Returns:
The fuzzy ratio between a and b.
Example:
>>> import spacy
>>> from spaczz.search import TokenSearcher
>>> nlp = spacy.blank("en")
>>> searcher = TokenSearcher(nlp.vocab)
>>> searcher.fuzzy_compare("spaczz", "spacy")
73
"""
if ignore_case:
a = a.lower()
b = b.lower()
return round(self._fuzzy_funcs.get(fuzzy_func)(a, b))
def match(
self: TokenSearcher,
doc: Doc,
pattern: List[Dict[str, Any]],
min_r: int = 75,
fuzzy_func: str = "simple",
) -> List[List[Optional[Tuple[str, str]]]]:
"""Finds potential token pattern matches in a `Doc` object.
Make sure to use uppercase dictionary keys in patterns.
Args:
doc: `Doc` object to search over.
pattern: Individual spaCy token pattern.
min_r: Minimum match ratio required for fuzzy matching.
Can be overwritten with token pattern options.
Default is `75`.
fuzzy_func: Fuzzy matching function to use.
Can be overwritten with token pattern options.
Default is `simple`.
Returns:
A list of lists with each inner list representing a potential match.
The inner lists will be populated with key, value tuples of token matches
and `None` for placeholder tokens representing non-fuzzy tokens.
Raises:
TypeError: doc must be a `Doc` object.
TypeError: pattern must be a `Sequence`.
ValueError: pattern cannot have zero tokens.
Example:
>>> import spacy
>>> from spaczz.search import TokenSearcher
>>> nlp = spacy.blank("en")
>>> searcher = TokenSearcher(nlp.vocab)
>>> doc = nlp("I was prescribed zithramax and advar")
>>> pattern = [
{"TEXT": {"FUZZY": "zithromax"}},
{"POS": "CCONJ"},
{"TEXT": {"FREGEX": "(advair){e<=1}"}}
]
>>> searcher.match(doc, pattern)
[[('TEXT', 'zithramax'), None, ('TEXT', 'advar')]]
"""
if not isinstance(doc, Doc):
raise TypeError("doc must be a Doc object.")
if not isinstance(pattern, list):
raise TypeError(
"pattern must be a list",
"Make sure pattern is wrapped in a list.",
)
if len(pattern) == 0:
raise ValueError("pattern cannot have zero tokens.")
matches = []
for seq in n_wise(doc, len(pattern)):
seq_matches = self._iter_pattern(seq, pattern, min_r, fuzzy_func)
if seq_matches:
matches.append(seq_matches)
if matches:
filtered_matches = [
i for n, i in enumerate(matches) if i not in matches[:n]
]
return filtered_matches
else:
return matches
@staticmethod
def regex_compare(text: str, pattern: str, ignore_case: bool = False) -> bool:
"""Performs fuzzy-regex supporting regex matching between two strings.
Args:
text: The string to match against.
pattern: The regex pattern string.
ignore_case: Whether to lower-case text
before comparison or not. Default is `False`.
Returns:
`True` if match, `False` if not.
Example:
>>> import spacy
>>> from spaczz.search import TokenSearcher
>>> nlp = spacy.blank("en")
>>> searcher = TokenSearcher(nlp.vocab)
>>> searcher.regex_compare("sequel", "(sql){i<=3}")
True
"""
if ignore_case:
text = text.lower()
if regex.match(pattern, text):
return True
else:
return False
def _iter_pattern(
self: TokenSearcher,
seq: Tuple[Token, ...],
pattern: List[Dict[str, Any]],
min_r: int,
fuzzy_func: str,
) -> List[Optional[Tuple[str, str]]]:
"""Evaluates each token in a pattern against a doc token sequence."""
seq_matches: List[Optional[Tuple[str, str]]] = []
for i, token in enumerate(pattern):
pattern_dict, case, case_bool = self._parse_case(token)
if isinstance(pattern_dict, dict):
pattern_text, pattern_type = self._parse_type(pattern_dict)
if pattern_text and pattern_type == "FUZZY":
if (
self.fuzzy_compare(
seq[i].text,
pattern_text,
case_bool,
pattern_dict.get("FUZZY_FUNC", fuzzy_func),
)
>= pattern_dict.get("MIN_R", min_r)
):
seq_matches.append((case, seq[i].text))
else:
return []
elif pattern_text and pattern_type == "FREGEX":
if self.regex_compare(seq[i].text, pattern_text, case_bool):
seq_matches.append((case, seq[i].text))
else:
return []
else:
seq_matches.append(None)
else:
seq_matches.append(None)
return seq_matches
@staticmethod
def _parse_case(token: Dict[str, Any]) -> Tuple[Union[str, Dict, None], str, bool]:
"""Parses the case of a token pattern."""
if token.get("TEXT"):
return token.get("TEXT"), "TEXT", False
else:
return token.get("LOWER"), "LOWER", True
@staticmethod
def _parse_type(pattern_dict: Dict[str, Any]) -> Tuple[str, str]:
"""Parses the type of a token pattern."""
fuzzy_text = pattern_dict.get("FUZZY")
regex_text = pattern_dict.get("FREGEX")
if isinstance(fuzzy_text, str):
return fuzzy_text, "FUZZY"
elif isinstance(regex_text, str):
return regex_text, "FREGEX"
else:
return "", ""
```
#### File: tests/test_matcher/test_tokenmatcher.py
```python
import pickle
from typing import List, Tuple
import warnings
import pytest
import spacy
from spacy.errors import MatchPatternError
from spacy.language import Language
from spacy.tokens import Doc, Span
from spaczz.matcher import TokenMatcher
def add_name_ent(
matcher: TokenMatcher, doc: Doc, i: int, matches: List[Tuple[str, int, int, None]]
) -> None:
"""Callback on match function. Adds "NAME" entities to doc."""
_match_id, start, end, _details = matches[i]
entity = Span(doc, start, end, label="NAME")
doc.ents += (entity,)
@pytest.fixture
def matcher(nlp: Language) -> TokenMatcher:
"""It returns a token matcher."""
matcher = TokenMatcher(vocab=nlp.vocab)
matcher.add(
"DATA",
[
[
{"TEXT": "SQL"},
{"LOWER": {"FREGEX": "(database){s<=1}"}},
{"LOWER": {"FUZZY": "access"}},
],
[{"TEXT": {"FUZZY": "Sequel"}}, {"LOWER": "db"}],
],
)
matcher.add("NAME", [[{"TEXT": {"FUZZY": "Garfield"}}]], on_match=add_name_ent)
return matcher
@pytest.fixture
def doc(nlp: Language) -> Doc:
"""Example doc for search."""
return nlp(
"""The manager gave me SQL databesE acess so now I can acces the Sequal DB.
My manager's name is Grfield.
"""
)
def test_adding_patterns(matcher: TokenMatcher) -> None:
"""It adds the "TEST" label and some patterns to the matcher."""
assert matcher.patterns == [
{
"label": "DATA",
"pattern": [
{"TEXT": "SQL"},
{"LOWER": {"FREGEX": "(database){s<=1}"}},
{"LOWER": {"FUZZY": "access"}},
],
"type": "token",
},
{
"label": "DATA",
"pattern": [{"TEXT": {"FUZZY": "Sequel"}}, {"LOWER": "db"}],
"type": "token",
},
{
"label": "NAME",
"pattern": [{"TEXT": {"FUZZY": "Garfield"}}],
"type": "token",
},
]
def test_add_without_list_of_patterns_raises_error(matcher: TokenMatcher) -> None:
"""Trying to add non-sequences of patterns raises a TypeError."""
with pytest.raises(TypeError):
matcher.add("TEST", [{"TEXT": "error"}]) # type: ignore
def test_add_with_zero_len_pattern(matcher: TokenMatcher) -> None:
"""Trying to add zero-length patterns raises a ValueError."""
with pytest.raises(ValueError):
matcher.add("TEST", [[]])
def test_len_returns_count_of_labels_in_matcher(matcher: TokenMatcher) -> None:
"""It returns the sum of unique labels in the matcher."""
assert len(matcher) == 2
def test_in_returns_bool_of_label_in_matcher(matcher: TokenMatcher) -> None:
"""It returns whether a label is in the matcher."""
assert "DATA" in matcher
def test_labels_returns_label_names(matcher: TokenMatcher) -> None:
"""It returns a tuple of all unique label names."""
assert all(label in matcher.labels for label in ("DATA", "NAME"))
def test_vocab_prop_returns_vocab(matcher: TokenMatcher, nlp: Language) -> None:
"""It returns the vocab it was initialized with."""
assert matcher.vocab == nlp.vocab
def test_remove_label(matcher: TokenMatcher) -> None:
"""It removes a label from the matcher."""
matcher.add("TEST", [[{"TEXT": "test"}]])
assert "TEST" in matcher
matcher.remove("TEST")
assert "TEST" not in matcher
def test_remove_label_raises_error_if_label_not_in_matcher(
matcher: TokenMatcher,
) -> None:
"""It raises a ValueError if trying to remove a label not present."""
with pytest.raises(ValueError):
matcher.remove("TEST")
def test_matcher_returns_matches(matcher: TokenMatcher, doc: Doc) -> None:
"""Calling the matcher on a `Doc` object returns matches."""
assert matcher(doc) == [
("DATA", 4, 7, None),
("DATA", 13, 15, None),
("NAME", 22, 23, None),
]
def test_matcher_returns_matches_in_expected_order(nlp: Language) -> None:
"""Calling the matcher on a `Doc` object returns matches in expected order."""
matcher = TokenMatcher(nlp.vocab)
matcher.add(
"COMPANY",
[
[
{"IS_UPPER": True, "OP": "+"},
{"IS_PUNCT": True, "OP": "?"},
{"TEXT": {"REGEX": r"S\.\s?[A-Z]\.?\s?[A-Z]?\.?"}},
{"IS_PUNCT": True, "OP": "?"},
]
],
)
doc = nlp("My company is called LARGO AND MARMG S.L.")
matches = matcher(doc)
assert doc[matches[0][1] : matches[0][2]].text == "LARGO AND MARMG S.L."
def test_matcher_returns_empty_list_if_no_matches(nlp: Language) -> None:
"""Calling the matcher on a `Doc` object with no matches returns empty list."""
matcher = TokenMatcher(nlp.vocab)
matcher.add("TEST", [[{"TEXT": {"FUZZY": "blah"}}]])
doc = nlp("No matches here.")
assert matcher(doc) == []
def test_matcher_warns_if_unknown_pattern_elements(nlp: Language) -> None:
"""Calling the matcher on a `Doc` object with no matches returns empty list."""
matcher = TokenMatcher(nlp.vocab)
matcher.add("TEST", [[{"TEXT": {"fuzzy": "test"}}]])
doc = nlp("test")
if spacy.__version__ < "3.0.0":
with pytest.warns(UserWarning):
matcher(doc)
else:
with pytest.raises(MatchPatternError):
matcher(doc)
def test_matcher_uses_on_match_callback(matcher: TokenMatcher, doc: Doc) -> None:
"""It utilizes callback on match functions passed when called on a Doc object."""
matcher(doc)
ent_text = [ent.text for ent in doc.ents]
assert "Grfield" in ent_text
def test_matcher_pipe(nlp: Language) -> None:
"""It returns a stream of Doc objects."""
warnings.filterwarnings("ignore")
doc_stream = (
nlp("test doc 1: Corvold"),
nlp("test doc 2: Prosh"),
)
matcher = TokenMatcher(nlp.vocab)
output = matcher.pipe(doc_stream)
assert list(output) == list(doc_stream)
def test_matcher_pipe_with_context(nlp: Language) -> None:
"""It returns a stream of Doc objects as tuples with context."""
warnings.filterwarnings("ignore")
doc_stream = (
(nlp("test doc 1: Corvold"), "Jund"),
(nlp("test doc 2: Prosh"), "Jund"),
)
matcher = TokenMatcher(nlp.vocab)
output = matcher.pipe(doc_stream, as_tuples=True)
assert list(output) == list(doc_stream)
def test_matcher_pipe_with_matches(nlp: Language) -> None:
"""It returns a stream of Doc objects and matches as tuples."""
warnings.filterwarnings("ignore")
doc_stream = (
nlp("test doc 1: Corvold"),
nlp("test doc 2: Prosh"),
)
matcher = TokenMatcher(nlp.vocab)
matcher.add(
"DRAGON",
[[{"TEXT": {"FUZZY": "Korvold"}}], [{"TEXT": {"FUZZY": "Prossh"}}]],
)
output = matcher.pipe(doc_stream, return_matches=True)
matches = [entry[1] for entry in output]
assert matches == [[("DRAGON", 4, 5, None)], [("DRAGON", 4, 5, None)]]
def test_matcher_pipe_with_matches_and_context(nlp: Language) -> None:
"""It returns a stream of Doc objects and matches and context as tuples."""
warnings.filterwarnings("ignore")
doc_stream = (
(nlp("test doc 1: Corvold"), "Jund"),
(nlp("test doc 2: Prosh"), "Jund"),
)
matcher = TokenMatcher(nlp.vocab)
matcher.add(
"DRAGON",
[[{"TEXT": {"FUZZY": "Korvold"}}], [{"TEXT": {"FUZZY": "Prossh"}}]],
)
output = matcher.pipe(doc_stream, return_matches=True, as_tuples=True)
matches = [(entry[0][1], entry[1]) for entry in output]
assert matches == [
([("DRAGON", 4, 5, None)], "Jund"),
([("DRAGON", 4, 5, None)], "Jund"),
]
def test_pickling_matcher(nlp: Language) -> None:
"""It pickles the matcher object."""
matcher = TokenMatcher(nlp.vocab)
matcher.add("NAME", [[{"TEXT": {"FUZZY": "Ridley"}}, {"TEXT": {"FUZZY": "Scott"}}]])
bytestring = pickle.dumps(matcher)
assert type(bytestring) == bytes
def test_unpickling_matcher(nlp: Language) -> None:
"""It unpickles the matcher object."""
matcher = TokenMatcher(nlp.vocab)
matcher.add("NAME", [[{"TEXT": {"FUZZY": "Ridley"}}, {"TEXT": {"FUZZY": "Scott"}}]])
bytestring = pickle.dumps(matcher)
matcher = pickle.loads(bytestring)
doc = nlp("<NAME> was the director of Alien.")
assert matcher(doc) == [("NAME", 0, 2, None)]
```
#### File: tests/test_search/test_tokensearcher.py
```python
import pytest
from spacy.language import Language
from spacy.tokens import Doc
from spaczz.search import TokenSearcher
@pytest.fixture
def searcher(nlp: Language) -> TokenSearcher:
"""It returns a token searcher."""
return TokenSearcher(vocab=nlp.vocab)
@pytest.fixture
def example(nlp: Language) -> Doc:
"""Example doc for search."""
return nlp(
"The manager gave me SQL databesE ACESS so now I can acces the SQL databasE."
)
def test_match_lower(searcher: TokenSearcher, example: Doc) -> None:
"""The searcher with lower-cased text is working as intended."""
assert (
searcher.match(
example,
[
{"TEXT": "SQL"},
{"LOWER": {"FREGEX": "(database){e<=1}"}},
{"LOWER": {"FUZZY": "access"}, "POS": "NOUN"},
],
)
== [[None, ("LOWER", "databesE"), ("LOWER", "ACESS")]]
)
def test_match_text(searcher: TokenSearcher, example: Doc) -> None:
"""The searcher with verbatim text is working as intended."""
assert (
searcher.match(
example,
[
{"TEXT": {"FUZZY": "access"}, "POS": "NOUN"},
{},
{"TEXT": {"REGEX": "[Ss][Qq][Ll]"}},
{"TEXT": {"FREGEX": "(database){e<=1}"}},
],
)
== [[("TEXT", "acces"), None, None, ("TEXT", "databasE.")]]
)
def test_match_multiple_matches(searcher: TokenSearcher, example: Doc) -> None:
"""The searcher with lower-cased text will return multiple matches if found."""
assert searcher.match(example, [{"LOWER": {"FUZZY": "access"}}]) == [
[("LOWER", "ACESS")],
[("LOWER", "acces")],
]
def test_no_matches(searcher: TokenSearcher, example: Doc) -> None:
"""No matches returns empty list."""
assert searcher.match(example, [{"TEXT": {"FUZZY": "MongoDB"}}]) == []
def test_empty_doc(searcher: TokenSearcher, nlp: Language) -> None:
"""Empty doc returns empty list."""
doc = nlp("")
assert searcher.match(doc, [{"TEXT": {"FUZZY": "MongoDB"}}]) == []
def test_raises_type_error_when_doc_not_doc(searcher: TokenSearcher) -> None:
"""It raises a type error if doc is not a `Doc`."""
with pytest.raises(TypeError):
searcher.match(
"example",
[
{"TEXT": "SQL"},
{"LOWER": {"FREGEX": "(database){e<=1}"}},
{"LOWER": {"FUZZY": "access"}, "POS": "NOUN"},
],
)
def test_raises_type_error_when_pattern_not_list(
searcher: TokenSearcher, example: Doc
) -> None:
"""It raises a type error if pattern is not a `list`."""
with pytest.raises(TypeError):
searcher.match(
example,
{"TEXT": "SQL"}, # type: ignore
)
def test_raises_value_error_when_pattern_has_zero_tokens(
searcher: TokenSearcher, example: Doc
) -> None:
"""It raises a value error if pattern has zero tokens."""
with pytest.raises(ValueError):
searcher.match(example, [])
``` |
{
"source": "jonashackt/molecule-ansible-vagrant-macosx",
"score": 2
} |
#### File: molecule/tests/test_docker.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_run_hello_world_container_successfully_on_macos(host, Command):
if host.system_info.type == "darwin":
hello_world_ran = Command("sudo docker run hello-world")
assert 'Hello from Docker!' in hello_world_ran.stdout
assert host.system_info.distribution == "Mac OS X"
def test_is_docker_installed(host):
package_docker = host.package('docker-ce')
assert package_docker.is_installed
def test_run_hello_world_container_successfully(host):
hello_world_ran = host.run("sudo docker run hello-world")
assert 'Hello from Docker!' in hello_world_ran.stdout
``` |
{
"source": "jonas-hagen/databird",
"score": 2
} |
#### File: databird/databird/configuration.py
```python
from dict_recursive_update import recursive_update
from frozendict import frozendict
from ruamel.yaml import YAML
import collections
import glob
import os
from databird import Repository
from databird import Profile
import importlib
_settings = {}
class ConfigurationError(ValueError):
pass
class Settings(collections.UserDict):
"""Combine multiple mappings for sequential lookup.
"""
def __init__(self, base_config_file):
self._maps = {}
self.data = frozendict()
self._base_config = base_config_file
base_config = self.add_file(base_config_file)
if "includes" in base_config:
root = os.path.dirname(base_config_file)
for inc in base_config["includes"]:
if not os.path.isabs(inc):
inc = os.path.normpath(os.path.join(root, inc))
for fn in glob.glob(inc):
self.add_file(fn)
self.data = self.parse()
def __setitem__(self, key, value):
raise TypeError("immutable")
def add_file(self, fn):
yaml = YAML(typ="safe")
with open(fn) as doc:
config = yaml.load(doc)
self._maps[fn] = config
return config
def _collect(self):
merged = {}
for m in self._maps.values():
merged = recursive_update(merged, m)
return frozendict(merged)
def parse(self):
config = self._collect()
# Drivers
driver_names = []
for name, profile in config["profiles"].items():
if "driver" not in profile:
raise ConfigurationError(
"Profile {} is missing driver field.".format(name)
)
driver_names.append(profile["driver"])
drivers = {}
for name in driver_names:
parts = name.split(".")
package_name = ".".join(parts[:-1])
class_name = parts[-1]
module_name = "databird_drivers." + package_name
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError:
raise ConfigurationError(
"Driver module not found: '{}' for driver '{}'".format(
module_name, name
)
)
try:
drivers[name] = getattr(module, class_name)
except AttributeError:
raise ConfigurationError(
"Driver module '{}' has no class '{}'.".format(
module_name, class_name
)
)
# Profiles
if "profiles" in config:
for name, profile_config in config["profiles"].items():
profile_config["driver"] = drivers[profile_config["driver"]]
config["profiles"][name] = Profile(name, **profile_config)
# Repositories
if "repositories" in config:
for name, repo_config in config["repositories"].items():
if not "profile" in repo_config:
raise ConfigurationError(
"Repository `{}` is missing profile field.".format(name)
)
profile = config["profiles"][repo_config["profile"]]
repo_config["profile"] = profile
repo_config_complete = profile.kwargs.copy()
repo_config_complete.update(repo_config)
config["repositories"][name] = Repository(name, **repo_config_complete)
return config
def get_settings():
return _settings
def initialize(base_config):
global _settings
_settings = Settings(base_config)
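# Illustration of the driver lookup performed in parse() above: a profile's
# "driver" string such as "standard.CommandDriver" is resolved to the module
# "databird_drivers.standard" and the class "CommandDriver". A profile entry
# in the YAML config would therefore look roughly like this (a sketch; the
# remaining keys become profile kwargs that are merged into the repository
# configuration):
#
#   profiles:
#     shell:
#       driver: standard.CommandDriver
#       command: cp
#       patterns:
#         default: ["{time:%Y-%m-%d}.txt", "{target_file}"]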
```
#### File: databird_drivers/standard/command.py
```python
from databird import BaseDriver
import logging
import os
import subprocess
import shutil
logger = logging.getLogger("databird_drivers.command")
class CommandDriver(BaseDriver):
"""
Execute a shell command to retrieve files.
Configuration options:
- command: The command to call (absolute path is preferred)
- check: Check if exit status is 0
- env: A key -> value map for environment variables to export
- patterns: A target_name->[param list] map.
This map specifies a pattern (that is rendered in context) for every
target name in the repository.
Example configuration:
```
command: nsck
check: True
env:
API_KEY: <KEY>
patterns:
merra_temperature:
- "-v lat,lon,time,lev,T"
- "https://goldsmr5.gesdisc.eosdis.nasa.gov/opendap/MERRA2/M2I3NPASM.5.12.4/{time:%Y}/{time:%m}/MERRA2_400.inst3_3d_asm_Np.{time:%Y%m%d}.nc4"
- "{target_file}"
```
"""
@classmethod
def check_config(cls, config):
super().check_config(config)
assert "command" in config
assert isinstance(config["check"], bool)
assert isinstance(config["env"], dict)
assert "patterns" in config
assert isinstance(config["patterns"], dict)
for v in config["patterns"].values():
assert isinstance(v, list)
@classmethod
def default_config(cls):
return {"env": dict(), "check": True}
def check_connection(self):
return shutil.which(self._config["command"]) is not None
def is_available(self, context):
return self.check_connection()
def _render_arguments(self, context, target, name):
pattern = self._config["patterns"][name]
rendered = [arg.format(target_file=target, **context) for arg in pattern]
return rendered
def retrieve_single(self, context, target, name):
# target is an absolute local path to a file
self.create_dir(target)
args = self._render_arguments(context, target, name)
command = [self._config["command"]] + args
env = os.environ.copy()
env.update(self._config["env"])
subprocess.run(command, env=env, check=self._config["check"])
```
#### File: databird/databird/runner.py
```python
from collections import defaultdict
from databird import Repository
from databird import utils
from typing import List
import datetime as dt
import logging
from databird.queue import MultiQueue
from redis import Redis
logger = logging.getLogger("databird.runner")
def retrieve_missing(
root_dir, repos: List[Repository], redis_conn=None, is_async=True, ref_time=None
):
"""Retrieve all targets that are missing from the repositories."""
if redis_conn is None:
redis_conn = Redis()
queue = MultiQueue(redis_conn, is_async=is_async)
if ref_time is None:
ref_time = dt.datetime.now()
logger.debug("ref time is " + str(ref_time))
submitted_jobs = []
for repo in repos:
logger.debug("checking repo " + repo.name)
for context, targets in repo.iter_missing(root_dir, ref_time):
logger.debug(
"missing {} targets for {}".format(len(targets), str(context["time"]))
)
driver_name = str(type(repo.driver).__name__)
info = "Repo {} with {} for targets {} at {}".format(
repo.name, driver_name, ", ".join(targets), str(context["time"])
)
job_id = "db_" + utils.hash_dict(targets)
job = queue.submit_job(
repo.queue,
job_id,
repo.driver.retrieve_safe,
context,
targets,
description=info,
)
if job is not None:
logger.info("Sumitted job " + job_id)
submitted_jobs.append(job)
else:
status = queue.job_status(job_id)
logger.info("Job {} already in queue: {}".format(job_id, str(status)))
return submitted_jobs
```
#### File: databird/databird/utils.py
```python
import hashlib
from databird import dtutil
def hash_dict(d: dict):
m = hashlib.sha1()
for k in sorted(d):
v = d[k]
if not isinstance(k, str) or not isinstance(v, str):
raise NotImplementedError("hash for other than dict(str->str)")
m.update(v.encode())
return m.hexdigest()
def get_context(time):
context = {
"time": time,
"month_last_date": dtutil.month_last_day(time),
"month_first_date": dtutil.month_first_day(time),
"iso_date": dtutil.iso_date(time),
}
return context
def render_dict(d: dict, context: dict):
d2 = dict()
for key, value in d.items():
if isinstance(value, str):
d2[key] = value.format(**context)
elif isinstance(value, dict):
d2[key] = render_dict(value, context)
else:
d2[key] = value
return d2
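if __name__ == "__main__":
    # Small usage sketch for the helpers above.
    import datetime as dt

    ctx = get_context(dt.datetime(2019, 3, 1, 12, 0))
    targets = render_dict({"default": "{time:%Y}/{time:%Y-%m-%d}.nc"}, ctx)
    print(targets)              # {'default': '2019/2019-03-01.nc'}
    print(hash_dict(targets))   # deterministic sha1 hex digest of the values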
```
#### File: tests/drivers/test_command.py
```python
from databird_drivers.standard import CommandDriver
import datetime as dt
import glob
def test_args_render():
config = dict(
command="cp",
patterns=dict(default=["-v", "simple_{date:%Y-%m-%d}.txt", "{target_file}"]),
)
cd = CommandDriver(config)
context = dict(date=dt.date(2019, 3, 1))
assert cd._render_arguments(
context, "file.x", "default"
) == "-v simple_2019-03-01.txt file.x".split(" ")
def test_command(tmpdir):
source_root = tmpdir.mkdir("source")
repo_root = tmpdir.mkdir("repo")
# Create new source file
(source_root.join("simple_2019-03-01.txt")).open("w").close()
config = dict(
command="cp",
patterns=dict(
default=[
"-f",
str(source_root.join("simple_{date:%Y-%m-%d}.txt")),
"{target_file}",
]
),
)
dri = CommandDriver(config)
context = dict(date=dt.date(2019, 3, 1))
assert dri.is_available(context)
assert len(list(glob.glob(str(repo_root.join("*.txt"))))) == 0
dri.retrieve(context, dict(default=repo_root.join("new_file.txt")))
assert len(list(glob.glob(str(repo_root.join("*.txt"))))) == 1
```
#### File: tests/drivers/test_ftp.py
```python
from databird.utils import get_context
from databird_drivers.standard import FtpDriver
import datetime as dt
import pytest
@pytest.mark.external_service
def test_ftp(tmpdir):
fd = FtpDriver(
dict(
host="cddis.nasa.gov",
user="anonymous",
password="",
tls=False,
root="/gnss/data/daily",
patterns=dict(status="{time:%Y}/{time:%j}/{time:%y%j}.status"),
)
)
context = get_context(time=dt.datetime(2019, 1, 11))
target = tmpdir.join("file.status")
# Since we are connecting a third-party service,
# only run the test if the connection can be established
if fd.check_connection():
assert fd.is_available(context)
fd.retrieve(context, dict(status=target))
with open(target) as f:
first_line = f.readline()
assert first_line.startswith(
"IGS Tracking Network Status (RINEX V2 Data) for 11-Jan-19"
)
``` |
{
"source": "jonas-hagen/dogatrest",
"score": 2
} |
#### File: jonas-hagen/dogatrest/dogatrest.py
```python
import falcon
import logging
import datetime
import requests
import json
from apscheduler.schedulers.background import BackgroundScheduler
logging.basicConfig(level=logging.INFO)
class StorageEngine:
"""
Simple key value store.
"""
def __init__(self):
self.store = dict()
def __getitem__(self, key):
key = tuple(key)
try:
value = self.store[key]
except KeyError:
raise StorageError(f'No entry found for {key}.')
return value
def __setitem__(self, key, value):
key = tuple(key)
self.store[key] = value
def load_file(self, filename, prefix=None):
with open(filename) as f:
items = json.load(f)
for id, item in items.items():
if prefix is not None:
self.store[prefix, id] = item
else:
self.store[id] = item
logging.info(f'Loaded {len(items)} items to db.')
class StorageError(Exception):
def __init__(self, message):
self.message = message
@staticmethod
def handle(ex, req, resp, params):
description = (f'Sorry, could not write or read your thing to or from the '
f'database: {ex.message}.')
raise falcon.HTTPError(falcon.HTTP_725,
'Database Error',
description)
class RequireJSON:
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
'This API only supports responses encoded as JSON.',
href='http://docs.examples.com/api/json')
if req.method in ('POST', 'PUT'):
if 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(
'This API only supports requests encoded as JSON.',
href='http://docs.examples.com/api/json')
def max_body(limit):
def hook(req, resp, resource, params):
length = req.content_length
if length is not None and length > limit:
msg = ('The size of the request is too large. The body must not '
'exceed ' + str(limit) + ' bytes in length.')
raise falcon.HTTPRequestEntityTooLarge(
'Request body is too large', msg)
return hook
class DogResource:
def __init__(self, db):
self.db = db
def on_get(self, req, resp, dog_id):
# Upon get, we return the data
resp.media = self.db['dog', dog_id]
resp.status = falcon.HTTP_200
@falcon.before(max_body(64 * 1024))
def on_post(self, req, resp, dog_id):
# Upon post we reset the last_time
data = req.context or dict()
now = datetime.datetime.utcnow()
dog = self.db['dog', dog_id]
dog['last_time'] = now.timestamp()
dog['last_time_str'] = now.isoformat()
dog['last_data'] = data
self.db['dog', dog_id] = dog
resp.status = falcon.HTTP_200
resp.media = self.db['dog', dog_id]
def check_dogs():
dogs = {id: value for (_, id), value in db.store.items()}
overdue = dict()
now = datetime.datetime.utcnow().timestamp()
for id, dog in dogs.items():
if 'last_time' in dog:
delta = (now - dog['last_time']) / 60
if delta > dog['interval'] and dog.get('alive', True):
dog['bark_status'] = bark_dead(dog)
dog['alive'] = False
elif delta < dog['interval'] and not dog.get('alive', False):
dog['bark_status'] = bark_alive(dog)
dog['alive'] = True
if not dog.get('alive', True):
overdue[id] = dog
logging.info(f'{len(overdue)} dogs are dead.')
return overdue
def bark_dead(dog):
default_template = {'message': 'I am probably dead. Could anyone check?', 'user': '{name}'}
data = dict()
for key, value in dog.get('template_dead', default_template).items():
data[key] = value.format(**dog)
r = requests.post(dog['hook'], json=data)
logging.info(f"Hooked {dog['name']}: Dead.")
return r.status_code
def bark_alive(dog):
default_template = {'message': 'Back to life! Thanks.', 'user': '{name}'}
data = dict()
for key, value in dog.get('template_alive', default_template).items():
data[key] = value.format(**dog)
r = requests.post(dog['hook'], json=data)
logging.info(f"Hooked {dog['name']}: Alive.")
return r.status_code
db = StorageEngine()
db.load_file('data/dogs.json', 'dog')
app = falcon.API(middleware=[
RequireJSON(),
])
dogs = DogResource(db)
app.add_route('/dog/{dog_id}/', dogs)
app.add_error_handler(StorageError, StorageError.handle)
scheduler = BackgroundScheduler(timezone='UTC')
scheduler.add_job(check_dogs, 'interval', seconds=60)
scheduler.start()
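# Hypothetical shape of one entry in data/dogs.json, based on the fields read
# by DogResource and check_dogs()/bark_*() above ("interval" is in minutes and
# "hook" receives a JSON POST whenever the dog dies or comes back to life):
#
#   {
#     "backup-job": {
#       "name": "Nightly backup",
#       "interval": 1500,
#       "hook": "https://chat.example.com/hooks/abc123",
#       "template_dead":  {"message": "I am probably dead.", "user": "{name}"},
#       "template_alive": {"message": "Back to life!", "user": "{name}"}
#     }
#   }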
``` |
{
"source": "jonas-hagen/ofcourse",
"score": 3
} |
#### File: ofcourse/ofcourse/models.py
```python
from dataclasses import dataclass
from dataclasses import field
import datetime
import typing
import phonenumbers
@dataclass
class Contact:
channel: str
address: str
order: int = field(repr=False, default=0)
def __post_init__(self):
self.normalize()
@property
def is_phone(self):
return self.channel in ("mobile", "phone", "emergency")
@classmethod
def _get_channel_order(cls):
return ("mobile", "phone", "email", "emergency")
@classmethod
def key(cls, obj):
return cls._get_channel_order().index(obj.channel), obj.order
def normalize(self):
if self.channel not in self._get_channel_order():
raise ValueError("Unknown channel " + self.channel)
if self.is_phone:
phone = phonenumbers.parse(str(self.address), "CH")
if not phonenumbers.is_valid_number(phone):
raise ValueError("Invalid phone number " + self.address)
self.address = phonenumbers.format_number(
phone, phonenumbers.PhoneNumberFormat.INTERNATIONAL
)
@dataclass
class Person:
first_name: str
last_name: str
adress: str = field(repr=False, default="")
city: str = field(default="")
plz: str = field(repr=False, default="")
country: str = field(default="CH")
birthdate: datetime.date = field(default=None)
gender: str = field(default="")
contact: typing.List[Contact] = field(repr=False, default_factory=list)
notes: typing.List[str] = field(repr=False, default_factory=list)
def __post_init__(self):
self.normalize()
@property
def full_name(self):
return self.first_name + " " + self.last_name
@property
def identifier(self):
return self.full_name.replace(" ", "_")
@property
def primary_email(self):
return list(filter(lambda c: c.channel == "email", self.contact))[0].address
def age(self, date=None):
if not self.birthdate:
return None
if not date:
date = datetime.date.today()
age = (
date.year
- self.birthdate.year
- ((date.month, date.day) < (self.birthdate.month, self.birthdate.day))
)
return age
@classmethod
def key(cls, obj):
return obj.full_name, obj.birthdate
def normalize(self):
if self.gender:
if self.gender not in ("o", "f", "m"):
raise ValueError("Unknown gender " + self.gender)
if self.plz:
if isinstance(self.plz, int):
self.plz = str(self.plz)
if "-" in self.plz:
country, plz = self.plz.split("-")
self.country = country
self.plz = plz
try:
int(self.plz)
except ValueError as e:
raise ValueError("Invalid plz code " + self.plz)
if self.notes:
if not isinstance(self.notes, (tuple, list)):
self.notes = [self.notes]
@dataclass
class Course:
title: str
first_date: datetime.date = field(default=None)
last_date: datetime.date = field(default=None)
number: int = field(repr=False, default=0)
code: str = field(default="")
costs: float = field(default=0)
notes: typing.List[str] = field(repr=False, default_factory=list)
participants: typing.List[Person] = field(repr=False, default_factory=list)
waitlist: typing.List[str] = field(repr=False, default_factory=list)
instructors: typing.List[Person] = field(repr=False, default_factory=list)
```
#### File: ofcourse/ofcourse/parsers.py
```python
from ruamel import yaml
from ofcourse import models
def contacts_to_list(contacts):
lst = list()
for channel, adresses in contacts.items():
if not isinstance(adresses, (list, tuple)):
adresses = [adresses]
for i, a in enumerate(adresses):
lst.append(models.Contact(channel=channel, address=a, order=i))
return lst
def person_list_parser(f):
y = yaml.YAML()
lst = list()
for key, p_dict in y.load(f).items():
if "contact" in p_dict:
p_dict["contact"] = contacts_to_list(p_dict["contact"])
if "first_name" not in p_dict:
p_dict["first_name"] = key.split("_")[0]
if "last_name" not in p_dict:
p_dict["last_name"] = key.split("_")[-1]
lst.append(models.Person(**p_dict))
return lst
def course_parser(f, person_list):
y = yaml.YAML()
course_dict = y.load(f)
person_dict = {p.identifier: p for p in person_list}
course_dict["participants"] = [
person_dict[name] for name in course_dict["participants"]
]
course = models.Course(**course_dict)
return course
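if __name__ == "__main__":
    # Usage sketch with a hypothetical people file; the top-level keys follow
    # the "<First>_<Last>" convention used to derive first_name/last_name.
    import io

    PEOPLE_YAML = (
        "Jane_Doe:\n"
        "  city: Bern\n"
        "  contact:\n"
        "    email: jane@example.org\n"
    )
    persons = person_list_parser(io.StringIO(PEOPLE_YAML))
    print(persons[0].full_name)      # Jane Doe
    print(persons[0].primary_email)  # jane@example.org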
```
#### File: ofcourse/scripts/from_fixture.py
```python
import click
import json
from docopt import docopt
from ruamel import yaml
from collections import defaultdict
from ofcourse import people
from datetime import datetime
import sys
@click.command()
@click.argument('filename')
def get_people(filename):
with open(filename, 'r') as f:
data = json.load(f)
persons = {el['pk']: el['fields'] for el in data if el['model'] == 'kumi.person'}
contacts = {el['pk']: el['fields'] for el in data if el['model'] == 'kumi.kontakt'}
c_map = {
'e': 'email',
'e2': 'email2',
'N': 'emergency',
'm': 'mobile',
'p': 'phone',
}
ps = list()
for pk, p in persons.items():
name = p['vorname'] + ' ' + p['nachname']
pn = dict()
pn['first_name'] = p['vorname']
pn['last_name'] = p['nachname']
pn['adress'] = p['adresse']
pn['plz'] = p['plz']
pn['city'] = p['ort']
pn['birthdate'] = datetime.strptime(p['geburtsdatum'], '%Y-%m-%d').date()
if p['geschlecht']:
pn['gender'] = p['geschlecht']
if p['bemerkungen']:
pn['notes'] = p['bemerkungen']
c_data = [el for el in contacts.values() if el['person'] == pk]
c = defaultdict(list)
for k in c_data:
c[c_map[k['kanal']]].append(k['adresse'])
# keep secondary email at end
c['email'] += c.get('email2', [])
if not c['email']:
del c['email']
if 'email2' in c:
del c['email2']
pn['contact'] = c
try:
ps.append(people.normalize_person(pn, name))
except people.PersonError as e:
print('While at {} -> {}:'.format(pk, name), file=sys.stderr)
print(pn, file=sys.stderr)
print(' ' + str(e), file=sys.stderr)
people.dump(sys.stdout, dict(ps))
if __name__ == '__main__':
get_people()
``` |
{
"source": "jonas-hagen/pyretrievals",
"score": 3
} |
#### File: data/ecmwf/store.py
```python
from retrievals.data import dtutils
from retrievals.data.ecmwf import levels
import pandas as pd
import xarray as xr
class ECMWFLocationFileStore:
"""
This data store assumes that the files are organised in the following way:
* One day per file
* One location `(lat, lon)` per file.
* All data is along a `loc` coordinate and `lat`, `lon` are along this coordinate.
* The variable holding the logarithm of surface pressure is `logarithm_of_surface_pressure`
* The variable holding the level is called `level`
"""
def __init__(self, path, fmt):
"""
Build a store given the path and format.
If the files are organized as `/path/to/folder/2018/ecmwf_2018-01-01.nc`, one can build the store with:
>>> es = ECMWFLocationFileStore('/path/to/folder', '%Y/ecmwf_%Y-%m-%d.nc')
and then ask for desired data:
>>> es.select_time('2018-01-01 12:30', '2018-01-02 16:45')
:param path: The base path to the files.
:param fmt: The format string for the file names as used by :py:meth:`datetime.datetime.strftime`
"""
self._path = path
self._fmt = fmt
self._files = dict(dtutils.date_glob(path, fmt))
def select_time(self, t1, t2, **kwargs):
"""
Select all data within time interval `(t1, t2)`
:param t1: Start time
:type t1: Anything understood by :py:func:`pandas.to_datetime`
:param t2: End time
:type t2: Anything understood by :py:func:`pandas.to_datetime`
:param kwargs: Additional arguments to :py:func:`xarray.open_mfdataset`
:return: A dataset that has been normalized by :py:meth:`normalize`
:rtype: xarray.Dataset
"""
ts1 = pd.to_datetime(t1)
ts2 = pd.to_datetime(t2)
days = pd.date_range(ts1.floor('D'), ts2.floor('D'), freq='D')
try:
files = sorted(self._files[d] for d in days)
except KeyError as e:
raise KeyError('No ECMWF data found for {}'.format(e.args[0])) from e
ds_mf = xr.open_mfdataset(files, **kwargs)
ds = ds_mf.sel(time=slice(ts1, ts2)).compute().copy(deep=True)
ds_mf.close()
return self.normalize(ds)
def select_hours(self, t1, t2, hour1, hour2):
"""
Select all data for certain hours within time interval `[t1, t2]` (inclusive).
:param t1: Start time
:type t1: Anything understood by :py:func:`pandas.to_datetime`
:param t2: End time
:type t2: Anything understood by :py:func:`pandas.to_datetime`
:param int hour1: First hour
:param int hour2: Last hour (might be smaller than `hour1` if range spans midnight)
:return: A dataset that has been normalized by :py:meth:`normalize`
:rtype: xarray.Dataset
"""
ds = self.select_time(t1, t2)
rel_hours = (ds['time.hour'] - hour1) % 24
rel_hour2 = (hour2 - hour1) % 24
ds = ds.where(rel_hours <= rel_hour2, drop=True)
return ds
@staticmethod
def normalize(ds):
"""
Normalize an ECMWF dataset in the following way:
* Strip the (single) location
* Add a pressure field
* Sort by increasing altitude (decreasing level)
* Sort by time
"""
# We only have one location
ds = ds.isel(loc=0)
# Add Pressure
a = levels.hybrid_level['a']
b = levels.hybrid_level['b']
sp = xr.ufuncs.exp(ds['logarithm_of_surface_pressure'])
ds['pressure'] = a + b * sp
# Sort it from surface to top of atmosphere
ds = ds.sortby('level', ascending=False)
ds = ds.sortby('time')
return ds
@property
def path(self):
return self._path
@property
def fmt(self):
return self._fmt
@property
def file_index(self):
return self._files
@property
def file_names(self):
return sorted(set(self._files.values()))
@property
def location(self):
ds = xr.open_dataset(self.file_names[0])
name = ds['loc'].values[0]
lat = ds['lat'].values[0]
lon = ds['lon'].values[0]
return name, lat, lon
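# Usage sketch (path and dates are placeholders): select only the profiles
# between 23:00 and 05:00, a range that wraps midnight.
#
#   es = ECMWFLocationFileStore('/path/to/folder', '%Y/ecmwf_%Y-%m-%d.nc')
#   ds_night = es.select_hours('2018-01-01', '2018-01-02', 23, 5)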
``` |
{
"source": "jonashagstedt/django-jsx",
"score": 2
} |
#### File: django-jsx/django_jsx/apps.py
```python
from django.apps import AppConfig
from django.conf import settings
from .server.template_server import TemplateServer
import atexit
template_server = TemplateServer()
class DjangoJsxConfig(AppConfig):
name = 'django_jsx'
verbose_name = "Django JSX"
def ready(self):
if getattr(settings, 'DJANGO_ISOMORPHIC_AUTOSTART', True):
template_server.start()
def kill_server():
if template_server.started:
print('Closing template server')
template_server.terminate()
atexit.register(kill_server)
```
#### File: django_jsx/server/template_server.py
```python
from subprocess import Popen
from django.conf import settings
import os
import signal
class TemplateServer():
def __init__(self):
self.started = False
self.proc = None
def get_cwd(self):
pth = os.path.join(
os.path.dirname(__file__),
'../../javascript/dist/'
)
return pth
def get_options(self):
options = [
'debug={}'.format(settings.DEBUG),
]
renderer = getattr(settings, 'DJANGO_ISOMORPHIC_RENDERER', None)
if renderer:
options.append('renderer={}'.format(renderer))
return options
def start(self):
if self.started:
return
self.started = True
cwd = self.get_cwd()
options = self.get_options()
self.proc = Popen(['node', 'template-server.js'] + options, cwd=cwd, preexec_fn=os.setsid)
def terminate(self):
os.killpg(self.proc.pid, signal.SIGTERM)
```
#### File: django-jsx/tests/test_backend.py
```python
from django.conf import settings
from django.test import TestCase
from django_jsx.template.backend import JsTemplates, JsTemplate
class TestEngine(TestCase):
def test_get_template(self):
"""
Get a template by name
"""
params = {
'DIRS': [settings.TEMPLATE_DIR],
'APP_DIRS': False,
'NAME': 'backend',
'OPTIONS': {}
}
templates = JsTemplates(params)
template = templates.get_template('empty-template.js')
self.assertIsInstance(template, JsTemplate)
```
#### File: django-jsx/tests/test_template_tag.py
```python
from django.template import RequestContext
from django.test import TestCase
from django_jsx.templatetags import djangojs
from django_jsx.templatetags.djangojs import JsMissingTemplateDirException
class TestLoader(TestCase):
def test_render_template(self):
"""
Render a template with the template tag
"""
context = RequestContext(request=None)
template = djangojs.include_js(context, template_name='test-component.js')
self.assertEqual(template, '<span>Test component</span>')
def test_try_to_render_template_without_the_js_backend(self):
"""
Raises JsMissingTemplateDirException if the backend is not specified
"""
context = RequestContext(request=None)
with self.settings(TEMPLATES=[{'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': []}]):
with self.assertRaises(JsMissingTemplateDirException):
djangojs.include_js(context, template_name='test-component.js')
``` |
{
"source": "jonashagstedt/django-nodetest",
"score": 2
} |
#### File: django-nodetest/nodetest/node_runner.py
```python
from os import remove
from django.conf import settings
from .utils import make_temp_file, parse_repl
import subprocess
import tempfile
import sys
import json
class JavaScriptException(Exception):
pass
def _get_script_root():
assert hasattr(settings, 'NODETEST_SCRIPT_ROOT'), """Set "NODETEST_SCRIPT_ROOT" in settings, \ne.g: NODETEST_SCRIPT_ROOT = join(BASE_DIR, 'static', 'js')"""
return settings.NODETEST_SCRIPT_ROOT
def process_script(script_file, plaintext=False, enable_console=False):
js_dir = _get_script_root()
copied_script_path = make_temp_file(js_dir, script_file)
# Only parse REPL if the console is enabled
# or the script will hang waiting for input
if enable_console:
parse_repl(copied_script_path['absolute_path'])
try:
err, out = run_node_script(
js_dir,
copied_script_path['relative_path'],
enable_console
)
if err:
err = '\n\n-------------- JAVASCRIPT EXCEPTION --------------\n{}'.format(err)
raise JavaScriptException(err)
if plaintext:
return out.strip()
if out == '':
return {}
return json.loads(out.strip())
except Exception as ex:
raise ex
finally:
remove(copied_script_path['absolute_path'])
def run_node_script(js_dir, script_path, enable_console=False):
node_path = getattr(settings, 'NODETEST_NODE_BIN', 'node')
with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:
# Enabling console means output is written to stdout.
# this means no return values from the JavaScript code
# but it makes it possible to enter the Node REPL
if enable_console:
stdout_file = sys.stdout
# cmd = 'babel-node {}'.format(script_path)
cmd = '{} {}'.format(node_path, script_path)
popen = subprocess.Popen(cmd, stdout=stdout_file, stderr=stderr_file, shell=True, cwd=js_dir)
popen.wait()
stderr_file.seek(0)
stderr = stderr_file.read()
stderr = stderr.decode()
if not enable_console:
stdout_file.seek(0)
stdout = stdout_file.read()
stdout = stdout.decode()
else:
stdout = ''
return stderr, stdout
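# Django settings consumed by this module (a sketch; names taken from the
# lookups above):
#
#   NODETEST_SCRIPT_ROOT = os.path.join(BASE_DIR, 'static', 'js')  # required
#   NODETEST_NODE_BIN = '/usr/local/bin/node'                      # optional, defaults to 'node'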
``` |
{
"source": "jonashagstedt/django-reform",
"score": 2
} |
#### File: django-reform/reform/form.py
```python
from collections import OrderedDict
from django.core.urlresolvers import reverse
from .drf_fields import drf_field_to_field
from .fields import Field
class ReactFormMeta(object):
def __init__(self, options=None):
self.fields = []
self.serializer_class = None
self.exclude = []
if options:
self.serializer_class = getattr(options, 'serializer_class', None)
self.fields = getattr(options, 'fields', [])
self.exclude = getattr(options, 'exclude', [])
class ReactForm(object):
Meta = None
create_url_name = None
create_url = None
update_url_name = None
update_url = None
form_name = None
id_field = 'id'
def __init__(self, name=None):
if name:
self.form_name = name
assert self.form_name is not None, 'A ReactForm requires a unique "name" per form'
self.opts = ReactFormMeta(self.Meta)
self.fields = OrderedDict()
fields = {}
if self.opts.serializer_class:
drf_fields = self.opts.serializer_class().fields.items()
for name, drf_field in drf_fields:
if name in self.opts.exclude:
continue
if self.opts.fields and name not in self.opts.fields:
continue
field = drf_field_to_field(drf_field)
if field:
fields[name] = field
for name, field in self.__class__.__dict__.items():
if name in self.opts.exclude or not isinstance(field, Field):
continue
fields[name] = field
if self.opts.fields:
for name in self.opts.fields:
self.fields[name] = fields[name]
else:
self.fields = fields
def get_create_url(self, **kwargs):
if self.create_url_name:
return reverse(self.create_url_name)
return self.create_url
def get_update_url(self, **kwargs):
if self.update_url_name:
return reverse(self.update_url_name)
return self.update_url
def to_dict(self):
data = dict({
'name': self.form_name,
'create_url': self.get_create_url(),
'update_url': self.get_update_url(),
'id_field': self.id_field
})
data['fields'] = []
for name, field in self.fields.items():
field_data = field.to_dict(name=name, id_field='id-{}-{}'.format(self.form_name, name))
data['fields'].append(field_data.copy())
return data
def is_valid(self):
return True # Implement this
def values(self, post_data):
data = {}
for name, field in self.fields.items():
key = '{}-{}'.format(self.form_name, name)
data[name] = field.get_value(post_data, key)
return data
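# Hypothetical subclass illustrating the Meta options handled above (assumes a
# DRF serializer named BookSerializer exists elsewhere):
#
#   class BookForm(ReactForm):
#       form_name = 'book-form'
#       create_url = '/api/books/'
#       update_url = '/api/books/'
#
#       class Meta:
#           serializer_class = BookSerializer
#           exclude = ['id']
#
#   BookForm().to_dict()  # -> name, urls, id_field and the serialized fields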
```
#### File: reform/tests/test_form_get_url.py
```python
from unittest import TestCase
from django.core.urlresolvers import NoReverseMatch
from ..form import ReactForm
class FooForm(ReactForm):
form_name = 'foo-form'
create_url_name = 'test_url'
update_url_name = 'test_url'
class BarForm(ReactForm):
form_name = 'bar-form'
create_url = '/test/'
update_url = '/test/'
class ReactFormTest(TestCase):
def test_get_url_by_name(self):
form = FooForm()
with self.assertRaises(NoReverseMatch):
form.get_create_url()
def test_get_url_by_url(self):
form = BarForm()
self.assertEqual(form.get_create_url(), '/test/')
``` |
{
"source": "jonashein/baseline_combination",
"score": 2
} |
#### File: meshreg/models/domainnorm.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class DomainNorm(nn.Module):
def __init__(self, channel, l2=True):
super(DomainNorm, self).__init__()
self.normalize = nn.InstanceNorm2d(num_features=channel, affine=False)
self.l2 = l2
self.weight = nn.Parameter(torch.ones(1,channel,1,1))
self.bias = nn.Parameter(torch.zeros(1,channel,1,1))
self.weight.requires_grad = True
self.bias.requires_grad = True
def forward(self, x):
x = self.normalize(x)
if self.l2:
return F.normalize(x, p=2, dim=1)
return x * self.weight + self.bias
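# Minimal usage sketch (added for illustration):
if __name__ == "__main__":
    dn = DomainNorm(channel=8, l2=True)
    out = dn(torch.randn(2, 8, 16, 16))
    print(out.shape)  # torch.Size([2, 8, 16, 16]); with l2=True each spatial position's channel vector has unit norm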
```
#### File: meshreg/models/pvnetutils.py
```python
import numpy as np
import cv2
from scipy.linalg import sqrtm
from scipy.optimize import leastsq
#from meshreg.models.uncertainty_pnp import un_pnp_utils
from joblib import Parallel, delayed
import multiprocessing
def pnp(points_3d, points_2d, camera_matrix, method=cv2.SOLVEPNP_EPNP):
try:
dist_coeffs = pnp.dist_coeffs
except:
dist_coeffs = np.zeros(shape=[8, 1], dtype='float64')
assert points_3d.shape[0] == points_2d.shape[0], 'points 3D and points 2D must have same number of vertices'
if method == cv2.SOLVEPNP_EPNP:
points_3d = np.expand_dims(points_3d, 0)
points_2d = np.expand_dims(points_2d, 0)
points_2d = np.ascontiguousarray(points_2d.astype(np.float64))
points_3d = np.ascontiguousarray(points_3d.astype(np.float64))
camera_matrix = camera_matrix.astype(np.float64)
_, R_exp, t = cv2.solvePnP(points_3d,
points_2d,
camera_matrix,
dist_coeffs,
flags=method)
# , None, None, False, cv2.SOLVEPNP_UPNP)
# R_exp, t, _ = cv2.solvePnPRansac(points_3D,
# points_2D,
# cameraMatrix,
# distCoeffs,
# reprojectionError=12.0)
R, _ = cv2.Rodrigues(R_exp)
# trans_3d=np.matmul(points_3d,R.transpose())+t.transpose()
# if np.max(trans_3d[:,2]<0):
# R=-R
# t=-t
return np.concatenate([R, t], axis=-1)
# def uncertainty_pnp(kpt_3d, kpt_2d, var, K, method=cv2.SOLVEPNP_P3P):
# cov_invs = []
# for vi in range(var.shape[0]):
# if var[vi, 0, 0] < 1e-6 or np.sum(np.isnan(var)[vi]) > 0:
# cov_invs.append(np.zeros([2, 2]).astype(np.float32))
# else:
# cov_inv = np.linalg.inv(sqrtm(var[vi]))
# cov_invs.append(cov_inv)
#
# cov_invs = np.asarray(cov_invs) # pn,2,2
# weights = cov_invs.reshape([-1, 4])
# weights = weights[:, (0, 1, 3)]
# pose_pred = un_pnp_utils.uncertainty_pnp(kpt_2d, weights, kpt_3d, K, method)
# return pose_pred
def uncertainty_pnp(points_3d, points_2d, var, camera_matrix, method=cv2.SOLVEPNP_EPNP):
# Compute weights
cov_invs = []
for vi in range(var.shape[0]):
if var[vi, 0, 0] < 1e-6 or np.sum(np.isnan(var)[vi]) > 0:
cov_invs.append(np.zeros([2, 2]).astype(np.float32))
else:
cov_inv = np.linalg.inv(sqrtm(var[vi]))
cov_invs.append(cov_inv)
cov_invs = np.asarray(cov_invs) # K,2,2
# Compute initialization with 4 best points
weights = cov_invs.reshape([-1, 4])
weights = weights[:, (0, 1, 3)]
idxs = np.argsort(weights[:, 0]+weights[:, 1])[-4:]
#idxs = np.argsort(weights[:, 0] + weights[:, 1])#[-6:]
init_rvec = np.array([np.pi, 0.0, 0.0])
init_tvec = np.array([0.0, 0.2, 0.4])
_, R_exp, t = cv2.solvePnP(np.expand_dims(points_3d[idxs, :], 0),
np.expand_dims(points_2d[idxs, :], 0),
camera_matrix, None, init_rvec, init_tvec, True, flags=cv2.SOLVEPNP_EPNP)
Rt_vec = np.concatenate([R_exp, t], axis=0)
# Return if we only have 4 points
if points_2d.shape[0] == 4:
R, _ = cv2.Rodrigues(Rt_vec[:3])
Rt = np.concatenate([R, Rt_vec[3:]], axis=-1)
return Rt
# Minimize Mahalanobis distance
Rt_vec, _ = leastsq(mahalanobis, Rt_vec, args=(points_3d, points_2d, cov_invs, camera_matrix))
R, _ = cv2.Rodrigues(Rt_vec[:3])
Rt = np.concatenate([R, Rt_vec[3:, None]], axis=-1)
return Rt
def mahalanobis(Rt_vec, points_3d, points_2d, var, camera_matrix):
# Rt_vec.shape: (6,)
# points_3d.shape: (K,3)
# points_2d.shape: (K,2)
# var.shape: (K,2,2)
# camera_matrix.shape: (3,3)
if np.any(np.iscomplex(var)):
var = np.real(var)
R, _ = cv2.Rodrigues(Rt_vec[:3])
Rt = np.concatenate([R, Rt_vec[3:, None]], axis=-1)
points_3d_hom = np.concatenate([points_3d, np.ones((points_3d.shape[0], 1))], axis=-1) # (K,4)
proj_2d_hom = camera_matrix @ Rt @ points_3d_hom.transpose() # (3, K)
proj_2d = proj_2d_hom[:2, :] / proj_2d_hom[2:, :] # (2,K)
err_2d = proj_2d.transpose() - points_2d # (K,2)
err_2d = np.expand_dims(err_2d, axis=1) # (K,1,2)
err = err_2d @ var @ err_2d.transpose((0,2,1)) # (K,1,2) x (K,2,2) x (K,2,1) = (K,1,1)
err = np.sqrt(err.squeeze())
return err
def _process_sample_pnp(points_3d, points_2d, camera_matrix, var=None, method=cv2.SOLVEPNP_EPNP):
if var is not None:
pose = uncertainty_pnp(points_3d, points_2d, var, camera_matrix, method)
else:
pose = pnp(points_3d, points_2d, camera_matrix, method)
return pose
def batched_pnp(points_3d, points_2d, camera_matrix, var=None, method=cv2.SOLVEPNP_EPNP):
batch_size = points_3d.shape[0]
# poses = [_process_sample_pnp(points_3d[0],
# points_2d[0],
# camera_matrix[0],
# None if var is None else var[0],
# method)]
poses = Parallel(n_jobs=8)(delayed(_process_sample_pnp)(points_3d[i],
points_2d[i],
camera_matrix[i],
None if var is None else var[i],
method)
for i in range(batch_size))
return np.stack(poses, axis=0)
def transform(verts, trans, convert_to_homogeneous=False):
assert len(verts.shape) == 2, "Expected 2 dimensions for verts, got: {}.".format(len(verts.shape))
assert len(trans.shape) == 2, "Expected 2 dimensions for trans, got: {}.".format(len(trans.shape))
if convert_to_homogeneous:
hom_verts = np.concatenate([verts, np.ones([verts.shape[0], 1])], axis=1)
else:
hom_verts = verts
assert trans.shape[1] == hom_verts.shape[1], \
"Incompatible shapes: verts.shape: {}, trans.shape: {}".format(verts.shape, trans.shape)
trans_verts = np.dot(trans, hom_verts.transpose()).transpose()
return trans_verts
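# --- Illustrative self-check (added sketch, not part of the original module) ---
# Projects a synthetic, non-coplanar 3D point set with a known pose and checks
# that pnp() returns an [R|t] matrix of the expected (3, 4) shape.
if __name__ == "__main__":
    K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
    pts_3d = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
                       [1, 1, 0], [1, 0, 1]], dtype=np.float64)
    R_gt, _ = cv2.Rodrigues(np.array([0.1, -0.2, 0.05]))
    t_gt = np.array([[0.05], [-0.1], [2.0]])
    proj = (K @ (R_gt @ pts_3d.T + t_gt)).T
    pts_2d = proj[:, :2] / proj[:, 2:]
    Rt = pnp(pts_3d, pts_2d, K)
    print("Recovered pose shape:", Rt.shape)  # expected: (3, 4)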
# def batch_transform(points, camintr=None, camextr=None, add_hom=False, rem_hom=False):
# """Apply extrinsic transformation and/or intrinsic projection to points tensor.
# points has shape [batch, num_points, dim], where
# camintr has shape [batch, M, M]
# camextr has shape [batch, N, N]
# If add_hom, the points are converted to homogeneous points by adding another dimension at the end.
# If rem_hom, the transformed points are normalized by the last dimension. The last dimension is removed in the result.
# """
# if add_hom:
# torch.cat([points, torch.ones(points[:-1])])
#
# if camextr
```
#### File: meshreg/netscripts/evaluate.py
```python
def parse_evaluators(evaluators, config=None):
"""
Parse evaluators for which PCK curves and other statistics
must be computed
"""
if config is None:
config = {
# "joints2d_trans": [0, 50, 20],
"joints2d_base": [0, 100, 100],
"corners2d_base": [0, 100, 100],
"verts2d_base": [0, 100, 100],
"joints3d_cent": [0, 0.2, 20],
"joints3d": [0, 0.5, 20],
}
eval_results = {}
for evaluator_name, evaluator in evaluators.items():
start, end, steps = [config[evaluator_name][idx] for idx in range(3)]
(epe_mean, epe_mean_joints, epe_median, auc, pck_curve, thresholds) = evaluator.get_measures(
start, end, steps
)
eval_results[evaluator_name] = {
"epe_mean": epe_mean,
"epe_mean_joints": epe_mean_joints,
"epe_median": epe_median,
"auc": auc,
"thresholds": thresholds,
"pck_curve": pck_curve,
}
return eval_results
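# Usage sketch (illustrative; evaluator objects must provide get_measures()):
#
#   results = parse_evaluators(evaluators, config={"joints3d": [0, 0.5, 20]})
#   results["joints3d"]["epe_mean"]  # mean end-point error for that evaluator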
```
#### File: meshreg/visualize/evalvis.py
```python
from matplotlib import pyplot as plt
def eval_vis(eval_res, save_img_path, fig=None):
# Note: the passed-in fig is not reused; a fresh figure is created on every call.
fig = plt.figure(figsize=(10, 10))
fig_nb = len(eval_res)
axes = fig.subplots(len(eval_res))
# Avoid shadowing the outer eval_res dict with the per-evaluator entry.
for eval_idx, (eval_name, eval_entry) in enumerate(eval_res.items()):
if fig_nb > 1:
ax = axes[eval_idx]
else:
ax = axes
ax.plot(eval_entry["thresholds"], eval_entry["pck_curve"], "ro-", markersize=1, label="Ours")
print("eval_name: {}".format(eval_name))
print("Thresholds: {}".format(eval_entry["thresholds"]))
print("pck_curve: {}".format(eval_entry["pck_curve"]))
auc = eval_entry["auc"]
epe_mean = eval_entry["epe_mean"]
epe_med = eval_entry["epe_median"]
ax.set_title(f"{eval_name} epe_mean: {epe_mean:.3f}, auc: {auc:.3f}, epe_med: {epe_med:.3f}")
fig.savefig(save_img_path)
``` |
{
"source": "jonashein/handobjectnet_baseline",
"score": 2
} |
#### File: meshreg/datasets/syn_colibri_v1_utils.py
```python
from functools import lru_cache
import os
import pickle
import numpy as np
@lru_cache(128)
def load_manoinfo(pkl_path):
with open(pkl_path, "rb") as p_f:
data = pickle.load(p_f)
return data
def transform(verts, trans, convert_to_homogeneous=False):
assert len(verts.shape) == 2, "Expected 2 dimensions for verts, got: {}.".format(len(verts.shape))
assert len(trans.shape) == 2, "Expected 2 dimensions for trans, got: {}.".format(len(trans.shape))
if convert_to_homogeneous:
hom_verts = np.concatenate([verts, np.ones([verts.shape[0], 1])], axis=1)
else:
hom_verts = verts
assert trans.shape[1] == hom_verts.shape[1], \
"Incompatible shapes: verts.shape: {}, trans.shape: {}".format(verts.shape, trans.shape)
trans_verts = np.dot(trans, hom_verts.transpose()).transpose()
return trans_verts
def compute_vertex(mask, kpt_2d):
h, w = mask.shape
m = kpt_2d.shape[0]
xy = np.argwhere(mask != 0)[:, [1, 0]]
vertex = kpt_2d[None] - xy[:, None]
norm = np.linalg.norm(vertex, axis=2, keepdims=True)
norm[norm < 1e-3] += 1e-3
vertex = vertex / norm
vertex_out = np.zeros([h, w, m, 2], np.float32)
vertex_out[xy[:, 1], xy[:, 0]] = vertex
vertex_out = np.reshape(vertex_out, [h, w, m * 2])
return vertex_out
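# Shape sketch (added for illustration): for an (H, W) mask and m keypoints,
# compute_vertex(mask, kpt_2d) returns an (H, W, 2*m) float32 array whose
# channels at every foreground pixel hold unit vectors pointing from that
# pixel towards each of the m keypoints; background pixels stay zero.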
```
#### File: meshreg/netscripts/epochpass.py
```python
import os
import pickle
from tqdm import tqdm
import torch
from libyana.evalutils.avgmeter import AverageMeters
from libyana.evalutils.zimeval import EvalUtil
from meshreg.datasets.queries import BaseQueries
from meshreg.visualize import samplevis
from meshreg.visualize import consistdisplay
from meshreg.netscripts.monitor import MetricMonitor
from meshreg.netscripts.metrics import evaluate
def epoch_pass(
loader,
model,
train=False,
optimizer=None,
scheduler=None,
epoch=0,
img_folder=None,
fig=None,
display_freq=10,
epoch_display_freq=1,
lr_decay_gamma=0,
freeze_batchnorm=True,
monitor=None,
):
if train:
prefix = "train"
if not freeze_batchnorm:
model.train()
else:
prefix = "val"
model.eval()
render_step = 0
# Loop over dataset
for batch_idx, batch in enumerate(tqdm(loader, desc="batch")):
if 'compute_pnp' in dir(model):
model.compute_pnp = not train or (batch_idx % display_freq == 0 and epoch % epoch_display_freq == 0)
# Compute outputs and losses
if train:
loss, results, losses = model(batch)
# Optimize model
if torch.isnan(loss):
raise ValueError(f"Loss made of {losses} became nan!")
optimizer.zero_grad()
loss.backward()
optimizer.step()
else:
with torch.no_grad():
loss, results, losses = model(batch)
# Update metrics
if monitor is not None:
# Create loss dict, add _loss suffix where necessary
loss_dict = {}
for loss_name, loss_val in losses.items():
if not (loss_name.startswith("loss_") or loss_name.endswith("_loss")):
loss_name = "loss_{}".format(loss_name)
if loss_val is not None:
if isinstance(loss_val, torch.Tensor):
loss_val = loss_val.cpu().detach().numpy()
loss_dict[loss_name] = loss_val
monitor.add(prefix, epoch + 1, loss_dict)
monitor.add(prefix, epoch + 1, evaluate(batch, results))
# Visualize sample outputs
if batch_idx % display_freq == 0 and epoch % epoch_display_freq == 0:
img_filepath = f"{prefix}_epoch{epoch:04d}_batch{batch_idx:06d}.png"
save_img_path = os.path.join(img_folder, img_filepath)
samplevis.sample_vis(batch, results, fig=fig, save_img_path=save_img_path)
if lr_decay_gamma and scheduler is not None:
scheduler.step()
save_dict = {}
return save_dict
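# Usage sketch (illustrative; loader, model, optimizer, scheduler, fig and
# monitor are created by the surrounding training scripts):
#
#   epoch_pass(loader, model, train=True, optimizer=optimizer, scheduler=scheduler,
#              epoch=epoch, img_folder="images", fig=fig, monitor=monitor,
#              lr_decay_gamma=0.5, freeze_batchnorm=True)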
```
#### File: meshreg/visualize/samplevis.py
```python
import torch
import numpy as np
from libyana.visutils.viz2d import visualize_joints_2d
from meshreg.datasets.queries import BaseQueries, TransQueries
from meshreg.visualize import consistdisplay
def get_check_none(data, key, cpu=True):
if key in data and data[key] is not None:
if cpu:
return data[key].cpu().detach()
else:
return data[key].detach().cuda()
else:
return None
def sample_vis(sample, results, save_img_path, fig=None, max_rows=5, display_centered=False):
fig.clf()
images = sample[TransQueries.IMAGE].permute(0, 2, 3, 1).cpu() + 0.5
batch_size = images.shape[0]
# pred_handverts2d = get_check_none(results, "verts2d")
gt_objverts2d = get_check_none(sample, TransQueries.OBJVERTS2D)
pred_objverts2d = get_check_none(results, "obj_verts2d")
gt_objcorners2d = None #get_check_none(sample, TransQueries.OBJCORNERS2D)
pred_objcorners2d = None #get_check_none(results, "obj_corners2d")
gt_objcorners3dw = None #get_check_none(sample, BaseQueries.OBJCORNERS3D)
pred_objcorners3d = None #get_check_none(results, "obj_corners3d")
gt_objverts3d = get_check_none(sample, TransQueries.OBJVERTS3D)
pred_objverts3d = get_check_none(results, "obj_verts3d")
gt_canobjverts3d = get_check_none(sample, TransQueries.OBJCANROTVERTS)
pred_objverts3dw = get_check_none(results, "recov_objverts3d")
gt_canobjcorners3d = get_check_none(sample, TransQueries.OBJCANROTCORNERS)
pred_objcorners3dw = None #get_check_none(results, "recov_objcorners3d")
gt_handjoints2d = get_check_none(sample, TransQueries.JOINTS2D)
pred_handjoints2d = get_check_none(results, "joints2d")
gt_handjoints3d = get_check_none(sample, TransQueries.JOINTS3D)
pred_handjoints3d = get_check_none(results, "joints3d")
gt_handverts3d = get_check_none(sample, TransQueries.HANDVERTS3D)
pred_handverts3d = get_check_none(results, "verts3d")
gt_objverts3dw = get_check_none(sample, BaseQueries.OBJVERTS3D)
gt_handjoints3dw = get_check_none(sample, BaseQueries.JOINTS3D)
pred_handjoints3dw = get_check_none(results, "recov_joints3d")
row_nb = min(max_rows, batch_size)
if display_centered:
col_nb = 7
else:
col_nb = 4
axes = fig.subplots(row_nb, col_nb)
for row_idx in range(row_nb):
# Column 0
axes[row_idx, 0].imshow(images[row_idx])
axes[row_idx, 0].axis("off")
# Visualize 2D hand joints
if pred_handjoints2d is not None:
visualize_joints_2d(axes[row_idx, 0], pred_handjoints2d[row_idx], alpha=1, joint_idxs=False)
if gt_handjoints2d is not None:
visualize_joints_2d(axes[row_idx, 0], gt_handjoints2d[row_idx], alpha=0.5, joint_idxs=False)
# Column 1
axes[row_idx, 1].imshow(images[row_idx])
axes[row_idx, 1].axis("off")
# Visualize 2D object vertices
if pred_objverts2d is not None:
axes[row_idx, 1].scatter(
pred_objverts2d[row_idx, :, 0], pred_objverts2d[row_idx, :, 1], c="r", s=1, alpha=0.2
)
if gt_objverts2d is not None:
axes[row_idx, 1].scatter(
gt_objverts2d[row_idx, :, 0], gt_objverts2d[row_idx, :, 1], c="b", s=1, alpha=0.02
)
# Visualize 2D object bounding box
if pred_objcorners2d is not None:
visualize_joints_2d(
axes[row_idx, 1],
pred_objcorners2d[row_idx],
alpha=1,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
if gt_objcorners2d is not None:
visualize_joints_2d(
axes[row_idx, 1],
gt_objcorners2d[row_idx],
alpha=0.5,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
# Visualize some (vertex position) errors for the 2D object vertices
if gt_objverts2d is not None and pred_objverts2d is not None:
idxs = list(range(6))
arrow_nb = len(idxs)
arrows = torch.cat([gt_objverts2d[:, idxs].float(), pred_objverts2d[:, idxs].float()], 1)
links = [[i, i + arrow_nb] for i in range(arrow_nb)]
visualize_joints_2d(
axes[row_idx, 1],
arrows[row_idx],
alpha=0.5,
joint_idxs=False,
links=links,
color=["k"] * arrow_nb,
)
# Column 2
# view from the top
col_idx = 2
# axes[row_idx, col_idx].set_title("rotY: {:.1f}".format(gt_drill_angle_Y[row_idx]))
if gt_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3dw[row_idx, :, 2], gt_objverts3dw[row_idx, :, 0], c="b", s=1, alpha=0.02
)
if pred_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
pred_objverts3dw[row_idx, :, 2], pred_objverts3dw[row_idx, :, 0], c="r", s=1, alpha=0.02
)
if pred_handjoints3dw is not None:
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3dw[row_idx, :, [2, 0]], alpha=1, joint_idxs=False
)
if gt_handjoints3dw is not None:
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3dw[row_idx, :, [2, 0]], alpha=0.5, joint_idxs=False
)
axes[row_idx, col_idx].invert_yaxis()
# if pred_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# pred_objcorners3dw[row_idx],
# alpha=1,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if gt_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# gt_objcorners3dw[row_idx],
# alpha=0.5,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if pred_objverts3dw is not None and gt_objverts3dw is not None:
# arrow_nb = 6
# arrows = torch.cat([gt_objverts3dw[:, :arrow_nb], pred_objverts3dw[:, :arrow_nb]], 1)
# links = [[i, i + arrow_nb] for i in range(arrow_nb)]
# visualize_joints_2d(
# axes[row_idx, col_idx],
# arrows[row_idx],
# alpha=0.5,
# joint_idxs=False,
# links=links,
# color=["k"] * arrow_nb,
# )
# Column 3
# view from the right
col_idx = 3
# axes[row_idx, col_idx].set_title("rotX: {:.1f}".format(gt_drill_angle_X[row_idx]))
# invert second axis here for more consistent viewpoints
if gt_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3dw[row_idx, :, 2], -gt_objverts3dw[row_idx, :, 1], c="b", s=1, alpha=0.02
)
if pred_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
pred_objverts3dw[row_idx, :, 2], -pred_objverts3dw[row_idx, :, 1], c="r", s=1, alpha=0.02
)
if pred_handjoints3dw is not None:
pred_handjoints3dw_inv = np.stack([pred_handjoints3dw[:, :, 2], -pred_handjoints3dw[:, :, 1]], axis=-1)
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3dw_inv[row_idx, :, :], alpha=1, joint_idxs=False
)
if gt_handjoints3dw is not None:
gt_handjoints3dw_inv = np.stack([gt_handjoints3dw[:, :, 2], -gt_handjoints3dw[:, :, 1]], axis=-1)
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3dw_inv[row_idx, :, :], alpha=0.5, joint_idxs=False
)
# if pred_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# pred_objcorners3dw[row_idx, :, 1:],
# alpha=1,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if gt_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# gt_objcorners3dw[row_idx, :, 1:],
# alpha=0.5,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if pred_objverts3dw is not None and gt_objverts3dw is not None:
# arrow_nb = 6
# arrows = torch.cat([gt_objverts3dw[:, :arrow_nb, 1:], pred_objverts3dw[:, :arrow_nb, 1:]], 1)
# links = [[i, i + arrow_nb] for i in range(arrow_nb)]
# visualize_joints_2d(
# axes[row_idx, col_idx],
# arrows[row_idx],
# alpha=0.5,
# joint_idxs=False,
# links=links,
# color=["k"] * arrow_nb,
# )
if display_centered:
# Column 4
col_idx = 4
if gt_canobjverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_canobjverts3d[row_idx, :, 0], gt_canobjverts3d[row_idx, :, 1], c="b", s=1, alpha=0.02
)
if pred_objverts3d is not None:
axes[row_idx, col_idx].scatter(
pred_objverts3d[row_idx, :, 0], pred_objverts3d[row_idx, :, 1], c="r", s=1, alpha=0.02
)
if pred_objcorners3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx],
pred_objcorners3d[row_idx],
alpha=1,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
if gt_canobjcorners3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx],
gt_canobjcorners3d[row_idx],
alpha=0.5,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
if pred_objcorners3d is not None and gt_canobjcorners3d is not None:
arrow_nb = 6
arrows = torch.cat([gt_canobjcorners3d[:, :arrow_nb], pred_objcorners3d[:, :arrow_nb]], 1)
links = [[i, i + arrow_nb] for i in range(arrow_nb)]
visualize_joints_2d(
axes[row_idx, col_idx],
arrows[row_idx],
alpha=0.5,
joint_idxs=False,
links=links,
color=["k"] * arrow_nb,
)
axes[row_idx, col_idx].set_aspect("equal")
axes[row_idx, col_idx].invert_yaxis()
# Column 5
col_idx = 5
if gt_objverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3d[row_idx, :, 0], gt_objverts3d[row_idx, :, 1], c="b", s=1, alpha=0.02
)
# if pred_objverts3d is not None:
# axes[row_idx, 2].scatter(
# pred_objverts3d[row_idx, :, 0], pred_objverts3d[row_idx, :, 1], c="r", s=1, alpha=0.02
# )
if gt_handverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_handverts3d[row_idx, :, 0], gt_handverts3d[row_idx, :, 1], c="g", s=1, alpha=0.2
)
if pred_handverts3d is not None:
axes[row_idx, col_idx].scatter(
pred_handverts3d[row_idx, :, 0], pred_handverts3d[row_idx, :, 1], c="c", s=1, alpha=0.2
)
if pred_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3d[row_idx], alpha=1, joint_idxs=False
)
if gt_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3d[row_idx], alpha=0.5, joint_idxs=False
)
axes[row_idx, col_idx].invert_yaxis()
# Column 6
col_idx = 6
if gt_objverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3d[row_idx, :, 1], gt_objverts3d[row_idx, :, 2], c="b", s=1, alpha=0.02
)
# if pred_objverts3d is not None:
# axes[row_idx, 3].scatter(
# pred_objverts3d[row_idx, :, 1], pred_objverts3d[row_idx, :, 2], c="r", s=1, alpha=0.02
# )
if gt_handverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_handverts3d[row_idx, :, 1], gt_handverts3d[row_idx, :, 2], c="g", s=1, alpha=0.2
)
if pred_handverts3d is not None:
axes[row_idx, col_idx].scatter(
pred_handverts3d[row_idx, :, 1], pred_handverts3d[row_idx, :, 2], c="c", s=1, alpha=0.2
)
if pred_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3d[row_idx][:, 1:], alpha=1, joint_idxs=False
)
if gt_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3d[row_idx][:, 1:], alpha=0.5, joint_idxs=False
)
consistdisplay.squashfig(fig)
fig.savefig(save_img_path, dpi=300)
``` |
{
"source": "jonashein/pvnet_baseline",
"score": 2
} |
#### File: lib/datasets/dataset_catalog.py
```python
from lib.config import cfg
class DatasetCatalog(object):
dataset_attrs = {
'SynColibriV1_Train': {
'id': 'custom',
'data_root': 'data/syn_colibri_v1_train',
'ann_file': 'data/syn_colibri_v1_train/train.json',
'split': 'train'
},
'SynColibriV1_Val': {
'id': 'custom',
'data_root': 'data/syn_colibri_v1_val',
'ann_file': 'data/syn_colibri_v1_val/train.json',
'split': 'test'
},
'SynColibriV1_Test': {
'id': 'custom',
'data_root': 'data/syn_colibri_v1_test',
'ann_file': 'data/syn_colibri_v1_test/train.json',
'split': 'test'
},
'RealColibriV1_Train': {
'id': 'custom',
'data_root': 'data/real_colibri_v1_train',
'ann_file': 'data/real_colibri_v1_train/train.json',
'split': 'train'
},
'RealColibriV1_Val': {
'id': 'custom',
'data_root': 'data/real_colibri_v1_val',
'ann_file': 'data/real_colibri_v1_val/train.json',
'split': 'test'
},
'RealColibriV1_Test': {
'id': 'custom',
'data_root': 'data/real_colibri_v1_test',
'ann_file': 'data/real_colibri_v1_test/train.json',
'split': 'test'
}
}
@staticmethod
def get(name):
attrs = DatasetCatalog.dataset_attrs[name]
return attrs.copy()
``` |
{
"source": "Jonas-Heinrich/TelegramBot",
"score": 2
} |
#### File: Jonas-Heinrich/TelegramBot/telegram_bot.py
```python
import telegram
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler
import threading
import traceback
import atexit
import datetime
import random
import string
from . import telegram_config
class TelegramBot:
def __init__(self, token, chat_id, send_start=True, send_exit=True, print_to_console=True):
self.__chat_id = chat_id
self.__bot = telegram.Bot(token)
self.__updater = Updater(token)
self.print_to_console = print_to_console
self.option_registry = {}
# Option Registry
# Each key has an array of options associated with it.
# In this array is a tuple describing each option, i.e. the text and callback function.
self.__updater.dispatcher.add_handler(CallbackQueryHandler(self.__option_pressed))
self.__updater.dispatcher.add_error_handler(self.error)
if send_start:
self.send_meta_message("--Python Script Start--")
if send_exit:
def exit_handler():
self.send_meta_message("--Python Script Termination--")
atexit.register(exit_handler)
# Note: polling is started lazily in input_get_option() once options are sent.
#
# Helper
#
def __get_chat_id(self, chat_id):
"""Returns the default chat_id if the given parameter is None."""
if chat_id is None:
return self.__chat_id
return chat_id
def __log(self, action, message, chat_id):
"""Logs the action, message and chat_id to the console, if enabled."""
if self.print_to_console:
print('[TELEGRAM] {} to {} ({}): "{}"'.format(
action,
chat_id,
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
message
))
def __generate_random_string(self, length=32):
"""Generates a string with random ascii letters and digits with the given length (default: 32)"""
return ''.join(
[random.choice(string.ascii_letters + string.digits)
for _ in range(length)]
)
#
# Error
#
def error(self, update, context):
self.__log(
"Error",
'Update "{}" caused error "{}"'.format(update, context.error),
None
)
#
# Text Messages
#
def send_meta_message(self, message, chat_id=None):
"""Sends a message about the chatbot / program to the chat_id."""
self.send_markdown_message("*" + message + "*", self.__get_chat_id(chat_id))
def send_markdown_message(self, message, chat_id=None):
"""Sends a message that allows markdown tags."""
chat_id = self.__get_chat_id(chat_id)
self.__bot.send_message(
chat_id=chat_id,
text=message,
parse_mode=telegram.ParseMode.MARKDOWN
)
self.__log("Message", message, chat_id)
def send_message(self, message, chat_id=None):
"""Sends a plain text message to the given chat_id."""
chat_id = self.__get_chat_id(chat_id)
self.__bot.send_message(
chat_id,
text=message
)
self.__log("Message", message, chat_id)
#
# Image Messages
#
def send_image(self, path, chat_id=None):
"""Sends an image to the given chat_id."""
chat_id = self.__get_chat_id(chat_id)
self.__bot.send_photo(
chat_id=chat_id,
photo=open(path, 'rb')
)
self.__log("Image", path, chat_id)
#
# Options Helper
#
def __generate_option_id(self, options_id, option):
"""Generates an option_id for a given option and appends it to the option_registry."""
self.option_registry[options_id].append(option)
return options_id + "|" + str(len(self.option_registry[options_id]) - 1)
def __generate_options_id(self):
"""Generates a new options_id and creates it in the options_registry."""
options_id = self.__generate_random_string()
self.option_registry[options_id] = []
return options_id
#
# Options
#
def __option_pressed(self, context, update):
"""Handles all inline options presses."""
option_id = update["callback_query"]["data"].split("|")
if option_id[0] in self.option_registry:
# Get function from option_id
function = self.option_registry[option_id[0]][int(option_id[1])][1]
del self.option_registry[option_id[0]]
# Invoke and print errors
try:
function(update)
except Exception:
print()
print(80 * "-")
print("An error occurred while handling an option callback:\n")
print(traceback.format_exc())
print(80 * "-")
else:
self.send_message(
'The option selection does either not exist or has become invalid.',
update["callback_query"]["message"]["chat"]["id"]
)
if len(self.option_registry.keys()) == 0:
# New thread to avoid racing condition:
# Since this is running in the handler, the join_thread method of the updater
# waits for its own shutdown call to finish.
x = threading.Thread(target=self.__kill_updater)
x.start()
def __kill_updater(self):
"""Kills the updater poll if there are no options left to respond to."""
if len(self.option_registry.keys()) == 0:
self.__updater.stop()
def input_get_option(self, options, message="Please choose one keyboard action", chat_id=None):
"""Generates a option input dialogue.
Each option should be a tuple containing a text and callback function.
You can also put several options into a nested tuple to put them into one line."""
chat_id = self.__get_chat_id(chat_id)
# Build keyboard
options_id = self.__generate_options_id()
keyboard = []
for option_line in options:
line = []
# Option line actually has several options in it
if isinstance(option_line[0], tuple) or isinstance(option_line[0], list):
for option in option_line:
line.append(
telegram.InlineKeyboardButton(
option[0],
callback_data=self.__generate_option_id(options_id, option)
)
)
# Option line is just one item
else:
line.append(
telegram.InlineKeyboardButton(
option_line[0],
callback_data=self.__generate_option_id(options_id, option_line)
)
)
keyboard.append(line)
reply_markup = telegram.InlineKeyboardMarkup(keyboard)
# Send
self.__bot.send_message(
self.__get_chat_id(chat_id),
text=message,
reply_markup=reply_markup
)
# Log
self.__log("Option Keyboard", str(options), chat_id)
self.__updater.start_polling()
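# Illustrative usage sketch (added; assumes a valid telegram_config and that
# on_yes/on_no/on_a/on_b are callbacks taking the update object):
#
#   bot = get_bot()
#   bot.input_get_option([
#       ("Yes", on_yes),
#       ("No", on_no),
#       [("A", on_a), ("B", on_b)],  # several options rendered on one keyboard line
#   ], message="Please choose")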
def get_bot():
return TelegramBot(
telegram_config.TOKEN,
telegram_config.CHAT_ID,
telegram_config.SEND_START,
telegram_config.SEND_EXIT,
telegram_config.PRINT_TO_CONSOLE
)
``` |
{
"source": "jonashellmann/informaticup21-team-chillow",
"score": 2
} |
#### File: chillow/controller/ai_evaluation_controller.py
```python
from contextlib import closing
from datetime import datetime, timedelta, timezone
from random import randint
import sqlite3
from typing import List
from chillow.controller import OfflineController
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.ai import NotKillingItselfAI, PathfindingSearchTreeAI, PathfindingAI, SearchTreeAI, \
SearchTreePathfindingAI, RandomAI, AIOptions
from chillow.service.ai.artificial_intelligence import ArtificialIntelligence
from chillow.view.headless_view import HeadlessView
# These AIs are considered as the top 25 after 1000 simulated game in the first part of the evaluation
best_ais_configurations = [
(PathfindingSearchTreeAI.__name__, (1, 50, 2, 0.75, 30)),
(SearchTreePathfindingAI.__name__, (1, 25, 2, 20)),
(PathfindingSearchTreeAI.__name__, (1, 25, 2, 0.75, 10)),
(PathfindingSearchTreeAI.__name__, (1, 75, 3, 0.75, 10)),
(PathfindingSearchTreeAI.__name__, (2, 75, 3, 0.75, 20)),
(PathfindingSearchTreeAI.__name__, (1, 75, 2, 0.75, 10)),
(PathfindingAI.__name__, (1, 50)),
(PathfindingSearchTreeAI.__name__, (1, 25, 2, 0.75, 20)),
(PathfindingSearchTreeAI.__name__, (2, 50, 2, 0.75, 20)),
(PathfindingSearchTreeAI.__name__, (1, 50, 2, 0.75, 20)),
(NotKillingItselfAI.__name__, ([AIOptions.max_distance], 1, 0, 1)),
(NotKillingItselfAI.__name__, ([AIOptions.max_distance], 2, 0, 3)),
(PathfindingSearchTreeAI.__name__, (1, 50, 3, 0.75, 10)),
(PathfindingSearchTreeAI.__name__, (1, 75, 3, 0.75, 30)),
(SearchTreePathfindingAI.__name__, (1, 75, 2, 10)),
(PathfindingAI.__name__, (1, 75)),
(PathfindingSearchTreeAI.__name__, (1, 75, 3, 0.75, 20)),
(SearchTreePathfindingAI.__name__, (2, 50, 2, 20)),
(SearchTreePathfindingAI.__name__, (1, 25, 2, 10)),
(PathfindingSearchTreeAI.__name__, (1, 75, 2, 0.75, 20)),
(PathfindingAI.__name__, (1, 25)),
(PathfindingSearchTreeAI.__name__, (1, 50, 3, 0.75, 30)),
(PathfindingSearchTreeAI.__name__, (1, 50, 3, 0.75, 20)),
(PathfindingSearchTreeAI.__name__, (2, 75, 2, 0.75, 30)),
(SearchTreePathfindingAI.__name__, (1, 50, 2, 10))
]
class AIEvaluationController(OfflineController):
"""Executes multiple games after each other with randomly created games and players.
The result of every game and the execution time for each player in each round is saved in an SQLite database."""
def __init__(self, runs: int, db_path: str, evaluation_type: int):
""" Creates a new AI evaluation controller.
Args:
runs: The number of games to be simulated.
db_path: The path of the SQLite database file.
evaluation_type: Defines which evaluation should be performed
"""
super().__init__(HeadlessView())
self.__runs = runs
self.__db_path = db_path
if 1 <= evaluation_type <= 2:
self.__evaluation_type = evaluation_type
else:
self.__evaluation_type = 1
self.__connection = None
self.__cursor = None
self.__current_game_id = None
def play(self):
"""See base class."""
with closing(sqlite3.connect(self.__db_path)) as connection:
with closing(connection.cursor()) as cursor:
self.__connection = connection
self.__cursor = cursor
self.__create_db_tables()
max_game_id = self.__cursor.execute("SELECT MAX(id) FROM games").fetchone()[0]
if max_game_id is None:
max_game_id = 0
self.__run_simulations(max_game_id)
def _create_game(self) -> None:
height = randint(30, 70)
width = randint(30, 70)
player_count = randint(3, 6)
players = []
occupied_coordinates = []
for i in range(1, player_count + 1):
next_coordinate = (randint(0, width - 1), randint(0, height - 1))
while next_coordinate in occupied_coordinates:
next_coordinate = (randint(0, width - 1), randint(0, height - 1))
occupied_coordinates.append(next_coordinate)
player = Player(i, next_coordinate[0], next_coordinate[1], Direction.get_random_direction(), 1, True,
str(i))
players.append(player)
cells = [[Cell() for _ in range(width)] for _ in range(height)]
for player in players:
cells[player.y][player.x] = Cell([player])
self._game = Game(width, height, cells, players, 1, True, datetime.now() + timedelta(5, 15))
self._game_round = 0
self._ais = []
if self.__evaluation_type == 1:
self.__generate_ais_for_first_evaluation(player_count, players)
elif self.__evaluation_type == 2:
self.__generate_ais_for_second_evaluation(player_count, players)
def __generate_ais_for_first_evaluation(self, player_count: int, players: List[Player]) -> None:
self._ais.append(PathfindingAI(players[0], randint(1, 3), randint(1, 3) * 25))
self._ais.append(PathfindingSearchTreeAI(players[1], randint(1, 3), randint(1, 3) * 25, randint(2, 3), 0.75,
randint(1, 3) * 10))
self._ais.append(SearchTreePathfindingAI(players[2], randint(1, 3), randint(1, 3) * 25, 2,
randint(1, 3) * 10))
if player_count > 3:
self._ais.append(SearchTreeAI(players[3], randint(1, 3), randint(2, 3), True, randint(1, 3) * 10))
if player_count > 4:
self._ais.append(NotKillingItselfAI(players[4], [AIOptions.max_distance], randint(1, 3), 0,
randint(1, 3)))
if player_count > 5:
self._ais.append(RandomAI(players[5], randint(1, 3)))
def __generate_ais_for_second_evaluation(self, player_count: int, players: List[Player]) -> None:
used_ai_indices = []
for i in range(player_count):
ai_index = randint(0, len(best_ais_configurations) - 1)
# Prevent that the same AI configuration is used in one game
while ai_index in used_ai_indices:
ai_index = randint(0, len(best_ais_configurations) - 1)
used_ai_indices.append(ai_index)
ai = best_ais_configurations[ai_index]
self._ais.append(globals()[ai[0]](players[i], *ai[1]))
def __run_simulations(self, max_game_id):
for i in range(self.__runs):
self.__current_game_id = i + 1 + max_game_id
super().play()
self.__cursor.execute("INSERT INTO games VALUES ({}, {}, {}, '{}', NULL)"
.format(self.__current_game_id, self._game.width, self._game.height,
datetime.now(timezone.utc)))
winner_player = self._game.get_winner()
for ai in self._ais:
ai_class = ai.__class__.__name__
ai_info = ai.get_information()
player_id = self.__get_player_id(ai_class, ai_info)
# Save how often an AI configuration participated in a game
self.__cursor.execute("INSERT INTO participants VALUES ({}, {})"
.format(player_id, self.__current_game_id))
# Save how often an AI configuration won a game
if ai.player == winner_player:
self.__cursor.execute("UPDATE games SET winner_id = {} WHERE id = {}"
.format(player_id, self.__current_game_id))
self.__connection.commit()
def __create_db_tables(self):
self.__cursor.execute("CREATE TABLE IF NOT EXISTS players ("
"id INTEGER NOT NULL PRIMARY KEY,"
"class TEXT NOT NULL,"
"info TEXT)")
self.__cursor.execute("CREATE TABLE IF NOT EXISTS games ("
"id INTEGER NOT NULL PRIMARY KEY,"
"width INTEGER NOT NULL,"
"height INTEGER NOT NULL,"
"date TEXT NOT NULL,"
"winner_id INTEGER,"
"FOREIGN KEY (winner_id) REFERENCES players (id))")
self.__cursor.execute("CREATE TABLE IF NOT EXISTS participants ("
"player_id INTEGER NOT NULL,"
"game_id INTEGER NOT NULL,"
"PRIMARY KEY(player_id, game_id),"
"FOREIGN KEY (player_id) REFERENCES players (id),"
"FOREIGN KEY (game_id) REFERENCES games (id))")
self.__cursor.execute("CREATE TABLE IF NOT EXISTS execution_times ("
"player_id INTEGER NOT NULL,"
"game_id INTEGER NOT NULL,"
"game_round INTEGER NOT NULL,"
"execution REAL NOT NULL,"
"PRIMARY KEY(player_id, game_id, game_round),"
"FOREIGN KEY (player_id) REFERENCES players (id),"
"FOREIGN KEY (game_id) REFERENCES games (id))")
def _log_execution_time(self, ai: ArtificialIntelligence, execution_time: float):
ai_class = ai.__class__.__name__
ai_info = ai.get_information()
player_id = self.__get_player_id(ai_class, ai_info)
self.__cursor.execute("INSERT INTO execution_times VALUES ({}, {}, {}, {})"
.format(player_id, self.__current_game_id, self._game_round, execution_time))
def __get_player_id(self, ai_class: str, ai_info: str) -> int:
player_id = self.__cursor.execute(
"SELECT MAX(id) FROM players p WHERE p.class = '{}' AND p.info = '{}'"
.format(ai_class, ai_info)).fetchone()[0]
if player_id is None:
max_player_id = self.__cursor.execute("SELECT MAX(id) FROM players").fetchone()[0]
if max_player_id is None:
max_player_id = 0
player_id = max_player_id + 1
self.__cursor.execute("INSERT INTO players VALUES ({}, '{}', '{}')".format(player_id, ai_class, ai_info))
return player_id
```
#### File: chillow/model/action.py
```python
import random
from enum import Enum
from itertools import product
from typing import Any, List, Tuple
class Action(Enum):
"""Enum to represent all possible actions which a player can perform to in a game."""
turn_left, turn_right, speed_up, slow_down, change_nothing = range(5)
@staticmethod
def get_actions(randomize: bool = False):
"""Returns all actions defined in this enum.
Args:
randomize: If this flag is true, the returned list is not in order but shuffled randomly.
Returns:
Returns all actions defined in this enum.
"""
if randomize:
return Action.__get_random_actions()
return list(Action)
@staticmethod
def get_random_action():
"""Randomly chooses one of the defined actions in this enum.
Returns:
A random action.
"""
return random.choice(Action.get_actions())
@staticmethod
def __get_random_actions():
actions = Action.get_actions()
random.shuffle(actions)
return actions
@staticmethod
def get_combinations(player_count: int) -> List[Tuple[Any]]:
"""Creates all combinations of actions.
E.g. if the parameter is 3, the returned list looks like the following and contains 5^3 = 125 tuples.
[(left, left, left), (left, left, right), ..., (change_nothing, change_nothing, change_nothing)]
Args:
player_count: Defines how many actions should be in one tuple.
Returns:
A list of tuples with all possible combinations of actions.
"""
return list(product(Action.get_actions(), repeat=player_count))
@staticmethod
def get_by_index(index: int):
"""Finds an action by its position in the enum.
Args:
index: The index of the enum element.
Returns:
The enum element at the index.
"""
return Action.get_actions()[index]
def get_index(self):
"""Gets the index of an element in the enum.
Returns:
The index of an element in the enum
"""
return Action.get_actions().index(self)
@staticmethod
def get_default():
"""Defines the default action.
Returns:
The defined default action.
"""
return Action.change_nothing
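# Illustrative usage sketch (added; not part of the original module):
if __name__ == "__main__":
    combos = Action.get_combinations(2)
    print(len(combos))                       # 25 == 5 actions ** 2 players
    print(Action.get_by_index(0))            # Action.turn_left
    print(Action.get_default().get_index())  # 4, change_nothing is the last action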
```
#### File: chillow/model/direction.py
```python
from enum import Enum
import random
class Direction(Enum):
"""Enum to represent all possible directions in which a player can be directed to in a game."""
left, right, up, down = range(4)
@staticmethod
def get_random_direction():
"""Randomly chooses one of the defined directions in this enum.
Returns:
A random direction.
"""
return random.choice(list(Direction))
```
#### File: service/ai/not_killing_itself_ai.py
```python
import copy
import logging
import pickle
from enum import Enum
from random import choice
from typing import List, Dict
from multiprocessing import Value
from chillow.exceptions import InvalidPlayerMoveException
from chillow.service.ai.artificial_intelligence import ArtificialIntelligence
from chillow.model.action import Action
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class AIOptions(Enum):
"""Enumeration that holds possible options for the AIs."""
max_distance = range(1)
class NotKillingItselfAI(ArtificialIntelligence):
"""AI implementation to choose an action that simply does not kill the player for the next rounds.
It does not consider the opponent's player actions.
Attributes:
player: The player associated with this AI.
"""
def __init__(self, player: Player, options: List[AIOptions], max_speed: int, max_worse_distance: int,
depth: int):
"""Creates a new object of the NotKillingItselfAI.
Args:
player: The player assigned to the AI.
options: List of possible options to change the behavior of the AI.
max_speed: The maximum speed the AI can reach.
max_worse_distance: A tolerance so that not only the single best action is kept: actions that are
worse than the best one, but within this tolerance, are also considered.
depth: Number of player actions that are looked into the future.
"""
super().__init__(player, max_speed)
self.__options = options
self.__max_worse_distance = max_worse_distance
assert depth > 0, "depth must be greater than 0"
self.__depth = depth
def get_information(self) -> str:
"""See base class."""
return (super().get_information() + ", max_worse_distance=" + str(self.__max_worse_distance)
+ ", depth=" + str(self.__depth))
def create_next_action(self, game: Game, return_value: Value):
"""See base class."""
self._turn_ctr += 1
game_service = GameService(game)
game_service.turn.turn_ctr = self._turn_ctr
surviving_actions = self.find_surviving_actions_with_best_depth(game_service)
if AIOptions.max_distance in self.__options:
max_distance_actions = self.calc_action_with_max_distance_to_visited_cells(game_service, surviving_actions)
action = choice(max_distance_actions) if max_distance_actions is not None and len(
max_distance_actions) > 0 else Action.change_nothing
else:
action = choice(surviving_actions) if surviving_actions is not None and len(
surviving_actions) > 0 else Action.change_nothing
return_value.value = action.get_index()
def calc_action_with_max_distance_to_visited_cells(self, game_service: GameService,
actions: List[Action]) -> List[Action]:
"""Calculates a list of actions that have the property to have as many free cells as possible in front of them
while running straight after the action has been executed.
Args:
game_service: The game service used for simulation of actions.
actions: The actions to be checked
Returns:
List of best actions with the property having as many free cells as possible in front of the player.
"""
max_straight_distance = 0
best_actions: Dict[Action, int] = {}
for action in actions:
gs_copy = copy.deepcopy(game_service)
try:
player = gs_copy.game.get_player_by_id(self.player.id)
gs_copy.visited_cells_by_player[player.id] = gs_copy.get_and_visit_cells(player, action)
straight_distance = 0
horizontal_multiplier, vertical_multiplier = GameService.get_horizontal_and_vertical_multiplier(player)
for i in range(max(gs_copy.game.height, gs_copy.game.width)):
x = player.x + (i + 1) * horizontal_multiplier
y = player.y + (i + 1) * vertical_multiplier
if x in range(gs_copy.game.width) and y in range(gs_copy.game.height) and (
gs_copy.game.cells[y][x].players is None or len(gs_copy.game.cells[y][x].players) == 0):
straight_distance += 1
else:
break
if len(best_actions) == 0 or straight_distance > max_straight_distance:
max_straight_distance = straight_distance
best_actions[action] = straight_distance
updated_best_actions: Dict[Action, int] = {}
for (act, dist) in best_actions.items():  # new max_straight_distance, so drop options that are now too much worse
if dist >= max_straight_distance - self.__max_worse_distance:
updated_best_actions[act] = dist
best_actions = updated_best_actions
elif straight_distance >= max_straight_distance - self.__max_worse_distance: # still good option
best_actions[action] = straight_distance
except InvalidPlayerMoveException as ex:
logging.warning(ex)
continue
return list(best_actions.keys())
def find_surviving_actions(self, game_service: GameService, depth: int) -> List[Action]:
"""Finds all actions that will let the player survive for the next rounds.
Args:
game_service: The game service used for simulation of actions.
depth: The number of rounds the player should survive at least.
Returns:
Actions that will not kill the player in the next rounds.
"""
result: List[Action] = []
for action in Action:
gs_copy = pickle.loads(pickle.dumps(game_service))
try:
player = gs_copy.game.get_player_by_id(self.player.id)
if player.speed == self._max_speed and action == Action.speed_up:
continue
gs_copy.visited_cells_by_player[player.id] = gs_copy.get_and_visit_cells(player, action)
except InvalidPlayerMoveException:
continue
gs_copy.check_and_set_died_players()
if player.active:
interim_result = []
if depth > 1:
# recursive call to look further into the future
interim_result = self.find_surviving_actions(gs_copy, depth - 1)
if len(interim_result) > 0 or depth == 1:
result += [action]
return result
def find_surviving_actions_with_best_depth(self, game_service: GameService) -> List[Action]:
"""Finds all actions that won't kill the player in the next rounds.
The number of pre-calculated player moves is reduced until surviving actions are found.
Args:
game_service: The game service used for simulation of actions.
Returns:
Actions that will not kill the player in the next rounds.
"""
result: List[Action] = []
for current_depth in reversed(range(1, self.__depth + 1)):
result = self.find_surviving_actions(game_service, current_depth)
if len(result) > 0:
break
return result
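# Usage sketch (illustrative; mirrors how the tests drive this AI):
#
#   ai = NotKillingItselfAI(player, [AIOptions.max_distance], max_speed=1,
#                           max_worse_distance=0, depth=3)
#   actions = ai.find_surviving_actions_with_best_depth(GameService(game))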
```
#### File: chillow/view/graphical_view.py
```python
import sys
import time
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.game import Game
from chillow.view.view import View
class GraphicalView(View):
"""Provides a graphical UI using PyGame."""
RECT_SIZE = 10
CLOCK_TICK = 60
def __init__(self, pygame):
"""Creates a new graphical view.
Args:
pygame: The PyGame library.
"""
colors = [(255, 61, 0), (156, 204, 101), (171, 71, 188), (38, 166, 154), (255, 238, 88), (66, 165, 245)]
super().__init__(colors)
self.__pygame = pygame
self.__clock = self.__pygame.time.Clock()
pygame.init()
self.__clock.tick(self.CLOCK_TICK)
self.__next_action = True # Flag to wait for a KEYUP event.
# Otherwise the user is doing multiple actions with one click.
self.__screen = None
def update(self, game: Game):
"""See base class."""
if not self._interface_initialized:
self._initialize_interface(game)
if not game.running:
player = game.get_winner()
if player is None:
print("No winner in game.")
else:
print("Winner: Player " + str(player.id) + " (" + player.name + "). Your player ID was " +
str(game.you.id) + ".")
self.__screen.fill((0, 0, 0)) # black background
for row in range(game.height):
for col in range(game.width):
self.__pygame.draw.rect(self.__screen,
self.__get_player_color(game.cells[row][col]),
(col * self.RECT_SIZE + col,
row * self.RECT_SIZE + row,
self.RECT_SIZE,
self.RECT_SIZE))
if game.cells[row][col].get_player_id() != 0:
player = game.get_player_by_id(game.cells[row][col].get_player_id())
if player.x == col and player.y == row: # print head
border_width = 2
if player == game.you:
border_width = 4 # head of the own player has a smaller dot
self.__pygame.draw.rect(self.__screen,
self._player_colors[0],
(col * self.RECT_SIZE + col + border_width,
row * self.RECT_SIZE + row + border_width,
self.RECT_SIZE - (2 * border_width),
self.RECT_SIZE - (2 * border_width)))
self.__pygame.display.update()
self.__clock.tick(self.CLOCK_TICK)
def __get_player_color(self, cell: Cell):
return self._player_colors[cell.get_player_id()]
def read_next_action(self) -> Action: # noqa: C901
"""See base class."""
while True:
for event in self.__pygame.event.get():
if event.type == self.__pygame.QUIT: # Allows to close the pygame-window
self.end()
return Action.get_default()
elif event.type == self.__pygame.KEYDOWN:
pressed_key = self.__pygame.key.get_pressed()
self.__next_action = False
if pressed_key[self.__pygame.K_UP]:
return Action.speed_up
elif pressed_key[self.__pygame.K_DOWN]:
return Action.slow_down
elif pressed_key[self.__pygame.K_RIGHT]:
return Action.turn_right
elif pressed_key[self.__pygame.K_LEFT]:
return Action.turn_left
elif pressed_key[self.__pygame.K_SPACE]:
return Action.change_nothing
elif event.type == self.__pygame.KEYUP:
self.__next_action = True
def end(self):
"""See base class."""
time.sleep(10)
self.__pygame.display.quit()
self.__pygame.quit()
sys.exit()
def _initialize_interface(self, game: Game):
super()._initialize_interface(game)
self.__screen = self.__pygame.display.set_mode(
[game.width * self.RECT_SIZE + game.width, game.height * self.RECT_SIZE + game.height])
```
#### File: chillow/view/headless_view.py
```python
from chillow.model.game import Game
from chillow.view.view import View
class HeadlessView(View):
"""This view may be used when there is no need for any feedback on how the game is progressing.
There is no UI and no human player can interact with the game using this view.
"""
def __init__(self):
"""Creates a new headless view."""
colors = ['red', 'blue', 'green', 'yellow', 'magenta', 'cyan']
super().__init__(colors)
def update(self, game: Game):
"""See base class."""
pass
def read_next_action(self):
"""See base class."""
pass
def end(self):
"""See base class."""
pass
```
#### File: chillow/view/view.py
```python
from abc import ABCMeta, abstractmethod
from typing import List, Any
from chillow.model.game import Game
class View(metaclass=ABCMeta):
"""Provides an UI to show the game progress."""
def __init__(self, colors: List[Any]):
"""Creates a new view.
Args:
colors:
A list of values that define colors in the specific view.
This may be human readable strings or strings with hex values.
The list may not be empty.
Raises:
AssertionError: The parameter list is empty.
"""
self._interface_initialized = False
self._player_colors = {0: (0, 0, 0)}
assert colors is not None and len(colors) > 0, "No colors available for interface"
self.__colors = colors
@abstractmethod
def update(self, game: Game):
"""Updates the view with the new game state.
Args:
game: The state of the game that should be shown in the view.
"""
pass
@abstractmethod
def read_next_action(self):
"""Reads the next action to be performed by a human player."""
pass
@abstractmethod
def end(self):
"""Performs actions to shut down the view."""
pass
def _initialize_interface(self, game: Game):
self._interface_initialized = True
for i in range(0, len(game.players)):
self._player_colors[int(game.players[i].id)] = self.__colors[i % (len(self.__colors) - 1)]
```
#### File: tests/model/test_direction.py
```python
import unittest
from chillow.model.direction import Direction
class DirectionTest(unittest.TestCase):
def test_should_have_four_different_directions(self):
self.assertEqual(len(Direction), 4)
```
#### File: tests/model/test_game.py
```python
import unittest
from datetime import datetime, timezone
import tests
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.exceptions import WrongGameWidthException, WrongGameHeightException, OwnPlayerMissingException, \
PlayerPositionException, PlayerWithGivenIdNotAvailableException
from chillow.service.data_loader import JSONDataLoader
class GameTest(unittest.TestCase):
def test_examines_your_player_after_creation(self):
player1 = Player(1, 0, 1, Direction.up, 0, True, "Name 1")
player2 = Player(2, 1, 0, Direction.up, 0, True, "Name 2")
player3 = Player(3, 0, 0, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [[Cell([player3]), Cell([player2])], [Cell([player1]), Cell()]]
game = Game(2, 2, cells, players, 2, True, datetime.now())
self.assertEqual(game.you, player2)
def test_raise_exception_on_non_existing_own_player(self):
player1 = Player(1, 0, 1, Direction.up, 0, True, "Name 1")
player3 = Player(3, 0, 0, Direction.up, 0, True, "Name 3")
players = [player1, player3]
cells = [[Cell([player3]), Cell([])], [Cell([player1]), Cell()]]
with self.assertRaises(OwnPlayerMissingException):
Game(2, 2, cells, players, 2, True, datetime.now())
def test_raise_exception_on_wrong_player_position(self):
player1 = Player(1, 1, 1, Direction.up, 0, True, "Name 1")
player2 = Player(2, 0, 0, Direction.up, 0, True, "Name 2")
player3 = Player(3, 0, 1, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [[Cell([player2]), Cell([player3])], [Cell(), Cell([player1])]]
with self.assertRaises(PlayerPositionException):
Game(2, 2, cells, players, 2, True, datetime.now())
def test_dont_raise_exception_on_wrong_inactive_player_position(self):
player1 = Player(1, 1, 1, Direction.up, 0, False, "Name 1")
player2 = Player(2, 1, 0, Direction.up, 0, True, "Name 2")
player3 = Player(3, 0, 1, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [[Cell([]), Cell([player2])], [Cell([player3]), Cell([player3])]]
game = Game(2, 2, cells, players, 2, True, datetime.now())
self.assertEqual(game.you, player2)
def test_raise_exception_on_wrong_width(self):
cells = [
[
Cell()
],
[
Cell(), Cell()
]
]
with self.assertRaises(WrongGameWidthException):
Game(2, 2, cells, [], 0, True, datetime.now())
def test_raise_exception_on_wrong_height(self):
cells = [
[
Cell(), Cell()
]
]
with self.assertRaises(WrongGameHeightException):
Game(2, 2, cells, [], 0, True, datetime.now())
def test_find_winner_in_ended_game(self):
player1 = Player(1, 0, 0, Direction.up, 0, False, "Name")
player2 = Player(1, 1, 0, Direction.up, 0, True, "Name")
cells = [[Cell([player1]), Cell([player2])]]
game = Game(2, 1, cells, [player1, player2], 1, False, datetime.now())
result = game.get_winner()
self.assertEqual(player2, result)
def test_raise_exception_for_winner_in_running_game(self):
player = Player(1, 0, 0, Direction.up, 0, True, "Name")
cells = [[Cell([player]), Cell()]]
game = Game(2, 1, cells, [player], 1, True, datetime.now())
with self.assertRaises(Exception):
game.get_winner()
def test_return_no_winner_in_ended_game(self):
player1 = Player(1, 0, 0, Direction.up, 0, False, "Name")
player2 = Player(1, 1, 0, Direction.up, 0, False, "Name")
cells = [[Cell([player1]), Cell([player2])]]
game = Game(2, 1, cells, [player1, player2], 1, False, datetime.now())
result = game.get_winner()
self.assertEqual(None, result)
def test_player_with_id_should_be_returned(self):
player1 = Player(1, 0, 0, Direction.up, 0, True, "Name")
player2 = Player(2, 1, 0, Direction.up, 0, True, "Name")
cells = [[Cell([player1]), Cell([player2])]]
game = Game(2, 1, cells, [player1, player2], 1, True, datetime.now())
self.assertEqual(player1, game.get_player_by_id(1))
def test_raise_exception_when_player_id_invalid(self):
player1 = Player(1, 1, 0, Direction.up, 0, True, "Name")
player2 = Player(2, 0, 0, Direction.up, 0, True, "Name")
cells = [[Cell([player2]), Cell([player1])]]
game = Game(2, 1, cells, [player1, player2], 1, True, datetime.now())
with self.assertRaises(PlayerWithGivenIdNotAvailableException):
game.get_player_by_id(100)
def test_return_all_other_players(self):
player1 = Player(1, 1, 1, Direction.up, 0, True, "Name 1")
player2 = Player(2, 1, 0, Direction.up, 0, True, "Name 2")
player3 = Player(3, 0, 0, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [[Cell([player3]), Cell([player2])], [Cell([]), Cell([player1])]]
game = Game(2, 2, cells, players, 2, True, datetime.now())
result = game.get_other_player_ids(player2)
self.assertEqual([1, 3], result)
def test_return_all_other_active_players(self):
player1 = Player(1, 1, 1, Direction.up, 0, True, "Name 1")
player2 = Player(2, 1, 0, Direction.up, 0, False, "Name 2")
player3 = Player(3, 0, 0, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [[Cell([player3]), Cell([player2])], [Cell([]), Cell([player1])]]
game = Game(2, 2, cells, players, 1, True, datetime.now())
result = game.get_other_player_ids(player1, check_active=True)
self.assertEqual([3], result)
def test_return_all_players_except_one_within_distance_1(self):
player1 = Player(1, 3, 3, Direction.up, 0, True, "Name 1")
player2 = Player(2, 1, 3, Direction.up, 0, True, "Name 2")
player3 = Player(3, 0, 0, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [
[Cell([player3]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell([player2]), Cell(), Cell([player1]), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]
]
game = Game(5, 5, cells, players, 1, True, datetime.now())
result = game.get_other_player_ids(player1, 2)
self.assertEqual([2], result)
def test_return_all_players_except_one_within_distance_2(self):
player1 = Player(1, 4, 4, Direction.up, 0, True, "Name 1")
player2 = Player(2, 2, 3, Direction.up, 0, True, "Name 2")
player3 = Player(3, 1, 4, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player3]), Cell([player2]), Cell(), Cell([player1])]
]
game = Game(5, 5, cells, players, 1, True, datetime.now())
result = game.get_other_player_ids(player1, 3)
self.assertEqual([2], result)
def test_return_no_player_who_is_not_reachable(self):
player1 = Player(1, 4, 4, Direction.up, 0, True, "Name 1")
player2 = Player(2, 2, 3, Direction.up, 0, True, "Name 2")
player3 = Player(3, 1, 4, Direction.up, 0, True, "Name 3")
players = [player1, player2, player3]
cells = [
[Cell(), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player3]), Cell([player2]), Cell(), Cell([player1])]
]
game = Game(5, 5, cells, players, 1, True, datetime.now())
result = game.get_other_player_ids(player1, 3)
self.assertEqual([2], result)
def test_translate_cell_matrix_to_pathfinding_matrix_should_be_correct(self):
player1 = Player(1, 0, 0, Direction.up, 1, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell([player1]), Cell()],
[Cell([player2]), Cell()],
[Cell(), Cell()]]
game = Game(2, 3, cells, players, 2, True, datetime.now())
expected_matrix = [[0, 1],
[0, 1],
[1, 1]]
matrix = game.translate_cell_matrix_to_pathfinding_matrix()
self.assertEqual(matrix, expected_matrix)
def test_copying_a_game_should_return_same_game_but_different_identity(self):
player1 = Player(1, 1, 1, Direction.up, 0, True, "Name")
player2 = Player(2, 1, 0, Direction.up, 0, True, "Name2")
player3 = Player(3, 0, 0, Direction.up, 0, True, "Name3")
players = [player1, player2, player3]
cells = [[Cell([player3]), Cell([player2])], [Cell([]), Cell([player1])]]
game = Game(2, 2, cells, players, 2, True, datetime.now())
result = game.copy()
self.assertEqual(game, result)
self.assertNotEqual(id(game), id(result))
def test_normalize_game_deadline_1(self):
server_time = datetime(2020, 11, 20, 10, 33, 11, 0, timezone.utc)
own_time = datetime(2020, 11, 20, 10, 33, 12, 941748, timezone.utc)
game = JSONDataLoader().load(tests.read_test_file("model/game_1.json"))
game.deadline = datetime(2020, 11, 20, 10, 33, 18, 0, timezone.utc)
expected = datetime(2020, 11, 20, 10, 33, 19, 941748, timezone.utc)
game.normalize_deadline(server_time, own_time)
self.assertEqual(expected, game.deadline)
def test_normalize_game_deadline_2(self):
server_time = datetime(2020, 11, 20, 10, 33, 12, 941748, timezone.utc)
own_time = datetime(2020, 11, 20, 10, 33, 11, 0, timezone.utc)
game = JSONDataLoader().load(tests.read_test_file("model/game_1.json"))
game.deadline = datetime(2020, 11, 20, 10, 33, 18, 941748, timezone.utc)
expected = datetime(2020, 11, 20, 10, 33, 17, 0, timezone.utc)
game.normalize_deadline(server_time, own_time)
self.assertEqual(expected, game.deadline)
```
#### File: service/ai/test_not_killing_itself_ai.py
```python
import unittest
from datetime import datetime, timezone
from typing import List
from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class NotKillingItselfAITest(unittest.TestCase):
def test_ai_should_choose_the_own_non_killing_itself_action(self):
player1 = Player(1, 0, 0, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself(self):
player1 = Player(1, 0, 1, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself2(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_in_turn_6(self):
player1 = Player(1, 0, 4, Direction.up, 3, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
game_service.turn.turn_ctr = 6
sut = NotKillingItselfAI(player1, [], 4, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(Action.speed_up in actions)
self.assertTrue(len(actions) == 3)
def test_ai_should_not_choose_speed_up_if_max_speed_is_allready_reached(self):
MAX_SPEED = 3
player1 = Player(1, 0, 4, Direction.up, MAX_SPEED, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], MAX_SPEED, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_calc_action_with_max_distance(self):
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_calc_all_action_with_max_distance_with_max_worse_distance(self):
MAX_WORSE_DISTANCE = 1
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, MAX_WORSE_DISTANCE, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.speed_up in actions)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 3)
def test_get_information(self):
player = Player(1, 0, 4, Direction.up, 1, True, "")
sut = NotKillingItselfAI(player, [], 3, 1, 3)
expected = "max_speed=3, max_worse_distance=1, depth=3"
result = sut.get_information()
self.assertEqual(expected, result)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_with_depth_greater_than_one(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_greater_than_one_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_correct_list_with_depth_three_and_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_three_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_best_list_of_actions_by_depth_from_lower_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_by_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_in_lowest_possible_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
```
#### File: service/ai/test_search_tree_pathfinding_ai.py
```python
import unittest
from multiprocessing import Value
import tests
from chillow.service.ai.search_tree_pathfinding_ai import SearchTreePathfindingAI
from chillow.model.action import Action
from chillow.service.data_loader import JSONDataLoader
class SearchTreePathfindingAITest(unittest.TestCase):
def setUp(self):
self.data_loader = JSONDataLoader()
def test_should_select_action_to_let_player_survive_next_two_rounds(self):
game = self.data_loader.load(tests.read_test_file("ai/game_4.json"))
sut = SearchTreePathfindingAI(game.you, 3, 100, 2)
result = Value('i')
sut.create_next_action(game, result)
self.assertEqual(Action.turn_left, Action.get_by_index(result.value))
def test_should_select_action_of_pathfinding_ai_if_surviving_next_two_rounds_is_not_possible(self):
game = self.data_loader.load(tests.read_test_file("ai/game_5.json"))
sut = SearchTreePathfindingAI(game.you, 3, 50, 10)
result = Value('i')
sut.create_next_action(game, result)
self.assertEqual(Action.turn_right, Action.get_by_index(result.value))
def test_should_select_default_action(self):
game = self.data_loader.load(tests.read_test_file("ai/game_6.json"))
sut = SearchTreePathfindingAI(game.you, 3, 50, 10)
result = Value('i')
sut.create_next_action(game, result)
self.assertEqual(Action.get_default(), Action.get_by_index(result.value))
def test_get_information(self):
game = self.data_loader.load(tests.read_test_file("ai/game_4.json"))
sut = SearchTreePathfindingAI(game.you, 3, 100, 2, 5)
expected = "max_speed=3, count_paths_to_check=100, depth=2, distance_to_check=5"
result = sut.get_information()
self.assertEqual(expected, result)
```
#### File: tests/service/test_game_service.py
```python
import unittest
from datetime import datetime, timezone
from chillow.service.game_service import GameService
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
class GameTest(unittest.TestCase):
def setUp(self):
self.player1 = Player(1, 10, 10, Direction.down, 1, True, "")
self.player2 = Player(2, 10, 30, Direction.down, 3, True, "")
self.player3 = Player(3, 30, 10, Direction.right, 2, True, "Name 3")
players = [self.player1, self.player2, self.player3]
cells = [[Cell() for _ in range(40)] for _ in range(40)]
cells[self.player1.y][self.player1.x] = Cell([self.player1])
cells[self.player2.y][self.player2.x] = Cell([self.player2])
cells[self.player3.y][self.player3.x] = Cell([self.player3])
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
self.game = Game(40, 40, cells, players, 2, True, time)
self.sut = GameService(self.game)
def test_game_should_end_when_less_than_two_players_are_left(self):
self.player1.active = False
self.player2.active = False
self.assertEqual(self.sut.is_game_running(), False)
def test_game_should_not_end_when_more_than_one_players_are_left(self):
self.player1.active = False
self.assertEqual(self.sut.is_game_running(), True)
def test_player_should_loose_if_he_did_more_than_one_action_in_one_round(self):
self.sut.do_action(self.player1, Action.speed_up)
self.sut.do_action(self.player1, Action.speed_up)
self.assertEqual(self.player1.active, False)
def test_player_should_not_loose_if_he_did_exactly_one_action_in_one_round(self):
self.sut.do_action(self.player1, Action.speed_up)
self.assertEqual(self.player1.active, True)
def test_visited_cells_should_be_calculated_correctly_turn_1_to_5(self):
self.player1.direction = Direction.down
self.player1.speed = 1
player1_x = self.player1.x
player1_y = self.player1.y
self.game.cells[self.player1.y][self.player1.x] = Cell([self.player1])
self.player2.direction = Direction.up
self.player2.speed = 3
player2_x = self.player2.x
player2_y = self.player2.y
self.game.cells[self.player2.y][self.player2.x] = Cell([self.player2])
self.player3.direction = Direction.down
self.player3.speed = 5
player3_x = self.player3.x
player3_y = self.player3.y
self.game.cells[self.player3.y][self.player3.x] = Cell([self.player3])
visited_cells_p1_expected = [(player1_x, player1_y + 1), (player1_x, player1_y + 2)]
visited_cells_p2_expected = [(player2_x, player2_y - 1), (player2_x, player2_y - 2)]
visited_cells_p3_expected = [(player3_x + 1, player3_y), (player3_x + 2, player3_y), (player3_x + 3, player3_y),
(player3_x + 4, player3_y), (player3_x + 5, player3_y)]
visited_cells_p1 = self.sut.get_and_visit_cells(self.player1, Action.speed_up)
visited_cells_p2 = self.sut.get_and_visit_cells(self.player2, Action.slow_down)
visited_cells_p3 = self.sut.get_and_visit_cells(self.player3, Action.turn_left)
self.assertEqual(visited_cells_p1_expected, visited_cells_p1)
self.assertEqual(visited_cells_p2_expected, visited_cells_p2)
self.assertEqual(visited_cells_p3_expected, visited_cells_p3)
self.assertTrue(self.player1 in self.game.cells[player1_y + 1][player1_x].players)
self.assertTrue(self.player1 in self.game.cells[player1_y + 2][player1_x].players)
self.assertTrue(self.player2 in self.game.cells[player2_y - 1][player2_x].players)
self.assertTrue(self.player2 in self.game.cells[player2_y - 2][player2_x].players)
self.assertTrue(self.player3 in self.game.cells[player3_y][player3_x + 1].players)
self.assertTrue(self.player3 in self.game.cells[player3_y][player3_x + 2].players)
self.assertTrue(self.player3 in self.game.cells[player3_y][player3_x + 3].players)
self.assertTrue(self.player3 in self.game.cells[player3_y][player3_x + 4].players)
self.assertTrue(self.player3 in self.game.cells[player3_y][player3_x + 5].players)
def test_visited_cells_should_be_calculated_correctly_turn_6(self):
self.sut.turn.turn_ctr = 12 # 6, 12, 18 should all work
self.player1.direction = Direction.down
self.player1.speed = 1
player1_x = self.player1.x
player1_y = self.player1.y
self.game.cells[10][10] = Cell([self.player1])
self.player2.direction = Direction.up
self.player2.speed = 5
player2_x = self.player2.x
player2_y = self.player2.y
self.game.cells[10][30] = Cell([self.player2])
visited_cells_p1_expected = [(player1_x, player1_y + 1), (player1_x, player1_y + 2)]
visited_cells_p2_expected = [(player2_x, player2_y - 1), (player2_x, player2_y - 6)]
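# hedged interpretation of the rule checked here: in every sixth round a player jumps, so only the
# first and the last cell of the move are occupied - hence the gap between y - 1 and y - 6 for player2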
visited_cells_p1 = self.sut.get_and_visit_cells(self.player1, Action.speed_up)
visited_cells_p2 = self.sut.get_and_visit_cells(self.player2, Action.speed_up)
self.assertEqual(visited_cells_p1_expected, visited_cells_p1)
self.assertEqual(visited_cells_p2_expected, visited_cells_p2)
self.assertTrue(self.player1 in self.game.cells[player1_y + 1][player1_x].players)
self.assertTrue(self.player1 in self.game.cells[player1_y + 2][player1_x].players)
self.assertTrue(self.player2 in self.game.cells[player2_y - 1][player2_x].players)
self.assertTrue(self.player2 in self.game.cells[player2_y - 6][player2_x].players)
def test_game_cells_should_be_correct_after_collision(self):
self.player1.direction = Direction.left
self.player1.speed = 4
self.player1.x = 2
self.player1.y = 0
self.game.cells[self.player1.y][self.player1.x] = Cell([self.player1])
self.game.cells[self.player1.y][1] = Cell([self.player1])
self.sut.get_and_visit_cells(self.player1, Action.speed_up)
self.assertEqual(self.player1.x, 0)
self.assertEqual(self.player1.y, 0)
self.assertTrue(self.player1 in self.game.cells[0][0].players)
self.assertTrue(self.player1 in self.game.cells[0][1].players)
self.assertTrue(self.player1 in self.game.cells[0][2].players)
def test_visited_cells_should_be_correct_after_collision(self):
self.player1.direction = Direction.left
self.player1.speed = 4
self.player1.x = 2
self.player1.y = 0
self.game.cells[self.player1.y][self.player1.x] = Cell([self.player1])
self.game.cells[self.player1.y][1] = Cell([self.player1])
visited_cells = self.sut.get_and_visit_cells(self.player1, Action.speed_up)
self.assertEqual(self.player1.x, 0)
self.assertEqual(self.player1.y, 0)
self.assertTrue((0, 0) in visited_cells)
self.assertTrue((1, 0) in visited_cells)
def test_playerX_playerY_should_be_correct_after_collision(self):
self.player1.direction = Direction.left
self.player1.speed = 2
self.player1.x = 1
self.player1.y = 0
self.game.cells[self.player1.y][self.player1.x] = Cell([self.player1])
self.sut.get_and_visit_cells(self.player1, Action.speed_up)
self.assertEqual(self.player1.x, 0)
self.assertEqual(self.player1.y, 0)
def test_playerX_playerY_should_be_correct_without_collision(self):
self.player1.direction = Direction.left
self.player1.speed = 2
self.player1.x = 10
self.player1.y = 0
self.game.cells[self.player1.y][self.player1.x] = Cell([self.player1])
self.sut.get_and_visit_cells(self.player1, Action.change_nothing)
self.assertEqual(self.player1.x, 8)
self.assertEqual(self.player1.y, 0)
def test_correct_multiplier_should_be_returned_direction_up(self):
self.player1.direction = Direction.up
self.assertEqual((0, -1), self.sut.get_horizontal_and_vertical_multiplier(self.player1))
def test_correct_multiplier_should_be_returned_direction_down(self):
self.player1.direction = Direction.down
self.assertEqual((0, 1), self.sut.get_horizontal_and_vertical_multiplier(self.player1))
def test_correct_multiplier_should_be_returned_direction_left(self):
self.player1.direction = Direction.left
self.assertEqual((-1, 0), self.sut.get_horizontal_and_vertical_multiplier(self.player1))
def test_correct_multiplier_should_be_returned_direction_right(self):
self.player1.direction = Direction.right
self.assertEqual((1, 0), self.sut.get_horizontal_and_vertical_multiplier(self.player1))
```
#### File: tests/view/test_graphical_view.py
```python
import io
import unittest
from datetime import datetime
from unittest.mock import Mock, ANY, call, patch
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.view.graphical_view import GraphicalView
pygame_mock = Mock()
class GraphicalViewTest(unittest.TestCase):
def setUp(self) -> None:
self.sut = GraphicalView(pygame_mock)
mock_event = Mock()
mock_event.type = pygame_mock.KEYDOWN = 1
pygame_mock.event.get.return_value = [mock_event]
pygame_mock.K_UP = 0
pygame_mock.K_DOWN = 1
pygame_mock.K_RIGHT = 2
pygame_mock.K_LEFT = 3
pygame_mock.K_SPACE = 4
pygame_mock.key.get_pressed.return_value = [False for _ in range(pygame_mock.K_SPACE + 1)]
def test_draws_all_players_correctly(self):
player1 = Player(1, 0, 0, Direction.up, 1, True, "p1")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
cells = [[Cell([player1]), Cell([player1])],
[Cell([player2]), Cell()]]
game = Game(2, 2, cells, [player1, player2], 2, True, datetime.now())
expected_calls = [
call(ANY, (255, 61, 0), (0, 0, 10, 10)),
call(ANY, (0, 0, 0), (2, 2, 6, 6)),
call(ANY, (255, 61, 0), (11, 0, 10, 10)),
call(ANY, (156, 204, 101), (0, 11, 10, 10)),
call(ANY, (0, 0, 0), (4, 15, 2, 2)),
call(ANY, (0, 0, 0), (11, 11, 10, 10))
]
self.sut.update(game)
pygame_mock.init.assert_called_once()
pygame_mock.draw.rect.assert_has_calls(expected_calls, any_order=False)
@patch('sys.stdout', new_callable=io.StringIO)
def test_draws_all_players_correctly_in_ended_game_no_winner(self, mock_stdout):
player1 = Player(1, 0, 0, Direction.up, 1, False, "p1")
player2 = Player(2, 0, 1, Direction.down, 3, False, "")
cells = [[Cell([player1]), Cell([player1])],
[Cell([player2]), Cell()]]
game = Game(2, 2, cells, [player1, player2], 2, False, datetime.now())
self.sut.update(game)
self.assertTrue("No winner in game." in str(mock_stdout.getvalue()))
@patch('sys.stdout', new_callable=io.StringIO)
def test_draws_all_players_correctly_in_ended_game_with_winner(self, mock_stdout):
player1 = Player(1, 1, 0, Direction.up, 1, True, "Jonas")
player2 = Player(2, 0, 1, Direction.down, 3, False, "Florian")
cells = [[Cell(), Cell([player1])],
[Cell([player2]), Cell()]]
game = Game(2, 2, cells, [player1, player2], 2, False, datetime.now())
self.sut.update(game)
self.assertTrue("Winner: Player 1 (Jonas). Your player ID was 2." in str(mock_stdout.getvalue()))
@patch('sys.exit')
@patch('time.sleep')
def test_end_view(self, time_sleep, sys_exit):
self.sut.end()
pygame_mock.display.quit.assert_called_once()
pygame_mock.quit.assert_called_once()
def test_read_next_action_should_return_correct_action_input_up(self):
pygame_mock.key.get_pressed.return_value[pygame_mock.K_UP] = True
self.assertEqual(Action.speed_up, self.sut.read_next_action())
def test_read_next_action_should_return_correct_action_input_down(self):
pygame_mock.key.get_pressed.return_value[pygame_mock.K_DOWN] = True
self.assertEqual(Action.slow_down, self.sut.read_next_action())
def test_read_next_action_should_return_correct_action_input_right(self):
pygame_mock.key.get_pressed.return_value[pygame_mock.K_RIGHT] = True
self.assertEqual(Action.turn_right, self.sut.read_next_action())
def test_read_next_action_should_return_correct_action_input_left(self):
pygame_mock.key.get_pressed.return_value[pygame_mock.K_LEFT] = True
self.assertEqual(Action.turn_left, self.sut.read_next_action())
def test_read_next_action_should_return_correct_action_input_space(self):
pygame_mock.key.get_pressed.return_value[pygame_mock.K_SPACE] = True
self.assertEqual(Action.change_nothing, self.sut.read_next_action())
@patch('sys.exit')
@patch('time.sleep')
def test_read_next_action_should_return_correct_action_input_close(self, time_sleep, sys_exit):
mock_event = Mock()
mock_event.type = pygame_mock.QUIT = 2
pygame_mock.event.get.return_value = [mock_event]
result = self.sut.read_next_action()
self.assertEqual(Action.get_default(), result)
pygame_mock.display.quit.assert_called()
pygame_mock.quit.assert_called()
``` |
{
"source": "JonasHell/torch-em",
"score": 2
} |
#### File: dsb/spoco/train_spoco.py
```python
import torch
import torch_em
import torch_em.loss.spoco_loss as spoco
from torch_em.model import UNet2d
from torch_em.data.datasets import get_dsb_loader
from torch_em.trainer.spoco_trainer import SPOCOTrainer
def train_boundaries(args):
model = UNet2d(in_channels=1, out_channels=8, initial_features=64)
patch_shape = (1, 256, 256)
train_loader = get_dsb_loader(
args.input, patch_shape, split="train", download=True, batch_size=args.batch_size, label_dtype=torch.int64,
label_transform=torch_em.transform.label_consecutive, num_workers=4,
)
val_loader = get_dsb_loader(
args.input, patch_shape, split="test", batch_size=args.batch_size, label_dtype=torch.int64,
label_transform=torch_em.transform.label_consecutive, num_workers=4,
)
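# hedged interpretation: delta_var and delta_dist are the pull/push margins of the discriminative
# embedding loss underlying SPOCO (maximal spread within an instance cluster / minimal required
# distance between clusters); pmaps_threshold is used when converting embedding distances into
# instance probability maps for the metric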
delta_var = 0.75
delta_dist = 2.0
pmaps_threshold = 0.9
aux_loss = "dice"
loss = spoco.SPOCOLoss(delta_var, delta_dist, aux_loss=aux_loss)
metric = spoco.SPOCOMetric(delta_dist, pmaps_threshold=pmaps_threshold)
trainer = torch_em.default_segmentation_trainer(
name="dsb-spoco-model",
model=model,
train_loader=train_loader,
val_loader=val_loader,
loss=loss,
metric=metric,
learning_rate=1e-4,
device=args.device,
mixed_precision=True,
log_image_interval=50,
trainer_class=SPOCOTrainer,
)
trainer.fit(iterations=args.n_iterations)
if __name__ == '__main__':
parser = torch_em.util.parser_helper(default_batch_size=8)
args = parser.parse_args()
train_boundaries(args)
```
#### File: experiments/epithelia/export_bioimageio_model.py
```python
import z5py
from torch_em.util import export_bioimageio_model, get_default_citations, export_parser_helper
def _load_data():
path = "/g/kreshuk/pape/Work/data/epethelia/test/per02_100.zarr"
with z5py.File(path, "r") as f:
raw = f["raw"][:]
return raw
def export_to_bioimageio(checkpoint, output):
input_data = _load_data()
postprocessing = None
offsets = [
[-1, 0], [0, -1],
[-3, 0], [0, -3],
[-9, 0], [0, -9],
[-27, 0], [0, -27]
]
config = {"mws": {"offsets": offsets}}
name = "EpitheliaAffinityModel"
tags = ["u-net", "segmentation"]
cite = get_default_citations(model="UNet2d", model_output="affinities")
doc = "Affinity prediction for epithelia cells"
export_bioimageio_model(
checkpoint, output, input_data,
name=name,
authors=[{"name": "<NAME>; @constantinpape"}],
tags=tags,
license="CC-BY-4.0",
documentation=doc,
git_repo="https://github.com/constantinpape/torch-em.git",
cite=cite,
model_postprocessing=postprocessing,
input_optional_parameters=False,
config=config
)
if __name__ == "__main__":
parser = export_parser_helper()
args = parser.parse_args()
export_to_bioimageio(args.checkpoint, args.output)
```
#### File: experiments/livecell/check_dataset.py
```python
from torch_em.data.datasets.livecell import get_livecell_loader
from torch_em.util.debug import check_loader
PATH = "/home/pape/Work/data/livecell"
def check_livecell_size():
patch_shape = (512, 512)
loader = get_livecell_loader(PATH, patch_shape, "train", download=True, batch_size=1)
print("Training images:", len(loader.dataset))
loader = get_livecell_loader(PATH, patch_shape, "val", download=True, batch_size=1)
print("Val images:", len(loader.dataset))
def check_livecell_images():
patch_shape = (512, 512)
loader = get_livecell_loader(PATH, patch_shape, "train", download=True, batch_size=1)
check_loader(loader, 10, instance_labels=True)
# NOTE:
# - Tischi had a problem containing similar data
# - overlapping instances! are not reflected in the current label processing!
# - there seem to be quite a lot of cells not captured in the segmentation labels
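# A minimal sketch to quantify the last point above - how much of each patch actually carries an
# instance label. Assumptions (hedged): the loader yields (image, label) batches and the labels are
# integer tensors with 0 as background.
def check_label_coverage(n_batches=5):
    patch_shape = (512, 512)
    loader = get_livecell_loader(PATH, patch_shape, "train", download=True, batch_size=1)
    for i, (_, labels) in enumerate(loader):
        if i >= n_batches:
            break
        covered = (labels > 0).float().mean().item()
        print(f"batch {i}: {covered:.2%} of pixels belong to a labeled cell")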
if __name__ == "__main__":
check_livecell_size()
check_livecell_images()
```
#### File: experiments/livecell/validate_model.py
```python
import argparse
import os
from glob import glob
from pathlib import Path
import imageio
import h5py
import pandas as pd
from bioimageio.core import load_resource_description
from bioimageio.core.prediction import predict_with_padding
from bioimageio.core.prediction_pipeline import create_prediction_pipeline
from elf.evaluation import mean_average_precision
from torch_em.util.segmentation import (connected_components_with_boundaries,
mutex_watershed, size_filter)
from tqdm import tqdm
from xarray import DataArray
try:
import napari
except ImportError:
napari = None
def segment(prediction_pipeline, path, out_path, view, offsets=None, strides=None, min_seg_size=50):
image = imageio.imread(path)
assert image.ndim == 2
input_ = DataArray(image[None, None], dims=prediction_pipeline.input_specs[0].axes)
padding = {"x": 16, "y": 16}
prediction = predict_with_padding(prediction_pipeline, input_, padding)[0][0]
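# the first channel of the prediction is the foreground probability, the remaining channels are the
# boundary or affinity predictions (cf. the offsets handling below)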
foreground, prediction = prediction[0], prediction[1:]
if offsets is None:
assert prediction.shape[0] == 1, f"{prediction.shape}"
prediction = prediction[0]
assert foreground.shape == prediction.shape
seg = connected_components_with_boundaries(foreground, prediction)
else:
assert len(offsets) == prediction.shape[0]
mask = foreground > 0.5
seg = mutex_watershed(prediction, offsets, mask=mask, strides=strides)
seg = size_filter(seg, min_seg_size, hmap=prediction, with_background=True)
# implement more postprocessing?
# - merge noisy foreground predictions (that only have very weak boundary predictions) into the background
if out_path is not None:
with h5py.File(out_path, "w") as f:
f.create_dataset("prediction", data=prediction, compression="gzip")
f.create_dataset("foreground", data=foreground, compression="gzip")
f.create_dataset("segmentation", data=seg, compression="gzip")
if view:
assert napari is not None
v = napari.Viewer()
v.add_image(image)
v.add_image(foreground)
v.add_image(prediction)
v.add_labels(seg)
napari.run()
return seg
def validate(seg, gt_path):
gt = imageio.imread(gt_path)
assert gt.shape == seg.shape
map_, scores = mean_average_precision(seg, gt, return_aps=True)
# map, iou50, iou75, iou90
return [map_, scores[0], scores[5], scores[-1]]
def run_prediction(model_path, input_files, target_files, output_folder, view, min_seg_size, device):
model = load_resource_description(model_path)
offsets, strides = None, None
if "mws" in model.config:
offsets = model.config["mws"]["offsets"]
strides = [4, 4]
if output_folder is not None:
os.makedirs(output_folder, exist_ok=True)
validation_results = []
devices = None if device is None else [device]
with create_prediction_pipeline(bioimageio_model=model, devices=devices) as pp:
for in_path, target_path in tqdm(zip(input_files, target_files), total=len(input_files)):
fname = str(Path(in_path).stem)
out_path = None if output_folder is None else os.path.join(output_folder, f"{fname}.h5")
seg = segment(pp, in_path, out_path, view,
offsets=offsets, strides=strides, min_seg_size=min_seg_size)
if target_path:
val = validate(seg, target_path)
validation_results.append([fname] + val)
if validation_results:
cols = ["name", "mAP", "IoU50", "IoU75", "IoU90"]
validation_results = pd.DataFrame(validation_results, columns=cols)
print("Validation results averaged over all", len(input_files), "images:")
print(validation_results[cols[1:]].mean(axis=0))
return validation_results
# TODO needs update for live-cell data structure
def _load_data(input_folder, ext):
input_data = glob(os.path.join(input_folder, "images", f"*.{ext}"))
input_data.sort()
if os.path.exists(os.path.join(input_folder, "masks")):
input_target = glob(os.path.join(input_folder, "masks", f"*.{ext}"))
input_target.sort()
else:
input_target = [None] * len(input_data)
assert len(input_data) == len(input_target)
return input_data, input_target
def main():
parser = argparse.ArgumentParser(
"Run prediction and segmentation with a bioimagie.io model and save or validate the results."
"If 'output_folder' is passed, the results will be saved as hdf5 files with keys:"
"prediction: the affinity or boundary predictions"
"foreground: the foreground predictions"
"segmentation: the nucleus instance segmentation"
)
parser.add_argument("-m", "--model", required=True, help="Path to the bioimage.io model.")
parser.add_argument("-i", "--input_folder", required=True,
help="The root input folder with subfolders 'images' and (optionally) 'masks'")
parser.add_argument("--ext", default="tif", help="The file extension of the input files.")
parser.add_argument("-o", "--output_folder", default=None, help="Where to save the results.")
parser.add_argument("-v", "--view", default=0,
help="Whether to show segmentation results (needs napari).", type=int)
parser.add_argument("--min_seg_size", default=25, type=int)
parser.add_argument("--device", default=None, help="The device used for inference.")
parser.add_argument("--save_path", "-s", default=None, help="Where to save a csv with the validation results.")
args = parser.parse_args()
input_files, target_files = _load_data(args.input_folder, args.ext)
res = run_prediction(args.model, input_files, target_files, args.output_folder,
view=bool(args.view), min_seg_size=args.min_seg_size, device=args.device)
if args.save_path is not None:
assert res is not None
res.to_csv(args.save_path, index=False)
if __name__ == "__main__":
main()
```
#### File: mito-em/challenge/segment_and_validate.py
```python
import os
import numpy as np
import pandas as pd
import z5py
from segmentation_impl import segment_with_affinities, segment_with_boundaries
def compute_summaries(metric):
def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):
p = metric.params
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = metric.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind]
else:
# dimension of recall: [TxKxAxM]
s = metric.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, aind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
return mean_s
def _summarizeDets():
stats = np.zeros(6)
names = ['mean_ap', 'ap-.5', 'ap-.75', 'ap-.75_small', 'ap-.75_medium', 'ap-.75_large']
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5)
stats[2] = _summarize(1, iouThr=.75)
stats[3] = _summarize(1, areaRng='small', iouThr=.75)
stats[4] = _summarize(1, areaRng='medium', iouThr=.75)
stats[5] = _summarize(1, areaRng='large', iouThr=.75)
return names, stats
if not metric.eval:
raise Exception('Please run accumulate() first')
return _summarizeDets()
# the code looks pretty inefficient, I can probably compute the
# same with elf / cluster_tools stuff and do it out of core
def map3d_impl(seg, gt):
from map3d.vol3d_eval import VOL3Deval
from map3d.vol3d_util import seg_iou3d_sorted
ui, uc = np.unique(seg, return_counts=True)
uc = uc[ui > 0]
ui = ui[ui > 0]
pred_score = np.ones((len(ui), 2), int)
pred_score[:, 0] = ui
pred_score[:, 1] = uc
thres = [5e3, 1.5e4]
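# the rows of area_rng constructed below cover: all instances, small (< 5e3 voxels),
# medium (5e3 - 1.5e4 voxels) and large (> 1.5e4 voxels)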
area_rng = np.zeros((len(thres) + 2, 2), int)
area_rng[0, 1] = 1e10
area_rng[-1, 1] = 1e10
area_rng[2:, 0] = thres
area_rng[1:-1, 1] = thres
result_p, result_fn, pred_score_sorted = seg_iou3d_sorted(seg, gt, pred_score, area_rng)
v3dEval = VOL3Deval(result_p, result_fn, pred_score_sorted)
v3dEval.params.areaRng = area_rng
v3dEval.accumulate()
v3dEval.summarize()
names, stats = compute_summaries(v3dEval)
return names, stats
def validate(checkpoint, sample, seg_name, seg_key):
print("Validating", sample, seg_key, "...")
path = f'./data/{sample}.n5'
labels_key = 'labels'
name = os.path.split(checkpoint)[1]
bb = np.s_[:]
# bb = np.s_[:25, :256, :256]
with z5py.File(path, 'r') as f:
ds_labels = f[labels_key]
ds_labels.n_threads = 16
ds_seg = f[seg_key]
ds_seg.n_threads = 16
labels = ds_labels[bb]
seg = ds_seg[bb]
names, stats = map3d_impl(labels, seg)
data = np.array([name, sample, seg_name] + stats.tolist())
result_table = './validation_results.csv'
if os.path.exists(result_table):
results = pd.read_csv(result_table)
results = results.append(pd.DataFrame(data[None], columns=results.columns))
else:
columns = ['network', 'sample', 'method'] + names
results = pd.DataFrame(data[None], columns=columns)
results.to_csv(result_table, index=False)
def prep_affinity_cache(checkpoint, sample):
checkpoint_name = os.path.split(checkpoint)[1]
tmp_folder = os.path.join('./tmp_folders', f'tmp_{checkpoint_name}_{sample}_mws')
os.makedirs(tmp_folder, exist_ok=True)
inference_log = os.path.join(tmp_folder, 'inference.log')
with open(inference_log, 'w'):
pass
def segment_and_validate(checkpoint, samples, target, beta, gpus,
only_prediction=False, gpu_type='2080Ti'):
checkpoint_name = os.path.split(checkpoint)[1]
is_affinity_model = 'affinity' in checkpoint_name
for sample in samples:
segment_with_boundaries(sample, checkpoint, target=target,
beta=beta, gpus=gpus,
only_prediction=only_prediction,
gpu_type=gpu_type,
is_affinity_model=is_affinity_model)
if only_prediction:
continue
if is_affinity_model:
prep_affinity_cache(checkpoint, sample)
segment_with_affinities(sample, checkpoint, target, gpus,
gpu_type=gpu_type)
validate(checkpoint, sample, seg_name='multicut',
seg_key=f'segmentation/{checkpoint_name}/multicut_postprocessed')
if is_affinity_model:
validate(checkpoint, sample, seg_name='mutex_watershed',
seg_key=f'segmentation/{checkpoint_name}/mutex_watershed_postprocessed')
def val_v1():
checkpoints = ['./checkpoints/affinity_model_large_human_rat',
'./checkpoints/affinity_model_large_train_on_val_human_rat']
# checkpoints = ['./checkpoints/affinity_model_default_human_rat']
samples = ['human_val', 'rat_val']
beta = .5
target = 'local'
only_prediction = False
gpus = list(range(2))
gpu_type = 'A100'
# gpus = [1, 2, 3, 5]
# gpu_type = '2080Ti'
for checkpoint in checkpoints:
segment_and_validate(checkpoint, samples, target, beta,
gpus=gpus,
only_prediction=only_prediction, gpu_type=gpu_type)
if __name__ == '__main__':
val_v1()
```
#### File: experiments/mito-em/export_bioimageio_model.py
```python
import os
from elf.io import open_file
from torch_em.data.datasets import get_bioimageio_dataset_id
from torch_em.util import (add_weight_formats, export_parser_helper,
export_bioimageio_model, get_default_citations,
get_training_summary)
def _get_name_and_description(is_aff):
name = "MitochondriaEMSegmentation"
if is_aff:
name += "AffinityModel"
else:
name += "BoundaryModel"
description = "Mitochondria segmentation for electron microscopy."
return name, description
def _load_data(input_):
with open_file(input_, 'r') as f:
ds = f['raw']
shape = ds.shape
halo = [16, 128, 128]
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))
raw = ds[bb]
return raw
def _get_doc(is_aff_model, ckpt, name):
if is_aff_model:
pred_type = "affinity maps"
pp = "The affinities can be processed with the Mutex Watershed to obtain an instance segmentation."
else:
pred_type = "boundary maps"
pp = "The boundaries can be processed with Multicut to obtain an instance segmentation."
training_summary = get_training_summary(ckpt, to_md=True, lr=1.0e-4)
model_tag = name.lower()
doc = f"""# U-Net for Mitochondria Segmentation
This model segments mitochondria in electron microscopy images. It predicts {pred_type} and foreground probabilities. {pp}
## Training
The network was trained on data from the [MitoEM Segmentation Challenge](https://mitoem.grand-challenge.org/).
The training script can be found [here](https://github.com/constantinpape/torch-em/tree/main/experiments/mito-em).
This folder also includes example usages of this model.
### Training Data
- Imaging modality: serial blockface electron microscopy
- Dimensionality: 3D
- Source: https://mitoem.grand-challenge.org/
### Recommended Validation
It is recommended to validate the instance segmentation obtained from this model using intersection-over-union.
See [the validation script](https://github.com/constantinpape/torch-em/tree/main/experiments/mito-em/validate_model.py).
This model can also be used in ilastik, deepimageJ or other software that supports the bioimage.io model format.
### Training Schedule
{training_summary}
## Contact
For questions or issues with this model, please reach out by:
- opening a topic with tags bioimageio and {model_tag} on [image.sc](https://forum.image.sc/)
- or creating an issue in https://github.com/constantinpape/torch-em"""
return doc
def export_to_bioimageio(checkpoint, input_, output, affs_to_bd, additional_formats):
root, ckpt_name = os.path.split(checkpoint)
if input_ is None:
input_data = None
else:
input_data = _load_data(input_)
is_aff_model = "affinity" in ckpt_name
if is_aff_model and affs_to_bd:
postprocessing = "affinities_with_foreground_to_boundaries3d"
else:
postprocessing = None
if is_aff_model and affs_to_bd:
is_aff_model = False
name, desc = _get_name_and_description(is_aff_model)
if is_aff_model:
offsets = [
[-1, 0, 0], [0, -1, 0], [0, 0, -1],
[-2, 0, 0], [0, -3, 0], [0, 0, -3],
[-3, 0, 0], [0, -9, 0], [0, 0, -9]
]
config = {"mws": {"offsets": offsets}}
else:
config = {}
cite = get_default_citations(
model="AnisotropicUNet",
model_output="affinities" if is_aff_model else "boundaries"
)
cite["data"] = "https://doi.org/10.1007/978-3-030-59722-1_7"
tags = ["3d", "electron-microscopy", "mitochondria", "instance-segmentation", "unet"]
doc = _get_doc(is_aff_model, checkpoint, name)
if additional_formats is None:
additional_formats = []
export_bioimageio_model(
checkpoint, output,
input_data=input_data,
name=name,
description=desc,
authors=[{"name": "<NAME>; @constantinpape"}],
tags=tags,
license="CC-BY-4.0",
documentation=doc,
git_repo="https://github.com/constantinpape/torch-em.git",
cite=cite,
model_postprocessing=postprocessing,
input_optional_parameters=False,
for_deepimagej="torchscript" in additional_formats,
links=[get_bioimageio_dataset_id("mitoem")],
maintainers=[{"github_user": "constantinpape"}],
config=config,
)
add_weight_formats(output, additional_formats)
if __name__ == "__main__":
parser = export_parser_helper()
args = parser.parse_args()
export_to_bioimageio(args.checkpoint, args.input, args.output,
bool(args.affs_to_bd), args.additional_formats)
```
#### File: neuron-segmentation/isbi2012/predict_and_segment.py
```python
import argparse
import numpy as np
import torch
from elf.io import open_file
from train_boundaries_2d import get_model
# TODO
# - prediction in 3d
# - prediction with affinities
# - segmentation with multicut (for boundaries), mutex watershed (for affinities)
def predict_boundaries_2d(in_path, out_path, checkpoint, device=torch.device('cuda')):
model = get_model()
state = torch.load(checkpoint)['model_state']
model.load_state_dict(state)
model.to(device)
model.eval()
with open_file(in_path, 'r') as f:
raw = f['raw'][:]
prediction = np.zeros_like(raw, dtype='float32')
with torch.no_grad():
for z in range(raw.shape[0]):
input_ = raw[z].astype('float32') / 255.
input_ = torch.from_numpy(input_[None, None]).to(device)
pred = model(input_).cpu().numpy()[0, 0]
prediction[z] = pred
with open_file(out_path, 'a') as f:
ds = f.require_dataset('boundaries', prediction.shape, compression='gzip', dtype='float32',
chunks=(1,) + prediction.shape[1:])
ds[:] = prediction
return prediction
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', required=True)
parser.add_argument('-o', '--output', required=True)
parser.add_argument('-c', '--checkpoint', required=True)
args = parser.parse_args()
in_path = args.input
out_path = args.output
checkpoint = args.checkpoint
predict_boundaries_2d(in_path, out_path, checkpoint)
```
#### File: neuron-segmentation/snemi/train_affinities.py
```python
import numpy as np
import torch_em
from torch_em.model import AnisotropicUNet
from torch_em.loss import DiceLoss, LossWrapper, ApplyAndRemoveMask
from torch_em.data.datasets import get_snemi_loader
from torch_em.util import parser_helper
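# affinity offsets used as network targets: short- to long-range offsets in-plane and smaller steps
# along z, matching the anisotropy of the SNEMI data (cf. the anisotropic scale factors in get_model)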
OFFSETS = [
[-1, 0, 0], [0, -1, 0], [0, 0, -1],
[-2, 0, 0], [0, -3, 0], [0, 0, -3],
[-3, 0, 0], [0, -9, 0], [0, 0, -9],
[-4, 0, 0], [0, -27, 0], [0, 0, -27]
]
def get_loader(input_path, train, patch_shape, batch_size=1, n_samples=None):
n_slices = 100
z = n_slices - patch_shape[0]
roi = np.s_[:z, :, :] if train else np.s_[z:, :, :]
return get_snemi_loader(
path=input_path,
patch_shape=patch_shape,
batch_size=batch_size,
rois=roi,
offsets=OFFSETS,
n_samples=n_samples,
num_workers=8*batch_size,
shuffle=True,
download=True
)
def get_model():
n_out = len(OFFSETS)
model = AnisotropicUNet(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=32,
gain=2,
final_activation="Sigmoid"
)
return model
def train_affinities(args):
model = get_model()
patch_shape = [32, 320, 320]
train_loader = get_loader(
args.input, train=True,
patch_shape=patch_shape,
n_samples=1000
)
val_loader = get_loader(
args.input, train=False,
patch_shape=patch_shape,
n_samples=50
)
loss = LossWrapper(loss=DiceLoss(), transform=ApplyAndRemoveMask())
name = "affinity_model"
trainer = torch_em.default_segmentation_trainer(
name=name,
model=model,
train_loader=train_loader,
val_loader=val_loader,
loss=loss,
metric=loss,
learning_rate=1e-4,
mixed_precision=True,
log_image_interval=50
)
if args.from_checkpoint:
trainer.fit(args.n_iterations, "latest")
else:
trainer.fit(args.n_iterations)
def check(args, train=True, val=True, n_images=2):
from torch_em.util.debug import check_loader
patch_shape = [32, 320, 320]
if train:
print("Check train loader")
loader = get_loader(args.input, True, patch_shape)
check_loader(loader, n_images)
if val:
print("Check val loader")
loader = get_loader(args.input, False, patch_shape)
check_loader(loader, n_images)
if __name__ == "__main__":
parser = parser_helper()
args = parser.parse_args()
if args.check:
check(args, train=True, val=True)
else:
train_affinities(args)
```
#### File: bioimageio-examples/diff-output-shape/resize_unet.py
```python
import torch
import torch.nn as nn
class UNetBase(nn.Module):
"""
"""
def __init__(
self,
encoder,
base,
decoder,
out_conv=None,
final_activation=None
):
super().__init__()
if len(encoder) != len(decoder):
raise ValueError(f"Incompatible depth of encoder (depth={len(encoder)}) and decoder (depth={len(decoder)})")
self.encoder = encoder
self.base = base
self.decoder = decoder
if out_conv is None:
self._out_channels = self.decoder.out_channels
else:
self._out_channels = out_conv.out_channels
self.out_conv = out_conv
self.final_activation = self._get_activation(final_activation)
@property
def in_channels(self):
return self.encoder.in_channels
@property
def out_channels(self):
return self._out_channels
@property
def depth(self):
return len(self.encoder)
def _get_activation(self, activation):
return_activation = None
if activation is None:
return None
if isinstance(activation, nn.Module):
return activation
if isinstance(activation, str):
return_activation = getattr(nn, activation, None)
if return_activation is None:
raise ValueError(f"Invalid activation: {activation}")
return return_activation()
# load encoder / decoder / base states for pretraining
def load_encoder_state(self, state):
self.encoder.load_state_dict(state)
def load_decoder_state(self, state):
self.decoder.load_state_dict(state)
def load_base_state(self, state):
self.base.load_state_dict(state)
def _apply_default(self, x):
self.encoder.return_outputs = True
self.decoder.return_outputs = False
x, encoder_out = self.encoder(x)
x = self.base(x)
x = self.decoder(x, encoder_inputs=encoder_out[::-1])
if self.out_conv is not None:
x = self.out_conv(x)
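# downscale the prediction by a factor of 2, so the output shape differs from the input shape -
# this is the point of this diff-output-shape example model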
x = torch.nn.functional.interpolate(x, scale_factor=0.5)
if self.final_activation is not None:
x = self.final_activation(x)
return x
def forward(self, x):
out = self._apply_default(x)
return out
def _update_conv_kwargs(kwargs, scale_factor):
# if the scale factor is a scalar or all entries are the same we don't need to update the kwargs
if isinstance(scale_factor, int) or scale_factor.count(scale_factor[0]) == len(scale_factor):
return kwargs
else: # otherwise set anisotropic kernel
kernel_size = kwargs.get('kernel_size', 3)
padding = kwargs.get('padding', 1)
# bail out if kernel size or padding aren't scalars, because it's
# unclear what to do in this case
if not (isinstance(kernel_size, int) and isinstance(padding, int)):
return kwargs
kernel_size = tuple(1 if factor == 1 else kernel_size for factor in scale_factor)
padding = tuple(0 if factor == 1 else padding for factor in scale_factor)
kwargs.update({'kernel_size': kernel_size, 'padding': padding})
return kwargs
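# hedged example for the helper above: scale_factor=[1, 2, 2] with the defaults kernel_size=3, padding=1
# yields {'kernel_size': (1, 3, 3), 'padding': (0, 1, 1)}, i.e. a kernel extent of 1 along the axis
# that is not downsampled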
class Encoder(nn.Module):
def __init__(
self,
features,
scale_factors,
conv_block_impl,
pooler_impl,
anisotropic_kernel=False,
**conv_block_kwargs
):
super().__init__()
if len(features) != len(scale_factors) + 1:
raise ValueError("Incompatible number of features {len(features)} and scale_factors {len(scale_factors)}")
conv_kwargs = [conv_block_kwargs] * len(scale_factors)
if anisotropic_kernel:
conv_kwargs = [_update_conv_kwargs(kwargs, scale_factor)
for kwargs, scale_factor in zip(conv_kwargs, scale_factors)]
self.blocks = nn.ModuleList(
[conv_block_impl(inc, outc, **kwargs)
for inc, outc, kwargs in zip(features[:-1], features[1:], conv_kwargs)]
)
self.poolers = nn.ModuleList(
[pooler_impl(factor) for factor in scale_factors]
)
self.return_outputs = True
self.in_channels = features[0]
self.out_channels = features[-1]
def __len__(self):
return len(self.blocks)
def forward(self, x):
encoder_out = []
for block, pooler in zip(self.blocks, self.poolers):
x = block(x)
encoder_out.append(x)
x = pooler(x)
if self.return_outputs:
return x, encoder_out
else:
return x
class Decoder(nn.Module):
def __init__(
self,
features,
scale_factors,
conv_block_impl,
sampler_impl,
anisotropic_kernel=False,
**conv_block_kwargs
):
super().__init__()
if len(features) != len(scale_factors) + 1:
raise ValueError("Incompatible number of features {len(features)} and scale_factors {len(scale_factors)}")
conv_kwargs = [conv_block_kwargs] * len(scale_factors)
if anisotropic_kernel:
conv_kwargs = [_update_conv_kwargs(kwargs, scale_factor)
for kwargs, scale_factor in zip(conv_kwargs, scale_factors)]
self.blocks = nn.ModuleList(
[conv_block_impl(inc, outc, **kwargs)
for inc, outc, kwargs in zip(features[:-1], features[1:], conv_kwargs)]
)
self.samplers = nn.ModuleList(
[sampler_impl(factor, inc, outc) for factor, inc, outc
in zip(scale_factors, features[:-1], features[1:])]
)
self.return_outputs = False
self.in_channels = features[0]
self.out_channels = features[-1]
def __len__(self):
return len(self.blocks)
# FIXME this prevents traces from being valid for other input sizes, need to find
# a solution to traceable cropping
def _crop(self, x, shape):
shape_diff = [(xsh - sh) // 2 for xsh, sh in zip(x.shape, shape)]
crop = tuple([slice(sd, xsh - sd) for sd, xsh in zip(shape_diff, x.shape)])
return x[crop]
# # Implementation with torch.narrow, does not fix the tracing warnings!
# for dim, (sh, sd) in enumerate(zip(shape, shape_diff)):
# x = torch.narrow(x, dim, sd, sh)
# return x
def _concat(self, x1, x2):
return torch.cat([x1, self._crop(x2, x1.shape)], dim=1)
def forward(self, x, encoder_inputs):
if len(encoder_inputs) != len(self.blocks):
raise ValueError(f"Invalid number of encoder_inputs: expect {len(self.blocks)}, got {len(encoder_inputs)}")
decoder_out = []
for block, sampler, from_encoder in zip(self.blocks, self.samplers, encoder_inputs):
x = sampler(x)
x = block(self._concat(x, from_encoder))
decoder_out.append(x)
if self.return_outputs:
return decoder_out + [x]
else:
return x
def get_norm_layer(norm, dim, channels, n_groups=32):
if norm is None:
return None
if norm == 'InstanceNorm':
return nn.InstanceNorm2d(channels) if dim == 2 else nn.InstanceNorm3d(channels)
elif norm == 'GroupNorm':
return nn.GroupNorm(min(n_groups, channels), channels)
elif norm == 'BatchNorm':
return nn.BatchNorm2d(channels) if dim == 2 else nn.BatchNorm3d(channels)
else:
raise ValueError(f"Invalid norm: expect one of 'InstanceNorm', 'BatchNorm' or 'GroupNorm', got {norm}")
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dim,
kernel_size=3, padding=1, norm='InstanceNorm'):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
conv = nn.Conv2d if dim == 2 else nn.Conv3d
if norm is None:
self.block = nn.Sequential(
conv(in_channels, out_channels,
kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True),
conv(out_channels, out_channels,
kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True)
)
else:
self.block = nn.Sequential(
get_norm_layer(norm, dim, in_channels),
conv(in_channels, out_channels,
kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True),
get_norm_layer(norm, dim, out_channels),
conv(out_channels, out_channels,
kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.block(x)
class Upsampler(nn.Module):
def __init__(self, scale_factor,
in_channels, out_channels,
dim, mode):
super().__init__()
self.mode = mode
self.scale_factor = scale_factor
conv = nn.Conv2d if dim == 2 else nn.Conv3d
self.conv = conv(in_channels, out_channels, 1)
def forward(self, x):
x = nn.functional.interpolate(x, scale_factor=self.scale_factor,
mode=self.mode, align_corners=False)
x = self.conv(x)
return x
#
# 2d unet implementations
#
class ConvBlock2d(ConvBlock):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__(in_channels, out_channels, dim=2, **kwargs)
class Upsampler2d(Upsampler):
def __init__(self, scale_factor,
in_channels, out_channels,
mode='bilinear'):
super().__init__(scale_factor, in_channels, out_channels,
dim=2, mode=mode)
class ResizeUNet(UNetBase):
def __init__(
self,
in_channels,
out_channels,
depth=4,
initial_features=32,
gain=2,
final_activation=None,
return_side_outputs=False,
conv_block_impl=ConvBlock2d,
pooler_impl=nn.MaxPool2d,
sampler_impl=Upsampler2d,
**conv_block_kwargs
):
features_encoder = [in_channels] + [initial_features * gain ** i for i in range(depth)]
features_decoder = [initial_features * gain ** i for i in range(depth + 1)][::-1]
scale_factors = depth * [2]
if return_side_outputs:
if isinstance(out_channels, int) or out_channels is None:
out_channels = [out_channels] * depth
            if len(out_channels) != depth:
                raise ValueError(f"Invalid number of out_channels: expected {depth}, got {len(out_channels)}")
out_conv = nn.ModuleList(
[nn.Conv2d(feat, outc, 1) for feat, outc in zip(features_decoder[1:], out_channels)]
)
else:
out_conv = None if out_channels is None else nn.Conv2d(features_decoder[-1], out_channels, 1)
super().__init__(
encoder=Encoder(
features=features_encoder,
scale_factors=scale_factors,
conv_block_impl=conv_block_impl,
pooler_impl=pooler_impl,
**conv_block_kwargs
),
decoder=Decoder(
features=features_decoder,
scale_factors=scale_factors[::-1],
conv_block_impl=conv_block_impl,
sampler_impl=sampler_impl,
**conv_block_kwargs
),
base=conv_block_impl(
features_encoder[-1], features_encoder[-1] * gain,
**conv_block_kwargs
),
out_conv=out_conv,
final_activation=final_activation,
)
self.init_kwargs = {'in_channels': in_channels, 'out_channels': out_channels, 'depth': depth,
'initial_features': initial_features, 'gain': gain,
'final_activation': final_activation, 'return_side_outputs': return_side_outputs,
'conv_block_impl': conv_block_impl, 'pooler_impl': pooler_impl,
'sampler_impl': sampler_impl, **conv_block_kwargs}
```
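A minimal usage sketch for the interpolation-based 2D U-Net defined above. The import path `torch_em.model.unet` is an assumption from the package layout, and the forward pass relies on the `UNetBase` pattern (encoder, base, decoder, out_conv) used by this file.
```python
# Hypothetical usage sketch; the import path is an assumption.
import torch
from torch_em.model.unet import ResizeUNet  # assumed import path

model = ResizeUNet(in_channels=1, out_channels=2, depth=4, initial_features=32)
x = torch.rand(1, 1, 256, 256)   # (batch, channel, height, width)
y = model(x)                     # expected shape (1, 2, 256, 256) for this configuration
print(y.shape)
```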
#### File: data/datasets/covid_if.py
```python
import os
from glob import glob
import torch_em
from .util import download_source, unzip, update_kwargs
COVID_IF_URL = "https://zenodo.org/record/5092850/files/covid-if-groundtruth.zip?download=1"
CHECKSUM = "d9cd6c85a19b802c771fb4ff928894b19a8fab0e0af269c49235fdac3f7a60e1"
def _download_covid_if(path, download):
url = COVID_IF_URL
checksum = CHECKSUM
if os.path.exists(path):
return
os.makedirs(path, exist_ok=True)
zip_path = os.path.join(path, "covid-if.zip")
download_source(zip_path, url, download, checksum)
unzip(zip_path, path, True)
def get_covid_if_loader(path, patch_shape, sample_range=None,
target="cells", download=False,
offsets=None, boundaries=False, binary=False,
**kwargs):
available_targets = ("cells", "nuclei")
# TODO support all of these
# available_targets = ("cells", "nuclei", "infected_cells")
assert target in available_targets, f"{target} not found in {available_targets}"
if target == "cells":
raw_key = "raw/serum_IgG/s0"
label_key = "labels/cells/s0"
elif target == "nuclei":
raw_key = "raw/nuclei/s0"
label_key = "labels/nuclei/s0"
# elif target == "infected_cells":
_download_covid_if(path, download)
file_paths = glob(os.path.join(path, "*.h5"))
file_paths.sort()
if sample_range is not None:
start, stop = sample_range
if start is None:
start = 0
if stop is None:
stop = len(file_paths)
file_paths = [os.path.join(path, f"gt_image_{idx:03}.h5") for idx in range(start, stop)]
assert all(os.path.exists(fp) for fp in file_paths), f"Invalid sample range {sample_range}"
assert sum((offsets is not None, boundaries, binary)) <= 1
if offsets is not None:
# we add a binary target channel for foreground background segmentation
label_transform = torch_em.transform.label.AffinityTransform(offsets=offsets,
add_binary_target=True,
add_mask=True)
msg = "Offsets are passed, but 'label_transform2' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform2", label_transform, msg=msg)
elif boundaries:
label_transform = torch_em.transform.label.BoundaryTransform(add_binary_target=True)
msg = "Boundaries is set to true, but 'label_transform' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform", label_transform, msg=msg)
elif binary:
label_transform = torch_em.transform.label.labels_to_binary
msg = "Binary is set to true, but 'label_transform' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform", label_transform, msg=msg)
kwargs = update_kwargs(kwargs, "patch_shape", patch_shape)
kwargs = update_kwargs(kwargs, "ndim", 2)
return torch_em.default_segmentation_loader(
file_paths, raw_key,
file_paths, label_key,
**kwargs
)
```
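A hedged sketch of how `get_covid_if_loader` might be called. The import path is assumed from the file location, and `batch_size` is forwarded through `**kwargs` to `torch_em.default_segmentation_loader`, which is assumed to accept it.
```python
# Hypothetical call of the loader above; path and parameters are placeholders.
from torch_em.data.datasets.covid_if import get_covid_if_loader  # assumed import path

loader = get_covid_if_loader(
    path="./data/covid-if",     # folder the zip is downloaded and extracted to
    patch_shape=(512, 512),
    target="cells",
    download=True,
    boundaries=True,            # adds a boundary target channel
    batch_size=1,               # forwarded to the default segmentation loader
)
x, y = next(iter(loader))
print(x.shape, y.shape)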
#### File: data/datasets/hpa.py
```python
import os
import json
import shutil
from concurrent import futures
from functools import partial
from glob import glob
import imageio
import h5py
import numpy as np
from PIL import Image, ImageDraw
from skimage import draw as skimage_draw
from skimage import morphology
from tqdm import tqdm
import torch_em
from .util import download_source, unzip, update_kwargs
URLS = {
"segmentation": "https://zenodo.org/record/4665863/files/hpa_dataset_v2.zip"
}
CHECKSUMS = {
"segmentation": "dcd6072293d88d49c71376d3d99f3f4f102e4ee83efb0187faa89c95ec49faa9"
}
def _download_hpa_data(path, name, download):
os.makedirs(path, exist_ok=True)
url = URLS[name]
checksum = CHECKSUMS[name]
zip_path = os.path.join(path, "data.zip")
download_source(zip_path, url, download=download, checksum=checksum)
unzip(zip_path, path, remove=True)
def _load_features(features):
# Loop over list and create simple dictionary & get size of annotations
annot_dict = {}
skipped = []
for feat_idx, feat in enumerate(features):
if feat["geometry"]["type"] not in ["Polygon", "LineString"]:
skipped.append(feat["geometry"]["type"])
continue
# skip empty roi
if len(feat["geometry"]["coordinates"][0]) <= 0:
continue
key_annot = "annot_" + str(feat_idx)
annot_dict[key_annot] = {}
annot_dict[key_annot]["type"] = feat["geometry"]["type"]
annot_dict[key_annot]["pos"] = np.squeeze(
np.asarray(feat["geometry"]["coordinates"])
)
annot_dict[key_annot]["properties"] = feat["properties"]
# print("Skipped geometry type(s):", skipped)
return annot_dict
def _generate_binary_masks(annot_dict, shape, erose_size=5, obj_size_rem=500, save_indiv=False):
# Get dimensions of image and created masks of same size
# This we need to save somewhere (e.g. as part of the geojson file?)
# Filled masks and edge mask for polygons
mask_fill = np.zeros(shape, dtype=np.uint8)
mask_edge = np.zeros(shape, dtype=np.uint8)
mask_labels = np.zeros(shape, dtype=np.uint16)
rr_all = []
cc_all = []
if save_indiv is True:
        mask_edge_indiv = np.zeros(
            (shape[0], shape[1], len(annot_dict)), dtype=bool
        )
        mask_fill_indiv = np.zeros(
            (shape[0], shape[1], len(annot_dict)), dtype=bool
        )
# Image used to draw lines - for edge mask for freelines
im_freeline = Image.new("1", (shape[1], shape[0]), color=0)
draw = ImageDraw.Draw(im_freeline)
# Loop over all roi
i_roi = 0
for roi_key, roi in annot_dict.items():
roi_pos = roi["pos"]
# Check region type
# freeline - line
if roi["type"] == "freeline" or roi["type"] == "LineString":
# Loop over all pairs of points to draw the line
for ind in range(roi_pos.shape[0] - 1):
line_pos = (
roi_pos[ind, 1],
roi_pos[ind, 0],
roi_pos[ind + 1, 1],
roi_pos[ind + 1, 0],
)
draw.line(line_pos, fill=1, width=erose_size)
# freehand - polygon
elif (
roi["type"] == "freehand"
or roi["type"] == "polygon"
or roi["type"] == "polyline"
or roi["type"] == "Polygon"
):
# Draw polygon
rr, cc = skimage_draw.polygon(
[shape[0] - r for r in roi_pos[:, 1]], roi_pos[:, 0]
)
# Make sure it's not outside
rr[rr < 0] = 0
rr[rr > shape[0] - 1] = shape[0] - 1
cc[cc < 0] = 0
cc[cc > shape[1] - 1] = shape[1] - 1
# Test if this region has already been added
if any(np.array_equal(rr, rr_test) for rr_test in rr_all) and any(
np.array_equal(cc, cc_test) for cc_test in cc_all
):
# print('Region #{} has already been used'.format(i +
# 1))
continue
rr_all.append(rr)
cc_all.append(cc)
# Generate mask
mask_fill_roi = np.zeros(shape, dtype=np.uint8)
mask_fill_roi[rr, cc] = 1
# Erode to get cell edge - both arrays are boolean to be used as
# index arrays later
mask_fill_roi_erode = morphology.binary_erosion(
mask_fill_roi, np.ones((erose_size, erose_size))
)
mask_edge_roi = (
mask_fill_roi.astype("int") - mask_fill_roi_erode.astype("int")
).astype("bool")
# Save array for mask and edge
mask_fill[mask_fill_roi > 0] = 1
mask_edge[mask_edge_roi] = 1
mask_labels[mask_fill_roi > 0] = i_roi + 1
if save_indiv is True:
mask_edge_indiv[:, :, i_roi] = mask_edge_roi.astype("bool")
mask_fill_indiv[:, :, i_roi] = mask_fill_roi_erode.astype("bool")
i_roi = i_roi + 1
else:
roi_type = roi["type"]
raise NotImplementedError(
f'Mask for roi type "{roi_type}" can not be created'
)
del draw
# Convert mask from free-lines to numpy array
mask_edge_freeline = np.asarray(im_freeline)
mask_edge_freeline = mask_edge_freeline.astype("bool")
# Post-processing of fill and edge mask - if defined
mask_dict = {}
if np.any(mask_fill):
# (1) remove edges , (2) remove small objects
mask_fill = mask_fill & ~mask_edge
mask_fill = morphology.remove_small_objects(
mask_fill.astype("bool"), obj_size_rem
)
# For edge - consider also freeline edge mask
mask_edge = mask_edge.astype("bool")
mask_edge = np.logical_or(mask_edge, mask_edge_freeline)
# Assign to dictionary for return
mask_dict["edge"] = mask_edge
mask_dict["fill"] = mask_fill.astype("bool")
mask_dict["labels"] = mask_labels.astype("uint16")
if save_indiv is True:
mask_dict["edge_indiv"] = mask_edge_indiv
mask_dict["fill_indiv"] = mask_fill_indiv
else:
mask_dict["edge_indiv"] = np.zeros(shape + (1,), dtype=np.uint8)
mask_dict["fill_indiv"] = np.zeros(shape + (1,), dtype=np.uint8)
# Only edge mask present
elif np.any(mask_edge_freeline):
mask_dict["edge"] = mask_edge_freeline
mask_dict["fill"] = mask_fill.astype("bool")
mask_dict["labels"] = mask_labels.astype("uint16")
mask_dict["edge_indiv"] = np.zeros(shape + (1,), dtype=np.uint8)
mask_dict["fill_indiv"] = np.zeros(shape + (1,), dtype=np.uint8)
else:
raise Exception("No mask has been created.")
return mask_dict
# adapted from
# https://github.com/imjoy-team/kaibu-utils/blob/main/kaibu_utils/__init__.py#L267
def _get_labels(annotation_file, shape, label="*"):
with open(annotation_file) as f:
features = json.load(f)["features"]
if len(features) == 0:
return np.zeros(shape, dtype="uint16")
annot_dict_all = _load_features(features)
annot_types = set(
annot_dict_all[k]["properties"].get("label", "default")
for k in annot_dict_all.keys()
)
for annot_type in annot_types:
if label and label != "*" and annot_type != label:
continue
# print("annot_type: ", annot_type)
# Filter the annotations by label
annot_dict = {
k: annot_dict_all[k]
for k in annot_dict_all.keys()
if label == "*"
or annot_dict_all[k]["properties"].get("label", "default") == annot_type
}
mask_dict = _generate_binary_masks(
annot_dict, shape,
erose_size=5,
obj_size_rem=500,
save_indiv=True,
)
mask = mask_dict["labels"]
return mask
    raise RuntimeError(f"No mask could be created for label {label} from {annotation_file}")
def _process_image(in_folder, out_path, channels, with_labels):
# TODO double check the default order and color matching
# correspondence to the HPA kaggle data:
# microtubules: red
# nuclei: blue
# er: yellow
# protein: green
# default order: rgby = micro, prot, nuclei, er
all_channels = {"microtubules", "protein", "nuclei", "er"}
assert len(list(set(channels) - all_channels)) == 0
raw = []
for chan in channels:
im_path = os.path.join(in_folder, f"{chan}.png")
assert os.path.exists(im_path), im_path
raw.append(imageio.imread(im_path)[None])
raw = np.concatenate(raw, axis=0)
if with_labels:
annotation_file = os.path.join(in_folder, "annotation.json")
assert os.path.exists(annotation_file), annotation_file
labels = _get_labels(annotation_file, raw.shape[1:])
assert labels.shape == raw.shape[1:]
with h5py.File(out_path, "w") as f:
f.create_dataset("raw", data=raw, compression="gzip")
if with_labels:
f.create_dataset("labels", data=labels, compression="gzip")
def _process_split(root_in, root_out, channels, n_workers, with_labels):
os.makedirs(root_out, exist_ok=True)
inputs = glob(os.path.join(root_in, "*"))
outputs = [os.path.join(root_out, f"{os.path.split(inp)[1]}.h5") for inp in inputs]
process = partial(_process_image, channels=channels, with_labels=with_labels)
with futures.ProcessPoolExecutor(n_workers) as pp:
list(tqdm(pp.map(process, inputs, outputs), total=len(inputs), desc=f"Process data in {root_in}"))
# save data as h5 with 4 channel raw data and labels extracted from the geo json
def _process_hpa_data(path, channels, n_workers, remove):
in_path = os.path.join(path, "hpa_dataset_v2")
assert os.path.exists(in_path), in_path
for split in ("train", "test", "valid"):
out_split = "val" if split == "valid" else split
_process_split(os.path.join(in_path, split), os.path.join(path, out_split),
channels=channels, n_workers=n_workers, with_labels=split != "test")
if remove:
shutil.rmtree(in_path)
def _check_data(path):
have_train = len(glob(os.path.join(path, "train", "*.h5"))) == 257
have_test = len(glob(os.path.join(path, "test", "*.h5"))) == 10
have_val = len(glob(os.path.join(path, "val", "*.h5"))) == 9
return have_train and have_test and have_val
def get_hpa_segmentation_loader(path, patch_shape, split,
offsets=None, boundaries=False, binary=False,
channels=["microtubules", "protein", "nuclei", "er"],
download=False, n_workers_preproc=8, **kwargs):
data_is_complete = _check_data(path)
if not data_is_complete:
_download_hpa_data(path, "segmentation", download)
_process_hpa_data(path, channels, n_workers_preproc, remove=True)
assert sum((offsets is not None, boundaries, binary)) <= 1
if offsets is not None:
# we add a binary target channel for foreground background segmentation
label_transform = torch_em.transform.label.AffinityTransform(offsets=offsets,
add_binary_target=True,
add_mask=True)
msg = "Offsets are passed, but 'label_transform2' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform2", label_transform, msg=msg)
elif boundaries:
label_transform = torch_em.transform.label.BoundaryTransform(add_binary_target=True)
msg = "Boundaries is set to true, but 'label_transform' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform", label_transform, msg=msg)
elif binary:
label_transform = torch_em.transform.label.labels_to_binary
msg = "Binary is set to true, but 'label_transform' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform", label_transform, msg=msg)
kwargs = update_kwargs(kwargs, "patch_shape", patch_shape)
kwargs = update_kwargs(kwargs, "ndim", 2)
kwargs = update_kwargs(kwargs, "with_channels", True)
paths = glob(os.path.join(path, split, "*.h5"))
raw_key = "raw"
label_key = "labels"
return torch_em.default_segmentation_loader(
paths, raw_key, paths, label_key, **kwargs
)
```
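A hedged sketch of how the HPA loader above might be used. The import path is assumed from the file location; `batch_size` is forwarded via `**kwargs` to the default segmentation loader.
```python
# Hypothetical call of the HPA segmentation loader; path and parameters are placeholders.
from torch_em.data.datasets.hpa import get_hpa_segmentation_loader  # assumed import path

loader = get_hpa_segmentation_loader(
    path="./data/hpa",
    patch_shape=(512, 512),
    split="train",
    binary=True,                # binary foreground/background target
    download=True,
    n_workers_preproc=8,
    batch_size=1,               # forwarded to the default segmentation loader
)
```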
#### File: data/datasets/plantseg.py
```python
import os
from glob import glob
import numpy as np
import torch_em
from elf.io import open_file, is_group
from skimage.transform import rescale
from .util import download_source, update_kwargs
# TODO just download the full zip from https://osf.io/uzq3w/ instead
# but this is currently broken
URLS = {
"root": {
"cells": [],
"nuclei": [
"https://osf.io/n9y34/download",
"https://osf.io/su27h/download",
"https://osf.io/q5rxz/download",
]
},
"ovules": {
"cells": []
}
}
CHECKSUMS = {
"root": {
"cells": [],
"nuclei": [
"ff9e86cb05d56ae2463e7482ad248a985a2378b1c7f3d92022d1191a6504adfa",
"b21fd70556591ca04e83b1461324d0a14e31b1dad24fe4b1efe9712dded2281c",
"c8976fefdc06d92290ba6c2b7686fd2c1a285a800a3b6d8a002e1ec67caca072",
]
},
"ovules": {
"cells": []
}
}
NATIVE_RESOLUTION = (0.235, 0.075, 0.075)
def _resize(path, native_resolution, target_resolution):
assert len(native_resolution) == len(target_resolution)
scale_factor = tuple(nres / tres for nres, tres in zip(native_resolution, target_resolution))
paths = glob(os.path.join(path, "*.h5"))
# check if anything needs to be resized
need_resize = []
for pp in paths:
with open_file(pp, "r") as f:
for name, obj in f.items():
rescaled_name = f"rescaled/{name}"
if is_group(obj):
continue
if rescaled_name in f:
this_resolution = f[rescaled_name].attrs["resolution"]
correct_res = all(
np.isclose(this_re, target_re) for this_re, target_re in zip(this_resolution, target_resolution)
)
if correct_res:
continue
                need_resize.append(pp)
# resize if necessary
need_resize = list(set(need_resize))
for pp in need_resize:
with open_file(pp, mode="a") as f:
if "rescaled" in f:
del f["rescaled"]
for name, obj in f.items():
print("Resizing", pp, name)
print("from resolution (microns)", native_resolution, "to", target_resolution)
print("with scale factor", scale_factor)
vol = obj[:]
if name == "raw":
vol = rescale(vol, scale_factor, preserve_range=True).astype(vol.dtype)
else:
vol = rescale(
vol, scale_factor, preserve_range=True, order=0, anti_aliasing=False
).astype(vol.dtype)
                ds = f.create_dataset(f"rescaled/{name}", data=vol, compression="gzip")
ds.attrs["resolution"] = target_resolution
def _download_plantseg(path, download, name, type_):
urls = URLS[name][type_]
checksums = CHECKSUMS[name][type_]
assert len(urls) == len(checksums)
os.makedirs(path, exist_ok=True)
for ii, (url, checksum) in enumerate(zip(urls, checksums)):
out_path = os.path.join(path, f"{name}_{type_}_{ii}.h5")
if os.path.exists(out_path):
continue
download_source(out_path, url, download, checksum)
def get_root_nucleus_loader(
path,
patch_shape,
samples=None,
target_resolution=None,
download=False,
offsets=None,
boundaries=False,
binary=False,
**kwargs,
):
assert len(patch_shape) == 3
_download_plantseg(path, download, "root", "nuclei")
if target_resolution is not None:
_resize(path, NATIVE_RESOLUTION, target_resolution)
file_paths = glob(os.path.join(path, "*.h5"))
file_paths.sort()
if samples is not None:
assert all(isinstance(sample, int) for sample in samples)
assert all(sample < len(file_paths) for sample in samples)
file_paths = [file_paths[sample] for sample in samples]
assert sum((offsets is not None, boundaries, binary)) <= 1
if offsets is not None:
# we add a binary target channel for foreground background segmentation
label_transform = torch_em.transform.label.AffinityTransform(offsets=offsets,
add_binary_target=True,
add_mask=True)
msg = "Offsets are passed, but 'label_transform2' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform2", label_transform, msg=msg)
elif boundaries:
label_transform = torch_em.transform.label.BoundaryTransform(add_binary_target=True)
msg = "Boundaries is set to true, but 'label_transform' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform", label_transform, msg=msg)
elif binary:
label_transform = torch_em.transform.label.labels_to_binary
msg = "Binary is set to true, but 'label_transform' is in the kwargs. It will be over-ridden."
kwargs = update_kwargs(kwargs, "label_transform", label_transform, msg=msg)
kwargs = update_kwargs(kwargs, "patch_shape", patch_shape)
kwargs = update_kwargs(kwargs, "ndim", 3)
if target_resolution is None:
raw_key, label_key = "raw", "label_uint16_smooth"
else:
raw_key, label_key = "rescaled/raw", "rescaled/label_uint16_smooth"
return torch_em.default_segmentation_loader(
file_paths, raw_key,
file_paths, label_key,
**kwargs
)
# TODO
def get_root_cell_loader():
pass
# TODO
def get_ovules_loader():
pass
```
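A hedged sketch of calling the root-nucleus loader above. The import path and the target resolution value are placeholders; `batch_size` is forwarded to the default segmentation loader via `**kwargs`.
```python
# Hypothetical call of the root-nucleus loader; path and resolution are placeholders.
from torch_em.data.datasets.plantseg import get_root_nucleus_loader  # assumed import path

loader = get_root_nucleus_loader(
    path="./data/plantseg_root_nuclei",
    patch_shape=(32, 128, 128),              # 3d patch shape
    target_resolution=(0.25, 0.1, 0.1),      # rescaled from the native (0.235, 0.075, 0.075) microns
    download=True,
    binary=True,
    batch_size=1,                            # forwarded to the default segmentation loader
)
```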
#### File: torch_em/data/image_collection_dataset.py
```python
import numpy as np
import torch
from ..util import (ensure_spatial_array, ensure_tensor_with_channels,
load_image, supports_memmap)
# TODO pad images that are too small for the patch shape
class ImageCollectionDataset(torch.utils.data.Dataset):
def _check_inputs(self, raw_images, label_images):
if len(raw_images) != len(label_images):
raise ValueError(f"Expect same number of and label images, got {len(raw_images)} and {len(label_images)}")
is_multichan = None
for raw_im, label_im in zip(raw_images, label_images):
# we only check for compatible shapes if both images support memmap, because
# we don't want to load everything into ram
if supports_memmap(raw_im) and supports_memmap(label_im):
shape = load_image(raw_im).shape
assert len(shape) in (2, 3)
multichan = len(shape) == 3
if is_multichan is None:
is_multichan = multichan
else:
assert is_multichan == multichan
# we assume axis last
if is_multichan:
shape = shape[:-1]
label_shape = load_image(label_im).shape
if shape != label_shape:
msg = f"Expect raw and labels of same shape, got {shape}, {label_shape} for {raw_im}, {label_im}"
raise ValueError(msg)
def __init__(
self,
raw_image_paths,
label_image_paths,
patch_shape,
raw_transform=None,
label_transform=None,
label_transform2=None,
transform=None,
dtype=torch.float32,
label_dtype=torch.float32,
n_samples=None,
):
self._check_inputs(raw_image_paths, label_image_paths)
self.raw_images = raw_image_paths
self.label_images = label_image_paths
self._ndim = 2
assert len(patch_shape) == self._ndim
self.patch_shape = patch_shape
self.raw_transform = raw_transform
self.label_transform = label_transform
self.label_transform2 = label_transform2
self.transform = transform
self.dtype = dtype
self.label_dtype = label_dtype
if n_samples is None:
self._len = len(self.raw_images)
self.sample_random_index = False
else:
self._len = n_samples
self.sample_random_index = True
def __len__(self):
return self._len
@property
def ndim(self):
return self._ndim
def _sample_bounding_box(self, shape):
if any(sh < psh for sh, psh in zip(shape, self.patch_shape)):
raise NotImplementedError("Image padding is not supported yet.")
bb_start = [
np.random.randint(0, sh - psh) if sh - psh > 0 else 0
for sh, psh in zip(shape, self.patch_shape)
]
return tuple(slice(start, start + psh) for start, psh in zip(bb_start, self.patch_shape))
def _get_sample(self, index):
if self.sample_random_index:
index = np.random.randint(0, len(self.raw_images))
# these are just the file paths
raw, label = self.raw_images[index], self.label_images[index]
raw = load_image(raw)
label = load_image(label)
have_raw_channels = raw.ndim == 3
have_label_channels = label.ndim == 3
if have_label_channels:
raise NotImplementedError("Multi-channel labels are not supported.")
shape = raw.shape
# we assume images are loaded with channel last!
if have_raw_channels:
shape = shape[:-1]
# sample random bounding box for this image
bb = self._sample_bounding_box(shape)
raw = np.array(raw[bb])
label = np.array(label[bb])
# to channel first
if have_raw_channels:
raw = raw.transpose((2, 0, 1))
return raw, label
def __getitem__(self, index):
raw, labels = self._get_sample(index)
initial_label_dtype = labels.dtype
if self.raw_transform is not None:
raw = self.raw_transform(raw)
if self.label_transform is not None:
labels = self.label_transform(labels)
if self.transform is not None:
raw, labels = self.transform(raw, labels)
# if self.trafo_halo is not None:
# raw = self.crop(raw)
# labels = self.crop(labels)
# support enlarging bounding box here as well (for affinity transform) ?
if self.label_transform2 is not None:
labels = ensure_spatial_array(labels, self.ndim, dtype=initial_label_dtype)
labels = self.label_transform2(labels)
raw = ensure_tensor_with_channels(raw, ndim=self._ndim, dtype=self.dtype)
labels = ensure_tensor_with_channels(labels, ndim=self._ndim, dtype=self.label_dtype)
return raw, labels
```
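A direct-construction sketch for the dataset above. The import from `torch_em.data` matches the import used in `validation.py` later in this record; the file lists are placeholders.
```python
# Hypothetical construction of the dataset above from matching 2d image / label files.
from glob import glob
from torch_em.data import ImageCollectionDataset

raw_paths = sorted(glob("images/*.tif"))     # placeholder file lists
label_paths = sorted(glob("labels/*.tif"))
ds = ImageCollectionDataset(raw_paths, label_paths, patch_shape=(256, 256))
raw, labels = ds[0]                          # tensors with an explicit channel dimension
```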
#### File: torch_em/shallow2deep/shallow2deep_dataset.py
```python
import pickle
import numpy as np
import torch
from torch_em.segmentation import (check_paths, is_segmentation_dataset,
get_data_loader, get_raw_transform,
samples_to_datasets, _get_default_transform)
from torch_em.data import ConcatDataset, SegmentationDataset
from .prepare_shallow2deep import _get_filters, _apply_filters
from ..util import ensure_tensor_with_channels, ensure_spatial_array
class Shallow2DeepDataset(SegmentationDataset):
_rf_paths = None
_filter_config = None
@property
def rf_paths(self):
return self._rf_paths
@rf_paths.setter
def rf_paths(self, value):
self._rf_paths = value
@property
def filter_config(self):
return self._filter_config
@filter_config.setter
def filter_config(self, value):
self._filter_config = value
@property
def rf_channels(self):
return self._rf_channels
@rf_channels.setter
def rf_channels(self, value):
if isinstance(value, int):
self.rf_channels = (value,)
else:
assert isinstance(value, tuple)
self._rf_channels = value
def _predict_rf(self, raw):
n_rfs = len(self._rf_paths)
rf_path = self._rf_paths[np.random.randint(0, n_rfs)]
with open(rf_path, "rb") as f:
rf = pickle.load(f)
filters_and_sigmas = _get_filters(self.ndim, self._filter_config)
features = _apply_filters(raw, filters_and_sigmas)
assert rf.n_features_in_ == features.shape[1], f"{rf.n_features_in_}, {features.shape[1]}"
try:
pred_ = rf.predict_proba(features)
assert pred_.shape[1] > max(self.rf_channels), f"{pred_.shape}, {self.rf_channels}"
pred_ = pred_[:, self.rf_channels]
except IndexError:
print("Prediction failed:", features.shape)
pred_shape = (len(features), len(self.rf_channels))
pred_ = np.zeros(pred_shape, dtype="float32")
spatial_shape = raw.shape
out_shape = (len(self.rf_channels),) + spatial_shape
prediction = np.zeros(out_shape, dtype="float32")
for chan in range(pred_.shape[1]):
prediction[chan] = pred_[:, chan].reshape(spatial_shape)
return prediction
def __getitem__(self, index):
assert self._rf_paths is not None
raw, labels = self._get_sample(index)
initial_label_dtype = labels.dtype
if self.raw_transform is not None:
raw = self.raw_transform(raw)
if self.label_transform is not None:
labels = self.label_transform(labels)
if self.transform is not None:
raw, labels = self.transform(raw, labels)
if self.trafo_halo is not None:
raw = self.crop(raw)
labels = self.crop(labels)
# support enlarging bounding box here as well (for affinity transform) ?
if self.label_transform2 is not None:
labels = ensure_spatial_array(labels, self.ndim, dtype=initial_label_dtype)
labels = self.label_transform2(labels)
if isinstance(raw, (list, tuple)): # this can be a list or tuple due to transforms
assert len(raw) == 1
raw = raw[0]
raw = ensure_tensor_with_channels(raw, ndim=self._ndim, dtype=self.dtype)
if raw.shape[0] > 1:
raise NotImplementedError(
f"Shallow2Deep training not implemented for multi-channel input yet; got {raw.shape[0]} channels"
)
# NOTE we assume single channel raw data here; this needs to be changed for multi-channel
prediction = self._predict_rf(raw[0].numpy())
prediction = ensure_tensor_with_channels(prediction, ndim=self._ndim, dtype=self.dtype)
labels = ensure_tensor_with_channels(labels, ndim=self._ndim, dtype=self.label_dtype)
return prediction, labels
def _load_shallow2deep_dataset(raw_paths, raw_key, label_paths, label_key, rf_paths, rf_channels, **kwargs):
rois = kwargs.pop("rois", None)
filter_config = kwargs.pop("filter_config", None)
if isinstance(raw_paths, str):
if rois is not None:
assert len(rois) == 3 and all(isinstance(roi, slice) for roi in rois)
ds = Shallow2DeepDataset(raw_paths, raw_key, label_paths, label_key, roi=rois, **kwargs)
ds.rf_paths = rf_paths
ds.filter_config = filter_config
ds.rf_channels = rf_channels
else:
assert len(raw_paths) > 0
if rois is not None:
assert len(rois) == len(label_paths), f"{len(rois)}, {len(label_paths)}"
assert all(isinstance(roi, tuple) for roi in rois)
n_samples = kwargs.pop("n_samples", None)
samples_per_ds = (
[None] * len(raw_paths) if n_samples is None else samples_to_datasets(n_samples, raw_paths, raw_key)
)
ds = []
for i, (raw_path, label_path) in enumerate(zip(raw_paths, label_paths)):
roi = None if rois is None else rois[i]
dset = Shallow2DeepDataset(
raw_path, raw_key, label_path, label_key, roi=roi, n_samples=samples_per_ds[i], **kwargs
)
dset.rf_paths = rf_paths
dset.filter_config = filter_config
dset.rf_channels = rf_channels
ds.append(dset)
ds = ConcatDataset(*ds)
return ds
def get_shallow2deep_dataset(
raw_paths,
raw_key,
label_paths,
label_key,
rf_paths,
patch_shape,
raw_transform=None,
label_transform=None,
transform=None,
dtype=torch.float32,
rois=None,
n_samples=None,
sampler=None,
ndim=None,
is_seg_dataset=None,
with_channels=False,
filter_config=None,
rf_channels=(1,),
):
check_paths(raw_paths, label_paths)
if is_seg_dataset is None:
is_seg_dataset = is_segmentation_dataset(raw_paths, raw_key,
label_paths, label_key)
# we always use a raw transform in the convenience function
if raw_transform is None:
raw_transform = get_raw_transform()
# we always use augmentations in the convenience function
if transform is None:
transform = _get_default_transform(
raw_paths if isinstance(raw_paths, str) else raw_paths[0], raw_key, is_seg_dataset, ndim
)
if is_seg_dataset:
ds = _load_shallow2deep_dataset(
raw_paths,
raw_key,
label_paths,
label_key,
rf_paths,
patch_shape=patch_shape,
raw_transform=raw_transform,
label_transform=label_transform,
transform=transform,
rois=rois,
n_samples=n_samples,
sampler=sampler,
ndim=ndim,
dtype=dtype,
with_channels=with_channels,
filter_config=filter_config,
rf_channels=rf_channels,
)
else:
raise NotImplementedError("Image collection dataset for shallow2deep not implemented yet.")
return ds
def get_shallow2deep_loader(
raw_paths,
raw_key,
label_paths,
label_key,
rf_paths,
batch_size,
patch_shape,
filter_config=None,
raw_transform=None,
label_transform=None,
transform=None,
rois=None,
n_samples=None,
sampler=None,
ndim=None,
is_seg_dataset=None,
with_channels=False,
rf_channels=(1,),
**loader_kwargs,
):
ds = get_shallow2deep_dataset(
raw_paths=raw_paths,
raw_key=raw_key,
label_paths=label_paths,
label_key=label_key,
rf_paths=rf_paths,
patch_shape=patch_shape,
raw_transform=raw_transform,
label_transform=label_transform,
transform=transform,
rois=rois,
n_samples=n_samples,
sampler=sampler,
ndim=ndim,
is_seg_dataset=is_seg_dataset,
with_channels=with_channels,
filter_config=filter_config,
rf_channels=rf_channels,
)
return get_data_loader(ds, batch_size=batch_size, **loader_kwargs)
```
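A hedged sketch of the shallow2deep loader defined above. All paths are placeholders and the import path is assumed from the file location; the loader yields random-forest predictions as the network input and the transformed labels as target.
```python
# Hypothetical call of the loader above; all paths are placeholders.
from torch_em.shallow2deep.shallow2deep_dataset import get_shallow2deep_loader  # assumed import path

loader = get_shallow2deep_loader(
    raw_paths="data.h5", raw_key="raw",
    label_paths="data.h5", label_key="labels",
    rf_paths=["rfs/rf_000.pkl", "rfs/rf_001.pkl"],   # pickled random forests
    batch_size=1,
    patch_shape=(256, 256),
    rf_channels=(1,),
)
pred, target = next(iter(loader))   # random-forest predictions become the network input
```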
#### File: torch_em/util/submit_slurm.py
```python
import os
import sys
import inspect
import subprocess
from datetime import datetime
# two days in minutes
TWO_DAYS = 2 * 24 * 60
def write_slurm_template(script, out_path, env_name,
n_threads, gpu_type, n_gpus,
mem_limit, time_limit, qos,
mail_address, exclude_nodes):
slurm_template = ("#!/bin/bash\n"
"#SBATCH -A kreshuk\n"
"#SBATCH -N 1\n"
f"#SBATCH -c {n_threads}\n"
f"#SBATCH --mem {mem_limit}\n"
f"#SBATCH -t {time_limit}\n"
f"#SBATCH --qos={qos}\n"
"#SBATCH -p gpu\n"
f"#SBATCH -C gpu={gpu_type}\n"
f"#SBATCH --gres=gpu:{n_gpus}\n")
if mail_address is not None:
slurm_template += ("#SBATCH --mail-type=FAIL,BEGIN,END\n"
f"#SBATCH --mail-user={mail_address}\n")
if exclude_nodes is not None:
slurm_template += "#SBATCH --exclude=%s\n" % ",".join(exclude_nodes)
slurm_template += ("\n"
"module purge \n"
"module load GCC \n"
"source activate {env_name}\n"
"\n"
"export TRAIN_ON_CLUSTER=1\n" # we set this env variable, so that script know we"re on slurm
f"python {script} $@ \n")
with open(out_path, "w") as f:
f.write(slurm_template)
def submit_slurm(script, input_, n_threads=7, n_gpus=1,
gpu_type="2080Ti", mem_limit="64G",
time_limit=TWO_DAYS, qos="normal",
env_name=None, mail_address=None,
exclude_nodes=None):
""" Submit python script that needs gpus with given inputs on a slurm node.
"""
tmp_folder = os.path.expanduser("~/.torch_em/submission")
os.makedirs(tmp_folder, exist_ok=True)
print("Submitting training script %s to cluster" % script)
print("with arguments %s" % " ".join(input_))
script_name = os.path.split(script)[1]
dt = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
tmp_name = os.path.splitext(script_name)[0] + dt
batch_script = os.path.join(tmp_folder, "%s.sh" % tmp_name)
log = os.path.join(tmp_folder, "%s.log" % tmp_name)
err = os.path.join(tmp_folder, "%s.err" % tmp_name)
if env_name is None:
env_name = os.environ.get("CONDA_DEFAULT_ENV", None)
if env_name is None:
raise RuntimeError("Could not find conda")
print("Batch script saved at", batch_script)
print("Log will be written to %s, error log to %s" % (log, err))
write_slurm_template(script, batch_script, env_name,
int(n_threads), gpu_type, int(n_gpus),
mem_limit, int(time_limit), qos, mail_address,
exclude_nodes=exclude_nodes)
cmd = ["sbatch", "-o", log, "-e", err, "-J", script_name, batch_script]
cmd.extend(input_)
subprocess.run(cmd)
def scrape_kwargs(input_):
params = inspect.signature(submit_slurm).parameters
kwarg_names = [name for name in params if params[name].default != inspect._empty]
kwarg_names.extend([f"-{name}" for name in kwarg_names])
kwarg_names.extend([f"--{name}" for name in kwarg_names])
kwarg_positions = [i for i, inp in enumerate(input_) if inp in kwarg_names]
kwargs = {input_[i].lstrip("-"): input_[i + 1] for i in kwarg_positions}
kwarg_positions += [i + 1 for i in kwarg_positions]
input_ = [inp for i, inp in enumerate(input_) if i not in kwarg_positions]
return input_, kwargs
def main():
script = os.path.realpath(os.path.abspath(sys.argv[1]))
input_ = sys.argv[2:]
# scrape the additional arguments (n_threads, mem_limit, etc. from the input)
input_, kwargs = scrape_kwargs(input_)
submit_slurm(script, input_, **kwargs)
```
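A sketch of calling `submit_slurm` programmatically instead of through `main()`. The script name and its arguments are placeholders; the import path is assumed from the file location.
```python
# Hypothetical programmatic use of submit_slurm; script and arguments are placeholders.
from torch_em.util.submit_slurm import submit_slurm  # assumed import path

submit_slurm(
    "train_unet.py",                # training script to run on the cluster
    ["--iterations", "100000"],     # arguments forwarded to the script
    n_threads=8,
    n_gpus=1,
    gpu_type="2080Ti",
    mem_limit="64G",
)
```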
#### File: torch_em/util/validation.py
```python
import os
import imageio
import numpy as np
from elf.io import open_file
from elf.util import normalize_index
from ..data import ConcatDataset, ImageCollectionDataset, SegmentationDataset
from .util import get_trainer, get_normalizer
from .prediction import predict_with_halo
try:
import napari
except ImportError:
napari = None
# TODO implement prefab metrics
class SampleGenerator:
def __init__(self, trainer, max_samples, need_gt, n_threads):
self.need_gt = need_gt
self.n_threads = n_threads
dataset = trainer.val_loader.dataset
self.ndim = dataset.ndim
(n_samples, load_2d_from_3d, rois,
raw_paths, raw_key,
label_paths, label_key) = self.paths_from_ds(dataset)
if max_samples is None:
self.n_samples = n_samples
else:
self.n_samples = min(max_samples, n_samples)
self.load_2d_from_3d = load_2d_from_3d
self.rois = rois
self.raw_paths, self.raw_key = raw_paths, raw_key
self.label_paths, self.label_key = label_paths, label_key
if self.load_2d_from_3d:
shapes = [
open_file(rp, 'r')[self.raw_key].shape if roi is None else tuple(r.stop - r.start for r in roi)
for rp, roi in zip(self.raw_paths, self.rois)
]
lens = [shape[0] for shape in shapes]
self.offsets = np.cumsum(lens)
def paths_from_ds(self, dataset):
if isinstance(dataset, ConcatDataset):
datasets = dataset.datasets
(n_samples, load_2d_from_3d, rois,
raw_paths, raw_key,
label_paths, label_key) = self.paths_from_ds(datasets[0])
for ds in datasets[1:]:
ns, l2d3d, bb, rp, rk, lp, lk = self.paths_from_ds(ds)
assert rk == raw_key
assert lk == label_key
assert l2d3d == load_2d_from_3d
raw_paths.extend(rp)
label_paths.extend(lp)
rois.append(bb)
n_samples += ns
elif isinstance(dataset, ImageCollectionDataset):
raw_paths, label_paths = dataset.raw_images, dataset.label_images
raw_key, label_key = None, None
n_samples = len(raw_paths)
load_2d_from_3d = False
rois = [None] * n_samples
elif isinstance(dataset, SegmentationDataset):
raw_paths, label_paths = [dataset.raw_path], [dataset.label_path]
raw_key, label_key = dataset.raw_key, dataset.label_key
shape = open_file(raw_paths[0], 'r')[raw_key].shape
roi = getattr(dataset, 'roi', None)
if roi is not None:
roi = normalize_index(roi, shape)
shape = tuple(r.stop - r.start for r in roi)
rois = [roi]
if self.ndim == len(shape):
n_samples = len(raw_paths)
load_2d_from_3d = False
elif self.ndim == 2 and len(shape) == 3:
n_samples = shape[0]
load_2d_from_3d = True
else:
raise RuntimeError
else:
raise RuntimeError(f"No support for dataset of type {type(dataset)}")
return (n_samples, load_2d_from_3d, rois,
raw_paths, raw_key, label_paths, label_key)
def load_data(self, path, key, roi, z):
if key is None:
assert roi is None and z is None
return imageio.imread(path)
        bb = list(np.s_[:, :, :]) if roi is None else list(roi)
if z is not None:
bb[0] = z if roi is None else roi[0].start + z
with open_file(path, 'r') as f:
ds = f[key]
ds.n_threads = self.n_threads
            data = ds[tuple(bb)]
return data
def load_sample(self, sample_id):
if self.load_2d_from_3d:
ds_id = 0
while True:
if sample_id < self.offsets[ds_id]:
break
ds_id += 1
offset = self.offsets[ds_id - 1] if ds_id > 0 else 0
z = sample_id - offset
else:
ds_id = sample_id
z = None
roi = self.rois[ds_id]
raw = self.load_data(self.raw_paths[ds_id], self.raw_key, roi, z)
if not self.need_gt:
return raw
gt = self.load_data(self.label_paths[ds_id], self.label_key, roi, z)
return raw, gt
def __iter__(self):
for sample_id in range(self.n_samples):
sample = self.load_sample(sample_id)
yield sample
def _predict(model, raw, trainer, gpu_ids, save_path, sample_id):
save_key = f"sample{sample_id}"
if save_path is not None and os.path.exists(save_path):
with open_file(save_path, 'r') as f:
if save_key in f:
print("Loading predictions for sample", sample_id, "from file")
ds = f[save_key]
ds.n_threads = 8
return ds[:]
normalizer = get_normalizer(trainer)
dataset = trainer.val_loader.dataset
ndim = dataset.ndim
if isinstance(dataset, ConcatDataset):
patch_shape = dataset.datasets[0].patch_shape
else:
patch_shape = dataset.patch_shape
if ndim == 2 and len(patch_shape) == 3:
patch_shape = patch_shape[1:]
assert len(patch_shape) == ndim
# choose a small halo and set the correct block shape
halo = (32, 32) if ndim == 2 else (8, 16, 16)
block_shape = tuple(psh - 2 * ha for psh, ha in zip(patch_shape, halo))
if save_path is None:
output = None
else:
f = open_file(save_path, 'a')
out_shape = (trainer.model.out_channels,) + raw.shape
chunks = (1,) + block_shape
output = f.create_dataset(save_key, shape=out_shape, chunks=chunks,
compression='gzip', dtype='float32')
gpu_ids = [int(gpu) if gpu != 'cpu' else gpu for gpu in gpu_ids]
pred = predict_with_halo(
raw, model, gpu_ids, block_shape, halo,
preprocess=normalizer,
output=output
)
if output is not None:
f.close()
return pred
def _visualize(raw, prediction, ground_truth):
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(raw)
viewer.add_image(prediction)
if ground_truth is not None:
viewer.add_labels(ground_truth)
def validate_checkpoint(
checkpoint,
gpu_ids,
save_path=None,
samples=None,
max_samples=None,
visualize=True,
metrics=None,
n_threads=None
):
"""Validate model for the given checkpoint visually and/or via metrics.
"""
if visualize and napari is None:
raise RuntimeError
trainer = get_trainer(checkpoint, device='cpu')
n_threads = trainer.train_loader.num_workers if n_threads is None else n_threads
model = trainer.model
model.eval()
need_gt = metrics is not None
if samples is None:
samples = SampleGenerator(trainer, max_samples, need_gt, n_threads)
else:
assert isinstance(samples, (list, tuple))
if need_gt:
            assert all(len(sample) == 2 for sample in samples)
else:
assert all(isinstance(sample, np.ndarray) for sample in samples)
results = []
for sample_id, sample in enumerate(samples):
        raw, gt = sample if need_gt else (sample, None)
pred = _predict(model, raw, trainer, gpu_ids, save_path, sample_id)
if visualize:
_visualize(raw, pred, gt)
if metrics is not None:
res = metrics(gt, pred)
results.append(res)
return results
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', required=True,
help="Path to the checkpoint")
parser.add_argument('-g', '--gpus', type=str, nargs='+', required=True)
parser.add_argument('-n', '--max_samples', type=int, default=None)
parser.add_argument('-d', '--data', default=None)
parser.add_argument('-s', '--save_path', default=None)
parser.add_argument('-k', '--key', default=None)
parser.add_argument('-t', '--n_threads', type=int, default=None)
args = parser.parse_args()
# TODO implement loading data
assert args.data is None
validate_checkpoint(args.path, args.gpus, args.save_path,
max_samples=args.max_samples,
n_threads=args.n_threads)
``` |
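A hedged sketch of calling `validate_checkpoint` from Python rather than via the CLI in `main()`. The checkpoint path and GPU ids are placeholders; napari is only required when `visualize=True`.
```python
# Hypothetical call of validate_checkpoint; checkpoint path and GPU ids are placeholders.
from torch_em.util.validation import validate_checkpoint  # assumed import path

results = validate_checkpoint(
    checkpoint="checkpoints/my-model",
    gpu_ids=[0],
    save_path="predictions.h5",   # cache predictions per sample
    max_samples=4,
    visualize=False,
    metrics=None,
)
```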
{
"source": "jonashering/webtable-recognition",
"score": 3
} |
#### File: webtable-recognition/webtable_recognition/loader.py
```python
import os
from sklearn.datasets import load_files
from pandas import DataFrame
def load_from_directory(container_path):
"""
Load text files with categories as subfolder names to dataframe
Args:
container_path: Path to the main folder holding one subfolder per category
Returns:
Dataframe containing raw file in raw column and true label in label column
"""
if not os.path.isdir(container_path):
raise NotADirectoryError(container_path)
dataset = load_files(container_path, random_state=0)
label_names = dataset['target_names']
raw = [idx.decode('utf-8', 'replace') for idx in dataset['data']]
labels = [label_names[idx] for idx in dataset['target']]
filenames = dataset['filenames']
return DataFrame({
'raw': raw,
'label': labels,
'path': filenames
})
```
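A short usage sketch for the loader above; the folder layout (one subfolder per class label) is an assumption, matching what `sklearn.datasets.load_files` expects.
```python
# Hypothetical usage; "dataset/" is a placeholder folder with one subfolder per class.
from webtable_recognition.loader import load_from_directory

df = load_from_directory("dataset/")
print(df.shape)
print(df["label"].value_counts())
```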
#### File: webtable-recognition/webtable_recognition/transformer.py
```python
from tempfile import NamedTemporaryFile
from unicodedata import normalize
import numpy as np
import PIL.Image
import PIL.ImageChops
import imgkit
import regex as re
from pandas import read_html, DataFrame
from bs4 import BeautifulSoup as bs
from joblib import Parallel, delayed
from tqdm import tqdm_notebook as tqdm
class _BaselineSample(object):
def __init__(self, obj):
super().__init__()
self.obj = obj
self.raw = str(bs(self.obj['raw'], 'html.parser').find_all('table')[0])
self.as_df = read_html(self.raw)[0].fillna('')
def _load_row_html(self, idx):
row = ''
try:
row = bs(self.raw, 'html.parser').find_all('tr')[idx]
except IndexError:
row = bs(self.raw, 'html.parser').find_all('tr')[-1]
cells = []
for cell in row.find_all(['td', 'th']):
cells.append(str(cell))
return cells
def _load_row_clean(self, idx):
try:
row = self.as_df.iloc[idx, :]
except IndexError:
row = self.as_df.iloc[-1, :]
return row
def _load_col_html(self, idx):
col = bs(self.raw, 'html.parser').find_all('tr')
cells = []
for row in col:
cell = ''
try:
cell = row.find_all(['td', 'th'])[idx]
except IndexError:
cell = row.find_all(['td', 'th'])[-1]
cells.append(str(cell))
return cells
def _load_col_clean(self, idx):
try:
col = self.as_df.iloc[:, idx]
except IndexError:
            col = self.as_df.iloc[:, -1]
return col
def _parse(self):
self.as_df = read_html(self.raw)[0].fillna('')
self.rows = [
(self._load_row_html(0), self._load_row_clean(0)),
(self._load_row_html(1), self._load_row_clean(1)),
(self._load_row_html(self.as_df.shape[0] - 1), self._load_row_clean(self.as_df.shape[0] - 1))
]
self.cols = [
(self._load_col_html(0), self._load_col_clean(0)),
(self._load_col_html(1), self._load_col_clean(1)),
(self._load_col_html(self.as_df.shape[1] - 1), self._load_col_clean(self.as_df.shape[1] - 1))
]
def _add_global_layout_features(self):
features = {
'max_rows': self.as_df.shape[0],
'max_cols': self.as_df.shape[1],
'max_cell_length': max([len(str(elem)) for elem in np.array(self.as_df).flatten()]),
}
self.obj.update(features)
def _add_layout_features(self):
for idx, i in enumerate(self.cols):
total_rowspan = np.sum(
[int(bs(x, 'html.parser').find_all(['td', 'th'])[0].attrs.get('rowspan', 0)) for x in i[0]]
)
num_rowspan = len([1 for x in i[0] if 'rowspan' in bs(x, 'html.parser').find_all(['td', 'th'])[0].attrs])
features = {
f'avg_length_{idx}': np.mean([len(str(elem)) for elem in i[1]]),
f'length_variance_{idx}': np.var([len(str(elem)) for elem in i[1]]),
                f'ratio_colspan_{idx}': 0,  # colspan does not apply to a column
f'ratio_rowspan_{idx}': (total_rowspan - num_rowspan) / len(i[1])
}
self.obj.update(features)
for idx, i in enumerate(self.rows):
total_colspan = np.sum(
[int(bs(x, 'html.parser').find_all(['td', 'th'])[0].attrs.get('colspan', 0)) for x in i[0]]
)
num_colspan = len([1 for x in i[0] if 'colspan' in bs(x, 'html.parser').find_all(['td', 'th'])[0].attrs])
features = {
f'avg_length_{idx}': np.mean([len(str(elem)) for elem in i[1]]),
f'length_variance_{idx}': np.var([len(str(elem)) for elem in i[1]]),
f'ratio_colspan_{idx}': (total_colspan - num_colspan) / len(i[1]),
f'ratio_rowspan_{idx}': 0
}
self.obj.update(features)
def _add_html_features(self):
for idx, i in enumerate(self.rows + self.cols):
features = {
f'dist_tags_{idx}': len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('br'))]) / len(i[0]),
f'ratio_th_{idx}': len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('th'))]) / len(i[0]),
f'ratio_anchor_{idx}': len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('a'))]) / len(i[0]),
f'ratio_img_{idx}': len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('img'))]) / len(i[0]),
f'ratio_input_{idx}': len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('input'))]) / len(i[0]),
f'ratio_select_{idx}':
len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('select'))]) / len(i[0]),
f'ratio_f_{idx}':
len([1 for x in i[0] if len(bs(x, 'html.parser').find_all(['b', 'u', 'font', 'i']))]) / len(i[0]),
f'ratio_br_{idx}': len([1 for x in i[0] if len(bs(x, 'html.parser').find_all('br'))]) / len(i[0]),
}
self.obj.update(features)
def _add_lexical_features(self):
for idx, i in enumerate(self.rows + self.cols):
features = {
f'dist_string_{idx}': len(list(set([re.sub(r'\b\d+\b', '', str(x)) for x in i[1]]))) / len(i[1]),
f'ratio_colon_{idx}': np.mean([int(str(x).endswith(':')) for x in i[1]]),
f'ratio_contain_number_{idx}': np.mean([int(any(char.isdigit() for char in str(x))) for x in i[1]]),
                f'ratio_is_number_{idx}': np.mean([int(isinstance(x, (int, float))) for x in i[1]]),
f'ratio_nonempty_{idx}': np.mean([int(len(str(x)) > 0) for x in i[1]]),
}
self.obj.update(features)
def transform(self):
"""
Generate feature vector for a single web table according to baseline
Args:
None
Returns:
Dataframe with raw, label and feture vector for a single web column
"""
self._parse()
self._add_global_layout_features()
self._add_layout_features()
self._add_html_features()
self._add_lexical_features()
return self.obj
def transform_for_baseline(raw_dataframe):
"""
Transform an unprocessed web table dataset to feature space according to baseline
Args:
Dataframe with columns raw and label
Returns:
Dataframe with columns raw, label and feature space (107 columns)
"""
records = raw_dataframe.to_dict('records')
new_records = []
def _transform(rec):
try:
new_records.append(_BaselineSample(rec).transform())
        except Exception:
print('Skip ', rec['path'])
Parallel(n_jobs=-1, require='sharedmem')(delayed(_transform)(i) for i in tqdm(records))
return DataFrame(new_records)
class _ApproachSample(object):
def __init__(self,
obj,
strategy=None,
scale_cell_dimensions=True,
cell_size='5px',
long_text_threshold=10,
use_long_text_threshold=False,
remove_borders=False,
target_shape=(224, 224),
resize_mode='stretch'):
super().__init__()
self.obj = obj
self.strategy = strategy
self.scale_cell_dimensions = scale_cell_dimensions
self.cell_size = cell_size
self.long_text_threshold = long_text_threshold
self.use_long_text_threshold = use_long_text_threshold
self.remove_borders = remove_borders
self.target_shape = target_shape
self.resize_mode = resize_mode
def _clear_styling_attributes(self, soup):
# clear all attributes that could impact styling (except col- and rowspan)
for tag in soup.find_all():
new_attr = {}
if 'colspan' in tag.attrs:
new_attr['colspan'] = tag.attrs['colspan']
if 'rowspan' in tag.attrs:
new_attr['rowspan'] = tag.attrs['rowspan']
tag.attrs = new_attr
return soup
def _scale_cell_dimensions(self, tag):
if self.scale_cell_dimensions:
tag['width'] = self.cell_size
tag['height'] = self.cell_size
return tag
def _remove_borders(self, soup):
if self.remove_borders:
tag = soup.find('table')
tag['cellspacing'] = 0
tag['cellpadding'] = 0
return soup
def _is_emphasized(self, tag):
return len(tag.find_all(['b', 'strong', 'i'])) > 0
def _preprocess_html_color_shades(self):
soup = bs(self.obj['raw'], 'html.parser')
soup = self._clear_styling_attributes(soup)
for tag in soup.find_all(['th', 'td']):
tag = self._scale_cell_dimensions(tag)
text = tag.text.strip()
# set red for data type
# set r_step so there is an equivalent distance between the groups (255 / 6 ~= 42)
r_step = 42
r = 0 * r_step
if tag.find('a'):
r = 1 * r_step
elif tag.find('img'):
r = 2 * r_step
elif tag.find('button'):
r = 3 * r_step
elif tag.find('form') or tag.find('input'):
r = 4 * r_step
elif len(text) > 0:
# cells text majority are numeric characters
if sum(c.isdigit() for c in text) > (len(text) / 2):
r = 5 * r_step
else:
r = 255
# set green for content length
g = min(len(text), 255)
# set blue for styling
b = 0
if self._is_emphasized(tag):
b = 127
elif tag.name == 'th':
b = 255
tag['style'] = f'background-color: rgb({r},{g},{b})'
tag.clear()
soup = self._remove_borders(soup)
self.obj.update({
'transformed_html': str(soup.prettify(formatter='minimal'))
})
def _preprocess_html_grid(self):
soup = bs(self.obj['raw'], 'html.parser')
soup = self._clear_styling_attributes(soup)
for tag in soup.find_all(['th', 'td']):
tag = self._scale_cell_dimensions(tag)
color = 'yellow'
if tag.name == 'th':
color = 'grey'
elif tag.find('a'):
color = 'blue'
elif tag.find('img'):
color = 'green'
elif tag.find('button'):
color = 'purple'
elif tag.find('form') or tag.find('input'):
color = 'pink'
else:
text = tag.text.strip()
# cells text majority are numeric characters
if sum(c.isdigit() for c in text) > (len(text) / 2):
color = 'red'
elif self.use_long_text_threshold and len(text) > self.long_text_threshold:
color = 'brown'
elif self._is_emphasized(tag):
color = 'orange'
tag['style'] = f'background-color: {color}'
# replace content
# ALTERNATIVE CODE INCASE WE DECIDE TO KEEP THE STRUCTURE
# if KEEP_STRUCTURE and tag.string:
# tag.string = " " * len(tag.string.strip())
tag.clear()
soup = self._remove_borders(soup)
self.obj.update({
'transformed_html': str(soup.prettify(formatter='minimal'))
})
def _preprocess_html_char_blocks(self):
soup = bs(self.obj['raw'], 'html.parser')
        for cell in soup.find_all():  # reset the background on all elements
            if 'style' in cell.attrs:
                cell['style'] += ';background-color:none !important'
            else:
                cell['style'] = 'background-color:none !important'
# replace character classes with block symbol : digits, alphabetical, punctuation, whitespace
for elem in soup.find_all(text=True):
content = normalize('NFKD', elem)
for char in content:
color = 'white'
if re.match(r'[\p{N}]', char) is not None: # digits
color = 'red'
elif re.match(r'[\p{L}]', char) is not None: # alpha
color = 'blue'
elif re.match(r'[!"\#$%&\'()*+,\-./:;<=>?@\[\\\]^_`{|}~]', char) is not None: # punctuation
color = 'green'
new_char = soup.new_tag('span', style=f'color: {color} !important')
new_char.string = '█' if re.match(r'[ \t\r\n\v\f]', char) is None else char # whitespace
elem.parent.append(new_char)
elem.replace_with('')
# images
for img in soup.find_all('img'):
img['style'] = img.get('style', '') + ';background-color:yellow !important'
# emphasized text
for emp in soup.find_all(['a', 'strong', 'b', 'i', 'u', 'title']):
emp['style'] = emp.get('style', '') + ';opacity:0.4 !important'
# table head cells
for th in soup.find_all('th'):
th['style'] = th.get('style', '') + ';background-color:grey !important'
# input elements
for inp in soup.find_all(['button', 'select', 'input']):
inp['style'] = inp.get('style', '') + ';background-color:pink !important; border: 0; padding: 5px;'
# draw table border
for tab in soup.find_all('table'):
tab['style'] = 'border-collapse: collapse ! important'
tab['cellpadding'] = '5'
# draw table border pt. 2
for cell in soup.find_all(['th', 'td']):
cell['style'] = cell.get('style', '') + ';border: 2px solid black !important'
self.obj.update({
'transformed_html': str(soup.prettify(formatter='minimal'))
})
def _generate_image_from_html(self, html):
with NamedTemporaryFile(suffix='.png') as f:
try: # tables containing iframes or similar external sources cannot be rendered
imgkit.from_string(f'<meta charset="utf-8">{html}',
f.name,
options={'quiet': '',
'disable-plugins': '',
'no-images': '',
'disable-javascript': '',
'height': 1024,
'width': 1024,
'load-error-handling': 'ignore'})
image = PIL.Image.open(f.name)
            except Exception:
image = PIL.Image.new('RGB', self.target_shape, (255, 255, 255))
return image.convert('RGB')
def _crop_surrounding_whitespace(self, image):
bg = PIL.Image.new(image.mode, image.size, (255, 255, 255))
diff = PIL.ImageChops.difference(image, bg)
bbox = diff.getbbox()
if not bbox:
return image
return image.crop(bbox)
def _resize(self, image):
if self.resize_mode == 'none':
return image
if self.resize_mode == 'resize':
canvas = PIL.Image.new('RGB', self.target_shape, color=(255, 255, 255))
image.thumbnail(self.target_shape, PIL.Image.ANTIALIAS)
canvas.paste(image)
elif self.resize_mode == 'resize_fullwidth':
canvas = PIL.Image.new('RGB', self.target_shape, color=(255, 255, 255))
image.thumbnail((self.target_shape[0], 1024), PIL.Image.ANTIALIAS)
canvas.paste(image)
canvas = canvas.crop((0, 0, self.target_shape[0], self.target_shape[1]))
elif self.resize_mode == 'stretch':
canvas = image.resize(self.target_shape, PIL.Image.ANTIALIAS)
elif self.resize_mode == 'crop':
canvas = PIL.Image.new('RGB', self.target_shape, color=(255, 255, 255))
canvas.paste(image)
canvas = canvas.crop((0, 0, self.target_shape[0], self.target_shape[1]))
return canvas
def _render_html(self):
image = self._generate_image_from_html(self.obj['transformed_html']) # .decode('utf-8', 'replace'))
image = self._crop_surrounding_whitespace(image)
image = self._resize(image)
self.obj.update({
'image': image
})
def transform(self):
"""
Generate image re for a single web table according to our approach
Args:
None
Returns:
Dataframe with raw, label and feture vector for a single web column
"""
if self.strategy == 'raw':
self.obj['transformed_html'] = self.obj['raw']
elif self.strategy == 'grid':
self._preprocess_html_grid()
elif self.strategy == 'char_blocks':
self._preprocess_html_char_blocks()
elif self.strategy == 'color_shades':
self._preprocess_html_color_shades()
self._render_html()
return self.obj
def transform_for_approach(raw_dataframe, strategy='raw', resize_mode='stretch'):
"""
Transform an unprocessed web table dataset to feature space according to our approach
Args:
Dataframe with columns raw and label
strategy: raw, grid, char_blocks, color_shades
resize_mode: stretch, resize, resize_fullwidth, crop
Returns:
Dataframe with columns raw, label, transformed_html and image
Generates image representations of web table
"""
records = raw_dataframe.to_dict('records')
new_records = []
def _transform(rec):
try:
new_records.append(_ApproachSample(rec, strategy=strategy, resize_mode=resize_mode).transform())
        except Exception:
print('Skip ', rec['path'])
Parallel(n_jobs=-1, require='sharedmem')(delayed(_transform)(i) for i in tqdm(records))
return DataFrame(new_records)
``` |
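An end-to-end sketch combining the loader and the two transformers of this repository. The dataset path is a placeholder; the strategy and resize mode values come from the docstring above.
```python
# Hypothetical end-to-end usage of loader and transformers; "dataset/" is a placeholder.
from webtable_recognition.loader import load_from_directory
from webtable_recognition.transformer import transform_for_baseline, transform_for_approach

raw_df = load_from_directory("dataset/")
baseline_df = transform_for_baseline(raw_df)          # handcrafted feature columns
image_df = transform_for_approach(raw_df, strategy="color_shades", resize_mode="stretch")
image_df["image"][0].save("example.png")              # rendered table image
```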
{
"source": "JonasHimmetsbergerStudent/ScribbleFight",
"score": 3
} |
#### File: gym_game/envs/custom_env.py
```python
import gym
from gym import spaces
import numpy as np
from gym_game.envs.pygame_2d import PyGame2D
class CustomEnv(gym.Env):
#metadata = {'render.modes' : ['human']}
def __init__(self):
self.pygame = PyGame2D()
self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(np.array([0, 0, 0, 0, 0]), np.array([10, 10, 10, 10, 10]), dtype=np.int64)
def reset(self):
del self.pygame
self.pygame = PyGame2D()
obs = self.pygame.observe()
return obs
def step(self, action):
self.pygame.action(action)
obs = self.pygame.observe()
reward = self.pygame.evaluate()
done = self.pygame.is_done()
return obs, reward, done, {}
def render(self, mode="human", close=False):
self.pygame.view()
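

if __name__ == '__main__':
    # Quick rollout sketch added for illustration (not part of the original file):
    # play one episode with random actions to exercise reset(), step() and the
    # observation/reward plumbing of PyGame2D.
    env = CustomEnv()
    obs = env.reset()
    done = False
    total_reward = 0
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        total_reward += reward
    print('episode reward:', total_reward)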
```
#### File: test_game_ai/neat_v01/thisisneat.py
```python
import multiprocessing
import os
import pickle
import neat
import numpy as np
import gym
runs_per_net = 2
# simulation_seconds = 60.0
# Use the NN network phenotype and the discrete actuator force function.
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
fitnesses = []
for runs in range(runs_per_net):
env = gym.make("CartPole-v1")
observation = env.reset()
# Run the given simulation for up to num_steps time steps.
fitness = 0.0
done = False
while not done:
action = np.argmax(net.activate(observation))
observation, reward, done, _ = env.step(action=action)
fitness += reward
fitnesses.append(fitness)
# The genome's fitness is its worst performance across all runs.
return min(fitnesses)
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
# Load the config file, which is assumed to live in
# the same directory as this script.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
winner = pop.run(pe.evaluate)
# Save the winner.
with open('winner', 'wb') as f:
pickle.dump(winner, f)
print(winner)
if __name__ == '__main__':
run()
```
#### File: AI/KI_versions/multiAgent.py
```python
from stable_baselines3 import A2C, PPO
from stable_baselines3.common.env_util import make_vec_env
# threading
import threading
from threads.socketHandler import *
# import env
import KI_v01 # important
# other
import os
class KI():
def __init__(self):
self.env = make_vec_env('ScribbleFight-v0', n_envs=2)
def run(self):
log_path = os.path.join('Traning', 'Logs')
model = A2C("MlpPolicy", self.env, verbose=1, tensorboard_log=log_path)
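        # note: this A2C instance is created but discarded right away; the PPO model
        # assigned on the next line is the one that actually gets trained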
model = PPO("MlpPolicy", self.env, verbose=1, tensorboard_log=log_path)
model.learn(total_timesteps=4500000)
self.env.close()
if __name__ == "__main__":
ki = KI()
for item in ki.env.get_attr('pygame'):
while not item.scribble_fight.readystate:
continue
ki.run()
```
#### File: threads/dischargedConcept/singleAgent.py
```python
import gym
from gym import Env
from gym.spaces import Discrete, Box, Dict, Tuple, MultiBinary, MultiDiscrete
# stable baselines
from stable_baselines3 import A2C
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import VecFrameStack
from stable_baselines3.common.evaluation import evaluate_policy
# threading
import threading
from threads.socketHandler import *
# import env
import KI_v01
# other
import time
import os
from KI_v01.env.gym_env import CustomEnv
# NOTE TESTING
# class KI(threading.Thread):
# def __init__(self):
# threading.Thread.__init__(self)
# self.env = CustomEnv()
# def run(self):
# episodes = 100
# for episode in range(1, episodes+1):
# state = self.env.reset()
# done = False
# score = 0
# while not done:
# # self.env.render()
# actions = self.env.action_space.sample()
# # actions[0] = 1
# # actions[1] = 1
# state, reward, done, info = self.env.step(actions)
# score += reward
# print('Episode:{} Score:{}'.format(episode, score))
# self.env.close()
class KI(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
# self.env = CustomEnv()
self.env = gym.make('ScribbleFight-v0')
def run(self):
log_path = os.path.join('Traning', 'Logs')
model = A2C("MlpPolicy", self.env, verbose=1, tensorboard_log=log_path)
model.learn(total_timesteps=200)
if __name__ == "__main__":
threads = []
thread1 = KI()
while not thread1.env.pygame.scribble_fight.readystate:
continue
thread2 = thread1.env.pygame.scribble_fight.socket_handler
# Start new Threads
print("now starting Game Thread")
thread1.start()
print("now starting Observation Thread")
thread2.start()
threads.append(thread1)
threads.append(thread2)
for t in threads:
t.join()
```
#### File: dischargedConcept/testSocketio/tread.py
```python
import threading
import time
import engineio
import asyncio
import socketio
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
exitFlag = 0
class Game(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.driver = None
self.startBrowser()
def startBrowser(self):
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
self.driver = webdriver.Chrome(options=options,
executable_path=ChromeDriverManager().install())
url = 'http://localhost:3000/'
self.driver.get(url)
def isPlaying(self):
try:
self.driver.execute_script(
'return myPlayer.id;')
except:
return False
return True
def run(self):
while True:
self.driver.execute_script('moveRight();')
class SocketHandl(threading.Thread):
def __init__(self, driver):
threading.Thread.__init__(self)
self.obs = None
self.sio = None
self.driver = driver
async def start_server(self):
await self.sio.connect('http://localhost:3001')
myPlayerId = self.driver.execute_script('return myPlayer.id;')
await self.sio.emit('clientId', myPlayerId)
self.sio.on('visCopyToPython', self.visCopyToPython)
await self.sio.wait()
async def connect(self):
print('connected to server !!!!!!!!!!!!!')
async def disconnect(self):
print('disconnect !!!!!!!!!!!!!')
async def visCopyToPython(self, data):
print('A')
self.obs = data
def run(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.sio = socketio.AsyncClient(reconnection=True,
logger=False,
engineio_logger=False)
loop.run_until_complete(self.start_server())
def get_or_create_eventloop(self):
try:
return asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.get_event_loop()
class Fusion():
def init(self):
pass
def letsego(self):
threads = []
# Create new threads
thread1 = Game()
time.sleep(2)
thread2 = SocketHandl(thread1.driver)
# Start new Threads
print("now starting Thread 1")
thread1.start()
print("now starting Thread 2")
thread2.start()
threads.append(thread1)
threads.append(thread2)
for item in range(10):
time.sleep(1)
print(thread2.obs)
for t in threads:
t.join()
fusion = Fusion()
Fusion.letsego(fusion)
```
#### File: prototypes/document scanner/zwischenstand.py
```python
import cv2
import numpy as np
import utlis
import imutils
from time import sleep
from threading import Thread
########################################################################
webCamFeed = True
pathImage = "1.jpg"
cap = cv2.VideoCapture(1)
cap.set(10, 160)
heightImg = 480
widthImg = 640
borderColor = (1, 59, 218)
########################################################################
utlis.initializeTrackbars()
count = 0
printed = False
oldBiggest = []
# def check():
while True:
# SECTION webcam to open-cv
# NOTE checks webcam connection
# converts webcam image to readable open-cv data
# uses threshold values that can be set via slider
if webCamFeed:
success, img = cap.read()
if success:
if not printed: # BOOLEAN WHICH DESCRIBES IF STARTING MESSAGE IS PRINTED OR NOT
printed = True
print("document scanner running\nTh1:40\nTh2:20\nAcc:20\nArea:4000")
else:
print("scanner failed")
else:
img = cv2.imread(pathImage)
print("scanner failed")
img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE
    # CREATE A BLANK IMAGE FOR TESTING/DEBUGGING IF REQUIRED
imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)
# CONVERT IMAGE TO GRAY SCALE
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR
thres = utlis.valTrackbars() # GET TRACK BAR VALUES FOR THRESHOLDS
threshold1 = thres[0]
threshold2 = thres[1]
# threshold1 = 40
# threshold2 = 20
imgThreshold = cv2.Canny(
imgBlur, threshold1, threshold2) # APPLY CANNY BLUR
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
kernel = None
imgDial = cv2.dilate(imgThreshold, kernel, iterations=2) # APPLY DILATION
imgThreshold = cv2.erode(imgDial, kernel, iterations=1) # APPLY EROSION
# !SECTION
# SECTION find all contours + draw them
imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
contours, hierarchy = cv2.findContours(
imgThreshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # FIND ALL CONTOURS
# if len(contours) >= 5: # IF LESS THEN 5 CONTOURS WERE FOUND THE CODE THREW ERROR
# cnt = contours[4] # ONLY CONTOURS WITH 4 POINTS
# cv2.drawContours(img, [cnt], 0, borderColor, 3)
# else:
# cv2.drawContours(imgContours, contours, -1, borderColor,
# 3) # DRAW ALL DETECTED CONTOURS
# cv2.drawContours(imgContours, contours, -1, borderColor,
# 2) # DRAW ALL DETECTED CONTOURS
# FIND THE BIGGEST COUNTOUR
accuracy = thres[2]/1000
# accuracy = 20 / 1000
area = thres[3]
# area = 4000
biggest, maxArea = utlis.biggestContour(
contours, accuracy, area) # FIND THE BIGGEST CONTOUR
cnts = cv2.findContours(imgThreshold.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if len(cnts) != 0 and cv2.contourArea:
c = max(cnts, key=cv2.contourArea)
cv2.drawContours(imgContours, [c], -1, (0, 255, 255), 2)
if len(oldBiggest) == 0:
oldBiggest = biggest
a = np.array(biggest)
b = np.array(oldBiggest)
# RAISES NAN ERRORS THAT CAN BE IGNORED (CAN!!!)
biggestChanged = np.mean(a != b)
# BLUE BORDER WHICH WRAPS UP SMALL BORDERS
hull = utlis.findHulls(biggest)
for i in range(len(hull)):
cv2.drawContours(imgContours, hull, i, (255, 0, 0), 1, 8)
#!SECTION
# SECTION evaluate contours and draw biggest + flip image into perspective
if biggest.size != 0 and (biggestChanged >= 0.5 or biggestChanged is None) and biggest.tolist() != oldBiggest.tolist():
biggest = utlis.reorder(biggest)
oldBiggest = biggest
else:
# imageArray = ([img, imgGray, imgThreshold, imgContours],
# [imgBlank, imgBlank, imgBlank, imgBlank])
margin = 10
height, width, chanel = img.shape
width -= margin
height -= margin
windowPoints = np.array([[[margin, margin]], [[width, margin]],
[[margin, height]], [[width, height]]])
if biggest.size == 0 and oldBiggest.tolist() != windowPoints.tolist():
oldBiggest = biggest = windowPoints
# cv2.drawContours(imgBigContour, biggest, -1,
# (0, 255, 0), 10) # DRAW CIRCLES
# DRAW THE BIGGEST CONTOUR
imgBigContour = utlis.drawRectangle(
imgBigContour, oldBiggest, borderColor, 2)
pts1 = np.float32(oldBiggest) # PREPARE POINTS FOR WARP
pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [
widthImg, heightImg]]) # PREPARE POINTS FOR WARP
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgWarpColored = cv2.warpPerspective(
img, matrix, (widthImg, heightImg))
# REMOVE 20 PIXELS FORM EACH SIDE
imgWarpColored = imgWarpColored[20:imgWarpColored.shape[0] -
20, 20:imgWarpColored.shape[1] - 20]
imgWarpColored = cv2.resize(imgWarpColored, (widthImg, heightImg))
# APPLY ADAPTIVE THRESHOLD
imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
imgAdaptiveThre = cv2.adaptiveThreshold(imgWarpGray, 255, 1, 1, 7, 2)
imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
imgAdaptiveThre = cv2.medianBlur(imgAdaptiveThre, 3)
# Image Array for Display
# imageArray = ([img, imgGray, imgThreshold, imgContours],
# [imgBigContour, imgWarpColored, imgWarpGray, imgAdaptiveThre])
imageArray = ([imgThreshold, imgContours],
[imgBigContour, imgWarpColored])
# !SECTION
# SECTION draw open-cv data
# LABELS FOR DISPLAY
# lables = [["Original", "Gray", "Threshold", "Contours"],
# ["Biggest Contour", "Warp Prespective", "Warp Gray", "Adaptive Threshold"]]
lables = [["Threshold", "Contours"],
["Biggest Contour", "Warp Prespective"]]
stackedImage = utlis.stackImages(imageArray, 0.75, lables)
cv2.imshow("Result", stackedImage)
# !SECTION
# SAVE IMAGE WHEN 's' key is pressed
if cv2.waitKey(1) & 0xFF == ord('s'):
cv2.imwrite("Scanned/myImage"+str(count)+".jpg", imgWarpColored)
cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
(1100, 350), borderColor, cv2.FILLED)
cv2.putText(stackedImage, "Scan Saved", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
cv2.imshow('Result', stackedImage)
cv2.waitKey(300)
count += 1
print("image saved")
# sleep(0.25)
```
#### File: streamFusion/scanner/cv2scan.py
```python
from .utlis import *
from PIL import Image
import cv2
import numpy as np
import imutils
import sys
import math
def check(img):
contours = oldBiggest = biggest = np.array([])
imgGray = imgThreshold = imgContours = imgBlank = None
biggestChanged = 1
if img is not None:
heightImg, widthImg, chanel = img.shape
        # CREATE A BLANK IMAGE FOR TESTING/DEBUGGING IF REQUIRED
imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)
imgGray = imgThreshold = imgContours = imgBlank
# CONVERT IMAGE TO GRAY SCALE
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR
upperThres = 40
lowerThres = 40
imgThreshold = cv2.Canny(
imgBlur, upperThres, lowerThres) # APPLY CANNY BLUR
image_contours = np.zeros((heightImg, widthImg, 1), np.uint8)
image_binary = np.zeros((heightImg, widthImg, 1), np.uint8)
for channel in range(img.shape[2]):
ret, image_thresh = cv2.threshold(
img[:, :, channel], 200, 200, cv2.THRESH_BINARY)
special_contours = cv2.findContours(image_thresh, 1, 1)[0]
cv2.drawContours(image_contours, special_contours, -
1, (255, 255, 255), 3)
special_contours = cv2.findContours(image_contours, cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)[0]
if len(special_contours) > 0:
cv2.drawContours(image_binary, [max(special_contours, key=cv2.contourArea, default=0)],
-1, (255, 255, 255), -1)
cv2.drawContours(image_binary, special_contours,
-1, (255, 255, 0), 2)
imgGray = image_binary
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
# kernel = None
imgDial = cv2.dilate(imgThreshold, kernel,
iterations=3) # APPLY DILATION
imgThreshold = cv2.erode(
imgDial, kernel, iterations=3) # APPLY EROSION
# !SECTION
contours, hierarchy = cv2.findContours(
imgThreshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # FIND ALL CONTOURS
# SECTION find all contours + draw them
imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
# FIND THE BIGGEST COUNTOUR
accuracy = 25/1000
area = 1000
biggest, maxArea = biggestContour(
contours, accuracy, area) # FIND THE BIGGEST CONTOUR
# SECTION draw enclosing yellow border
cnts = cv2.findContours(imgThreshold.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if len(cnts) != 0 and cv2.contourArea:
c = max(cnts, key=cv2.contourArea)
cv2.drawContours(
imgContours, [c], -1, (0, 255, 255), -1)
# !SECTION
if len(oldBiggest) == 0:
oldBiggest = biggest
        # CALCULATE PERCENTAGE DIFFERENCE BETWEEN NEW AND OLD CONTOURS
a = np.array(biggest)
b = np.array(oldBiggest)
# RAISES NAN ERRORS THAT CAN BE IGNORED (CAN!!!)
biggestChanged = np.mean(
a != b) if a.size != 0 and b.size != 0 else 1
#!SECTION
edges = getEdges(oldBiggest, biggest, contours, img, biggestChanged)
return edges
def getWrappedImg(img, snipset):
snipset = squarify(snipset)
pt_A = snipset[0]
pt_B = snipset[1]
pt_C = snipset[2]
pt_D = snipset[3]
lineAB = np.array([pt_A, pt_B])
lineBC = np.array([pt_B, pt_C])
lineCD = np.array([pt_C, pt_D])
lineDA = np.array([pt_D, pt_A])
width_AD = np.sqrt(((pt_A[0] - pt_D[0]) ** 2) + ((pt_A[1] - pt_D[1]) ** 2))
width_BC = np.sqrt(((pt_B[0] - pt_C[0]) ** 2) + ((pt_B[1] - pt_C[1]) ** 2))
maxHeight = max(int(width_AD), int(width_BC))
if maxHeight == width_AD:
lineA = lineDA
else:
lineA = lineBC
height_AB = np.sqrt(((pt_A[0] - pt_B[0]) ** 2) +
((pt_A[1] - pt_B[1]) ** 2))
height_CD = np.sqrt(((pt_C[0] - pt_D[0]) ** 2) +
((pt_C[1] - pt_D[1]) ** 2))
maxWidth = max(int(height_AB), int(height_CD))
if maxWidth == height_AB:
lineB = lineAB
else:
lineB = lineCD
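    # The angle between the two longest edges (computed by ang() from utlis below) drives
    # a stretch factor for the target height: the further the corner deviates from 90
    # degrees, the more the warped height is scaled up, apparently to compensate for the
    # skew of the detected quadrilateral.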
angle = ang(lineA, lineB)
if angle == 90:
factor = 1
if angle > 90:
factor = 90 / (180-angle)
if angle < 90:
factor = 90 / angle
maxHeight *= factor
m = np.array([pt_A, pt_B, pt_C, pt_D])
m = rotateCW(m)
input_pts = np.float32(m)
output_pts = np.float32([[0, 0],
[0, maxHeight - 1],
[maxWidth - 1, maxHeight - 1],
[maxWidth - 1, 0]])
M = cv2.getPerspectiveTransform(input_pts, output_pts)
dst = cv2.warpPerspective(
img, M, (int(maxWidth), int(maxHeight)), flags=cv2.INTER_LINEAR)
return dst
def getPlayableArray(img):
np.set_printoptions(threshold=sys.maxsize)
alpha_img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA) # rgba
imgWarpGray = cv2.cvtColor(alpha_img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(imgWarpGray, (7, 7), 0)
imgAdaptiveThre = cv2.adaptiveThreshold(
blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 7, 2)
imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
imgAdaptiveThre = cv2.medianBlur(imgAdaptiveThre, 3)
# make image square
imgAdaptiveThre = np.array(makeSquare(
cv2.cvtColor(imgAdaptiveThre, cv2.COLOR_BGR2BGRA)))
img = cv2.cvtColor(imgAdaptiveThre, cv2.COLOR_BGR2BGRA)
# pippoRGBA2 = Image.fromarray(np.array(img).astype('uint8'), mode='RGBA')
# pippoRGBA2.show()
cv2.imwrite(
'./source/prototypes/streamFusion/output/imgAdaptiveThre.png', imgAdaptiveThre)
iar = np.asarray(img).tolist()
rows = len(iar)
columns = len(iar[0])
meshes = 3025
# percent = perc(rows * columns)
percent = 95
n = math.ceil(np.sqrt(rows * columns / meshes))
x = 0
y = 0
newImg = []
while y < rows:
newImg.append([])
while x < columns:
i = 0
j = 0
bg = 0
while i < n:
while j < n:
if (y + j) < rows and (x + i) < columns:
                        # count only white (background) pixels; non-white pixels are foreground
                        if np.all(iar[y + j][x + i][:3] == [255, 255, 255], 0):
                            bg += 1
j += 1
j = 0
i += 1
bgPercent = bg / (n**2)
if (bgPercent < (percent / 100)):
newImg[int(y / n)].append([0, 0, 0, 255])
else:
newImg[int(y / n)].append([255, 255, 255, 0])
x += n
x = 0
y += n
iar = np.asarray(newImg).tolist()
with open('./source/prototypes/streamFusion/output/mapArray.txt', 'w') as f:
f.writelines(repr(iar))
# pippoRGBA2 = Image.fromarray(np.array(newImg).astype('uint8'), mode='RGBA')
# pippoRGBA2.show()
cv2.imwrite(
'./source/prototypes/streamFusion/output/newImg.png', np.array(newImg))
return newImg
def makeSquare(im, size=8 * 55, fill_color=(255, 255, 255, 1)):
im = Image.fromarray(np.array(im).astype('uint8'),
mode='RGBA') # cv2 img to PIL img
x, y = im.size # get mesurements
new_im = Image.new('RGBA', (size, size), fill_color)
new_im.paste(im, (int((size - x) / 2), int((size - y) / 2)))
return new_im
``` |
{
"source": "jonashleyo/judge-pics",
"score": 3
} |
#### File: judge_pics/scrapers/dc_circuit_judges.py
```python
import hashlib
import json
import re
import requests
import shutil
import subprocess
import os
from lxml import html
from judge_pics import judge_pics, judge_root
root_url = 'http://dcchs.org/Portraits/'
line_re = re.compile('<a href="(.*)">(.*)</a')
def make_slug(name, path):
last_name = re.search('(.*),', name).group(1).lower()
first_name = re.search('([A-Z].*)[A-Z]', path).group(1).lower()
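    # e.g. (hypothetical values) make_slug('Adkins, Jesse J.', 'JesseAdkins.html')
    # yields 'adkins-jesse'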
return '%s-%s' % (last_name, first_name)
def get_artist_and_date_created(full_url):
# Open firefox, prompt for answer, sanitize answer and return it.
subprocess.Popen(['firefox', full_url], shell=False).communicate()
artist = raw_input('Who made this: ')
if artist == '':
artist = None
d = raw_input('When did they make it: ')
if d == '':
d = None
return artist, d
def get_hash_from_file(image):
"""Get the hash from the current file"""
with open(image, 'r') as f:
return hashlib.sha256(f.read()).hexdigest()
def run_things():
with open('sources.txt', 'r') as f:
for line in f:
# <a href="JesseAdkins.html"><NAME>.</a><br>
path = line_re.search(line).group(1)
name = line_re.search(line).group(2)
slug = make_slug(name, path)
full_url = root_url + path
r = requests.get(full_url,
headers={'UserAgent': 'freelawproject.org'})
tree = html.fromstring(r.text)
try:
img_path = tree.xpath('//div[@id="contentcolumn"]//img/@src')[0]
full_img_src = root_url + img_path
except IndexError:
print "Failed to find image for %s" % full_url
continue
r_img = requests.get(full_img_src, stream=True)
if r_img.status_code == 200:
with open(slug + '.jpeg', 'wb') as f_img:
r_img.raw.decode_content = True
shutil.copyfileobj(r_img.raw, f_img)
artist, date_created = get_artist_and_date_created(full_url)
img_hash = get_hash_from_file(slug + '.jpeg')
# Update judges.json
judge_pics[slug] = {
'artist': artist,
'date_created': date_created,
'license': 'Work of Federal Government',
'source': 'Historical Society of the District of Columbia '
'Circuit',
'hash': img_hash,
}
json.dump(
judge_pics,
open(os.path.join(judge_root, 'judges.json'), 'w'),
sort_keys=True,
indent=2,
)
if __name__ == '__main__':
run_things()
``` |
{
"source": "jonashoechst/cbor2",
"score": 3
} |
#### File: cbor2/scripts/half_float_tables.py
```python
from itertools import zip_longest
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def sigtable():
print("static const uint32_t sigtable[] = {")
values = (
0 if i == 0 else
convertsig(i) if 1 <= i < 1024 else
0x38000000 + ((i - 1024) << 13)
for i in range(2048)
)
values = ('{:#010x}'.format(i) for i in values)
for row in grouper(values, 8):
print(' ' + (', '.join(row)) + ',')
print("};")
def exptable():
values = (
0 if i == 0 else
0x47800000 if i == 31 else
0x80000000 if i == 32 else
i << 23 if 1 <= i < 31 else
0x80000000 + ((i - 32) << 23) if 33 <= i < 63 else
0xC7800000 # i == 63
for i in range(64)
)
print("static const uint32_t exptable[] = {")
values = ('{:#010x}'.format(i) for i in values)
for row in grouper(values, 8):
print(' ' + (', '.join(row)) + ',')
print("};")
def offsettable():
values = (
0 if i in (0, 32) else 1024
for i in range(64)
)
print("static const uint16_t offsettable[] = {")
values = ('{:#06x}'.format(i) for i in values)
for row in grouper(values, 8):
print(' ' + (', '.join(row)) + ',')
print("};")
def convertsig(i):
if not i:
return 0
m = i << 13
e = 0
while not m & 0x00800000:
e -= 0x00800000
m <<= 1
m &= ~0x00800000
e += 0x38800000
return m | e
def basetable():
values = [0] * 512
for i in range(256):
e = i - 127
if e < -24: # underflow to 0
values[i | 0x000] = 0
values[i | 0x100] = 0x8000
elif e < -14: # smalls to denorms
values[i | 0x000] = (0x400 >> (-e - 14))
values[i | 0x100] = (0x400 >> (-e - 14)) | 0x8000
elif e < 15: # normal case
values[i | 0x000] = ((e + 15) << 10)
values[i | 0x100] = ((e + 15) << 10) | 0x8000
elif e < 128: # overflow to inf
values[i | 0x000] = 0x7c00
values[i | 0x100] = 0xfc00
else: # inf and nan
values[i | 0x000] = 0x7c00
values[i | 0x100] = 0xfc00
print("static const uint16_t basetable[] = {")
values = ('{:#06x}'.format(i) for i in values)
for row in grouper(values, 8):
print(' ' + (', '.join(row)) + ',')
print("};")
def shifttable():
values = [0] * 512
for i in range(256):
e = i - 127
if e < -24: # underflow to 0
values[i | 0x000] = 24
values[i | 0x100] = 24
elif e < -14: # smalls to denorms
values[i | 0x000] = -e - 1
values[i | 0x100] = -e - 1
elif e < 15: # normal case
values[i | 0x000] = 13
values[i | 0x100] = 13
elif e < 128: # overflow to inf
values[i | 0x000] = 24
values[i | 0x100] = 24
else: # inf and nan
values[i | 0x000] = 13
values[i | 0x100] = 13
print("static const uint16_t shifttable[] = {")
values = ('{:#06x}'.format(i) for i in values)
for row in grouper(values, 8):
print(' ' + (', '.join(row)) + ',')
print("};")
sigtable()
print()
exptable()
print()
offsettable()
print()
basetable()
print()
shifttable()
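
# How the generated tables are typically consumed (illustrative comment only; the real
# lookup code lives in the C extension and may differ in detail). With f the 32-bit
# IEEE-754 single-precision bit pattern and h the 16-bit half-precision bit pattern:
#
#   float -> half: h = basetable[(f >> 23) & 0x1ff] + ((f & 0x007fffff) >> shifttable[(f >> 23) & 0x1ff])
#   half -> float: f = sigtable[offsettable[h >> 10] + (h & 0x3ff)] + exptable[h >> 10]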
``` |
{
"source": "jonasht/CursoEmVideo-CursoDePython3",
"score": 3
} |
#### File: CursoEmVideo-CursoDePython3/mundo3-EstruturasCompostas/084-listaCompostaEAnaliseDeDados1.py
```python
import colorama
colorama.init() # p/ funcionar no windows, linux funciona normalmente
r = '\033[31m' # red
b = '\033[34m' # blue
g = '\033[32m' # green
f = '\33[m'
pessoas = list()
obter = list()
qtdp = maior = menor = 0
def l(): print(g, '=-'*30 + '=',f)
while 1:
l()
qtdp += 1
obter.append(str(input(f'{qtdp} nome:')))
obter.append(float(input(f'{qtdp} peso:')))
if len(pessoas) == 0:
maior = menor = obter[1]
else:
if obter[1] > maior:
maior = obter[1]
if obter[1] < menor:
menor = obter[1]
pessoas.append(obter[:])
obter.clear()
sair = input('quer continuar [S/n]:')
if sair in 'nN':
break
l()
print(f'quandidade de pessoas {qtdp} ')
print(r, f'maior peso foi de {maior}kg. o peso de ', f, end='')
for p in pessoas:
if p[1] == maior:
print(f' {p[0]}', end='')
print(b, f'\nmenor peso foi de {menor}kg. o peso de ', end='')
for p in pessoas:
if p[1] == menor:
print(f' {p[0]}')
# Exercício Python 084:
# Faça um programa que leia nome e peso de várias pessoas,
# guardando tudo em uma lista. No final, mostre:
#A) Quantas pessoas foram cadastradas.
#B) Uma listagem com as pessoas mais pesadas.
#C) Uma listagem com as pessoas mais leves.
```
#### File: CursoEmVideo-CursoDePython3/mundo3-EstruturasCompostas/086-matriz.py
```python
def l(ll): print(30*ll)
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
l('=')
print('valores de matriz a serem adionados')
for i in range(len(matriz)):
for ii in range(len(matriz)):
matriz[i][ii] = int(input(f'[{i}|{ii}]numero: '))
l('_')
print('Valores de matriz:')
for i in range(3):
for ii in range(3):
print(f'{matriz[i][ii]:^10} ', end='')
print()
l('-')
```
#### File: CursoEmVideo-CursoDePython3/mundo3-EstruturasCompostas/087-matriz.py
```python
def l(ll): print(30*ll)
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
sumPar = terC = 0
l('=')
print('valores de matriz a serem adionados')
for i in range(len(matriz)):
for ii in range(len(matriz)):
matriz[i][ii] = int(input(f'[{i}|{ii}]numero: '))
l('_')
print('Valores de matriz:')
for i in range(3):
for ii in range(3):
print(f'{matriz[i][ii]:^10} ', end='')
sumPar = sumPar+matriz[i][ii] if matriz[i][ii]%2==0 else sumPar # soma de tds os valores pares digitds
terC = terC + matriz[i][ii] if ii == 2 else terC
print()
l('_')
print(f'soma dos numeros pares: {sumPar}')
print(f'soma dos valores da terceira coluna: {terC}')# A soma dos valores da terceira coluna
print(f'maior valor da segunda linha: {max(matriz[1])}')# O maior valor da segunda linha
```
#### File: CursoEmVideo-CursoDePython3/mundo3-EstruturasCompostas/093-CadastroDeJogadorDeFutebol.py
```python
dados = dict()
qtd_gols = []
dados['nome'] = input('nome do jogador: ')
def l(): print('\n'+'=-'*25+'=')
qtd_partida = int(input('quantas partidas:'))
for i in range(qtd_partida):
qtd_gols.append(int(input(f'quantos gols na {1+i}º partida: ')))
dados['gols'] = qtd_gols[:]
dados['total'] = sum(qtd_gols)
l()
print(dados)
l()
for chave, valor in dados.items():
print(f'o campo {chave} tem o valor {valor}')
l()
print(f"o jogador {dados['nome']} jogou {len(dados['gols'])} partidas.")
for i, v in enumerate(dados['gols']):
print(f"\t=> Na {i+1}º partida, fez {dados['gols'][i]}")
l()
```
#### File: CursoEmVideo-CursoDePython3/mundo3-EstruturasCompostas/098-funcaoContador.py
```python
import time
inicio = fim = passo = 0
def contador(inicio, fim, passo=1):
print('=-'*30+'=')
print(f'contagem de {inicio} até {fim} com o passo {passo}')
# o proprio FOR evita loop infinito caso a regra não seja dada
for i in range(inicio, fim, passo):
print(f'{i} ', flush=True, end='')
time.sleep(.1)
print()
print('=-'*30+'=')
contador(1, 10)
contador(10, 0, -2)
inicio = int(input('(inicio) DE:'))
fim = int(input(' (fim) até: '))
passo = int(input(' passo: '))
contador(inicio, fim, passo)
```
#### File: CursoEmVideo-CursoDePython3/mundo3-EstruturasCompostas/102-funcaoFatorial.py
```python
def fatorial(n, show=False):
'''
-> calcula um fatorial de um numero
:para n: o numero para ser calculado
:para show: (opcional) mostrar ou não aconta (False/True)
:return: o valor do fatorial 'numero'
'''
fatorial = [i for i in range(1, 1+n)]
soma = 1
resposta = ''
for i in fatorial:
soma *= i
if show:
for i in fatorial:
resposta += f'{i} X '
return f'{resposta[:-2] }= {soma}' if resposta else soma
print(fatorial(5, True))
print(fatorial(9))
help(fatorial)
``` |
{
"source": "jonasht/Python",
"score": 3
} |
#### File: 02-snake-pygame/cobra-v1/main.py
```python
import pygame
from pygame.locals import *
import random
def on_grid_random():
x = random.randint(0,590)
y = random.randint(0,590)
return (x//10 * 10, y//10 * 10)
def collision(c1, c2):
return (c1[0] == c2[0]) and (c1[1] == c2[1])
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
pygame.init()
tela = pygame.display.set_mode((600,600))
pygame.display.set_caption('cobra')
cobra = [(200, 200), (210, 200), (220,200)]
cobra_skin = pygame.Surface((10,10))
cobra_skin.fill((255,255,255))
cobra_posicao = on_grid_random()
maca = pygame.Surface((10,10))
maca.fill((255,0,0))
my_direction = LEFT
clock = pygame.time.Clock()
while 1:
clock.tick(10)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
if event.type == KEYDOWN:
if event.key == K_UP:
my_direction = UP
if event.key == K_DOWN:
my_direction = DOWN
if event.key == K_LEFT:
my_direction = LEFT
if event.key == K_RIGHT:
my_direction = RIGHT
if collision(cobra[0], cobra_posicao):
cobra_posicao = on_grid_random()
cobra.append((0,0))
for i in range(len(cobra) - 1, 0, -1):
cobra[i] = (cobra[i-1][0], cobra[i-1][1])
if my_direction == UP:
cobra[0] = (cobra[0][0], cobra[0][1] - 10)
if my_direction == DOWN:
cobra[0] = (cobra[0][0], cobra[0][1] + 10)
if my_direction == RIGHT:
cobra[0] = (cobra[0][0] + 10, cobra[0][1])
if my_direction == LEFT:
cobra[0] = (cobra[0][0] - 10, cobra[0][1])
tela.fill((0,0,0))
tela.blit(maca, cobra_posicao)
for pos in cobra:
tela.blit(cobra_skin, pos)
pygame.display.update()
```
#### File: 0-versoesAnteriores/contador-v1/contador1.py
```python
from tkinter import*
janela = Tk()
janela.title('contagem')
janela['bg']='Black'
segundos = None
#dimisões da janela
largura = 230
altura = 250
#resolução do sistema
largura_screen = janela.winfo_screenwidth()
altura_screen = janela.winfo_screenheight()
#posição da janela
posX = largura_screen/2 - largura/2
posY = altura_screen/2 - altura/2
#definir a geometry
janela.geometry('%dx%d+%d+%d' % (largura, altura, posX, posY))
def Contagem():
global ate
global segundos
if segundos == None:
ate = int(entrada.get())
segundos = -1
if segundos == ate:
lb_contagem['text'] = 'Fim'
else:
segundos = segundos + 1
lb_contagem['text'] = segundos
lb_contagem.after(1000, Contagem)
label = Label(janela, text="quantos segundos:", fg='green', bg='black')
label.grid(row=0, column=0)
entrada = Entry(janela, textvariable=0, bg='gray')
entrada.grid(row=0, column=1)
lb_contagem = Label(janela, fg='green', font='Times 100 bold', bg='black', text='0')
lb_contagem.grid(row=2, column=0, columnspan=2, sticky=W+E)
bt = Button(janela, fg='dark green', bg='light sea green', text='Começar', command=Contagem, font='Arial 20 bold')
bt.grid(row=3, column=0, columnspan=2, sticky=W+E)
janela.mainloop()
```
#### File: 0-versoesAnteriores/contador-v4/main.py
```python
from tkinter import Tk
from tkinter.constants import DISABLED
from frame import Interface
class Principal(Tk):
def __init__(self):
super().__init__()
self.frame = Interface(self)
self.frame.pack()
self.bind('<Return>', self.teclaEnter)
# self.frame.entrada.focus()
def teclaEnter(self, event):
self.iniciar()
def iniciar(self):
self.frame.Contagem()
if __name__ == '__main__':
root = Principal()
root.mainloop()
```
#### File: 04-AprendizagemDeTabuada/0-versoesAnteriores/tabuadaLearning3.py
```python
from tkinter import *
import random
print('feito no windows')
t = Tk()
t.title('FTabuada v3')
t.iconbitmap('04/ico.xbm')
conta_1 = 2
conta_2 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
random.shuffle(conta_2)
c = 0
incorreto = correto = 0
def Start():
conta_1 = valor.get()
conta_total = [conta_1 * conta_2[0], (conta_1 * conta_2[0]) + 2, (conta_1 * conta_2[c]) * 1 ]
random.shuffle(conta_total)
lb_conta.config(text=str(conta_1) + ' X ' + str(conta_2[0]))
bt0.config(text=conta_total[0], state=NORMAL)
bt1.config(text=conta_total[1], state=NORMAL)
bt2.config(text=conta_total[2], state=NORMAL)
bt_start.config(state=DISABLED, fg='white', bg='white')
def comando(res_comando):
global conta_total
global conta_1
global conta_2
global c
global correto
global incorreto
conta_1 = valor.get()
if c <= len(conta_2):
c = c + 1
conta_total = [conta_1 * conta_2[c], (conta_1 * conta_2[c]) + 7,(conta_1 * conta_2[c]) * 2 + 9 ]
lb_conta.config(text=str(conta_1) + ' X ' + str(conta_2[c]))
lb_conta_resposta.config(text=str(conta_1) + ' X ' + str(conta_2[c-1]) + ' = ' + str(conta_1 * conta_2[c-1]))
bt0.config(text=conta_total[0], state=NORMAL)
bt1.config(text=conta_total[1], state=NORMAL)
bt2.config(text=conta_total[2], state=NORMAL)
if (conta_1 * conta_2[c]) == conta_total[res_comando]:
lb_resposta.config(text='correto')
correto = correto + 1
lb_correto.config(text=correto)
random.shuffle(conta_total)
else:
lb_resposta.config(text='incorreto')
incorreto = incorreto + 1
lb_incorreto.config(text=incorreto)
random.shuffle(conta_total)
lb_conta = Label(t, text='=-=-=-=', width=5, fg='red', font='arial 80 bold')
lb_resposta = Label(t, text='=-=-=-=-=-=', fg='blue', font='arial 20 bold')
lb_conta_resposta = Label(t, text='=-=-=-=-=-=', fg='dark blue', font='arial 20 bold')
bt0 = Button(t, text='', command=lambda: comando(0), state=DISABLED, font='arial 20 bold',
width=5, borderwidth=2)
bt1 = Button(t, text='', command=lambda: comando(1), state=DISABLED, font='arial 20 bold',
width=5, borderwidth=2)
bt2 = Button(t, text='', command=lambda: comando(2), state=DISABLED, font='arial 20 bold',
width=5, borderwidth=2)
bt_start = Button(t, text='start', command=Start, font='arial 20')
frame_op = Frame(t)
frame_op.grid(row=0, column=0, columnspan=3, sticky=W+E)
valor = IntVar()
lb_op = Label(frame_op, text='qual tabuada?:')
lb_op.grid(row=0)
rbt1 = Radiobutton(frame_op, text='1', variable=valor, value=1, indicatoron=0, padx=10, pady=10,
bg='green')
rbt1.grid(row=0, column=1)
rbt2 = Radiobutton(frame_op, text='2', variable=valor, value=2, indicatoron=0, padx=10, pady=10,
bg='green')
rbt2.grid(row=0, column=2)
rbt3 = Radiobutton(frame_op, text='3', variable=valor, value=3, indicatoron=0, padx=10, pady=10,
bg='green')
rbt3.grid(row=0, column=3)
rbt4 = Radiobutton(frame_op, text='4', variable=valor, value=4, indicatoron=0, padx=10, pady=10,
bg='green')
rbt4.grid(row=0, column=4)
rbt5 = Radiobutton(frame_op, text='5', variable=valor, value=5, indicatoron=0, padx=10, pady=10,
bg='green')
rbt5.grid(row=0, column=5)
rbt6 = Radiobutton(frame_op, text='6', variable=valor, value=6, indicatoron=0, padx=10, pady=10,
bg='green')
rbt6.grid(row=0, column=6)
rbt7 = Radiobutton(frame_op, text='7', variable=valor, value=7, indicatoron=0, padx=10, pady=10,
bg='green')
rbt7.grid(row=0, column=7)
rbt8 = Radiobutton(frame_op, text='8', variable=valor, value=8, indicatoron=0, padx=10, pady=10,
bg='green')
rbt8.grid(row=0, column=8)
rbt9 = Radiobutton(frame_op, text='9', variable=valor, value=9, indicatoron=0, padx=10, pady=10,
bg='green')
rbt9.grid(row=0, column=9)
rbt10 = Radiobutton(frame_op, text='10', variable=valor, value=10, indicatoron=0,
padx=10, pady=10, bg='green')
rbt10.grid(row=0, column=10)
rbt9.select()
lb_conta.grid(row=1, columnspan=3, sticky=W+E)
lb_resposta.grid(row=2, column=1,columnspan=2, sticky=W+E)
lb_conta_resposta.grid(row=3, column=1, columnspan=2, sticky=W+E)
bt0.grid(row=2, sticky=W+E)
bt1.grid(row=3, sticky=W+E)
bt2.grid(row=4, sticky=W+E)
bt_start.grid(row=5, columnspan=3, sticky=W+E)
lb_correto = Label(t, text=0, fg='blue', font='arial 20 bold')
lb_incorreto = Label(t, text=0, fg='red', font='arial 20 bold')
lb_incorreto.grid(row=4, column=1)
lb_correto.grid(row=4, column=2)
t.mainloop()
```
#### File: 0-versoesAnteriores/tabuada-V4/conta.py
```python
from random import shuffle
class Conta:
def __init__(self):
self.contas = list()
# self.fazerContas()
def set_numero1(self, n):
self.fazerContas(n)
def fazerContas(self, numero1):
numero2 = list(range(10))
for conta in numero2:
self.contas.append([numero1, conta])
shuffle(self.contas)
def mostrar(self):
print(self.contas)
if __name__ == '__main__':
conta = Conta()
conta.set_numero1(9)
conta.mostrar()
```
#### File: 04-AprendizagemDeTabuada/tabuada-V5/frameMenu.py
```python
from tkinter import *
class FrameMenu(Frame):
def __init__(self, parent, controller):
Frame.__init__(self, parent)
self.controller = controller
self.opcaoMenu = Frame(self)
self.bt_start = Button(self, text='Start', font='arial 20', command=lambda: controller.show_frame('FrameStart'))
self.valor = IntVar()
self.lb_op = Label(self.opcaoMenu, text='qual tabuada?:')
self.lb_op.pack()
# ======= radio Button ===========================================
# radio button das opcao para escolher
self.rbt1 = Radiobutton(self.opcaoMenu, text='1', variable=self.valor, value=1, indicatoron=0, padx=10, pady=10, bg='green')
self.rbt2 = Radiobutton(self.opcaoMenu, text='2', variable=self.valor, value=2, indicatoron=0, padx=10, pady=10, bg='green')
self.rbt3 = Radiobutton(self.opcaoMenu, text='3', variable=self.valor, value=3, indicatoron=0, padx=10, pady=10,bg='green')
self.rbt4 = Radiobutton(self.opcaoMenu, text='4', variable=self.valor, value=4, indicatoron=0, padx=10, pady=10, bg='green')
self.rbt5 = Radiobutton(self.opcaoMenu, text='5', variable=self.valor, value=5, indicatoron=0, padx=10, pady=10, bg='green')
self.rbt6 = Radiobutton(self.opcaoMenu, text='6', variable=self.valor, value=6, indicatoron=0, padx=10, pady=10,bg='green')
self.rbt7 = Radiobutton(self.opcaoMenu, text='7', variable=self.valor, value=7, indicatoron=0, padx=10, pady=10,bg='green')
self.rbt8 = Radiobutton(self.opcaoMenu, text='8', variable=self.valor, value=8, indicatoron=0, padx=10, pady=10,bg='green')
self.rbt9 = Radiobutton(self.opcaoMenu, text='9', variable=self.valor, value=9, indicatoron=0, padx=10, pady=10, bg='green')
self.rbt1.pack (side=LEFT)
self.rbt2.pack (side=LEFT)
self.rbt3.pack (side=LEFT)
self.rbt4.pack (side=LEFT)
self.rbt5.pack (side=LEFT)
self.rbt6.pack (side=LEFT)
self.rbt7.pack (side=LEFT)
self.rbt8.pack (side=LEFT)
self.rbt9.pack (side=LEFT)
self.rbt9.select()
self.opcaoMenu.pack(anchor=CENTER, padx=10, pady=20)
self.bt_start.pack(anchor=CENTER)
if __name__ == '__main__':
import main
# root = Tk()
# frame = FrameMenu(root)
# def tecla1(event):
# print('1 apertado')
# frame.rbt1.select()
# def tecla2(event):
# print('2 apertado')
# frame.rbt2.select()
# root.bind('1', tecla1)
# root.bind('2', tecla2)
# frame.pack()
# root.mainloop()
```
#### File: 06-sistemaLinear3x3/0versoesAnteriores/0-v0-delta.py
```python
conta = [
{'x': 1, 'y': 2, 'z': 1, '=': 8},
{'x': 2, 'y':-1, 'z': 1, '=': 3},
{'x': 3, 'y': 1, 'z':-1, '=': 2}
]
def enfeitar():
print('-' * 30)
for c in conta:
print(c)
enfeitar()
for dic in conta:
for chave, item in dic.items():
if chave == '=':
print(f' = {item}', end='')
else:
print(f' {item}{chave}', end='')
print()
enfeitar()
print('delta:')
lista_delta = [[], [], []]
for i, dic in enumerate(conta):
for chave, item in dic.items():
if chave != '=':
print(f' {item}', end='')
lista_delta[i].append(item)
print()
for i in range(3):
for ii in range(2):
lista_delta[i].append(lista_delta[i][ii])
enfeitar()
def mostrar_delta():
for lista in lista_delta:
for n in lista:
if len(str(n)) == 1:
print(f' {n}', end='')
else:
print(f' {n}', end='')
print()
mostrar_delta()
enfeitar()
print(lista_delta)
d_resultado = 1
def somarMatriz():
multiplicacao = 1
resultado = 0
for seguinte in range(3):
for i in range(3):
multiplicacao *= lista_delta[i][i+seguinte]
resultado += multiplicacao
multiplicacao = 1
for seguinte in range(3):
for i in range(3):
multiplicacao *= -(lista_delta[-(i-2)][i+seguinte])
resultado += multiplicacao
multiplicacao = 1
return resultado
enfeitar()
delta = somarMatriz()
print(f'delta = {delta}')
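
# Illustrative cross-check (added, not in the original script): the same determinant via
# cofactor expansion along the first row, using only the coefficients stored in 'conta'.
m = [[d['x'], d['y'], d['z']] for d in conta]
delta_check = (m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
               - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
               + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]))
print(f'delta (cofactor cross-check) = {delta_check}')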
```
#### File: 0versoesAnteriores/3X3-v8/principal.py
```python
from cl_3x3 import *
# --------------------------------------------------
conta = [
{'x': 1, 'y': 2, 'z': 1, '=': 8},
{'x': 2, 'y': -1, 'z': 1, '=': 3},
{'x': 3, 'y': 1, 'z': -1, '=': 2}
]
# --------------------------------------------------
def enfeitar(oque='-', qtd=50):
print(oque * qtd)
a = cl_3x3(conta)
print()
enfeitar()
print('conta:')
a.mostrar_conta()
enfeitar()
print('matriz delta:')
a.mostrar_matriz()
print(f'delta = {a.delta}')
enfeitar()
a.mostrar_matriz('x')
print(f'deltaX = {a.deltaX}')
enfeitar()
a.mostrar_matriz('y')
print(f'deltaX = {a.deltaY}')
enfeitar()
a.mostrar_matriz('z')
print(f'deltaZ = {a.deltaZ}')
enfeitar()
print(f'delta={a.delta}, deltaX={a.deltaX}, deltaY={a.deltaY}, deltaZ={a.deltaZ}')
print(f'x = {a.deltaX}/{a.delta} = {a.x}\n')
print(f'y = {a.deltaY}/{a.delta} = {a.y}\n')
print(f'z = {a.deltaZ}/{a.delta} = {a.z}\n')
enfeitar()
print(f'x = {a.x}, y = {a.y}, z = {a.z}')
```
#### File: 0versoesAnteriores/7-3X3/principal.py
```python
from cl_3x3 import *
# --------------------------------------------------
conta = [
{'x': 1, 'y': 2, 'z': 1, '=': 8},
{'x': 2, 'y': -1, 'z': 1, '=': 3},
{'x': 3, 'y': 1, 'z': -1, '=': 2}
]
# --------------------------------------------------
def enfeitar():
print('-' * 30)
a = cl_3x3(conta)
print()
enfeitar()
print('conta:')
a.mostrar_conta()
enfeitar()
print('matriz delta:')
a.mostrar_matriz(a.matriz_delta)
print(f'delta = {a.delta}')
enfeitar()
a.mostrar_matriz(a.matriz_deltaX)
print(f'deltaX = {a.deltaX}')
enfeitar()
a.mostrar_matriz(a.matriz_deltaY)
print(f'deltaX = {a.deltaY}')
enfeitar()
a.mostrar_matriz(a.matriz_deltaZ)
print(f'deltaZ = {a.deltaZ}')
enfeitar()
print(f'delta={a.delta}, deltaX={a.deltaX}, deltaY={a.deltaY}, deltaZ={a.deltaZ}')
print(f'x = {a.deltaX}/{a.delta} = {a.x}')
print(f'y = {a.deltaY}/{a.delta} = {a.y}')
print(f'z = {a.deltaZ}/{a.delta} = {a.z}')
enfeitar()
print(f'x = {a.x}, y = {a.y}, z = {a.z}')
``` |
{
"source": "jonasht/python",
"score": 3
} |
#### File: teste/testeCores/corFunc.py
```python
def get_vars(conta) -> list:
vars = list()
for c in conta:
# print(c)
if c.isalpha() or c == '=':
vars.append(c)
# print(c, vars)
# print(vars)
return vars
cores = ['blue',
'green',
'yellow',
'red',
'white']
def formatar(num, conta) -> dict:
formatado = list()
print(conta)
vars = get_vars(conta)
if '=' in vars:
vars.remove('=')
for i, v in enumerate(vars):
p1 = conta.find(vars[i])
vars.pop(p1)
p2 = p1 +1
# print('i', 'v', 'p1 p2')
# print(i, v, p1, p2)
d = {
'nome':v+str(num+1),
'p1': str(num+1)+'.'+str(p1),
'p2':str(num+1)+'.'+str(p2),
'fg':cores[i]
}
formatado.append(d)
# mostrar
return formatado
if __name__ == '__main__':
print(get_vars('xxx'))
```
#### File: sistemaLinear_v11/teste/textEventTest.py
```python
from tkinter import ttk
from tkinter import *
class App(Tk):
def __init__(self):
super().__init__()
self.text = Text(self, background='black',
foreground='white',
font='arial 20 bold',
height=10, width=30)
self.bind('<KeyRelease>', self.event)
self.text.pack()
def event(self, event):
print(self.text.get('1.0', END))
root = App()
root.mainloop()
``` |
{
"source": "jonasht/Python",
"score": 3
} |
#### File: 0versoesAntigas/0-algoritmo/0.py
```python
from time import sleep
from random import shuffle
lista = list(range(1, 11))
shuffle(lista)
mostrarLista = [([0 for i in range(10)]) for i in range(10)]
print()
def mostrar():
for ls in mostrarLista:
for l in ls:
if l == True:
print(f'{l} ', end='')
else:
print(' ', end='')
#print(f'{l} ', end='')
print()
for i in lista:
print(f'{i} ', end='')
#mostrarLista[1][1] = 1
for contador in range(len(lista)):
for i, chars in enumerate(mostrarLista[::-1]):
if lista[contador] == i: break
chars[contador] = 1
sleep(.02)
mostrar()
```
#### File: 0versoesAntigas/4-programa/programa.py
```python
from time import sleep
from random import shuffle
from os import system
f = '\33[m'
bgBlue = '\033[44m' # backgournd blue
bgYellow = '\033[43m' #backgournd yellow
bgRed = '\033[41m' #backgournd red
# definição do tamanho da lista
tamanhoDaLista = 30
lista = list(range(1, tamanhoDaLista))
shuffle(lista)
print()
# mostrar lista, serve para mostrar a lista de forma legal
def Mostrar(mostrarLista):
system('clear')
for cs in mostrarLista:
for c in cs:
if c == 1:
print(f'{bgBlue} {f}', end='')
elif c == 2:
print(f'{bgYellow} {f}', end='')
else:
print(' ', end='')
print()
sleep(.01)
#mostrarLista.clear()
#mostrarLista[1][1] = 1
# aqui serve para tranformar uma lista em matrix para mostrar() conseguir mostrar
def converterPMostrar(n):
mostrarLista = [([0 for i in range(tamanhoDaLista)]) for i in range(tamanhoDaLista)]
for contador in range(len(lista)):
for i, chars in enumerate(mostrarLista[::-1]):
if lista[contador] == i: break
chars[contador] = 1
if contador == n:
chars[contador] = 2
Mostrar(mostrarLista)
mostrarLista.clear()
sleep(.02)
print(lista)
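
# Maneira() below is a plain bubble sort: adjacent elements are compared and swapped,
# and converterPMostrar() redraws the bar chart after every swap.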
def Maneira():
guardarNumero = 0
for c in range(len(lista)):
for i in range(len(lista)):
#print(i, len(lista))
if i+1 == len(lista):
continue
else:
if lista[i] > lista[i+1]:
guardarNumero = lista[i]
lista[i] = lista[i+1]
lista[i+1] = guardarNumero
converterPMostrar(i+1)
Maneira()
```
#### File: 0versoesAntigas/5-programa/programa.py
```python
from time import sleep
from random import shuffle
from os import system
class Interface:
def __init__(self):
self.f = '\33[1;0m'
self.bgblack = '\033[1;40m' # background black
self.bggreen = '\033[1;42m' # background green
self.bgBlue = '\033[1;44m' # backgournd blue
self.bgYellow = '\033[1;43m'# backgournd yellow
self.bgRed = '\033[1;41m' # backgournd red
self.bgwhite = '\033[1;107m'# background white
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# definição do tamanho da lista
self.tamanhoDaLista = 15
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
self.lista = list(range(1, self.tamanhoDaLista))
shuffle(self.lista)
# mostrar lista, serve para mostrar a lista de forma legal
def Mostrar(self, mostrarLista):
system('clear')
for cs in mostrarLista:
for c in cs:
if c == 1:
print(f'{self.bgBlue} {self.f}', end='')
elif c == 2:
print(f'{self.bggreen} {self.f}', end='')
else:
print(f' ', end='')
print()
sleep(.01)
#mostrarLista.clear()
#mostrarLista[1][1] = 1
# aqui serve para tranformar uma lista em matrix para mostrar() conseguir mostrar
def converterPMostrar(self, n):
mostrarLista = [([0 for i in range(self.tamanhoDaLista)]) for i in range(self.tamanhoDaLista)]
for contador in range(len(self.lista)):
for i, chars in enumerate(mostrarLista[::-1]):
if self.lista[contador] == i: break
chars[contador] = 1
if contador == n:
chars[contador] = 2
self.Mostrar(mostrarLista)
mostrarLista.clear()
sleep(.02)
def Maneira(self):
guardarNumero = 0
for c in range(len(self.lista)):
for i in range(len(self.lista)):
#print(i, len(lista))
if i+1 == len(self.lista):
continue
else:
if self.lista[i] > self.lista[i+1]:
guardarNumero = self.lista[i]
self.lista[i] = self.lista[i+1]
self.lista[i+1] = guardarNumero
self.converterPMostrar(i+1)
m1 = Interface()
m1.Maneira()
```
#### File: 0-versoesAntigas/torre1/torre.py
```python
from auroDesenhante import *
fim = '\033[0m'
black = '\033[40m'
red = '\033[41m'
green = '\033[42m'
yellow = '\033[43m'
blue = '\033[44m'
pink = '\033[45m'
white = '\033[107m'
def magica():
sleep(0.2)
system('clear')
# Y
# |
# |
# ---|----X
# |
# a = auro(40, 10, f'{black} {fim}')
# partes
parte3 = 20
# magica()
# a.mostrar()
# a.desenhar(f'{blue} {fim}', x=5 + parte3, y=9, qtdCasas= 9 )
# magica()
# a.mostrar()
# a.desenhar(f'{red} {fim}', x=6 + parte3, y=8, qtdCasas=7)
# magica()
# a.mostrar()
# a.desenhar(f'{yellow} {fim}', x=7 + parte3, y=7, qtdCasas=5)
# magica()
# a.mostrar()
# a.desenhar(f'{green} {fim}', x=8 + parte3, y=6, qtdCasas=3)
# magica()
# a.mostrar()
def erguer(nomeBarra, x, y, qtdCasas, erguerAte=5):
ry = 0
for i in range(1, erguerAte):
backend.desenhar(nomeBarra, x, y-i, qtdCasas)
backend.apagar(x, y-i+1, qtdCasas)
backend.mostrar()
magica()
ry = y-1
# print('X: ', x, 'Y: ', ry )
return nomeBarra, x, ry, qtdCasas
def baixar():
pass
def movimentar(nomeBarra, x, y, qtdCasas, moveAte = 5):
pass
magica()
backend = auro(40, 10, f' ')
backend.mostrar()
backend.desenhar(f'4', x=5 + parte3, y=9, qtdCasas= 9 )
magica()
backend.mostrar()
backend.desenhar(f'3', x=6 + parte3, y=8, qtdCasas=7)
magica()
backend.mostrar()
backend.desenhar(f'2', x=7 + parte3, y=7, qtdCasas=5)
magica()
backend.mostrar()
backend.desenhar('1', x=8 + parte3, y=6, qtdCasas=3)
magica()
backend.mostrar()
magica()
rnomeBarra, rx, ry, rqtdCasas = erguer('1', x=8 + parte3, y=6, qtdCasas=3)
backend.mostrar()
```
#### File: 09-contadorComChars/0-versoesAntigas/contador0.py
```python
import random
import os
#feito no window/ it was made on windows
cont = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def l():
print('=-=-'*15,'=')
for i in cont:
print(' \n', end='')
for ii in cont:
print(f'{i}{ii}', end=' ')
```
#### File: 09-contadorComChars/0-versoesAntigas/contador5.py
```python
c = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'X', 'Y']
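# twelve symbols, so the counter below effectively counts in base 12,
# with 'X' standing for ten and 'Y' for eleven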
def l():
print('\n', '=-=-'*15 +'=')
l()
pl = 0 # pula linha
part = 0
l()
while 1:
qtd=int(input('type 0 to exit\ndigite 0 p/ sair\ncount until:\ncontar ate\n decimal n:'))
if qtd == 0:
break
qtd+=1
parts = []
for i in range(0, len(str(qtd))):
parts.insert(i, 0)
for i in range (0, qtd):
for p in reversed(parts): # mostrar
print(c[p], end='')
pl += 1
if pl == 10:
print('\n')
pl = 0
parts[part] = parts[part] + 1
for ii in range(0, len(parts)-1):
print(' ', end='')
if parts[ii] == 12:
parts[ii] = 0
parts[ii+1] = parts[ii+1] + 1
l()
l()
print('\n\tfinished program/ fim de programa')
l()
```
#### File: 11-auroTUI/0auroDesenhante/2- emDev.py
```python
class auro:
def __init__(self, largura, altura, encher=0):
self.largura = largura
self.altura = altura
self.encher = encher
self.desenho = [[self.encher for ii in range(self.largura)] for i in range(self.altura)]
def desenhar(self, comOque, x=2, y=2, qtdCasas=2, ):
for i in range(qtdCasas):
self.desenho[y][x+i] = comOque
def mostrar(self):
for chars in self.desenho:
for char in chars:
print(f'{char}', end='')
print()
a = auro(20, 10, '0 ')
a.mostrar()
print('----------------------------------------')
a.desenhar(' ', x=5, y=2, qtdCasas= 5 )
a.mostrar()
```
#### File: 11-auroTUI/1tela/tela3.py
```python
r = '\033[31m' # red
b = '\033[34m' # blue
g = '\033[32m' # green
y = '\033[33m' # yellow
f = '\33[m'
bb = '\033[44m' # backgournd blue
def tela(mensagem, colunatxt=5):
espaco = bb + ' ' + f
mensagem = bb + y + mensagem
for i in range(12):
print('')
if i == 5:
print(espaco * colunatxt + mensagem + espaco*(50 - (len(mensagem)-colunatxt)), end='')
else:
for ii in range(50):
print(espaco, end='')
print()
tela('aqui tem uma mensagem escrita')
```
#### File: 12-painterPrint/0-versoesAnterioes/a1.py
```python
import keyboard
from os import system
red = '\033[31m' # red
blue = '\033[35m' # blue
green = '\033[32m' # green
yellow = '\033[33m' # yellow
f = '\33[m'
bgBlue = '\033[44m' # backgournd blue
bgYellow = '\033[43m' #backgournd yellow
bgRed = '\033[41m' #backgournd red
bgGreen = '\033[42m' # background green
bgWhite = '\033[47m' # backgroundwhite
x = y = 0
tamanhaDeX = 15
tamanhaDeY = 10
picss = list(list(bgWhite+' ' + f for i in range(tamanhaDeX))for ii in range(tamanhaDeY))
corDeLapis = green
corDePrint = bgWhite
def mostrar():
global x
global y
global corDeLapis
global corDePrint
system('clear')
print('\n')
print('space for printing// espaço p pintar')
print('aperte esc para sair// ESC to exit')
print(f'{bgRed} 1 {bgGreen} 2 {bgYellow} 3 {bgBlue} 4 {f}'+ f)
for i, pics in enumerate(picss):
for l, pic in enumerate(pics):
if i == x and l == y:
print(f'{bgWhite+corDeLapis}><', flush=True, end='')
else:
print(f'{pic}', flush=True, end='')
print(f)
mostrar()
def up():
global x
if x == 0:
return
x -= 1
mostrar()
def down():
global x
if x == tamanhaDeY:
return
x += 1
mostrar()
def left():
global y
if y == 0:
return
y -= 1
mostrar()
def right():
global y
if y == tamanhaDeX:
return
y += 1
mostrar()
def space():
global corDePrint
picss[x][y] = corDePrint + ' '
mostrar()
def lapis1(): # vermelho red
global corDeLapis
global corDePrint
corDeLapis = bgRed
corDePrint = bgRed
mostrar()
def lapis2(): # verde green
global corDeLapis
global corDePrint
corDeLapis = bgGreen
corDePrint = bgGreen
mostrar()
def lapis3(): # amarelo yellow
global corDeLapis
global corDePrint
corDeLapis = bgYellow
corDePrint = bgYellow
mostrar()
def lapis4(): # azul blue
global corDeLapis
global corDePrint
corDeLapis = bgBlue
corDePrint = bgBlue
mostrar()
keyboard.add_hotkey('up', up)
keyboard.add_hotkey('down', down)
keyboard.add_hotkey('right', right)
keyboard.add_hotkey('left', left)
keyboard.add_hotkey('w', up)
keyboard.add_hotkey('s', down)
keyboard.add_hotkey('d', right)
keyboard.add_hotkey('a', left)
keyboard.add_hotkey('1', lapis1)
keyboard.add_hotkey('2', lapis2)
keyboard.add_hotkey('3', lapis3)
keyboard.add_hotkey('4', lapis4)
keyboard.add_hotkey('space', space)
keyboard.wait('ESC')
```
#### File: Python/13-criptografiaSimples/1.py
```python
import colorama
colorama.init()
red = '\033[31m' # red
blue = '\033[34m' # blue
green = '\033[32m' # green
yellow = '\033[33m' # yellow
f = '\33[m'
bgBlue = '\033[46m' # backgournd blue
bgYellow = '\033[43m' #backgournd yellow
bgRed = '\033[41m' #backgournd red
palavras = []
palavrasN = []
tipo = []
def tprint(p='', bg=bgBlue , fg=green, op=1, tamanhoDaPalavra=0):
if tamanhoDaPalavra!= 0: # decidir o tamanho da palavra
palavrasN.append(tamanhoDaPalavra)
else:
palavrasN.append(len(p))
if op == 2: # enfeite
if p!='':
tipo.append(2)
palavras.append(bg + fg + p)
else:
tipo.append(2)
palavras.append(bg + fg + '=')
if op == 1 and p != '': # pohr texto
tipo.append(1)
palavras.append(bg + fg + ' ' + p + ' ' + ' ' * (max(palavrasN) - len(p)))
if op == 0: # iniciar
print(end='')
for i, palavra in enumerate(palavras):
if tipo[i] == 2:
print(palavra * (max(palavrasN)+2) + f)
else:
print(palavra + f)
print(f, end='')
mensagem = 'esta eh uma mensagem'
letras = []
print(f'\nmensagem a ser criptada:\n {mensagem}')
letras.extend(mensagem)
cletras = []
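# Each character is shifted one Unicode code point up (a simple Caesar-style shift);
# shifting every character of the encrypted text back down by one recovers the message.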
for letra in letras:
cletras.append(chr(ord(letra)+1))
tprint('/', op=2)
tprint(op=2)
tprint(tamanhoDaPalavra=30)
tprint('mensagem para ser criptografada:'.upper())
tprint(mensagem.title())
tprint('-', op=2)
tprint('mensagem criptada:'.upper())
tprint(''.join(cletras) )
tprint(op=2)
tprint('/', op=2)
tprint(op=0)
``` |
{
"source": "jonasht/python",
"score": 3
} |
#### File: versaoInterfaceGrafica/teste/checkbt.py
```python
from tkinter import *
from tkinter import ttk
from webbrowser import BackgroundBrowser
root = Tk()
chbt_value = BooleanVar()
lb = ttk.Label(root, text='=-=-=')
def c_evento():
print('checkbt:', chbt_value.get())
if chbt_value.get():
print('ch ativado')
lb.config(foreground='green', text='ativado')
else:
print('ch desativado')
lb.config(foreground='red', text='desativado')
root.geometry('500x500')
chbt = ttk.Checkbutton(root, text='teste', variable=chbt_value, command=c_evento)
chbt.pack(anchor=CENTER)
lb.pack(anchor=CENTER)
root.config(background='orange')
lb.config(font='gothic 40 bold', background='orange')
c_evento()
from sys import exit
root.bind('q', exit)
root.mainloop()
```
#### File: 13-criptografiaSimples/versaoInterfaceGrafica/uteis.py
```python
from cryptography.fernet import Fernet
def criptar(msg):
key = Fernet.generate_key()
f = Fernet(key)
token = f.encrypt(msg.encode())
# convertendo para string
key = key.decode('utf-8')
token = token.decode('utf-8')
return key, token
def descriptar(key, t):
key = key.encode()
print()
print('tipo:', type(t))
print('t:', t)
t = t.encode()
f = Fernet(key)
msg = f.decrypt(t)
msg = msg.decode('utf-8')
return msg
if __name__ == '__main__':
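    # Round-trip sketch added for illustration: encrypt a throwaway message with
    # criptar() and decrypt it again with descriptar(); the message text is made up.
    k, token = criptar('mensagem de teste')
    print(descriptar(k, token))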
key = '<KEY>
msg = '<KEY>
print(descriptar(key, msg))
``` |
{
"source": "jonasht/Python",
"score": 3
} |
#### File: 1-versaoTerminal/flashcardsContador3/flashcardsContador.py
```python
from Soma import *
# cores para definir
fim = '\033[0m'
black = '\033[40m'
red = '\033[31m'
green = '\033[32m'
yellow = '\033[33m'
blue = '\033[34m'
pink = '\033[35m'
white = '\033[107m'
# definindo soma
recomeçar = Soma()
começar = Soma()
def mostrar():
system('clear')
print('digite 0 para sair')
print('=-'*30+'=')
print(f'|\tCartas para ReComeçar: {green}{recomeçar.Somar()}{fim}')
print(f'|\tCartas para Começar: {blue}{começar.Somar()}{fim}')
print(f'|\tFaltam {red}{30-(recomeçar.Somar()+começar.Somar())}{fim} Total: {blue}{recomeçar.Somar()+começar.Somar()}{fim}')
while True:
mostrar()
n = input(f'|\tNumero Para ReComeçar: ')
if n == '0': break
recomeçar.set_numeroDeCartas(n)
while True:
mostrar()
n = input(f'|\tNumero Para Começar: ')
if n == '0':
break
começar.set_numeroDeCartas(n)
mostrar()
print('=-'*30+'=')
print('\n')
```
#### File: 2-versaoInterfaceGrafica/frashcardsContador-v3/frame.py
```python
from tkinter import ttk
import tkinter as tk
from tkinter.constants import LEFT, RIGHT
class Interface(ttk.Frame):
def __init__ (self, parent):
super().__init__(parent)
self.frameRecomecar = ttk.Frame(self)
self.frameComecar = ttk.Frame(self)
self.frameTotal = ttk.Frame(self)
self.lb1 = ttk.Label(self.frameRecomecar, text='Cartas para Recomeçar:', width=20, font='arial 16')
self.lb_numero1 = ttk.Label(self.frameRecomecar, text='0', foreground='purple', font='arial 16 bold', width=2)
self.lb2 = ttk.Label(self.frameComecar, text='Cartas para Começar:', width=20, font='arial 16')
self.lb_numero2 = ttk.Label(self.frameComecar, text='0', foreground='blue', font='arial 16 bold', width=2)
self.lb_falta = ttk.Label(self.frameTotal, text='Faltam:', width=8, font='arial 16')
self.lb_faltaNum = ttk.Label(self.frameTotal, text='0', foreground='red', width=2, font='arial 16 bold')
self.lb_total = ttk.Label(self.frameTotal, text=' Total:', width=9, font='arial 16')
self.lb_totalNum = ttk.Label(self.frameTotal, text='0', foreground='blue', font='arial 16 bold')
self.lb_lista1 = ttk.Label(self, text='', foreground='purple')
self.lb_lista2 = ttk.Label(self, text='', foreground='blue')
self.lb_falta.pack(side=LEFT)
self.lb_faltaNum.pack(side=LEFT)
self.lb_total.pack(side=LEFT)
self.lb_totalNum.pack(side=RIGHT)
self.lb1.pack(side=LEFT)
self.lb_numero1.pack(side=RIGHT)
self.lb2.pack(side=LEFT)
self.lb_numero2.pack(side=RIGHT)
self.frameRecomecar.pack()
self.frameComecar.pack()
self.frameTotal.pack()
self.lb_lista1.pack()
self.lb_lista2.pack()
def atualizarTotal(self, n):
self.lb_totalNum.config(text=n)
def atualizarRecomecar(self, n):
self.lb_numero1.config(text=n)
def atualizarComecar(self, n):
self.lb_numero2.config(text=n)
def atualizarRestanteDeCartas(self, n):
if n > 0:
self.lb_faltaNum.config(text=n, foreground='blue')
else:
self.lb_faltaNum.config(text=n, foreground='red')
if __name__ == '__main__':
root = tk.Tk()
root.geometry('400x300')
frame = Interface(root)
frame.pack()
root.mainloop()
```
#### File: 0-versoesAntigas/3-versao3/interfaceCadastro.py
```python
from tkinter import *
from registradorDB import Db
class FrameCadastro(Frame):
def __init__(self, container):
super().__init__(container)
self.db = Db()
# id
self.lb_id = Label(self, text='Id:', width=10)
self.lb_id_mostrar = Label(self, text=self.db.get_nextId())
self.lb_nome = Label(self, text='Nome:', width=10)
# nome
self.etd_nome = Entry(self)
self.etd_nome.bind('<Return>', self.onReturn)
# # sexo
# lbsexo = Label(self, text='sexo:')
# entdSexo = Entry(self)
# lbsexo.grid(row=2, column=1, pady=2)
# entdSexo.grid(row=2, column=2, pady=2)
# idade
self.lb_idade = Label(self, text='Idade:', width=10)
self.etd_idade = Entry(self)
self.etd_idade.bind('<Return>', self.onReturn)
# label de aviso
self.lb_aviso = Label(self, text=' ')
# botao cadatrar, limpar, sair
self.bt_cadastrar = Button(self, text='Cadastrar', command=self.cadastrar)
self.bt_limpar = Button(self, text='Limpar', width=10, command=self.limpar_Entradas)
# labels de confirmacao
self.lb_id_confirmacao = Label(self, width=15)
self.lb_nome_confirmacao = Label(self, width=15)
self.lb_idade_confirmacao = Label(self, width=15)
# --- posicoes ---------------
# id:, id_mostrar, entradaNome, lb_idade, entrada_idade
self.lb_id.grid(row=0, column=1, pady=2)
self.lb_id_mostrar.grid(row=0, column=2, pady=2)
self.lb_nome.grid(row=1, column=1, pady=2)
self.etd_nome.grid(row=1, column=2, pady=2)
self.lb_idade.grid(row=3, column=1, pady=2)
self.etd_idade.grid(row=3, column=2, pady=2)
# lb aviso
self.lb_aviso.grid(row=4, column=1, columnspan=3, padx=2, pady=2, sticky='news')
# botao cadastrar, limpar, sair
self.bt_cadastrar.grid(row=5, column=2, columnspan=3, padx=2, pady=2, sticky='news')
self.bt_limpar.grid(row=5, column=1, padx=2, pady=2)
# confirmacoes
self.lb_id_confirmacao.grid(row=0, column=3, padx=2, pady=2)
self.lb_nome_confirmacao.grid(row=1, column=3, padx=2, pady=2)
self.lb_idade_confirmacao.grid(row=3, column=3)
def ehNumero(self, n):
try:
int(n)
return True
except:
return False
def limpar_Entradas(self):
self.etd_nome.delete(0, END)
self.etd_idade.delete(0, END)
def proximoId(self):
id = int(self.db.id)+1
return id
# botao return (key=enter)
def onReturn(self, evento):
self.cadastrar()
def cadastrar(self):
nome = self.etd_nome.get()
idade = self.etd_idade.get()
if not self.ehNumero(idade) and nome:
self.lb_aviso.config(text='idade precisa ser numero', fg='red')
self.etd_idade.config(relief=SOLID, highlightbackground='red')
elif nome and idade:
self.db.nome = nome
self.db.idade = idade
self.db.cadastrar()
self.db.mostrar()
print(self.db.get_db()[self.db.id])
# mostrando a confirmacao na tela do que foi salvo
self.lb_aviso.config(text='cadastro feito com sucesso', fg='green')
self.lb_id_confirmacao.config(text='Id: '+self.db.id, fg='green')
self.lb_nome_confirmacao.config(
text='Nome: '+self.db.get_db()[self.db.id]['nome'], fg='green')
self.lb_idade_confirmacao.config(
text='Idade: '+self.db.get_db()[self.db.id]['idade'], fg='green')
self.lb_id_mostrar.config(text=self.db.get_nextId())
# limpar todas as entradas/entry
self.limpar_Entradas()
else:
self.lb_aviso.config(text='preencha todos os pontos', fg='red')
class Cadastro(Tk):
def __init__(self):
super().__init__()
self.title('cadastro')
self.geometry('407x200+651+300')
self.frame = FrameCadastro(self)
self.frame.grid()
if __name__ == '__main__':
cadastro = Cadastro()
cadastro.mainloop()
```
#### File: 16-jogoDaVelha/versao-terminal-EmDev/teste.py
```python
import colors as c
from uteis import *
from random import shuffle
# colocar numero no local de matriz
def colocarNumeros(matriz):
matrizNum = [[0 for _ in range(3)] for _ in range(3)]
for i, ms in enumerate(matriz):
for ii, m in enumerate(ms):
if m == 'X':
matrizNum[i][ii] = 3
elif m == 'O':
matrizNum[i][ii] = 2
else:
matrizNum[i][ii] = 0
print('colocarNumero, matrizNum:', matrizNum)
return matrizNum
# somar numeros, de todas possibidades
def somarNumeros(matrizNum):
matrizNum = colocarNumeros(matrizNum)
numeros = []
soma = 0
for i, ms in enumerate(matrizNum):
for ii, m in enumerate(ms):
soma += matrizNum[ii][i]
numeros.append(soma)
soma = 0
soma = 0
# /
for i in range(3):
soma += matrizNum[2-i][i]
# soma todas as fileiras
numeros.append(soma)
for ms in matrizNum:
numeros.append(sum(ms))
soma=0
# \
for i in range(3):
soma += matrizNum[i][i]
numeros.append(soma)
print('somar numeros: numeros:', numeros)
return numeros, matrizNum
# encontrar espaca na matrizNum para poder colocar em uma determinada fileira
def encontrarEspoco(opcao, matrizNum):
fileira = []
for i, ms in enumerate(matrizNum):
# ||| 1,2,3
if opcao == 0:
if matrizNum[i][0] == 0:
fileira.append([i, 0])
elif opcao == 1:
if matrizNum[i][1] == 0:
fileira.append([i, 1])
elif opcao == 2:
if matrizNum[i][2] == 0:
fileira.append([i, 2])
# / 3
elif opcao == 3:
            if matrizNum[2-i][i] == 0:
fileira.append([2-i, i])
# --- 4
elif opcao == 4:
if matrizNum[0][i] == 0:
fileira.append([0, i])
elif opcao == 5:
if matrizNum[1][i] == 0:
fileira.append([1, i])
elif opcao == 6:
if matrizNum[2][i] == 0:
fileira.append([2, i])
# =======================================
# \ 7
elif opcao == 7:
if matrizNum[i][i] == 0:
fileira.append([i, i])
shuffle(fileira)
print('fileira:', fileira)
return fileira[0]
def fazerJogada(matriz):
numeros, matrizNum = somarNumeros(matriz)
maior = 0
for char in numeros:
if (char > maior and char <= 6) and char != 5:
maior = char
print('maior:', maior)
fileira = numeros.index(maior)
print('retornarEspaço: fileira:', fileira, matrizNum)
return encontrarEspoco(fileira, matrizNum)
# fazer a matriz
matriz = [[str(i)+str(ii) for ii in range(3)] for i in range(3)]
# mostrar(matriz)
# colocar('O', 0, 0, matriz)
# mostrar(matriz)
# colocar('x', 0, 1, matriz)
# mostrar(matriz)
# colocar('o', 1, 0, matriz)
# # colocar('x', 1, 1, matriz)
# x, y = fazerJogada(matriz)
# print('x:', x, 'y:', y)
# mostrar(matriz)
colocar('o', 0, 0, matriz)
mostrar(matriz)
x, y = fazerJogada(matriz)
colocar('x', x, y, matriz)
mostrar(matriz)
colocar('o', 1, 1, matriz)
mostrar(matriz)
x, y = fazerJogada(matriz)
colocar('x', x, y, matriz)
mostrar(matriz)
colocar('o', 0, 2, matriz)
mostrar(matriz)
x, y = fazerJogada(matriz)
colocar('x', x, y, matriz)
mostrar(matriz)
``` |
{
"source": "jonasht/python",
"score": 3
} |
#### File: 18-gerador_de_documento/frames/cns.py
```python
from tkinter import ttk
from tkinter import *
from validate_docbr import CNS
import pyperclip as pc
class Fr_CNS(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.CNS = CNS()
self.CNS_num = ''
# CNS =================================================
self.lbfr = ttk.Labelframe(self, text='CNS', border=10)
self.etd = ttk.Entry(self.lbfr)
self.bt_gerar = ttk.Button(self.lbfr, text='Gerar', command=self.gerar)
self.chbt_mask = ttk.Checkbutton(self.lbfr, text='mask', command=self.chbt_Evento)
self.bt_copy = ttk.Button(self.lbfr, text='Copiar', command=self.copiar)
self.etd.grid(row=0, column=0, padx=2, pady=5, columnspan=2, sticky=EW)
self.bt_gerar.grid(row=1, column=1, padx=2, pady=5)
self.chbt_mask.grid(row=1, column=2, padx=2, pady=5)
self.bt_copy.grid(row=1, column=0, padx=2, pady=5)
self.lbfr.pack()
# gerando =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
self.gerar()
# desativando checkbox
self.chbt_mask.state(['!alternate'])
def gerar(self):
self.etd.delete(0, END)
self.CNS_num = self.CNS.generate()
self.etd.insert(0, self.CNS_num)
if 'selected' in self.chbt_mask.state():
self.chbt_Evento()
def copiar(self):
pc.copy(self.etd.get())
def chbt_Evento(self):
if 'selected' in self.chbt_mask.state():
self.etd.delete(0, END)
self.etd.insert(0, self.CNS.mask(self.CNS_num))
else:
self.etd.delete(0, END)
self.etd.insert(0, self.CNS_num)
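# illustrative standalone runner (not part of the original file): shows the
# frame in a bare Tk window
if __name__ == '__main__':
    root = Tk()
    Fr_CNS(root).pack(padx=10, pady=10)
    root.mainloop()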
``` |
{
"source": "jonasht/Python",
"score": 2
} |
#### File: 19-programaDeVendas-EmDev/frames/frameCadastroCliente.py
```python
from tkinter import ttk
import tkinter as tk
from tkinter.constants import BOTH, END, LEFT, RIGHT, W, EW
# import func_clientes as fc
import func.clientes as fc
from validate_docbr import CPF
class FrameCadastroCliente(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.lbfr_cadastrarCliente = ttk.LabelFrame(self, text='Cadastrar Cliente')
self.lb_nome = ttk.Label(self.lbfr_cadastrarCliente, text='Nome:')
self.etd_nome = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_cpf = ttk.Label(self.lbfr_cadastrarCliente, text='CPF:')
self.etd_cpf = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_uf = ttk.Label(self.lbfr_cadastrarCliente, text='UF:')
self.etd_uf = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_cidade = ttk.Label(self.lbfr_cadastrarCliente, text='Cidade:')
self.etd_cidade = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_rua = ttk.Label(self.lbfr_cadastrarCliente, text='Rua:')
self.etd_rua = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_numeroCasa = ttk.Label(self.lbfr_cadastrarCliente, text='Numero:')
self.etd_numeroCasa = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_telefone = ttk.Label(self.lbfr_cadastrarCliente, text='Telefone:')
self.etd_telefone = ttk.Entry(self.lbfr_cadastrarCliente)
self.lb_email = ttk.Label(self.lbfr_cadastrarCliente, text='Email:')
self.etd_email = ttk.Entry(self.lbfr_cadastrarCliente)
self.fr_bts = ttk.Frame(self)
self.bt_cadastrar = ttk.Button(self.fr_bts, text='Cadastrar', command=self.cadastrar)
self.bt_resetar = ttk.Button(self.fr_bts, text='Resetar', command=self.resetar)
# label aviso
self.lb_aviso = ttk.Label(self, text='')
# -------------------------------------------------
self.lb_nome.grid(row=0, column=0, padx=5, pady=2)
self.etd_nome.grid(row=0, column=1, padx=5, pady=2)
self.lb_cpf.grid(row=1, column=0, padx=5, pady=2)
self.etd_cpf.grid(row=1, column=1, padx=5, pady=2)
self.lb_uf.grid(row=2, column=0, padx=5, pady=2)
self.etd_uf.grid(row=2, column=1, padx=5, pady=2)
self.lb_cidade.grid(row=3, column=0, padx=5, pady=2)
self.etd_cidade.grid(row=3, column=1, padx=5, pady=2)
self.lb_rua.grid(row=4, column=0, padx=5, pady=2)
self.etd_rua.grid(row=4, column=1, padx=5, pady=2)
self.lb_numeroCasa.grid(row=5, column=0, padx=5, pady=2)
self.etd_numeroCasa.grid(row=5, column=1, padx=5, pady=2)
self.lb_telefone.grid(row=6, column=0, padx=5, pady=2)
self.etd_telefone .grid(row=6, column=1, padx=5, pady=2)
self.lb_email.grid(row=7, column=0, padx=5, pady=2)
self.etd_email.grid(row=7, column=1, padx=5, pady=2)
self.bt_cadastrar.pack(side=RIGHT, fill=BOTH, expand=True, padx=1, pady=2)
self.bt_resetar.pack(side=LEFT, fill=BOTH, expand=True, padx=1, pady=2)
# colocando labelFrame
self.lbfr_cadastrarCliente.pack()
self.fr_bts.pack(fill=BOTH, expand=True)
self.lb_aviso.pack()
# focar no nome
self.etd_nome.focus()
def cadastrar(self):
nome = self.etd_nome.get()
cpf = self.etd_cpf.get()
uf = self.etd_uf.get()
cidade = self.etd_cidade.get()
rua = self.etd_rua.get()
numero = self.etd_numeroCasa.get()
telefone = self.etd_telefone.get()
email = self.etd_email.get()
# print('dados:')
# print('nome:', nome, 'cpf:', cpf )
# print('uf:', uf, 'cidade:', cidade, 'rua:', rua, numero)
# print('fone:', telefone, 'email:', email)
v_cpf = CPF()
if cpf == '' or v_cpf.validate(cpf):
if nome != '':
fc.add_(nome, cpf, uf, cidade, rua, numero, telefone, email)
self.resetar()
self.lb_aviso.config(text='cadastro feito com sucesso', foreground='green')
else:
self.lb_aviso.config(text='campo nome obrigatorio', foreground='red')
else:
self.lb_aviso.config(text='CPF invalido', foreground='red')
def resetar(self):
self.etd_nome.delete(0, END)
self.etd_cpf.delete(0, END)
self.etd_uf.delete(0, END)
self.etd_cidade.delete(0, END)
self.etd_rua.delete(0, END)
self.etd_numeroCasa.delete(0, END)
self.etd_telefone.delete(0, END)
self.etd_email.delete(0, END)
self.etd_nome.focus()
if __name__ == '__main__':
root = tk.Tk()
frame = FrameCadastroCliente(root)
frame.pack()
root.geometry('600x500')
root.mainloop()
```
#### File: Python/19-programaDeVendas-EmDev/frameVenda_lbCliente.py
```python
from tkinter import ttk
import tkinter as tk
class Fr_lbCliente(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.lbfr_dadosClientes = ttk.LabelFrame(self, text='Info clientes')
self.fr1 = ttk.Frame(self.lbfr_dadosClientes)
self.fr2 = ttk.Frame(self.lbfr_dadosClientes)
self.lb_id = ttk.Label(self.fr1, text='id:', width=7)
self.lb_nome = ttk.Label(self.fr1, text='nome:', width=7)
self.lb_cpf = ttk.Label(self.fr1, text='cpf:', width=7)
self.lb_uf = ttk.Label(self.fr1, text='uf:', width=7)
self.lb_cidade = ttk.Label(self.fr1, text='cidade:', width=7)
self.lb_rua = ttk.Label(self.fr1, text='rua:', width=7)
self.lb_numero = ttk.Label(self.fr1, text='numero:', width=7)
self.lb_telefone = ttk.Label(self.fr1, text='telefone:', width=7)
self.lb_email = ttk.Label(self.fr1, text='email:', width=7)
self.lb_id.grid(row=0, column=0, padx=5, pady=2)
self.lb_nome.grid(row=1, column=0, padx=5, pady=2)
self.lb_cpf.grid(row=2, column=0, padx=5, pady=2)
self.lb_uf.grid(row=3, column=0, padx=5, pady=2)
self.lb_cidade.grid(row=4, column=0, padx=5, pady=2)
self.lb_rua.grid(row=5, column=0, padx=5, pady=2)
self.lb_numero.grid(row=6, column=0, padx=5, pady=2)
self.lb_telefone.grid(row=7, column=0, padx=5, pady=2)
self.lb_email.grid(row=8, column=0, padx=5, pady=2)
self.lb_idInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_nomeInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_cpfInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_ufInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_cidadeInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_ruaInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_numeroInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_telefoneInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_emailInfo = ttk.Label(self.fr2, text='', width=30)
self.lb_idInfo.grid(row=0, column=1, padx=5, pady=2)
self.lb_nomeInfo.grid(row=1, column=1, padx=5, pady=2)
self.lb_cpfInfo.grid(row=2, column=1, padx=5, pady=2)
self.lb_ufInfo.grid(row=3, column=1, padx=5, pady=2)
self.lb_cidadeInfo.grid(row=4, column=1, padx=5, pady=2)
self.lb_ruaInfo.grid(row=5, column=1, padx=5, pady=2)
self.lb_numeroInfo.grid(row=6, column=1, padx=5, pady=2)
self.lb_telefoneInfo.grid(row=7, column=1, padx=5, pady=2)
self.lb_emailInfo.grid(row=8, column=1, padx=5, pady=2)
self.fr1.grid(row=0, column=0)
self.fr2.grid(row=0, column=1)
self.lbfr_dadosClientes.pack()
def inserir_dados(self, dados):
# dados = dados[0]
# print(dados)
id = dados[0]
nome = dados[1]
cpf = dados[2]
uf = dados[3]
cidade = dados[4]
rua = dados[5]
numero = dados[6]
telefone = dados[7]
email = dados[8]
# print('id:', id)
# print('nome:', nome)
# print('cpf:', cpf)
# print('uf:', uf)
# print('cidade:', cidade)
# print('rua:', rua)
# print('numero:', numero)
# print('telefone:', telefone)
# print('email:', email)
self.lb_idInfo.config(text=id)
self.lb_nomeInfo.config(text=nome)
self.lb_cpfInfo.config(text=cpf)
self.lb_ufInfo.config(text=uf)
self.lb_cidadeInfo.config(text=cidade)
self.lb_ruaInfo.config(text=rua)
self.lb_numeroInfo.config(text=numero)
self.lb_telefoneInfo.config(text=telefone)
self.lb_emailInfo.config(text=email)
def deletar_dados(self):
self.lb_idInfo.config(text='')
self.lb_nomeInfo.config(text='')
self.lb_cpfInfo.config(text='')
self.lb_ufInfo.config(text='')
self.lb_cidadeInfo.config(text='')
self.lb_ruaInfo.config(text='')
self.lb_numeroInfo.config(text='')
self.lb_telefoneInfo.config(text='')
self.lb_emailInfo.config(text='')
if __name__ == '__main__':
dados = (5, 'Henrique', '522', 'SP', 'CAMPINAS',
'<NAME>', '896', '1938363132',
'<EMAIL>')
root = tk.Tk()
root.geometry('500x500')
frame = Fr_lbCliente(root)
frame.inserir_dados(dados)
frame.pack()
root.mainloop()
```
#### File: Python/19-programaDeVendas-EmDev/frameVenda_lbProduto.py
```python
from tkinter import Tk, Text, ttk
from tkinter.constants import ANCHOR, BOTTOM, END, LEFT, NW, RIGHT, SW, W
class Fr_lbProduto(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.lbfr_produto = ttk.Labelframe(self, text='Info Produto')
self.fr = ttk.Frame(self.lbfr_produto)
self.fr1 = ttk.Frame(self.fr)
self.fr2 = ttk.Frame(self.fr)
self.fr_descricao = ttk.Frame(self.lbfr_produto)
self.lb_id = ttk.Label(self.fr1, text='id:', width=10)
self.lb_nome = ttk.Label(self.fr1, text='nome:', width=10)
self.lb_marca = ttk.Label(self.fr1, text='marca:', width=10)
self.lb_qtd = ttk.Label(self.fr1, text='Qtd:', width=10)
self.lb_preco = ttk.Label(self.fr1, text='R$:', width=10)
self.lb_descricao = ttk.Label(self.fr_descricao, text='descrição:', width=10)
self.lb_id.grid(row=0, column=0)
self.lb_nome.grid(row=1, column=0)
self.lb_marca.grid(row=2, column=0)
self.lb_qtd.grid(row=3, column=0)
self.lb_preco.grid(row=4, column=0)
self.lb_descricao.grid(row=0, column=0)
self.lb_idInfo = ttk.Label(self.fr2, text='', width=25)
self.lb_nomeInfo = ttk.Label(self.fr2, text='', width=25)
self.lb_marcaInfo = ttk.Label(self.fr2, text='', width=25)
self.lb_qtdInfo = ttk.Label(self.fr2, text='', width=25)
self.lb_precoInfo = ttk.Label(self.fr2, text='', width=25)
self.txt_discricao = Text(self.fr_descricao, width=35, height=6)
self.lb_idInfo.grid(row=0, column=1)
self.lb_nomeInfo.grid(row=1, column=1)
self.lb_marcaInfo.grid(row=2, column=1)
self.lb_qtdInfo.grid(row=3, column=1)
self.lb_precoInfo.grid(row=4, column=1)
self.txt_discricao.grid(row=1, column=0)
self.fr1.pack(side=LEFT, anchor=NW)
self.fr2.pack(side=LEFT, anchor=NW)
self.fr.pack()
self.fr_descricao.pack(side=BOTTOM)
self.lbfr_produto.pack()
def inserir_dados(self, dados):
id = dados[0]
nome = dados[1]
marca = dados[2]
qtd = dados[3]
preco = dados[4]
descricao = dados[5]
# print('id:', id)
# print('nome:', nome)
# print('marca:', marca)
# print('qtd:', qtd)
# print('preco:', preco)
# print('descricao:', descricao)
self.lb_idInfo.config(text=id)
self.lb_nomeInfo.config(text=nome)
self.lb_marcaInfo.config(text=marca)
self.lb_qtdInfo.config(text=qtd)
self.lb_precoInfo.config(text=preco)
# self.txt_discricao.config(text=descricao)
self.txt_discricao.delete(1.0, END)
self.txt_discricao.insert(1.0, descricao)
def deletar_dados(self):
self.lb_idInfo.config(text='')
self.lb_nomeInfo.config(text='')
self.lb_marcaInfo.config(text='')
self.lb_qtdInfo.config(text='')
self.lb_precoInfo.config(text='')
# self.txt_discricao.config(text='')
self.txt_discricao.delete(1.0, END)
if __name__ == '__main__':
# print('so um teste')
root = Tk()
root.geometry('500x500')
dados = (5, 'Iphone', 'Apple', 5254, 20000.0, 'um smartphone caro, que vem carregador\n\n\n\n')
frame = Fr_lbProduto(root)
frame.inserir_dados(dados)
frame.pack()
root.mainloop()
```
#### File: Python/19-programaDeVendas-EmDev/frameVenda_treeVenda.py
```python
from tkinter import ttk
import tkinter as tk
from tkinter.constants import END, EW, NS, VERTICAL
class Fr_treeVenda(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
# Treeview produto ------------------------------------
# definindo colunas
self.colunas = ['cod', 'nome', 'marca', 'preco', 'x', 'total']
self.tree_venda = ttk.Treeview(self, columns=self.colunas, show='headings')
# definindo heading
self.tree_venda.heading('cod', text='ID')
self.tree_venda.heading('nome', text='Nome')
self.tree_venda.heading('marca', text='Marca')
self.tree_venda.heading('preco', text='Preço')
self.tree_venda.heading('x', text='X')
self.tree_venda.heading('total', text='Total')
# definindo tamanho da coluna
self.tree_venda.column('cod', width=20)
self.tree_venda.column('nome', width=125)
self.tree_venda.column('marca', width=90)
self.tree_venda.column('preco', width=100)
self.tree_venda.column('x', width=40)
self.tree_venda.column('total', width=100)
self.scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.tree_venda.yview)
self.tree_venda.grid(row=1, column=0, columnspan=1)
self.scroll.grid(row=1, column=1, rowspan=1, sticky=NS)
# total de tudo
self.fr_total = ttk.Frame(self)
self.total = 0
self.lb_total = ttk.Label(self.fr_total, text='Total:')
self.lb_totalInfo = ttk.Label(self.fr_total, text='----')
self.lb_total.grid(row=0, column=0)
self.lb_totalInfo.grid(row=0, column=1)
self.fr_total.grid(row=2, column=0)
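    # note: the three handlers below reference attributes that are never
    # defined in this frame (self.codigo, self.etd_qtd, self.etd_preco,
    # self.inserir_dados, self.mostrar_tree); they appear to be leftovers from
    # a sibling frame and are not exercised by the __main__ demo at the bottom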
def editar_dados(self) -> None:
# print(self.codigo)
if self.codigo != '':
codigo = self.codigo
qtd =self.etd_qtd.get()
preco =self.etd_preco.get()
# add dados =-=-=-=-=-=-=-=-=-=-=-=-=
# print('codigo:', codigo)
# # print('nome:', nome)
# # print('marca:', marca)
# print('qtd:', qtd)
# print('preco:', preco)
self.deletar_tree()
self.mostrar_tree()
def item_selected(self, event):
for selected_item in self.tree_venda.selection():
item = self.tree_venda.item(selected_item)
record = item['values']
# print(record)
self.codigo = record[0]
self.inserir_dados()
def digitar_evento(self, event):
variavel = event.widget.get()
# print(variavel)
# deletar tree view
self.deletar_tree()
# mostrar treeview com a palavra digitada
self.mostrar_tree(palavras=variavel)
def deletar_tree(self):
# deletar toda a tree view
self.tree_venda.delete(*self.tree_venda.get_children())
def adicionar(self, d):
self.total = float(self.total) + float(d[5])
self.tree_venda.insert('', END, values=d)
self.lb_totalInfo.config(text=f'{self.total:.2f}')
def get_items(self) -> list:
listas = []
for line in self.tree_venda.get_children():
listas.append(self.tree_venda.item(line)['values'])
return listas
if __name__ == '__main__':
root = tk.Tk()
frame = Fr_treeVenda(root)
frame.pack()
d = [1, 'celular', 'sony', 2000.0, 1, 2000.0]
frame.adicionar(d)
d = [2, 'celular', 'sony', 1000.0, 2, 2000.0]
frame.adicionar(d)
print(frame.get_items())
# print(frame.tree_venda.get_children())
# for line in frame.tree_venda.get_children():
# print(frame.tree_venda.item(line)['values'])
# for value in frame.tree_venda.item(line)['values']:
# print(value)
root.geometry('800x800')
root.mainloop()
```
#### File: 19-programaDeVendas-EmDev/teste/cpf.py
```python
from validate_docbr import CPF
from colorama.ansi import Fore
from random import randint
def validar_cpf(cpf):
va = CPF()
if cpf == '' or va.validate(cpf):
print(Fore.GREEN+'cadastro feito com sucesso', Fore.RESET)
else:
print(Fore.RED+'erro de cpf', Fore.RESET)
cpf1 = ''
cpf2 = '123'
# cpf valido
cpf3 = '38218758100'
# outro cpf
cpf4 = '821875810d0'
validar_cpf(cpf1)
validar_cpf(cpf2)
validar_cpf(cpf3)
validar_cpf(cpf4)
```
#### File: teste/testeSomaPreco/0.py
```python
import tkinter as tk
from tkinter import ttk
from tkinter.constants import END
class Fr(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.valor = 9
ttk.Label(self, text='rtee').pack()
self.lb_valor = ttk.Label(self, text=self.valor)
self.etd = ttk.Entry(self)
self.lb_valor.pack()
self.etd.pack()
self.etd.insert(0, 1)
self.etd.bind('<KeyRelease>', self.evento_somar)
def evento_somar(self, event):
# serve para somar a entrada do valor quando colocado (tempo real)
valor_etd = self.etd.get()
if not valor_etd:
print('nenhum valor')
self.lb_valor.config(text=self.valor)
elif valor_etd.isnumeric():
print(valor_etd, 'tipo:', type(valor_etd))
valor_etd = int(valor_etd)
self.lb_valor.config(text=valor_etd*self.valor)
else:
# digete apenas numeros
print('por favor digite apenas numeros')
def main():
root = tk.Tk()
frame = Fr(root)
frame.pack()
root.geometry('500x500')
root.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "jonasht/python",
"score": 3
} |
#### File: python/20-leitorDeTexto/1.py
```python
import gtts
from playsound import playsound
def speak():
with open('frase.txt', 'r') as arq:
for linha in arq:
# frase = gtts.gTTS(linha, lang='pt-br')
frase = gtts.gTTS(linha, lang='pt-br')
frase.save('fraseTeste.mp3')
playsound('./fraseTeste.mp3')
playsound('./musica.mp3')
speak()
```
#### File: python/20-leitorDeTexto/main.py
```python
from tkinter import END, EW, BooleanVar, ttk, Tk, Text
import func
class Fr(ttk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.event_ch = BooleanVar()
self.bt_read = ttk.Button(self, text='ler')
self.bt_read.config(command=self.bt_press)
self.txt = Text(self)
self.txt.grid(row=0, column=0, pady=3)
self.bt_read.grid(row=1, column=0, padx=3, ipady=2, sticky=EW)
self.txt.config(font='arial 15 bold')
def bt_press(self):
txt = self.txt.get('1.0', END)
print(txt)
print(txt.split('\n'))
func.read(txt)
if __name__ == '__main__':
root = Tk()
root.title('leitorDeTexto')
fr = Fr(root)
fr.pack()
root.geometry('890x640')
root.mainloop()
``` |
{
"source": "jonasht/Python",
"score": 4
} |
#### File: 21-conversorNumerico/frames/uteis.py
```python
def dec_to_base(num, base): #base maxima - 36
base_num = ''
while num>0:
dig = int(num%base)
if dig < 10:
base_num += str(dig)
else:
base_num += chr(ord('A')+dig-10)
num //= base
base_num = base_num[::-1]
return base_num
def to_dec(num):
if '0b' in num:
return int(num, 2)
elif '0o' in num:
return int(num, 8)
elif '0x' in num:
return int(num, 16)
else:
        return int(num)
if __name__ == '__main__':
var = 10
print(var)
varbin = bin(var)
print(to_dec(varbin))
print()
varoct = oct(var)
print('oct:', varoct)
print('convertido:', to_dec(varoct))
print()
varhex = hex(var)
print('hex:', varhex)
print('convertido:', to_dec(varhex))
print()
varduo = dec_to_base(var, 12)
print('duodecimal:', varduo)
print('convertido:', int(varduo, 12))
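    # illustrative base-36 round trip (the highest base the helper supports)
    var36 = dec_to_base(1295, 36)   # 1295 == 'ZZ' in base 36
    print('base36:', var36, 'convertido:', int(var36, 36))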
``` |
{
"source": "jonasht/python",
"score": 3
} |
#### File: 23-noName-emDev/teste/testImg.py
```python
from tkinter import *
from PIL import Image, ImageTk
class Window(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.pack(fill=BOTH, expand=1)
load = Image.open("./placas de transito/R-1.jpg")
resize_img = load.resize((200, 200))
render = ImageTk.PhotoImage(resize_img)
img = Label(self, image=render)
img.image = render
img.place(x=0, y=0)
if __name__ == '__main__':
root = Tk()
app = Window(root)
root.geometry("1000x1000")
root.mainloop()
```
#### File: python/24-programaDeVendas-EmDev/frameVenda_frFinalizacao.py
```python
from tkinter import ttk
import tkinter as tk
from tkinter.constants import DISABLED, END, EW, NORMAL
from func.venda import add_venda, add_entregas
class Fr_finalizacao(ttk.Frame):
def __init__(self, parent, con):
super().__init__(parent)
self.con = con
self.chbt_cpf = ttk.Checkbutton(self, text='CPF na nota', command=self.chbt_cpfEvento)
self.etd_cpf = ttk.Entry(self)
self.chbt_entrega = ttk.Checkbutton(self, text='É para a entrega')
self.bt_finalizar = ttk.Button(self, text='Finalizar', command=self.finalizar_evento)
self.chbt_cpf.grid(padx=5, pady=2)
self.etd_cpf.grid(padx=5, pady=2)
self.chbt_entrega.grid(padx=5, pady=2)
self.bt_finalizar.grid(sticky=EW)
self.chbt_cpf.state(['!alternate'])
self.chbt_entrega.state(['!alternate'])
def chbt_cpfEvento(self):
cpf = self.con.dados_cliente[2]
if 'selected' in self.chbt_cpf.state():
self.etd_cpf.config(state=NORMAL)
self.etd_cpf.delete(0, END)
self.etd_cpf.insert(0, cpf)
else:
self.etd_cpf.delete(0, END)
self.etd_cpf.config(state=DISABLED)
def finalizar_evento(self):
# gravar dados
# pegar items/produtos p registrar a venda
dados_produto = self.con.get_itemsTreeVenda()
dados_cliente = self.con.dados_cliente
total = self.con.get_totalTreeVenda()
# adicionando no banco de dados a venda
add_venda(
dados_cliente=dados_cliente,
            dados_produto=dados_produto,
total=total
)
self.etd_cpf.delete(0, END)
self.chbt_cpf.state(['!selected'])
self.chbt_entrega.state(['!selected'])
self.con.apagar_tudo()
if __name__ == '__main__':
root = tk.Tk()
frame = Fr_finalizacao(root, root)
frame.pack()
root.geometry('500x500')
root.mainloop()
``` |
{
"source": "jonasht/pythonEstudos",
"score": 3
} |
#### File: 01-coisas-E-Estudos/0diritories0to9/02.py
```python
def oi(nome, maisculo=False):
if maisculo:
msg = f'oi, {nome.upper()}'
else:
msg = f'oi, {nome}'
return msg
print(oi('jonas'))
print(oi('jonas', 1))
print(oi('jonas', 'sim'))
```
#### File: alura/1-python3IntroOrientacaoObjetos/cliente.py
```python
class Cliente:
def __init__(self, nome) -> None:
self.__nome = nome
@property
def nome(self):
print('chamando property')
return self.__nome.title()
@nome.setter
def nome(self, novoNome):
print('chamando setter')
self.__nome = novoNome
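# illustrative usage (not part of the original file): exercises the property
# and its setter
if __name__ == '__main__':
    cliente = Cliente('jonas')
    print(cliente.nome)        # property -> 'Jonas'
    cliente.nome = 'henrique'  # setter
    print(cliente.nome)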
```
#### File: alura/1-python3IntroOrientacaoObjetos/conta.py
```python
from colorama.ansi import Back, Fore, Style
class Conta:
def __init__(self, numero, titular, saldo, limite ):
self.__numero = numero
self.__titular = titular.title()
self.__saldo = float(saldo)
self.__limite = float(limite)
self.__codigoBanco = '001'
def extrato(self):
print('=-'*20+'=')
print(Fore.BLUE+ f' nome: {Fore.RESET}{self.__titular:^5}')
saldo = f'{self.__saldo:.2f}'
print(Fore.BLUE+ f'saldo: {Fore.GREEN}R${saldo:^5}{Fore.RESET}')
print('=-'*20+'=')
def depositar(self, valor):
self.__saldo += valor
def __pode_sacar(self, valorASacar):
valorDisponivelASacar = self.__saldo + self.__limite
return True if valorASacar <= valorDisponivelASacar else False
def sacar(self, valor):
if self.__pode_sacar(valor):
self.__saldo -= valor
else:
print(f'o vaor {valor} passou o limite')
def transferir(self, destino, valor):
self.sacar(valor)
destino.depositar(valor)
print('=-'*20+'=')
print(f'tranferencia:\n',
f'\t de: {self.__titular:^5}\n',
f'\tpara: {destino.__titular:^5}\n'
f'feita com {Fore.GREEN}sucesso{Fore.RESET}')
print('=-'*20+'=')
@property
def saldo(self):
return self.__saldo
@property
def titular(self):
return self.__titular
@property
def limite(self):
return self.__limite
@limite.setter
def limite(self, NovoLimite):
self.__limite = float(NovoLimite)
@staticmethod
def codigoBanco():
return '001'
@staticmethod
def codigoBancos():
return {'BB':'001', 'caixa':'104', 'bradesco':'237'}
if __name__ == '__main__':
conta1 = Conta(123, 'jonas', 520.0, 1000.0)
conta1.extrato()
conta2 = Conta(123, 'henrique', 520.0, 1000.0)
conta2.extrato()
conta1.transferir(destino=conta2, valor=10)
conta1.limite
```
#### File: 2-python3AvacandoNaOrientacaoAObjetos/01-/modulo.py
```python
class Filme:
def __init__(self, nome, ano, duracao):
self.nome = nome
self.ano = ano
self.duracao = duracao
class Serie:
def __init__(self, nome, ano, temporadas):
self.nome = nome
self.ano = ano
self.temporada = temporadas
vingadores = Filme('vigadores - a guerra', 2020, 160)
print(f'nome: {vingadores.nome} - ano: {vingadores.ano} - duracao: {vingadores.duracao}')
gameOf = Serie('game of thrones', 2010, 9)
print(f'Nome: {gameOf.nome} - ano: {gameOf.ano} - temporadas: {gameOf.temporada}')
```
#### File: mqtt/0coisas/1.py
```python
import paho.mqtt.client as mqtt
import sys
#definicoes:
Broker = "iot.eclipse.org"
PortaBroker = 1883
KeepAliveBroker = 60
TopicoSubscribe = "PAHOMQTTRaspPi3" #dica: troque o nome do topico por algo "unico",
#Dessa maneira, ninguem ira saber seu topico de
#subscribe e interferir em seus testes
#Callback - conexao ao broker realizada
def on_connect(client, userdata, flags, rc):
print("[STATUS] Conectado ao Broker. Resultado de conexao: "+str(rc))
#faz subscribe automatico no topico
client.subscribe(TopicoSubscribe)
#Callback - mensagem recebida do broker
def on_message(client, userdata, msg):
MensagemRecebida = str(msg.payload)
print("[MSG RECEBIDA] Topico: "+msg.topic+" / Mensagem: "+MensagemRecebida)
#programa principal:
try:
print("[STATUS] Inicializando MQTT...")
#inicializa MQTT:
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(Broker, PortaBroker, KeepAliveBroker)
client.loop_forever()
except KeyboardInterrupt:
    print("\nCtrl+C pressionado, encerrando aplicacao e saindo...")
sys.exit(0)
```
#### File: paraVerComoFuncionaAlgumasCoisas/projetoUm-emDev/opcua_generalFunc.py
```python
import sqlite3
def set_delayBetweenScan(delay):
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute("""
UPDATE tbl_opcua_general
SET delayBetweenScan = ?
""", (delay, ))
banco.commit()
banco.close()
def get_delayBetweenScan():
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute('SELECT delayBetweenScan FROM tbl_opcua_general')
retornar = cursor.fetchall()
banco.commit()
banco.close()
return retornar[0][0]
def mostrar():
banco = sqlite3.connect('bancoDeDados.db')
cursor = banco.cursor()
cursor.execute('SELECT * FROM tbl_opcua_general')
print(cursor.fetchall())
banco.commit()
banco.close()
if __name__ == '__main__':
set_delayBetweenScan(1)
print(get_delayBetweenScan())
mostrar()
```
#### File: 0-outros/outro2/2.py
```python
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import sys
class Window(QWindow):
def __init__(self):
QWindow.__init__(self)
self.setTitle('janela')
self.resize(400,300)
app = QApplication(sys.argv)
tela = Window()
tela.show()
sys.exit(app.exec_())
```
#### File: paraVerComoFuncionaAlgumasCoisas/pyQt5/2-BuildingSignal-slotConnection.py
```python
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
def window():
app = QApplication(sys.argv)
win = QDialog()
# definindo botao 1
bt1 = QPushButton(win)
bt1.setText('botao 1')
bt1.move(50, 20)
bt1.clicked.connect(bt1_clicado)
# definindo botao 2
bt2 = QPushButton(win)
bt2.setText('botao 2')
bt2.move(50, 50)
bt2.clicked.connect(bt2_clicado)
win.setGeometry(100,100,200,100)
win.show()
sys.exit(app.exec_())
def bt1_clicado():
print('botao 1 foi clicado')
def bt2_clicado():
print('botao 2 foi clicado')
if __name__ == '__main__':
window()
```
#### File: tkinter-coisas/treeview_codeYoutube/0.py
```python
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
from tkinter import colorchooser
root = Tk()
root.title('Codemy.com - TreeBase')
root.iconbitmap('')
root.geometry("1000x550")
def query_database():
# Clear the Treeview
for record in my_tree.get_children():
my_tree.delete(record)
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
c.execute("SELECT rowid, * FROM customers")
records = c.fetchall()
# Add our data to the screen
global count
count = 0
#for record in records:
# print(record)
for record in records:
if count % 2 == 0:
my_tree.insert(parent='', index='end', iid=count, text='', values=(record[1], record[2], record[0], record[4], record[5], record[6], record[7]), tags=('evenrow',))
else:
my_tree.insert(parent='', index='end', iid=count, text='', values=(record[1], record[2], record[0], record[4], record[5], record[6], record[7]), tags=('oddrow',))
# increment counter
count += 1
# Commit changes
conn.commit()
# Close our connection
conn.close()
def search_records():
lookup_record = search_entry.get()
# close the search box
search.destroy()
# Clear the Treeview
for record in my_tree.get_children():
my_tree.delete(record)
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
c.execute("SELECT rowid, * FROM customers WHERE last_name like ?", (lookup_record,))
records = c.fetchall()
# Add our data to the screen
global count
count = 0
#for record in records:
# print(record)
for record in records:
if count % 2 == 0:
my_tree.insert(parent='', index='end', iid=count, text='', values=(record[1], record[2], record[0], record[4], record[5], record[6], record[7]), tags=('evenrow',))
else:
my_tree.insert(parent='', index='end', iid=count, text='', values=(record[1], record[2], record[0], record[4], record[5], record[6], record[7]), tags=('oddrow',))
# increment counter
count += 1
# Commit changes
conn.commit()
# Close our connection
conn.close()
def lookup_records():
global search_entry, search
search = Toplevel(root)
search.title("Lookup Records")
search.geometry("400x200")
search.iconbitmap('c:/gui/codemy.ico')
# Create label frame
search_frame = LabelFrame(search, text="<NAME>")
search_frame.pack(padx=10, pady=10)
# Add entry box
search_entry = Entry(search_frame, font=("Helvetica", 18))
search_entry.pack(pady=20, padx=20)
# Add button
search_button = Button(search, text="Search Records", command=search_records)
search_button.pack(padx=20, pady=20)
def primary_color():
# Pick Color
primary_color = colorchooser.askcolor()[1]
# Update Treeview Color
if primary_color:
# Create Striped Row Tags
my_tree.tag_configure('evenrow', background=primary_color)
def secondary_color():
# Pick Color
secondary_color = colorchooser.askcolor()[1]
# Update Treeview Color
if secondary_color:
# Create Striped Row Tags
my_tree.tag_configure('oddrow', background=secondary_color)
def highlight_color():
# Pick Color
highlight_color = colorchooser.askcolor()[1]
#Update Treeview Color
# Change Selected Color
if highlight_color:
style.map('Treeview',
background=[('selected', highlight_color)])
# Add Menu
my_menu = Menu(root)
root.config(menu=my_menu)
# Configure our menu
option_menu = Menu(my_menu, tearoff=0)
my_menu.add_cascade(label="Options", menu=option_menu)
# Drop down menu
option_menu.add_command(label="Primary Color", command=primary_color)
option_menu.add_command(label="Secondary Color", command=secondary_color)
option_menu.add_command(label="Highlight Color", command=highlight_color)
option_menu.add_separator()
option_menu.add_command(label="Exit", command=root.quit)
#Search Menu
search_menu = Menu(my_menu, tearoff=0)
my_menu.add_cascade(label="Search", menu=search_menu)
# Drop down menu
search_menu.add_command(label="Search", command=lookup_records)
search_menu.add_separator()
search_menu.add_command(label="Reset", command=query_database)
# Add Fake Data
'''
data = [
["John", "Elder", 1, "123 Elder St.", "Las Vegas", "NV", "89137"],
["Mary", "Smith", 2, "435 West Lookout", "Chicago", "IL", "60610"],
["Tim", "Tanaka", 3, "246 Main St.", "New York", "NY", "12345"],
["Erin", "Erinton", 4, "333 Top Way.", "Los Angeles", "CA", "90210"],
["Bob", "Bobberly", 5, "876 Left St.", "Memphis", "TN", "34321"],
["Steve", "Smith", 6, "1234 Main St.", "Miami", "FL", "12321"],
["Tina", "Browne", 7, "654 Street Ave.", "Chicago", "IL", "60611"],
["Mark", "Lane", 8, "12 East St.", "Nashville", "TN", "54345"],
["John", "Smith", 9, "678 North Ave.", "St. Louis", "MO", "67821"],
["Mary", "Todd", 10, "9 Elder Way.", "Dallas", "TX", "88948"],
["John", "Lincoln", 11, "123 Elder St.", "Las Vegas", "NV", "89137"],
["Mary", "Bush", 12, "435 West Lookout", "Chicago", "IL", "60610"],
["Tim", "Reagan", 13, "246 Main St.", "New York", "NY", "12345"],
["Erin", "Smith", 14, "333 Top Way.", "Los Angeles", "CA", "90210"],
["Bob", "Field", 15, "876 Left St.", "Memphis", "TN", "34321"],
["Steve", "Target", 16, "1234 Main St.", "Miami", "FL", "12321"],
["Tina", "Walton", 17, "654 Street Ave.", "Chicago", "IL", "60611"],
["Mark", "Erendale", 18, "12 East St.", "Nashville", "TN", "54345"],
["John", "Nowerton", 19, "678 North Ave.", "St. Louis", "MO", "67821"],
["Mary", "Hornblower", 20, "9 Elder Way.", "Dallas", "TX", "88948"]
]
'''
# Do some database stuff
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
# Create Table
c.execute("""CREATE TABLE if not exists customers (
first_name text,
last_name text,
id integer,
address text,
city text,
state text,
zipcode text)
""")
# Add dummy data to table
'''
for record in data:
c.execute("INSERT INTO customers VALUES (:first_name, :last_name, :id, :address, :city, :state, :zipcode)",
{
'first_name': record[0],
'last_name': record[1],
'id': record[2],
'address': record[3],
'city': record[4],
'state': record[5],
'zipcode': record[6]
}
)
'''
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Add Some Style
style = ttk.Style()
# Pick A Theme
style.theme_use('default')
# Configure the Treeview Colors
style.configure("Treeview",
background="#D3D3D3",
foreground="black",
rowheight=25,
fieldbackground="#D3D3D3")
# Change Selected Color
style.map('Treeview',
background=[('selected', "#347083")])
# Create a Treeview Frame
tree_frame = Frame(root)
tree_frame.pack(pady=10)
# Create a Treeview Scrollbar
tree_scroll = Scrollbar(tree_frame)
tree_scroll.pack(side=RIGHT, fill=Y)
# Create The Treeview
my_tree = ttk.Treeview(tree_frame, yscrollcommand=tree_scroll.set, selectmode="extended")
my_tree.pack()
# Configure the Scrollbar
tree_scroll.config(command=my_tree.yview)
# Define Our Columns
my_tree['columns'] = ("<NAME>", "<NAME>", "ID", "Address", "City", "State", "Zipcode")
# Format Our Columns
my_tree.column("#0", width=0, stretch=NO)
my_tree.column("<NAME>", anchor=W, width=140)
my_tree.column("<NAME>", anchor=W, width=140)
my_tree.column("ID", anchor=CENTER, width=100)
my_tree.column("Address", anchor=CENTER, width=140)
my_tree.column("City", anchor=CENTER, width=140)
my_tree.column("State", anchor=CENTER, width=140)
my_tree.column("Zipcode", anchor=CENTER, width=140)
# Create Headings
my_tree.heading("#0", text="", anchor=W)
my_tree.heading("<NAME>", text="<NAME>", anchor=W)
my_tree.heading("<NAME>", text="<NAME>", anchor=W)
my_tree.heading("ID", text="ID", anchor=CENTER)
my_tree.heading("Address", text="Address", anchor=CENTER)
my_tree.heading("City", text="City", anchor=CENTER)
my_tree.heading("State", text="State", anchor=CENTER)
my_tree.heading("Zipcode", text="Zipcode", anchor=CENTER)
# Create Striped Row Tags
my_tree.tag_configure('oddrow', background="white")
my_tree.tag_configure('evenrow', background="lightblue")
# Add Record Entry Boxes
data_frame = LabelFrame(root, text="Record")
data_frame.pack(fill="x", expand="yes", padx=20)
fn_label = Label(data_frame, text="<NAME>")
fn_label.grid(row=0, column=0, padx=10, pady=10)
fn_entry = Entry(data_frame)
fn_entry.grid(row=0, column=1, padx=10, pady=10)
ln_label = Label(data_frame, text="<NAME>")
ln_label.grid(row=0, column=2, padx=10, pady=10)
ln_entry = Entry(data_frame)
ln_entry.grid(row=0, column=3, padx=10, pady=10)
id_label = Label(data_frame, text="ID")
id_label.grid(row=0, column=4, padx=10, pady=10)
id_entry = Entry(data_frame)
id_entry.grid(row=0, column=5, padx=10, pady=10)
address_label = Label(data_frame, text="Address")
address_label.grid(row=1, column=0, padx=10, pady=10)
address_entry = Entry(data_frame)
address_entry.grid(row=1, column=1, padx=10, pady=10)
city_label = Label(data_frame, text="City")
city_label.grid(row=1, column=2, padx=10, pady=10)
city_entry = Entry(data_frame)
city_entry.grid(row=1, column=3, padx=10, pady=10)
state_label = Label(data_frame, text="State")
state_label.grid(row=1, column=4, padx=10, pady=10)
state_entry = Entry(data_frame)
state_entry.grid(row=1, column=5, padx=10, pady=10)
zipcode_label = Label(data_frame, text="Zipcode")
zipcode_label.grid(row=1, column=6, padx=10, pady=10)
zipcode_entry = Entry(data_frame)
zipcode_entry.grid(row=1, column=7, padx=10, pady=10)
# Move Row Up
def up():
rows = my_tree.selection()
for row in rows:
my_tree.move(row, my_tree.parent(row), my_tree.index(row)-1)
# Move Rown Down
def down():
rows = my_tree.selection()
for row in reversed(rows):
my_tree.move(row, my_tree.parent(row), my_tree.index(row)+1)
# Remove one record
def remove_one():
x = my_tree.selection()[0]
my_tree.delete(x)
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
# Delete From Database
c.execute("DELETE from customers WHERE oid=" + id_entry.get())
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Clear The Entry Boxes
clear_entries()
# Add a little message box for fun
messagebox.showinfo("Deleted!", "Your Record Has Been Deleted!")
# Remove Many records
def remove_many():
# Add a little message box for fun
response = messagebox.askyesno("WOAH!!!!", "This Will Delete EVERYTHING SELECTED From The Table\nAre You Sure?!")
#Add logic for message box
if response == 1:
# Designate selections
x = my_tree.selection()
# Create List of ID's
ids_to_delete = []
# Add selections to ids_to_delete list
for record in x:
ids_to_delete.append(my_tree.item(record, 'values')[2])
# Delete From Treeview
for record in x:
my_tree.delete(record)
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
# Delete Everything From The Table
c.executemany("DELETE FROM customers WHERE id = ?", [(a,) for a in ids_to_delete])
# Reset List
ids_to_delete = []
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Clear entry boxes if filled
clear_entries()
# Remove all records
def remove_all():
# Add a little message box for fun
response = messagebox.askyesno("WOAH!!!!", "This Will Delete EVERYTHING From The Table\nAre You Sure?!")
#Add logic for message box
if response == 1:
# Clear the Treeview
for record in my_tree.get_children():
my_tree.delete(record)
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
# Delete Everything From The Table
c.execute("DROP TABLE customers")
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Clear entry boxes if filled
clear_entries()
# Recreate The Table
create_table_again()
# Clear entry boxes
def clear_entries():
# Clear entry boxes
fn_entry.delete(0, END)
ln_entry.delete(0, END)
id_entry.delete(0, END)
address_entry.delete(0, END)
city_entry.delete(0, END)
state_entry.delete(0, END)
zipcode_entry.delete(0, END)
# Select Record
def select_record(event):
# Clear entry boxes
fn_entry.delete(0, END)
ln_entry.delete(0, END)
id_entry.delete(0, END)
address_entry.delete(0, END)
city_entry.delete(0, END)
state_entry.delete(0, END)
zipcode_entry.delete(0, END)
# Grab record Number
selected = my_tree.focus()
# Grab record values
values = my_tree.item(selected, 'values')
# outpus to entry boxes
fn_entry.insert(0, values[0])
ln_entry.insert(0, values[1])
id_entry.insert(0, values[2])
address_entry.insert(0, values[3])
city_entry.insert(0, values[4])
state_entry.insert(0, values[5])
zipcode_entry.insert(0, values[6])
# Update record
def update_record():
# Grab the record number
selected = my_tree.focus()
# Update record
my_tree.item(selected, text="", values=(fn_entry.get(), ln_entry.get(), id_entry.get(), address_entry.get(), city_entry.get(), state_entry.get(), zipcode_entry.get(),))
# Update the database
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
c.execute("""UPDATE customers SET
first_name = :first,
last_name = :last,
address = :address,
city = :city,
state = :state,
zipcode = :zipcode
WHERE oid = :oid""",
{
'first': fn_entry.get(),
'last': ln_entry.get(),
'address': address_entry.get(),
'city': city_entry.get(),
'state': state_entry.get(),
'zipcode': zipcode_entry.get(),
'oid': id_entry.get(),
})
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Clear entry boxes
fn_entry.delete(0, END)
ln_entry.delete(0, END)
id_entry.delete(0, END)
address_entry.delete(0, END)
city_entry.delete(0, END)
state_entry.delete(0, END)
zipcode_entry.delete(0, END)
# add new record to database
def add_record():
# Update the database
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
# Add New Record
c.execute("INSERT INTO customers VALUES (:first, :last, :id, :address, :city, :state, :zipcode)",
{
'first': fn_entry.get(),
'last': ln_entry.get(),
'id': id_entry.get(),
'address': address_entry.get(),
'city': city_entry.get(),
'state': state_entry.get(),
'zipcode': zipcode_entry.get(),
})
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Clear entry boxes
fn_entry.delete(0, END)
ln_entry.delete(0, END)
id_entry.delete(0, END)
address_entry.delete(0, END)
city_entry.delete(0, END)
state_entry.delete(0, END)
zipcode_entry.delete(0, END)
# Clear The Treeview Table
my_tree.delete(*my_tree.get_children())
# Run to pull data from database on start
query_database()
def create_table_again():
# Create a database or connect to one that exists
conn = sqlite3.connect('tree_crm.db')
# Create a cursor instance
c = conn.cursor()
# Create Table
c.execute("""CREATE TABLE if not exists customers (
first_name text,
last_name text,
id integer,
address text,
city text,
state text,
zipcode text)
""")
# Commit changes
conn.commit()
# Close our connection
conn.close()
# Add Buttons
button_frame = LabelFrame(root, text="Commands")
button_frame.pack(fill="x", expand="yes", padx=20)
update_button = Button(button_frame, text="Update Record", command=update_record)
update_button.grid(row=0, column=0, padx=10, pady=10)
add_button = Button(button_frame, text="Add Record", command=add_record)
add_button.grid(row=0, column=1, padx=10, pady=10)
remove_all_button = Button(button_frame, text="Remove All Records", command=remove_all)
remove_all_button.grid(row=0, column=2, padx=10, pady=10)
remove_one_button = Button(button_frame, text="Remove One Selected", command=remove_one)
remove_one_button.grid(row=0, column=3, padx=10, pady=10)
remove_many_button = Button(button_frame, text="Remove Many Selected", command=remove_many)
remove_many_button.grid(row=0, column=4, padx=10, pady=10)
move_up_button = Button(button_frame, text="Move Up", command=up)
move_up_button.grid(row=0, column=5, padx=10, pady=10)
move_down_button = Button(button_frame, text="Move Down", command=down)
move_down_button.grid(row=0, column=6, padx=10, pady=10)
select_record_button = Button(button_frame, text="Clear Entry Boxes", command=clear_entries)
select_record_button.grid(row=0, column=7, padx=10, pady=10)
# Bind the treeview
# my_tree.bind("", select_record)
# Run to pull data from database on start
query_database()
root.mainloop()
```
#### File: tkinter-coisas/treeview_tutoriais/frame1.py
```python
import tkinter as tk
from tkinter import Button, Entry, ttk
from tkinter.constants import NSEW, VERTICAL
from tkinter.messagebox import showinfo
class Fr1(ttk.Frame):
def __init__(self, parent, controller):
ttk.Frame.__init__(self, parent)
self.controller = controller
self.lb_pesquisar = ttk.Label(self, text='pesquisar:')
self.etd_pesquisar = ttk.Entry(self)
self.bt_pesquisar = ttk.Button(self, text='pesquisar')
self.lb_pesquisar.grid()
self.etd_pesquisar.grid()
self.bt_pesquisar.grid()
self.tree = self.create_tree_widget()
def create_tree_widget(self):
columns = ('first_name', 'last_name', 'email')
tree = ttk.Treeview(self, columns=columns, show='headings')
# define headings
tree.heading('first_name', text='First Name')
tree.heading('last_name', text='Last Name')
tree.heading('email', text='Email')
tree.bind('<<TreeviewSelect>>', self.item_selected)
tree.grid(row=0, column=0, sticky=NSEW)
# add a scrollbar
scrollbar = ttk.Scrollbar(self, orient=VERTICAL, command=tree.yview)
tree.configure(yscroll=scrollbar.set)
scrollbar.grid(row=0, column=1, sticky='ns')
# generate sample data
contacts = []
for n in range(1, 100):
contacts.append((f'first {n}', f'last {n}', f'<EMAIL>'))
# add data to the treeview
for contact in contacts:
tree.insert('', tk.END, values=contact)
return tree
def item_selected(self, event):
for selected_item in self.tree.selection():
item = self.tree.item(selected_item)
record = item['values']
# show a message
showinfo(title='Information', message=','.join(record))
print(record)
self.controller.mostrar_dados(record)
if __name__ == '__main__':
root = tk.Tk()
    frame = Fr1(root, controller=None)  # standalone demo; item selection needs a real controller
frame.pack()
root.title('Treeview demo')
root.geometry('620x200')
root.mainloop()
```
#### File: tkinter-gui-basico/2-tk/2-tk.py
```python
from tkinter import *
from tkinter.messagebox import showinfo
def replay():
showinfo(title='popup', message = 'botão apertado')
janela = Tk()
botao = Button(janela, text='press', command=replay)
botao.pack()
janela.mainloop()
```
#### File: tkinter-gui-basico/5-tk-poo/MeuGui.py
```python
from tkinter import *
from tkinter.messagebox import showinfo
class MeuGui(Frame):
def __init__(self, parent=None):
Frame.__init__(self, parent)
botao = Button(self, text='Aperte aqui', width=50, height=25, command=self.reply)
botao.pack()
def reply(self):
showinfo(title='popup', message='botao apertado')
if __name__ == '__main__':
janela = MeuGui()
janela.pack()
janela.mainloop()
```
#### File: meu_site/blog/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='publicado')
class Post(models.Model):
STATUS = (
('rascunho', 'Rascunho'),
('publicado', 'Publicado'))
titulo = models.CharField(max_length=250)
slug = models.SlugField(max_length=250)
autor = models.ForeignKey(User, on_delete=models.CASCADE)
conteudo = models.TextField()
publicado = models.DateTimeField(default=timezone.now)
criado = models.DateTimeField(auto_now_add=True)
alterado = models.DateTimeField(auto_now=True)
status = models.CharField(
max_length=10, choices=STATUS,
default='rascunho')
objects = models.Manager()
published = PublishedManager()
    class Meta:
ordering = ('-publicado',)
def __str__(self):
# return f'{self.slug} - {self.conte}'
# return f'{self.slug} - {self.titulo}'
return self.titulo
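# usage sketch: the custom manager restricts queries to published posts,
# e.g. Post.published.all() versus Post.objects.all()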
``` |
{
"source": "jonashund/mechkit",
"score": 3
} |
#### File: source/notebooks/02.py
```python
import mechkit
import numpy as np
import sympy as sp
import itertools
np.set_printoptions(
linewidth=140,
precision=3,
# suppress=False,
)
# ### Symbolic with numbers
converter = mechkit.notation.ConverterSymbolic()
ones_tensor = np.ones((3, 3, 3, 3), dtype=sp.Symbol)
print(ones_tensor)
ones_mandel6 = converter.to_mandel6(ones_tensor)
print(ones_mandel6)
ones_mandel9 = converter.to_mandel9(ones_tensor)
print(ones_mandel9)
# ### Symbolic with letters
def tensor(
order=2, symbol="A", dim=3, latex_index=False, kwargs_symbol={}, indice_offset=0
):
A = np.zeros((dim,) * order, dtype=sp.Symbol)
for x in itertools.product(range(dim), repeat=order):
index = "".join(map(str, map(lambda x: x + indice_offset, x)))
if latex_index:
index = "_{" + index + "}"
A[x] = sp.Symbol(symbol + index, **kwargs_symbol)
return A
def make_it_hooke_symmetric(A, dim=3):
for i in range(dim):
for j in range(dim):
for k in range(dim):
for m in range(dim):
A[i, j, m, k] = A[i, j, k, m]
A[j, i, m, k] = A[i, j, k, m]
A[k, m, i, j] = A[i, j, k, m]
return A
def make_it_left_symmetric(A, dim=3):
for i in range(dim):
for j in range(dim):
for k in range(dim):
for m in range(dim):
A[j, i, k, m] = A[i, j, k, m]
return A
def make_it_right_symmetric(A, dim=3):
for i in range(dim):
for j in range(dim):
for k in range(dim):
for m in range(dim):
A[i, j, m, k] = A[i, j, k, m]
return A
def make_it_minor_symmetric(A, dim=3):
tmp = make_it_left_symmetric(A)
    tmp = make_it_right_symmetric(tmp)
return tmp
tensor = make_it_minor_symmetric(tensor(order=4, indice_offset=1))
print(tensor)
tensor_mandel6 = converter.to_mandel6(tensor)
print(tensor_mandel6)
tensor_mandel9 = converter.to_mandel9(tensor)
print(tensor_mandel9)
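# illustrative check: for a fourth-order input the Mandel representations are
# expected to come back as 6x6 and 9x9 matrices
print(tensor_mandel6.shape, tensor_mandel9.shape)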
```
#### File: mechkit/test/test_fabric_tensors.py
```python
import numpy as np
from pprint import pprint
import mechkit
basic = mechkit.tensors.Basic()
con = mechkit.notation.Converter()
##########################################
# Helpers
def evenly_distributed_vectors_on_sphere(nbr_vectors=1000):
"""
Define nbr_vectors evenly distributed vectors on a sphere
Using the golden spiral method kindly provided by
stackoverflow-user "<NAME>"
https://stackoverflow.com/a/44164075/8935243
"""
from numpy import pi, cos, sin, arccos, arange
indices = arange(0, nbr_vectors, dtype=float) + 0.5
phi = arccos(1 - 2 * indices / nbr_vectors)
theta = pi * (1 + 5 ** 0.5) * indices
x, y, z = cos(theta) * sin(phi), sin(theta) * sin(phi), cos(phi)
orientations = np.column_stack((x, y, z))
return orientations
def evenly_distributed_vectors_on_circle_on_zplane(nbr_vectors=1000):
"""
Define nbr_vectors evenly distributed vectors on a circle in the z-plane,
spaced at equal angular increments around the unit circle.
"""
phi = np.linspace(0, 2.0 * np.pi, nbr_vectors, endpoint=False)
x, y, z = np.cos(phi), np.sin(phi), np.zeros_like(phi)
orientations = np.column_stack((x, y, z))
return orientations
##########################################
# Tests
def test_isotropic_discrete_N2():
converter = mechkit.notation.Converter()
orientations = evenly_distributed_vectors_on_sphere(10000)
basic = converter.to_tensor(mechkit.fabric_tensors.Basic().N2["iso"])
discrete = mechkit.fabric_tensors.first_kind_discrete(
order=2, orientations=orientations
)
pprint(basic)
pprint(discrete)
assert np.allclose(basic, discrete, rtol=1e-6, atol=1e-6)
def test_isotropic_discrete_N4():
converter = mechkit.notation.Converter()
orientations = evenly_distributed_vectors_on_sphere(10000)
basic = converter.to_tensor(mechkit.fabric_tensors.Basic().N4["iso"])
discrete = mechkit.fabric_tensors.first_kind_discrete(
order=4, orientations=orientations
)
pprint(basic)
pprint(discrete)
assert np.allclose(basic, discrete, rtol=1e-6, atol=1e-6)
def test_planar_isotropic_discrete_N4():
converter = mechkit.notation.Converter()
orientations = evenly_distributed_vectors_on_circle_on_zplane(10000)
basic = converter.to_tensor(mechkit.fabric_tensors.Basic().N4["planar_iso_xy"])
discrete = mechkit.fabric_tensors.first_kind_discrete(
order=4, orientations=orientations
)
pprint("basic")
pprint(converter.to_mandel6(basic))
pprint("discrete")
pprint(converter.to_mandel6(discrete))
assert np.allclose(basic, discrete, rtol=1e-6, atol=1e-6)
def test_fabric_tensor_first_kind_discrete():
"""Compare einsum-implementation with loop-implementation"""
orientations = np.random.rand(10, 3) # Ten random vectors in 3D
# Normalize orientations
orientations = [np.array(v) / np.linalg.norm(v) for v in orientations]
def oT_loops(orientations, order=4):
N = np.zeros((3,) * order)
for p in orientations:
out = p
for index in range(order - 1):
out = np.multiply.outer(out, p)
N[:] = N[:] + out
N = N / len(orientations)
return N
for order in range(1, 6):
assert np.allclose(
mechkit.fabric_tensors.first_kind_discrete(
order=order, orientations=orientations
),
oT_loops(order=order, orientations=orientations),
)
def test_fabric_tensor_first_kind_discrete_benchmarks():
orientations = [
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
]
converter = mechkit.notation.Converter()
f = mechkit.fabric_tensors.first_kind_discrete
assert np.allclose(
converter.to_tensor(mechkit.fabric_tensors.Basic().N2["iso"]),
f(order=2, orientations=orientations),
)
if __name__ == "__main__":
pass
```
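As a worked reference point for the benchmark above: the first-kind fabric tensor of order 2 is simply the average of the dyadic products p ⊗ p, so the three coordinate axes give the isotropic tensor I/3. A standalone numpy sketch, independent of mechkit:
```python
import numpy as np

orientations = np.array(
    [
        [1.0, 0.0, 0.0],
        [0.0, 1.0, 0.0],
        [0.0, 0.0, 1.0],
    ]
)

# N2[i, j] = mean over vectors p of p[i] * p[j]
N2 = np.einsum("ki,kj->ij", orientations, orientations) / len(orientations)

assert np.allclose(N2, np.eye(3) / 3.0)
```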
#### File: mechkit/test/test_visualization.py
```python
import numpy as np
import mechkit
class Test_StiffnessAnalyser:
def test_get_E_in_principal_direction_and_perpendicular_transverselly_iso(self):
direction = [1, 1, 0]
E_l = 100.0
E_t = 20.0
mat = mechkit.material.TransversalIsotropic(
E_l=E_l,
E_t=E_t,
nu_lt=0.3,
G_lt=10.0,
G_tt=7.0,
principal_axis=direction,
)
analyzer = mechkit.visualization.StiffnessAnalyser(stiffness=mat.stiffness)
# E Modulus in principal direction has to be equal to E_l
assert np.allclose(analyzer.E_in_direction(direction=direction), E_l)
# E Modulus in any direction perpendicular to principal direction
# has to be equal to E_t
assert np.allclose(analyzer.E_in_direction(direction=[0, 0, 1]), E_t)
def test_E_and_K_generalized_of_isotropic_vectorized(self):
E_modul = 2e3
K_modul = 1e3
mat = mechkit.material.Isotropic(E=E_modul, K=K_modul)
analyzer = mechkit.visualization.StiffnessAnalyser(stiffness=mat.stiffness)
shape = (2, 4)
tmp_1, tmp_2 = shape
# Unpacking by "*shape" is not valid Python2.x
directions = np.random.rand(tmp_1, tmp_2, 3)
youngs_moduli = analyzer.E_in_direction(direction=directions)
print(youngs_moduli)
assert youngs_moduli.shape == shape
assert np.allclose(youngs_moduli, np.ones(shape) * E_modul)
gen_bulk_modulus = analyzer.K_in_direction(direction=directions)
print(gen_bulk_modulus)
assert gen_bulk_modulus.shape == shape
assert np.allclose(gen_bulk_modulus, np.ones(shape) * K_modul)
if __name__ == "__main__":
instance = Test_StiffnessAnalyser()
instance.test_get_E_in_principal_direction_and_perpendicular_transverselly_iso()
instance.test_E_and_K_generalized_of_isotropic_vectorized()
``` |
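Condensed from the tests above, the minimal call sequence for the stiffness analyser looks like this (a sketch using only the API exercised by these tests):
```python
import mechkit

# Isotropic material defined by Young's modulus and bulk modulus.
mat = mechkit.material.Isotropic(E=2e3, K=1e3)
analyzer = mechkit.visualization.StiffnessAnalyser(stiffness=mat.stiffness)

# Generalized Young's modulus in a given direction; for an isotropic
# material this is direction-independent and equal to E.
print(analyzer.E_in_direction(direction=[1, 0, 0]))
print(analyzer.K_in_direction(direction=[0, 1, 0]))
```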
{
"source": "jonasitzmann/pix2latent",
"score": 2
} |
#### File: pix2latent/pix2latent/loss_functions.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as osp
import sys
import logging
import traceback
import torch
from torch import nn
import lpips
from pix2latent.utils.misc import HiddenPrints
def l1_loss(out, target):
""" computes loss = | x - y |"""
return torch.abs(target - out)
def l2_loss(out, target):
""" computes loss = (x - y)^2 """
return ((target - out) ** 2)
def invertibility_loss(ims, target_transform, transform_params, mask=None):
""" Computes invertibility loss MSE(ims - T^{-1}(T(ims))) """
if ims.size(0) == 1:
ims = ims.repeat(len(transform_params), 1, 1, 1)
transformed = target_transform(ims, transform_params)
inverted = target_transform(transformed, transform_params, invert=True)
if mask is None:
return torch.mean((ims - inverted) ** 2, [1, 2, 3])
return masked_l2_loss(ims, inverted, mask)
def masked_l1_loss(out, target, mask):
if mask.size(0) == 1:
mask = mask.repeat(out.size(0), 1, 1, 1)
if target.size(0) == 1:
target = target.repeat(out.size(0), 1, 1, 1)
loss = l1_loss(out, target)
n = torch.sum(loss * mask, [1, 2, 3])
d = torch.sum(mask, [1, 2, 3])
return (n / d)
def masked_l2_loss(out, target, mask):
if mask.size(0) == 1:
mask = mask.repeat(out.size(0), 1, 1, 1)
if target.size(0) == 1:
target = target.repeat(out.size(0), 1, 1, 1)
loss = l2_loss(out, target)
n = torch.sum(loss * mask, [1, 2, 3])
d = torch.sum(mask, [1, 2, 3])
return (n / d)
def weight_regularization(orig_model, curr_model, reg='l1', weight_dict=None):
w = 1.0
reg_loss = 0.0
orig_state_dict = orig_model.state_dict()
for param_name, curr_param in curr_model.named_parameters():
if 'bn' in param_name:
continue
orig_param = orig_state_dict[param_name]
if reg == 'l1':
l = torch.abs(curr_param - orig_param).mean()
elif reg == 'l2':
l = ((curr_param - orig_param) ** 2).mean()
elif reg == 'inf':
l = torch.max(torch.abs(curr_param - orig_param))
if weight_dict is not None:
w = weight_dict[param_name]
reg_loss += w * l
return reg_loss
class ProjectionLoss(nn.Module):
""" The default loss that is used in the paper """
def __init__(self, lpips_net='alex', beta=10):
super().__init__()
self.beta = beta
self.rloss_fn = ReconstructionLoss()
self.ploss_fn = PerceptualLoss(net=lpips_net)
return
def __call__(self, output, target, weight=None, loss_mask=None):
rec_loss = self.rloss_fn(output, target, weight, loss_mask)
per_loss = self.ploss_fn(output, target, weight, loss_mask)
return rec_loss + (self.beta * per_loss)
class ReconstructionLoss(nn.Module):
""" Reconstruction loss with spatial weighting """
def __init__(self, loss_type='l1'):
super(ReconstructionLoss, self).__init__()
if loss_type in ['l1', 1]:
self.loss_fn = l1_loss
elif loss_type in ['l2', 2]:
self.loss_fn = l2_loss
else:
raise ValueError('Unknown loss_type {}'.format(loss_type))
return
def __call__(self, output, target, weight=None, loss_mask=None):
loss = self.loss_fn(output, target)
if weight is not None:
_weight = weight if loss_mask is None else (loss_mask * weight)
n = torch.sum(loss * _weight, [1, 2, 3])
d = torch.sum(_weight, [1, 2, 3])
loss = n / d
return loss
class PerceptualLoss(nn.Module):
def __init__(self, net='vgg', use_gpu=True):
""" LPIPS loss with spatial weighting """
super(PerceptualLoss, self).__init__()
self.loss_fn = lpips.LPIPS(net=net, spatial=True)
if torch.cuda.is_available():
self.loss_fn = self.loss_fn.cuda()
# current pip version does not support DataParallel
# self.loss_fn = nn.DataParallel(self.loss_fn)
return
def __call__(self, output, target, weight=None, loss_mask=None):
# lpips takes the sum of each spatial map
loss = self.loss_fn(output, target)
if weight is not None:
_weight = weight if loss_mask is None else (loss_mask * weight)
n = torch.sum(loss * _weight, [1, 2, 3])
d = torch.sum(_weight, [1, 2, 3])
loss = n / d
return loss
``` |
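A small usage sketch of the weighted reconstruction loss (shapes and values are made up; the LPIPS-based `PerceptualLoss`/`ProjectionLoss` additionally require the `lpips` package and benefit from a GPU):
```python
import torch
from pix2latent.loss_functions import ReconstructionLoss

# Hypothetical batch of 2 RGB images at 64x64, a weight map, and a loss mask.
out = torch.rand(2, 3, 64, 64)
target = torch.rand(2, 3, 64, 64)
weight = torch.ones(2, 1, 64, 64)
mask = torch.zeros(2, 1, 64, 64)
mask[:, :, 16:48, 16:48] = 1.0  # only score the centre crop

rloss = ReconstructionLoss(loss_type="l1")
per_sample = rloss(out, target, weight=weight, loss_mask=mask)
print(per_sample.shape)  # torch.Size([2]): one loss value per image
```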
{
"source": "jonasitzmann/ultimate-tactic-board",
"score": 2
} |
#### File: jonasitzmann/ultimate-tactic-board/mode.py
```python
from abc import ABC
from kivy.graphics import Rectangle, Color
from kivy.properties import ObjectProperty
from kivy.uix.behaviors import DragBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.widget import Widget
import cfg
from player_widget import PlayerWidget
import command
import numpy as np
class SelectionRect(Widget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.p1 = self.pos.copy()
self.p2 = self.pos.copy()
self.set_p2(self.p2)
def set_p2(self, p2):
self.p2 = p2
self.x = min(self.p1[0], self.p2[0])
self.y = min(self.p1[1], self.p2[1])
self.width = abs(self.p1[0] - self.p2[0])
self.height = abs(self.p1[1] - self.p2[1])
return
class Mode(ABC):
def __init__(self, field, get_widgets_func=None):
self.get_widgets = get_widgets_func or self.get_players
self.field = field
field.reset()
self.widgets = []
self.add_widgets()
def add_widgets(self):
self.widgets = self.get_widgets()
for w in self.widgets:
self.field.add_widget(w)
def remove_widgets(self):
for widget in self.widgets:
self.field.remove_widget(widget)
def on_touch_down(self, touch):
pass
def on_touch_up(self, touch):
pass
def on_touch_move(self, touch):
pass
def reload(self):
self.remove_widgets()
self.add_widgets()
pass
def get_widget_at(self, pos, widget_list=None):
if widget_list is None:
widget_list = self.widgets
for widget in widget_list:
if widget.collide_point(*pos):
return widget
def get_players(self):
return [PlayerWidget(p, self.field) for p in self.field.state.players]
def __del__(self):
self.remove_widgets()
class EditPoseMode(Mode):
def __init__(self, field):
super().__init__(field)
self.angle_mode = False
self.current_player = None
def on_touch_down(self, touch):
self.current_player = self.get_widget_at(touch.pos)
if self.current_player is not None:
if touch.button == 'right' or self.field.ctrl_pressed:
self.angle_mode = True
else:
self.current_player.on_touch_down(touch)
def on_touch_up(self, touch):
if self.current_player is not None:
angle = pos = None
if not touch.button == 'left' or not self.field.ctrl_pressed:
self.current_player.on_touch_up(touch)
pos = self.current_player.pix2pos()
prev_player = self.field.get_previous_player(self.current_player.player_state)
if prev_player is not None:
max_distance_no_turn = 3
move_distance = np.linalg.norm(pos - prev_player.pos)
if move_distance > max_distance_no_turn:
angle = float(np.arctan2(*(pos - prev_player.pos)) * -180 / np.pi)
elif self.angle_mode and not self.current_player.collide_point(*touch.pos):
pos1 = self.current_player.pix2pos()
pos2 = self.field.pix2pos(*touch.pos)
angle = int(np.arctan2(*(pos2 - pos1)) * -180 / np.pi)
if pos is not None or angle is not None:
cmd = command.MovePlayer(self.field, self.current_player.player_state, pos, angle)
self.field.execute_cmd(cmd)
self.current_player = None
self.angle_mode = False
class AddPlayerMode(Mode):
def __init__(self, field):
super().__init__(field)
self.current_player = None
def on_touch_down(self, touch):
role = 'd' if self.field.ctrl_pressed else 'o'
if not self.field.collide_point(*touch.pos):
return
pos = self.field.pix2pos(*touch.pos)
if touch.button == 'left':
cmd = command.AddPlayer(self.field, role, pos)
self.field.execute_cmd(cmd)
self.current_player = cmd.player_widget
def on_touch_up(self, touch):
if self.current_player is None or self.current_player.player_state is None or self.current_player.collide_point(*touch.pos):
return
pos1 = self.current_player.player_state.pos
pos2 = self.field.pix2pos(touch.x, touch.y)
angle = int(np.arctan2(*(pos2 - pos1)) * -180 / np.pi)
cmd = command.MovePlayer(self.field, self.current_player.player_state, None, angle)
self.field.execute_cmd(cmd)
self.current_player = None
class SetupHexMode(Mode):
def __init__(self, field):
super().__init__(field)
self.pos = None
def on_touch_down(self, touch):
if touch.button == 'left':
self.pos = self.field.pix2pos(*touch.pos)
self.field.state.setup_hex(self.pos)
self.field.update_img()
# def on_touch_up(self, touch):
# if self.pos is not None:
# pos2 = self.field.pix2pos(touch.x, touch.y)
# angle = int(np.arctan2(*(pos2 - self.pos)) * 180 / np.pi + 180)
# self.field.state.setup_hex(angle, self.pos)
# self.field.update_img()
class SelectMode(Mode):
def __init__(self, field):
super().__init__(field)
# self.selection_commands = []
self.selected_players = []
self.selection_rect = None
def __del__(self):
super().__del__()
self.undo_annotations()
def undo_annotations(self):
# for cmd in self.selection_commands:
# self.field.do_and_reload(cmd.undo)
# self.selection_commands = []
self.selected_players = []
def on_touch_down(self, touch):
self.undo_annotations()
if touch.button == 'left':
player = self.get_widget_at(touch.pos)
if isinstance(player, PlayerWidget):
# cmd = command.FieldOfView(self.field, player.player_state)
self.selected_players = [player.player_state]
# self.field.do_and_reload(cmd.execute)
# self.selection_commands.append(cmd)
else:
self.selection_rect = SelectionRect(pos=touch.pos)
self.widgets.append(self.selection_rect)
self.field.add_widget(self.selection_rect)
def on_touch_move(self, touch):
if self.selection_rect is not None:
self.selection_rect.set_p2(touch.pos)
def on_touch_up(self, touch):
if self.selection_rect is not None:
self.selected_players = [
p.player_state for p in self.widgets
if self.selection_rect.collide_widget(p) and p is not self.selection_rect]
self.field.remove_widget(self.selection_rect)
self.widgets.remove(self.selection_rect)
self.selection_rect = None
if self.selected_players:
self.field.set_mode_(PlayersSelectedMode(self.field, self.selected_players))
class SelectionMenu(BoxLayout):
mode = ObjectProperty(None)
class PlayersSelectedMode(Mode):
def __init__(self, field, players):
self.players = players
super().__init__(field, self.get_widgets)
self.drag = False
self.menu = SelectionMenu(mode=self)
self.field.parent.add_widget(self.menu, index=1)
self.highlight_cmd = None
self.fov_annotations = None
def __del__(self):
self.field.parent.remove_widget(self.menu)
super().__del__()
def get_widgets(self):
player_states = [self.field.state.get_player(p) for p in self.players]
return [PlayerWidget(p, self.field) for p in player_states]
def on_touch_down(self, touch):
if self.get_widget_at(touch.pos) is None:
self.field.set_mode_(SelectMode(self.field))
self.field.mode.on_touch_down(touch)
else:
self.drag = True
def on_touch_move(self, touch):
if self.drag:
for widget in self.widgets:
widget.x += touch.dx
widget.y += touch.dy
def on_touch_up(self, touch):
if self.drag:
cmds = []
for player_widget in self.widgets:
cmds.append(command.MovePlayer(self.field, player_widget.player_state, pos=player_widget.pix2pos()))
self.field.execute_cmd(command.CommandList(cmds))
self.drag = False
def align_x(self):
self.field.do_and_reload(lambda: self.field.state.align_x(' '.join([p.name for p in self.players])))
def align_y(self):
self.field.do_and_reload(lambda: self.field.state.align_y(' '.join([p.name for p in self.players])))
def highlight_toggle(self):
if self.highlight_cmd is None:
self.highlight_cmd = command.HighlightPlayers(self.field, *self.players)
self.field.do_and_reload(self.highlight_cmd.execute)
else:
self.field.do_and_reload(self.highlight_cmd.undo)
self.highlight_cmd = None
self.field.do_and_reload(lambda: None)
def fov_toggle(self):
if self.fov_annotations is None:
self.fov_annotations = command.FieldOfView(self.field, *self.players)
self.field.do_and_reload(self.fov_annotations.execute)
else:
self.field.do_and_reload(self.fov_annotations.undo)
self.fov_annotations = None
self.field.do_and_reload(lambda: None)
class ViewMode(Mode):
pass
```
#### File: not_a_playbook/0_what_is_a_playbook/intro_scene.py
```python
from manim_animations import create_movie
from scenes import UltimateScene
from manim import *
from manim_presentation import Slide
class Play1(UltimateScene, Slide):
def pplay(self, *args, **kwargs):
self.play(*args, **kwargs)
self.pause()
def construct(self):
plan_a = Tex("Plan A").to_edge(UP, MED_SMALL_BUFF).shift(2*RIGHT)
plan_b = Tex("Plan B").to_corner(UR, MED_SMALL_BUFF).shift(LEFT)
tex = Tex(self.get_tex('slide_1')).to_corner(UL, MED_SMALL_BUFF)
self.wait(0.1)
self.pause()
self.pplay(Write(tex))
self.play(Write(plan_a))
f_1, s = self.prepare()
f_1.landscape_to_portrait(animate=False).scale(0.9).next_to(plan_a, DOWN, MED_SMALL_BUFF)
self.play(FadeIn(f_1))
plan_a_arrows = f_1.get_arrows(s[0], s[1])
f_1.add(plan_a_arrows)
self.play(Write(plan_a_arrows))
self.add(plan_a_arrows)
self.pause()
f_1.transition(s[1], run_time=3, disc_delay=0.2)
self.pause()
f2 = f_1.copy().next_to(plan_b, DOWN)
self.play(Write(plan_b))
self.play(FadeIn(f2))
plan_b_arrows = f2.get_arrows(s[2], s[3])
self.add(plan_b_arrows)
self.pause()
f2.transition(s[0], run_time=0.3)
self.play(Write(plan_b_arrows))
f2.add(plan_b_arrows)
self.pause()
f2.transition(s[2], run_time=3), f2.transition(s[3], run_time=3)
self.pause()
l1 = Line(f_1.get_corner(UL), f2.get_corner(DR), color=RED_D, stroke_width=10, z_index=5)
l2 = Line(f_1.get_corner(DL), f2.get_corner(UR), color=RED_D, stroke_width=10, z_index=5)
self.pplay(DrawBorderThenFill(l1), DrawBorderThenFill(l2))
self.wait()
def render_scene():
# create_movie(Play1, debug=False, hq=True, output_file='play1.mp4')
bin_dir = '/home/jonas/.conda/envs/tactics_board/bin'
os.system(f'{bin_dir}/manim-presentation Play1 --fullscreen')
if __name__ == '__main__':
render_scene()
```
#### File: scenes/profile_scene/profile_scene.py
```python
import sys
from scenes import UltimateScene
from manim_animations import create_movie
from contextlib import contextmanager
import cProfile
import os
def main():
create_movie(ProfileScene, debug=True, hq=True, opengl=False)
@contextmanager
def profile(filename=None, *args, **kwargs):
profiler = cProfile.Profile(*args, **kwargs)
profiler.enable()
yield
profiler.disable()
if filename:
profiler.dump_stats(os.path.expanduser(filename))
profiler.print_stats(sort=1)
class ProfileScene(UltimateScene):
def construct(self):
f, s = self.prepare()
f.transition(s[1], run_time=5)
if __name__ == '__main__':
main()
``` |
{
"source": "Jonasiz/milestone_task",
"score": 2
} |
#### File: Jonasiz/milestone_task/demo_modes.py
```python
import json
import os
import random
import time
from dotenv import load_dotenv
from ClientManager import ClientManager
from MQTT import MQTTClient
import console_prompts
def freezer_callback(client, userdata, message):
print_msg = 'Received {0} on topic: {1} with QoS {2}'.format(
message.payload, message.topic, message.qos
)
client.logger.info(print_msg)
living_room_temp = json.loads(message.payload)['temp']
client.temperature = living_room_temp - 30
client.logger.info('Updated freezer temperature to {0} C (living room temp: {1})'.format(
client.temperature, living_room_temp
))
def living_room_callback(client, userdata, message):
print_msg = 'Received {0} on topic: {1} with QoS {2}'.format(
message.payload, message.topic, message.qos
)
client.logger.info(print_msg)
freezer_temp = json.loads(message.payload)['temp']
client.temperature = freezer_temp + 30
client.logger.info('Updated living room temperature to {0} C (freezer temp: {1})'.format(
client.temperature, freezer_temp
))
def main():
load_dotenv()
broker_domain = str(os.getenv('BROKER_HOST'))
broker_port = int(os.getenv('BROKER_PORT'))
living_room_sensor = 'interview/ioma/sensors/temperature/living_room/ABC1'
freezer_sensor = 'interview/ioma/sensors/temperature/freezer/CBA1'
# Freezer client/sensor
freezer_client = MQTTClient('freezer_id', clean_session=True,
msg_handler=freezer_callback)
freezer_client.connect(broker_domain, port=broker_port, keepalive=120)
freezer_client.subscribe(living_room_sensor, qos=1)
# Living room client/sensor
living_room_client = MQTTClient('living_room_id', clean_session=True,
msg_handler=living_room_callback)
living_room_client.connect(broker_domain, port=broker_port, keepalive=60)
living_room_client.subscribe(freezer_sensor, qos=1)
print('Both clients started! Check log files')
try:
while True:
# Fluctuate freezer temperature
freezer_temp = freezer_client.temperature + random.randint(-5, 5)
freezer_client.publish(freezer_sensor,
json.dumps({'temp': freezer_temp}),
qos=1)
time.sleep(random.randint(1, 2))
# Fluctuate living room temperature
living_room_temp = living_room_client.temperature + random.randint(-5, 5)
living_room_client.publish(living_room_sensor,
json.dumps({'temp': living_room_temp}),
qos=1)
time.sleep(random.randint(1, 2))
except KeyboardInterrupt:
freezer_client.disconnect()
living_room_client.disconnect()
print('Interrupted, exiting...')
def main_interactive():
load_dotenv()
broker_domain = str(os.getenv('BROKER_HOST'))
broker_port = int(os.getenv('BROKER_PORT'))
client_manager = ClientManager()
try:
while True:
client_ids = [client.client_id for client in client_manager.clients]
choice = console_prompts.main_menu()
if choice == console_prompts.main_actions['show']:
console_prompts.show_clients(client_ids)
elif choice == console_prompts.main_actions['add']:
inputs = console_prompts.add_client(client_ids, broker_domain, broker_port)
client_manager.add_client(inputs['client_id'],
inputs['host'],
int(inputs['port']),
int(inputs['keepalive']),
inputs['clean_session'] == 'y')
print('Client added and connected.')
elif choice == console_prompts.main_actions['remove']:
removed_id = console_prompts.remove_client(client_ids)
if removed_id is not None:
client_manager.remove_client(removed_id)
print('Client removed and disconnected.')
elif choice == console_prompts.main_actions['pub']:
inputs = console_prompts.publish(client_ids)
if inputs is not None:
client_manager.client_publish(inputs['pub_id'],
inputs['topic'],
inputs['data'],
int(inputs['qos']))
print('Published "{0}" to topic {1} for client "{2}" (qos={3})'.format(
inputs['data'], inputs['topic'],
inputs['pub_id'], inputs['qos']
))
elif choice == console_prompts.main_actions['sub']:
inputs = console_prompts.subscribe_client(client_ids)
if inputs is not None:
client_manager.client_subscribe(inputs['sub_id'],
inputs['topic'],
int(inputs['qos']))
print('Subscribed client "{0}" to topic "{1}" (QoS: {2})'.format(
inputs['sub_id'], inputs['topic'], inputs['qos']
))
elif choice == console_prompts.main_actions['unsub']:
unsub_id = console_prompts.unsubscribe_client_id(client_ids)
client = [client for client in client_manager.clients
if client.client_id == unsub_id][0]
unsub_topic = console_prompts.unsubscribe_client_topic(client)
if unsub_topic is not None:
client_manager.client_unsubscribe(client.client_id, unsub_topic)
print('Unsubscribed client "{0}" from topic "{1}"'.format(
unsub_id, unsub_topic
))
except (EOFError, KeyError):
print('Interrupted, exiting...')
client_manager.disconnect_all()
print('Done.')
``` |
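The callbacks above follow the paho-mqtt signature `(client, userdata, message)`; the custom `MQTTClient` wrapper itself is not shown in this file. A bare-bones sketch of the same publish/subscribe round trip with plain paho-mqtt 1.x (broker host/port are placeholders):
```python
import json
import time

import paho.mqtt.client as mqtt

TOPIC = "interview/ioma/sensors/temperature/freezer/CBA1"

def on_message(client, userdata, message):
    # message.payload is bytes; the demo encodes readings as JSON.
    reading = json.loads(message.payload)
    print("received", reading["temp"], "C on", message.topic)

client = mqtt.Client(client_id="demo", clean_session=True)
client.on_message = on_message
client.connect("localhost", 1883, keepalive=60)  # placeholder broker
client.subscribe(TOPIC, qos=1)
client.loop_start()

client.publish(TOPIC, json.dumps({"temp": -18}), qos=1)
time.sleep(1)  # give the network loop a moment to deliver the message
client.loop_stop()
client.disconnect()
```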
{
"source": "jonasj76/lldpd",
"score": 2
} |
#### File: integration/fixtures/programs.py
```python
import pytest
import glob
import os
import pwd
import grp
import re
import signal
import subprocess
import multiprocessing
import uuid
import time
import platform
import ctypes
from collections import namedtuple
libc = ctypes.CDLL('libc.so.6', use_errno=True)
def mount_bind(source, target):
ret = libc.mount(source.encode('ascii'),
target.encode('ascii'),
None,
4096, # MS_BIND
None)
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def mount_tmpfs(target, private=False):
flags = [0]
if private:
flags.append(1 << 18) # MS_PRIVATE
flags.append(1 << 19) # MS_SLAVE
for fl in flags:
ret = libc.mount(b"none",
target.encode('ascii'),
b"tmpfs",
fl,
None)
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def _mount_proc(target):
flags = [2 | 4 | 8] # MS_NOSUID | MS_NODEV | MS_NOEXEC
flags.append(1 << 18) # MS_PRIVATE
flags.append(1 << 19) # MS_SLAVE
for fl in flags:
ret = libc.mount(b"proc",
target.encode('ascii'),
b"proc",
fl,
None)
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
def mount_proc(target="/proc"):
# We need to be sure /proc is correct. We do that in another
# process as this doesn't play well with setns().
if not os.path.isdir(target):
os.mkdir(target)
p = multiprocessing.Process(target=_mount_proc, args=(target,))
p.start()
p.join()
def most_recent(*args):
"""Return the most recent files matching one of the provided glob
expression."""
candidates = [l
for location in args
for l in glob.glob(location)]
candidates.sort(key=lambda x: -os.stat(x).st_mtime)  # newest first
assert len(candidates) > 0
return candidates[0]
libtool_location = most_recent('../../libtool',
'../../*/libtool')
lldpcli_location = most_recent('../../src/client/lldpcli',
'../../*/src/client/lldpcli')
lldpd_location = most_recent('../../src/daemon/lldpd',
'../../*/src/daemon/lldpd')
def _replace_file(tmpdir, target, content):
tmpname = str(uuid.uuid1())
with tmpdir.join(tmpname).open("w") as tmp:
tmp.write(content)
mount_bind(str(tmpdir.join(tmpname)),
target)
@pytest.fixture
def replace_file(tmpdir):
"""Replace a file by another content by bind-mounting on it."""
return lambda target, content: _replace_file(tmpdir, target, content)
def format_process_output(program, args, result):
"""Return a string representing the result of a process."""
return "\n".join([
'P: {} {}'.format(program, " ".join(args)),
'C: {}'.format(os.getcwd()),
'\n'.join(['O: {}'.format(l)
for l in result.stdout.decode(
'ascii', 'ignore').strip().split('\n')]),
'\n'.join(['E: {}'.format(l)
for l in result.stderr.decode(
'ascii', 'ignore').strip().split('\n')]),
'S: {}'.format(result.returncode),
''])
class LldpdFactory(object):
"""Factory for lldpd. When invoked, lldpd will configure the current
namespace to be in a reproducible environment and spawn itself in
the background. On termination, output will be logged to a temporary
file.
"""
def __init__(self, tmpdir, config):
"""Create a new wrapped program."""
tmpdir.join('lldpd-outputs').ensure(dir=True)
self.tmpdir = tmpdir
self.config = config
self.pids = []
self.threads = []
self.counter = 0
def __call__(self, *args, sleep=3, silent=False):
self.counter += 1
self.setup_namespace("ns-{}".format(self.counter))
args = (self.config.option.verbose > 2 and "-dddd" or "-dd",
"-L",
lldpcli_location,
"-u",
str(self.tmpdir.join("ns", "lldpd.socket"))) + args
p = subprocess.Popen((libtool_location, 'execute',
lldpd_location) + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.pids.append(p.pid)
t = multiprocessing.Process(target=self.run, args=(p, args, silent))
self.threads.append(t)
t.start()
time.sleep(sleep)
return t
def run(self, p, args, silent):
stdout, stderr = p.communicate()
self.pids.remove(p.pid)
if not silent:
o = format_process_output("lldpd",
args,
namedtuple('ProcessResult',
['returncode',
'stdout',
'stderr'])(
p.returncode,
stdout,
stderr))
self.tmpdir.join('lldpd-outputs', '{}-{}'.format(
os.getpid(),
p.pid)).write(o)
def killall(self):
for p in self.pids[:]:
os.kill(p, signal.SIGTERM)
for t in self.threads:
if t.is_alive():
t.join(1)
for p in self.pids[:]:
os.kill(p, signal.SIGKILL)
for t in self.threads:
if t.is_alive():
t.join(1)
def setup_namespace(self, name):
# Setup privsep. While not enforced, we assume we are running in a
# throwaway mount namespace.
tmpdir = self.tmpdir
mount_proc()
if self.config.lldpd.privsep.enabled:
# Chroot
chroot = self.config.lldpd.privsep.chroot
if os.path.isdir(chroot):
mount_tmpfs(chroot)
else:
parent = os.path.abspath(os.path.join(chroot, os.pardir))
assert os.path.isdir(parent)
mount_tmpfs(parent)
if not os.path.isdir(chroot):
os.mkdir(chroot)
mount_proc(os.path.join(chroot, "proc"))
# User/group
user = self.config.lldpd.privsep.user
group = self.config.lldpd.privsep.group
try:
pwd.getpwnam(user)
grp.getgrnam(group)
except KeyError:
passwd = ""
for l in open("/etc/passwd", "r").readlines():
if not l.startswith("{}:".format(user)):
passwd += l
passwd += "{}:x:39861:39861::{}:/bin/false\n".format(
user, chroot)
fgroup = ""
for l in open("/etc/group", "r").readlines():
if not l.startswith("{}:".format(group)):
fgroup += l
fgroup += "{}:x:39861:\n".format(group)
_replace_file(tmpdir, "/etc/passwd", passwd)
_replace_file(tmpdir, "/etc/group", fgroup)
# Also setup the "namespace-dependant" directory
tmpdir.join("ns").ensure(dir=True)
mount_tmpfs(str(tmpdir.join("ns")), private=True)
# We also need a proper /etc/os-release
_replace_file(tmpdir, "/etc/os-release",
"""PRETTY_NAME="Spectacular GNU/Linux 2016"
NAME="Spectacular GNU/Linux"
ID=spectacular
HOME_URL="https://www.example.com/spectacular"
SUPPORT_URL="https://www.example.com/spectacular/support"
BUG_REPORT_URL="https://www.example.com/spectacular/bugs"
""")
# We also need a proper name
subprocess.check_call(["hostname", name])
# And we need to ensure name resolution is sane
_replace_file(tmpdir, "/etc/hosts",
"""
127.0.0.1 localhost.localdomain localhost
127.0.1.1 {name}.example.com {name}
::1 ip6-localhost ip6-loopback
""".format(name=name))
_replace_file(tmpdir, "/etc/nsswitch.conf",
"""
passwd: <PASSWORD>
group: files
shadow: files
hosts: files
networks: files
protocols: files
services: files
""")
# Remove any config
path = os.path.join(self.config.lldpd.confdir, "lldpd.conf")
if os.path.isfile(path):
_replace_file(tmpdir, path, "")
path = os.path.join(self.config.lldpd.confdir, "lldpd.d")
if os.path.isdir(path):
mount_tmpfs(path)
@pytest.fixture()
def lldpd(request, tmpdir):
"""Execute ``lldpd``."""
p = LldpdFactory(tmpdir, request.config)
request.addfinalizer(p.killall)
return p
@pytest.fixture()
def lldpd1(lldpd, links, namespaces):
"""Shortcut for a first receive-only lldpd daemon."""
links(namespaces(1), namespaces(2))
with namespaces(1):
lldpd("-r")
@pytest.fixture()
def lldpcli(request, tmpdir):
"""Execute ``lldpcli``."""
socketdir = tmpdir.join("ns", "lldpd.socket")
count = [0]
def run(*args):
cargs = ("-u", str(socketdir)) + args
p = subprocess.Popen((libtool_location, 'execute',
lldpcli_location) + cargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate(timeout=30)
result = namedtuple('ProcessResult',
['returncode', 'stdout', 'stderr'])(
p.returncode, stdout, stderr)
request.node.add_report_section(
'run', 'lldpcli output {}'.format(count[0]),
format_process_output("lldpcli", cargs, result))
count[0] += 1
# When keyvalue is requested, return a formatted result
if args[:2] == ("-f", "keyvalue"):
assert result.returncode == 0
out = {}
for k, v in [l.split('=', 2)
for l in result.stdout.decode('ascii').split("\n")
if '=' in l]:
if k in out:
out[k] += [v]
else:
out[k] = [v]
for k in out:
if len(out[k]) == 1:
out[k] = out[k][0]
return out
# Otherwise, return the named tuple
return result
return run
def pytest_runtest_makereport(item, call):
"""Collect outputs written to tmpdir and put them in report."""
# Only do that after tests are run, but not on teardown (too late)
if call.when != 'call':
return
# We can't wait for teardown, kill any running lldpd daemon right
# now. Otherwise, we won't get any output.
if "lldpd" in item.fixturenames and "lldpd" in item.funcargs:
lldpd = item.funcargs["lldpd"]
lldpd.killall()
if "tmpdir" in item.fixturenames and "tmpdir" in item.funcargs:
tmpdir = item.funcargs["tmpdir"]
if tmpdir.join('lldpd-outputs').check(dir=1):
for path in tmpdir.join('lldpd-outputs').visit():
item.add_report_section(
call.when,
'lldpd {}'.format(path.basename),
path.read())
def pytest_configure(config):
"""Put lldpd/lldpcli configuration into the config object."""
output = subprocess.check_output([lldpcli_location, "-vv"])
output = output.decode('ascii')
config.lldpcli = namedtuple(
'lldpcli',
['version',
'outputs'])(
re.search(
r"^lldpcli (.*)$", output,
re.MULTILINE).group(1),
re.search(
r"^Additional output formats:\s+(.*)$",
output,
re.MULTILINE).group(1).split(", "))
output = subprocess.check_output([lldpd_location, "-vv"])
output = output.decode('ascii')
if {"enabled": True,
"disabled": False}[re.search(r"^Privilege separation:\s+(.*)$",
output, re.MULTILINE).group(1)]:
privsep = namedtuple('privsep',
['user',
'group',
'chroot',
'enabled'])(
re.search(
r"^Privilege separation user:\s+(.*)$",
output,
re.MULTILINE).group(1),
re.search(
r"^Privilege separation group:\s+(.*)$",
output,
re.MULTILINE).group(1),
re.search(
r"^Privilege separation chroot:\s(.*)$",
output,
re.MULTILINE).group(1),
True)
else:
privsep = namedtuple('privsep',
['enabled'])(False)
config.lldpd = namedtuple('lldpd',
['features',
'protocols',
'confdir',
'snmp',
'privsep',
'version'])(
re.search(
r"^Additional LLDP features:\s+(.*)$",
output,
re.MULTILINE).group(1).split(", "),
re.search(
r"^Additional protocols:\s+(.*)$",
output,
re.MULTILINE).group(1).split(", "),
re.search(
r"^Configuration directory:\s+(.*)$",
output, re.MULTILINE).group(1),
{"yes": True,
"no": False}[re.search(
r"^SNMP support:\s+(.*)$",
output,
re.MULTILINE).group(1)],
privsep,
re.search(r"^lldpd (.*)$",
output, re.MULTILINE).group(1))
def pytest_report_header(config):
"""Report lldpd/lldpcli version and configuration."""
print('lldpd: {} {}'.format(config.lldpd.version,
", ".join(config.lldpd.protocols +
config.lldpd.features)))
print('lldpcli: {} {}'.format(config.lldpcli.version,
", ".join(config.lldpcli.outputs)))
print('{}: {} {} {}'.format(platform.system().lower(),
platform.release(),
platform.version(),
platform.machine()))
``` |
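A hypothetical test built on these fixtures (the `namespaces` and `links` fixtures come from the project's other fixture modules and are assumed here; the exact keyvalue keys are illustrative):
```python
def test_sees_one_neighbor(lldpd1, lldpd, lldpcli, namespaces):
    # lldpd1 already runs a receive-only daemon in namespace 1,
    # linked to namespace 2; start a transmitting daemon there.
    with namespaces(2):
        lldpd()
    with namespaces(1):
        out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
        # keyvalue output is a flat dict, e.g. {"lldp.eth0.chassis.name": ...}
        assert any(key.startswith("lldp.") for key in out)
```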
{
"source": "jonasjancarik/waybackpy",
"score": 2
} |
#### File: waybackpy/tests/test_cli.py
```python
import sys
import os
import pytest
import random
import string
import argparse
import waybackpy.cli as cli
from waybackpy.wrapper import Url # noqa: E402
from waybackpy.__version__ import __version__
def test_save():
args = argparse.Namespace(
user_agent=None,
url="https://hfjfjfjfyu6r6rfjvj.fjhgjhfjgvjm",
total=False,
version=False,
file=False,
oldest=False,
save=True,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "could happen because either your waybackpy" or "cannot be archived by wayback machine as it is a redirect" in str(reply)
def test_json():
args = argparse.Namespace(
user_agent=None,
url="https://pypi.org/user/akamhy/",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=True,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "archived_snapshots" in str(reply)
def test_archive_url():
args = argparse.Namespace(
user_agent=None,
url="https://pypi.org/user/akamhy/",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=True,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "https://web.archive.org/web/" in str(reply)
def test_oldest():
args = argparse.Namespace(
user_agent=None,
url="https://pypi.org/user/akamhy/",
total=False,
version=False,
file=False,
oldest=True,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "pypi.org/user/akamhy" in str(reply)
uid = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
)
url = "https://pypi.org/yfvjvycyc667r67ed67r" + uid
args = argparse.Namespace(
user_agent=None,
url=url,
total=False,
version=False,
file=False,
oldest=True,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "Can not find archive for" in str(reply)
def test_newest():
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://pypi.org/user/akamhy/",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=True,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "pypi.org/user/akamhy" in str(reply)
uid = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
)
url = "https://pypi.org/yfvjvycyc667r67ed67r" + uid
args = argparse.Namespace(
user_agent=None,
url=url,
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=True,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert "Can not find archive for" in str(reply)
def test_total_archives():
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://pypi.org/user/akamhy/",
total=True,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get=None,
)
reply = cli.args_handler(args)
assert isinstance(reply, int)
def test_known_urls():
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://www.keybr.com",
total=False,
version=False,
file=True,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=True,
get=None,
)
reply = cli.args_handler(args)
assert "keybr" in str(reply)
def test_near():
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://pypi.org/user/akamhy/",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=True,
subdomain=False,
known_urls=False,
get=None,
year=2020,
month=7,
day=15,
hour=1,
minute=1,
)
reply = cli.args_handler(args)
assert "202007" in str(reply)
uid = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
)
url = "https://pypi.org/yfvjvycyc667r67ed67r" + uid
args = argparse.Namespace(
user_agent=None,
url=url,
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=True,
subdomain=False,
known_urls=False,
get=None,
year=2020,
month=7,
day=15,
hour=1,
minute=1,
)
reply = cli.args_handler(args)
assert "Can not find archive for" in str(reply)
def test_get():
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://github.com/akamhy",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get="url",
)
reply = cli.args_handler(args)
assert "waybackpy" in str(reply)
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://github.com/akamhy/waybackpy",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get="oldest",
)
reply = cli.args_handler(args)
assert "waybackpy" in str(reply)
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://akamhy.github.io/waybackpy/",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get="newest",
)
reply = cli.args_handler(args)
assert "waybackpy" in str(reply)
args = argparse.Namespace(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9",
url="https://pypi.org/user/akamhy/",
total=False,
version=False,
file=False,
oldest=False,
save=False,
json=False,
archive_url=False,
newest=False,
near=False,
subdomain=False,
known_urls=False,
get="foobar",
)
reply = cli.args_handler(args)
assert "get the source code of the" in str(reply)
def test_args_handler():
args = argparse.Namespace(version=True)
reply = cli.args_handler(args)
assert ("waybackpy version %s" % (__version__)) == reply
args = argparse.Namespace(url=None, version=False)
reply = cli.args_handler(args)
assert ("waybackpy %s" % (__version__)) in str(reply)
def test_main():
# This also tests the parse_args method in cli.py
cli.main(["temp.py", "--version"])
``` |
{
"source": "jonasjeeliasson/core",
"score": 2
} |
#### File: components/adguard/switch.py
```python
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Callable
from adguardhome import AdGuardHome, AdGuardHomeConnectionError, AdGuardHomeError
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from . import AdGuardHomeDeviceEntity
from .const import DATA_ADGUARD_CLIENT, DATA_ADGUARD_VERSION, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[list[Entity], bool], None],
) -> None:
"""Set up AdGuard Home switch based on a config entry."""
adguard = hass.data[DOMAIN][entry.entry_id][DATA_ADGUARD_CLIENT]
try:
version = await adguard.version()
except AdGuardHomeConnectionError as exception:
raise PlatformNotReady from exception
hass.data[DOMAIN][entry.entry_id][DATA_ADGUARD_VERSION] = version
switches = [
AdGuardHomeProtectionSwitch(adguard, entry),
AdGuardHomeFilteringSwitch(adguard, entry),
AdGuardHomeParentalSwitch(adguard, entry),
AdGuardHomeSafeBrowsingSwitch(adguard, entry),
AdGuardHomeSafeSearchSwitch(adguard, entry),
AdGuardHomeQueryLogSwitch(adguard, entry),
]
async_add_entities(switches, True)
class AdGuardHomeSwitch(AdGuardHomeDeviceEntity, SwitchEntity):
"""Defines a AdGuard Home switch."""
def __init__(
self,
adguard: AdGuardHome,
entry: ConfigEntry,
name: str,
icon: str,
key: str,
enabled_default: bool = True,
) -> None:
"""Initialize AdGuard Home switch."""
self._state = False
self._key = key
super().__init__(adguard, entry, name, icon, enabled_default)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return "_".join(
[DOMAIN, self.adguard.host, str(self.adguard.port), "switch", self._key]
)
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return self._state
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the switch."""
try:
await self._adguard_turn_off()
except AdGuardHomeError:
_LOGGER.error("An error occurred while turning off AdGuard Home switch")
self._available = False
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the switch."""
try:
await self._adguard_turn_on()
except AdGuardHomeError:
_LOGGER.error("An error occurred while turning on AdGuard Home switch")
self._available = False
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
raise NotImplementedError()
class AdGuardHomeProtectionSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home protection switch."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, entry, "AdGuard Protection", "mdi:shield-check", "protection"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.disable_protection()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.enable_protection()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.protection_enabled()
class AdGuardHomeParentalSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home parental control switch."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, entry, "AdGuard Parental Control", "mdi:shield-check", "parental"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.parental.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.parental.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.parental.enabled()
class AdGuardHomeSafeSearchSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home safe search switch."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, entry, "AdGuard Safe Search", "mdi:shield-check", "safesearch"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.safesearch.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.safesearch.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.safesearch.enabled()
class AdGuardHomeSafeBrowsingSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home safe search switch."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, entry, "AdGuard Safe Browsing", "mdi:shield-check", "safebrowsing"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.safebrowsing.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.safebrowsing.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.safebrowsing.enabled()
class AdGuardHomeFilteringSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home filtering switch."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, entry, "AdGuard Filtering", "mdi:shield-check", "filtering"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.filtering.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.filtering.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.filtering.enabled()
class AdGuardHomeQueryLogSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home query log switch."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard,
entry,
"AdGuard Query Log",
"mdi:shield-check",
"querylog",
enabled_default=False,
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.querylog.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.querylog.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.querylog.enabled()
```
#### File: components/canary/__init__.py
```python
from datetime import timedelta
import logging
from canary.api import Api
from requests import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.components.camera.const import DOMAIN as CAMERA_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_FFMPEG_ARGUMENTS,
DATA_COORDINATOR,
DATA_UNDO_UPDATE_LISTENER,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
)
from .coordinator import CanaryDataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["alarm_control_panel", "camera", "sensor"]
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the Canary integration."""
hass.data.setdefault(DOMAIN, {})
if hass.config_entries.async_entries(DOMAIN):
return True
ffmpeg_arguments = DEFAULT_FFMPEG_ARGUMENTS
if CAMERA_DOMAIN in config:
camera_config = next(
(item for item in config[CAMERA_DOMAIN] if item["platform"] == DOMAIN),
None,
)
if camera_config:
ffmpeg_arguments = camera_config.get(
CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
)
if DOMAIN in config:
if ffmpeg_arguments != DEFAULT_FFMPEG_ARGUMENTS:
config[DOMAIN][CONF_FFMPEG_ARGUMENTS] = ffmpeg_arguments
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Canary from a config entry."""
if not entry.options:
options = {
CONF_FFMPEG_ARGUMENTS: entry.data.get(
CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
),
CONF_TIMEOUT: entry.data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
}
hass.config_entries.async_update_entry(entry, options=options)
try:
canary_api = await hass.async_add_executor_job(_get_canary_api_instance, entry)
except (ConnectTimeout, HTTPError) as error:
_LOGGER.error("Unable to connect to Canary service: %s", str(error))
raise ConfigEntryNotReady from error
coordinator = CanaryDataUpdateCoordinator(hass, api=canary_api)
await coordinator.async_config_entry_first_refresh()
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
DATA_COORDINATOR: coordinator,
DATA_UNDO_UPDATE_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id][DATA_UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
def _get_canary_api_instance(entry: ConfigEntry) -> Api:
"""Initialize a new instance of CanaryApi."""
canary = Api(
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
entry.options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
return canary
```
#### File: components/gdacs/config_flow.py
```python
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers import config_validation as cv
from .const import CONF_CATEGORIES, DEFAULT_RADIUS, DEFAULT_SCAN_INTERVAL, DOMAIN
DATA_SCHEMA = vol.Schema(
{vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): cv.positive_int}
)
_LOGGER = logging.getLogger(__name__)
class GdacsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a GDACS config flow."""
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors or {}
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
_LOGGER.debug("User input: %s", user_input)
if not user_input:
return await self._show_form()
latitude = user_input.get(CONF_LATITUDE, self.hass.config.latitude)
user_input[CONF_LATITUDE] = latitude
longitude = user_input.get(CONF_LONGITUDE, self.hass.config.longitude)
user_input[CONF_LONGITUDE] = longitude
identifier = f"{user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}"
await self.async_set_unique_id(identifier)
self._abort_if_unique_id_configured()
scan_interval = user_input.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
user_input[CONF_SCAN_INTERVAL] = scan_interval.total_seconds()
categories = user_input.get(CONF_CATEGORIES, [])
user_input[CONF_CATEGORIES] = categories
return self.async_create_entry(title=identifier, data=user_input)
```
#### File: components/knx/switch.py
```python
from __future__ import annotations
from collections.abc import Iterable
from typing import Any, Callable
from xknx import XKNX
from xknx.devices import Switch as XknxSwitch
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import DOMAIN, KNX_ADDRESS
from .knx_entity import KnxEntity
from .schema import SwitchSchema
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: Callable[[Iterable[Entity]], None],
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up switch(es) for KNX platform."""
if not discovery_info or not discovery_info["platform_config"]:
return
platform_config = discovery_info["platform_config"]
xknx: XKNX = hass.data[DOMAIN].xknx
entities = []
for entity_config in platform_config:
entities.append(KNXSwitch(xknx, entity_config))
async_add_entities(entities)
class KNXSwitch(KnxEntity, SwitchEntity):
"""Representation of a KNX switch."""
def __init__(self, xknx: XKNX, config: ConfigType) -> None:
"""Initialize of KNX switch."""
self._device: XknxSwitch
super().__init__(
device=XknxSwitch(
xknx,
name=config[CONF_NAME],
group_address=config[KNX_ADDRESS],
group_address_state=config.get(SwitchSchema.CONF_STATE_ADDRESS),
invert=config[SwitchSchema.CONF_INVERT],
)
)
self._unique_id = f"{self._device.switch.group_address}"
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return bool(self._device.state)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
await self._device.set_on()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the device off."""
await self._device.set_off()
```
#### File: components/risco/__init__.py
```python
import asyncio
from datetime import timedelta
import logging
from pyrisco import CannotConnectError, OperationError, RiscoAPI, UnauthorizedError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_PASSWORD,
CONF_PIN,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.storage import Store
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DATA_COORDINATOR, DEFAULT_SCAN_INTERVAL, DOMAIN, EVENTS_COORDINATOR
PLATFORMS = ["alarm_control_panel", "binary_sensor", "sensor"]
UNDO_UPDATE_LISTENER = "undo_update_listener"
LAST_EVENT_STORAGE_VERSION = 1
LAST_EVENT_TIMESTAMP_KEY = "last_event_timestamp"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Risco from a config entry."""
data = entry.data
risco = RiscoAPI(data[CONF_USERNAME], data[CONF_PASSWORD], data[CONF_PIN])
try:
await risco.login(async_get_clientsession(hass))
except CannotConnectError as error:
raise ConfigEntryNotReady() from error
except UnauthorizedError:
_LOGGER.exception("Failed to login to Risco cloud")
return False
scan_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
coordinator = RiscoDataUpdateCoordinator(hass, risco, scan_interval)
await coordinator.async_config_entry_first_refresh()
events_coordinator = RiscoEventsDataUpdateCoordinator(
hass, risco, entry.entry_id, 60
)
undo_listener = entry.add_update_listener(_update_listener)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_COORDINATOR: coordinator,
UNDO_UPDATE_LISTENER: undo_listener,
EVENTS_COORDINATOR: events_coordinator,
}
async def start_platforms():
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_setup(entry, platform)
for platform in PLATFORMS
]
)
await events_coordinator.async_refresh()
hass.async_create_task(start_platforms())
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
class RiscoDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching risco data."""
def __init__(self, hass, risco, scan_interval):
"""Initialize global risco data updater."""
self.risco = risco
interval = timedelta(seconds=scan_interval)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=interval,
)
async def _async_update_data(self):
"""Fetch data from risco."""
try:
return await self.risco.get_state()
except (CannotConnectError, UnauthorizedError, OperationError) as error:
raise UpdateFailed(error) from error
class RiscoEventsDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching risco data."""
def __init__(self, hass, risco, eid, scan_interval):
"""Initialize global risco data updater."""
self.risco = risco
self._store = Store(
hass, LAST_EVENT_STORAGE_VERSION, f"risco_{eid}_last_event_timestamp"
)
interval = timedelta(seconds=scan_interval)
super().__init__(
hass,
_LOGGER,
name=f"{DOMAIN}_events",
update_interval=interval,
)
async def _async_update_data(self):
"""Fetch data from risco."""
last_store = await self._store.async_load() or {}
last_timestamp = last_store.get(
LAST_EVENT_TIMESTAMP_KEY, "2020-01-01T00:00:00Z"
)
try:
events = await self.risco.get_events(last_timestamp, 10)
except (CannotConnectError, UnauthorizedError, OperationError) as error:
raise UpdateFailed(error) from error
if len(events) > 0:
await self._store.async_save({LAST_EVENT_TIMESTAMP_KEY: events[0].time})
return events
```
#### File: components/smarttub/entity.py
```python
import logging
import smarttub
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
from .helpers import get_spa_name
_LOGGER = logging.getLogger(__name__)
class SmartTubEntity(CoordinatorEntity):
"""Base class for SmartTub entities."""
def __init__(
self, coordinator: DataUpdateCoordinator, spa: smarttub.Spa, entity_name
):
"""Initialize the entity.
Given a spa id and a short name for the entity, we provide basic device
info, name, unique id, etc. for all derived entities.
"""
super().__init__(coordinator)
self.spa = spa
self._entity_name = entity_name
@property
def unique_id(self) -> str:
"""Return a unique id for the entity."""
return f"{self.spa.id}-{self._entity_name}"
@property
    def device_info(self) -> dict:
"""Return device info."""
return {
"identifiers": {(DOMAIN, self.spa.id)},
"manufacturer": self.spa.brand,
"model": self.spa.model,
}
@property
def name(self) -> str:
"""Return the name of the entity."""
spa_name = get_spa_name(self.spa)
return f"{spa_name} {self._entity_name}"
@property
def spa_status(self) -> smarttub.SpaState:
"""Retrieve the result of Spa.get_status()."""
return self.coordinator.data[self.spa.id].get("status")
class SmartTubSensorBase(SmartTubEntity):
"""Base class for SmartTub sensors."""
def __init__(self, coordinator, spa, sensor_name, attr_name):
"""Initialize the entity."""
super().__init__(coordinator, spa, sensor_name)
self._attr_name = attr_name
@property
def _state(self):
"""Retrieve the underlying state from the spa."""
return getattr(self.spa_status, self._attr_name)
```
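The base classes above centralize device info, naming, and unique ids, so a concrete sensor only has to name the spa-status attribute it exposes. A minimal sketch of a hypothetical derived sensor; the `water_temperature` attribute name and the Celsius unit are assumptions for illustration:
```python
# Hypothetical example built on SmartTubSensorBase; not part of the integration.
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import TEMP_CELSIUS


class SmartTubWaterTemperature(SmartTubSensorBase, SensorEntity):
    """Illustrative sensor exposing an assumed water_temperature attribute."""

    def __init__(self, coordinator, spa):
        super().__init__(coordinator, spa, "Water Temperature", "water_temperature")

    @property
    def state(self):
        # _state resolves getattr(self.spa_status, "water_temperature")
        return self._state

    @property
    def unit_of_measurement(self):
        return TEMP_CELSIUS
```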
#### File: components/synology_dsm/sensor.py
```python
from __future__ import annotations
from datetime import timedelta
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_DISKS,
DATA_MEGABYTES,
DATA_RATE_KILOBYTES_PER_SECOND,
DATA_TERABYTES,
PRECISION_TENTHS,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.temperature import display_temp
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util.dt import utcnow
from . import SynoApi, SynologyDSMBaseEntity, SynologyDSMDeviceEntity
from .const import (
CONF_VOLUMES,
COORDINATOR_CENTRAL,
DOMAIN,
ENTITY_UNIT_LOAD,
INFORMATION_SENSORS,
STORAGE_DISK_SENSORS,
STORAGE_VOL_SENSORS,
SYNO_API,
TEMP_SENSORS_KEYS,
UTILISATION_SENSORS,
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Synology NAS Sensor."""
data = hass.data[DOMAIN][entry.unique_id]
api = data[SYNO_API]
coordinator = data[COORDINATOR_CENTRAL]
entities = [
SynoDSMUtilSensor(
api, sensor_type, UTILISATION_SENSORS[sensor_type], coordinator
)
for sensor_type in UTILISATION_SENSORS
]
# Handle all volumes
if api.storage.volumes_ids:
for volume in entry.data.get(CONF_VOLUMES, api.storage.volumes_ids):
entities += [
SynoDSMStorageSensor(
api,
sensor_type,
STORAGE_VOL_SENSORS[sensor_type],
coordinator,
volume,
)
for sensor_type in STORAGE_VOL_SENSORS
]
# Handle all disks
if api.storage.disks_ids:
for disk in entry.data.get(CONF_DISKS, api.storage.disks_ids):
entities += [
SynoDSMStorageSensor(
api,
sensor_type,
STORAGE_DISK_SENSORS[sensor_type],
coordinator,
disk,
)
for sensor_type in STORAGE_DISK_SENSORS
]
entities += [
SynoDSMInfoSensor(
api, sensor_type, INFORMATION_SENSORS[sensor_type], coordinator
)
for sensor_type in INFORMATION_SENSORS
]
async_add_entities(entities)
class SynoDSMSensor(SynologyDSMBaseEntity):
"""Mixin for sensor specific attributes."""
@property
def unit_of_measurement(self) -> str:
"""Return the unit the value is expressed in."""
if self.entity_type in TEMP_SENSORS_KEYS:
return self.hass.config.units.temperature_unit
return self._unit
class SynoDSMUtilSensor(SynoDSMSensor, SensorEntity):
"""Representation a Synology Utilisation sensor."""
@property
def state(self):
"""Return the state."""
attr = getattr(self._api.utilisation, self.entity_type)
if callable(attr):
attr = attr()
if attr is None:
return None
# Data (RAM)
if self._unit == DATA_MEGABYTES:
return round(attr / 1024.0 ** 2, 1)
# Network
if self._unit == DATA_RATE_KILOBYTES_PER_SECOND:
return round(attr / 1024.0, 1)
# CPU load average
if self._unit == ENTITY_UNIT_LOAD:
return round(attr / 100, 2)
return attr
@property
def available(self) -> bool:
"""Return True if entity is available."""
return bool(self._api.utilisation)
class SynoDSMStorageSensor(SynologyDSMDeviceEntity, SynoDSMSensor, SensorEntity):
"""Representation a Synology Storage sensor."""
@property
def state(self):
"""Return the state."""
attr = getattr(self._api.storage, self.entity_type)(self._device_id)
if attr is None:
return None
# Data (disk space)
if self._unit == DATA_TERABYTES:
return round(attr / 1024.0 ** 4, 2)
# Temperature
if self.entity_type in TEMP_SENSORS_KEYS:
return display_temp(self.hass, attr, TEMP_CELSIUS, PRECISION_TENTHS)
return attr
class SynoDSMInfoSensor(SynoDSMSensor, SensorEntity):
"""Representation a Synology information sensor."""
def __init__(
self,
api: SynoApi,
entity_type: str,
entity_info: dict[str, str],
coordinator: DataUpdateCoordinator,
):
"""Initialize the Synology SynoDSMInfoSensor entity."""
super().__init__(api, entity_type, entity_info, coordinator)
self._previous_uptime = None
self._last_boot = None
@property
def state(self):
"""Return the state."""
attr = getattr(self._api.information, self.entity_type)
if attr is None:
return None
# Temperature
if self.entity_type in TEMP_SENSORS_KEYS:
return display_temp(self.hass, attr, TEMP_CELSIUS, PRECISION_TENTHS)
if self.entity_type == "uptime":
# reboot happened or entity creation
if self._previous_uptime is None or self._previous_uptime > attr:
last_boot = utcnow() - timedelta(seconds=attr)
self._last_boot = last_boot.replace(microsecond=0).isoformat()
self._previous_uptime = attr
return self._last_boot
return attr
```
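The state conversions above are plain base-1024 scaling plus rounding. A quick numeric sanity check of each branch; the raw values are chosen purely for illustration:
```python
# Illustrative raw values only, showing the scaling applied in the state properties.
raw = 3_221_225_472
print(round(raw / 1024.0 ** 2, 1))   # 3072.0  (DATA_MEGABYTES branch)
raw = 5_242_880
print(round(raw / 1024.0, 1))        # 5120.0  (DATA_RATE_KILOBYTES_PER_SECOND branch)
raw = 2_199_023_255_552
print(round(raw / 1024.0 ** 4, 2))   # 2.0     (DATA_TERABYTES branch)
raw = 178
print(round(raw / 100, 2))           # 1.78    (ENTITY_UNIT_LOAD branch)
```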
#### File: components/waze_travel_time/__init__.py
```python
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Load the saved entities."""
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)
```
#### File: components/wemo/__init__.py
```python
import asyncio
import logging
import pywemo
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.fan import DOMAIN as FAN_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_DISCOVERY, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later
from .const import DOMAIN
# Mapping from Wemo model_name to domain.
WEMO_MODEL_DISPATCH = {
"Bridge": LIGHT_DOMAIN,
"CoffeeMaker": SWITCH_DOMAIN,
"Dimmer": LIGHT_DOMAIN,
"Humidifier": FAN_DOMAIN,
"Insight": SWITCH_DOMAIN,
"LightSwitch": SWITCH_DOMAIN,
"Maker": SWITCH_DOMAIN,
"Motion": BINARY_SENSOR_DOMAIN,
"OutdoorPlug": SWITCH_DOMAIN,
"Sensor": BINARY_SENSOR_DOMAIN,
"Socket": SWITCH_DOMAIN,
}
_LOGGER = logging.getLogger(__name__)
def coerce_host_port(value):
"""Validate that provided value is either just host or host:port.
Returns (host, None) or (host, port) respectively.
"""
host, _, port = value.partition(":")
if not host:
raise vol.Invalid("host cannot be empty")
if port:
port = cv.port(port)
else:
port = None
return host, port
CONF_STATIC = "static"
DEFAULT_DISCOVERY = True
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_STATIC, default=[]): vol.Schema(
[vol.All(cv.string, coerce_host_port)]
),
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up for WeMo devices."""
hass.data[DOMAIN] = {
"config": config.get(DOMAIN, {}),
"registry": None,
"pending": {},
}
if DOMAIN in config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a wemo config entry."""
config = hass.data[DOMAIN].pop("config")
# Keep track of WeMo device subscriptions for push updates
registry = hass.data[DOMAIN]["registry"] = pywemo.SubscriptionRegistry()
await hass.async_add_executor_job(registry.start)
wemo_dispatcher = WemoDispatcher(entry)
wemo_discovery = WemoDiscovery(hass, wemo_dispatcher)
async def async_stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.debug("Shutting down WeMo event subscriptions")
await hass.async_add_executor_job(registry.stop)
wemo_discovery.async_stop_discovery()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_wemo)
)
static_conf = config.get(CONF_STATIC, [])
if static_conf:
_LOGGER.debug("Adding statically configured WeMo devices")
for device in await asyncio.gather(
*[
hass.async_add_executor_job(validate_static_config, host, port)
for host, port in static_conf
]
):
if device:
wemo_dispatcher.async_add_unique_device(hass, device)
if config.get(CONF_DISCOVERY, DEFAULT_DISCOVERY):
await wemo_discovery.async_discover_and_schedule()
return True
class WemoDispatcher:
"""Dispatch WeMo devices to the correct platform."""
def __init__(self, config_entry: ConfigEntry):
"""Initialize the WemoDispatcher."""
self._config_entry = config_entry
self._added_serial_numbers = set()
self._loaded_components = set()
@callback
def async_add_unique_device(
self, hass: HomeAssistant, device: pywemo.WeMoDevice
) -> None:
"""Add a WeMo device to hass if it has not already been added."""
if device.serialnumber in self._added_serial_numbers:
return
component = WEMO_MODEL_DISPATCH.get(device.model_name, SWITCH_DOMAIN)
# Three cases:
# - First time we see component, we need to load it and initialize the backlog
# - Component is being loaded, add to backlog
# - Component is loaded, backlog is gone, dispatch discovery
if component not in self._loaded_components:
hass.data[DOMAIN]["pending"][component] = [device]
self._loaded_components.add(component)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
self._config_entry, component
)
)
elif component in hass.data[DOMAIN]["pending"]:
hass.data[DOMAIN]["pending"][component].append(device)
else:
async_dispatcher_send(
hass,
f"{DOMAIN}.{component}",
device,
)
self._added_serial_numbers.add(device.serialnumber)
class WemoDiscovery:
"""Use SSDP to discover WeMo devices."""
ADDITIONAL_SECONDS_BETWEEN_SCANS = 10
MAX_SECONDS_BETWEEN_SCANS = 300
def __init__(self, hass: HomeAssistant, wemo_dispatcher: WemoDispatcher) -> None:
"""Initialize the WemoDiscovery."""
self._hass = hass
self._wemo_dispatcher = wemo_dispatcher
self._stop = None
self._scan_delay = 0
async def async_discover_and_schedule(self, *_) -> None:
"""Periodically scan the network looking for WeMo devices."""
_LOGGER.debug("Scanning network for WeMo devices")
try:
for device in await self._hass.async_add_executor_job(
pywemo.discover_devices
):
self._wemo_dispatcher.async_add_unique_device(self._hass, device)
finally:
# Run discovery more frequently after hass has just started.
self._scan_delay = min(
self._scan_delay + self.ADDITIONAL_SECONDS_BETWEEN_SCANS,
self.MAX_SECONDS_BETWEEN_SCANS,
)
self._stop = async_call_later(
self._hass,
self._scan_delay,
self.async_discover_and_schedule,
)
@callback
def async_stop_discovery(self) -> None:
"""Stop the periodic background scanning."""
if self._stop:
self._stop()
self._stop = None
def validate_static_config(host, port):
"""Handle a static config."""
url = pywemo.setup_url_for_address(host, port)
if not url:
_LOGGER.error(
"Unable to get description url for WeMo at: %s",
f"{host}:{port}" if port else host,
)
return None
try:
device = pywemo.discovery.device_from_description(url)
except (
pywemo.exceptions.ActionException,
pywemo.exceptions.HTTPException,
) as err:
_LOGGER.error("Unable to access WeMo at %s (%s)", url, err)
return None
return device
```
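As a quick illustration of the static-host normalization above (a sketch only, assuming `coerce_host_port` from this module is in scope; the addresses are placeholders):
```python
# Hypothetical values: coerce_host_port splits "host[:port]" into a (host, port) tuple.
assert coerce_host_port("192.168.1.20") == ("192.168.1.20", None)
assert coerce_host_port("192.168.1.20:49153") == ("192.168.1.20", 49153)
```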
#### File: components/wilight/__init__.py
```python
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import Entity
from .parent_device import WiLightParent
DOMAIN = "wilight"
# List the platforms that you want to support.
PLATFORMS = ["cover", "fan", "light"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a wilight config entry."""
parent = WiLightParent(hass, entry)
if not await parent.async_setup():
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = parent
# Set up all platforms for this device/entry.
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload WiLight config entry."""
# Unload entities for this entry/device.
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
# Cleanup
parent = hass.data[DOMAIN][entry.entry_id]
await parent.async_reset()
del hass.data[DOMAIN][entry.entry_id]
return unload_ok
class WiLightDevice(Entity):
"""Representation of a WiLight device.
Contains the common logic for WiLight entities.
"""
def __init__(self, api_device, index, item_name):
"""Initialize the device."""
# WiLight specific attributes for every component type
self._device_id = api_device.device_id
self._sw_version = api_device.swversion
self._client = api_device.client
self._model = api_device.model
self._name = item_name
self._index = index
self._unique_id = f"{self._device_id}_{self._index}"
self._status = {}
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for this WiLight item."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this WiLight item."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(DOMAIN, self._unique_id)},
"model": self._model,
"manufacturer": "WiLight",
"sw_version": self._sw_version,
"via_device": (DOMAIN, self._device_id),
}
@property
def available(self):
"""Return True if entity is available."""
return bool(self._client.is_connected)
@callback
def handle_event_callback(self, states):
"""Propagate changes through ha."""
self._status = states
self.async_write_ha_state()
async def async_update(self):
"""Synchronize state with api_device."""
await self._client.status(self._index)
async def async_added_to_hass(self):
"""Register update callback."""
self._client.register_status_callback(self.handle_event_callback, self._index)
await self._client.status(self._index)
```
#### File: components/philips_js/test_config_flow.py
```python
from unittest.mock import ANY, patch
from haphilipsjs import PairingFailure
from pytest import fixture
from homeassistant import config_entries
from homeassistant.components.philips_js.const import DOMAIN
from . import (
MOCK_CONFIG,
MOCK_CONFIG_PAIRED,
MOCK_IMPORT,
MOCK_PASSWORD,
MOCK_SYSTEM_UNPAIRED,
MOCK_USERINPUT,
MOCK_USERNAME,
)
@fixture(autouse=True)
def mock_setup_entry():
"""Disable component setup."""
with patch(
"homeassistant.components.philips_js.async_setup_entry", return_value=True
) as mock_setup_entry:
yield mock_setup_entry
@fixture
async def mock_tv_pairable(mock_tv):
"""Return a mock tv that is pariable."""
mock_tv.system = MOCK_SYSTEM_UNPAIRED
mock_tv.pairing_type = "digest_auth_pairing"
mock_tv.api_version = 6
mock_tv.api_version_detected = 6
mock_tv.secured_transport = True
mock_tv.pairRequest.return_value = {}
mock_tv.pairGrant.return_value = MOCK_USERNAME, MOCK_PASSWORD
return mock_tv
async def test_import(hass, mock_setup_entry):
"""Test we get an item on import."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT,
)
assert result["type"] == "create_entry"
assert result["title"] == "Philips TV (1234567890)"
assert result["data"] == MOCK_CONFIG
assert len(mock_setup_entry.mock_calls) == 1
async def test_import_exist(hass, mock_config_entry):
"""Test we get an item on import."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT,
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form(hass, mock_setup_entry):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USERINPUT,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Philips TV (1234567890)"
assert result2["data"] == MOCK_CONFIG
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_tv):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_tv.system = None
result = await hass.config_entries.flow.async_configure(
result["flow_id"], MOCK_USERINPUT
)
assert result["type"] == "form"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_unexpected_error(hass, mock_tv):
"""Test we handle unexpected exceptions."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_tv.getSystem.side_effect = Exception("Unexpected exception")
result = await hass.config_entries.flow.async_configure(
result["flow_id"], MOCK_USERINPUT
)
assert result["type"] == "form"
assert result["errors"] == {"base": "unknown"}
async def test_pairing(hass, mock_tv_pairable, mock_setup_entry):
"""Test we get the form."""
mock_tv = mock_tv_pairable
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USERINPUT,
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_tv.setTransport.assert_called_with(True)
mock_tv.pairRequest.assert_called()
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"pin": "1234"}
)
assert result == {
"flow_id": ANY,
"type": "create_entry",
"description": None,
"description_placeholders": None,
"handler": "philips_js",
"result": ANY,
"title": "55PUS7181/12 (ABCDEFGHIJKLF)",
"data": MOCK_CONFIG_PAIRED,
"version": 1,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_pair_request_failed(hass, mock_tv_pairable, mock_setup_entry):
"""Test we get the form."""
mock_tv = mock_tv_pairable
mock_tv.pairRequest.side_effect = PairingFailure({})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USERINPUT,
)
assert result == {
"flow_id": ANY,
"description_placeholders": {"error_id": None},
"handler": "philips_js",
"reason": "pairing_failure",
"type": "abort",
}
async def test_pair_grant_failed(hass, mock_tv_pairable, mock_setup_entry):
"""Test we get the form."""
mock_tv = mock_tv_pairable
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_USERINPUT,
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_tv.setTransport.assert_called_with(True)
mock_tv.pairRequest.assert_called()
# Test with invalid pin
mock_tv.pairGrant.side_effect = PairingFailure({"error_id": "INVALID_PIN"})
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"pin": "1234"}
)
assert result["type"] == "form"
assert result["errors"] == {"pin": "invalid_pin"}
# Test with unexpected failure
mock_tv.pairGrant.side_effect = PairingFailure({})
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"pin": "1234"}
)
assert result == {
"flow_id": ANY,
"description_placeholders": {"error_id": None},
"handler": "philips_js",
"reason": "pairing_failure",
"type": "abort",
}
```
#### File: components/pvpc_hourly_pricing/test_config_flow.py
```python
from datetime import datetime
from unittest.mock import patch
from pytz import timezone
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.pvpc_hourly_pricing import ATTR_TARIFF, DOMAIN
from homeassistant.const import CONF_NAME
from homeassistant.helpers import entity_registry as er
from .conftest import check_valid_state
from tests.common import date_util
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_flow(
hass, legacy_patchable_time, pvpc_aioclient_mock: AiohttpClientMocker
):
"""
Test config flow for pvpc_hourly_pricing.
- Create a new entry with tariff "normal"
- Check state and attributes
- Check abort when trying to config another with same tariff
- Check removal and add again to check state restoration
"""
hass.config.time_zone = timezone("Europe/Madrid")
mock_data = {"return_time": datetime(2019, 10, 26, 14, 0, tzinfo=date_util.UTC)}
def mock_now():
return mock_data["return_time"]
with patch("homeassistant.util.dt.utcnow", new=mock_now):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "test", ATTR_TARIFF: "normal"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
check_valid_state(state, tariff="normal")
assert pvpc_aioclient_mock.call_count == 1
# Check abort when configuring another with same tariff
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "test", ATTR_TARIFF: "normal"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert pvpc_aioclient_mock.call_count == 1
# Check removal
registry = er.async_get(hass)
registry_entity = registry.async_get("sensor.test")
assert await hass.config_entries.async_remove(registry_entity.config_entry_id)
# and add it again with UI
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "test", ATTR_TARIFF: "normal"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
check_valid_state(state, tariff="normal")
assert pvpc_aioclient_mock.call_count == 2
```
#### File: components/recorder/conftest.py
```python
from __future__ import annotations
from collections.abc import AsyncGenerator
from typing import Awaitable, Callable, cast
import pytest
from homeassistant.components.recorder import Recorder
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .common import async_recorder_block_till_done
from tests.common import (
async_init_recorder_component,
get_test_home_assistant,
init_recorder_component,
)
SetupRecorderInstanceT = Callable[..., Awaitable[Recorder]]
@pytest.fixture
def hass_recorder():
"""Home Assistant fixture with in-memory recorder."""
hass = get_test_home_assistant()
def setup_recorder(config=None):
"""Set up with params."""
init_recorder_component(hass, config)
hass.start()
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
return hass
yield setup_recorder
hass.stop()
@pytest.fixture
async def async_setup_recorder_instance() -> AsyncGenerator[
SetupRecorderInstanceT, None
]:
"""Yield callable to setup recorder instance."""
async def async_setup_recorder(
hass: HomeAssistant, config: ConfigType | None = None
) -> Recorder:
"""Setup and return recorder instance.""" # noqa: D401
await async_init_recorder_component(hass, config)
await hass.async_block_till_done()
instance = cast(Recorder, hass.data[DATA_INSTANCE])
await async_recorder_block_till_done(hass, instance)
assert isinstance(instance, Recorder)
return instance
yield async_setup_recorder
```
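A minimal sketch of how a test might consume the `async_setup_recorder_instance` fixture above; the entity id, state value, and the helper import are assumptions for illustration:
```python
# Hypothetical test module; assumes the same .common helper used by the conftest above.
from .common import async_recorder_block_till_done


async def test_recorder_captures_state(hass, async_setup_recorder_instance):
    """Sketch: start a recorder instance and let it flush one state change."""
    instance = await async_setup_recorder_instance(hass)
    hass.states.async_set("sensor.demo", "42")
    await hass.async_block_till_done()
    await async_recorder_block_till_done(hass, instance)
```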
#### File: components/zeroconf/test_init.py
```python
from unittest.mock import patch
from zeroconf import (
BadTypeInNameException,
Error as ZeroconfError,
InterfaceChoice,
IPVersion,
ServiceInfo,
ServiceStateChange,
)
from homeassistant.components import zeroconf
from homeassistant.components.zeroconf import CONF_DEFAULT_INTERFACE, CONF_IPV6
from homeassistant.const import (
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.generated import zeroconf as zc_gen
from homeassistant.setup import async_setup_component
NON_UTF8_VALUE = b"ABCDEF\x8a"
NON_ASCII_KEY = b"non-ascii-key\x8a"
PROPERTIES = {
b"macaddress": b"ABCDEF012345",
b"non-utf8-value": NON_UTF8_VALUE,
NON_ASCII_KEY: None,
}
HOMEKIT_STATUS_UNPAIRED = b"1"
HOMEKIT_STATUS_PAIRED = b"0"
_ROUTE_NO_LOOPBACK = (
{
"attrs": [
("RTA_TABLE", 254),
("RTA_DST", "192.168.127.12"),
("RTA_OIF", 4),
("RTA_PREFSRC", "192.168.1.5"),
],
},
)
_ROUTE_LOOPBACK = (
{
"attrs": [
("RTA_TABLE", 254),
("RTA_DST", "192.168.127.12"),
("RTA_OIF", 4),
("RTA_PREFSRC", "127.0.0.1"),
],
},
)
def service_update_mock(zeroconf, services, handlers, *, limit_service=None):
"""Call service update handler."""
for service in services:
if limit_service is not None and service != limit_service:
continue
handlers[0](zeroconf, service, f"_name.{service}", ServiceStateChange.Added)
def get_service_info_mock(service_type, name):
"""Return service info for get_service_info."""
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties=PROPERTIES,
)
def get_service_info_mock_without_an_address(service_type, name):
"""Return service info for get_service_info without any addresses."""
return ServiceInfo(
service_type,
name,
addresses=[],
port=80,
weight=0,
priority=0,
server="name.local.",
properties=PROPERTIES,
)
def get_homekit_info_mock(model, pairing_status):
"""Return homekit info for get_service_info for an homekit device."""
def mock_homekit_info(service_type, name):
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties={b"md": model.encode(), b"sf": pairing_status},
)
return mock_homekit_info
def get_zeroconf_info_mock(macaddress):
"""Return info for get_service_info for an zeroconf device."""
def mock_zc_info(service_type, name):
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties={b"macaddress": macaddress.encode()},
)
return mock_zc_info
def get_zeroconf_info_mock_manufacturer(manufacturer):
"""Return info for get_service_info for an zeroconf device."""
def mock_zc_info(service_type, name):
return ServiceInfo(
service_type,
name,
addresses=[b"\n\x00\x00\x14"],
port=80,
weight=0,
priority=0,
server="name.local.",
properties={b"manufacturer": manufacturer.encode()},
)
return mock_zc_info
async def test_setup(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
expected_flow_calls = 0
for matching_components in zc_gen.ZEROCONF.values():
domains = set()
for component in matching_components:
if len(component) == 1:
domains.add(component["domain"])
expected_flow_calls += len(domains)
assert len(mock_config_flow.mock_calls) == expected_flow_calls
# Test instance is set.
assert "zeroconf" in hass.data
assert await hass.components.zeroconf.async_get_instance() is mock_zeroconf
async def test_setup_with_overly_long_url_and_name(hass, mock_zeroconf, caplog):
"""Test we still setup with long urls and names."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.get_url",
return_value="https://this.url.is.way.too.long/very/deep/path/that/will/make/us/go/over/the/maximum/string/length/and/would/cause/zeroconf/to/fail/to/startup/because/the/key/and/value/can/only/be/255/bytes/and/this/string/is/a/bit/longer/than/the/maximum/length/that/we/allow/for/a/value",
), patch.object(
hass.config,
"location_name",
"\u00dcBER \u00dcber German Umlaut long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string long string",
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert "https://this.url.is.way.too.long" in caplog.text
assert "German Umlaut" in caplog.text
async def test_setup_with_default_interface(hass, mock_zeroconf):
"""Test default interface config."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_DEFAULT_INTERFACE: True}}
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(interface_choice=InterfaceChoice.Default)
async def test_setup_without_default_interface(hass, mock_zeroconf):
"""Test without default interface config."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_DEFAULT_INTERFACE: False}}
)
assert mock_zeroconf.called_with()
async def test_setup_without_ipv6(hass, mock_zeroconf):
"""Test without ipv6."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_IPV6: False}}
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(ip_version=IPVersion.V4Only)
async def test_setup_with_ipv6(hass, mock_zeroconf):
"""Test without ipv6."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(
hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_IPV6: True}}
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with()
async def test_setup_with_ipv6_default(hass, mock_zeroconf):
"""Test without ipv6 as default."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with()
async def test_service_with_invalid_name(hass, mock_zeroconf, caplog):
"""Test we do not crash on service with an invalid name."""
with patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = BadTypeInNameException
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert "Failed to get info for device" in caplog.text
async def test_zeroconf_match_macaddress(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_http._tcp.local.",
"Shelly108._http._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{
"_http._tcp.local.": [
{"domain": "shelly", "name": "shelly*", "macaddress": "FFAADD*"}
]
},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_zeroconf_info_mock(
"FFAADDCC11DD"
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "shelly"
async def test_zeroconf_match_manufacturer(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_airplay._tcp.local.",
"s1000._airplay._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{"_airplay._tcp.local.": [{"domain": "samsungtv", "manufacturer": "samsung*"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = (
get_zeroconf_info_mock_manufacturer("Samsung Electronics")
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "samsungtv"
async def test_zeroconf_no_match(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_http._tcp.local.",
"somethingelse._http._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{"_http._tcp.local.": [{"domain": "shelly", "name": "shelly*"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_zeroconf_info_mock(
"FFAADDCC11DD"
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 0
async def test_zeroconf_no_match_manufacturer(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
def http_only_service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf,
"_airplay._tcp.local.",
"s1000._airplay._tcp.local.",
ServiceStateChange.Added,
)
with patch.dict(
zc_gen.ZEROCONF,
{"_airplay._tcp.local.": [{"domain": "samsungtv", "manufacturer": "samsung*"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=http_only_service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = (
get_zeroconf_info_mock_manufacturer("Not Samsung Electronics")
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 0
async def test_homekit_match_partial_space(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._tcp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"LIFX bulb", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "lifx"
async def test_homekit_match_partial_dash(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._udp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._udp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"Rachio-fa46ba", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "rachio"
async def test_homekit_match_full(hass, mock_zeroconf):
"""Test configured options for a device are loaded via config entry."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._udp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._udp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"BSB002", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "hue"
async def test_homekit_already_paired(hass, mock_zeroconf):
"""Test that an already paired device is sent to homekit_controller."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._tcp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"tado", HOMEKIT_STATUS_PAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 2
assert mock_config_flow.mock_calls[0][1][0] == "tado"
assert mock_config_flow.mock_calls[1][1][0] == "homekit_controller"
async def test_homekit_invalid_pairing_status(hass, mock_zeroconf):
    """Test that a device with an invalid pairing status is not forwarded to homekit_controller."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf,
"HaServiceBrowser",
side_effect=lambda *args, **kwargs: service_update_mock(
*args, **kwargs, limit_service="_hap._tcp.local."
),
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"tado", b"invalid"
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "tado"
async def test_homekit_not_paired(hass, mock_zeroconf):
"""Test that an not paired device is sent to homekit_controller."""
with patch.dict(
zc_gen.ZEROCONF,
{"_hap._tcp.local.": [{"domain": "homekit_controller"}]},
clear=True,
), patch.object(
hass.config_entries.flow, "async_init"
) as mock_config_flow, patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
) as mock_service_browser:
mock_zeroconf.get_service_info.side_effect = get_homekit_info_mock(
"this_will_not_match_any_integration", HOMEKIT_STATUS_UNPAIRED
)
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_service_browser.mock_calls) == 1
assert len(mock_config_flow.mock_calls) == 1
assert mock_config_flow.mock_calls[0][1][0] == "homekit_controller"
async def test_info_from_service_non_utf8(hass):
"""Test info_from_service handles non UTF-8 property keys and values correctly."""
service_type = "_test._tcp.local."
info = zeroconf.info_from_service(
get_service_info_mock(service_type, f"test.{service_type}")
)
raw_info = info["properties"].pop("_raw", False)
assert raw_info
assert len(raw_info) == len(PROPERTIES) - 1
assert NON_ASCII_KEY not in raw_info
assert len(info["properties"]) <= len(raw_info)
assert "non-utf8-value" not in info["properties"]
assert raw_info["non-utf8-value"] is NON_UTF8_VALUE
async def test_info_from_service_without_addresses(hass):
    """Test info_from_service does not throw when there are no addresses."""
service_type = "_test._tcp.local."
info = zeroconf.info_from_service(
get_service_info_mock_without_an_address(service_type, f"test.{service_type}")
)
assert info is None
async def test_get_instance(hass, mock_zeroconf):
"""Test we get an instance."""
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
assert await hass.components.zeroconf.async_get_instance() is mock_zeroconf
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert len(mock_zeroconf.ha_close.mock_calls) == 1
async def test_removed_ignored(hass, mock_zeroconf):
"""Test we remove it when a zeroconf entry is removed."""
mock_zeroconf.get_service_info.side_effect = ZeroconfError
def service_update_mock(zeroconf, services, handlers):
"""Call service update handler."""
handlers[0](
zeroconf, "_service.added", "name._service.added", ServiceStateChange.Added
)
handlers[0](
zeroconf,
"_service.updated",
"name._service.updated",
ServiceStateChange.Updated,
)
handlers[0](
zeroconf,
"_service.removed",
"name._service.removed",
ServiceStateChange.Removed,
)
with patch.object(zeroconf, "HaServiceBrowser", side_effect=service_update_mock):
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_zeroconf.get_service_info.mock_calls) == 2
assert mock_zeroconf.get_service_info.mock_calls[0][1][0] == "_service.added"
assert mock_zeroconf.get_service_info.mock_calls[1][1][0] == "_service.updated"
async def test_async_detect_interfaces_setting_non_loopback_route(hass, mock_zeroconf):
"""Test without default interface config and the route returns a non-loopback address."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.IPRoute.route",
return_value=_ROUTE_NO_LOOPBACK,
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(interface_choice=InterfaceChoice.Default)
async def test_async_detect_interfaces_setting_loopback_route(hass, mock_zeroconf):
"""Test without default interface config and the route returns a loopback address."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.IPRoute.route", return_value=_ROUTE_LOOPBACK
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(interface_choice=InterfaceChoice.All)
async def test_async_detect_interfaces_setting_empty_route(hass, mock_zeroconf):
"""Test without default interface config and the route returns nothing."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch("homeassistant.components.zeroconf.IPRoute.route", return_value=[]):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(interface_choice=InterfaceChoice.All)
async def test_async_detect_interfaces_setting_exception(hass, mock_zeroconf):
"""Test without default interface config and the route throws an exception."""
with patch.object(hass.config_entries.flow, "async_init"), patch.object(
zeroconf, "HaServiceBrowser", side_effect=service_update_mock
), patch(
"homeassistant.components.zeroconf.IPRoute.route", side_effect=AttributeError
):
mock_zeroconf.get_service_info.side_effect = get_service_info_mock
assert await async_setup_component(hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_zeroconf.called_with(interface_choice=InterfaceChoice.All)
``` |
{
"source": "JonasJF360/Calculadora_com_tkinter",
"score": 3
} |
#### File: JonasJF360/Calculadora_com_tkinter/calculadora.py
```python
from tkinter import *
# colors
co1 = '#feffff'  # white
co2 = '#e26165'  # red
co2_s = '#fa8f92'  # red (highlight)
co3 = '#757f7f'  # grey
co3_s = '#bebebe'  # grey (highlight)
co4 = '#3ea5e0'  # blue
co4_s = '#71c8fa'  # blue (highlight)
co5 = '#3cd878'  # green
co5_s = '#75faa8'  # green (highlight)
fundo = '#3b3b3b'  # dark grey
# stores every expression fragment that will be evaluated
todos_valores = ''
################# Functions ####################
def pegar_valor(event):
    # append the pressed key to the running expression and display it
    global todos_valores
    todos_valores = todos_valores + str(event)
    valor_texto.set(todos_valores)
def calcular():
    global todos_valores
    resultado = eval(todos_valores)
    # show a whole number without decimals, otherwise round to 4 decimal places
    resultado = str(int(resultado)) if resultado - \
        int(resultado) == 0 else str(round(resultado, 4))
    valor_texto.set(resultado)
    todos_valores = resultado
def limpar():
global todos_valores
valor_texto.set('0')
todos_valores = ''
# for single-value input
root = Tk()
icone = PhotoImage(file='img/icon.png')
root.iconphoto(0, icone)
root.title('Calculadora')
root.geometry('265x365')
root.resizable(0, 0)
root.configure(bg=fundo)
valor_texto = StringVar()
valor_texto.set('0')
################# Label ####################
app_tela = Label(root, textvariable=valor_texto, relief='flat',
bd=0, anchor=E, font=('Ivy 16 bold'), background='#b9ce98', foreground='#3b3b3b', padx=8)
app_tela.pack(ipady=15, fill='both')
################# Buttons ####################
b_1 = Button(root, text='C', height=2, bg=co2, highlightbackground=co2_s, activebackground=co2_s, command=limpar,
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_1.place(x=0, y=61, width=131)
b_2 = Button(root, text='%', width=3, height=2, bg=co4, highlightbackground=co4_s, activebackground=co4_s, command=lambda: pegar_valor('%'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_2.place(x=134, y=61)
b_3 = Button(root, text='/', width=3, height=2, bg=co4, highlightbackground=co4_s, activebackground=co4_s, command=lambda: pegar_valor('/'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_3.place(x=201, y=61)
b_4 = Button(root, text='7', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('7'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_4.place(x=0, y=122)
b_5 = Button(root, text='8', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('8'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_5.place(x=67, y=122)
b_6 = Button(root, text='9', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('9'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_6.place(x=134, y=122)
b_7 = Button(root, text='*', width=3, height=2, bg=co4, highlightbackground=co4_s, activebackground=co4_s, command=lambda: pegar_valor('*'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_7.place(x=201, y=122)
b_8 = Button(root, text='4', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('4'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_8.place(x=0, y=183)
b_9 = Button(root, text='5', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('5'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_9.place(x=67, y=183)
b_10 = Button(root, text='6', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('6'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_10.place(x=134, y=183)
b_11 = Button(root, text='-', width=3, height=2, bg=co4, highlightbackground=co4_s, activebackground=co4_s, command=lambda: pegar_valor('-'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_11.place(x=201, y=183)
b_12 = Button(root, text='1', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('1'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_12.place(x=0, y=244)
b_13 = Button(root, text='2', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('2'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_13.place(x=67, y=244)
b_14 = Button(root, text='3', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('3'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_14.place(x=134, y=244)
b_15 = Button(root, text='+', width=3, height=2, bg=co4, highlightbackground=co4_s, activebackground=co4_s, command=lambda: pegar_valor('+'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_15.place(x=201, y=244)
b_16 = Button(root, text='0', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('0'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_16.place(x=0, y=305)
b_17 = Button(root, text='.', width=3, height=2, bg=co3, highlightbackground=co3_s, activebackground=co3_s, command=lambda: pegar_valor('.'),
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_17.place(x=67, y=305)
b_18 = Button(root, text='=', height=2, bg=co5, highlightbackground=co5_s, activebackground=co5_s, command=calcular,
fg=co1, activeforeground=co1, font=('Ivy 13 bold'), relief=RAISED,
overrelief=RIDGE)
b_18.place(x=134, y=305, width=131)
root.mainloop()
``` |
{
"source": "JonasJF360/Curso_Tkinter",
"score": 4
} |
#### File: Aulas/colocando_em_pratica/messagebox.py
```python
from tkinter import *
from tkinter import messagebox
# https://pythonguides.com/category/python-tutorials/python-tkinter/
janela = Tk()
janela.title('Messagebox')
janela.geometry('300x200')
janela.config(bg='#5FB691')
def msg1():
messagebox.showinfo('information', 'Hi! You got a prompt.')
messagebox.showerror('error', 'Something went wrong!')
messagebox.showwarning('warning', 'accept T&C')
messagebox.askquestion('Ask Question', 'Do you want to continue?')
messagebox.askokcancel('Ok Cancel', 'Are You sure?')
messagebox.askyesno('Yes|No', 'Do you want to proceed?')
messagebox.askretrycancel('retry', 'Failed! want to try again?')
Button(janela, text='Click Me', command=msg1).pack(pady=50)
janela.mainloop()
```
#### File: Aulas/colocando_em_pratica/treinando_checkbox.py
```python
from tkinter import *
# Functions and data list
paises = [
'Afeganistão', 'Albania', 'Algeria', 'Andorra',
'Angola', 'Australia', 'Bolivia', 'Brasil',
'Canada', 'China', 'Clile', 'Congo', 'Mexico',
'Islandia', 'Israel', 'Estados Unidos', 'Zimbabwe',
'Ucrânia', 'Belgica', 'Eslovaqui', 'Peru', 'Onduras'
]
def deletar():
texto = list(lista.curselection())
decremento = 0
for i in texto:
lista.delete(i - decremento, i - decremento)
decremento += 1
# colors
fundo = '#2d3b58'
fonte = '#ffffff'
selec_listbox = '#798aac'
cor_linha1 = '#97aacf'
fundo_listbox = '#a8bfee'
# GUI
root = Tk()
root.config(bg=fundo)
root.title('Apagando da lista')
largura = 500
altura = 400
posx = (root.winfo_screenwidth() - largura) / 2
posy = (root.winfo_screenheight() - altura) / 2 - 15
root.geometry('%dx%d+%d+%d' % (largura, altura, posx, posy))
root.resizable(0, 0) # 0 = False | 1 = True
# Widgets
show = Label(root, text='Selecione um ou mais países',
bg=fundo, fg=fonte, font=('Verdana', 14))
show.pack(pady=5, padx=10)
lista = Listbox(root, selectmode='multiple', bg=fundo_listbox,
highlightbackground='#42506b',
selectbackground=selec_listbox)
lista.pack(padx=10, expand=YES, fill='both')
for i, item in enumerate(paises):
num = i + 1
num = '0' + str(num) if num < 10 else num
lista.insert(END, f' {num} - {item}')
lista.itemconfig(i, bg=cor_linha1)
Button(root, text='Apagar', font='Verdana 10 bold',
activebackground='#42506b', highlightbackground='#42506b',
bg=fundo, activeforeground=fonte, fg=fonte, command=deletar
).pack(pady=5)
root.mainloop()
``` |
{
"source": "jonasjonker/gadod",
"score": 3
} |
#### File: gadod/apps/some_parser.py
```python
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
def setGlobalSparkSession(appName):
global spark
spark = SparkSession.builder.appName(appName).getOrCreate()
def readDataFrameFromText(file):
return spark.read.text(file)
def verwerkDataFrame(df):
return df.withColumn("value", F.upper(F.col("value")))
```
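A minimal sketch of how these three helpers chain together outside of pytest — the file path is hypothetical and a local Spark installation is assumed:
```python
import apps.some_parser as parser

# build the module-global SparkSession first; the other helpers rely on it
parser.setGlobalSparkSession("LocalDemo")

# read a plain-text file into a single "value" column and upper-case it
df = parser.readDataFrameFromText("some_local_file.txt")  # hypothetical path
parser.verwerkDataFrame(df).show()
```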
#### File: gadod/test/test_apps.py
```python
from pyspark.sql import Row
import pyspark.sql.types as T
import pytest
import apps.some_parser as parser
@pytest.fixture
def sdf(tmp_path):
parser.setGlobalSparkSession("HelloWorld")
file = tmp_path / "helloworld.txt"
file.write_text("hello, world!")
df = parser.readDataFrameFromText(str(file))
return parser.verwerkDataFrame(df)
def test_some_parser_output(sdf):
assert sdf.collect() == [Row(**{
"value": "HELLO, WORLD!"
})]
def test_some_parser_schema(sdf):
assert sdf.schema == T.StructType([
T.StructField("value", T.StringType(), True),
])
``` |
{
"source": "jonasjucker/spack-c2sm",
"score": 2
} |
#### File: packages/cosmo-grib-api-definitions/package.py
```python
from spack import *
class CosmoGribApiDefinitions(Package):
"""To simplify the usage of the GRIB 2 format within the COSMO Consortium, a COSMO GRIB 2 Policy has been defined. One element of this policy is to define a unified GRIB API system for the COSMO community, which is compatible with all COSMO software. This unified system is split into two parts, the vendor distribution of the GRIB API, available from ECMWF or from the repository libgrib-api-vendor, and the modified samples and definitions used by the COSMO consortium, available in the current repository."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "https://github.com/elsagermann/libgrib-api-cosmo-resources.git"
url = "<EMAIL>:elsagermann/libgrib-api-cosmo-resources.git"
git = '<EMAIL>:elsagermann/libgrib-api-cosmo-resources.git'
maintainers = ['elsagermann']
version('172.16.58.3', commit='<PASSWORD>')
depends_on('[email protected]', when='@1.20.0.2')
def setup_run_environment(self, env):
grib_definition_path = self.spec[
            'cosmo-grib-api-definitions'].prefix + '/cosmoDefinitions/definitions/:' + self.spec[
'cosmo-grib-api'].prefix + '/share/grib_api/definitions/'
env.prepend_path('GRIB_DEFINITION_PATH', grib_definition_path)
grib_samples_path = self.spec[
'cosmo-grib-api-definitions'].prefix + '/cosmoDefinitions/samples/'
env.prepend_path('GRIB_SAMPLES_PATH', grib_samples_path)
def setup_dependent_build_environment(self, env, dependent_spec):
self.setup_run_environment(env)
def install(self, spec, prefix):
mkdir(prefix.cosmoDefinitions)
mkdir(prefix.cosmoDefinitions + '/definitions')
mkdir(prefix.cosmoDefinitions + '/samples')
install_tree('definitions', prefix.cosmoDefinitions + '/definitions')
install_tree('samples', prefix.cosmoDefinitions + '/samples')
``` |
{
"source": "jonasjucker/wildlife-telegram",
"score": 3
} |
#### File: jonasjucker/wildlife-telegram/image_processing.py
```python
import os
import math
import sys
import logging
from PIL import Image
from natsort import natsorted
def images_from_folder(folder):
images = natsorted([os.path.join(folder,images) for images in os.listdir(folder) if 'jpg' in images])
# we want most recent images first
images.reverse()
return images
def load_images(image_names):
return [Image.open(name) for name in image_names]
def dimensions_of_total_image(images):
total_nr = len(images)
logging.debug(f'Compute ratios for {total_nr} sub-images')
sizes = {}
sizes['x'] = math.ceil(math.sqrt(total_nr))
sizes['y'] = math.ceil(math.sqrt(total_nr))
orig_size = images[0].size
shrink_size = (int(orig_size[0]/sizes['x']), int(orig_size[1]/sizes['y']))
sizes['total'] = orig_size
sizes['image'] = shrink_size
return sizes
def compose_image(images, sizes):
total_image = Image.new('RGB',sizes['total'], (250,250,250))
location = (0,0)
col = 0
for image in images:
small_image = image.resize(sizes['image'])
if col < sizes['x']:
logging.debug(f'Place sub-image at location:{location}')
total_image.paste(small_image,location)
location = (location[0] + sizes['image'][0], location[1])
col += 1
else:
location = (0, location[1] + sizes['image'][1])
logging.debug(f'Place sub-image at location:{location}')
total_image.paste(small_image,location)
location = (sizes['image'][0], location[1] )
col = 1
return total_image
def split_into_chunks(image_names,chunksize):
return [image_names[i:i+chunksize] for i in range(0,len(image_names),chunksize)]
def collective_image(source,destination,chunksize,identifier=None):
logging.info(f'Create collective image with {chunksize} sub-images')
image_chunks = split_into_chunks(images_from_folder(source), chunksize)
names = []
count = 0
for chunk in image_chunks:
if identifier:
names.append(os.path.join(destination,f'{identifier}_composite_{count}.jpg'))
else:
names.append(os.path.join(destination,f'composite_{count}.jpg'))
images = load_images(chunk)
dims = dimensions_of_total_image(images)
image = compose_image(images,dims)
image.save(names[-1])
count+=1
return names
if __name__ == '__main__' :
from events import EventHandler
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG,
)
logger = logging.getLogger(__name__)
event = EventHandler('photos','videos','test')
event_names = event.list('p',ignore=['.gitkeep','test'])
print(event_names)
_ = collective_image('photos/2022-05-23_08','composites',25,identifier='y')
```
#### File: jonasjucker/wildlife-telegram/location.py
```python
import time
from datetime import date,datetime
from astral import LocationInfo
from astral.sun import sun
class CamLocation:
def __init__(self,lat,lon,info,country,timezone):
self.info = LocationInfo(info, country, timezone, lat, lon)
def is_night(self):
s = sun(self.info.observer, date=date.today(),tzinfo=self.info.timezone)
sunrise = s["sunrise"].timestamp()
sunset = s["sunset"].timestamp()
time_now = datetime.now().timestamp()
if time_now > sunrise and time_now < sunset:
return False
else:
return True
```
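As a standalone sanity check, the class above can be used like this — the coordinates are arbitrary example values and the astral package must be installed (main.py further down does the same thing inside its setup):
```python
from location import CamLocation  # assumes running from the repository root

# roughly Zurich; any latitude/longitude plus an IANA timezone name works
spot = CamLocation(47.3, 8.5, "Somewhere in the forest", "Switzerland", "Europe/Zurich")

if spot.is_night():
    print("night: switch the camera to IR-friendly settings")
else:
    print("daylight: keep normal exposure")
```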
#### File: jonasjucker/wildlife-telegram/main.py
```python
import RPi.GPIO as GPIO
import logging
import argparse
import time
import sys
import os
from sensors import Pir
from cam import WildCam
from bot import WildBot
from location import CamLocation
from events import EventHandler
from image_processing import collective_image
def bot_launch(bot_token,retry=3, wait=1):
for i in range(retry):
try:
bot = WildBot(bot_token)
except Exception as e:
logging.warning(f'Bot cannot connect: {e}')
logging.info(f'Retry again in {wait} seconds')
bot = WildBot(bot_token, offline=True)
time.sleep(wait)
else:
break
return bot
def shutdown(cam,bot,pir):
cam.close()
pir.deactivate()
bot_shutdown(bot)
logging.info('Shutdown')
sys.exit(0)
def bot_shutdown(bot):
if bot.is_offline_for_failover:
logging.info('Shutdown-failover-bot')
else:
bot.stop()
logging.info('Shutdown-bot')
def main():
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--bot_token', \
dest='bot_token', \
type=str, \
help='unique token of bot (KEEP PRIVATE!)')
args = parser.parse_args()
GPIO.setmode(GPIO.BCM)
pir = Pir()
pir.activate()
cam = WildCam()
event = EventHandler('photos','videos','test')
bot = bot_launch(args.bot_token, retry=10, wait=5)
spot = CamLocation(47.3,8.5,"Somewhere in the forest", "Switzerland", "Europe/Zurich")
snooze = 5
logging.info('Enter infinite loop')
while True:
logging.debug('loop iteration')
# normal mode
if bot.is_sensible_to_motion:
pir.wait_for_movement()
if bot.is_sensible_to_motion:
photos = cam.shot(nr_of_shots=2,pause=10,night_mode=spot.is_night(),name_generator=event.new_record_name)
#video = cam.record(10,night_mode=True, new_record_name,name_generator=event.new_record_name))
if not bot.already_down:
                    logging.info('Skip broadcast for now')
# bot.broadcast(photos,[])
# summary of all photo-events
if bot.user_wants_event_summary:
bot.user_wants_event_summary = False
event_names = event.list('p',ignore=['.gitkeep','test'])
counter = 0
photos = []
for name in event_names:
photos = collective_image(name,'composites',25,identifier=counter)
counter += 1
bot.broadcast(photos,[],message=f'Event: {event.strip(name)}')
# test mode
if bot.user_wants_test:
bot.user_wants_test = False
photos,videos = cam.test(event.new_test_record_name)
bot.broadcast(photos,videos)
# shutdown
if bot.user_wants_shutdown:
shutdown(cam,bot,pir)
# bot-shutdown
if bot.user_wants_bot_shutdown and not bot.already_down:
bot_shutdown(bot)
            bot.already_down = True
# reconnect bot
if bot.has_no_connection:
bot_shutdown(bot)
bot = bot_launch(args.bot_token,retry=5, wait=3)
logging.info(f'snooze {snooze}s ...')
time.sleep(snooze)
if __name__ == '__main__':
main()
``` |
{
"source": "jonasjungaker/VectorsAlgebra",
"score": 4
} |
#### File: VectorsAlgebra/Algebra/vector.py
```python
class vector:
    def __init__(self, *vals):
        # store every component as a float so the vector is numeric throughout
        self.x = [float(val) for val in vals]
        self.dimension = len(self.x)
def __getitem__(self, key):
return self.x[key]
def __setitem__(self, key, value):
self.x[key] = value
return self
    def __add__(self, other):
        if isinstance(other, (int, float)):  # scalar addition, now also covering floats
            return vector(*[self[i] + other for i in range(self.dimension)])
self._checkDimension(other)
newx = []
for i in range(self.dimension):
newx.append(self[i] + other[i])
return vector(*newx)
def __eq__(self, other):
if self.dimension != other.dimension:
return False
for i in range(self.dimension):
if self[i] != other[i]:
return False
return True
def __mul__(self, other):
        if isinstance(other, (int, float)):  # scalar multiplication
x = []
for i in range(self.dimension):
x.append(self[i] * other)
return vector(*x)
self._checkDimension(other)
value = 0
for i in range(self.dimension):
value += self[i] * other[i]
return value
def __rmul__(self, other):
return self * other
def __matmul__(self, other):
        if self.dimension != 3 or other.dimension != 3:
raise TypeError("Vector dimensions must be 3")
v = vector(0, 0, 0)
v[0] = (self[1] * other[2]) - (self[2] * other[1])
v[1] = (self[2] * other[0]) - (self[0] * other[2])
v[2] = (self[0] * other[1]) - (self[1] * other[0])
return v
def __sub__(self, other):
return self + ( - other)
def __neg__(self):
v = []
        for i in range(self.dimension):
v.append( - self[i])
return vector(*v)
def __abs__(self):
value = self.magnitude()
return value**0.5
def _checkDimension(self, other):
if self.dimension != other.dimension:
raise TypeError("Vector dimensions must agree")
def magnitude(self):
# Returns the value of the sum of all values of the vector squared
powerMagnitude = 0
for a in self.x:
powerMagnitude += a*a
return powerMagnitude
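

# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Quick check of the operators defined above; expected results are noted in comments.
if __name__ == "__main__":
    a = vector(1, 2, 3)
    b = vector(4, 5, 6)
    print((a + b).x)   # component-wise sum -> [5.0, 7.0, 9.0]
    print(a * b)       # dot product -> 32.0
    print((a @ b).x)   # cross product -> [-3.0, 6.0, -3.0]
    print(abs(b))      # Euclidean length -> sqrt(77), about 8.775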
``` |
{
"source": "jonaskaempf/bpftools",
"score": 2
} |
#### File: src/bpftools/defs.py
```python
import re
def parse_defs(scope, defines):
'''
    Parse CPP #defines of the basic form '#define [a-zA-Z0-9_]+ <some replacement><newline>'
'''
p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+(.*)$')
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
p_cpp_comment = re.compile('//.*')
# remove comments
defines = p_comment.sub(' ', defines)
# remove continuation lines
defines = defines.replace('\\\n', ' ')
local_scope = dict(scope)
defs = {}
for ln in defines.splitlines():
m = p_define.match(ln)
if m:
defs[m.group(1)] = eval(m.group(2), globals(), local_scope)
local_scope[m.group(1)] = defs[m.group(1)]
return defs
generic = {
'__X32_SYSCALL_BIT': 0x40000000,
}
generic.update(parse_defs(generic, '''
#define EM_NONE 0
#define EM_M32 1
#define EM_SPARC 2
#define EM_386 3
#define EM_68K 4
#define EM_88K 5
#define EM_486 6 /* Perhaps disused */
#define EM_860 7
#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */
#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
#define EM_PARISC 15 /* HPPA */
#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
#define EM_PPC 20 /* PowerPC */
#define EM_PPC64 21 /* PowerPC64 */
#define EM_SPU 23 /* Cell BE SPU */
#define EM_ARM 40 /* ARM 32 bit */
#define EM_SH 42 /* SuperH */
#define EM_SPARCV9 43 /* SPARC v9 64-bit */
#define EM_H8_300 46 /* Renesas H8/300 */
#define EM_IA_64 50 /* HP/Intel IA-64 */
#define EM_X86_64 62 /* AMD x86-64 */
#define EM_S390 22 /* IBM S/390 */
#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
#define EM_V850 87 /* NEC v850 */
#define EM_M32R 88 /* Renesas M32R */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
#define EM_AARCH64 183 /* ARM 64 bit */
#define EM_TILEPRO 188 /* Tilera TILEPro */
#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
/*
* This is an interim value that we will use until the committee comes
* up with a final number.
*/
#define EM_ALPHA 0x9026
/* Bogus old v850 magic number, used by old tools. */
#define EM_CYGNUS_V850 0x9080
/* Bogus old m32r magic number, used by old tools. */
#define EM_CYGNUS_M32R 0x9041
/* This is the old interim value for S/390 architecture */
#define EM_S390_OLD 0xA390
/* Also Panasonic/MEI MN10300, AM33 */
#define EM_CYGNUS_MN10300 0xbeef
#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE 0x40000000
#define AUDIT_ARCH_AARCH64 (EM_AARCH64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARMEB (EM_ARM)
#define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_FRV (EM_FRV)
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_M32R (EM_M32R)
#define AUDIT_ARCH_M68K (EM_68K)
#define AUDIT_ARCH_MICROBLAZE (EM_MICROBLAZE)
#define AUDIT_ARCH_MIPS (EM_MIPS)
#define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\
__AUDIT_ARCH_CONVENTION_MIPS64_N32)
#define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|__AUDIT_ARCH_CONVENTION_MIPS64_N32)
#define AUDIT_ARCH_OPENRISC (EM_OPENRISC)
#define AUDIT_ARCH_PARISC (EM_PARISC)
#define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_PPC (EM_PPC)
#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_S390 (EM_S390)
#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_SH (EM_SH)
#define AUDIT_ARCH_SHEL (EM_SH|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_SH64 (EM_SH|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_SPARC (EM_SPARC)
#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_TILEGX (EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
'''))
arch_x86_64 = parse_defs(generic, '''
#define __NR_read 0
#define __NR_write 1
#define __NR_open 2
#define __NR_close 3
#define __NR_stat 4
#define __NR_fstat 5
#define __NR_lstat 6
#define __NR_poll 7
#define __NR_lseek 8
#define __NR_mmap 9
#define __NR_mprotect 10
#define __NR_munmap 11
#define __NR_brk 12
#define __NR_rt_sigaction 13
#define __NR_rt_sigprocmask 14
#define __NR_rt_sigreturn 15
#define __NR_ioctl 16
#define __NR_pread64 17
#define __NR_pwrite64 18
#define __NR_readv 19
#define __NR_writev 20
#define __NR_access 21
#define __NR_pipe 22
#define __NR_select 23
#define __NR_sched_yield 24
#define __NR_mremap 25
#define __NR_msync 26
#define __NR_mincore 27
#define __NR_madvise 28
#define __NR_shmget 29
#define __NR_shmat 30
#define __NR_shmctl 31
#define __NR_dup 32
#define __NR_dup2 33
#define __NR_pause 34
#define __NR_nanosleep 35
#define __NR_getitimer 36
#define __NR_alarm 37
#define __NR_setitimer 38
#define __NR_getpid 39
#define __NR_sendfile 40
#define __NR_socket 41
#define __NR_connect 42
#define __NR_accept 43
#define __NR_sendto 44
#define __NR_recvfrom 45
#define __NR_sendmsg 46
#define __NR_recvmsg 47
#define __NR_shutdown 48
#define __NR_bind 49
#define __NR_listen 50
#define __NR_getsockname 51
#define __NR_getpeername 52
#define __NR_socketpair 53
#define __NR_setsockopt 54
#define __NR_getsockopt 55
#define __NR_clone 56
#define __NR_fork 57
#define __NR_vfork 58
#define __NR_execve 59
#define __NR_exit 60
#define __NR_wait4 61
#define __NR_kill 62
#define __NR_uname 63
#define __NR_semget 64
#define __NR_semop 65
#define __NR_semctl 66
#define __NR_shmdt 67
#define __NR_msgget 68
#define __NR_msgsnd 69
#define __NR_msgrcv 70
#define __NR_msgctl 71
#define __NR_fcntl 72
#define __NR_flock 73
#define __NR_fsync 74
#define __NR_fdatasync 75
#define __NR_truncate 76
#define __NR_ftruncate 77
#define __NR_getdents 78
#define __NR_getcwd 79
#define __NR_chdir 80
#define __NR_fchdir 81
#define __NR_rename 82
#define __NR_mkdir 83
#define __NR_rmdir 84
#define __NR_creat 85
#define __NR_link 86
#define __NR_unlink 87
#define __NR_symlink 88
#define __NR_readlink 89
#define __NR_chmod 90
#define __NR_fchmod 91
#define __NR_chown 92
#define __NR_fchown 93
#define __NR_lchown 94
#define __NR_umask 95
#define __NR_gettimeofday 96
#define __NR_getrlimit 97
#define __NR_getrusage 98
#define __NR_sysinfo 99
#define __NR_times 100
#define __NR_ptrace 101
#define __NR_getuid 102
#define __NR_syslog 103
#define __NR_getgid 104
#define __NR_setuid 105
#define __NR_setgid 106
#define __NR_geteuid 107
#define __NR_getegid 108
#define __NR_setpgid 109
#define __NR_getppid 110
#define __NR_getpgrp 111
#define __NR_setsid 112
#define __NR_setreuid 113
#define __NR_setregid 114
#define __NR_getgroups 115
#define __NR_setgroups 116
#define __NR_setresuid 117
#define __NR_getresuid 118
#define __NR_setresgid 119
#define __NR_getresgid 120
#define __NR_getpgid 121
#define __NR_setfsuid 122
#define __NR_setfsgid 123
#define __NR_getsid 124
#define __NR_capget 125
#define __NR_capset 126
#define __NR_rt_sigpending 127
#define __NR_rt_sigtimedwait 128
#define __NR_rt_sigqueueinfo 129
#define __NR_rt_sigsuspend 130
#define __NR_sigaltstack 131
#define __NR_utime 132
#define __NR_mknod 133
#define __NR_uselib 134
#define __NR_personality 135
#define __NR_ustat 136
#define __NR_statfs 137
#define __NR_fstatfs 138
#define __NR_sysfs 139
#define __NR_getpriority 140
#define __NR_setpriority 141
#define __NR_sched_setparam 142
#define __NR_sched_getparam 143
#define __NR_sched_setscheduler 144
#define __NR_sched_getscheduler 145
#define __NR_sched_get_priority_max 146
#define __NR_sched_get_priority_min 147
#define __NR_sched_rr_get_interval 148
#define __NR_mlock 149
#define __NR_munlock 150
#define __NR_mlockall 151
#define __NR_munlockall 152
#define __NR_vhangup 153
#define __NR_modify_ldt 154
#define __NR_pivot_root 155
#define __NR__sysctl 156
#define __NR_prctl 157
#define __NR_arch_prctl 158
#define __NR_adjtimex 159
#define __NR_setrlimit 160
#define __NR_chroot 161
#define __NR_sync 162
#define __NR_acct 163
#define __NR_settimeofday 164
#define __NR_mount 165
#define __NR_umount2 166
#define __NR_swapon 167
#define __NR_swapoff 168
#define __NR_reboot 169
#define __NR_sethostname 170
#define __NR_setdomainname 171
#define __NR_iopl 172
#define __NR_ioperm 173
#define __NR_create_module 174
#define __NR_init_module 175
#define __NR_delete_module 176
#define __NR_get_kernel_syms 177
#define __NR_query_module 178
#define __NR_quotactl 179
#define __NR_nfsservctl 180
#define __NR_getpmsg 181
#define __NR_putpmsg 182
#define __NR_afs_syscall 183
#define __NR_tuxcall 184
#define __NR_security 185
#define __NR_gettid 186
#define __NR_readahead 187
#define __NR_setxattr 188
#define __NR_lsetxattr 189
#define __NR_fsetxattr 190
#define __NR_getxattr 191
#define __NR_lgetxattr 192
#define __NR_fgetxattr 193
#define __NR_listxattr 194
#define __NR_llistxattr 195
#define __NR_flistxattr 196
#define __NR_removexattr 197
#define __NR_lremovexattr 198
#define __NR_fremovexattr 199
#define __NR_tkill 200
#define __NR_time 201
#define __NR_futex 202
#define __NR_sched_setaffinity 203
#define __NR_sched_getaffinity 204
#define __NR_set_thread_area 205
#define __NR_io_setup 206
#define __NR_io_destroy 207
#define __NR_io_getevents 208
#define __NR_io_submit 209
#define __NR_io_cancel 210
#define __NR_get_thread_area 211
#define __NR_lookup_dcookie 212
#define __NR_epoll_create 213
#define __NR_epoll_ctl_old 214
#define __NR_epoll_wait_old 215
#define __NR_remap_file_pages 216
#define __NR_getdents64 217
#define __NR_set_tid_address 218
#define __NR_restart_syscall 219
#define __NR_semtimedop 220
#define __NR_fadvise64 221
#define __NR_timer_create 222
#define __NR_timer_settime 223
#define __NR_timer_gettime 224
#define __NR_timer_getoverrun 225
#define __NR_timer_delete 226
#define __NR_clock_settime 227
#define __NR_clock_gettime 228
#define __NR_clock_getres 229
#define __NR_clock_nanosleep 230
#define __NR_exit_group 231
#define __NR_epoll_wait 232
#define __NR_epoll_ctl 233
#define __NR_tgkill 234
#define __NR_utimes 235
#define __NR_vserver 236
#define __NR_mbind 237
#define __NR_set_mempolicy 238
#define __NR_get_mempolicy 239
#define __NR_mq_open 240
#define __NR_mq_unlink 241
#define __NR_mq_timedsend 242
#define __NR_mq_timedreceive 243
#define __NR_mq_notify 244
#define __NR_mq_getsetattr 245
#define __NR_kexec_load 246
#define __NR_waitid 247
#define __NR_add_key 248
#define __NR_request_key 249
#define __NR_keyctl 250
#define __NR_ioprio_set 251
#define __NR_ioprio_get 252
#define __NR_inotify_init 253
#define __NR_inotify_add_watch 254
#define __NR_inotify_rm_watch 255
#define __NR_migrate_pages 256
#define __NR_openat 257
#define __NR_mkdirat 258
#define __NR_mknodat 259
#define __NR_fchownat 260
#define __NR_futimesat 261
#define __NR_newfstatat 262
#define __NR_unlinkat 263
#define __NR_renameat 264
#define __NR_linkat 265
#define __NR_symlinkat 266
#define __NR_readlinkat 267
#define __NR_fchmodat 268
#define __NR_faccessat 269
#define __NR_pselect6 270
#define __NR_ppoll 271
#define __NR_unshare 272
#define __NR_set_robust_list 273
#define __NR_get_robust_list 274
#define __NR_splice 275
#define __NR_tee 276
#define __NR_sync_file_range 277
#define __NR_vmsplice 278
#define __NR_move_pages 279
#define __NR_utimensat 280
#define __NR_epoll_pwait 281
#define __NR_signalfd 282
#define __NR_timerfd_create 283
#define __NR_eventfd 284
#define __NR_fallocate 285
#define __NR_timerfd_settime 286
#define __NR_timerfd_gettime 287
#define __NR_accept4 288
#define __NR_signalfd4 289
#define __NR_eventfd2 290
#define __NR_epoll_create1 291
#define __NR_dup3 292
#define __NR_pipe2 293
#define __NR_inotify_init1 294
#define __NR_preadv 295
#define __NR_pwritev 296
#define __NR_rt_tgsigqueueinfo 297
#define __NR_perf_event_open 298
#define __NR_recvmmsg 299
#define __NR_fanotify_init 300
#define __NR_fanotify_mark 301
#define __NR_prlimit64 302
#define __NR_name_to_handle_at 303
#define __NR_open_by_handle_at 304
#define __NR_clock_adjtime 305
#define __NR_syncfs 306
#define __NR_sendmmsg 307
#define __NR_setns 308
#define __NR_getcpu 309
#define __NR_process_vm_readv 310
#define __NR_process_vm_writev 311
#define __NR_kcmp 312
#define __NR_finit_module 313
#define __NR_sched_setattr 314
#define __NR_sched_getattr 315
#define __NR_renameat2 316
#define __NR_seccomp 317
#define __NR_getrandom 318
#define __NR_memfd_create 319
#define __NR_kexec_file_load 320
#define __NR_bpf 321
#define __NR_execveat 322
#define __NR_userfaultfd 323
#define __NR_membarrier 324
#define __NR_mlock2 325
''')
arch_i386 = parse_defs(generic, '''
#define __NR_restart_syscall 0
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
#define __NR_waitpid 7
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
#define __NR_break 17
#define __NR_oldstat 18
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
#define __NR_oldfstat 28
#define __NR_pause 29
#define __NR_utime 30
#define __NR_stty 31
#define __NR_gtty 32
#define __NR_access 33
#define __NR_nice 34
#define __NR_ftime 35
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
#define __NR_prof 44
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
#define __NR_signal 48
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
#define __NR_lock 53
#define __NR_ioctl 54
#define __NR_fcntl 55
#define __NR_mpx 56
#define __NR_setpgid 57
#define __NR_ulimit 58
#define __NR_oldolduname 59
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
#define __NR_sigaction 67
#define __NR_sgetmask 68
#define __NR_ssetmask 69
#define __NR_setreuid 70
#define __NR_setregid 71
#define __NR_sigsuspend 72
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
#define __NR_getrlimit 76
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
#define __NR_select 82
#define __NR_symlink 83
#define __NR_oldlstat 84
#define __NR_readlink 85
#define __NR_uselib 86
#define __NR_swapon 87
#define __NR_reboot 88
#define __NR_readdir 89
#define __NR_mmap 90
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
#define __NR_profil 98
#define __NR_statfs 99
#define __NR_fstatfs 100
#define __NR_ioperm 101
#define __NR_socketcall 102
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_stat 106
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_olduname 109
#define __NR_iopl 110
#define __NR_vhangup 111
#define __NR_idle 112
#define __NR_vm86old 113
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
#define __NR_ipc 117
#define __NR_fsync 118
#define __NR_sigreturn 119
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
#define __NR_modify_ldt 123
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
#define __NR_create_module 127
#define __NR_init_module 128
#define __NR_delete_module 129
#define __NR_get_kernel_syms 130
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
#define __NR_afs_syscall 137
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
#define __NR_getdents 141
#define __NR__newselect 142
#define __NR_flock 143
#define __NR_msync 144
#define __NR_readv 145
#define __NR_writev 146
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
#define __NR_mlock 150
#define __NR_munlock 151
#define __NR_mlockall 152
#define __NR_munlockall 153
#define __NR_sched_setparam 154
#define __NR_sched_getparam 155
#define __NR_sched_setscheduler 156
#define __NR_sched_getscheduler 157
#define __NR_sched_yield 158
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
#define __NR_vm86 166
#define __NR_query_module 167
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
#define __NR_getresgid 171
#define __NR_prctl 172
#define __NR_rt_sigreturn 173
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
#define __NR_rt_sigpending 176
#define __NR_rt_sigtimedwait 177
#define __NR_rt_sigqueueinfo 178
#define __NR_rt_sigsuspend 179
#define __NR_pread64 180
#define __NR_pwrite64 181
#define __NR_chown 182
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
#define __NR_getpmsg 188
#define __NR_putpmsg 189
#define __NR_vfork 190
#define __NR_ugetrlimit 191
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
#define __NR_stat64 195
#define __NR_lstat64 196
#define __NR_fstat64 197
#define __NR_lchown32 198
#define __NR_getuid32 199
#define __NR_getgid32 200
#define __NR_geteuid32 201
#define __NR_getegid32 202
#define __NR_setreuid32 203
#define __NR_setregid32 204
#define __NR_getgroups32 205
#define __NR_setgroups32 206
#define __NR_fchown32 207
#define __NR_setresuid32 208
#define __NR_getresuid32 209
#define __NR_setresgid32 210
#define __NR_getresgid32 211
#define __NR_chown32 212
#define __NR_setuid32 213
#define __NR_setgid32 214
#define __NR_setfsuid32 215
#define __NR_setfsgid32 216
#define __NR_pivot_root 217
#define __NR_mincore 218
#define __NR_madvise 219
#define __NR_getdents64 220
#define __NR_fcntl64 221
#define __NR_gettid 224
#define __NR_readahead 225
#define __NR_setxattr 226
#define __NR_lsetxattr 227
#define __NR_fsetxattr 228
#define __NR_getxattr 229
#define __NR_lgetxattr 230
#define __NR_fgetxattr 231
#define __NR_listxattr 232
#define __NR_llistxattr 233
#define __NR_flistxattr 234
#define __NR_removexattr 235
#define __NR_lremovexattr 236
#define __NR_fremovexattr 237
#define __NR_tkill 238
#define __NR_sendfile64 239
#define __NR_futex 240
#define __NR_sched_setaffinity 241
#define __NR_sched_getaffinity 242
#define __NR_set_thread_area 243
#define __NR_get_thread_area 244
#define __NR_io_setup 245
#define __NR_io_destroy 246
#define __NR_io_getevents 247
#define __NR_io_submit 248
#define __NR_io_cancel 249
#define __NR_fadvise64 250
#define __NR_exit_group 252
#define __NR_lookup_dcookie 253
#define __NR_epoll_create 254
#define __NR_epoll_ctl 255
#define __NR_epoll_wait 256
#define __NR_remap_file_pages 257
#define __NR_set_tid_address 258
#define __NR_timer_create 259
#define __NR_timer_settime 260
#define __NR_timer_gettime 261
#define __NR_timer_getoverrun 262
#define __NR_timer_delete 263
#define __NR_clock_settime 264
#define __NR_clock_gettime 265
#define __NR_clock_getres 266
#define __NR_clock_nanosleep 267
#define __NR_statfs64 268
#define __NR_fstatfs64 269
#define __NR_tgkill 270
#define __NR_utimes 271
#define __NR_fadvise64_64 272
#define __NR_vserver 273
#define __NR_mbind 274
#define __NR_get_mempolicy 275
#define __NR_set_mempolicy 276
#define __NR_mq_open 277
#define __NR_mq_unlink 278
#define __NR_mq_timedsend 279
#define __NR_mq_timedreceive 280
#define __NR_mq_notify 281
#define __NR_mq_getsetattr 282
#define __NR_kexec_load 283
#define __NR_waitid 284
#define __NR_add_key 286
#define __NR_request_key 287
#define __NR_keyctl 288
#define __NR_ioprio_set 289
#define __NR_ioprio_get 290
#define __NR_inotify_init 291
#define __NR_inotify_add_watch 292
#define __NR_inotify_rm_watch 293
#define __NR_migrate_pages 294
#define __NR_openat 295
#define __NR_mkdirat 296
#define __NR_mknodat 297
#define __NR_fchownat 298
#define __NR_futimesat 299
#define __NR_fstatat64 300
#define __NR_unlinkat 301
#define __NR_renameat 302
#define __NR_linkat 303
#define __NR_symlinkat 304
#define __NR_readlinkat 305
#define __NR_fchmodat 306
#define __NR_faccessat 307
#define __NR_pselect6 308
#define __NR_ppoll 309
#define __NR_unshare 310
#define __NR_set_robust_list 311
#define __NR_get_robust_list 312
#define __NR_splice 313
#define __NR_sync_file_range 314
#define __NR_tee 315
#define __NR_vmsplice 316
#define __NR_move_pages 317
#define __NR_getcpu 318
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
#define __NR_timerfd_create 322
#define __NR_eventfd 323
#define __NR_fallocate 324
#define __NR_timerfd_settime 325
#define __NR_timerfd_gettime 326
#define __NR_signalfd4 327
#define __NR_eventfd2 328
#define __NR_epoll_create1 329
#define __NR_dup3 330
#define __NR_pipe2 331
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
#define __NR_recvmmsg 337
#define __NR_fanotify_init 338
#define __NR_fanotify_mark 339
#define __NR_prlimit64 340
#define __NR_name_to_handle_at 341
#define __NR_open_by_handle_at 342
#define __NR_clock_adjtime 343
#define __NR_syncfs 344
#define __NR_sendmmsg 345
#define __NR_setns 346
#define __NR_process_vm_readv 347
#define __NR_process_vm_writev 348
#define __NR_kcmp 349
#define __NR_finit_module 350
#define __NR_sched_setattr 351
#define __NR_sched_getattr 352
#define __NR_renameat2 353
#define __NR_seccomp 354
#define __NR_getrandom 355
#define __NR_memfd_create 356
#define __NR_bpf 357
#define __NR_execveat 358
#define __NR_socket 359
#define __NR_socketpair 360
#define __NR_bind 361
#define __NR_connect 362
#define __NR_listen 363
#define __NR_accept4 364
#define __NR_getsockopt 365
#define __NR_setsockopt 366
#define __NR_getsockname 367
#define __NR_getpeername 368
#define __NR_sendto 369
#define __NR_sendmsg 370
#define __NR_recvfrom 371
#define __NR_recvmsg 372
#define __NR_shutdown 373
#define __NR_userfaultfd 374
#define __NR_membarrier 375
#define __NR_mlock2 376
''')
arch_x32 = parse_defs(generic, '''
#define __NR_read (__X32_SYSCALL_BIT + 0)
#define __NR_write (__X32_SYSCALL_BIT + 1)
#define __NR_open (__X32_SYSCALL_BIT + 2)
#define __NR_close (__X32_SYSCALL_BIT + 3)
#define __NR_stat (__X32_SYSCALL_BIT + 4)
#define __NR_fstat (__X32_SYSCALL_BIT + 5)
#define __NR_lstat (__X32_SYSCALL_BIT + 6)
#define __NR_poll (__X32_SYSCALL_BIT + 7)
#define __NR_lseek (__X32_SYSCALL_BIT + 8)
#define __NR_mmap (__X32_SYSCALL_BIT + 9)
#define __NR_mprotect (__X32_SYSCALL_BIT + 10)
#define __NR_munmap (__X32_SYSCALL_BIT + 11)
#define __NR_brk (__X32_SYSCALL_BIT + 12)
#define __NR_rt_sigprocmask (__X32_SYSCALL_BIT + 14)
#define __NR_pread64 (__X32_SYSCALL_BIT + 17)
#define __NR_pwrite64 (__X32_SYSCALL_BIT + 18)
#define __NR_access (__X32_SYSCALL_BIT + 21)
#define __NR_pipe (__X32_SYSCALL_BIT + 22)
#define __NR_select (__X32_SYSCALL_BIT + 23)
#define __NR_sched_yield (__X32_SYSCALL_BIT + 24)
#define __NR_mremap (__X32_SYSCALL_BIT + 25)
#define __NR_msync (__X32_SYSCALL_BIT + 26)
#define __NR_mincore (__X32_SYSCALL_BIT + 27)
#define __NR_madvise (__X32_SYSCALL_BIT + 28)
#define __NR_shmget (__X32_SYSCALL_BIT + 29)
#define __NR_shmat (__X32_SYSCALL_BIT + 30)
#define __NR_shmctl (__X32_SYSCALL_BIT + 31)
#define __NR_dup (__X32_SYSCALL_BIT + 32)
#define __NR_dup2 (__X32_SYSCALL_BIT + 33)
#define __NR_pause (__X32_SYSCALL_BIT + 34)
#define __NR_nanosleep (__X32_SYSCALL_BIT + 35)
#define __NR_getitimer (__X32_SYSCALL_BIT + 36)
#define __NR_alarm (__X32_SYSCALL_BIT + 37)
#define __NR_setitimer (__X32_SYSCALL_BIT + 38)
#define __NR_getpid (__X32_SYSCALL_BIT + 39)
#define __NR_sendfile (__X32_SYSCALL_BIT + 40)
#define __NR_socket (__X32_SYSCALL_BIT + 41)
#define __NR_connect (__X32_SYSCALL_BIT + 42)
#define __NR_accept (__X32_SYSCALL_BIT + 43)
#define __NR_sendto (__X32_SYSCALL_BIT + 44)
#define __NR_shutdown (__X32_SYSCALL_BIT + 48)
#define __NR_bind (__X32_SYSCALL_BIT + 49)
#define __NR_listen (__X32_SYSCALL_BIT + 50)
#define __NR_getsockname (__X32_SYSCALL_BIT + 51)
#define __NR_getpeername (__X32_SYSCALL_BIT + 52)
#define __NR_socketpair (__X32_SYSCALL_BIT + 53)
#define __NR_clone (__X32_SYSCALL_BIT + 56)
#define __NR_fork (__X32_SYSCALL_BIT + 57)
#define __NR_vfork (__X32_SYSCALL_BIT + 58)
#define __NR_exit (__X32_SYSCALL_BIT + 60)
#define __NR_wait4 (__X32_SYSCALL_BIT + 61)
#define __NR_kill (__X32_SYSCALL_BIT + 62)
#define __NR_uname (__X32_SYSCALL_BIT + 63)
#define __NR_semget (__X32_SYSCALL_BIT + 64)
#define __NR_semop (__X32_SYSCALL_BIT + 65)
#define __NR_semctl (__X32_SYSCALL_BIT + 66)
#define __NR_shmdt (__X32_SYSCALL_BIT + 67)
#define __NR_msgget (__X32_SYSCALL_BIT + 68)
#define __NR_msgsnd (__X32_SYSCALL_BIT + 69)
#define __NR_msgrcv (__X32_SYSCALL_BIT + 70)
#define __NR_msgctl (__X32_SYSCALL_BIT + 71)
#define __NR_fcntl (__X32_SYSCALL_BIT + 72)
#define __NR_flock (__X32_SYSCALL_BIT + 73)
#define __NR_fsync (__X32_SYSCALL_BIT + 74)
#define __NR_fdatasync (__X32_SYSCALL_BIT + 75)
#define __NR_truncate (__X32_SYSCALL_BIT + 76)
#define __NR_ftruncate (__X32_SYSCALL_BIT + 77)
#define __NR_getdents (__X32_SYSCALL_BIT + 78)
#define __NR_getcwd (__X32_SYSCALL_BIT + 79)
#define __NR_chdir (__X32_SYSCALL_BIT + 80)
#define __NR_fchdir (__X32_SYSCALL_BIT + 81)
#define __NR_rename (__X32_SYSCALL_BIT + 82)
#define __NR_mkdir (__X32_SYSCALL_BIT + 83)
#define __NR_rmdir (__X32_SYSCALL_BIT + 84)
#define __NR_creat (__X32_SYSCALL_BIT + 85)
#define __NR_link (__X32_SYSCALL_BIT + 86)
#define __NR_unlink (__X32_SYSCALL_BIT + 87)
#define __NR_symlink (__X32_SYSCALL_BIT + 88)
#define __NR_readlink (__X32_SYSCALL_BIT + 89)
#define __NR_chmod (__X32_SYSCALL_BIT + 90)
#define __NR_fchmod (__X32_SYSCALL_BIT + 91)
#define __NR_chown (__X32_SYSCALL_BIT + 92)
#define __NR_fchown (__X32_SYSCALL_BIT + 93)
#define __NR_lchown (__X32_SYSCALL_BIT + 94)
#define __NR_umask (__X32_SYSCALL_BIT + 95)
#define __NR_gettimeofday (__X32_SYSCALL_BIT + 96)
#define __NR_getrlimit (__X32_SYSCALL_BIT + 97)
#define __NR_getrusage (__X32_SYSCALL_BIT + 98)
#define __NR_sysinfo (__X32_SYSCALL_BIT + 99)
#define __NR_times (__X32_SYSCALL_BIT + 100)
#define __NR_getuid (__X32_SYSCALL_BIT + 102)
#define __NR_syslog (__X32_SYSCALL_BIT + 103)
#define __NR_getgid (__X32_SYSCALL_BIT + 104)
#define __NR_setuid (__X32_SYSCALL_BIT + 105)
#define __NR_setgid (__X32_SYSCALL_BIT + 106)
#define __NR_geteuid (__X32_SYSCALL_BIT + 107)
#define __NR_getegid (__X32_SYSCALL_BIT + 108)
#define __NR_setpgid (__X32_SYSCALL_BIT + 109)
#define __NR_getppid (__X32_SYSCALL_BIT + 110)
#define __NR_getpgrp (__X32_SYSCALL_BIT + 111)
#define __NR_setsid (__X32_SYSCALL_BIT + 112)
#define __NR_setreuid (__X32_SYSCALL_BIT + 113)
#define __NR_setregid (__X32_SYSCALL_BIT + 114)
#define __NR_getgroups (__X32_SYSCALL_BIT + 115)
#define __NR_setgroups (__X32_SYSCALL_BIT + 116)
#define __NR_setresuid (__X32_SYSCALL_BIT + 117)
#define __NR_getresuid (__X32_SYSCALL_BIT + 118)
#define __NR_setresgid (__X32_SYSCALL_BIT + 119)
#define __NR_getresgid (__X32_SYSCALL_BIT + 120)
#define __NR_getpgid (__X32_SYSCALL_BIT + 121)
#define __NR_setfsuid (__X32_SYSCALL_BIT + 122)
#define __NR_setfsgid (__X32_SYSCALL_BIT + 123)
#define __NR_getsid (__X32_SYSCALL_BIT + 124)
#define __NR_capget (__X32_SYSCALL_BIT + 125)
#define __NR_capset (__X32_SYSCALL_BIT + 126)
#define __NR_rt_sigsuspend (__X32_SYSCALL_BIT + 130)
#define __NR_utime (__X32_SYSCALL_BIT + 132)
#define __NR_mknod (__X32_SYSCALL_BIT + 133)
#define __NR_personality (__X32_SYSCALL_BIT + 135)
#define __NR_ustat (__X32_SYSCALL_BIT + 136)
#define __NR_statfs (__X32_SYSCALL_BIT + 137)
#define __NR_fstatfs (__X32_SYSCALL_BIT + 138)
#define __NR_sysfs (__X32_SYSCALL_BIT + 139)
#define __NR_getpriority (__X32_SYSCALL_BIT + 140)
#define __NR_setpriority (__X32_SYSCALL_BIT + 141)
#define __NR_sched_setparam (__X32_SYSCALL_BIT + 142)
#define __NR_sched_getparam (__X32_SYSCALL_BIT + 143)
#define __NR_sched_setscheduler (__X32_SYSCALL_BIT + 144)
#define __NR_sched_getscheduler (__X32_SYSCALL_BIT + 145)
#define __NR_sched_get_priority_max (__X32_SYSCALL_BIT + 146)
#define __NR_sched_get_priority_min (__X32_SYSCALL_BIT + 147)
#define __NR_sched_rr_get_interval (__X32_SYSCALL_BIT + 148)
#define __NR_mlock (__X32_SYSCALL_BIT + 149)
#define __NR_munlock (__X32_SYSCALL_BIT + 150)
#define __NR_mlockall (__X32_SYSCALL_BIT + 151)
#define __NR_munlockall (__X32_SYSCALL_BIT + 152)
#define __NR_vhangup (__X32_SYSCALL_BIT + 153)
#define __NR_modify_ldt (__X32_SYSCALL_BIT + 154)
#define __NR_pivot_root (__X32_SYSCALL_BIT + 155)
#define __NR_prctl (__X32_SYSCALL_BIT + 157)
#define __NR_arch_prctl (__X32_SYSCALL_BIT + 158)
#define __NR_adjtimex (__X32_SYSCALL_BIT + 159)
#define __NR_setrlimit (__X32_SYSCALL_BIT + 160)
#define __NR_chroot (__X32_SYSCALL_BIT + 161)
#define __NR_sync (__X32_SYSCALL_BIT + 162)
#define __NR_acct (__X32_SYSCALL_BIT + 163)
#define __NR_settimeofday (__X32_SYSCALL_BIT + 164)
#define __NR_mount (__X32_SYSCALL_BIT + 165)
#define __NR_umount2 (__X32_SYSCALL_BIT + 166)
#define __NR_swapon (__X32_SYSCALL_BIT + 167)
#define __NR_swapoff (__X32_SYSCALL_BIT + 168)
#define __NR_reboot (__X32_SYSCALL_BIT + 169)
#define __NR_sethostname (__X32_SYSCALL_BIT + 170)
#define __NR_setdomainname (__X32_SYSCALL_BIT + 171)
#define __NR_iopl (__X32_SYSCALL_BIT + 172)
#define __NR_ioperm (__X32_SYSCALL_BIT + 173)
#define __NR_init_module (__X32_SYSCALL_BIT + 175)
#define __NR_delete_module (__X32_SYSCALL_BIT + 176)
#define __NR_quotactl (__X32_SYSCALL_BIT + 179)
#define __NR_getpmsg (__X32_SYSCALL_BIT + 181)
#define __NR_putpmsg (__X32_SYSCALL_BIT + 182)
#define __NR_afs_syscall (__X32_SYSCALL_BIT + 183)
#define __NR_tuxcall (__X32_SYSCALL_BIT + 184)
#define __NR_security (__X32_SYSCALL_BIT + 185)
#define __NR_gettid (__X32_SYSCALL_BIT + 186)
#define __NR_readahead (__X32_SYSCALL_BIT + 187)
#define __NR_setxattr (__X32_SYSCALL_BIT + 188)
#define __NR_lsetxattr (__X32_SYSCALL_BIT + 189)
#define __NR_fsetxattr (__X32_SYSCALL_BIT + 190)
#define __NR_getxattr (__X32_SYSCALL_BIT + 191)
#define __NR_lgetxattr (__X32_SYSCALL_BIT + 192)
#define __NR_fgetxattr (__X32_SYSCALL_BIT + 193)
#define __NR_listxattr (__X32_SYSCALL_BIT + 194)
#define __NR_llistxattr (__X32_SYSCALL_BIT + 195)
#define __NR_flistxattr (__X32_SYSCALL_BIT + 196)
#define __NR_removexattr (__X32_SYSCALL_BIT + 197)
#define __NR_lremovexattr (__X32_SYSCALL_BIT + 198)
#define __NR_fremovexattr (__X32_SYSCALL_BIT + 199)
#define __NR_tkill (__X32_SYSCALL_BIT + 200)
#define __NR_time (__X32_SYSCALL_BIT + 201)
#define __NR_futex (__X32_SYSCALL_BIT + 202)
#define __NR_sched_setaffinity (__X32_SYSCALL_BIT + 203)
#define __NR_sched_getaffinity (__X32_SYSCALL_BIT + 204)
#define __NR_io_destroy (__X32_SYSCALL_BIT + 207)
#define __NR_io_getevents (__X32_SYSCALL_BIT + 208)
#define __NR_io_cancel (__X32_SYSCALL_BIT + 210)
#define __NR_lookup_dcookie (__X32_SYSCALL_BIT + 212)
#define __NR_epoll_create (__X32_SYSCALL_BIT + 213)
#define __NR_remap_file_pages (__X32_SYSCALL_BIT + 216)
#define __NR_getdents64 (__X32_SYSCALL_BIT + 217)
#define __NR_set_tid_address (__X32_SYSCALL_BIT + 218)
#define __NR_restart_syscall (__X32_SYSCALL_BIT + 219)
#define __NR_semtimedop (__X32_SYSCALL_BIT + 220)
#define __NR_fadvise64 (__X32_SYSCALL_BIT + 221)
#define __NR_timer_settime (__X32_SYSCALL_BIT + 223)
#define __NR_timer_gettime (__X32_SYSCALL_BIT + 224)
#define __NR_timer_getoverrun (__X32_SYSCALL_BIT + 225)
#define __NR_timer_delete (__X32_SYSCALL_BIT + 226)
#define __NR_clock_settime (__X32_SYSCALL_BIT + 227)
#define __NR_clock_gettime (__X32_SYSCALL_BIT + 228)
#define __NR_clock_getres (__X32_SYSCALL_BIT + 229)
#define __NR_clock_nanosleep (__X32_SYSCALL_BIT + 230)
#define __NR_exit_group (__X32_SYSCALL_BIT + 231)
#define __NR_epoll_wait (__X32_SYSCALL_BIT + 232)
#define __NR_epoll_ctl (__X32_SYSCALL_BIT + 233)
#define __NR_tgkill (__X32_SYSCALL_BIT + 234)
#define __NR_utimes (__X32_SYSCALL_BIT + 235)
#define __NR_mbind (__X32_SYSCALL_BIT + 237)
#define __NR_set_mempolicy (__X32_SYSCALL_BIT + 238)
#define __NR_get_mempolicy (__X32_SYSCALL_BIT + 239)
#define __NR_mq_open (__X32_SYSCALL_BIT + 240)
#define __NR_mq_unlink (__X32_SYSCALL_BIT + 241)
#define __NR_mq_timedsend (__X32_SYSCALL_BIT + 242)
#define __NR_mq_timedreceive (__X32_SYSCALL_BIT + 243)
#define __NR_mq_getsetattr (__X32_SYSCALL_BIT + 245)
#define __NR_add_key (__X32_SYSCALL_BIT + 248)
#define __NR_request_key (__X32_SYSCALL_BIT + 249)
#define __NR_keyctl (__X32_SYSCALL_BIT + 250)
#define __NR_ioprio_set (__X32_SYSCALL_BIT + 251)
#define __NR_ioprio_get (__X32_SYSCALL_BIT + 252)
#define __NR_inotify_init (__X32_SYSCALL_BIT + 253)
#define __NR_inotify_add_watch (__X32_SYSCALL_BIT + 254)
#define __NR_inotify_rm_watch (__X32_SYSCALL_BIT + 255)
#define __NR_migrate_pages (__X32_SYSCALL_BIT + 256)
#define __NR_openat (__X32_SYSCALL_BIT + 257)
#define __NR_mkdirat (__X32_SYSCALL_BIT + 258)
#define __NR_mknodat (__X32_SYSCALL_BIT + 259)
#define __NR_fchownat (__X32_SYSCALL_BIT + 260)
#define __NR_futimesat (__X32_SYSCALL_BIT + 261)
#define __NR_newfstatat (__X32_SYSCALL_BIT + 262)
#define __NR_unlinkat (__X32_SYSCALL_BIT + 263)
#define __NR_renameat (__X32_SYSCALL_BIT + 264)
#define __NR_linkat (__X32_SYSCALL_BIT + 265)
#define __NR_symlinkat (__X32_SYSCALL_BIT + 266)
#define __NR_readlinkat (__X32_SYSCALL_BIT + 267)
#define __NR_fchmodat (__X32_SYSCALL_BIT + 268)
#define __NR_faccessat (__X32_SYSCALL_BIT + 269)
#define __NR_pselect6 (__X32_SYSCALL_BIT + 270)
#define __NR_ppoll (__X32_SYSCALL_BIT + 271)
#define __NR_unshare (__X32_SYSCALL_BIT + 272)
#define __NR_splice (__X32_SYSCALL_BIT + 275)
#define __NR_tee (__X32_SYSCALL_BIT + 276)
#define __NR_sync_file_range (__X32_SYSCALL_BIT + 277)
#define __NR_utimensat (__X32_SYSCALL_BIT + 280)
#define __NR_epoll_pwait (__X32_SYSCALL_BIT + 281)
#define __NR_signalfd (__X32_SYSCALL_BIT + 282)
#define __NR_timerfd_create (__X32_SYSCALL_BIT + 283)
#define __NR_eventfd (__X32_SYSCALL_BIT + 284)
#define __NR_fallocate (__X32_SYSCALL_BIT + 285)
#define __NR_timerfd_settime (__X32_SYSCALL_BIT + 286)
#define __NR_timerfd_gettime (__X32_SYSCALL_BIT + 287)
#define __NR_accept4 (__X32_SYSCALL_BIT + 288)
#define __NR_signalfd4 (__X32_SYSCALL_BIT + 289)
#define __NR_eventfd2 (__X32_SYSCALL_BIT + 290)
#define __NR_epoll_create1 (__X32_SYSCALL_BIT + 291)
#define __NR_dup3 (__X32_SYSCALL_BIT + 292)
#define __NR_pipe2 (__X32_SYSCALL_BIT + 293)
#define __NR_inotify_init1 (__X32_SYSCALL_BIT + 294)
#define __NR_perf_event_open (__X32_SYSCALL_BIT + 298)
#define __NR_fanotify_init (__X32_SYSCALL_BIT + 300)
#define __NR_fanotify_mark (__X32_SYSCALL_BIT + 301)
#define __NR_prlimit64 (__X32_SYSCALL_BIT + 302)
#define __NR_name_to_handle_at (__X32_SYSCALL_BIT + 303)
#define __NR_open_by_handle_at (__X32_SYSCALL_BIT + 304)
#define __NR_clock_adjtime (__X32_SYSCALL_BIT + 305)
#define __NR_syncfs (__X32_SYSCALL_BIT + 306)
#define __NR_setns (__X32_SYSCALL_BIT + 308)
#define __NR_getcpu (__X32_SYSCALL_BIT + 309)
#define __NR_kcmp (__X32_SYSCALL_BIT + 312)
#define __NR_finit_module (__X32_SYSCALL_BIT + 313)
#define __NR_sched_setattr (__X32_SYSCALL_BIT + 314)
#define __NR_sched_getattr (__X32_SYSCALL_BIT + 315)
#define __NR_renameat2 (__X32_SYSCALL_BIT + 316)
#define __NR_seccomp (__X32_SYSCALL_BIT + 317)
#define __NR_getrandom (__X32_SYSCALL_BIT + 318)
#define __NR_memfd_create (__X32_SYSCALL_BIT + 319)
#define __NR_kexec_file_load (__X32_SYSCALL_BIT + 320)
#define __NR_bpf (__X32_SYSCALL_BIT + 321)
#define __NR_userfaultfd (__X32_SYSCALL_BIT + 323)
#define __NR_membarrier (__X32_SYSCALL_BIT + 324)
#define __NR_mlock2 (__X32_SYSCALL_BIT + 325)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)
#define __NR_readv (__X32_SYSCALL_BIT + 515)
#define __NR_writev (__X32_SYSCALL_BIT + 516)
#define __NR_recvfrom (__X32_SYSCALL_BIT + 517)
#define __NR_sendmsg (__X32_SYSCALL_BIT + 518)
#define __NR_recvmsg (__X32_SYSCALL_BIT + 519)
#define __NR_execve (__X32_SYSCALL_BIT + 520)
#define __NR_ptrace (__X32_SYSCALL_BIT + 521)
#define __NR_rt_sigpending (__X32_SYSCALL_BIT + 522)
#define __NR_rt_sigtimedwait (__X32_SYSCALL_BIT + 523)
#define __NR_rt_sigqueueinfo (__X32_SYSCALL_BIT + 524)
#define __NR_sigaltstack (__X32_SYSCALL_BIT + 525)
#define __NR_timer_create (__X32_SYSCALL_BIT + 526)
#define __NR_mq_notify (__X32_SYSCALL_BIT + 527)
#define __NR_kexec_load (__X32_SYSCALL_BIT + 528)
#define __NR_waitid (__X32_SYSCALL_BIT + 529)
#define __NR_set_robust_list (__X32_SYSCALL_BIT + 530)
#define __NR_get_robust_list (__X32_SYSCALL_BIT + 531)
#define __NR_vmsplice (__X32_SYSCALL_BIT + 532)
#define __NR_move_pages (__X32_SYSCALL_BIT + 533)
#define __NR_preadv (__X32_SYSCALL_BIT + 534)
#define __NR_pwritev (__X32_SYSCALL_BIT + 535)
#define __NR_rt_tgsigqueueinfo (__X32_SYSCALL_BIT + 536)
#define __NR_recvmmsg (__X32_SYSCALL_BIT + 537)
#define __NR_sendmmsg (__X32_SYSCALL_BIT + 538)
#define __NR_process_vm_readv (__X32_SYSCALL_BIT + 539)
#define __NR_process_vm_writev (__X32_SYSCALL_BIT + 540)
#define __NR_setsockopt (__X32_SYSCALL_BIT + 541)
#define __NR_getsockopt (__X32_SYSCALL_BIT + 542)
#define __NR_io_setup (__X32_SYSCALL_BIT + 543)
#define __NR_io_submit (__X32_SYSCALL_BIT + 544)
#define __NR_execveat (__X32_SYSCALL_BIT + 545)
''')
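
# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# The dictionaries built above map symbolic names to numbers, e.g. when assembling
# seccomp-bpf filters; the expected values follow directly from the parsed blocks.
if __name__ == '__main__':
    print(arch_x86_64['__NR_openat'])          # 257
    print(arch_i386['__NR_openat'])            # 295
    print(hex(arch_x32['__NR_openat']))        # 0x40000101 (__X32_SYSCALL_BIT + 257)
    print(hex(generic['AUDIT_ARCH_X86_64']))   # 0xc000003e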
``` |
{
"source": "jonaskaz/Line-Robot",
"score": 3
} |
#### File: Line-Robot/python/plot.py
```python
from os import read
import serial
from serial import Serial
import matplotlib.pyplot as plt
import numpy as np
import time
import pandas as pd
import seaborn as sns
# set up serial communication, comment this out if not connected to arduino
# ser = serial.Serial('COM6',9600)
# ser.close()
# ser.open()
def read_ser_data():
''' Reads data from serial and parses each line into the following format:
[IR1_val, IR2_val, IR3_val, LW_speed, RW_speed]
'''
ser_data = ser.readline().decode()
parsed_ser_data = [float(x) for x in ser_data.split()]
if len(parsed_ser_data) == 5:
return parsed_ser_data
else:
return 0
def collect_data():
''' Collects data of run
'''
with open("plot_data.csv", 'w') as fd:
fd.write("index,IR1,IR2,IR3,LW_speed,RW_speed\n")
# collect data
collect = True # TODO: control this with a button later
# t_end = time.time() + collection_time
index = 0
# while collect and (time.time() < t_end):
while collect:
# wait for a bit
time.sleep(0.001)
# fetch new serial data
ser_data = read_ser_data()
print(ser_data)
if ser_data == 0:
collect = False
break
# append to csv
with open("plot_data.csv", 'a') as fd:
fd.write(f"{index}, {ser_data[0]},{ser_data[1]},{ser_data[2]},{ser_data[3]},{ser_data[4]}\n")
index += 1
def plot_existing_data():
''' Create heatmap from existing data.
'''
df = pd.read_csv('plot_data2 copy.csv',header=0)
index = df['index'].tolist()
IR1 = df['IR1'].tolist()
IR2 = df['IR2'].tolist()
IR3 = df['IR3'].tolist()
LW_speed = df['LW_speed'].tolist()
RW_speed = df['RW_speed'].tolist()
# plot lines
    plt.plot(index, IR1, label = "IR_left")
plt.plot(index, IR2, label = "IR_right")
plt.plot(index, IR3, label = "IR_middle")
plt.plot(index, LW_speed, label = "LW_speed")
plt.plot(index, RW_speed, label = "RW_speed")
plt.legend()
plt.show()
if __name__ == "__main__":
# collect_data()
plot_existing_data()
``` |
{
"source": "jonaskaz/Noodle-API",
"score": 3
} |
#### File: Noodle-API/noodle/main.py
```python
from threading import current_thread
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .config import GAME_MODE, ORDER_MODE
from pydantic import BaseModel
from typing import Optional
import queue
class Payload(BaseModel):
mode: int
flavor: str
toppings: Optional[list] = []
app = FastAPI()
origins = [
"http://localhost:3000",
"localhost:3000",
"http://cup-noodle-app.herokuapp.com",
"https://cup-noodle-app.herokuapp.com",
"cup-noodle-app.herokuapp.com",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
orders = queue.Queue()
@app.get("/", tags=["root"])
async def read_root() -> dict:
return {"message": "Welcome"}
@app.get("/order", tags=["orders"])
async def get_order() -> dict:
if orders.empty():
current_order = {}
else:
current_order = orders.get()
return current_order
@app.post("/order", tags=["orders"])
async def post_order(order: Payload) -> dict:
orders.put(order)
return {
"data": { "Order successfully posted." }
}
```
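The tests below rely on `client`, `order_payload` and `game_payload` fixtures from a conftest that is not part of this dump; a self-contained sketch of the same round trip, assuming the package is importable as `noodle`, could look like this:
```python
from fastapi.testclient import TestClient
from noodle.main import app  # hypothetical import path matching the layout above

client = TestClient(app)

# queue an order, then pop it off again
payload = {"mode": 0, "flavor": "chicken", "toppings": ["onions", "spice"]}
assert client.post("/order", json=payload).status_code == 200
assert client.get("/order").json()["flavor"] == "chicken"

# with the queue drained, the endpoint answers with an empty object
assert client.get("/order").json() == {}
```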
#### File: Noodle-API/tests/test_routes.py
```python
def test_post_order(client, order_payload):
res = client.post("/order", json = order_payload)
assert res.status_code == 200
def test_post_game(client, game_payload):
res = client.post("/order", json = game_payload)
assert res.status_code == 200
def test_get_order(client):
res1 = client.get("/order")
assert res1.json()["mode"] == 0
assert res1.json()["toppings"] == ["onions", "spice"]
res2 = client.get("/order")
assert res2.json()["mode"] == 1
res3 = client.get("/order")
assert res3.json() == {}
``` |
{
"source": "JonasKemmer/AliasFinder",
"score": 3
} |
#### File: AliasFinder/aliasfinder/af_calc.py
```python
import numpy as np
from scipy.signal import find_peaks
import aliasfinder.af_utils as af_utils
def get_phase_info(gls_obj,
power_threshold=None,
sim=False,
frequency_array=None):
""" Get information on the phase of all peaks above a given power
threshold.
Parameters
----------
gls_obj : object
A GLS object from Zechmeisters gls.py.
power_threshold : float, optional
The power threshold (ZK) above which the peaks should be analyzed.
        Default is None, in which case all detected peaks are analyzed.
sim : boolean, optional
If the commited gls object is a simulated one or not.
Default is False, if it is set to true the frequency array must be
given too.
frequency_array : array, optional
If a simulated periodogram is commited, this array must contain the
frequencies of the peaks in the GLS of the observed data.
Returns
-------
array
An array containing the freqencies, powers and phases of all
analyzed peaks.
"""
if sim:
indexes = np.searchsorted(gls_obj.freq, frequency_array)
else:
indexes, _ = find_peaks(gls_obj.p, height=power_threshold)
phases = np.zeros(len(indexes))
freqs = np.zeros(len(indexes))
powers = np.zeros(len(indexes))
for i in range(len(indexes)):
k = indexes[i]
pmax = gls_obj.p[k]
# Best parameters
fbest = gls_obj.freq[k]
ph = np.arctan2(gls_obj._a[k], gls_obj._b[k]) / (2. * np.pi)
phases[i] = ph
freqs[i] = fbest
powers[i] = pmax
return [freqs, powers, phases]
def sim_any_sinmode(gls_obj, in_freq, times):
""" Simulate a sinusoidal signal with a given frequency and amplitude.
Parameters
----------
gls_obj : object
A GLS object from Zechmeisters gls.py.
in_freq : float
Frequency of the sinus which is calculated.
times : array
Time array for which the amplitude of the signal is calculated.
Returns
-------
array
Array of the amplitudes of the simulated sinusoidal at the input times.
"""
k = np.where(np.round(gls_obj.freq, 5) == in_freq)[0]
if len(k) > 1:
k = k[0]
amp = np.sqrt(gls_obj._a[k]**2 + gls_obj._b[k]**2)
ph = np.arctan2(gls_obj._a[k], gls_obj._b[k]) / (2. * np.pi)
T0 = times.min() - ph / in_freq
offset = gls_obj._off[k] + gls_obj._Y
return amp * np.sin(2 * np.pi * in_freq * (times - T0)) + offset
def sample_gls(times, gls_obs, freq, jitter, rvs_err, fbeg, fend, object_name,
peaks_data, search_phase_range):
"""The function to calculate a simulated GLS periodogram.
Parameters
----------
times : array
The times of observation of the observed RVs.
    gls_obs : object
A GLS object from Zechmeisters gls.py from the observed RVs.
freq : float
The frequency for which the GLS should be simulated.
jitter : float
The jitter which is added to the simulated data.
rvs_err : array
The errors of the measured RVs. They are adopted as the errors of
the simulated RVs
fbeg : float, optional
Start frequency of the GLS periodogram.
fend : float, optional
End frequency of the GLS periodogram.
object_name : str, optional
Name of the object from which the RV data originate.
peaks_data : array
The array that contains the information about the peaks measured in the
observed RVs. The frequencies are used to get information about these
peaks in the simulated GLS
search_phase_range : float
The range around the peaks in the observed data for which peaks in
the simulated GLS are searched for.
Returns
-------
arrays
Returns the powers at the corresponding frequencies of the simulated
GLS as well as the phases of the peaks in the simulated GLS.
"""
np.random.seed()
rvs_sim = np.ones(np.shape(times)) \
+ sim_any_sinmode(gls_obs, freq, times) \
+ np.random.normal(0, np.sqrt(jitter**2), times.size)
ls_sim = af_utils.get_gls(times,
rvs_sim,
rvs_err,
fbeg,
fend,
object_name,
freq_array=gls_obs.freq)
dummy_freq_array = np.zeros(np.size(peaks_data[0]))
    # search for the phase of maximum power within a certain frequency
    # range around each peak found in the observed data
for j in range(0, np.size(peaks_data[0])):
index_frequencies = np.where(
np.logical_and(
ls_sim.freq >= peaks_data[0][j] - search_phase_range,
ls_sim.freq <= peaks_data[0][j] + search_phase_range))
index_maxfreqs = max(np.arange(len(ls_sim.p[index_frequencies])),
key=ls_sim.p[index_frequencies].__getitem__)
index_maxfreq = np.argwhere(
ls_sim.freq == ls_sim.freq[index_frequencies][index_maxfreqs])[0]
dummy_freq_array[j] = ls_sim.freq[index_maxfreq]
peaks_sim = get_phase_info(ls_sim,
frequency_array=dummy_freq_array,
sim=True)
return ls_sim.power, ls_sim.freq, (peaks_sim[2] % 1) * 2. * np.pi
def get_metric(gls_obs=None, gls_sim_powers=None):
""" The function to calculate the likelihood.
Parameters
----------
gls_obs : object, optional
The GLS of the observed data.
gls_sim_powers : array, optional
The array which contains the power information from the simulated GLS.
"""
freq = gls_obs.freq[:]
data_powers = gls_obs.power[:]
sim_powers = np.median(gls_sim_powers, axis=0)
sigma_powers = np.std(gls_sim_powers, axis=0)
#Li=np.exp(-(data_powers-sim_powers)**2/sigma_powers**2,dtype=np.float128)
#chisq=(data_powers-sim_powers)**2/sigma_powers**2
squarediff = (data_powers - sim_powers)**2
L = np.sum(squarediff)
return L
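# Hypothetical end-to-end sketch of how these helpers combine (assumed, not
# part of the original module; get_gls is called here without the optional
# freq_array argument, which is an assumption about af_utils):
#
#   gls_obs = af_utils.get_gls(times, rvs, rvs_err, fbeg, fend, object_name)
#   peaks_data = get_phase_info(gls_obs, power_threshold=0.5)
#   sims = [sample_gls(times, gls_obs, test_freq, jitter, rvs_err, fbeg, fend,
#                      object_name, peaks_data, search_phase_range=1e-3)
#           for _ in range(100)]
#   sim_powers = np.array([s[0] for s in sims])
#   L = get_metric(gls_obs=gls_obs, gls_sim_powers=sim_powers)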
``` |
{
"source": "JonasKemmer/exostriker",
"score": 2
} |
#### File: exostriker/lib/act.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
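# NOTE: this module has the structure of auto-generated Qt Designer output
# (pyuic-style setupUi/retranslateUi); changes are normally made in the .ui
# file and regenerated rather than edited by hand.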
class Ui_Activity(object):
def setupUi(self, Activity):
Activity.setObjectName("Activity")
Activity.resize(906, 717)
font = QtGui.QFont()
font.setPointSize(9)
Activity.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../../../../.designer/backup/33_striker.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Activity.setWindowIcon(icon)
Activity.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom))
self.gridLayout_2 = QtWidgets.QGridLayout(Activity)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setHorizontalSpacing(4)
self.gridLayout.setVerticalSpacing(2)
self.gridLayout.setObjectName("gridLayout")
self.label_low_freq = QtWidgets.QLabel(Activity)
self.label_low_freq.setObjectName("label_low_freq")
self.gridLayout.addWidget(self.label_low_freq, 2, 5, 1, 1)
self.regres_wl = QtWidgets.QDoubleSpinBox(Activity)
self.regres_wl.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.regres_wl.setFont(font)
self.regres_wl.setMinimum(0.1)
self.regres_wl.setMaximum(2.0)
self.regres_wl.setSingleStep(0.1)
self.regres_wl.setProperty("value", 0.5)
self.regres_wl.setObjectName("regres_wl")
self.gridLayout.addWidget(self.regres_wl, 8, 3, 1, 1)
self.regres_bt = QtWidgets.QDoubleSpinBox(Activity)
self.regres_bt.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.regres_bt.setFont(font)
self.regres_bt.setMaximum(1.0)
self.regres_bt.setSingleStep(0.1)
self.regres_bt.setProperty("value", 0.5)
self.regres_bt.setObjectName("regres_bt")
self.gridLayout.addWidget(self.regres_bt, 8, 4, 1, 1)
self.radio_Splines = QtWidgets.QRadioButton(Activity)
self.radio_Splines.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_Splines.setFont(font)
self.radio_Splines.setObjectName("radio_Splines")
self.buttonGroup_trendOptions = QtWidgets.QButtonGroup(Activity)
self.buttonGroup_trendOptions.setObjectName("buttonGroup_trendOptions")
self.buttonGroup_trendOptions.addButton(self.radio_Splines)
self.gridLayout.addWidget(self.radio_Splines, 5, 0, 1, 2)
self.spline_bt = QtWidgets.QDoubleSpinBox(Activity)
self.spline_bt.setEnabled(True)
self.spline_bt.setMinimumSize(QtCore.QSize(100, 0))
font = QtGui.QFont()
font.setPointSize(9)
self.spline_bt.setFont(font)
self.spline_bt.setMaximum(20000000.0)
self.spline_bt.setSingleStep(0.1)
self.spline_bt.setProperty("value", 5.0)
self.spline_bt.setObjectName("spline_bt")
self.gridLayout.addWidget(self.spline_bt, 5, 4, 1, 1)
self.radio_Regressions = QtWidgets.QRadioButton(Activity)
self.radio_Regressions.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_Regressions.setFont(font)
self.radio_Regressions.setObjectName("radio_Regressions")
self.buttonGroup_trendOptions.addButton(self.radio_Regressions)
self.gridLayout.addWidget(self.radio_Regressions, 8, 0, 1, 2)
self.label_wl = QtWidgets.QLabel(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.label_wl.setFont(font)
self.label_wl.setObjectName("label_wl")
self.gridLayout.addWidget(self.label_wl, 2, 3, 1, 1)
self.reset_data = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.reset_data.setFont(font)
self.reset_data.setObjectName("reset_data")
self.gridLayout.addWidget(self.reset_data, 2, 8, 1, 1)
self.comboBox_poly = QtWidgets.QComboBox(Activity)
self.comboBox_poly.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.comboBox_poly.setFont(font)
self.comboBox_poly.setObjectName("comboBox_poly")
self.gridLayout.addWidget(self.comboBox_poly, 6, 2, 1, 1)
self.flatten_data = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.flatten_data.setFont(font)
self.flatten_data.setChecked(True)
self.flatten_data.setObjectName("flatten_data")
self.buttonGroup_plot2 = QtWidgets.QButtonGroup(Activity)
self.buttonGroup_plot2.setObjectName("buttonGroup_plot2")
self.buttonGroup_plot2.addButton(self.flatten_data)
self.gridLayout.addWidget(self.flatten_data, 3, 10, 1, 1)
self.poly_bt = QtWidgets.QDoubleSpinBox(Activity)
self.poly_bt.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.poly_bt.setFont(font)
self.poly_bt.setMaximum(1.0)
self.poly_bt.setSingleStep(0.1)
self.poly_bt.setProperty("value", 0.5)
self.poly_bt.setObjectName("poly_bt")
self.gridLayout.addWidget(self.poly_bt, 6, 4, 1, 1)
spacerItem = QtWidgets.QSpacerItem(13, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 3, 5, 1, 1)
self.print_stat = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.print_stat.setFont(font)
self.print_stat.setObjectName("print_stat")
self.gridLayout.addWidget(self.print_stat, 2, 7, 1, 1)
self.line_2 = QtWidgets.QFrame(Activity)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout.addWidget(self.line_2, 2, 6, 8, 1)
self.saveProduct = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.saveProduct.setFont(font)
self.saveProduct.setObjectName("saveProduct")
self.gridLayout.addWidget(self.saveProduct, 8, 10, 1, 1)
self.radio_GPs = QtWidgets.QRadioButton(Activity)
self.radio_GPs.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_GPs.setFont(font)
self.radio_GPs.setObjectName("radio_GPs")
self.buttonGroup_trendOptions.addButton(self.radio_GPs)
self.gridLayout.addWidget(self.radio_GPs, 9, 0, 1, 2)
self.GLS_of_data = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.GLS_of_data.setFont(font)
self.GLS_of_data.setObjectName("GLS_of_data")
self.buttonGroup_plot2.addButton(self.GLS_of_data)
self.gridLayout.addWidget(self.GLS_of_data, 4, 10, 1, 1)
self.GP_period = QtWidgets.QDoubleSpinBox(Activity)
self.GP_period.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.GP_period.setFont(font)
self.GP_period.setObjectName("GP_period")
self.gridLayout.addWidget(self.GP_period, 9, 4, 1, 1)
self.checkBox_GP_robust = QtWidgets.QCheckBox(Activity)
self.checkBox_GP_robust.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.checkBox_GP_robust.setFont(font)
self.checkBox_GP_robust.setObjectName("checkBox_GP_robust")
self.gridLayout.addWidget(self.checkBox_GP_robust, 9, 5, 1, 1)
self.spline_wl = QtWidgets.QDoubleSpinBox(Activity)
self.spline_wl.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(9)
self.spline_wl.setFont(font)
self.spline_wl.setMinimum(0.1)
self.spline_wl.setMaximum(2000000.0)
self.spline_wl.setSingleStep(0.1)
self.spline_wl.setProperty("value", 180.0)
self.spline_wl.setObjectName("spline_wl")
self.gridLayout.addWidget(self.spline_wl, 5, 3, 1, 1)
self.click_to_reject = QtWidgets.QCheckBox(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.click_to_reject.setFont(font)
self.click_to_reject.setObjectName("click_to_reject")
self.gridLayout.addWidget(self.click_to_reject, 3, 7, 1, 2)
self.readme_button = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.readme_button.setFont(font)
self.readme_button.setObjectName("readme_button")
self.gridLayout.addWidget(self.readme_button, 9, 10, 1, 1)
self.radio_remove_mean = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_remove_mean.setFont(font)
self.radio_remove_mean.setChecked(False)
self.radio_remove_mean.setObjectName("radio_remove_mean")
self.buttonGroup_trendOptions.addButton(self.radio_remove_mean)
self.gridLayout.addWidget(self.radio_remove_mean, 3, 1, 1, 1)
self.comboBox_splines = QtWidgets.QComboBox(Activity)
self.comboBox_splines.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(9)
self.comboBox_splines.setFont(font)
self.comboBox_splines.setObjectName("comboBox_splines")
self.gridLayout.addWidget(self.comboBox_splines, 5, 2, 1, 1)
self.comboBox_GP = QtWidgets.QComboBox(Activity)
self.comboBox_GP.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.comboBox_GP.setFont(font)
self.comboBox_GP.setObjectName("comboBox_GP")
self.gridLayout.addWidget(self.comboBox_GP, 9, 2, 1, 1)
self.kernel_size = QtWidgets.QDoubleSpinBox(Activity)
self.kernel_size.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.kernel_size.setFont(font)
self.kernel_size.setDecimals(1)
self.kernel_size.setMaximum(100.0)
self.kernel_size.setSingleStep(0.1)
self.kernel_size.setProperty("value", 5.0)
self.kernel_size.setObjectName("kernel_size")
self.gridLayout.addWidget(self.kernel_size, 9, 3, 1, 1)
self.try_button = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.try_button.setFont(font)
self.try_button.setObjectName("try_button")
self.gridLayout.addWidget(self.try_button, 2, 10, 1, 1)
self.GLS_of_detr_data = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.GLS_of_detr_data.setFont(font)
self.GLS_of_detr_data.setObjectName("GLS_of_detr_data")
self.buttonGroup_plot2.addButton(self.GLS_of_detr_data)
self.gridLayout.addWidget(self.GLS_of_detr_data, 5, 10, 1, 1)
self.comboBox_regs = QtWidgets.QComboBox(Activity)
self.comboBox_regs.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.comboBox_regs.setFont(font)
self.comboBox_regs.setObjectName("comboBox_regs")
self.gridLayout.addWidget(self.comboBox_regs, 8, 2, 1, 1)
self.GLS_of_model = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.GLS_of_model.setFont(font)
self.GLS_of_model.setObjectName("GLS_of_model")
self.buttonGroup_plot2.addButton(self.GLS_of_model)
self.gridLayout.addWidget(self.GLS_of_model, 6, 10, 1, 1)
self.comboBox_sliders = QtWidgets.QComboBox(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.comboBox_sliders.setFont(font)
self.comboBox_sliders.setObjectName("comboBox_sliders")
self.gridLayout.addWidget(self.comboBox_sliders, 4, 2, 1, 1)
self.poly_wl = QtWidgets.QDoubleSpinBox(Activity)
self.poly_wl.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.poly_wl.setFont(font)
self.poly_wl.setMinimum(0.1)
self.poly_wl.setMaximum(2.0)
self.poly_wl.setSingleStep(0.1)
self.poly_wl.setProperty("value", 0.5)
self.poly_wl.setObjectName("poly_wl")
self.gridLayout.addWidget(self.poly_wl, 6, 3, 1, 1)
self.line = QtWidgets.QFrame(Activity)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout.addWidget(self.line, 2, 9, 9, 1)
self.label_method = QtWidgets.QLabel(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.label_method.setFont(font)
self.label_method.setObjectName("label_method")
self.gridLayout.addWidget(self.label_method, 2, 2, 1, 1)
self.label_high_freq = QtWidgets.QLabel(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.label_high_freq.setFont(font)
self.label_high_freq.setObjectName("label_high_freq")
self.gridLayout.addWidget(self.label_high_freq, 2, 4, 1, 1)
self.radio_remove_median = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_remove_median.setFont(font)
self.radio_remove_median.setChecked(True)
self.radio_remove_median.setObjectName("radio_remove_median")
self.buttonGroup_trendOptions.addButton(self.radio_remove_median)
self.gridLayout.addWidget(self.radio_remove_median, 3, 0, 1, 1)
self.radio_Polynomials = QtWidgets.QRadioButton(Activity)
self.radio_Polynomials.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_Polynomials.setFont(font)
self.radio_Polynomials.setObjectName("radio_Polynomials")
self.buttonGroup_trendOptions.addButton(self.radio_Polynomials)
self.gridLayout.addWidget(self.radio_Polynomials, 6, 0, 1, 2)
self.filter_order = QtWidgets.QDoubleSpinBox(Activity)
self.filter_order.setMinimumSize(QtCore.QSize(100, 0))
font = QtGui.QFont()
font.setPointSize(9)
self.filter_order.setFont(font)
self.filter_order.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom))
self.filter_order.setDecimals(0)
self.filter_order.setMinimum(1.0)
self.filter_order.setMaximum(30.0)
self.filter_order.setSingleStep(1.0)
self.filter_order.setProperty("value", 3.0)
self.filter_order.setObjectName("filter_order")
self.gridLayout.addWidget(self.filter_order, 4, 3, 1, 1)
self.radio_timeW = QtWidgets.QRadioButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.radio_timeW.setFont(font)
self.radio_timeW.setObjectName("radio_timeW")
self.buttonGroup_trendOptions.addButton(self.radio_timeW)
self.gridLayout.addWidget(self.radio_timeW, 4, 0, 1, 2)
self.plot = PlotWidget(Activity)
self.plot.setMouseTracking(True)
self.plot.setObjectName("plot")
self.gridLayout.addWidget(self.plot, 0, 0, 1, 11)
self.plot_2 = PlotWidget(Activity)
self.plot_2.setMouseTracking(True)
self.plot_2.setObjectName("plot_2")
self.gridLayout.addWidget(self.plot_2, 1, 0, 1, 11)
self.label = QtWidgets.QLabel(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 2, 0, 1, 2)
self.filter_high_freq = QtWidgets.QDoubleSpinBox(Activity)
self.filter_high_freq.setMinimumSize(QtCore.QSize(100, 0))
font = QtGui.QFont()
font.setPointSize(9)
self.filter_high_freq.setFont(font)
self.filter_high_freq.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom))
self.filter_high_freq.setDecimals(3)
self.filter_high_freq.setMinimum(1.0)
self.filter_high_freq.setMaximum(999999.0)
self.filter_high_freq.setSingleStep(1.0)
self.filter_high_freq.setProperty("value", 1.0)
self.filter_high_freq.setObjectName("filter_high_freq")
self.gridLayout.addWidget(self.filter_high_freq, 4, 4, 1, 1)
self.filter_low_freq = QtWidgets.QDoubleSpinBox(Activity)
self.filter_low_freq.setMinimumSize(QtCore.QSize(100, 0))
font = QtGui.QFont()
font.setPointSize(9)
self.filter_low_freq.setFont(font)
self.filter_low_freq.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom))
self.filter_low_freq.setDecimals(3)
self.filter_low_freq.setMinimum(1.0)
self.filter_low_freq.setMaximum(999999.0)
self.filter_low_freq.setSingleStep(1.0)
self.filter_low_freq.setProperty("value", 365.0)
self.filter_low_freq.setObjectName("filter_low_freq")
self.gridLayout.addWidget(self.filter_low_freq, 4, 5, 1, 1)
self.add_epoch = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.add_epoch.setFont(font)
self.add_epoch.setObjectName("add_epoch")
self.gridLayout.addWidget(self.add_epoch, 8, 7, 1, 1)
self.extra_BJD = QtWidgets.QDoubleSpinBox(Activity)
self.extra_BJD.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setPointSize(9)
self.extra_BJD.setFont(font)
self.extra_BJD.setDecimals(3)
self.extra_BJD.setMinimum(-9999999.0)
self.extra_BJD.setMaximum(9999999.0)
self.extra_BJD.setProperty("value", 2457000.0)
self.extra_BJD.setObjectName("extra_BJD")
self.gridLayout.addWidget(self.extra_BJD, 8, 8, 1, 1)
self.bin_data = QtWidgets.QDoubleSpinBox(Activity)
self.bin_data.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(9)
self.bin_data.setFont(font)
self.bin_data.setSuffix("")
self.bin_data.setDecimals(6)
self.bin_data.setMinimum(1e-05)
self.bin_data.setSingleStep(0.001)
self.bin_data.setProperty("value", 0.01)
self.bin_data.setObjectName("bin_data")
self.gridLayout.addWidget(self.bin_data, 6, 8, 1, 1)
self.button_bin_data = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.button_bin_data.setFont(font)
self.button_bin_data.setObjectName("button_bin_data")
self.gridLayout.addWidget(self.button_bin_data, 6, 7, 1, 1)
self.act_sigma_clip = QtWidgets.QDoubleSpinBox(Activity)
self.act_sigma_clip.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(9)
self.act_sigma_clip.setFont(font)
self.act_sigma_clip.setDecimals(4)
self.act_sigma_clip.setMinimum(0.5)
self.act_sigma_clip.setMaximum(30.0)
self.act_sigma_clip.setSingleStep(0.5)
self.act_sigma_clip.setProperty("value", 10.0)
self.act_sigma_clip.setObjectName("act_sigma_clip")
self.gridLayout.addWidget(self.act_sigma_clip, 5, 8, 1, 1)
self.button_sigma_clip = QtWidgets.QPushButton(Activity)
font = QtGui.QFont()
font.setPointSize(9)
self.button_sigma_clip.setFont(font)
self.button_sigma_clip.setObjectName("button_sigma_clip")
self.gridLayout.addWidget(self.button_sigma_clip, 5, 7, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.retranslateUi(Activity)
QtCore.QMetaObject.connectSlotsByName(Activity)
def retranslateUi(self, Activity):
_translate = QtCore.QCoreApplication.translate
Activity.setWindowTitle(_translate("Activity", "Activity"))
self.label_low_freq.setText(_translate("Activity", "low freq "))
self.radio_Splines.setText(_translate("Activity", "Splines"))
self.radio_Regressions.setText(_translate("Activity", "Regressions"))
self.label_wl.setText(_translate("Activity", "Window length"))
self.reset_data.setText(_translate("Activity", "Reset"))
self.flatten_data.setText(_translate("Activity", "detrended data"))
self.print_stat.setText(_translate("Activity", "Print stat"))
self.saveProduct.setText(_translate("Activity", "Save modif. data"))
self.radio_GPs.setText(_translate("Activity", "Gaussian Processes"))
self.GLS_of_data.setText(_translate("Activity", "GLS of input data"))
self.checkBox_GP_robust.setText(_translate("Activity", "robust"))
self.click_to_reject.setText(_translate("Activity", "remove outliers"))
self.readme_button.setText(_translate("Activity", "READ ME"))
self.radio_remove_mean.setText(_translate("Activity", "Mean"))
self.try_button.setText(_translate("Activity", "Try!"))
self.GLS_of_detr_data.setText(_translate("Activity", "GLS of modif. data"))
self.GLS_of_model.setText(_translate("Activity", "GLS of model"))
self.label_method.setText(_translate("Activity", "Method"))
self.label_high_freq.setText(_translate("Activity", "high freq"))
self.radio_remove_median.setText(_translate("Activity", "Median"))
self.radio_Polynomials.setText(_translate("Activity", "Polynomials"))
self.radio_timeW.setText(_translate("Activity", "Filter"))
self.label.setText(_translate("Activity", "Subtract"))
self.add_epoch.setText(_translate("Activity", "Add/Remove BJD"))
self.button_bin_data.setText(_translate("Activity", "Bin data [d]"))
self.button_sigma_clip.setText(_translate("Activity", "sigma clip"))
from pyqtgraph import PlotWidget
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Activity = QtWidgets.QWidget()
ui = Ui_Activity()
ui.setupUi(Activity)
Activity.show()
sys.exit(app.exec_())
```
#### File: exostriker/lib/TESS_pdc_window.py
```python
import sys #,os
from PyQt5 import QtWidgets,QtGui
class pdc(QtWidgets.QDialog):
def __init__(self, parent = None):
# super(show_symbols, self).__init__(parent)
super(pdc, self).__init__()
self.layout = QtWidgets.QVBoxLayout(self)
self.title = 'This is a test window'
# self.setFixedSize(550, 800)
# self.widget = QtWidgets.QWidget(self)
# self.widget.setLayout(QtWidgets.QGridLayout())
# self.widget.layout().addWidget(self.text)
#self.layout=QtWidgets.QGridLayout() # layout for the central widget
        self.widget = QtWidgets.QWidget(self) # central widget (QWidget lives in QtWidgets under PyQt5)
# self.widget.setLayout(layout)
self.init_buttons()
def init_buttons(self):
        self.radio_group = QtWidgets.QButtonGroup(self.widget) # radio-button group
        self.button1 = QtWidgets.QRadioButton('SAP FLUX', self)
        self.button2 = QtWidgets.QRadioButton('PDCSAP FLUX', self)
self.radio_group.addButton(self.button1)
self.radio_group.addButton(self.button2)
self.radio_group.setId(self.button1,1)
self.radio_group.setId(self.button2,2)
self.layout.addWidget(self.button1)
self.layout.addWidget(self.button2)
        self.cancel_button = QtWidgets.QPushButton('Accept', self)
self.layout.addWidget(self.cancel_button)
self.button1.setChecked(True)
self.cancel_button.clicked.connect(self.close)
#self.Ok_button.clicked.connect(self.get_radio)
def return_but_N(self):
        # Return the id of the checked radio button (1 = SAP FLUX, 2 = PDCSAP FLUX)
return self.radio_group.checkedId()
# static method to create the dialog and return button pressed
@staticmethod
def get_radio(parent = None):
dialog = pdc(parent)
result = dialog.exec_()
rad_but = dialog.return_but_N()
return (rad_but)
if __name__ == '__main__':
# app = QtWidgets.QApplication(sys.argv)
#w = show_symbols()
# w.show()
#sys.exit(app.exec_())
    app = QtWidgets.QApplication([])
    but = pdc.get_radio()
# print("{} {} {}".format(date, time, ok))
app.exec_()
``` |